95 Commits

Author SHA1 Message Date
Mukhtar Akere
22dae9efad Add a new worker that checks if an account is opened
Some checks failed
Release Docker Build / docker (push) Has been cancelled
GoReleaser / goreleaser (push) Has been cancelled
2025-09-17 23:30:45 +01:00
Mukhtar Akere
3f0870cd1c torbox: fix pagination bug, fix download uncached bug 2025-09-16 21:34:58 +01:00
Mukhtar Akere
30b2db06e7 Rewrote account switching, fix some minor bugs here and there 2025-09-16 21:15:24 +01:00
Mukhtar Akere
76f5b85313 Fix issues with dir-cache-time, umask and wrongly set gid,uid, add extra vfs options 2025-09-05 16:11:22 +01:00
Mukhtar Akere
85cd37f29b Revert former beta changes 2025-08-30 04:10:18 +01:00
Mukhtar Akere
aff12c2e4b Fix Added bug in torrent 2025-08-28 03:26:43 +01:00
Mukhtar Akere
d76ca032ab hotfix config update 2025-08-28 01:30:54 +01:00
Mukhtar Akere
8bb786c689 hotfix nil downloadLink 2025-08-27 23:57:49 +01:00
Mukhtar Akere
83058489b6 Add callback URL for post-processing 2025-08-27 13:02:43 +01:00
Mukhtar Akere
267cc2d32b Fix issues with account switching 2025-08-26 15:31:24 +01:00
Mukhtar Akere
eefe8a3901 Hotfix for download link generation and account switching 2025-08-24 21:54:26 +01:00
Mukhtar Akere
618eb73067 - Add support for multi-season imports
- Improve in-memory storage, which reduces memory usage
- Fix issues with rclone integration
2025-08-24 16:25:37 +01:00
Mukhtar Akere
f8667938b6 Add more rclone flags, fix minor issues 2025-08-23 06:00:07 +01:00
Mukhtar Akere
b0a698f15e - Improve memory footprint
- Add batch processing for arr repairs
2025-08-21 03:32:46 +01:00
Mukhtar Akere
2548c21e5b Fix rclone file log
Some checks failed
GoReleaser / goreleaser (push) Has been cancelled
Release Docker Build / docker (push) Has been cancelled
2025-08-19 01:01:53 +01:00
Mukhtar Akere
1b03ccefbb Hotfix rclone logging flags 2025-08-19 00:55:43 +01:00
Mukhtar Akere
e3a249a9cc Fix issues with rclone mounting
Some checks failed
GoReleaser / goreleaser (push) Has been cancelled
Release Docker Build / docker (push) Has been cancelled
2025-08-18 22:12:26 +01:00
Mukhtar Akere
8696db42d2 - Add more rclone supports
- Add rclone log viewer
- Add more stats to Stats page
- Fix some minor bugs
2025-08-18 01:57:02 +01:00
Mukhtar Akere
742d8fb088 - Fix issues with cache dir
Some checks failed
GoReleaser / goreleaser (push) Has been cancelled
Release Docker Build / docker (push) Has been cancelled
- Fix responsiveness issue with navbars
- Support user entry for users running as non-root
- Other minor fixes
2025-08-12 15:14:42 +01:00
Mukhtar Akere
a0e9f7f553 Fix issues with exit code on windows, fix gh-docs
Some checks failed
GoReleaser / goreleaser (push) Has been cancelled
Release Docker Build / docker (push) Has been cancelled
2025-08-10 11:35:50 +01:00
Mukhtar Akere
4be4f6b293 Merge branch 'beta' 2025-08-10 11:09:11 +01:00
Mukhtar Akere
6c8949b831 Add auth to qbittorent middleware 2025-08-09 20:25:16 +01:00
Mukhtar Akere
0dd1efb07c Final bug fixes 2025-08-09 19:57:32 +01:00
Mukhtar Akere
3aeb806033 Wrap up 1.1.0 2025-08-09 10:55:10 +01:00
Mukhtar Akere
7c8156eacf Fix nil map 2025-08-08 13:17:09 +01:00
Mukhtar Akere
d8a963f77f fix failed cache dir 2025-08-08 12:49:29 +01:00
Mukhtar Akere
27e7bc8f47 fix failed cache dir 2025-08-08 12:48:05 +01:00
Mukhtar Akere
1d243dd12b Add Stats page 2025-08-08 12:45:58 +01:00
Mukhtar Akere
b4efa22bfd Fix issues with no global config 2025-08-08 06:04:42 +01:00
Mukhtar Akere
6f9fafd7d8 Migrate to full rclone rcd 2025-08-08 05:22:52 +01:00
Mukhtar Akere
eba24c9d63 Fix issues with rclone setup 2025-08-07 05:31:07 +01:00
Mukhtar Akere
c620ba3d56 Add vfs cache poll interval 2025-08-05 12:29:55 +01:00
Mukhtar Akere
fab3a7e4f7 minor fixes, change help text 2025-08-05 11:49:52 +01:00
Mukhtar Akere
01615cb51e Cleanup mounts 2025-08-05 05:18:24 +01:00
Mukhtar Akere
cb63fc69f5 Final fix for writeheader 2025-08-05 05:01:34 +01:00
Mukhtar Akere
40755fbdde Fix issues with headers 2025-08-05 04:39:03 +01:00
Mukhtar Akere
d0ae839617 Fix issues with headers 2025-08-05 04:28:38 +01:00
Mukhtar Akere
ce972779c3 Fix superfluous header issue 2025-08-05 04:01:41 +01:00
Mukhtar Akere
139249a1f3 - Add mounting support
- Fix minor issues
2025-08-04 16:57:09 +01:00
Mukhtar Akere
a60d93677f Fix config.html 2025-07-24 03:07:20 +01:00
Mukhtar Akere
9c31ad266e Fix config.html 2025-07-24 03:03:18 +01:00
Mukhtar Akere
3d2fcf5656 Fix superfluous header, other minor bugs 2025-07-21 20:35:49 +01:00
Mukhtar Akere
afe577bf2f - Fix repair bugs
- Minor html/js bugs from new template
- Other minor issues
2025-07-13 06:30:02 +01:00
Mukhtar Akere
604402250e hotfix login and registration 2025-07-12 00:57:48 +01:00
Mukhtar Akere
74615a80ff Fix config.js 2025-07-11 13:17:43 +01:00
Sadman Sakib
b901bd5175 Feature/torbox provider improvements (#100)
- Add Torbox WebDAV implementation
- Fix Issues with sample and extension checks
2025-07-11 13:17:03 +01:00
Mukhtar Akere
8c56e59107 Fix some UI bugs; colors etc 2025-07-11 06:03:11 +01:00
Mukhtar Akere
b8b9e76753 Add seeders, add Remove selected from debrid button 2025-07-10 15:15:02 +01:00
Mukhtar Akere
6fb54d322e Fix dockerignore 2025-07-10 02:31:30 +01:00
Mukhtar Akere
cf61546bec Move to tailwind-build instead of CDNs 2025-07-10 02:17:35 +01:00
Mukhtar Akere
c72867ff57 Testing a new UI 2025-07-09 20:08:09 +01:00
Mukhtar Akere
fa6920f94a Merge branch 'beta'
Some checks failed
GoReleaser / goreleaser (push) Has been cancelled
Release Docker Build / docker (push) Has been cancelled
2025-07-09 05:14:39 +01:00
Mukhtar Akere
dba5604d79 fix refresh rclone http client 2025-07-07 00:08:48 +01:00
iPromKnight
f656b7e4e2 feat: Allow deleting all __bad__ with a single button (#98) 2025-07-04 20:13:12 +01:00
Mukhtar Akere
c7b07137c5 Fix repair bug 2025-07-03 23:36:30 +01:00
Mukhtar Akere
c0aa4eaeba Fix modtime bug 2025-07-02 01:17:31 +01:00
Mukhtar Akere
2c90e518aa fix playback issues 2025-07-01 16:10:23 +01:00
Mukhtar Akere
dec7d93272 fix streaming 2025-07-01 15:28:19 +01:00
Mukhtar Akere
8d092615db Update stream client; Add repair strategy 2025-07-01 04:42:33 +01:00
iPromKnight
a4ee0973cc fix: AllDebrid webdav compatibility, and uncached downloads (#97) 2025-07-01 04:10:21 +01:00
Mukhtar Akere
ab12610346 Merge branch 'beta'
Some checks failed
GoReleaser / goreleaser (push) Has been cancelled
Release Docker Build / docker (push) Has been cancelled
2025-06-26 21:15:22 +01:00
Mukhtar Akere
1d19be9013 hotfix repair html table 2025-06-26 07:31:12 +01:00
Mukhtar Akere
cee0e20fe1 hotfix repair and download rate limit 2025-06-26 06:08:50 +01:00
Mukhtar Akere
a3e698e04f Add repair and download rate limit 2025-06-26 05:45:20 +01:00
Mukhtar Akere
e123a2fd5e Hotfix issues with 1.0.3 2025-06-26 03:51:28 +01:00
Mukhtar Akere
817051589e Move to per-torrent repair; Fix issues with adding torrents 2025-06-23 18:54:52 +01:00
Mukhtar Akere
705de2d2bc Merge branch 'beta'
Some checks failed
GoReleaser / goreleaser (push) Has been cancelled
Release Docker Build / docker (push) Has been cancelled
2025-06-23 12:00:53 +01:00
Mukhtar Akere
54c421a480 Update Docs 2025-06-23 11:59:26 +01:00
Mukhtar Akere
1b98b994b7 Add size to arr ContentFile 2025-06-19 18:23:38 +01:00
Mukhtar Akere
06096c3748 Hotfix empty arr setup 2025-06-19 17:58:30 +01:00
Mukhtar Akere
7474011ef0 Update repair tool 2025-06-19 15:56:01 +01:00
Mukhtar Akere
086aa3b1ff Improve Arr integrations 2025-06-19 14:40:12 +01:00
Mukhtar Akere
c15e9d8f70 Update repair 2025-06-18 12:44:05 +01:00
Mukhtar Akere
b2e99585f7 Fix issues with repair, move to a different streaming option 2025-06-18 10:42:44 +01:00
Mukhtar Akere
5661b05ec1 added CET timezone 2025-06-16 22:54:11 +01:00
Mukhtar Akere
b7226b21ec added CET timezone 2025-06-16 22:41:46 +01:00
Mukhtar Akere
605d5b81c2 Fix duration bug in config 2025-06-16 13:55:02 +01:00
Mukhtar Akere
8d87c602b9 - Add remove stalled torrent
- Few cleanup
2025-06-15 22:46:07 +01:00
Mukhtar Akere
7cf25f53e7 hotfix 2025-06-14 19:32:50 +01:00
Mukhtar Akere
22280f15cf cleanup torrent cache 2025-06-14 16:55:45 +01:00
Mukhtar Akere
a539aa53bd - Speed up repairs when checking links \n
- Remove run on start for repairs since it causes issues \n
- Add support for arr-specific debrid
- Support for queuing system
- Support for no-op when sending torrents to debrid
2025-06-14 16:09:28 +01:00
Mukhtar Akere
3efda45304 - Implement multi-download api tokens
- Move things around a bit
2025-06-08 19:06:17 +01:00
Mukhtar Akere
5bf1dab5e6 Torrent Queuing for Botched torrent (#83)
* Implement a queue for handling failed torrent

* Add checks for getting slots

* Few other cleanups, change some function names
2025-06-07 17:23:41 +01:00
Mukhtar Akere
84603b084b Some improvements to beta 2025-06-07 10:03:01 +01:00
Mukhtar Akere
dfcf8708f1 final prep for 1.0.3 2025-06-03 10:45:23 +01:00
Mukhtar Akere
30a1dd74a7 Add Basic healthcheck 2025-06-02 20:45:39 +01:00
Mukhtar Akere
f041ef47a7 fix cloudflare, probably? 2025-06-02 20:04:41 +01:00
Mukhtar Akere
349a13468b fix cloudflare, maybe? 2025-06-02 15:44:03 +01:00
Mukhtar Akere
9c6c44d785 - Revamp decypharr arch \n
- Add callback_ur, download_folder to addContent API \n
- Fix few bugs \n
- More declarative UI keywords
- Speed up repairs
- Few other improvements/bug fixes
2025-06-02 12:57:36 +01:00
Mukhtar Akere
1cd09239f9 - Add more indepth stats like number of torrents, profile details etc
- Add torrent ingest endpoints
- Add issue template
2025-05-29 04:05:44 +01:00
Elias Benbourenane
f9c49cbbef Torrent list context menu (#40)
* feat: Torrent list context menu

* style: Leave more padding on the context menu for smaller screens
2025-05-28 07:29:18 -07:00
Mukhtar Akere
60b8d87f1c hotfix rar PR 2025-05-28 00:14:43 +01:00
Elias Benbourenane
fbd6cd5038 Random access for RARed RealDebrid torrents (#61)
* feat: AI translated port of RARAR.py in Go

* feat: Extract and cache byte ranges of RARed RD torrents

* feat: Stream and download files with byte ranges if specified

* refactor: Use a more structured data format for byte ranges

* fix: Rework streaming to fix error handling

* perf: More efficient RAR file pre-processing

* feat: Made the RAR unpacker an optional config option

* refactor: Remove unnecessary Rar prefix for more idiomatic code

* refactor: More appropriate private method declaration

* feat: Error handling for parsing RARed torrents with retry requests and EOF validation

* fix: Correctly parse unicode file names

* fix: Handle special character conversion for RAR torrent file names

* refactor: Removed debug logs

* feat: Only allow two concurrent RAR unpacking tasks

* fix: Include "<" and ">" as unsafe chars for RAR unpacking

* refactor: Separate types into their own file

* refactor: Don't read RAR files on reader initialization
2025-05-27 16:10:23 -07:00
Mukhtar Akere
87bf8d0574 Merge branch 'beta'
Some checks failed
GoReleaser / goreleaser (push) Has been cancelled
Release Docker Build / docker (push) Has been cancelled
2025-05-27 23:45:13 +01:00
Mukhtar Akere
7f25599b60 - Add support for per-file deletion
- Per-file repair instead of per-torrent
- Fix issues with LoadLocation
- Fix other minor bugs with torbox
2025-05-27 19:31:19 +01:00
176 changed files with 22073 additions and 7962 deletions

View File

@@ -7,16 +7,16 @@ tmp_dir = "tmp"
bin = "./tmp/main"
cmd = "bash -c 'go build -ldflags \"-X github.com/sirrobot01/decypharr/pkg/version.Version=0.0.0 -X github.com/sirrobot01/decypharr/pkg/version.Channel=dev\" -o ./tmp/main .'"
delay = 1000
exclude_dir = ["assets", "tmp", "vendor", "testdata", "data"]
exclude_dir = ["tmp", "vendor", "testdata", "data", "logs", "docs", "dist", "node_modules", ".ven"]
exclude_file = []
exclude_regex = ["_test.go"]
exclude_unchanged = false
follow_symlink = false
full_bin = ""
include_dir = []
include_ext = ["go", "tpl", "tmpl", "html", ".json"]
include_ext = ["go", "tpl", "tmpl", "html", ".json", ".js", ".css"]
include_file = []
kill_delay = "0s"
kill_delay = "1s"
log = "build-errors.log"
poll = false
poll_interval = 0
@@ -24,8 +24,8 @@ tmp_dir = "tmp"
pre_cmd = []
rerun = false
rerun_delay = 500
send_interrupt = false
stop_on_error = false
send_interrupt = true
stop_on_error = true
[color]
app = ""

View File

@@ -11,3 +11,23 @@ torrents.json
*.json
.ven/**
docs/**
# Don't copy node modules
node_modules/
# Don't copy development files
.git/
.gitignore
*.md
.env*
*.log
# Build artifacts
decypharr
healthcheck
*.exe
.venv/
data/**
.stignore
.stfolder/**

76
.github/ISSUE_TEMPLATE/bug_report.yml vendored Normal file
View File

@@ -0,0 +1,76 @@
name: Bug Report
description: 'Report a new bug'
labels: ['Type: Bug', 'Status: Needs Triage']
body:
- type: checkboxes
attributes:
label: Is there an existing issue for this?
description: Please search to see if an open or closed issue already exists for the bug you encountered. If a bug exists and is closed note that it may only be fixed in an unstable branch.
options:
- label: I have searched the existing open and closed issues
required: true
- type: textarea
attributes:
label: Current Behavior
description: A concise description of what you're experiencing.
validations:
required: true
- type: textarea
attributes:
label: Expected Behavior
description: A concise description of what you expected to happen.
validations:
required: true
- type: textarea
attributes:
label: Steps To Reproduce
description: Steps to reproduce the behavior.
placeholder: |
1. In this environment...
2. With this config...
3. Run '...'
4. See error...
validations:
required: false
- type: textarea
attributes:
label: Environment
description: |
examples:
- **OS**: Ubuntu 20.04
- **Version**: v1.0.0
- **Docker Install**: Yes
- **Browser**: Firefox 90 (If UI related)
value: |
- OS:
- Version:
- Docker Install:
- Browser:
render: markdown
validations:
required: true
- type: dropdown
attributes:
label: What branch are you running?
options:
- Main/Latest
- Beta
- Experimental
validations:
required: true
- type: textarea
attributes:
label: Trace Logs? **Not Optional**
description: |
Trace Logs
- are **required** for bug reports
- are not optional
validations:
required: true
- type: checkboxes
attributes:
label: Trace Logs have been provided as applicable
description: Trace logs are **generally required** and are not optional for all bug reports and contain `trace`. Info logs are invalid for bug reports and do not contain `debug` nor `trace`
options:
- label: I have read and followed the steps in the documentation link and provided the required trace logs - the logs contain `trace` - that are relevant and show this issue.
required: true

View File

@@ -0,0 +1,38 @@
name: Feature Request
description: 'Suggest an idea for Decypharr'
labels: ['Type: Feature Request', 'Status: Needs Triage']
body:
- type: checkboxes
attributes:
label: Is there an existing issue for this?
description: Please search to see if an open or closed issue already exists for the feature you are requesting. If a request exists and is closed note that it may only be fixed in an unstable branch.
options:
- label: I have searched the existing open and closed issues
required: true
- type: textarea
attributes:
label: Is your feature request related to a problem? Please describe
description: A clear and concise description of what the problem is.
validations:
required: true
- type: textarea
attributes:
label: Describe the solution you'd like
description: A clear and concise description of what you want to happen.
validations:
required: true
- type: textarea
attributes:
label: Describe alternatives you've considered
description: A clear and concise description of any alternative solutions or features you've considered.
validations:
required: true
- type: textarea
attributes:
label: Anything else?
description: |
Links? References? Mockups? Anything that will give us more context about the feature you are requesting!
Tip: You can attach images or log files by clicking this area to highlight it and then dragging files in.
validations:
required: true

View File

@@ -24,5 +24,5 @@ jobs:
path: .cache
restore-keys: |
mkdocs-material-
- run: pip install mkdocs-material
- run: cd docs && pip install -r requirements.txt
- run: cd docs && mkdocs gh-deploy --force

7
.gitignore vendored
View File

@@ -12,4 +12,9 @@ tmp/**
torrents.json
logs/**
auth.json
.ven/
.ven/
.env
node_modules/
.venv/
.stignore
.stfolder/**

View File

@@ -29,38 +29,49 @@ RUN --mount=type=cache,target=/go/pkg/mod \
go build -trimpath -ldflags="-w -s" \
-o /healthcheck cmd/healthcheck/main.go
# Stage 2: Create directory structure
FROM alpine:3.19 as dirsetup
RUN mkdir -p /app/logs && \
mkdir -p /app/cache && \
chmod 777 /app/logs && \
touch /app/logs/decypharr.log && \
chmod 666 /app/logs/decypharr.log
# Stage 2: Final image
FROM alpine:latest
# Stage 3: Final image
FROM gcr.io/distroless/static-debian12:nonroot
ARG VERSION=0.0.0
ARG CHANNEL=dev
LABEL version = "${VERSION}-${CHANNEL}"
LABEL org.opencontainers.image.source = "https://github.com/sirrobot01/decypharr"
LABEL org.opencontainers.image.title = "decypharr"
LABEL org.opencontainers.image.authors = "sirrobot01"
LABEL org.opencontainers.image.documentation = "https://github.com/sirrobot01/decypharr/blob/main/README.md"
# Copy binaries
COPY --from=builder --chown=nonroot:nonroot /decypharr /usr/bin/decypharr
COPY --from=builder --chown=nonroot:nonroot /healthcheck /usr/bin/healthcheck
# Install dependencies including rclone
RUN apk add --no-cache fuse3 ca-certificates su-exec shadow curl unzip && \
echo "user_allow_other" >> /etc/fuse.conf && \
case "$(uname -m)" in \
x86_64) ARCH=amd64 ;; \
aarch64) ARCH=arm64 ;; \
armv7l) ARCH=arm ;; \
*) echo "Unsupported architecture: $(uname -m)" && exit 1 ;; \
esac && \
curl -O "https://downloads.rclone.org/rclone-current-linux-${ARCH}.zip" && \
unzip "rclone-current-linux-${ARCH}.zip" && \
cp rclone-*/rclone /usr/local/bin/ && \
chmod +x /usr/local/bin/rclone && \
rm -rf rclone-* && \
apk del curl unzip
# Copy pre-made directory structure
COPY --from=dirsetup --chown=nonroot:nonroot /app /app
# Copy binaries and entrypoint
COPY --from=builder /decypharr /usr/bin/decypharr
COPY --from=builder /healthcheck /usr/bin/healthcheck
COPY scripts/entrypoint.sh /entrypoint.sh
RUN chmod +x /entrypoint.sh
# Metadata
# Set environment variables
ENV PUID=1000
ENV PGID=1000
ENV LOG_PATH=/app/logs
EXPOSE 8282
VOLUME ["/app"]
USER nonroot:nonroot
HEALTHCHECK --interval=3s --retries=10 CMD ["/usr/bin/healthcheck", "--config", "/app"]
HEALTHCHECK --interval=10s --retries=10 CMD ["/usr/bin/healthcheck", "--config", "/app", "--basic"]
ENTRYPOINT ["/entrypoint.sh"]
CMD ["/usr/bin/decypharr", "--config", "/app"]

View File

@@ -6,16 +6,16 @@
## What is Decypharr?
Decypharr combines the power of QBittorrent with popular Debrid services to enhance your media management. It provides a familiar interface for Sonarr, Radarr, and other \*Arr applications while leveraging the capabilities of Debrid providers.
Decypharr combines the power of QBittorrent with popular Debrid services to enhance your media management. It provides a familiar interface for Sonarr, Radarr, and other \*Arr applications.
## Features
- 🔄 Mock Qbittorent API that supports the Arrs (Sonarr, Radarr, Lidarr etc)
- 🖥️ Full-fledged UI for managing torrents
- 🛡️ Proxy support for filtering out un-cached Debrid torrents
- 🔌 Multiple Debrid providers support
- 📁 WebDAV server support for each debrid provider
- 🔧 Repair Worker for missing files
- Mock Qbittorent API that supports the Arrs (Sonarr, Radarr, Lidarr etc)
- Full-fledged UI for managing torrents
- Multiple Debrid providers support
- WebDAV server support for each debrid provider
- Optional mounting of WebDAV to your system(using [Rclone](https://rclone.org/))
- Repair Worker for missing files
## Supported Debrid Providers
@@ -29,22 +29,22 @@ Decypharr combines the power of QBittorrent with popular Debrid services to enha
### Docker (Recommended)
```yaml
version: '3.7'
services:
decypharr:
image: cy01/blackhole:latest # or cy01/blackhole:beta
image: cy01/blackhole:latest
container_name: decypharr
ports:
- "8282:8282" # qBittorrent
user: "1000:1000"
- "8282:8282"
volumes:
- /mnt/:/mnt
- /mnt/:/mnt:rshared
- ./configs/:/app # config.json must be in this directory
environment:
- PUID=1000
- PGID=1000
- UMASK=002
restart: unless-stopped
devices:
- /dev/fuse:/dev/fuse:rwm
cap_add:
- SYS_ADMIN
security_opt:
- apparmor:unconfined
```
## Documentation
@@ -62,25 +62,7 @@ The documentation includes:
## Basic Configuration
```json
{
"debrids": [
{
"name": "realdebrid",
"api_key": "your_api_key_here",
"folder": "/mnt/remote/realdebrid/__all__/",
"use_webdav": true
}
],
"qbittorrent": {
"download_folder": "/mnt/symlinks/",
"categories": ["sonarr", "radarr"]
},
"use_auth": false,
"log_level": "info",
"port": "8282"
}
```
You can configure Decypharr through the Web UI or by editing the `config.json` file directly.
## Contributing

View File

@@ -7,11 +7,10 @@ import (
"github.com/sirrobot01/decypharr/internal/logger"
"github.com/sirrobot01/decypharr/pkg/qbit"
"github.com/sirrobot01/decypharr/pkg/server"
"github.com/sirrobot01/decypharr/pkg/service"
"github.com/sirrobot01/decypharr/pkg/version"
"github.com/sirrobot01/decypharr/pkg/web"
"github.com/sirrobot01/decypharr/pkg/webdav"
"github.com/sirrobot01/decypharr/pkg/worker"
"github.com/sirrobot01/decypharr/pkg/wire"
"net/http"
"os"
"runtime"
@@ -41,6 +40,7 @@ func Start(ctx context.Context) error {
svcCtx, cancelSvc := context.WithCancel(ctx)
defer cancelSvc()
// Create the logger path if it doesn't exist
for {
cfg := config.Get()
_log := logger.Default()
@@ -62,7 +62,7 @@ func Start(ctx context.Context) error {
qb := qbit.New()
wd := webdav.New()
ui := web.New(qb).Routes()
ui := web.New().Routes()
webdavRoutes := wd.Routes()
qbitRoutes := qb.Routes()
@@ -74,9 +74,17 @@ func Start(ctx context.Context) error {
}
srv := server.New(handlers)
reset := func() {
// Reset the store and services
qb.Reset()
wire.Reset()
// refresh GC
runtime.GC()
}
done := make(chan struct{})
go func(ctx context.Context) {
if err := startServices(ctx, wd, srv); err != nil {
if err := startServices(ctx, cancelSvc, wd, srv); err != nil {
_log.Error().Err(err).Msg("Error starting services")
cancelSvc()
}
@@ -88,27 +96,23 @@ func Start(ctx context.Context) error {
// graceful shutdown
cancelSvc() // propagate to services
<-done // wait for them to finish
_log.Info().Msg("Decypharr has been stopped gracefully.")
reset() // reset store and services
return nil
case <-restartCh:
cancelSvc() // tell existing services to shut down
_log.Info().Msg("Restarting Decypharr...")
<-done // wait for them to finish
qb.Reset()
service.Reset()
_log.Info().Msg("Decypharr has been restarted.")
reset() // reset store and services
// rebuild svcCtx off the original parent
svcCtx, cancelSvc = context.WithCancel(ctx)
runtime.GC()
config.Reload()
service.Reset()
// loop will restart services automatically
}
}
}
func startServices(ctx context.Context, wd *webdav.WebDav, srv *server.Server) error {
func startServices(ctx context.Context, cancelSvc context.CancelFunc, wd *webdav.WebDav, srv *server.Server) error {
var wg sync.WaitGroup
errChan := make(chan error)
@@ -145,23 +149,20 @@ func startServices(ctx context.Context, wd *webdav.WebDav, srv *server.Server) e
return srv.Start(ctx)
})
// Start rclone RC server if enabled
safeGo(func() error {
return worker.Start(ctx)
})
safeGo(func() error {
arr := service.GetService().Arr
if arr == nil {
rcManager := wire.Get().RcloneManager()
if rcManager == nil {
return nil
}
return arr.StartSchedule(ctx)
return rcManager.Start(ctx)
})
if cfg := config.Get(); cfg.Repair.Enabled {
safeGo(func() error {
r := service.GetService().Repair
if r != nil {
if err := r.Start(ctx); err != nil {
repair := wire.Get().Repair()
if repair != nil {
if err := repair.Start(ctx); err != nil {
_log.Error().Err(err).Msg("repair failed")
}
}
@@ -169,6 +170,11 @@ func startServices(ctx context.Context, wd *webdav.WebDav, srv *server.Server) e
})
}
safeGo(func() error {
wire.Get().StartWorkers(ctx)
return nil
})
go func() {
wg.Wait()
close(errChan)
@@ -178,7 +184,11 @@ func startServices(ctx context.Context, wd *webdav.WebDav, srv *server.Server) e
for err := range errChan {
if err != nil {
_log.Error().Err(err).Msg("Service error detected")
// Don't shut down the whole app
// If the error is critical, return it to stop the main loop
if ctx.Err() == nil {
_log.Error().Msg("Stopping services due to error")
cancelSvc() // Cancel the service context to stop all services
}
}
}
}()

View File

@@ -22,8 +22,14 @@ type HealthStatus struct {
}
func main() {
var configPath string
var (
configPath string
isBasicCheck bool
debug bool
)
flag.StringVar(&configPath, "config", "/data", "path to the data folder")
flag.BoolVar(&isBasicCheck, "basic", false, "perform basic health check without WebDAV")
flag.BoolVar(&debug, "debug", false, "enable debug mode for detailed output")
flag.Parse()
config.SetConfigPath(configPath)
cfg := config.Get()
@@ -63,16 +69,17 @@ func main() {
status.WebUI = true
}
// Check WebDAV if enabled
if webdavPath != "" {
if checkWebDAV(ctx, baseUrl, port, webdavPath) {
if isBasicCheck {
status.WebDAVService = checkBaseWebdav(ctx, baseUrl, port)
} else {
// If not a basic check, check WebDAV with debrid path
if webdavPath != "" {
status.WebDAVService = checkDebridWebDAV(ctx, baseUrl, port, webdavPath)
} else {
// If no WebDAV path is set, consider it healthy
status.WebDAVService = true
}
} else {
// If WebDAV is not enabled, consider it healthy
status.WebDAVService = true
}
// Determine overall status
// Consider the application healthy if core services are running
status.OverallStatus = status.QbitAPI && status.WebUI
@@ -81,7 +88,7 @@ func main() {
}
// Optional: output health status as JSON for logging
if os.Getenv("DEBUG") == "true" {
if debug {
statusJSON, _ := json.MarshalIndent(status, "", " ")
fmt.Println(string(statusJSON))
}
@@ -132,7 +139,24 @@ func checkWebUI(ctx context.Context, baseUrl, port string) bool {
return resp.StatusCode == http.StatusOK
}
func checkWebDAV(ctx context.Context, baseUrl, port, path string) bool {
func checkBaseWebdav(ctx context.Context, baseUrl, port string) bool {
url := fmt.Sprintf("http://localhost:%s%swebdav/", port, baseUrl)
req, err := http.NewRequestWithContext(ctx, "PROPFIND", url, nil)
if err != nil {
return false
}
resp, err := http.DefaultClient.Do(req)
if err != nil {
return false
}
defer resp.Body.Close()
return resp.StatusCode == http.StatusMultiStatus ||
resp.StatusCode == http.StatusOK
}
func checkDebridWebDAV(ctx context.Context, baseUrl, port, path string) bool {
url := fmt.Sprintf("http://localhost:%s%swebdav/%s", port, baseUrl, path)
req, err := http.NewRequestWithContext(ctx, "PROPFIND", url, nil)
if err != nil {
@@ -145,5 +169,7 @@ func checkWebDAV(ctx context.Context, baseUrl, port, path string) bool {
}
defer resp.Body.Close()
return resp.StatusCode == 207 || resp.StatusCode == http.StatusOK
return resp.StatusCode == http.StatusMultiStatus ||
resp.StatusCode == http.StatusOK
}

418
docs/docs/api-spec.yaml Normal file
View File

@@ -0,0 +1,418 @@
openapi: 3.0.3
info:
title: Decypharr API
description: QbitTorrent with Debrid Support API
version: 1.0.0
contact:
name: Decypharr
url: https://github.com/sirrobot01/decypharr
servers:
- url: /api
description: API endpoints
security:
- cookieAuth: []
- bearerAuth: []
paths:
/arrs:
get:
summary: Get all configured Arrs
description: Retrieve a list of all configured Arr applications (Sonarr, Radarr, etc.)
tags:
- Arrs
responses:
'200':
description: Successfully retrieved Arrs
content:
application/json:
schema:
type: array
items:
$ref: '#/components/schemas/Arr'
/add:
post:
summary: Add content for processing
description: Add torrent files or magnet links for processing through debrid services
tags:
- Content
requestBody:
content:
multipart/form-data:
schema:
type: object
properties:
arr:
type: string
description: Name of the Arr application
action:
type: string
description: Action to perform
debrid:
type: string
description: Debrid service to use
callbackUrl:
type: string
description: Optional callback URL
downloadFolder:
type: string
description: Download folder path
downloadUncached:
type: boolean
description: Whether to download uncached content
urls:
type: string
description: Newline-separated URLs or magnet links
files:
type: array
items:
type: string
format: binary
description: Torrent files to upload
responses:
'200':
description: Content added successfully
content:
application/json:
schema:
type: object
properties:
results:
type: array
items:
$ref: '#/components/schemas/ImportRequest'
errors:
type: array
items:
type: string
'400':
description: Bad request
/repair:
post:
summary: Repair media
description: Start a repair process for specified media items
tags:
- Repair
requestBody:
required: true
content:
application/json:
schema:
$ref: '#/components/schemas/RepairRequest'
responses:
'200':
description: Repair started or completed
content:
application/json:
schema:
type: string
'400':
description: Bad request
'404':
description: Arr not found
'500':
description: Internal server error
/repair/jobs:
get:
summary: Get repair jobs
description: Retrieve all repair jobs
tags:
- Repair
responses:
'200':
description: Successfully retrieved repair jobs
content:
application/json:
schema:
type: array
items:
$ref: '#/components/schemas/RepairJob'
delete:
summary: Delete repair jobs
description: Delete multiple repair jobs by IDs
tags:
- Repair
requestBody:
required: true
content:
application/json:
schema:
type: object
properties:
ids:
type: array
items:
type: string
required:
- ids
responses:
'200':
description: Jobs deleted successfully
'400':
description: Bad request
/repair/jobs/{id}/process:
post:
summary: Process repair job
description: Process a specific repair job by ID
tags:
- Repair
parameters:
- name: id
in: path
required: true
schema:
type: string
description: Job ID
responses:
'200':
description: Job processed successfully
'400':
description: Bad request
/repair/jobs/{id}/stop:
post:
summary: Stop repair job
description: Stop a running repair job by ID
tags:
- Repair
parameters:
- name: id
in: path
required: true
schema:
type: string
description: Job ID
responses:
'200':
description: Job stopped successfully
'400':
description: Bad request
'500':
description: Internal server error
/torrents:
get:
summary: Get all torrents
description: Retrieve all torrents sorted by added date
tags:
- Torrents
responses:
'200':
description: Successfully retrieved torrents
content:
application/json:
schema:
type: array
items:
$ref: '#/components/schemas/Torrent'
delete:
summary: Delete multiple torrents
description: Delete multiple torrents by hash list
tags:
- Torrents
parameters:
- name: hashes
in: query
required: true
schema:
type: string
description: Comma-separated list of torrent hashes
- name: removeFromDebrid
in: query
schema:
type: boolean
default: false
description: Whether to remove from debrid service
responses:
'200':
description: Torrents deleted successfully
'400':
description: Bad request
/torrents/{category}/{hash}:
delete:
summary: Delete single torrent
description: Delete a specific torrent by category and hash
tags:
- Torrents
parameters:
- name: category
in: path
required: true
schema:
type: string
description: Torrent category
- name: hash
in: path
required: true
schema:
type: string
description: Torrent hash
- name: removeFromDebrid
in: query
schema:
type: boolean
default: false
description: Whether to remove from debrid service
responses:
'200':
description: Torrent deleted successfully
'400':
description: Bad request
components:
securitySchemes:
cookieAuth:
type: apiKey
in: cookie
name: auth-session
bearerAuth:
type: http
scheme: bearer
bearerFormat: token
description: API token for authentication
schemas:
Arr:
type: object
properties:
name:
type: string
description: Name of the Arr application
host:
type: string
description: Host URL of the Arr application
token:
type: string
description: API token for the Arr application
cleanup:
type: boolean
description: Whether to cleanup after processing
skipRepair:
type: boolean
description: Whether to skip repair operations
downloadUncached:
type: boolean
description: Whether to download uncached content
selectedDebrid:
type: string
description: Selected debrid service
source:
type: string
description: Source of the Arr configuration
ImportRequest:
type: object
properties:
debridName:
type: string
description: Name of the debrid service
downloadFolder:
type: string
description: Download folder path
magnet:
type: string
description: Magnet link
arr:
$ref: '#/components/schemas/Arr'
action:
type: string
description: Action to perform
downloadUncached:
type: boolean
description: Whether to download uncached content
callbackUrl:
type: string
description: Callback URL
importType:
type: string
description: Type of import (API, etc.)
RepairRequest:
type: object
properties:
arrName:
type: string
description: Name of the Arr application
mediaIds:
type: array
items:
type: string
description: List of media IDs to repair
autoProcess:
type: boolean
description: Whether to auto-process the repair
async:
type: boolean
description: Whether to run repair asynchronously
required:
- arrName
RepairJob:
type: object
properties:
id:
type: string
description: Job ID
status:
type: string
description: Job status
arrName:
type: string
description: Associated Arr application
mediaIds:
type: array
items:
type: string
description: Media IDs being repaired
createdAt:
type: string
format: date-time
description: Job creation timestamp
Torrent:
type: object
properties:
hash:
type: string
description: Torrent hash
name:
type: string
description: Torrent name
category:
type: string
description: Torrent category
addedOn:
type: string
format: date-time
description: Date when torrent was added
size:
type: integer
description: Torrent size in bytes
progress:
type: number
format: float
description: Download progress (0-1)
status:
type: string
description: Torrent status
tags:
- name: Arrs
description: Arr application management
- name: Content
description: Content addition and processing
- name: Repair
description: Media repair operations
- name: Torrents
description: Torrent management
- name: Configuration
description: Application configuration
- name: Authentication
description: API token management

90
docs/docs/api.md Normal file
View File

@@ -0,0 +1,90 @@
# API Documentation
Decypharr provides a RESTful API for managing torrents, debrid services, and Arr integrations. The API requires authentication and all endpoints are prefixed with `/api`.
## Authentication
The API supports two authentication methods:
### 1. Session-based Authentication (Cookies)
Log in through the web interface (`/login`) to establish an authenticated session. The session cookie (`auth-session`) will be automatically included in subsequent API requests from the same browser session.
### 2. API Token Authentication (Bearer Token)
Use API tokens for programmatic access. Include the token in the `Authorization` header for each request:
- `Authorization: Bearer <your-token>`
## Interactive API Documentation
<swagger-ui src="api-spec.yaml"/>
## API Endpoints Overview
### Arrs Management
- `GET /api/arrs` - Get all configured Arr applications (Sonarr, Radarr, etc.)
### Content Management
- `POST /api/add` - Add torrent files or magnet links for processing through debrid services
### Repair Operations
- `POST /api/repair` - Start repair process for media items
- `GET /api/repair/jobs` - Get all repair jobs
- `POST /api/repair/jobs/{id}/process` - Process a specific repair job
- `POST /api/repair/jobs/{id}/stop` - Stop a running repair job
- `DELETE /api/repair/jobs` - Delete multiple repair jobs
### Torrent Management
- `GET /api/torrents` - Get all torrents
- `DELETE /api/torrents/{category}/{hash}` - Delete a specific torrent
- `DELETE /api/torrents/` - Delete multiple torrents
## Usage Examples
### Adding Content via API
#### Using API Token:
```bash
curl -H "Authorization: Bearer $API_TOKEN" -X POST http://localhost:8080/api/add \
-F "arr=sonarr" \
-F "debrid=realdebrid" \
-F "urls=magnet:?xt=urn:btih:..." \
-F "downloadUncached=true" \
-F "file=@/path/to/torrent/file.torrent" \
-F "callbackUrl=http://your.callback.url/endpoint"
```
#### Using Session Cookies:
```bash
# Login first (this sets the session cookie)
curl -c cookies.txt -X POST http://localhost:8080/login \
-H "Content-Type: application/json" \
-d '{"username": "your_username", "password": "your_password"}'
# Then use the session cookie for API calls
curl -b cookies.txt -X POST http://localhost:8080/api/add \
-F "arr=sonarr" \
-F "debrid=realdebrid" \
-F "urls=magnet:?xt=urn:btih:..." \
-F "downloadUncached=true"
```
### Getting Torrents
```bash
# With API token
curl -H "Authorization: Bearer $API_TOKEN" -X GET http://localhost:8080/api/torrents
```
### Starting a Repair Job
```bash
# With API token
curl -H "Authorization: Bearer $API_TOKEN" -X POST http://localhost:8080/api/repair \
-H "Content-Type: application/json" \
-d '{
"arrName": "sonarr",
"mediaIds": ["123", "456"],
"autoProcess": true,
"async": true
}'
```

View File

@@ -1,186 +0,0 @@
# Changelog
## 1.0.0
- Add WebDAV support for debrid providers
- Some refactoring and code cleanup
- Fixes
- Fix Alldebrid not downloading torrents
- Fix Alldebrid not downloading uncached torrents
- Fix uncached torrents not being downloaded for RealDebrid
- Add support for multiple download API keys for debrid providers
- Add support for editable config.json via the UI
- Fix downloading timeout
- Fix UMASK for Windows
- Retries 50x (except 503) errors for RD
## 0.5.0
- A more refined repair worker (with more control)
- UI Improvements
- Pagination for torrents
- Dark mode
- Ordered torrents table
- Fix Arr API flaky behavior
- Discord Notifications
- Minor bug fixes
- Add Tautulli support
- playback_failed event triggers a repair
- Miscellaneous improvements
- Add an option to skip the repair worker for a specific arr
- Arr specific uncached downloading option
- Option to download uncached torrents from UI
- Remove QbitTorrent Log level (Use the global log level)
## 0.4.2
- Hotfixes
- Fix saving torrents error
- Fix bugs with the UI
- Speed improvements
## 0.4.1
- Adds optional UI authentication
- Downloaded Torrents persist on restart
- Fixes
- Fix Alldebrid struggling to find the correct file
- Minor bug fixes or speed-gains
- A new cleanup worker to clean up ARR queues
## 0.4.0
- Add support for multiple debrid providers
- A full-fledged UI for adding torrents, repairing files, viewing config and managing torrents
- Fix issues with Alldebrid
- Fix file traversal bug
- Fix files with no parent directory
- Logging
- Add a more robust logging system
- Add logging to a file
- Add logging to the UI
- Qbittorrent
- Add support for tags (creating, deleting, listing)
- Add support for categories (creating, deleting, listing)
- Fix issues with arr sending torrents using a different content type
## 0.3.3
- Add AllDebrid Support
- Fix Torbox not downloading uncached torrents
- Fix Rar files being downloaded
## 0.3.2
- Fix DebridLink not downloading
- Fix Torbox with uncached torrents
- Add new /internal/cached endpoint to check if a hash is cached
- Implement per-debrid local cache
- Fix file check for torbox
- Other minor bug fixes
## 0.3.1
- Add DebridLink Support
- Refactor error handling
## 0.3.0
- Add UI for adding torrents
- Refactoring of the code
- Fix Torbox bug
- Update CI/CD
- Update Readme
## 0.2.7
- Add support for multiple debrid providers
- Add Torbox support
- Add support for configurable debrid cache checks
- Add support for configurable debrid download uncached torrents
## 0.2.6
- Delete torrent for empty matched files
- Update Readme
## 0.2.5
- Fix ContentPath not being set prior
- Rewrote Readme
- Cleaned up the code
## 0.2.4
- Add file download support (Sequential Download)
- Fix http handler error
- Fix *arrs map failing concurrently
- Fix cache not being updated
## 0.2.3
- Delete uncached items from RD
- Fail if the torrent is not cached (optional)
- Fix cache not being updated
## 0.2.2
- Fix name mismatch in the cache
- Fix directory mapping with mounts
- Add Support for refreshing the *arrs
## 0.2.1
- Fix uncached torrents not being downloaded
- Minor bugs fixed
- Fix Race condition in the cache and file system
## 0.2.0
- Implement 0.2.0-beta changes
- Removed Blackhole
- Added QbitTorrent API
- Cleaned up the code
## 0.2.0-beta
- Switch to QbitTorrent API instead of Blackhole
- Rewrote the whole codebase
## 0.1.4
- Rewrote Report log
- Fix YTS, 1337x not grabbing infohash
- Fix Torrent symlink bug
## 0.1.3
- Searching for infohashes in the xml description/summary/comments
- Added local cache support
- Added max cache size
- Rewrite blackhole.go
- Bug fixes
- Fixed indexer getting disabled
- Fixed blackhole not working
## 0.1.2
- Bug fixes
- Code cleanup
- Get available hashes at once
## 0.1.1
- Added support for "No Blackhole" for Arrs
- Added support for "Cached Only" for Proxy
- Bug Fixes
## 0.1.0
- Initial Release
- Added Real Debrid Support
- Added Arrs Support
- Added Proxy Support
- Added Basic Authentication for Proxy
- Added Rate Limiting for Debrid Providers

View File

@@ -1,77 +0,0 @@
# Arr Applications Configuration
Decypharr can integrate directly with Sonarr, Radarr, and other Arr applications. This section explains how to configure the Arr integration in your `config.json` file.
## Basic Configuration
The Arr applications are configured under the `arrs` key:
```json
"arrs": [
{
"name": "sonarr",
"host": "http://sonarr:8989",
"token": "your-sonarr-api-key",
"cleanup": true
},
{
"name": "radarr",
"host": "http://radarr:7878",
"token": "your-radarr-api-key",
"cleanup": true
}
]
```
### Note
This configuration is optional if you've already set up the qBittorrent client in your Arr applications with the correct host and token information. It's particularly useful for the Repair Worker functionality.
### Configuration Options
Each Arr application supports the following options:
- `name`: The name of the Arr application, which should match the category in qBittorrent
- `host`: The host URL of the Arr application, including protocol and port
- `token`: The API token/key of the Arr application
- `cleanup`: Whether to clean up the Arr queue (removes completed downloads). This is only useful for Sonarr.
- `skip_repair`: Automated repair will be skipped for this *arr.
- `download_uncached`: Whether to download uncached torrents (defaults to debrid/manual setting)
### Finding Your API Key
#### Sonarr/Radarr/Lidarr
1. Go to Sonarr > Settings > General
2. Look for "API Key" in the "Security" section
3. Copy the API key
### Multiple Arr Applications
You can configure multiple Arr applications by adding more entries to the arrs array:
```json
"arrs": [
{
"name": "sonarr",
"host": "http://sonarr:8989",
"token": "your-sonarr-api-key",
"cleanup": true
},
{
"name": "sonarr-anime",
"host": "http://sonarr-anime:8989",
"token": "your-sonarr-anime-api-key",
"cleanup": true
},
{
"name": "radarr",
"host": "http://radarr:7878",
"token": "your-radarr-api-key",
"cleanup": false
},
{
"name": "lidarr",
"host": "http://lidarr:8686",
"token": "your-lidarr-api-key",
"cleanup": false
}
]
```

View File

@@ -1,131 +0,0 @@
# Debrid Providers Configuration
Decypharr supports multiple Debrid providers. This section explains how to configure each provider in your `config.json` file.
## Basic Configuration
Each Debrid provider is configured in the `debrids` array:
```json
"debrids": [
{
"name": "realdebrid",
"api_key": "your-api-key",
"folder": "/mnt/remote/realdebrid/__all__/"
},
{
"name": "alldebrid",
"api_key": "your-api-key",
"folder": "/mnt/remote/alldebrid/downloads/"
}
]
```
### Provider Options
Each Debrid provider accepts the following configuration options:
#### Basic(Required) Options
- `name`: The name of the Debrid provider (realdebrid, alldebrid, debridlink, torbox)
- `host`: The API endpoint of the Debrid provider
- `api_key`: Your API key for the Debrid service (can be comma-separated for multiple keys)
- `folder`: The folder where your Debrid content is mounted (via webdav, rclone, zurg, etc.)
#### Advanced Options
- `rate_limit`: Rate limit for API requests (null by default)
- `download_uncached`: Whether to download uncached torrents (disabled by default)
- `check_cached`: Whether to check if torrents are cached (disabled by default)
- `use_webdav`: Whether to create a WebDAV server for this Debrid provider (disabled by default)
- `proxy`: Proxy URL for the Debrid provider (optional)
#### WebDAV and Rclone Options
- `torrents_refresh_interval`: Interval for refreshing torrent data (e.g., `15s`, `1m`, `1h`).
- `download_links_refresh_interval`: Interval for refreshing download links (e.g., `40m`, `1h`).
- `workers`: Number of concurrent workers for processing requests.
- `serve_from_rclone`: Whether to serve files directly from Rclone (disabled by default)
- `add_samples`: Whether to add sample files when adding torrents to debrid (disabled by default)
- `folder_naming`: Naming convention for folders:
- `original_no_ext`: Original file name without extension
- `original`: Original file name with extension
- `filename`: Torrent filename
- `filename_no_ext`: Torrent filename without extension
- `id`: Torrent ID
- `hash`: Torrent hash
- `auto_expire_links_after`: Time after which download links will expire (e.g., `3d`, `1w`).
- `rc_url`, `rc_user`, `rc_pass`, `rc_refresh_dirs`: Rclone RC configuration for VFS refreshes
- `directories`: A map of virtual folders to serve via the WebDAV server. The key is the virtual folder name, and the values are a map of filters and their values
#### Example of `directories` configuration
```json
"directories": {
"Newly Added": {
"filters": {
"exclude": "9-1-1",
"last_added": "20h"
}
},
"Spiderman Collection": {
"filters": {
"regex": "(?i)spider[-\\s]?man(\\s+collection|\\s+\\d|\\s+trilogy|\\s+complete|\\s+ultimate|\\s+box\\s+set|:?\\s+homecoming|:?\\s+far\\s+from\\s+home|:?\\s+no\\s+way\\s+home)"
}
}
}
```
### Example Configuration
#### Real Debrid
```json
{
"name": "realdebrid",
"api_key": "your-api-key",
"folder": "/mnt/remote/realdebrid/__all__/",
"rate_limit": null,
"download_uncached": false,
"use_webdav": true
}
```
#### All Debrid
```json
{
"name": "alldebrid",
"api_key": "your-api-key",
"folder": "/mnt/remote/alldebrid/torrents/",
"rate_limit": null,
"download_uncached": false,
"use_webdav": true
}
```
#### Debrid Link
```json
{
"name": "debridlink",
"api_key": "your-api-key",
"folder": "/mnt/remote/debridlink/torrents/",
"rate_limit": null,
"download_uncached": false,
"use_webdav": true
}
```
#### Torbox
```json
{
"name": "torbox",
"api_key": "your-api-key",
"folder": "/mnt/remote/torbox/torrents/",
"rate_limit": null,
"download_uncached": false,
"use_webdav": true
}
```

View File

@@ -1,81 +0,0 @@
# General Configuration
This section covers the basic configuration options for Decypharr that apply to the entire application.
## Basic Settings
Here are the fundamental configuration options:
```json
{
"use_auth": false,
"port": 8282,
"log_level": "info",
"discord_webhook_url": "",
"min_file_size": 0,
"max_file_size": 0,
"allowed_file_types": [".mp4", ".mkv", ".avi", ...],
}
```
### Configuration Options
#### Log Level
The `log_level` setting determines how verbose the application logs will be:
- `debug`: Detailed information, useful for troubleshooting
- `info`: General operational information (default)
- `warn`: Warning messages
- `error`: Error messages only
- `trace`: Very detailed information, including all requests and responses
#### Port
The `port` setting specifies the port on which Decypharr will run. The default is `8282`. You can change this to any available port on your server.
Ensure this port:
- Is not used by other applications
- Is accessible to your Arr applications
- Is properly exposed if using Docker (see the Docker Compose example in the Installation guide)
#### Authentication
The `use_auth` option enables basic authentication for the UI:
```json
"use_auth": true
```
When enabled, you'll need to provide a username and password to access the Decypharr interface.
#### File Size Limits
You can set minimum and maximum file size limits for torrents:
```json
"min_file_size": 0, // Minimum file size in bytes (0 = no minimum)
"max_file_size": 0 // Maximum file size in bytes (0 = no maximum)
```
#### Allowed File Types
You can restrict the types of files that Decypharr will process by specifying allowed file extensions. This is useful for filtering out unwanted file types.
```json
"allowed_file_types": [
".mp4", ".mkv", ".avi", ".mov",
".m4v", ".mpg", ".mpeg", ".wmv",
".m4a", ".mp3", ".flac", ".wav"
]
```
If not specified, all movie, TV show, and music file types are allowed by default.
#### Discord Notifications
To receive notifications on Discord, add your webhook URL:
```json
"discord_webhook_url": "https://discord.com/api/webhooks/..."
```
This will send notifications for various events, such as successful downloads or errors.

View File

@@ -1,44 +0,0 @@
# Configuration Overview
Decypharr uses a JSON configuration file to manage its settings. This file should be named `config.json` and placed in your configured directory.
## Basic Configuration
Here's a minimal configuration to get started:
```json
{
"debrids": [
{
"name": "realdebrid",
"api_key": "realdebrid_key",
"folder": "/mnt/remote/realdebrid/__all__/",
"use_webdav": true
}
],
"qbittorrent": {
"port": "8282",
"download_folder": "/mnt/symlinks/",
"categories": ["sonarr", "radarr"]
},
"repair": {
"enabled": false,
"interval": "12h",
"run_on_start": false
},
"use_auth": false,
"log_level": "info"
}
```
### Configuration Sections
Decypharr's configuration is divided into several sections:
- [General Configuration](general.md) - Basic settings like logging and authentication
- [Debrid Providers](debrid.md) - Configure one or more Debrid services
- [qBittorrent Settings](qbittorrent.md) - Settings for the qBittorrent API
- [Arr Integration](arrs.md) - Configuration for Sonarr, Radarr, etc.
### Full Configuration Example
For a complete configuration file with all available options, see our [full configuration example](../extras/config.full.json).

View File

@@ -1,61 +0,0 @@
# qBittorrent Configuration
Decypharr emulates a qBittorrent instance to integrate with Arr applications. This section explains how to configure the qBittorrent settings in your `config.json` file.
## Basic Configuration
The qBittorrent functionality is configured under the `qbittorrent` key:
```json
"qbittorrent": {
"download_folder": "/mnt/symlinks/",
"categories": ["sonarr", "radarr", "lidarr"],
"refresh_interval": 5
}
```
### Configuration Options
#### Required Settings
- `download_folder`: The folder where symlinks or downloaded files will be placed
- `categories`: An array of categories to organize downloads (usually matches your Arr applications)
#### Advanced Settings
- `refresh_interval`: How often (in seconds) to refresh the Arrs Monitored Downloads (default: 5)
- `max_downloads`: The maximum number of concurrent downloads. This is only for downloading real files(Not symlinks). If you set this to 0, it will download all files at once. This is not recommended for most users.(default: 5)
- `skip_pre_cache`: This option disables the process of pre-caching files. This caches a small portion of the file to speed up your *arrs import process.
#### Categories
Categories help organize your downloads and match them to specific Arr applications. Typically, you'll want to configure categories that match your Sonarr, Radarr, or other Arr applications:
```json
"categories": ["sonarr", "radarr", "lidarr", "readarr"]
```
When setting up your Arr applications to connect to Decypharr, you'll specify these same category names.
#### Download Folder
The `download_folder` setting specifies where Decypharr will place downloaded files or create symlinks:
```json
"download_folder": "/mnt/symlinks/"
```
This folder should be:
- Accessible to Decypharr
- Accessible to your Arr applications
- Have sufficient space if downloading files locally
#### Refresh Interval
The refresh_interval setting controls how often Decypharr checks for updates from your Arr applications:
```json
"refresh_interval": 5
```
This value is in seconds. Lower values provide more responsive updates but may increase CPU usage.

View File

@@ -1,88 +0,0 @@
{
"debrids": [
{
"name": "realdebrid",
"api_key": "realdebrid_key",
"folder": "/mnt/remote/realdebrid/__all__/",
"download_api_keys": [],
"proxy": "",
"rate_limit": "250/minute",
"download_uncached": false,
"use_webdav": true,
"torrents_refresh_interval": "15s",
"folder_naming": "original_no_ext",
"auto_expire_links_after": "3d",
"rc_url": "http://your-ip-address:9990",
"rc_user": "your_rclone_rc_user",
"rc_pass": "your_rclone_rc_pass"
},
{
"name": "torbox",
"api_key": "torbox_api_key",
"folder": "/mnt/remote/torbox/torrents/",
"rate_limit": "250/minute",
"download_uncached": false
},
{
"name": "debridlink",
"api_key": "debridlink_key",
"folder": "/mnt/remote/debridlink/torrents/",
"rate_limit": "250/minute",
"download_uncached": false
},
{
"name": "alldebrid",
"api_key": "alldebrid_key",
"folder": "/mnt/remote/alldebrid/magnet/",
"rate_limit": "600/minute",
"download_uncached": false
}
],
"max_cache_size": 1000,
"qbittorrent": {
"port": "8282",
"download_folder": "/mnt/symlinks/",
"categories": ["sonarr", "radarr"],
"refresh_interval": 5,
"skip_pre_cache": false
},
"arrs": [
{
"name": "sonarr",
"host": "http://sonarr:8989",
"token": "arr_key",
"cleanup": true,
"skip_repair": true,
"download_uncached": false
},
{
"name": "radarr",
"host": "http://radarr:7878",
"token": "arr_key",
"cleanup": false,
"download_uncached": false
},
{
"name": "lidarr",
"host": "http://lidarr:8686",
"token": "arr_key",
"cleanup": false,
"skip_repair": true,
"download_uncached": false
}
],
"repair": {
"enabled": false,
"interval": "12h",
"run_on_start": false,
"zurg_url": "",
"use_webdav": false,
"auto_process": false
},
"log_level": "info",
"min_file_size": "",
"max_file_size": "",
"allowed_file_types": [],
"use_auth": false,
"discord_webhook_url": "https://discord.com/api/webhooks/..."
}

View File

@@ -1,5 +0,0 @@
[decypharr]
type = webdav
url = http://decypharr:8282/webdav/realdebrid
vendor = other
pacer_min_sleep = 0

View File

@@ -25,8 +25,10 @@ The Decypharr user interface provides:
Decypharr includes several advanced features that extend its capabilities:
- [Repair Worker](repair-worker.md): Identifies and fixes issues with your media files
- [WebDAV Server](webdav.md): Provides direct access to your Debrid files
- [Repair Support](repair-worker.md): Identifies and fixes issues with your media files
- WebDAV Server: Provides direct access to your Debrid files
- Mounting Support: Allows you to mount Debrid services using [rclone](https://rclone.org), making it easy to access your files directly from your system
- Multiple Debrid Providers: Supports Real Debrid, Torbox, Debrid Link, and All Debrid, allowing you to choose the best service for your needs
## Supported Debrid Providers
@@ -36,5 +38,7 @@ Decypharr supports multiple Debrid providers:
- Torbox
- Debrid Link
- All Debrid
- Premiumize(Coming Soon)
- Usenet(Coming Soon)
Each provider can be configured separately, allowing you to use one or multiple services simultaneously.

View File

@@ -1,5 +1,7 @@
# Repair Worker
![Repair Worker](../images/repair.png)
The Repair Worker is a powerful feature that helps maintain the health of your media library by scanning for and fixing issues with files.
## What It Does
@@ -13,29 +15,4 @@ The Repair Worker performs the following tasks:
## Configuration
To enable and configure the Repair Worker, add the following to your `config.json`:
```json
"repair": {
"enabled": true,
"interval": "12h",
"run_on_start": false,
"use_webdav": false,
"zurg_url": "http://localhost:9999",
"auto_process": true
}
```
### Configuration Options
- `enabled`: Set to `true` to enable the Repair Worker.
- `interval`: The time interval for the Repair Worker to run (e.g., `12h`, `1d`).
- `run_on_start`: If set to `true`, the Repair Worker will run immediately after Decypharr starts.
- `use_webdav`: If set to `true`, the Repair Worker will use WebDAV for file operations.
- `zurg_url`: The URL for the Zurg service (if using).
- `auto_process`: If set to `true`, the Repair Worker will automatically process files that it finds issues with.
### Performance Tips
- For users of the WebDAV server, enable `use_webdav` for exponentially faster repair processes
- If using Zurg, set the `zurg_url` parameter to greatly improve repair speed
You can enable and configure the Repair Worker in the Decypharr settings. It can be set to run at regular intervals, such as every 12 hours or daily.

View File

@@ -1,72 +0,0 @@
# WebDAV Server
Decypharr includes a built-in WebDAV server that provides direct access to your Debrid files, making them easily accessible to media players and other applications.
## Overview
While most Debrid providers have their own WebDAV servers, Decypharr's implementation offers faster access and additional features.
## Accessing the WebDAV Server
- URL: `http://localhost:8282/webdav` or `http://<your-server-ip>:8282/webdav`
## Configuration
You can configure WebDAV settings either globally or per-Debrid provider in your `config.json`:
```json
"webdav": {
"torrents_refresh_interval": "15s",
"download_links_refresh_interval": "40m",
"folder_naming": "original_no_ext",
"auto_expire_links_after": "3d",
"rc_url": "http://localhost:5572",
"rc_user": "username",
"rc_pass": "password",
"serve_from_rclone": false,
"directories": {
"Newly Added": {
"filters": {
"exclude": "9-1-1",
"last_added": "20h"
}
}
}
}
```
### Configuration Options
- `torrents_refresh_interval`: Interval for refreshing torrent data (e.g., `15s`, `1m`, `1h`).
- `download_links_refresh_interval`: Interval for refreshing download links (e.g., `40m`, `1h`).
- `workers`: Number of concurrent workers for processing requests.
- `folder_naming`: Naming convention for folders:
- `original_no_ext`: Original file name without extension
- `original`: Original file name with extension
- `filename`: Torrent filename
- `filename_no_ext`: Torrent filename without extension
- `id`: Torrent ID
- `auto_expire_links_after`: Time after which download links will expire (e.g., `3d`, `1w`).
- `rc_url`, `rc_user`, `rc_pass`: Rclone RC configuration for VFS refreshes
- `directories`: A map of virtual folders to serve via the WebDAV server. The key is the virtual folder name, and the values are a map of filters and their values.
- `serve_from_rclone`: Whether to serve files directly from Rclone (disabled by default).
### Using with Media Players
The WebDAV server works well with media players like:
- Infuse
- VidHub
- Plex, Emby, Jellyfin (with rclone, Check [this guide](../guides/rclone.md))
- Kodi
### Mounting with Rclone
You can mount the WebDAV server locally using Rclone. Example configuration:
```conf
[decypharr]
type = webdav
url = http://localhost:8282/webdav/realdebrid
vendor = other
```
For a complete Rclone configuration example, see our [sample rclone.conf](../extras/rclone.conf).

View File

@@ -0,0 +1,26 @@
### Downloading with Decypharr
While Decypharr provides a qBittorrent API for integration with media management applications, it also allows you to manually download torrents directly through its interface. This guide will walk you through the process of downloading torrents using Decypharr.
- You can either use the Decypharr UI to add torrents manually or use its [API](../api.md) to automate the process.
## Manual Downloading
![Downloading UI](../images/download.png)
To manually download a torrent using Decypharr, follow these steps:
1. Navigate to the "Download" section in the Decypharr UI.
2. You can either upload torrent file(s) or paste magnet links directly into the input fields
3. Select the action (defaults to Symlink)
4. Add any additional options, such as:
- *Download Folder*: Specify the folder where the downloaded files will be saved.
- *Arr Category*: Choose the category for the download, which helps in organizing files in your media management applications.
- **Post Download Action**: Select what to do after the download completes:
- **Create Symlink**: Create a symlink to the downloaded files in the mount folder(default)
- **Download**: Download the file directly.
- **No Action**: Do nothing after the download completes.
- **Debrid Provider**: Choose which Debrid service to use for the download(if you have multiple)
- **Download Uncached**: If enabled, Decypharr will attempt to download uncached files from the Debrid service.
Note:
- If you use an arr category, your download will go into **{download_folder}/{arr}**

View File

@@ -1,4 +1,4 @@
# Guides for setting up Decypharr
- [Setting up with Rclone](rclone.md)
- [Manual Downloading with Decypharr](downloading.md)
- [Internal Mounting](internal-mounting.md)

View File

@@ -0,0 +1,85 @@
# Internal Mounting
This guide explains how to use Decypharr's internal mounting feature to eliminate the need for external rclone setup.
## Overview
![Decypharr Internal Mounting](../images/settings/rclone.png)
Instead of requiring users to install and configure rclone separately, Decypharr can now mount your WebDAV endpoints internally using rclone as a library dependency. This provides a seamless experience where files appear as regular filesystem paths without any external dependencies.
## Prerequisites
- **Docker users**: FUSE support may need to be enabled in the container depending on your Docker setup
- **macOS users**: May need [macFUSE](https://osxfuse.github.io/) installed for mounting functionality
- **Linux users**: FUSE should be available by default on most distributions
- **Windows users**: Mounting functionality may be limited
### Configuration Options
You can set the options in the Web UI or directly in the configuration file:
#### Note:
Check the Rclone documentation for more details on the available options: [Rclone Mount Options](https://rclone.org/commands/rclone_mount/).
## How It Works
1. **WebDAV Server**: Decypharr starts its internal WebDAV server for enabled providers
2. **Internal Mount**: Rclone is used internally to mount the WebDAV endpoint to a local filesystem path
3. **File Access**: Your applications can access files using regular filesystem paths like `/mnt/decypharr/realdebrid/__all__/MyMovie/`
## Benefits
- **Automatic Setup**: Mounting is handled automatically by Decypharr using internal rclone rcd
- **Filesystem Access**: Files appear as regular directories and files
- **Seamless Integration**: Works with existing media servers without changes
## Docker Compose
```yaml
version: '3.8'
services:
decypharr:
image: sirrobot01/decypharr:latest
container_name: decypharr
ports:
- "8282:8282"
volumes:
- ./config:/config
- /mnt:/mnt:rshared # Important: use 'rshared' for mount propagation
devices:
- /dev/fuse:/dev/fuse:rwm
cap_add:
- SYS_ADMIN
security_opt:
- apparmor:unconfined
environment:
- UMASK=002
- PUID=1000 # Change to your user ID
- PGID=1000 # Change to your group ID
```
**Important Docker Notes:**
- Mount volumes with `:rshared` to allow mount propagation
- Include `/dev/fuse` device for FUSE mounting
## Troubleshooting
### Mount Failures
If mounting fails, check:
1. **FUSE Installation**:
- **macOS**: Install macFUSE from https://osxfuse.github.io/
- **Linux**: Install fuse package (`apt install fuse` or `yum install fuse`)
- **Docker**: Fuse is already included in the container, but ensure the host supports it
2. **Permissions**: Ensure the application has sufficient privileges
### No Mount Methods Available
If you see "no mount method available" errors:
1. **Check Platform Support**: Some platforms have limited FUSE support
2. **Install Dependencies**: Ensure FUSE libraries are installed
3. **Use WebDAV Directly**: Access files via `http://localhost:8282/webdav/provider/`
4. **External Mounting**: Use OS-native WebDAV mounting as fallback

View File

@@ -1,142 +0,0 @@
# Setting up Decypharr with Rclone
This guide will help you set up Decypharr with Rclone, allowing you to use your Debrid providers as a remote storage solution.
#### Rclone
Make sure you have Rclone installed and configured on your system. You can follow the [Rclone installation guide](https://rclone.org/install/) for instructions.
It's recommended to use docker version of Rclone, as it provides a consistent environment across different platforms.
### Steps
We'll be using docker compose to set up Rclone and Decypharr together.
#### Note
This guide assumes you have a basic understanding of Docker and Docker Compose. If you're new to Docker, consider checking out the [Docker documentation](https://docs.docker.com/get-started/) for more information.
Also, ensure you have Docker and Docker Compose installed on your system. You can find installation instructions in the [Docker documentation](https://docs.docker.com/get-docker/) and [Docker Compose documentation](https://docs.docker.com/compose/install/).
Create a directory for your Decypharr and Rclone setup:
```bash
mkdir -p /opt/decypharr
mkdir -p /opt/rclone
mkdir -p /mnt/remote/realdebrid
# Set permissions
chown -R $USER:$USER /opt/decypharr
chown -R $USER:$USER /opt/rclone
chown -R $USER:$USER /mnt/remote/realdebrid
```
Create a `rclone.conf` file in `/opt/rclone/` with your Rclone configuration.
```conf
[decypharr]
type = webdav
url = https://your-ip-or-domain:8282/webdav/realdebrid
vendor = other
pacer_min_sleep = 0
```
Create a `config.json` file in `/opt/decypharr/` with your Decypharr configuration.
```json
{
"debrids": [
{
"name": "realdebrid",
"api_key": "realdebrid_key",
"folder": "/mnt/remote/realdebrid/__all__/",
"rate_limit": "250/minute",
"use_webdav": true,
"rc_url": "http://your-ip-address:5572" // Rclone RC URL
}
],
"qbittorrent": {
"download_folder": "data/media/symlinks/",
"refresh_interval": 10
}
}
```
Create a `docker-compose.yml` file with the following content:
```yaml
services:
decypharr:
image: cy01/blackhole:latest
container_name: decypharr
user: "1000:1000"
volumes:
- /mnt/:/mnt
- /opt/decypharr/:/app
environment:
- PUID=1000
- PGID=1000
- UMASK=002
ports:
- "8282:8282/tcp"
restart: unless-stopped
rclone:
image: rclone/rclone:latest
container_name: rclone
restart: unless-stopped
environment:
TZ: UTC
PUID: 1000
PGID: 1000
ports:
- 5572:5572
volumes:
- /mnt/remote/realdebrid:/data:rshared
- /opt/rclone/rclone.conf:/config/rclone/rclone.conf
- /mnt:/mnt
cap_add:
- SYS_ADMIN
security_opt:
- apparmor:unconfined
devices:
- /dev/fuse:/dev/fuse:rwm
depends_on:
decypharr:
condition: service_healthy
restart: true
command: "mount decypharr: /data --allow-non-empty --allow-other --uid=1000 --gid=1000 --umask=002 --dir-cache-time 10s --rc --rc-addr :5572 --rc-no-auth "
```
Start the containers:
```bash
docker-compose up -d
```
Access the Decypharr web interface at `http://your-ip-address:8282` and configure your settings as needed.
- Access your webdav server at `http://your-ip-address:8282/webdav` to see your files.
- You should be able to see your files in the `/mnt/remote/realdebrid/__all__/` directory.
- You can now use your Debrid provider as a remote storage solution with Rclone and Decypharr.
- You can also use the Rclone mount command to mount your Debrid provider locally. For example:
### Notes
- Make sure to replace `your-ip-address` with the actual IP address of your server.
- You can use multiple Debrid providers by adding them to the `debrids` array in the `config.json` file.
For each provider, you'll need a different rclone remote. Alternatively, you can change your `rclone.conf` as follows:
```apache
[decypharr]
type = webdav
url = https://your-ip-or-domain:8282/webdav/
vendor = other
pacer_min_sleep = 0
```
You'll still be able to access the directories via `/mnt/remote/realdebrid, /mnt/remote/alldebrid` etc

Binary file not shown.

After

Width:  |  Height:  |  Size: 293 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 431 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 188 KiB

After

Width:  |  Height:  |  Size: 417 KiB

BIN
docs/docs/images/repair.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 286 KiB

View File

Before

Width:  |  Height:  |  Size: 264 KiB

After

Width:  |  Height:  |  Size: 264 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 264 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 169 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 364 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 216 KiB

BIN
docs/docs/images/webdav.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 62 KiB

View File

@@ -1,20 +1,19 @@
# Decypharr
![Decypharr UI](images/main.png)
![Decypharr UI - Light Mode](images/main-light.png){: .light-mode-image}
![Decypharr UI - Dark Mode](images/main.png){: .dark-mode-image}
**Decypharr** is an implementation of QbitTorrent with **Multiple Debrid service support**, written in Go.
## What is Decypharr?
TLDR; Decypharr is a self-hosted, open-source torrent client that integrates with multiple Debrid services. It provides a user-friendly interface for managing torrents and supports popular media management applications like Sonarr and Radarr.
**TLDR**; Decypharr is a self-hosted, open-source download client that integrates with multiple Debrid services. It provides a user-friendly interface for managing files and supports popular media management applications like Sonarr and Radarr.
## Key Features
- Mock qBittorrent API that supports Sonarr, Radarr, Lidarr, and other Arr applications
- Full-fledged UI for managing torrents
- Multiple Debrid providers support
- WebDAV server support for each Debrid provider
- WebDAV server support for each Debrid provider with an optional mounting feature(using [rclone](https://rclone.org))
- Repair Worker for missing files, symlinks etc
## Supported Debrid Providers

View File

@@ -18,7 +18,6 @@ You can use either Docker Hub or GitHub Container Registry to pull the image:
- `latest`: The latest stable release
- `beta`: The latest beta release
- `vX.Y.Z`: A specific version (e.g., `v0.1.0`)
- `nightly`: The latest nightly build (usually unstable)
- `experimental`: The latest experimental build (highly unstable)
### Docker CLI Setup
@@ -31,12 +30,13 @@ Run the Docker container:
```bash
docker run -d \
--name decypharr \
--restart unless-stopped \
-p 8282:8282 \
-v /mnt/:/mnt \
-v /mnt/:/mnt:rshared \
-v ./config/:/app \
-e PUID=1000 \
-e PGID=1000 \
-e UMASK=002 \
--device /dev/fuse:/dev/fuse:rwm \
--cap-add SYS_ADMIN \
--security-opt apparmor:unconfined \
cy01/blackhole:latest
```
@@ -45,23 +45,22 @@ docker run -d \
Create a `docker-compose.yml` file with the following content:
```yaml
version: '3.7'
services:
decypharr:
image: cy01/blackhole:latest
container_name: decypharr
ports:
- "8282:8282"
user: "1000:1000"
volumes:
- /mnt/:/mnt # Mount your media directory
- ./config/:/app # config.json must be in this directory
environment:
- PUID=1000
- PGID=1000
- UMASK=002
- QBIT_PORT=8282 # qBittorrent Port (optional)
- /mnt/:/mnt:rshared
- ./config/:/app
restart: unless-stopped
devices:
- /dev/fuse:/dev/fuse:rwm
cap_add:
- SYS_ADMIN
security_opt:
- apparmor:unconfined
```
Run the Docker Compose setup:
@@ -73,44 +72,36 @@ docker-compose up -d
## Binary Installation
If you prefer not to use Docker, you can download and run the binary directly.
Download the binary from the releases page
Download your OS-specific release from the [release page](https://github.com/sirrobot01/decypharr/releases).
Create a configuration file (see Configuration)
Run the binary:
```bash
chmod +x decypharr
./decypharr --config /path/to/config/folder
```
The config directory should contain your config.json file.
### Notes for Docker Users
## config.json
- Ensure that the `/mnt/` directory is mounted correctly to access your media files.
- You can adjust the `PUID` and `PGID` environment variables to match your user and group IDs for proper file permissions.
- The `UMASK` environment variable can be set to control file permissions created by Decypharr.
The `config.json` file is where you configure Decypharr. You can find a sample configuration file in the `configs` directory of the repository.
##### Health Checks
- Health checks are disabled by default. You can enable them by adding a `healthcheck` section in your `docker-compose.yml` file.
- Health checks verify the availability of several parts of the application:
- The main web interface
- The qBittorrent API
- The WebDAV server (if enabled). You should disable health checks for the initial indexes as they can take a long time to complete.
You can also configure Decypharr through the web interface, but it's recommended to start with the config file for initial setup.
```json
{
"debrids": [
{
"name": "realdebrid",
"api_key": "your_api_key_here",
"folder": "/mnt/remote/realdebrid/__all__/",
"use_webdav": true
}
],
"qbittorrent": {
"download_folder": "/mnt/symlinks/",
"categories": ["sonarr", "radarr"]
},
"use_auth": false,
"log_level": "info",
"port": "8282"
}
```yaml
services:
decypharr:
...
...
healthcheck:
test: ["CMD", "/usr/bin/healthcheck", "--config", "/app/"]
interval: 10s
timeout: 10s
retries: 3
```
### Few Notes
- Make sure decypharr has access to the directories specified in the configuration file.
- Ensure decypharr has write permissions to the qBittorrent download folder.
- Make sure decypharr can write to the `./config/` directory.

View File

@@ -0,0 +1,24 @@
/* Light mode image - visible by default */
.light-mode-image {
display: block;
}
/* Dark mode image - hidden by default */
.dark-mode-image {
display: none;
}
/* When dark theme (slate) is active */
[data-md-color-scheme="slate"] .light-mode-image {
display: none;
}
[data-md-color-scheme="slate"] .dark-mode-image {
display: block;
}
/* Optional: smooth transition */
.light-mode-image,
.dark-mode-image {
transition: opacity 0.2s ease-in-out;
}

View File

@@ -2,15 +2,35 @@
This guide will help you get started with Decypharr after installation.
## Basic Setup
After installing Decypharr, you can access the web interface at `http://localhost:8282` or your configured host/port.
1. Create your `config.json` file (see [Configuration](configuration/index.md) for details)
2. Start the Decypharr service using Docker or binary
3. Access the UI at `http://localhost:8282` (or your configured host/port)
4. Connect your Arr applications (Sonarr, Radarr, etc.)
### Initial Configuration
If it's the first time you're accessing the UI, you will be prompted to set up your credentials. You can skip this step if you don't want to enable authentication. If you choose to set up credentials, enter a username, a password, and the password confirmation, then click **Save**. You will be redirected to the settings page.
## Connecting to Sonarr/Radarr
### Debrid Configuration
![Decypharr Settings](images/settings/debrid.png)
- Click on **Debrid** in the tab
- Add your desired Debrid services (Real Debrid, Torbox, Debrid Link, All Debrid) by entering the required API keys or tokens.
- Set the **Mount/Rclone Folder**. This is where decypharr will look for added torrents to symlink them to your media library.
- If you're using internal webdav, do not forget the `/__all__` suffix
- Enable WebDAV
- You can leave the remaining settings as default for now.
### Qbittorrent Configuration
![Qbittorrent Settings](images/settings/qbittorent.png)
- Click on **Qbittorrent** in the tab
- Set the **Download Folder** to where you want Decypharr to save downloaded files. These files will be symlinked to the mount folder you configured earlier.
You can leave the remaining settings as default for now.
### Arrs Configuration
You can skip Arr configuration for now. Decypharr will auto-add them when you connect to Sonarr or Radarr later.
#### Connecting to Sonarr/Radarr
![Sonarr/Radarr Setup](images/settings/arr.png)
To connect Decypharr to your Sonarr or Radarr instance:
1. In Sonarr/Radarr, go to **Settings → Download Client → Add Client → qBittorrent**
@@ -18,22 +38,38 @@ To connect Decypharr to your Sonarr or Radarr instance:
- **Host**: `localhost` (or the IP of your Decypharr server)
- **Port**: `8282` (or your configured qBittorrent port)
- **Username**: `http://sonarr:8989` (your Arr host with http/https)
- **Password**: `sonarr_token` (your Arr API token)
- **Password**: `sonarr_token` (your Arr API token, you can get this from Sonarr/Radarr settings)
- **Category**: e.g., `sonarr`, `radarr` (match what you configured in Decypharr)
- **Use SSL**: `No`
- **Sequential Download**: `No` or `Yes` (if you want to download torrents locally instead of symlink)
3. Click **Test** to verify the connection
4. Click **Save** to add the download client
![Sonarr/Radarr Setup](images/sonarr-setup.png)
## Using the UI
### Rclone Configuration
The Decypharr UI provides a familiar qBittorrent-like interface with additional features for Debrid services:
![Rclone Settings](images/settings/rclone.png)
- Add new torrents
- Monitor download status
- Access WebDAV functionality
- Edit your configuration
If you want Decypharr to automatically mount WebDAV folders using Rclone, you need to set up Rclone first:
Access the UI at `http://localhost:8282` or your configured host/port.
If you're using Docker, the rclone binary is already included in the container. If you're running Decypharr directly, make sure Rclone is installed on your system.
Enable **Mount**
- **Global Mount Path**: Set the path where you want to mount the WebDAV folders (e.g., `/mnt/remote`). Decypharr will create subfolders for each Debrid service. For example, if you set `/mnt/remote`, it will create `/mnt/remote/realdebrid`, `/mnt/remote/torbox`, etc. This should be the grandparent of your mount folder set in the Debrid configuration.
- **User ID**: Set the user ID for Rclone mounts (defaults to the value of the `PUID` environment variable).
- **Group ID**: Set the group ID for Rclone mounts (defaults to the value of the `PGID` environment variable).
- **Buffer Size**: Set the buffer size for Rclone mounts.
You should set other options based on your use case. If you don't know what you're doing, leave it as defaults. Checkout the [Rclone documentation](https://rclone.org/commands/rclone_mount/) for more details.
### Repair Configuration
![Repair Settings](images/settings/repair.png)
Repair is an optional feature that allows you to fix missing files, symlinks, and other issues in your media library.
- Click on **Repair** in the tab
- Enable **Scheduled Repair** if you want Decypharr to automatically check for missing files at your specified interval.
- Set the **Repair Interval** to how often you want Decypharr to check for missing files (e.g. 1h, 6h, 12h, 24h; you can also use cron syntax like `0 0 * * *` for daily checks).
- Enable **WebDav** (you should enable this if you enabled WebDav in the Debrid configuration)
- **Auto Process**: Enable this if you want Decypharr to automatically process repair jobs when they are done. This could delete the original files, symlinks, be wary!!!
- **Worker Threads**: Set the number of worker threads for processing repair jobs. More threads can speed up the process but may consume more resources.

View File

@@ -6,6 +6,9 @@ repo_name: sirrobot01/decypharr
edit_uri: blob/main/docs
extra_css:
- styles/styles.css
theme:
name: material
logo: images/logo.png
@@ -59,22 +62,17 @@ nav:
- Home: index.md
- Installation: installation.md
- Usage: usage.md
- Configuration:
- Overview: configuration/index.md
- General: configuration/general.md
- Debrid Providers: configuration/debrid.md
- qBittorrent: configuration/qbittorrent.md
- Arr Integration: configuration/arrs.md
- API Documentation: api.md
- Features:
- Overview: features/index.md
- Repair Worker: features/repair-worker.md
- WebDAV: features/webdav.md
- Guides:
- Overview: guides/index.md
- Setting Up with Rclone: guides/rclone.md
- Changelog: changelog.md
- Manual Downloading: guides/downloading.md
- Internal Mounting: guides/internal-mounting.md
plugins:
- search
- tags
- tags
- swagger-ui-tag

3
docs/requirements.txt Normal file
View File

@@ -0,0 +1,3 @@
mkdocs==1.6.1
mkdocs-material==9.6.16
mkdocs-swagger-ui-tag==0.6.10

20
go.mod
View File

@@ -7,32 +7,34 @@ toolchain go1.24.3
require (
github.com/anacrolix/torrent v1.55.0
github.com/cavaliergopher/grab/v3 v3.0.1
github.com/go-chi/chi/v5 v5.1.0
github.com/go-chi/chi/v5 v5.2.2
github.com/go-co-op/gocron/v2 v2.16.1
github.com/google/uuid v1.6.0
github.com/gorilla/sessions v1.4.0
github.com/robfig/cron/v3 v3.0.1
github.com/rs/zerolog v1.33.0
github.com/stanNthe5/stringbuf v0.0.3
golang.org/x/crypto v0.33.0
golang.org/x/net v0.35.0
golang.org/x/sync v0.12.0
golang.org/x/time v0.8.0
go.uber.org/ratelimit v0.3.1
golang.org/x/crypto v0.39.0
golang.org/x/net v0.41.0
golang.org/x/sync v0.15.0
gopkg.in/natefinch/lumberjack.v2 v2.2.1
)
require (
github.com/anacrolix/missinggo v1.3.0 // indirect
github.com/anacrolix/missinggo/v2 v2.7.3 // indirect
github.com/benbjohnson/clock v1.3.0 // indirect
github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/google/go-cmp v0.6.0 // indirect
github.com/google/go-cmp v0.7.0 // indirect
github.com/gorilla/securecookie v1.1.2 // indirect
github.com/huandu/xstrings v1.3.2 // indirect
github.com/jonboulle/clockwork v0.5.0 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-colorable v0.1.14 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/rogpeppe/go-internal v1.13.1 // indirect
golang.org/x/sys v0.30.0 // indirect
github.com/puzpuzpuz/xsync/v4 v4.1.0 // indirect
github.com/rogpeppe/go-internal v1.14.1 // indirect
golang.org/x/sys v0.33.0 // indirect
)

41
go.sum
View File

@@ -36,6 +36,8 @@ github.com/anacrolix/tagflag v1.1.0/go.mod h1:Scxs9CV10NQatSmbyjqmqmeQNwGzlNe0CM
github.com/anacrolix/torrent v1.55.0 h1:s9yh/YGdPmbN9dTa+0Inh2dLdrLQRvEAj1jdFW/Hdd8=
github.com/anacrolix/torrent v1.55.0/go.mod h1:sBdZHBSZNj4de0m+EbYg7vvs/G/STubxu/GzzNbojsE=
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A=
github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/benbjohnson/immutable v0.2.0/go.mod h1:uc6OHo6PN2++n98KHLxW8ef4W42ylHiQSENghE1ezxI=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
@@ -68,8 +70,8 @@ github.com/glycerine/go-unsnap-stream v0.0.0-20190901134440-81cf024a9e0a/go.mod
github.com/glycerine/goconvey v0.0.0-20180728074245-46e3a41ad493/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24=
github.com/glycerine/goconvey v0.0.0-20190315024820-982ee783a72e/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24=
github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24=
github.com/go-chi/chi/v5 v5.1.0 h1:acVI1TYaD+hhedDJ3r54HyA6sExp3HfXq7QWEEY/xMw=
github.com/go-chi/chi/v5 v5.1.0/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8=
github.com/go-chi/chi/v5 v5.2.2 h1:CMwsvRVTbXVytCk1Wd72Zy1LAsAh9GxMmSNWLHCG618=
github.com/go-chi/chi/v5 v5.2.2/go.mod h1:L2yAIGWB3H+phAw1NxKwWM+7eUH/lU8pOMm5hHcoops=
github.com/go-co-op/gocron/v2 v2.16.1 h1:ux/5zxVRveCaCuTtNI3DiOk581KC1KpJbpJFYUEVYwo=
github.com/go-co-op/gocron/v2 v2.16.1/go.mod h1:opexeOFy5BplhsKdA7bzY9zeYih8I8/WNJ4arTIFPVc=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
@@ -100,8 +102,8 @@ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5a
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@@ -141,8 +143,9 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
@@ -183,11 +186,13 @@ github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/puzpuzpuz/xsync/v4 v4.1.0 h1:x9eHRl4QhZFIPJ17yl4KKW9xLyVWbb3/Yq4SXpjF71U=
github.com/puzpuzpuz/xsync/v4 v4.1.0/go.mod h1:VJDmTCJMBt8igNxnkQd86r+8KUeN1quSfNKu5bLYFQo=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
github.com/rs/zerolog v1.33.0 h1:1cU2KZkvPxNyfgEmhHAz/1A9Bz+llsdYzklWFzgp0r8=
github.com/rs/zerolog v1.33.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss=
@@ -216,12 +221,16 @@ github.com/willf/bitset v1.1.10/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPy
go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/ratelimit v0.3.1 h1:K4qVE+byfv/B3tC+4nYWP7v/6SimcO7HzHekoMNBma0=
go.uber.org/ratelimit v0.3.1/go.mod h1:6euWsTB6U/Nb3X++xEUXA8ciPJvr19Q/0h1+oDcJhRk=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.33.0 h1:IOBPskki6Lysi0lo9qQvbxiQ+FvsCC/YWOecCHAixus=
golang.org/x/crypto v0.33.0/go.mod h1:bVdXmD7IV/4GdElGPozy6U7lWdRXA4qyRVGJV57uQ5M=
golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM=
golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
@@ -237,8 +246,8 @@ golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73r
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.35.0 h1:T5GQRQb2y08kTAByq9L4/bz8cipCdA8FbRTXewonqY8=
golang.org/x/net v0.35.0/go.mod h1:EglIi67kWsHKlRzzVMUD93VMSWGFOMSZgxFjparz1Qk=
golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw=
golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -246,8 +255,8 @@ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8=
golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -262,12 +271,10 @@ golang.org/x/sys v0.0.0-20200413165638-669c56c373c4/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw=
golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg=
golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=

View File

@@ -2,6 +2,8 @@ package config
import (
"cmp"
"crypto/rand"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
@@ -12,6 +14,13 @@ import (
"sync"
)
type RepairStrategy string
const (
RepairStrategyPerFile RepairStrategy = "per_file"
RepairStrategyPerTorrent RepairStrategy = "per_torrent"
)
var (
instance *Config
once sync.Once
@@ -19,15 +28,21 @@ var (
)
type Debrid struct {
Name string `json:"name,omitempty"`
APIKey string `json:"api_key,omitempty"`
DownloadAPIKeys []string `json:"download_api_keys,omitempty"`
Folder string `json:"folder,omitempty"`
DownloadUncached bool `json:"download_uncached,omitempty"`
CheckCached bool `json:"check_cached,omitempty"`
RateLimit string `json:"rate_limit,omitempty"` // 200/minute or 10/second
Proxy string `json:"proxy,omitempty"`
AddSamples bool `json:"add_samples,omitempty"`
Name string `json:"name,omitempty"`
APIKey string `json:"api_key,omitempty"`
DownloadAPIKeys []string `json:"download_api_keys,omitempty"`
Folder string `json:"folder,omitempty"`
RcloneMountPath string `json:"rclone_mount_path,omitempty"` // Custom rclone mount path for this debrid service
DownloadUncached bool `json:"download_uncached,omitempty"`
CheckCached bool `json:"check_cached,omitempty"`
RateLimit string `json:"rate_limit,omitempty"` // 200/minute or 10/second
RepairRateLimit string `json:"repair_rate_limit,omitempty"`
DownloadRateLimit string `json:"download_rate_limit,omitempty"`
Proxy string `json:"proxy,omitempty"`
UnpackRar bool `json:"unpack_rar,omitempty"`
AddSamples bool `json:"add_samples,omitempty"`
MinimumFreeSlot int `json:"minimum_free_slot,omitempty"` // Minimum active pots to use this debrid
Limit int `json:"limit,omitempty"` // Maximum number of total torrents
UseWebDav bool `json:"use_webdav,omitempty"`
WebDav
@@ -51,22 +66,68 @@ type Arr struct {
Cleanup bool `json:"cleanup,omitempty"`
SkipRepair bool `json:"skip_repair,omitempty"`
DownloadUncached *bool `json:"download_uncached,omitempty"`
SelectedDebrid string `json:"selected_debrid,omitempty"`
Source string `json:"source,omitempty"` // The source of the arr, e.g. "auto", "config", "". Auto means it was automatically detected from the arr
}
type Repair struct {
Enabled bool `json:"enabled,omitempty"`
Interval string `json:"interval,omitempty"`
RunOnStart bool `json:"run_on_start,omitempty"`
ZurgURL string `json:"zurg_url,omitempty"`
AutoProcess bool `json:"auto_process,omitempty"`
UseWebDav bool `json:"use_webdav,omitempty"`
Workers int `json:"workers,omitempty"`
ReInsert bool `json:"reinsert,omitempty"`
Enabled bool `json:"enabled,omitempty"`
Interval string `json:"interval,omitempty"`
ZurgURL string `json:"zurg_url,omitempty"`
AutoProcess bool `json:"auto_process,omitempty"`
UseWebDav bool `json:"use_webdav,omitempty"`
Workers int `json:"workers,omitempty"`
ReInsert bool `json:"reinsert,omitempty"`
Strategy RepairStrategy `json:"strategy,omitempty"`
}
type Auth struct {
Username string `json:"username,omitempty"`
Password string `json:"password,omitempty"`
APIToken string `json:"api_token,omitempty"`
}
type Rclone struct {
// Global mount folder where all providers will be mounted as subfolders
Enabled bool `json:"enabled,omitempty"`
MountPath string `json:"mount_path,omitempty"`
RcPort string `json:"rc_port,omitempty"`
// Cache settings
CacheDir string `json:"cache_dir,omitempty"`
// VFS settings
VfsCacheMode string `json:"vfs_cache_mode,omitempty"` // off, minimal, writes, full
VfsCacheMaxAge string `json:"vfs_cache_max_age,omitempty"` // Maximum age of objects in the cache (default 1h)
VfsDiskSpaceTotal string `json:"vfs_disk_space_total,omitempty"` // Total disk space available for the cache (default off)
VfsCacheMaxSize string `json:"vfs_cache_max_size,omitempty"` // Maximum size of the cache (default off)
VfsCachePollInterval string `json:"vfs_cache_poll_interval,omitempty"` // How often to poll for changes (default 1m)
VfsReadChunkSize string `json:"vfs_read_chunk_size,omitempty"` // Read chunk size (default 128M)
VfsReadChunkSizeLimit string `json:"vfs_read_chunk_size_limit,omitempty"` // Max chunk size (default off)
VfsReadAhead string `json:"vfs_read_ahead,omitempty"` // read ahead size
BufferSize string `json:"buffer_size,omitempty"` // Buffer size for reading files (default 16M)
VfsCacheMinFreeSpace string `json:"vfs_cache_min_free_space,omitempty"`
VfsFastFingerprint bool `json:"vfs_fast_fingerprint,omitempty"`
VfsReadChunkStreams int `json:"vfs_read_chunk_streams,omitempty"`
AsyncRead *bool `json:"async_read,omitempty"` // Use async read for files
Transfers int `json:"transfers,omitempty"` // Number of transfers to use (default 4)
UseMmap bool `json:"use_mmap,omitempty"`
// File system settings
UID uint32 `json:"uid,omitempty"` // User ID for mounted files
GID uint32 `json:"gid,omitempty"` // Group ID for mounted files
Umask string `json:"umask,omitempty"`
// Timeout settings
AttrTimeout string `json:"attr_timeout,omitempty"` // Attribute cache timeout (default 1s)
DirCacheTime string `json:"dir_cache_time,omitempty"` // Directory cache time (default 5m)
// Performance settings
NoModTime bool `json:"no_modtime,omitempty"` // Don't read/write modification time
NoChecksum bool `json:"no_checksum,omitempty"` // Don't checksum files on upload
LogLevel string `json:"log_level,omitempty"`
}
type Config struct {
@@ -75,19 +136,22 @@ type Config struct {
URLBase string `json:"url_base,omitempty"`
Port string `json:"port,omitempty"`
LogLevel string `json:"log_level,omitempty"`
Debrids []Debrid `json:"debrids,omitempty"`
QBitTorrent QBitTorrent `json:"qbittorrent,omitempty"`
Arrs []Arr `json:"arrs,omitempty"`
Repair Repair `json:"repair,omitempty"`
WebDav WebDav `json:"webdav,omitempty"`
AllowedExt []string `json:"allowed_file_types,omitempty"`
MinFileSize string `json:"min_file_size,omitempty"` // Minimum file size to download, 10MB, 1GB, etc
MaxFileSize string `json:"max_file_size,omitempty"` // Maximum file size to download (0 means no limit)
Path string `json:"-"` // Path to save the config file
UseAuth bool `json:"use_auth,omitempty"`
Auth *Auth `json:"-"`
DiscordWebhook string `json:"discord_webhook_url,omitempty"`
LogLevel string `json:"log_level,omitempty"`
Debrids []Debrid `json:"debrids,omitempty"`
QBitTorrent QBitTorrent `json:"qbittorrent,omitempty"`
Arrs []Arr `json:"arrs,omitempty"`
Repair Repair `json:"repair,omitempty"`
WebDav WebDav `json:"webdav,omitempty"`
Rclone Rclone `json:"rclone,omitempty"`
AllowedExt []string `json:"allowed_file_types,omitempty"`
MinFileSize string `json:"min_file_size,omitempty"` // Minimum file size to download, 10MB, 1GB, etc
MaxFileSize string `json:"max_file_size,omitempty"` // Maximum file size to download (0 means no limit)
Path string `json:"-"` // Path to save the config file
UseAuth bool `json:"use_auth,omitempty"`
Auth *Auth `json:"-"`
DiscordWebhook string `json:"discord_webhook_url,omitempty"`
RemoveStalledAfter string `json:"remove_stalled_after,omitzero"`
CallbackURL string `json:"callback_url,omitempty"`
}
func (c *Config) JsonFile() string {
@@ -97,6 +161,10 @@ func (c *Config) AuthFile() string {
return filepath.Join(c.Path, "auth.json")
}
func (c *Config) TorrentsFile() string {
return filepath.Join(c.Path, "torrents.json")
}
func (c *Config) loadConfig() error {
// Load the config file
if configPath == "" {
@@ -179,6 +247,15 @@ func ValidateConfig(config *Config) error {
return nil
}
// generateAPIToken creates a new cryptographically random 256-bit API
// token, returned as a 64-character lowercase hex string. It fails only
// when the OS randomness source fails.
func generateAPIToken() (string, error) {
	// Renamed from "bytes" to avoid shadowing the stdlib bytes package.
	buf := make([]byte, 32) // 256-bit token
	if _, err := rand.Read(buf); err != nil {
		return "", err
	}
	return hex.EncodeToString(buf), nil
}
// SetConfigPath overrides the package-level configuration directory used
// on the next (re)load. There is no locking here, so it should be called
// during startup, before the config is first read.
func SetConfigPath(path string) {
	configPath = path
}
@@ -231,6 +308,10 @@ func (c *Config) IsSizeAllowed(size int64) bool {
return true
}
// SecretKey returns the signing secret: the DECYPHARR_SECRET_KEY
// environment variable when set, otherwise a hard-coded fallback.
// NOTE(review): the baked-in fallback is identical for every installation
// that does not set the env var — consider generating and persisting a
// per-install secret instead.
func (c *Config) SecretKey() string {
	return cmp.Or(os.Getenv("DECYPHARR_SECRET_KEY"), "\"wqj(v%lj*!-+kf@4&i95rhh_!5_px5qnuwqbr%cjrvrozz_r*(\"")
}
func (c *Config) GetAuth() *Auth {
if !c.UseAuth {
return nil
@@ -261,26 +342,29 @@ func (c *Config) NeedsSetup() error {
}
// NeedsAuth reports whether authentication is enabled but no username has
// been configured yet (i.e. the auth setup flow still has to run).
//
// The rendered block contained an unreachable second return statement
// (`!c.UseAuth && c.GetAuth().Username == ""`), which would also nil-deref:
// GetAuth() returns nil when UseAuth is false. Keep the guard-first form.
func (c *Config) NeedsAuth() bool {
	if !c.UseAuth {
		return false
	}
	return c.GetAuth().Username == ""
}
func (c *Config) updateDebrid(d Debrid) Debrid {
workers := runtime.NumCPU() * 50
perDebrid := workers / len(c.Debrids)
if len(d.DownloadAPIKeys) == 0 {
d.DownloadAPIKeys = append(d.DownloadAPIKeys, d.APIKey)
var downloadKeys []string
if len(d.DownloadAPIKeys) > 0 {
downloadKeys = d.DownloadAPIKeys
} else {
// If no download API keys are specified, use the main API key
downloadKeys = []string{d.APIKey}
}
d.DownloadAPIKeys = downloadKeys
if !d.UseWebDav {
return d
}
if d.TorrentsRefreshInterval == "" {
d.TorrentsRefreshInterval = cmp.Or(c.WebDav.TorrentsRefreshInterval, "15s") // 15 seconds
d.TorrentsRefreshInterval = cmp.Or(c.WebDav.TorrentsRefreshInterval, "45s") // 45 seconds
}
if d.WebDav.DownloadLinksRefreshInterval == "" {
d.DownloadLinksRefreshInterval = cmp.Or(c.WebDav.DownloadLinksRefreshInterval, "40m") // 40 minutes
@@ -336,8 +420,55 @@ func (c *Config) setDefaults() {
c.URLBase += "/"
}
// Set repair defaults
if c.Repair.Strategy == "" {
c.Repair.Strategy = RepairStrategyPerTorrent
}
// Rclone defaults
if c.Rclone.Enabled {
c.Rclone.RcPort = cmp.Or(c.Rclone.RcPort, "5572")
if c.Rclone.AsyncRead == nil {
_asyncTrue := true
c.Rclone.AsyncRead = &_asyncTrue
}
c.Rclone.VfsCacheMode = cmp.Or(c.Rclone.VfsCacheMode, "off")
if c.Rclone.UID == 0 {
c.Rclone.UID = uint32(os.Getuid())
}
if c.Rclone.GID == 0 {
if runtime.GOOS == "windows" {
// On Windows, we use the current user's SID as GID
c.Rclone.GID = uint32(os.Getuid()) // Windows does not have GID, using UID instead
} else {
c.Rclone.GID = uint32(os.Getgid())
}
}
if c.Rclone.Transfers == 0 {
c.Rclone.Transfers = 4 // Default number of transfers
}
if c.Rclone.VfsCacheMode != "off" {
c.Rclone.VfsCachePollInterval = cmp.Or(c.Rclone.VfsCachePollInterval, "1m") // Clean cache every minute
}
c.Rclone.DirCacheTime = cmp.Or(c.Rclone.DirCacheTime, "5m")
c.Rclone.LogLevel = cmp.Or(c.Rclone.LogLevel, "INFO")
}
// Load the auth file
c.Auth = c.GetAuth()
// Generate API token if auth is enabled and no token exists
if c.UseAuth {
if c.Auth == nil {
c.Auth = &Auth{}
}
if c.Auth.APIToken == "" {
if token, err := generateAPIToken(); err == nil {
c.Auth.APIToken = token
// Save the updated auth config
_ = c.SaveAuth(c.Auth)
}
}
}
}
func (c *Config) Save() error {
@@ -379,3 +510,7 @@ func Reload() {
instance = nil
once = sync.Once{}
}
// DefaultFreeSlot returns the default number of free download slots
// assumed when no explicit value is configured.
func DefaultFreeSlot() int {
	const defaultSlots = 10
	return defaultSlots
}

View File

@@ -24,7 +24,7 @@ func (c *Config) IsAllowedFile(filename string) bool {
}
func getDefaultExtensions() []string {
videoExts := strings.Split("webm,m4v,3gp,nsv,ty,strm,rm,rmvb,m3u,ifo,mov,qt,divx,xvid,bivx,nrg,pva,wmv,asf,asx,ogm,ogv,m2v,avi,bin,dat,dvr-ms,mpg,mpeg,mp4,avc,vp3,svq3,nuv,viv,dv,fli,flv,wpl,img,iso,vob,mkv,mk3d,ts,wtv,m2ts'", ",")
videoExts := strings.Split("webm,m4v,3gp,nsv,ty,strm,rm,rmvb,m3u,ifo,mov,qt,divx,xvid,bivx,nrg,pva,wmv,asf,asx,ogm,ogv,m2v,avi,bin,dat,dvr-ms,mpg,mpeg,mp4,avc,vp3,svq3,nuv,viv,dv,fli,flv,wpl,vob,mkv,mk3d,ts,wtv,m2ts", ",")
musicExts := strings.Split("MP3,WAV,FLAC,OGG,WMA,AIFF,ALAC,M4A,APE,AC3,DTS,M4P,MID,MIDI,MKA,MP2,MPA,RA,VOC,WV,AMR", ",")
// Combine both slices

View File

@@ -26,7 +26,7 @@ func GetLogPath() string {
}
}
return filepath.Join(logsDir, "decypharr.log")
return logsDir
}
func New(prefix string) zerolog.Logger {
@@ -34,7 +34,7 @@ func New(prefix string) zerolog.Logger {
level := config.Get().LogLevel
rotatingLogFile := &lumberjack.Logger{
Filename: GetLogPath(),
Filename: filepath.Join(GetLogPath(), "decypharr.log"),
MaxSize: 10,
MaxAge: 15,
Compress: true,

View File

@@ -45,6 +45,8 @@ func getDiscordHeader(event string) string {
return "[Decypharr] Repair Completed, Awaiting action"
case "repair_complete":
return "[Decypharr] Repair Complete"
case "repair_cancelled":
return "[Decypharr] Repair Cancelled"
default:
// split the event string and capitalize the first letter of each word
evs := strings.Split(event, "_")

View File

@@ -2,7 +2,6 @@ package request
import (
"bytes"
"compress/gzip"
"context"
"crypto/tls"
"encoding/json"
@@ -10,10 +9,9 @@ import (
"fmt"
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/logger"
"go.uber.org/ratelimit"
"golang.org/x/net/proxy"
"golang.org/x/time/rate"
"io"
"math"
"math/rand"
"net"
"net/http"
@@ -53,7 +51,7 @@ type ClientOption func(*Client)
// Client represents an HTTP client with additional capabilities
type Client struct {
client *http.Client
rateLimiter *rate.Limiter
rateLimiter ratelimit.Limiter
headers map[string]string
headersMu sync.RWMutex
maxRetries int
@@ -85,7 +83,7 @@ func WithRedirectPolicy(policy func(req *http.Request, via []*http.Request) erro
}
// WithRateLimiter sets a rate limiter
func WithRateLimiter(rl *rate.Limiter) ClientOption {
func WithRateLimiter(rl ratelimit.Limiter) ClientOption {
return func(c *Client) {
c.rateLimiter = rl
}
@@ -137,9 +135,11 @@ func WithProxy(proxyURL string) ClientOption {
// doRequest performs a single HTTP request with rate limiting
func (c *Client) doRequest(req *http.Request) (*http.Response, error) {
if c.rateLimiter != nil {
err := c.rateLimiter.Wait(req.Context())
if err != nil {
return nil, fmt.Errorf("rate limiter wait: %w", err)
select {
case <-req.Context().Done():
return nil, req.Context().Err()
default:
c.rateLimiter.Take()
}
}
@@ -298,40 +298,7 @@ func New(options ...ClientOption) *Client {
}
// Configure proxy if needed
if client.proxy != "" {
if strings.HasPrefix(client.proxy, "socks5://") {
// Handle SOCKS5 proxy
socksURL, err := url.Parse(client.proxy)
if err != nil {
client.logger.Error().Msgf("Failed to parse SOCKS5 proxy URL: %v", err)
} else {
auth := &proxy.Auth{}
if socksURL.User != nil {
auth.User = socksURL.User.Username()
password, _ := socksURL.User.Password()
auth.Password = password
}
dialer, err := proxy.SOCKS5("tcp", socksURL.Host, auth, proxy.Direct)
if err != nil {
client.logger.Error().Msgf("Failed to create SOCKS5 dialer: %v", err)
} else {
transport.DialContext = func(ctx context.Context, network, addr string) (net.Conn, error) {
return dialer.Dial(network, addr)
}
}
}
} else {
proxyURL, err := url.Parse(client.proxy)
if err != nil {
client.logger.Error().Msgf("Failed to parse proxy URL: %v", err)
} else {
transport.Proxy = http.ProxyURL(proxyURL)
}
}
} else {
transport.Proxy = http.ProxyFromEnvironment
}
SetProxy(transport, client.proxy)
// Set the transport to the client
client.client.Transport = transport
@@ -340,7 +307,10 @@ func New(options ...ClientOption) *Client {
return client
}
func ParseRateLimit(rateStr string) *rate.Limiter {
func ParseRateLimit(rateStr string) ratelimit.Limiter {
if rateStr == "" {
return nil
}
parts := strings.SplitN(rateStr, "/", 2)
if len(parts) != 2 {
return nil
@@ -352,23 +322,21 @@ func ParseRateLimit(rateStr string) *rate.Limiter {
return nil
}
// Set slack size to 10%
slackSize := count / 10
// normalize unit
unit := strings.ToLower(strings.TrimSpace(parts[1]))
unit = strings.TrimSuffix(unit, "s")
burstSize := int(math.Ceil(float64(count) * 0.1))
if burstSize < 1 {
burstSize = 1
}
if burstSize > count {
burstSize = count
}
switch unit {
case "minute", "min":
return rate.NewLimiter(rate.Limit(float64(count)/60.0), burstSize)
return ratelimit.New(count, ratelimit.Per(time.Minute), ratelimit.WithSlack(slackSize))
case "second", "sec":
return rate.NewLimiter(rate.Limit(float64(count)), burstSize)
return ratelimit.New(count, ratelimit.Per(time.Second), ratelimit.WithSlack(slackSize))
case "hour", "hr":
return rate.NewLimiter(rate.Limit(float64(count)/3600.0), burstSize)
return ratelimit.New(count, ratelimit.Per(time.Hour), ratelimit.WithSlack(slackSize))
case "day", "d":
return ratelimit.New(count, ratelimit.Per(24*time.Hour), ratelimit.WithSlack(slackSize))
default:
return nil
}
@@ -383,31 +351,6 @@ func JSONResponse(w http.ResponseWriter, data interface{}, code int) {
}
}
func Gzip(body []byte) []byte {
if len(body) == 0 {
return nil
}
// Check if the pool is nil
buf := bytes.NewBuffer(make([]byte, 0, len(body)))
gz, err := gzip.NewWriterLevel(buf, gzip.BestSpeed)
if err != nil {
return nil
}
if _, err := gz.Write(body); err != nil {
return nil
}
if err := gz.Close(); err != nil {
return nil
}
result := make([]byte, buf.Len())
copy(result, buf.Bytes())
return result
}
func Default() *Client {
once.Do(func() {
instance = New()
@@ -435,9 +378,47 @@ func isRetryableError(err error) bool {
var netErr net.Error
if errors.As(err, &netErr) {
// Retry on timeout errors and temporary errors
return netErr.Timeout() || netErr.Temporary()
return netErr.Timeout()
}
// Not a retryable error
return false
}
// SetProxy configures transport to route requests through proxyURL.
//
// Supported forms:
//   - "socks5://[user:pass@]host:port" — installs a SOCKS5 DialContext.
//   - any other non-empty URL — used as an HTTP(S) proxy.
//   - "" — falls back to the environment (HTTP_PROXY et al.).
//
// Parse/dial errors are silently ignored and leave transport unchanged,
// matching the previous behavior; callers get no error signal.
// NOTE(review): consider returning an error so misconfigured proxies are
// surfaced instead of silently bypassed.
func SetProxy(transport *http.Transport, proxyURL string) {
	if proxyURL == "" {
		transport.Proxy = http.ProxyFromEnvironment
		return
	}

	if !strings.HasPrefix(proxyURL, "socks5://") {
		// Plain HTTP(S) proxy.
		if parsed, err := url.Parse(proxyURL); err == nil {
			transport.Proxy = http.ProxyURL(parsed)
		}
		return
	}

	// SOCKS5 proxy: build a dialer and route all connections through it.
	socksURL, err := url.Parse(proxyURL)
	if err != nil {
		return
	}
	auth := &proxy.Auth{}
	if socksURL.User != nil {
		auth.User = socksURL.User.Username()
		auth.Password, _ = socksURL.User.Password()
	}
	dialer, err := proxy.SOCKS5("tcp", socksURL.Host, auth, proxy.Direct)
	if err != nil {
		return
	}
	transport.DialContext = func(ctx context.Context, network, addr string) (net.Conn, error) {
		return dialer.Dial(network, addr)
	}
}

View File

@@ -1,4 +1,6 @@
package request
package utils
import "errors"
type HTTPError struct {
StatusCode int
@@ -33,3 +35,13 @@ var TorrentNotFoundError = &HTTPError{
Message: "Torrent not found",
Code: "torrent_not_found",
}
// TooManyActiveDownloadsError is returned when the debrid provider refuses
// new downloads because the account's active-download limit is reached.
// Status 509 ("Bandwidth Limit Exceeded") is non-standard but serves as a
// distinctive marker here.
var TooManyActiveDownloadsError = &HTTPError{
	StatusCode: 509,
	Message:    "Too many active downloads",
	Code:       "too_many_active_downloads",
}
func IsTooManyActiveDownloadsError(err error) bool {
return errors.As(err, &TooManyActiveDownloadsError)
}

View File

@@ -1,7 +1,10 @@
package utils
import (
"fmt"
"io"
"net/url"
"os"
"strings"
)
@@ -19,3 +22,65 @@ func PathUnescape(path string) string {
return unescapedPath
}
// PreCacheFile warms the OS page cache for the given files by reading the
// byte ranges a player typically probes first: the file header (256KB) and
// a 64KB region starting at the 1MB offset.
//
// Files that no longer exist are skipped silently (they have usually been
// moved by an *arr application); an empty input is an error.
func PreCacheFile(filePaths []string) error {
	if len(filePaths) == 0 {
		return fmt.Errorf("no file paths provided")
	}
	for _, filePath := range filePaths {
		// Helper function keeps the per-file defer tight to one iteration.
		if err := preCacheOne(filePath); err != nil {
			return err
		}
	}
	return nil
}

// preCacheOne reads the cache-warming ranges of a single file.
func preCacheOne(path string) error {
	file, err := os.Open(path)
	if err != nil {
		if os.IsNotExist(err) {
			// File has probably been moved by arr, return silently
			return nil
		}
		return fmt.Errorf("open %s: %w", path, err)
	}
	defer file.Close()

	// Pre-cache the file header (first 256KB) using 16KB chunks.
	if err := readSmallChunks(file, 0, 256*1024, 16*1024); err != nil {
		return err
	}
	// Warm 64KB around the 1MB mark (moov atoms, index data, etc. —
	// presumably; confirm against the players this targets).
	return readSmallChunks(file, 1024*1024, 64*1024, 16*1024)
}

// readSmallChunks reads up to totalToRead bytes from file starting at
// startPos, in reads of at most chunkSize bytes. Hitting EOF early is not
// an error; any other seek/read error is returned.
func readSmallChunks(file *os.File, startPos int64, totalToRead int, chunkSize int) error {
	if _, err := file.Seek(startPos, io.SeekStart); err != nil {
		return err
	}
	buf := make([]byte, chunkSize)
	for remaining := totalToRead; remaining > 0; {
		toRead := min(chunkSize, remaining)
		n, err := file.Read(buf[:toRead])
		if err != nil {
			if err == io.EOF {
				break
			}
			return err
		}
		remaining -= n
	}
	return nil
}

View File

@@ -25,11 +25,11 @@ var (
)
type Magnet struct {
Name string
InfoHash string
Size int64
Link string
File []byte
Name string `json:"name"`
InfoHash string `json:"infoHash"`
Size int64 `json:"size"`
Link string `json:"link"`
File []byte `json:"-"`
}
func (m *Magnet) IsTorrent() bool {
@@ -83,7 +83,6 @@ func GetMagnetFromBytes(torrentData []byte) (*Magnet, error) {
if err != nil {
return nil, err
}
log.Println("InfoHash: ", infoHash)
magnet := &Magnet{
InfoHash: infoHash,
Name: info.Name,

View File

@@ -22,3 +22,15 @@ func Contains(slice []string, value string) bool {
}
return false
}
// Mask redacts the middle of a secret for display: long values keep the
// first 8 and last 4 characters, medium values keep 4 and 2, and anything
// of 8 characters or fewer is fully masked.
func Mask(text string) string {
	switch n := len(text); {
	case n > 12:
		return text[:8] + "****" + text[n-4:]
	case n > 8:
		return text[:4] + "****" + text[n-2:]
	default:
		return "****"
	}
}

View File

@@ -7,7 +7,7 @@ import (
)
var (
videoMatch = "(?i)(\\.)(webm|m4v|3gp|nsv|ty|strm|rm|rmvb|m3u|ifo|mov|qt|divx|xvid|bivx|nrg|pva|wmv|asf|asx|ogm|ogv|m2v|avi|bin|dat|dvr-ms|mpg|mpeg|mp4|avc|vp3|svq3|nuv|viv|dv|fli|flv|wpl|img|iso|vob|mkv|mk3d|ts|wtv|m2ts)$"
videoMatch = "(?i)(\\.)(webm|m4v|3gp|nsv|ty|strm|rm|rmvb|m3u|ifo|mov|qt|divx|xvid|bivx|nrg|pva|wmv|asf|asx|ogm|ogv|m2v|avi|bin|dat|dvr-ms|mpg|mpeg|mp4|avc|vp3|svq3|nuv|viv|dv|fli|flv|wpl|vob|mkv|mk3d|ts|wtv|m2ts)$"
musicMatch = "(?i)(\\.)(mp2|mp3|m4a|m4b|m4p|ogg|oga|opus|wma|wav|wv|flac|ape|aif|aiff|aifc)$"
sampleMatch = `(?i)(^|[\s/\\])(sample|trailer|thumb|special|extras?)s?[-/]|(\((sample|trailer|thumb|special|extras?)s?\))|(-\s*(sample|trailer|thumb|special|extras?)s?)`
)
@@ -40,12 +40,10 @@ func RemoveInvalidChars(value string) string {
}
func RemoveExtension(value string) string {
loc := mediaRegex.FindStringIndex(value)
if loc != nil {
if loc := mediaRegex.FindStringIndex(value); loc != nil {
return value[:loc[0]]
} else {
return value
}
return value
}
func IsMediaFile(path string) bool {
@@ -53,7 +51,8 @@ func IsMediaFile(path string) bool {
}
func IsSampleFile(path string) bool {
if strings.HasSuffix(strings.ToLower(path), "sample.mkv") {
filename := filepath.Base(path)
if strings.HasSuffix(strings.ToLower(filename), "sample.mkv") {
return true
}
return RegexMatch(sampleRegex, path)

View File

@@ -1,7 +1,6 @@
package utils
import (
"context"
"fmt"
"github.com/go-co-op/gocron/v2"
"github.com/robfig/cron/v3"
@@ -10,25 +9,6 @@ import (
"time"
)
func ScheduleJob(ctx context.Context, interval string, loc *time.Location, jobFunc func()) (gocron.Scheduler, error) {
if loc == nil {
loc = time.Local
}
s, err := gocron.NewScheduler(gocron.WithLocation(loc))
if err != nil {
return s, fmt.Errorf("failed to create scheduler: %w", err)
}
jd, err := ConvertToJobDef(interval)
if err != nil {
return s, fmt.Errorf("failed to convert interval to job definition: %w", err)
}
// Schedule the job
if _, err = s.NewJob(jd, gocron.NewTask(jobFunc), gocron.WithContext(ctx)); err != nil {
return s, fmt.Errorf("failed to create job: %w", err)
}
return s, nil
}
// ConvertToJobDef converts a string interval to a gocron.JobDefinition.
func ConvertToJobDef(interval string) (gocron.JobDefinition, error) {
// Parse the interval string

1624
package-lock.json generated Normal file

File diff suppressed because it is too large Load Diff

19
package.json Normal file
View File

@@ -0,0 +1,19 @@
{
"name": "decypharr",
"version": "1.0.0",
"description": "Media management tool",
"scripts": {
"build-css": "tailwindcss -i ./pkg/web/assets/styles.css -o ./pkg/web/assets/build/css/styles.css --minify",
"minify-js": "node scripts/minify-js.js",
"download-assets": "node scripts/download-assets.js",
"build": "npm run build-css && npm run minify-js",
"build-all": "npm run download-assets && npm run build",
"dev": "npm run build && air"
},
"devDependencies": {
"tailwindcss": "^3.4.0",
"daisyui": "^4.12.10",
"terser": "^5.24.0",
"clean-css": "^5.3.3"
}
}

View File

@@ -3,23 +3,31 @@ package arr
import (
"bytes"
"context"
"crypto/tls"
"encoding/json"
"fmt"
"io"
"net/http"
"strings"
"sync"
"time"
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/logger"
"github.com/sirrobot01/decypharr/internal/request"
"io"
"net/http"
"strconv"
"strings"
"sync"
"time"
)
// Type is a type of arr
type Type string
// sharedClient is the single HTTP client reused for all Arr requests.
// NOTE(review): TLS certificate verification is disabled
// (InsecureSkipVerify) — presumably to support self-signed arr instances,
// but it accepts any certificate for every arr; confirm this is intended.
var sharedClient = &http.Client{
	Transport: &http.Transport{
		TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
	},
	Timeout: 60 * time.Second,
}
const (
Sonarr Type = "sonarr"
Radarr Type = "radarr"
@@ -35,10 +43,11 @@ type Arr struct {
Cleanup bool `json:"cleanup"`
SkipRepair bool `json:"skip_repair"`
DownloadUncached *bool `json:"download_uncached"`
client *request.Client
SelectedDebrid string `json:"selected_debrid,omitempty"` // The debrid service selected for this arr
Source string `json:"source,omitempty"` // The source of the arr, e.g. "auto", "manual". Auto means it was automatically detected from the arr
}
func New(name, host, token string, cleanup, skipRepair bool, downloadUncached *bool) *Arr {
func New(name, host, token string, cleanup, skipRepair bool, downloadUncached *bool, selectedDebrid, source string) *Arr {
return &Arr{
Name: name,
Host: host,
@@ -47,7 +56,8 @@ func New(name, host, token string, cleanup, skipRepair bool, downloadUncached *b
Cleanup: cleanup,
SkipRepair: skipRepair,
DownloadUncached: downloadUncached,
client: request.New(),
SelectedDebrid: selectedDebrid,
Source: source,
}
}
@@ -74,14 +84,11 @@ func (a *Arr) Request(method, endpoint string, payload interface{}) (*http.Respo
}
req.Header.Set("Content-Type", "application/json")
req.Header.Set("X-Api-Key", a.Token)
if a.client == nil {
a.client = request.New()
}
var resp *http.Response
for attempts := 0; attempts < 5; attempts++ {
resp, err = a.client.Do(req)
resp, err = sharedClient.Do(req)
if err != nil {
return nil, err
}
@@ -109,8 +116,10 @@ func (a *Arr) Validate() error {
if err != nil {
return err
}
if resp.StatusCode != http.StatusOK {
return fmt.Errorf("arr test failed: %s", resp.Status)
defer resp.Body.Close()
// If response is not 200 or 404(this is the case for Lidarr, etc), return an error
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNotFound {
return fmt.Errorf("failed to validate arr %s: %s", a.Name, resp.Status)
}
return nil
}
@@ -121,10 +130,10 @@ type Storage struct {
logger zerolog.Logger
}
func (as *Storage) Cleanup() {
as.mu.Lock()
defer as.mu.Unlock()
as.Arrs = make(map[string]*Arr)
func (s *Storage) Cleanup() {
s.mu.Lock()
defer s.mu.Unlock()
s.Arrs = make(map[string]*Arr)
}
func InferType(host, name string) Type {
@@ -145,8 +154,11 @@ func InferType(host, name string) Type {
func NewStorage() *Storage {
arrs := make(map[string]*Arr)
for _, a := range config.Get().Arrs {
if a.Host == "" || a.Token == "" || a.Name == "" {
continue // Skip if host or token is not set
}
name := a.Name
arrs[name] = New(name, a.Host, a.Token, a.Cleanup, a.SkipRepair, a.DownloadUncached)
arrs[name] = New(name, a.Host, a.Token, a.Cleanup, a.SkipRepair, a.DownloadUncached, a.SelectedDebrid, a.Source)
}
return &Storage{
Arrs: arrs,
@@ -154,46 +166,38 @@ func NewStorage() *Storage {
}
}
func (as *Storage) AddOrUpdate(arr *Arr) {
as.mu.Lock()
defer as.mu.Unlock()
if arr.Name == "" {
func (s *Storage) AddOrUpdate(arr *Arr) {
s.mu.Lock()
defer s.mu.Unlock()
if arr.Host == "" || arr.Token == "" || arr.Name == "" {
return
}
as.Arrs[arr.Name] = arr
s.Arrs[arr.Name] = arr
}
func (as *Storage) Get(name string) *Arr {
as.mu.Lock()
defer as.mu.Unlock()
return as.Arrs[name]
func (s *Storage) Get(name string) *Arr {
s.mu.Lock()
defer s.mu.Unlock()
return s.Arrs[name]
}
func (as *Storage) GetAll() []*Arr {
as.mu.Lock()
defer as.mu.Unlock()
arrs := make([]*Arr, 0, len(as.Arrs))
for _, arr := range as.Arrs {
if arr.Host != "" && arr.Token != "" {
arrs = append(arrs, arr)
}
func (s *Storage) GetAll() []*Arr {
s.mu.Lock()
defer s.mu.Unlock()
arrs := make([]*Arr, 0, len(s.Arrs))
for _, arr := range s.Arrs {
arrs = append(arrs, arr)
}
return arrs
}
func (as *Storage) Clear() {
as.mu.Lock()
defer as.mu.Unlock()
as.Arrs = make(map[string]*Arr)
}
func (as *Storage) StartSchedule(ctx context.Context) error {
func (s *Storage) StartWorker(ctx context.Context) error {
ticker := time.NewTicker(10 * time.Second)
select {
case <-ticker.C:
as.cleanupArrsQueue()
s.cleanupArrsQueue()
case <-ctx.Done():
ticker.Stop()
return nil
@@ -201,9 +205,9 @@ func (as *Storage) StartSchedule(ctx context.Context) error {
return nil
}
func (as *Storage) cleanupArrsQueue() {
func (s *Storage) cleanupArrsQueue() {
arrs := make([]*Arr, 0)
for _, arr := range as.Arrs {
for _, arr := range s.Arrs {
if !arr.Cleanup {
continue
}
@@ -212,26 +216,18 @@ func (as *Storage) cleanupArrsQueue() {
if len(arrs) > 0 {
for _, arr := range arrs {
if err := arr.CleanupQueue(); err != nil {
as.logger.Error().Err(err).Msgf("Failed to cleanup arr %s", arr.Name)
s.logger.Error().Err(err).Msgf("Failed to cleanup arr %s", arr.Name)
}
}
}
}
func (a *Arr) Refresh() error {
func (a *Arr) Refresh() {
payload := struct {
Name string `json:"name"`
}{
Name: "RefreshMonitoredDownloads",
}
resp, err := a.Request(http.MethodPost, "api/v3/command", payload)
if err == nil && resp != nil {
statusOk := strconv.Itoa(resp.StatusCode)[0] == '2'
if statusOk {
return nil
}
}
return fmt.Errorf("failed to refresh: %v", err)
_, _ = a.Request(http.MethodPost, "api/v3/command", payload)
}

View File

@@ -105,6 +105,7 @@ func (a *Arr) GetMedia(mediaId string) ([]Content, error) {
Id: d.Id,
EpisodeId: eId,
SeasonNumber: file.SeasonNumber,
Size: file.Size,
})
}
if len(files) == 0 {
@@ -148,6 +149,7 @@ func GetMovies(a *Arr, tvId string) ([]Content, error) {
FileId: movie.MovieFile.Id,
Id: movie.Id,
Path: movie.MovieFile.Path,
Size: movie.MovieFile.Size,
})
ct.Files = files
contents = append(contents, ct)
@@ -232,6 +234,35 @@ func (a *Arr) searchRadarr(files []ContentFile) error {
}
// SearchMissing triggers the arr's missing-content search for the given
// files, delegating to batchSearchMissing for batching. No-op on empty
// input.
func (a *Arr) SearchMissing(files []ContentFile) error {
	if len(files) == 0 {
		return nil
	}
	return a.batchSearchMissing(files)
}
// batchSearchMissing runs searchMissing in batches of at most 50 files so
// very large repairs do not build oversized search commands.
//
// Batched mode is best-effort: a failing batch is skipped so the remaining
// batches still run, and nil is returned (this preserves the original
// behavior, where the per-batch error check was a no-op `continue`). Only
// the single-batch path propagates an error.
func (a *Arr) batchSearchMissing(files []ContentFile) error {
	const batchSize = 50
	if len(files) == 0 {
		return nil
	}
	if len(files) <= batchSize {
		return a.searchMissing(files)
	}
	for i := 0; i < len(files); i += batchSize {
		end := min(i+batchSize, len(files))
		// Best-effort: ignore the error and continue with the next batch.
		_ = a.searchMissing(files[i:end])
	}
	return nil
}
func (a *Arr) searchMissing(files []ContentFile) error {
switch a.Type {
case Sonarr:
return a.searchSonarr(files)
@@ -243,6 +274,28 @@ func (a *Arr) SearchMissing(files []ContentFile) error {
}
// DeleteFiles removes the given files from the arr, batching the requests
// in groups of 50 so large repairs do not produce oversized payloads.
//
// Batched mode is best-effort: a failed batch is skipped so the remaining
// batches still run, and nil is returned (the original per-batch error
// check was a no-op `continue`). Only the single-batch path propagates an
// error.
func (a *Arr) DeleteFiles(files []ContentFile) error {
	const batchSize = 50
	if len(files) == 0 {
		return nil
	}
	if len(files) <= batchSize {
		return a.batchDeleteFiles(files)
	}
	for i := 0; i < len(files); i += batchSize {
		end := min(i+batchSize, len(files))
		// Best-effort: keep deleting the remaining batches on failure.
		_ = a.batchDeleteFiles(files[i:end])
	}
	return nil
}
func (a *Arr) batchDeleteFiles(files []ContentFile) error {
ids := make([]int, 0)
for _, f := range files {
ids = append(ids, f.FileId)

View File

@@ -133,7 +133,7 @@ func (a *Arr) CleanupQueue() error {
messages := q.StatusMessages
if len(messages) > 0 {
for _, m := range messages {
if strings.Contains(strings.Join(m.Messages, " "), "No files found are eligible for import in") {
if strings.Contains(strings.Join(m.Messages, " "), "No files found are eligible") {
isMessedUp = true
break
}

View File

@@ -205,5 +205,4 @@ func (a *Arr) Import(path string, seriesId int, seasons []int) (io.ReadCloser, e
}
defer resp.Body.Close()
return resp.Body, nil
}

View File

@@ -11,6 +11,7 @@ type Movie struct {
RelativePath string `json:"relativePath"`
Path string `json:"path"`
Id int `json:"id"`
Size int64 `json:"size"`
} `json:"movieFile"`
Id int `json:"id"`
}
@@ -25,6 +26,8 @@ type ContentFile struct {
IsSymlink bool `json:"isSymlink"`
IsBroken bool `json:"isBroken"`
SeasonNumber int `json:"seasonNumber"`
Processed bool `json:"processed"`
Size int64 `json:"size"`
}
func (file *ContentFile) Delete() {
@@ -44,4 +47,5 @@ type seriesFile struct {
SeasonNumber int `json:"seasonNumber"`
Path string `json:"path"`
Id int `json:"id"`
Size int64 `json:"size"`
}

View File

@@ -0,0 +1,119 @@
package account
import (
"fmt"
"net/http"
"sync/atomic"
"github.com/puzpuzpuz/xsync/v4"
"github.com/sirrobot01/decypharr/internal/request"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
)
// Account represents a single debrid API credential together with its
// cached download links and runtime health/traffic counters.
//
// It contains atomic fields, so an Account must not be copied after first
// use; always pass *Account.
// NOTE(review): the atomic.Bool/Int32/Int64 fields carry json tags but do
// not marshal as plain values with encoding/json — confirm how these
// structs are actually serialized.
type Account struct {
	Debrid string `json:"debrid"` // The debrid service name, e.g. "realdebrid"
	// links caches resolved download links; key is the sliced file link
	// (see sliceFileLink).
	links       *xsync.Map[string, types.DownloadLink]
	Index       int          `json:"index"` // The index of the account in the config
	Disabled    atomic.Bool  `json:"disabled"`
	Token       string       `json:"token"`
	TrafficUsed atomic.Int64 `json:"traffic_used"` // Traffic used in bytes
	Username    string       `json:"username"`     // Username for the account
	httpClient  *request.Client
	// Account reactivation tracking
	DisableCount atomic.Int32 `json:"disable_count"`
}
// Equals reports whether other refers to the same debrid account, i.e.
// both the API token and the debrid service name match. A nil other is
// never equal.
func (a *Account) Equals(other *Account) bool {
	if other == nil {
		return false
	}
	sameToken := a.Token == other.Token
	sameService := a.Debrid == other.Debrid
	return sameToken && sameService
}
// Client returns this account's dedicated HTTP client (configured with the
// account's auth header and rate limiter at construction time).
func (a *Account) Client() *request.Client {
	return a.httpClient
}
// sliceFileLink normalizes a file link to the cache key used in a.links.
// For Real-Debrid only the first 39 characters are kept — presumably the
// stable URL prefix that identifies the file (TODO confirm); shorter links
// and all other providers use the full link unchanged.
func (a *Account) sliceFileLink(fileLink string) string {
	if a.Debrid != "realdebrid" {
		return fileLink
	}
	if len(fileLink) < 39 {
		return fileLink
	}
	return fileLink[0:39]
}
// GetDownloadLink returns the cached download link for fileLink (keyed by
// its sliced form), or types.ErrDownloadLinkNotFound on a cache miss.
func (a *Account) GetDownloadLink(fileLink string) (types.DownloadLink, error) {
	slicedLink := a.sliceFileLink(fileLink)
	dl, ok := a.links.Load(slicedLink)
	if !ok {
		return types.DownloadLink{}, types.ErrDownloadLinkNotFound
	}
	return dl, nil
}
// StoreDownloadLink caches dl, keyed by the sliced form of its file link.
func (a *Account) StoreDownloadLink(dl types.DownloadLink) {
	slicedLink := a.sliceFileLink(dl.Link)
	a.links.Store(slicedLink, dl)
}
// DeleteDownloadLink evicts the cached link for fileLink, if present.
func (a *Account) DeleteDownloadLink(fileLink string) {
	slicedLink := a.sliceFileLink(fileLink)
	a.links.Delete(slicedLink)
}
// ClearDownloadLinks drops every cached download link for this account.
func (a *Account) ClearDownloadLinks() {
	a.links.Clear()
}
// DownloadLinksCount returns how many download links are currently cached.
func (a *Account) DownloadLinksCount() int {
	return a.links.Size()
}
// StoreDownloadLinks caches every link in dls. Only the map values are
// used; keys are recomputed via sliceFileLink inside StoreDownloadLink.
func (a *Account) StoreDownloadLinks(dls map[string]*types.DownloadLink) {
	for _, dl := range dls {
		a.StoreDownloadLink(*dl)
	}
}
// MarkDisabled marks the account as disabled and increments the disable
// count used by the reactivation logic to decide when to stop retrying
// this account.
func (a *Account) MarkDisabled() {
	a.Disabled.Store(true)
	a.DisableCount.Add(1)
}
// Reset re-enables the account and clears its disable counter.
func (a *Account) Reset() {
	a.DisableCount.Store(0)
	a.Disabled.Store(false)
}
// CheckBandwidth probes whether this account can still serve downloads by
// requesting a single byte from one of its cached download links. It
// returns an error when no resolved link is available or the probe fails.
func (a *Account) CheckBandwidth() error {
	// Pick any cached entry that already has a resolved download URL.
	downloadLink := ""
	a.links.Range(func(key string, dl types.DownloadLink) bool {
		if dl.DownloadLink != "" {
			downloadLink = dl.DownloadLink
			return false // stop iterating
		}
		return true
	})
	if downloadLink == "" {
		return fmt.Errorf("no download link found")
	}
	// Let's check the download link status
	req, err := http.NewRequest(http.MethodGet, downloadLink, nil)
	if err != nil {
		return err
	}
	// Ask for the first byte only: we want a status code, not the payload.
	// The previous code issued an unbounded GET, which could download the
	// entire file just to check the account.
	req.Header.Set("Range", "bytes=0-0")
	// Use a simple client
	// NOTE(review): http.DefaultClient has no timeout, so a hung server
	// blocks this check indefinitely — consider a client with a Timeout.
	client := http.DefaultClient
	resp, err := client.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	// 206 is the expected reply to the range request; 200 means the server
	// ignored the Range header, which still indicates a healthy account.
	if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent {
		return fmt.Errorf("account check failed with status code %d", resp.StatusCode)
	}
	return nil
}

View File

@@ -0,0 +1,239 @@
package account
import (
"fmt"
"slices"
"sync/atomic"
"github.com/puzpuzpuz/xsync/v4"
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/request"
"github.com/sirrobot01/decypharr/internal/utils"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
"go.uber.org/ratelimit"
)
const (
	// MaxDisableCount is the number of times an account may be disabled
	// before the automatic re-activation check stops trying to revive it.
	MaxDisableCount = 3
)
// Manager tracks every configured account for one debrid service and
// selects which account is currently used for downloads. The atomic
// pointer and concurrent map make it safe for concurrent use.
type Manager struct {
	debrid   string                       // debrid service name, e.g. "realdebrid"
	current  atomic.Pointer[Account]      // currently selected account (may hold nil)
	accounts *xsync.Map[string, *Account] // all accounts, keyed by API token
	logger   zerolog.Logger
}
// NewManager builds the account Manager for one debrid service. It creates
// one Account per configured download API key — each with its own
// rate-limited, pre-authenticated HTTP client — and selects the first
// configured account as current.
func NewManager(debridConf config.Debrid, downloadRL ratelimit.Limiter, logger zerolog.Logger) *Manager {
	m := &Manager{
		debrid:   debridConf.Name,
		accounts: xsync.NewMap[string, *Account](),
		logger:   logger,
	}
	var firstAccount *Account
	for idx, token := range debridConf.DownloadAPIKeys {
		// Skip blank keys left in the config.
		if token == "" {
			continue
		}
		headers := map[string]string{
			"Authorization": fmt.Sprintf("Bearer %s", token),
		}
		account := &Account{
			Debrid: debridConf.Name,
			Token:  token,
			Index:  idx,
			links:  xsync.NewMap[string, types.DownloadLink](),
			httpClient: request.New(
				request.WithRateLimiter(downloadRL),
				request.WithLogger(logger),
				request.WithHeaders(headers),
				request.WithMaxRetries(3),
				request.WithRetryableStatus(429, 447, 502),
				request.WithProxy(debridConf.Proxy),
			),
		}
		m.accounts.Store(token, account)
		if firstAccount == nil {
			firstAccount = account
		}
	}
	// With zero usable keys, current holds nil and Current() will log.
	m.current.Store(firstAccount)
	return m
}
// Active returns every account that is not disabled, ordered by its
// configured index.
func (m *Manager) Active() []*Account {
	active := make([]*Account, 0)
	m.accounts.Range(func(_ string, acc *Account) bool {
		if acc.Disabled.Load() {
			return true // skip disabled accounts
		}
		active = append(active, acc)
		return true
	})
	slices.SortFunc(active, func(a, b *Account) int {
		return a.Index - b.Index
	})
	return active
}
// All returns every configured account, enabled or not, ordered by its
// configured index.
func (m *Manager) All() []*Account {
	accounts := make([]*Account, 0)
	m.accounts.Range(func(_ string, acc *Account) bool {
		accounts = append(accounts, acc)
		return true
	})
	slices.SortFunc(accounts, func(a, b *Account) int {
		return a.Index - b.Index
	})
	return accounts
}
// Current returns the account to use for downloads right now. Fast path:
// the cached pointer when it is still enabled. Otherwise the first active
// account is promoted; if every account is disabled it falls back to the
// first configured account rather than returning nothing usable, and only
// returns nil when no accounts are configured at all.
func (m *Manager) Current() *Account {
	// Fast path - most common case
	current := m.current.Load()
	if current != nil && !current.Disabled.Load() {
		return current
	}
	// Slow path - find new current account
	activeAccounts := m.Active()
	if len(activeAccounts) == 0 {
		// No active accounts left, try to use disabled ones
		m.logger.Warn().Str("debrid", m.debrid).Msg("No active accounts available, all accounts are disabled")
		allAccounts := m.All()
		if len(allAccounts) == 0 {
			m.logger.Error().Str("debrid", m.debrid).Msg("No accounts configured")
			m.current.Store(nil)
			return nil
		}
		m.current.Store(allAccounts[0])
		return allAccounts[0]
	}
	newCurrent := activeAccounts[0]
	m.current.Store(newCurrent)
	return newCurrent
}
// Disable marks account as unusable and, when it was the current account,
// immediately promotes the next active account (or stores nil when none
// remain, letting Current() apply its fallback later).
func (m *Manager) Disable(account *Account) {
	if account == nil {
		return
	}
	account.MarkDisabled()
	// If we're disabling the current account, it will be replaced
	// on the next Current() call - no need to proactively update
	current := m.current.Load()
	if current != nil && current.Token == account.Token {
		// Optional: immediately find replacement
		activeAccounts := m.Active()
		if len(activeAccounts) > 0 {
			m.current.Store(activeAccounts[0])
		} else {
			m.current.Store(nil)
		}
	}
}
// Reset re-enables every account, clears their disable counters, and
// re-selects the first active account (or nil when none exist).
func (m *Manager) Reset() {
	m.accounts.Range(func(key string, acc *Account) bool {
		acc.Reset()
		return true
	})
	// Set current to first active account
	activeAccounts := m.Active()
	if len(activeAccounts) > 0 {
		m.current.Store(activeAccounts[0])
	} else {
		m.current.Store(nil)
	}
}
// GetAccount returns the account registered under the given API token.
func (m *Manager) GetAccount(token string) (*Account, error) {
	if token == "" {
		return nil, fmt.Errorf("token cannot be empty")
	}
	account, found := m.accounts.Load(token)
	if !found {
		return nil, fmt.Errorf("account not found for token")
	}
	return account, nil
}
// GetDownloadLink looks up fileLink in the cache of the currently selected
// account. It fails when no account is active for this debrid service.
func (m *Manager) GetDownloadLink(fileLink string) (types.DownloadLink, error) {
	current := m.Current()
	if current == nil {
		return types.DownloadLink{}, fmt.Errorf("no active account for debrid service %s", m.debrid)
	}
	return current.GetDownloadLink(fileLink)
}
// GetAccountFromDownloadLink resolves the account that produced the given
// download link, using the API token recorded on the link.
func (m *Manager) GetAccountFromDownloadLink(downloadLink types.DownloadLink) (*Account, error) {
	switch {
	case downloadLink.Link == "":
		return nil, fmt.Errorf("cannot get account from empty download link")
	case downloadLink.Token == "":
		return nil, fmt.Errorf("cannot get account from download link without token")
	default:
		return m.GetAccount(downloadLink.Token)
	}
}
// StoreDownloadLink caches downloadLink on the account identified by the
// link's token. Links without a URL or token are ignored, as are tokens
// that do not belong to a known account.
func (m *Manager) StoreDownloadLink(downloadLink types.DownloadLink) {
	if downloadLink.Link == "" || downloadLink.Token == "" {
		return
	}
	account, err := m.GetAccount(downloadLink.Token)
	if err != nil || account == nil {
		return
	}
	account.StoreDownloadLink(downloadLink)
}
// Stats returns one display-ready map of details per managed account:
// masked token, usage counters, ordering, and whether it is the
// account currently in use.
func (m *Manager) Stats() []map[string]any {
	accounts := m.All()
	// Resolve the current account once; the original called m.Current()
	// on every iteration although it is loop-invariant (and Current()
	// may itself scan accounts to pick a replacement).
	current := m.Current()
	stats := make([]map[string]any, 0, len(accounts))
	for _, acc := range accounts {
		stats = append(stats, map[string]any{
			"in_use":       acc.Equals(current),
			"order":        acc.Index,
			"disabled":     acc.Disabled.Load(),
			"token_masked": utils.Mask(acc.Token),
			"username":     acc.Username,
			"traffic_used": acc.TrafficUsed.Load(),
			"links_count":  acc.DownloadLinksCount(),
			"debrid":       acc.Debrid,
		})
	}
	return stats
}
// CheckAndResetBandwidth re-checks every disabled account whose
// disable count is still below MaxDisableCount and re-activates those
// whose bandwidth check now passes. When at least one account comes
// back, the current selection is reset to the first active account.
func (m *Manager) CheckAndResetBandwidth() {
	found := false
	m.accounts.Range(func(key string, acc *Account) bool {
		// Only retry accounts that are disabled but not permanently so.
		if acc.Disabled.Load() && acc.DisableCount.Load() < MaxDisableCount {
			if err := acc.CheckBandwidth(); err == nil {
				acc.Disabled.Store(false)
				found = true
				m.logger.Info().Str("debrid", m.debrid).Str("token", utils.Mask(acc.Token)).Msg("Re-activated disabled account")
			} else {
				m.logger.Debug().Err(err).Str("debrid", m.debrid).Str("token", utils.Mask(acc.Token)).Msg("Account still disabled")
			}
		}
		return true
	})
	if found {
		// If we re-activated any account, reset current to first active
		activeAccounts := m.Active()
		if len(activeAccounts) > 0 {
			m.current.Store(activeAccounts[0])
		}
	}
}

View File

@@ -0,0 +1,30 @@
package common
import (
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/pkg/debrid/account"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
)
// Client abstracts a single debrid provider (Real-Debrid, TorBox,
// AllDebrid, Debrid-Link) behind one provider-neutral API used by the
// rest of the application.
type Client interface {
	// SubmitMagnet submits the torrent's magnet to the provider and
	// returns the torrent populated with its provider-side id.
	SubmitMagnet(tr *types.Torrent) (*types.Torrent, error)
	// CheckStatus waits for the submitted torrent to reach a usable
	// status, returning the updated torrent.
	CheckStatus(tr *types.Torrent) (*types.Torrent, error)
	// GetFileDownloadLinks resolves download links for the torrent's files.
	GetFileDownloadLinks(tr *types.Torrent) error
	// GetDownloadLink resolves a download link for a single file.
	GetDownloadLink(tr *types.Torrent, file *types.File) (types.DownloadLink, error)
	// DeleteTorrent removes the torrent from the provider.
	DeleteTorrent(torrentId string) error
	// IsAvailable reports cache availability per infohash.
	IsAvailable(infohashes []string) map[string]bool
	// GetDownloadUncached reports the provider's configured default for
	// downloading uncached torrents.
	GetDownloadUncached() bool
	// UpdateTorrent refreshes the torrent's fields from the provider.
	UpdateTorrent(torrent *types.Torrent) error
	// GetTorrent fetches a single torrent by its provider id.
	GetTorrent(torrentId string) (*types.Torrent, error)
	// GetTorrents lists the torrents on the provider account.
	GetTorrents() ([]*types.Torrent, error)
	// Name returns the provider's canonical name (e.g. "realdebrid").
	Name() string
	// Logger returns the provider-scoped logger.
	Logger() zerolog.Logger
	// GetDownloadingStatus lists the status strings that mean
	// "still downloading" for this provider.
	GetDownloadingStatus() []string
	// RefreshDownloadLinks re-generates download links.
	RefreshDownloadLinks() error
	// CheckLink verifies that a link is still valid on the provider.
	CheckLink(link string) error
	// GetMountPath returns the local mount folder for this provider.
	GetMountPath() string
	AccountManager() *account.Manager // Returns the active download account/token
	// GetProfile fetches the provider account profile.
	GetProfile() (*types.Profile, error)
	// GetAvailableSlots reports how many torrent slots remain.
	GetAvailableSlots() (int, error)
	SyncAccounts() error // Updates each account's details (traffic, username, etc.)
}

369
pkg/debrid/debrid.go Normal file
View File

@@ -0,0 +1,369 @@
package debrid
import (
"cmp"
"context"
"errors"
"fmt"
"sync"
"time"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/logger"
"github.com/sirrobot01/decypharr/internal/request"
"github.com/sirrobot01/decypharr/internal/utils"
"github.com/sirrobot01/decypharr/pkg/arr"
"github.com/sirrobot01/decypharr/pkg/debrid/common"
"github.com/sirrobot01/decypharr/pkg/debrid/providers/alldebrid"
"github.com/sirrobot01/decypharr/pkg/debrid/providers/debridlink"
"github.com/sirrobot01/decypharr/pkg/debrid/providers/realdebrid"
"github.com/sirrobot01/decypharr/pkg/debrid/providers/torbox"
debridStore "github.com/sirrobot01/decypharr/pkg/debrid/store"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
"github.com/sirrobot01/decypharr/pkg/rclone"
"go.uber.org/ratelimit"
)
// Debrid bundles one provider client with its optional WebDAV cache.
type Debrid struct {
	cache *debridStore.Cache // Could be nil if not using WebDAV
	client common.Client // HTTP client for making requests to the debrid service
}
// Client returns the provider client for this debrid bundle.
func (de *Debrid) Client() common.Client {
	return de.client
}
// Cache returns the WebDAV cache, or nil when WebDAV is not enabled.
func (de *Debrid) Cache() *debridStore.Cache {
	return de.cache
}
// Reset clears the WebDAV cache, if one is configured.
func (de *Debrid) Reset() {
	if de.cache == nil {
		return
	}
	de.cache.Reset()
}
// Storage owns every configured Debrid bundle, keyed by provider name.
// mu guards the debrids map; lastUsed records the provider most
// recently used to submit a torrent (see Process).
type Storage struct {
	debrids map[string]*Debrid
	mu sync.RWMutex
	lastUsed string
}
// NewStorage builds one Debrid bundle per configured provider. A
// provider whose client fails to initialize is logged and skipped.
// When a provider is configured for WebDAV, a cache is attached, plus
// an rclone mount pointed at this process's local WebDAV endpoint when
// rclone is enabled and a manager is supplied.
func NewStorage(rcManager *rclone.Manager) *Storage {
	cfg := config.Get()
	_logger := logger.Default()
	debrids := make(map[string]*Debrid)
	bindAddress := cfg.BindAddress
	if bindAddress == "" {
		bindAddress = "localhost"
	}
	// Local WebDAV endpoint served by this process; mounts point here.
	webdavUrl := fmt.Sprintf("http://%s:%s%s/webdav", bindAddress, cfg.Port, cfg.URLBase)
	for _, dc := range cfg.Debrids {
		client, err := createDebridClient(dc)
		if err != nil {
			_logger.Error().Err(err).Str("Debrid", dc.Name).Msg("failed to connect to debrid client")
			continue
		}
		var (
			cache *debridStore.Cache
			mounter *rclone.Mount
		)
		_log := client.Logger()
		if dc.UseWebDav {
			if cfg.Rclone.Enabled && rcManager != nil {
				mounter = rclone.NewMount(dc.Name, dc.RcloneMountPath, webdavUrl, rcManager)
			}
			cache = debridStore.NewDebridCache(dc, client, mounter)
			_log.Info().Msg("Debrid Service started with WebDAV")
		} else {
			_log.Info().Msg("Debrid Service started")
		}
		debrids[dc.Name] = &Debrid{
			cache: cache,
			client: client,
		}
	}
	d := &Storage{
		debrids: debrids,
		lastUsed: "",
	}
	return d
}
// Debrid returns the bundle registered under name, or nil when unknown.
func (d *Storage) Debrid(name string) *Debrid {
	d.mu.RLock()
	defer d.mu.RUnlock()
	// A missing key yields the zero value, which for *Debrid is nil.
	return d.debrids[name]
}
// StartWorker launches the background maintenance goroutines: periodic
// account syncing and periodic bandwidth re-checks for disabled
// accounts. It always returns nil.
func (d *Storage) StartWorker(ctx context.Context) error {
	if ctx == nil {
		ctx = context.Background()
	}
	// Start syncAccounts worker
	go d.syncAccountsWorker(ctx)
	// Start bandwidth reset worker
	go d.checkBandwidthWorker(ctx)
	return nil
}
// checkBandwidthWorker periodically re-checks disabled accounts so they
// can be re-enabled once their bandwidth limit resets. The loop runs
// until ctx is cancelled.
func (d *Storage) checkBandwidthWorker(ctx context.Context) {
	if ctx == nil {
		ctx = context.Background()
	}
	ticker := time.NewTicker(30 * time.Minute)
	go func() {
		// Stop the ticker when the worker exits; the original leaked it
		// after ctx cancellation.
		defer ticker.Stop()
		for {
			select {
			case <-ctx.Done():
				return
			case <-ticker.C:
				d.checkAccountBandwidth()
			}
		}
	}()
}
// checkAccountBandwidth asks every provider's account manager to
// re-evaluate its disabled accounts. Only a read lock is taken: the
// debrids map itself is never modified here, and CheckAndResetBandwidth
// may perform network I/O, so the original's write lock could stall
// every other Storage accessor for the duration.
func (d *Storage) checkAccountBandwidth() {
	d.mu.RLock()
	defer d.mu.RUnlock()
	for _, debrid := range d.debrids {
		if debrid == nil || debrid.client == nil {
			continue
		}
		if am := debrid.client.AccountManager(); am != nil {
			am.CheckAndResetBandwidth()
		}
	}
}
// syncAccountsWorker refreshes account details immediately, then every
// five minutes until ctx is cancelled.
func (d *Storage) syncAccountsWorker(ctx context.Context) {
	if ctx == nil {
		ctx = context.Background()
	}
	// Run one sync up-front so account details are fresh at startup.
	_ = d.syncAccounts()
	ticker := time.NewTicker(5 * time.Minute)
	go func() {
		// Stop the ticker when the worker exits; the original leaked it
		// after ctx cancellation.
		defer ticker.Stop()
		for {
			select {
			case <-ctx.Done():
				return
			case <-ticker.C:
				_ = d.syncAccounts()
			}
		}
	}()
}
// syncAccounts refreshes per-account details (traffic, username, ...)
// on every provider. SyncAccounts performs network requests, so only a
// read lock is taken — the debrids map is not modified here and the
// original's write lock would have blocked every other Storage accessor
// for the duration of the sync. Per-provider failures are logged and
// skipped; the method always returns nil.
func (d *Storage) syncAccounts() error {
	d.mu.RLock()
	defer d.mu.RUnlock()
	for name, debrid := range d.debrids {
		if debrid == nil || debrid.client == nil {
			continue
		}
		_log := debrid.client.Logger()
		if err := debrid.client.SyncAccounts(); err != nil {
			_log.Error().Err(err).Msgf("Failed to sync account for %s", name)
			continue
		}
	}
	return nil
}
// Debrids returns a shallow copy of the name -> Debrid table so callers
// can iterate without holding the Storage lock.
func (d *Storage) Debrids() map[string]*Debrid {
	d.mu.RLock()
	defer d.mu.RUnlock()
	out := make(map[string]*Debrid)
	for name, de := range d.debrids {
		if de == nil {
			continue
		}
		out[name] = de
	}
	return out
}
// Client returns the provider client registered under name, or nil.
func (d *Storage) Client(name string) common.Client {
	d.mu.RLock()
	defer d.mu.RUnlock()
	de, ok := d.debrids[name]
	if !ok {
		return nil
	}
	return de.client
}
// Reset tears down every debrid bundle and empties the storage.
func (d *Storage) Reset() {
	d.mu.Lock()
	defer d.mu.Unlock()
	// Reset each bundle (clears its WebDAV cache, if any) before
	// dropping the table.
	for _, de := range d.debrids {
		if de != nil {
			de.Reset()
		}
	}
	d.debrids = map[string]*Debrid{}
	d.lastUsed = ""
}
// Clients returns a copy of the name -> client table, omitting bundles
// without a client.
func (d *Storage) Clients() map[string]common.Client {
	d.mu.RLock()
	defer d.mu.RUnlock()
	out := make(map[string]common.Client)
	for name, de := range d.debrids {
		if de == nil || de.client == nil {
			continue
		}
		out[name] = de.client
	}
	return out
}
// Caches returns a copy of the name -> WebDAV cache table, omitting
// providers that run without WebDAV.
func (d *Storage) Caches() map[string]*debridStore.Cache {
	d.mu.RLock()
	defer d.mu.RUnlock()
	out := make(map[string]*debridStore.Cache)
	for name, de := range d.debrids {
		if de == nil || de.cache == nil {
			continue
		}
		out[name] = de.cache
	}
	return out
}
// FilterClients returns the clients for which filter reports true.
// Two fixes over the original: a read lock is taken instead of the
// write lock (the map is only scanned), and bundles with a nil client
// are skipped before invoking filter, which previously could receive a
// nil interface value.
func (d *Storage) FilterClients(filter func(common.Client) bool) map[string]common.Client {
	d.mu.RLock()
	defer d.mu.RUnlock()
	filteredClients := make(map[string]common.Client)
	for name, de := range d.debrids {
		if de == nil || de.client == nil {
			continue
		}
		if filter(de.client) {
			filteredClients[name] = de.client
		}
	}
	return filteredClients
}
// createDebridClient builds the provider-specific client for dc,
// pre-parsing the main/repair/download rate limits (repair and
// download fall back to the main limit when unset). Unknown provider
// names fall back to Real-Debrid.
func createDebridClient(dc config.Debrid) (common.Client, error) {
	rateLimits := map[string]ratelimit.Limiter{
		"main":     request.ParseRateLimit(dc.RateLimit),
		"repair":   request.ParseRateLimit(cmp.Or(dc.RepairRateLimit, dc.RateLimit)),
		"download": request.ParseRateLimit(cmp.Or(dc.DownloadRateLimit, dc.RateLimit)),
	}
	switch dc.Name {
	case "torbox":
		return torbox.New(dc, rateLimits)
	case "debridlink":
		return debridlink.New(dc, rateLimits)
	case "alldebrid":
		return alldebrid.New(dc, rateLimits)
	default:
		// Covers "realdebrid" and any unrecognized provider name.
		return realdebrid.New(dc, rateLimits)
	}
}
// Process submits the magnet to each eligible debrid client in turn
// and returns the first torrent that reaches a usable status.
//
// selectedDebrid, when non-empty, restricts processing to that single
// provider. The download-uncached decision is resolved in priority
// order: explicit override, then the Arr's setting, then (per client)
// the provider's own default. When every client fails, the collected
// errors are joined into one.
func Process(ctx context.Context, store *Storage, selectedDebrid string, magnet *utils.Magnet, a *arr.Arr, action string, overrideDownloadUncached bool) (*types.Torrent, error) {
	debridTorrent := &types.Torrent{
		InfoHash: magnet.InfoHash,
		Magnet:   magnet,
		Name:     magnet.Name,
		Arr:      a,
		Size:     magnet.Size,
		Files:    make(map[string]types.File),
	}
	clients := store.FilterClients(func(c common.Client) bool {
		return selectedDebrid == "" || c.Name() == selectedDebrid
	})
	if len(clients) == 0 {
		return nil, fmt.Errorf("no debrid clients available")
	}
	errs := make([]error, 0, len(clients))
	// Override first, arr second, debrid third
	if overrideDownloadUncached {
		debridTorrent.DownloadUncached = true
	} else if a.DownloadUncached != nil {
		// Arr cached is set
		debridTorrent.DownloadUncached = *a.DownloadUncached
	} else {
		debridTorrent.DownloadUncached = false
	}
	for _, db := range clients {
		_logger := db.Logger()
		_logger.Info().
			Str("Debrid", db.Name()).
			Str("Arr", a.Name).
			Str("Hash", debridTorrent.InfoHash).
			Str("Name", debridTorrent.Name).
			Str("Action", action).
			Msg("Processing torrent")
		// No override and no Arr preference: use this provider's default.
		if !overrideDownloadUncached && a.DownloadUncached == nil {
			debridTorrent.DownloadUncached = db.GetDownloadUncached()
		}
		dbt, err := db.SubmitMagnet(debridTorrent)
		if err != nil || dbt == nil || dbt.Id == "" {
			if err == nil {
				// BUG FIX: the original appended a possibly-nil err here;
				// errors.Join drops nils, which could leave the final
				// joined error nil even though submissions failed.
				err = fmt.Errorf("submitting magnet to %s returned no torrent id", db.Name())
			}
			errs = append(errs, err)
			continue
		}
		dbt.Arr = a
		_logger.Info().Str("id", dbt.Id).Msgf("Torrent: %s submitted to %s", dbt.Name, db.Name())
		// NOTE(review): lastUsed is written without holding store.mu —
		// confirm whether concurrent Process calls are possible.
		store.lastUsed = db.Name()
		torrent, err := db.CheckStatus(dbt)
		if err != nil && torrent != nil && torrent.Id != "" {
			// Delete the torrent if it was not downloaded
			go func(id string) {
				_ = db.DeleteTorrent(id)
			}(torrent.Id)
		}
		if err != nil {
			errs = append(errs, err)
			continue
		}
		if torrent == nil {
			errs = append(errs, fmt.Errorf("torrent %s returned nil after checking status", dbt.Name))
			continue
		}
		return torrent, nil
	}
	if len(errs) == 0 {
		return nil, fmt.Errorf("failed to process torrent: no clients available")
	}
	joinedErrors := errors.Join(errs...)
	return nil, fmt.Errorf("failed to process torrent: %w", joinedErrors)
}

View File

@@ -1,103 +0,0 @@
package debrid
import (
"fmt"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/utils"
"github.com/sirrobot01/decypharr/pkg/arr"
"github.com/sirrobot01/decypharr/pkg/debrid/alldebrid"
"github.com/sirrobot01/decypharr/pkg/debrid/debrid_link"
"github.com/sirrobot01/decypharr/pkg/debrid/realdebrid"
"github.com/sirrobot01/decypharr/pkg/debrid/torbox"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
"strings"
)
// createDebridClient constructs the provider client for dc.Name,
// defaulting to Real-Debrid for unknown provider names.
func createDebridClient(dc config.Debrid) types.Client {
	switch dc.Name {
	case "torbox":
		return torbox.New(dc)
	case "debridlink":
		return debrid_link.New(dc)
	case "alldebrid":
		return alldebrid.New(dc)
	default:
		// Covers "realdebrid" and any unrecognized name.
		return realdebrid.New(dc)
	}
}
// ProcessTorrent submits the magnet to each configured debrid client
// in turn and returns the first torrent whose status check completes.
//
// The download-uncached decision is resolved in priority order:
// explicit override, then the Arr's setting, then (per client) the
// client's own default.
//
// NOTE(review): when CheckStatus runs (success or failure) the
// function returns immediately instead of trying the remaining
// clients, so the error aggregation below the loop is reachable only
// when every SubmitMagnet call fails — confirm the early return is
// intentional. Also note errs may receive a nil entry when
// SubmitMagnet returns no error but no usable torrent.
func ProcessTorrent(d *Engine, magnet *utils.Magnet, a *arr.Arr, isSymlink, overrideDownloadUncached bool) (*types.Torrent, error) {
	debridTorrent := &types.Torrent{
		InfoHash: magnet.InfoHash,
		Magnet: magnet,
		Name: magnet.Name,
		Arr: a,
		Size: magnet.Size,
		Files: make(map[string]types.File),
	}
	errs := make([]error, 0, len(d.Clients))
	// Override first, arr second, debrid third
	if overrideDownloadUncached {
		debridTorrent.DownloadUncached = true
	} else if a.DownloadUncached != nil {
		// Arr cached is set
		debridTorrent.DownloadUncached = *a.DownloadUncached
	} else {
		debridTorrent.DownloadUncached = false
	}
	for index, db := range d.Clients {
		logger := db.GetLogger()
		logger.Info().Str("Debrid", db.GetName()).Str("Hash", debridTorrent.InfoHash).Msg("Processing torrent")
		// No override and no Arr preference: use this client's default.
		if !overrideDownloadUncached && a.DownloadUncached == nil {
			debridTorrent.DownloadUncached = db.GetDownloadUncached()
		}
		//if db.GetCheckCached() {
		//	hash, exists := db.IsAvailable([]string{debridTorrent.InfoHash})[debridTorrent.InfoHash]
		//	if !exists || !hash {
		//		logger.Info().Msgf("Torrent: %s is not cached", debridTorrent.Name)
		//		continue
		//	} else {
		//		logger.Info().Msgf("Torrent: %s is cached(or downloading)", debridTorrent.Name)
		//	}
		//}
		dbt, err := db.SubmitMagnet(debridTorrent)
		if err != nil || dbt == nil || dbt.Id == "" {
			errs = append(errs, err)
			continue
		}
		dbt.Arr = a
		logger.Info().Str("id", dbt.Id).Msgf("Torrent: %s submitted to %s", dbt.Name, db.GetName())
		d.LastUsed = index
		torrent, err := db.CheckStatus(dbt, isSymlink)
		if err != nil && torrent != nil && torrent.Id != "" {
			// Delete the torrent if it was not downloaded
			go func(id string) {
				_ = db.DeleteTorrent(id)
			}(torrent.Id)
		}
		return torrent, err
	}
	if len(errs) == 0 {
		return nil, fmt.Errorf("failed to process torrent: no clients available")
	}
	if len(errs) == 1 {
		return nil, fmt.Errorf("failed to process torrent: %w", errs[0])
	} else {
		errStrings := make([]string, 0, len(errs))
		for _, err := range errs {
			errStrings = append(errStrings, err.Error())
		}
		return nil, fmt.Errorf("failed to process torrent: %s", strings.Join(errStrings, ", "))
	}
}

View File

@@ -1,224 +0,0 @@
package debrid
import (
"errors"
"fmt"
"github.com/sirrobot01/decypharr/internal/request"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
"sync"
"time"
)
// linkCache is one cached, unrestricted download link.
type linkCache struct {
	Id string // provider-side id of the generated link (used for remote deletion)
	link string // the resolved (unrestricted) download URL
	accountId string // account that generated the link; may be empty
	expiresAt time.Time // local expiry; the entry is ignored after this instant
}
// downloadLinkCache is a mutex-guarded map from an original file link
// to its cached unrestricted download link.
type downloadLinkCache struct {
	data map[string]linkCache
	mu sync.Mutex
}
// newDownloadLinkCache returns an empty, ready-to-use link cache.
func newDownloadLinkCache() *downloadLinkCache {
	c := &downloadLinkCache{}
	c.data = map[string]linkCache{}
	return c
}
// reset drops every cached link, leaving the cache empty but usable.
func (c *downloadLinkCache) reset() {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.data = map[string]linkCache{}
}
// Load returns the cached entry for key and whether it exists.
func (c *downloadLinkCache) Load(key string) (linkCache, bool) {
	c.mu.Lock()
	defer c.mu.Unlock()
	entry, found := c.data[key]
	return entry, found
}
// Store inserts or replaces the cached entry for key.
func (c *downloadLinkCache) Store(key string, value linkCache) {
	c.mu.Lock()
	c.data[key] = value
	c.mu.Unlock()
}
// Delete removes the entry for key, if present.
func (c *downloadLinkCache) Delete(key string) {
	c.mu.Lock()
	delete(c.data, key)
	c.mu.Unlock()
}
// downloadLinkRequest is a single-flight placeholder: concurrent
// callers for the same link block on done while one request runs,
// then all read the shared result/err.
type downloadLinkRequest struct {
	result string
	err error
	done chan struct{}
}
// newDownloadLinkRequest returns a pending request; Wait blocks until
// Complete closes done.
func newDownloadLinkRequest() *downloadLinkRequest {
	return &downloadLinkRequest{done: make(chan struct{})}
}
// Complete records the outcome and wakes every goroutine blocked in
// Wait. It must be called exactly once; a second call would panic on
// the double close.
func (r *downloadLinkRequest) Complete(result string, err error) {
	r.result = result
	r.err = err
	close(r.done)
}
// Wait blocks until Complete has been called, then returns its outcome.
func (r *downloadLinkRequest) Wait() (string, error) {
	<-r.done
	return r.result, r.err
}
// GetDownloadLink returns an unrestricted download link for the file,
// serving from the link cache when possible and collapsing concurrent
// fetches for the same fileLink into a single in-flight request.
//
// NOTE(review): the Load-then-Store on downloadLinkRequests below is
// not atomic, so two goroutines can both miss and both fetch — confirm
// duplicate fetches are acceptable here.
func (c *Cache) GetDownloadLink(torrentName, filename, fileLink string) (string, error) {
	// Check link cache
	if dl := c.checkDownloadLink(fileLink); dl != "" {
		return dl, nil
	}
	if req, inFlight := c.downloadLinkRequests.Load(fileLink); inFlight {
		// Wait for the other request to complete and use its result
		result := req.(*downloadLinkRequest)
		return result.Wait()
	}
	// Create a new request object
	req := newDownloadLinkRequest()
	c.downloadLinkRequests.Store(fileLink, req)
	downloadLink, err := c.fetchDownloadLink(torrentName, filename, fileLink)
	// Complete the request and remove it from the map
	req.Complete(downloadLink, err)
	c.downloadLinkRequests.Delete(fileLink)
	return downloadLink, err
}
// fetchDownloadLink resolves an unrestricted download link for
// filename inside the named torrent. A missing restricted link is
// repaired by refreshing the torrent and, failing that, re-inserting
// it; a hoster-unavailable error triggers one re-insert-and-retry.
func (c *Cache) fetchDownloadLink(torrentName, filename, fileLink string) (string, error) {
	ct := c.GetTorrentByName(torrentName)
	if ct == nil {
		return "", fmt.Errorf("torrent not found")
	}
	file := ct.Files[filename]
	if file.Link == "" {
		// file link is empty, refresh the torrent to get restricted links
		ct = c.refreshTorrent(file.TorrentId) // Refresh the torrent from the debrid
		if ct == nil {
			return "", fmt.Errorf("failed to refresh torrent")
		}
		file = ct.Files[filename]
	}
	// If file.Link is still empty, re-insert the torrent as a last resort.
	if file.Link == "" {
		newCt, err := c.reInsertTorrent(ct)
		if err != nil {
			return "", fmt.Errorf("failed to reinsert torrent. %w", err)
		}
		ct = newCt
		file = ct.Files[filename]
	}
	c.logger.Trace().Msgf("Getting download link for %s(%s)", filename, file.Link)
	downloadLink, err := c.client.GetDownloadLink(ct.Torrent, &file)
	if err != nil {
		switch {
		case errors.Is(err, request.HosterUnavailableError):
			// Hoster temporarily unavailable: re-insert and retry once.
			newCt, rerr := c.reInsertTorrent(ct)
			if rerr != nil {
				return "", fmt.Errorf("failed to reinsert torrent: %w", rerr)
			}
			ct = newCt
			file = ct.Files[filename]
			downloadLink, err = c.client.GetDownloadLink(ct.Torrent, &file)
			if err != nil {
				return "", err
			}
			if downloadLink == nil {
				// BUG FIX: message was truncated ("... is empty for").
				return "", fmt.Errorf("download link is empty")
			}
			c.updateDownloadLink(downloadLink)
			// BUG FIX: the original returned ("", nil) here, handing the
			// caller an empty link even though the retry succeeded.
			return downloadLink.DownloadLink, nil
		case errors.Is(err, request.TrafficExceededError):
			// This is likely a fair usage limit error; surface unchanged.
			return "", err
		default:
			return "", fmt.Errorf("failed to get download link: %w", err)
		}
	}
	if downloadLink == nil {
		return "", fmt.Errorf("download link is empty")
	}
	c.updateDownloadLink(downloadLink)
	return downloadLink.DownloadLink, nil
}
// GenerateDownloadLinks asks the client to resolve download links for
// every file of the torrent, records each resolved link in the link
// cache, and re-stores the torrent. Failures are logged, not returned.
func (c *Cache) GenerateDownloadLinks(t CachedTorrent) {
	if err := c.client.GenerateDownloadLinks(t.Torrent); err != nil {
		c.logger.Error().Err(err).Str("torrent", t.Name).Msg("Failed to generate download links")
		return
	}
	for _, file := range t.Files {
		if file.DownloadLink != nil {
			c.updateDownloadLink(file.DownloadLink)
		}
	}
	c.setTorrent(t, nil)
}
// updateDownloadLink caches dl keyed by its original (restricted)
// link, stamped with the configured auto-expiry duration.
func (c *Cache) updateDownloadLink(dl *types.DownloadLink) {
	c.downloadLinks.Store(dl.Link, linkCache{
		Id: dl.Id,
		link: dl.DownloadLink,
		expiresAt: time.Now().Add(c.autoExpiresLinksAfterDuration),
		accountId: dl.AccountId,
	})
}
// checkDownloadLink returns the cached unrestricted link for the given
// original link, or "" when the entry is absent, expired, or has been
// marked invalid.
func (c *Cache) checkDownloadLink(link string) string {
	dl, ok := c.downloadLinks.Load(link)
	if !ok {
		return ""
	}
	if !dl.expiresAt.After(time.Now()) {
		return ""
	}
	if c.IsDownloadLinkInvalid(dl.link) {
		return ""
	}
	return dl.link
}
// MarkDownloadLinkAsInvalid records downloadLink as unusable for the
// given reason, disables the owning account when the reason is a
// bandwidth limit, and evicts the cached entry (which also deletes the
// link on the provider side via removeDownloadLink).
func (c *Cache) MarkDownloadLinkAsInvalid(link, downloadLink, reason string) {
	c.invalidDownloadLinks.Store(downloadLink, reason)
	// Remove the download api key from active
	if reason == "bandwidth_exceeded" {
		if dl, ok := c.downloadLinks.Load(link); ok {
			// Only disable when this cached entry actually produced the
			// invalid download link.
			if dl.accountId != "" && dl.link == downloadLink {
				c.client.DisableAccount(dl.accountId)
			}
		}
	}
	c.removeDownloadLink(link)
}
// removeDownloadLink evicts the cached entry for link and, when it
// carries a provider-side id, deletes the link remotely as well
// (best effort; the deletion error is ignored).
func (c *Cache) removeDownloadLink(link string) {
	if dl, ok := c.downloadLinks.Load(link); ok {
		// Delete dl from cache
		c.downloadLinks.Delete(link)
		// Delete dl from debrid
		if dl.Id != "" {
			_ = c.client.DeleteDownloadLink(dl.Id)
		}
	}
}
// IsDownloadLinkInvalid reports whether downloadLink was previously
// marked invalid, logging the recorded reason when it was.
func (c *Cache) IsDownloadLinkInvalid(downloadLink string) bool {
	reason, ok := c.invalidDownloadLinks.Load(downloadLink)
	if !ok {
		return false
	}
	c.logger.Debug().Msgf("Download link %s is invalid: %s", downloadLink, reason)
	return true
}

View File

@@ -1,61 +0,0 @@
package debrid
import (
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
"sync"
)
// Engine holds one client (and, for WebDAV providers, one cache) per
// configured debrid service. LastUsed records the key of the client
// that last accepted a torrent.
type Engine struct {
	Clients map[string]types.Client
	clientsMu sync.Mutex // guards Clients
	Caches map[string]*Cache
	CacheMu sync.Mutex // guards Caches
	LastUsed string
}
// NewEngine builds a client for every configured debrid provider and,
// for providers configured with WebDAV, an accompanying cache.
func NewEngine() *Engine {
	cfg := config.Get()
	clients := make(map[string]types.Client)
	caches := make(map[string]*Cache)
	for _, dc := range cfg.Debrids {
		client := createDebridClient(dc)
		logger := client.GetLogger()
		if dc.UseWebDav {
			caches[dc.Name] = New(dc, client)
			logger.Info().Msg("Debrid Service started with WebDAV")
		} else {
			logger.Info().Msg("Debrid Service started")
		}
		clients[dc.Name] = client
	}
	d := &Engine{
		Clients: clients,
		LastUsed: "",
		Caches: caches,
	}
	return d
}
// GetClient returns the client registered under name (nil when
// absent), holding clientsMu for the map read.
func (d *Engine) GetClient(name string) types.Client {
	d.clientsMu.Lock()
	defer d.clientsMu.Unlock()
	return d.Clients[name]
}
// Reset discards all clients and caches, leaving the engine empty.
func (d *Engine) Reset() {
	d.clientsMu.Lock()
	d.Clients = map[string]types.Client{}
	d.clientsMu.Unlock()
	d.CacheMu.Lock()
	d.Caches = map[string]*Cache{}
	d.CacheMu.Unlock()
}
// GetDebrids returns the live client map (not a copy).
// NOTE(review): the map is returned without taking clientsMu, so a
// caller iterating it while Reset runs concurrently would race —
// confirm callers only use this from a single goroutine.
func (d *Engine) GetDebrids() map[string]types.Client {
	return d.Clients
}

View File

@@ -1,298 +0,0 @@
package debrid
import (
"fmt"
"os"
"regexp"
"sort"
"strings"
"sync"
"sync/atomic"
"time"
)
// Filter-type identifiers accepted in per-directory filter config.
// Each value selects one predicate applied against a torrent's
// lower-cased name, its size, or its added-on time
// (see torrentMatchDirectory).
const (
	filterByInclude string = "include"
	filterByExclude string = "exclude"
	filterByStartsWith string = "starts_with"
	filterByEndsWith string = "ends_with"
	filterByNotStartsWith string = "not_starts_with"
	filterByNotEndsWith string = "not_ends_with"
	filterByRegex string = "regex"
	filterByNotRegex string = "not_regex"
	filterByExactMatch string = "exact_match"
	filterByNotExactMatch string = "not_exact_match"
	filterBySizeGT string = "size_gt"
	filterBySizeLT string = "size_lt"
	// NOTE(review): identifier looks like a typo for filterByLastAdded.
	filterBLastAdded string = "last_added"
)
// directoryFilter is one compiled predicate from the directory-filter
// configuration; only the field relevant to filterType is populated.
type directoryFilter struct {
	filterType string
	value string
	regex *regexp.Regexp // only for regex/not_regex
	sizeThreshold int64 // only for size_gt/size_lt
	ageThreshold time.Duration // only for last_added
}
// torrentCache indexes cached torrents by id and by display name and
// maintains pre-computed directory listings: a sorted root listing,
// a "__bad__" view, and one view per configured directory filter.
type torrentCache struct {
	mu sync.Mutex // guards byID and byName
	byID map[string]CachedTorrent // unmodified torrents keyed by provider id
	byName map[string]CachedTorrent // display torrents keyed by name
	listing atomic.Value // holds []os.FileInfo: the sorted root listing
	folderListing map[string][]os.FileInfo
	folderListingMu sync.RWMutex // guards folderListing
	directoriesFilters map[string][]directoryFilter
	sortNeeded atomic.Bool // set when byID/byName changed since the last refresh
}
// sortableFile is a flattened view of one cached torrent used while
// building and sorting listings.
type sortableFile struct {
	id string
	name string
	modTime time.Time // the torrent's AddedOn time
	size int64
	bad bool // flagged bad; routed into the "__bad__" folder view
}
// newTorrentCache builds an empty cache wired to the configured
// per-directory filters, with an empty (clean) root listing in place.
func newTorrentCache(dirFilters map[string][]directoryFilter) *torrentCache {
	tc := &torrentCache{
		byID:               map[string]CachedTorrent{},
		byName:             map[string]CachedTorrent{},
		folderListing:      map[string][]os.FileInfo{},
		directoriesFilters: dirFilters,
	}
	tc.listing.Store(make([]os.FileInfo, 0))
	tc.sortNeeded.Store(false)
	return tc
}
// reset empties both torrent indexes, the sorted root listing, and all
// per-folder views, leaving the cache in its freshly-constructed state.
func (tc *torrentCache) reset() {
	tc.mu.Lock()
	tc.byID = make(map[string]CachedTorrent)
	tc.byName = make(map[string]CachedTorrent)
	tc.mu.Unlock()
	// reset the sorted listing
	tc.sortNeeded.Store(false)
	tc.listing.Store(make([]os.FileInfo, 0))
	// reset any per-folder views
	tc.folderListingMu.Lock()
	tc.folderListing = make(map[string][]os.FileInfo)
	tc.folderListingMu.Unlock()
}
// getByID returns the unmodified cached torrent stored under id.
func (tc *torrentCache) getByID(id string) (CachedTorrent, bool) {
	tc.mu.Lock()
	defer tc.mu.Unlock()
	t, ok := tc.byID[id]
	return t, ok
}
// getByName returns the display torrent stored under name.
func (tc *torrentCache) getByName(name string) (CachedTorrent, bool) {
	tc.mu.Lock()
	defer tc.mu.Unlock()
	t, ok := tc.byName[name]
	return t, ok
}
// set stores a torrent under both indexes: the untouched torrent under
// its id and the (possibly modified) torrent under its display name.
// Listings are rebuilt lazily, so only the dirty flag is raised here.
func (tc *torrentCache) set(name string, torrent, newTorrent CachedTorrent) {
	tc.mu.Lock()
	// Set the id first
	tc.byID[newTorrent.Id] = torrent // This is the unadulterated torrent
	tc.byName[name] = newTorrent // This is likely the modified torrent
	tc.mu.Unlock()
	tc.sortNeeded.Store(true)
}
// getListing returns the sorted root listing, rebuilding it first when
// the cache changed since the last refresh.
func (tc *torrentCache) getListing() []os.FileInfo {
	// Fast path: if we have a sorted list and no changes since last sort
	if !tc.sortNeeded.Load() {
		return tc.listing.Load().([]os.FileInfo)
	}
	// Slow path: need to sort
	tc.refreshListing()
	return tc.listing.Load().([]os.FileInfo)
}
// getFolderListing returns the cached listing for the named virtual
// folder; an empty name means the root listing.
func (tc *torrentCache) getFolderListing(folderName string) []os.FileInfo {
	// BUG FIX: resolve the root listing BEFORE taking folderListingMu.
	// The original called getListing() while holding the read lock, and
	// getListing may invoke refreshListing, which write-locks the same
	// mutex — a self-deadlock (Go RWMutex is not reentrant/upgradable).
	if folderName == "" {
		return tc.getListing()
	}
	tc.folderListingMu.RLock()
	defer tc.folderListingMu.RUnlock()
	if folder, ok := tc.folderListing[folderName]; ok {
		return folder
	}
	// If folder not found, return empty slice
	return []os.FileInfo{}
}
// refreshListing rebuilds the sorted root listing, the "__bad__"
// folder view, and every configured filtered directory view from the
// byName index. Views are built concurrently; the method returns only
// once all of them are in place.
func (tc *torrentCache) refreshListing() {
	tc.mu.Lock()
	all := make([]sortableFile, 0, len(tc.byName))
	for name, t := range tc.byName {
		all = append(all, sortableFile{t.Id, name, t.AddedOn, t.Bytes, t.Bad})
	}
	// Clear the dirty flag while still holding mu so a concurrent set()
	// after this snapshot re-marks the listing as dirty.
	tc.sortNeeded.Store(false)
	tc.mu.Unlock()
	// Sort by name, breaking ties by added time.
	sort.Slice(all, func(i, j int) bool {
		if all[i].name != all[j].name {
			return all[i].name < all[j].name
		}
		return all[i].modTime.Before(all[j].modTime)
	})
	var wg sync.WaitGroup
	// Root listing: one directory entry per torrent.
	wg.Add(1)
	go func() {
		// BUG FIX: the original called wg.Done() from the spawning
		// goroutine immediately after `go`, so wg.Wait() could return
		// before these views were actually built. Done now runs when the
		// goroutine itself finishes.
		defer wg.Done()
		listing := make([]os.FileInfo, len(all))
		for i, sf := range all {
			listing[i] = &fileInfo{sf.id, sf.name, sf.size, 0755 | os.ModeDir, sf.modTime, true}
		}
		tc.listing.Store(listing)
	}()
	// "__bad__" view: torrents flagged bad, shown as "<name> || <id>".
	wg.Add(1)
	go func() {
		defer wg.Done()
		listing := make([]os.FileInfo, 0)
		for _, sf := range all {
			if sf.bad {
				listing = append(listing, &fileInfo{
					id:      sf.id,
					name:    fmt.Sprintf("%s || %s", sf.name, sf.id),
					size:    sf.size,
					mode:    0755 | os.ModeDir,
					modTime: sf.modTime,
					isDir:   true,
				})
			}
		}
		tc.folderListingMu.Lock()
		if len(listing) > 0 {
			tc.folderListing["__bad__"] = listing
		} else {
			delete(tc.folderListing, "__bad__")
		}
		tc.folderListingMu.Unlock()
	}()
	now := time.Now()
	// One view per configured directory filter.
	wg.Add(len(tc.directoriesFilters))
	for dir, filters := range tc.directoriesFilters {
		go func(dir string, filters []directoryFilter) {
			defer wg.Done()
			var matched []os.FileInfo
			for _, sf := range all {
				if tc.torrentMatchDirectory(filters, sf, now) {
					matched = append(matched, &fileInfo{
						id:   sf.id,
						name: sf.name, size: sf.size,
						mode: 0755 | os.ModeDir, modTime: sf.modTime, isDir: true,
					})
				}
			}
			tc.folderListingMu.Lock()
			if len(matched) > 0 {
				tc.folderListing[dir] = matched
			} else {
				delete(tc.folderListing, dir)
			}
			tc.folderListingMu.Unlock()
		}(dir, filters)
	}
	wg.Wait()
}
// torrentMatchDirectory reports whether the file satisfies ALL of the
// given filters (they are AND-ed; the first non-matching one
// short-circuits). Name comparisons use the lower-cased torrent name.
// NOTE(review): filter.value is compared as-is, so it is presumably
// lower-cased by the config parser — confirm.
func (tc *torrentCache) torrentMatchDirectory(filters []directoryFilter, file sortableFile, now time.Time) bool {
	torrentName := strings.ToLower(file.name)
	for _, filter := range filters {
		matched := false
		switch filter.filterType {
		case filterByInclude:
			matched = strings.Contains(torrentName, filter.value)
		case filterByStartsWith:
			matched = strings.HasPrefix(torrentName, filter.value)
		case filterByEndsWith:
			matched = strings.HasSuffix(torrentName, filter.value)
		case filterByExactMatch:
			matched = torrentName == filter.value
		case filterByExclude:
			matched = !strings.Contains(torrentName, filter.value)
		case filterByNotStartsWith:
			matched = !strings.HasPrefix(torrentName, filter.value)
		case filterByNotEndsWith:
			matched = !strings.HasSuffix(torrentName, filter.value)
		case filterByRegex:
			matched = filter.regex.MatchString(torrentName)
		case filterByNotRegex:
			matched = !filter.regex.MatchString(torrentName)
		case filterByNotExactMatch:
			matched = torrentName != filter.value
		case filterBySizeGT:
			matched = file.size > filter.sizeThreshold
		case filterBySizeLT:
			matched = file.size < filter.sizeThreshold
		case filterBLastAdded:
			// Matches torrents added within the configured age window.
			matched = file.modTime.After(now.Add(-filter.ageThreshold))
		}
		if !matched {
			return false // All filters must match
		}
	}
	// If we get here, all filters matched
	return true
}
// getAll returns a copy of the id -> torrent index.
func (tc *torrentCache) getAll() map[string]CachedTorrent {
	tc.mu.Lock()
	defer tc.mu.Unlock()
	out := make(map[string]CachedTorrent, len(tc.byID))
	for id, t := range tc.byID {
		out[id] = t
	}
	return out
}
// getIdMaps returns the set of cached torrent ids.
func (tc *torrentCache) getIdMaps() map[string]struct{} {
	tc.mu.Lock()
	defer tc.mu.Unlock()
	ids := make(map[string]struct{}, len(tc.byID))
	for id := range tc.byID {
		ids[id] = struct{}{}
	}
	return ids
}
// removeId drops the id-keyed entry and marks the listings dirty so
// they are rebuilt on the next read. The byName entry is removed
// separately via remove.
func (tc *torrentCache) removeId(id string) {
	tc.mu.Lock()
	defer tc.mu.Unlock()
	delete(tc.byID, id)
	tc.sortNeeded.Store(true)
}
// remove drops the name-keyed entry and marks the listings dirty so
// they are rebuilt on the next read.
func (tc *torrentCache) remove(name string) {
	tc.mu.Lock()
	defer tc.mu.Unlock()
	delete(tc.byName, name)
	tc.sortNeeded.Store(true)
}

View File

@@ -1 +0,0 @@
package debrid

View File

@@ -3,36 +3,41 @@ package alldebrid
import (
"encoding/json"
"fmt"
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/logger"
"github.com/sirrobot01/decypharr/internal/request"
"github.com/sirrobot01/decypharr/internal/utils"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
"net/http"
gourl "net/url"
"path/filepath"
"strconv"
"sync"
"time"
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/logger"
"github.com/sirrobot01/decypharr/internal/request"
"github.com/sirrobot01/decypharr/internal/utils"
"github.com/sirrobot01/decypharr/pkg/debrid/account"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
"go.uber.org/ratelimit"
)
type AllDebrid struct {
Name string
Host string `json:"host"`
APIKey string
accounts map[string]types.Account
DownloadUncached bool
client *request.Client
name string
Host string `json:"host"`
APIKey string
accountsManager *account.Manager
autoExpiresLinksAfter time.Duration
DownloadUncached bool
client *request.Client
Profile *types.Profile `json:"profile"`
MountPath string
logger zerolog.Logger
checkCached bool
addSamples bool
MountPath string
logger zerolog.Logger
checkCached bool
addSamples bool
minimumFreeSlot int
}
func New(dc config.Debrid) *AllDebrid {
rl := request.ParseRateLimit(dc.RateLimit)
func New(dc config.Debrid, ratelimits map[string]ratelimit.Limiter) (*AllDebrid, error) {
headers := map[string]string{
"Authorization": fmt.Sprintf("Bearer %s", dc.APIKey),
@@ -41,38 +46,35 @@ func New(dc config.Debrid) *AllDebrid {
client := request.New(
request.WithHeaders(headers),
request.WithLogger(_log),
request.WithRateLimiter(rl),
request.WithRateLimiter(ratelimits["main"]),
request.WithProxy(dc.Proxy),
)
accounts := make(map[string]types.Account)
for idx, key := range dc.DownloadAPIKeys {
id := strconv.Itoa(idx)
accounts[id] = types.Account{
Name: key,
ID: id,
Token: key,
}
autoExpiresLinksAfter, err := time.ParseDuration(dc.AutoExpireLinksAfter)
if autoExpiresLinksAfter == 0 || err != nil {
autoExpiresLinksAfter = 48 * time.Hour
}
return &AllDebrid{
Name: "alldebrid",
Host: "http://api.alldebrid.com/v4.1",
APIKey: dc.APIKey,
accounts: accounts,
DownloadUncached: dc.DownloadUncached,
client: client,
MountPath: dc.Folder,
logger: logger.New(dc.Name),
checkCached: dc.CheckCached,
addSamples: dc.AddSamples,
}
name: "alldebrid",
Host: "http://api.alldebrid.com/v4.1",
APIKey: dc.APIKey,
accountsManager: account.NewManager(dc, ratelimits["download"], _log),
DownloadUncached: dc.DownloadUncached,
autoExpiresLinksAfter: autoExpiresLinksAfter,
client: client,
MountPath: dc.Folder,
logger: logger.New(dc.Name),
checkCached: dc.CheckCached,
addSamples: dc.AddSamples,
minimumFreeSlot: dc.MinimumFreeSlot,
}, nil
}
func (ad *AllDebrid) GetName() string {
return ad.Name
func (ad *AllDebrid) Name() string {
return ad.name
}
func (ad *AllDebrid) GetLogger() zerolog.Logger {
func (ad *AllDebrid) Logger() zerolog.Logger {
return ad.logger
}
@@ -102,11 +104,12 @@ func (ad *AllDebrid) SubmitMagnet(torrent *types.Torrent) (*types.Torrent, error
}
magnets := data.Data.Magnets
if len(magnets) == 0 {
return nil, fmt.Errorf("error adding torrent")
return nil, fmt.Errorf("error adding torrent. No magnets returned")
}
magnet := magnets[0]
torrentId := strconv.Itoa(magnet.ID)
torrent.Id = torrentId
torrent.Added = time.Now().Format(time.RFC3339)
return torrent, nil
}
@@ -186,7 +189,7 @@ func (ad *AllDebrid) GetTorrent(torrentId string) (*types.Torrent, error) {
var res TorrentInfoResponse
err = json.Unmarshal(resp, &res)
if err != nil {
ad.logger.Info().Msgf("Error unmarshalling torrent info: %s", err)
ad.logger.Error().Err(err).Msgf("Error unmarshalling torrent info")
return nil, err
}
data := res.Data.Magnets
@@ -200,7 +203,7 @@ func (ad *AllDebrid) GetTorrent(torrentId string) (*types.Torrent, error) {
OriginalFilename: name,
Files: make(map[string]types.File),
InfoHash: data.Hash,
Debrid: ad.Name,
Debrid: ad.name,
MountPath: ad.MountPath,
Added: time.Unix(data.CompletionDate, 0).Format(time.RFC3339),
}
@@ -228,7 +231,7 @@ func (ad *AllDebrid) UpdateTorrent(t *types.Torrent) error {
var res TorrentInfoResponse
err = json.Unmarshal(resp, &res)
if err != nil {
ad.logger.Info().Msgf("Error unmarshalling torrent info: %s", err)
ad.logger.Error().Err(err).Msgf("Error unmarshalling torrent info")
return err
}
data := res.Data.Magnets
@@ -240,7 +243,7 @@ func (ad *AllDebrid) UpdateTorrent(t *types.Torrent) error {
t.OriginalFilename = name
t.Folder = name
t.MountPath = ad.MountPath
t.Debrid = ad.Name
t.Debrid = ad.name
t.Bytes = data.Size
t.Seeders = data.Seeders
t.Added = time.Unix(data.CompletionDate, 0).Format(time.RFC3339)
@@ -256,7 +259,7 @@ func (ad *AllDebrid) UpdateTorrent(t *types.Torrent) error {
return nil
}
func (ad *AllDebrid) CheckStatus(torrent *types.Torrent, isSymlink bool) (*types.Torrent, error) {
func (ad *AllDebrid) CheckStatus(torrent *types.Torrent) (*types.Torrent, error) {
for {
err := ad.UpdateTorrent(torrent)
@@ -266,13 +269,7 @@ func (ad *AllDebrid) CheckStatus(torrent *types.Torrent, isSymlink bool) (*types
status := torrent.Status
if status == "downloaded" {
ad.logger.Info().Msgf("Torrent: %s downloaded", torrent.Name)
if !isSymlink {
err = ad.GenerateDownloadLinks(torrent)
if err != nil {
return torrent, err
}
}
break
return torrent, nil
} else if utils.Contains(ad.GetDownloadingStatus(), status) {
if !torrent.DownloadUncached {
return torrent, fmt.Errorf("torrent: %s not cached", torrent.Name)
@@ -285,7 +282,6 @@ func (ad *AllDebrid) CheckStatus(torrent *types.Torrent, isSymlink bool) (*types
}
}
return torrent, nil
}
func (ad *AllDebrid) DeleteTorrent(torrentId string) error {
@@ -298,8 +294,9 @@ func (ad *AllDebrid) DeleteTorrent(torrentId string) error {
return nil
}
func (ad *AllDebrid) GenerateDownloadLinks(t *types.Torrent) error {
func (ad *AllDebrid) GetFileDownloadLinks(t *types.Torrent) error {
filesCh := make(chan types.File, len(t.Files))
linksCh := make(chan types.DownloadLink, len(t.Files))
errCh := make(chan error, len(t.Files))
var wg sync.WaitGroup
@@ -312,17 +309,15 @@ func (ad *AllDebrid) GenerateDownloadLinks(t *types.Torrent) error {
errCh <- err
return
}
linksCh <- link
file.DownloadLink = link
if link != nil {
errCh <- fmt.Errorf("download link is empty")
return
}
filesCh <- file
}(file)
}
go func() {
wg.Wait()
close(filesCh)
close(linksCh)
close(errCh)
}()
files := make(map[string]types.File, len(t.Files))
@@ -330,10 +325,19 @@ func (ad *AllDebrid) GenerateDownloadLinks(t *types.Torrent) error {
files[file.Name] = file
}
// Collect download links
links := make(map[string]types.DownloadLink, len(t.Files))
for link := range linksCh {
if link.Empty() {
continue
}
links[link.Link] = link
}
// Check for errors
for err := range errCh {
if err != nil {
return err // Return the first error encountered
return err
}
}
@@ -341,7 +345,7 @@ func (ad *AllDebrid) GenerateDownloadLinks(t *types.Torrent) error {
return nil
}
func (ad *AllDebrid) GetDownloadLink(t *types.Torrent, file *types.File) (*types.DownloadLink, error) {
func (ad *AllDebrid) GetDownloadLink(t *types.Torrent, file *types.File) (types.DownloadLink, error) {
url := fmt.Sprintf("%s/link/unlock", ad.Host)
query := gourl.Values{}
query.Add("link", file.Link)
@@ -349,33 +353,34 @@ func (ad *AllDebrid) GetDownloadLink(t *types.Torrent, file *types.File) (*types
req, _ := http.NewRequest(http.MethodGet, url, nil)
resp, err := ad.client.MakeRequest(req)
if err != nil {
return nil, err
return types.DownloadLink{}, err
}
var data DownloadLink
if err = json.Unmarshal(resp, &data); err != nil {
return nil, err
return types.DownloadLink{}, err
}
if data.Error != nil {
return nil, fmt.Errorf("error getting download link: %s", data.Error.Message)
return types.DownloadLink{}, fmt.Errorf("error getting download link: %s", data.Error.Message)
}
link := data.Data.Link
if link == "" {
return nil, fmt.Errorf("download link is empty")
return types.DownloadLink{}, fmt.Errorf("download link is empty")
}
return &types.DownloadLink{
now := time.Now()
dl := types.DownloadLink{
Token: ad.APIKey,
Link: file.Link,
DownloadLink: link,
Id: data.Data.Id,
Size: file.Size,
Filename: file.Name,
Generated: time.Now(),
AccountId: "0",
}, nil
}
func (ad *AllDebrid) GetCheckCached() bool {
return ad.checkCached
Generated: now,
ExpiresAt: now.Add(ad.autoExpiresLinksAfter),
}
// Set the download link in the account
ad.accountsManager.StoreDownloadLink(dl)
return dl, nil
}
func (ad *AllDebrid) GetTorrents() ([]*types.Torrent, error) {
@@ -389,7 +394,7 @@ func (ad *AllDebrid) GetTorrents() ([]*types.Torrent, error) {
var res TorrentsListResponse
err = json.Unmarshal(resp, &res)
if err != nil {
ad.logger.Info().Msgf("Error unmarshalling torrent info: %s", err)
ad.logger.Error().Err(err).Msgf("Error unmarshalling torrent info")
return torrents, err
}
for _, magnet := range res.Data.Magnets {
@@ -402,7 +407,7 @@ func (ad *AllDebrid) GetTorrents() ([]*types.Torrent, error) {
OriginalFilename: magnet.Filename,
Files: make(map[string]types.File),
InfoHash: magnet.Hash,
Debrid: ad.Name,
Debrid: ad.name,
MountPath: ad.MountPath,
Added: time.Unix(magnet.CompletionDate, 0).Format(time.RFC3339),
})
@@ -411,8 +416,8 @@ func (ad *AllDebrid) GetTorrents() ([]*types.Torrent, error) {
return torrents, nil
}
func (ad *AllDebrid) GetDownloads() (map[string]types.DownloadLink, error) {
return nil, nil
func (ad *AllDebrid) RefreshDownloadLinks() error {
return nil
}
func (ad *AllDebrid) GetDownloadingStatus() []string {
@@ -431,12 +436,64 @@ func (ad *AllDebrid) GetMountPath() string {
return ad.MountPath
}
func (ad *AllDebrid) DisableAccount(accountId string) {
func (ad *AllDebrid) GetAvailableSlots() (int, error) {
// This function is a placeholder for AllDebrid
//TODO: Implement the logic to check available slots for AllDebrid
return 0, fmt.Errorf("GetAvailableSlots not implemented for AllDebrid")
}
func (ad *AllDebrid) ResetActiveDownloadKeys() {
func (ad *AllDebrid) GetProfile() (*types.Profile, error) {
if ad.Profile != nil {
return ad.Profile, nil
}
url := fmt.Sprintf("%s/user", ad.Host)
req, err := http.NewRequest(http.MethodGet, url, nil)
if err != nil {
return nil, err
}
resp, err := ad.client.MakeRequest(req)
if err != nil {
return nil, err
}
var res UserProfileResponse
err = json.Unmarshal(resp, &res)
if err != nil {
ad.logger.Error().Err(err).Msgf("Error unmarshalling user profile")
return nil, err
}
if res.Status != "success" {
message := "unknown error"
if res.Error != nil {
message = res.Error.Message
}
return nil, fmt.Errorf("error getting user profile: %s", message)
}
userData := res.Data.User
expiration := time.Unix(userData.PremiumUntil, 0)
profile := &types.Profile{
Id: 1,
Name: ad.name,
Username: userData.Username,
Email: userData.Email,
Points: userData.FidelityPoints,
Premium: userData.PremiumUntil,
Expiration: expiration,
}
if userData.IsPremium {
profile.Type = "premium"
} else if userData.IsTrial {
profile.Type = "trial"
} else {
profile.Type = "free"
}
ad.Profile = profile
return profile, nil
}
func (ad *AllDebrid) DeleteDownloadLink(linkId string) error {
func (ad *AllDebrid) AccountManager() *account.Manager {
return ad.accountsManager
}
func (ad *AllDebrid) SyncAccounts() error {
return nil
}

View File

@@ -1,5 +1,10 @@
package alldebrid
import (
"encoding/json"
"fmt"
)
type errorResponse struct {
Code string `json:"code"`
Message string `json:"message"`
@@ -32,6 +37,8 @@ type magnetInfo struct {
Files []MagnetFile `json:"files"`
}
type Magnets []magnetInfo
type TorrentInfoResponse struct {
Status string `json:"status"`
Data struct {
@@ -43,7 +50,7 @@ type TorrentInfoResponse struct {
type TorrentsListResponse struct {
Status string `json:"status"`
Data struct {
Magnets []magnetInfo `json:"magnets"`
Magnets Magnets `json:"magnets"`
} `json:"data"`
Error *errorResponse `json:"error"`
}
@@ -81,3 +88,46 @@ type DownloadLink struct {
} `json:"data"`
Error *errorResponse `json:"error"`
}
// UnmarshalJSON implements custom unmarshaling for Magnets type
// It can handle both an array of magnetInfo objects or a map with string keys.
// If the input is an array, it will be unmarshaled directly into the Magnets slice.
// If the input is a map, it will extract the values and append them to the Magnets slice.
// If the input is neither, it will return an error.
func (m *Magnets) UnmarshalJSON(data []byte) error {
// Try to unmarshal as array
var arr []magnetInfo
if err := json.Unmarshal(data, &arr); err == nil {
*m = arr
return nil
}
// Try to unmarshal as map
var obj map[string]magnetInfo
if err := json.Unmarshal(data, &obj); err == nil {
for _, v := range obj {
*m = append(*m, v)
}
return nil
}
return fmt.Errorf("magnets: unsupported JSON format")
}
type UserProfileResponse struct {
Status string `json:"status"`
Error *errorResponse `json:"error"`
Data struct {
User struct {
Username string `json:"username"`
Email string `json:"email"`
IsPremium bool `json:"isPremium"`
IsSubscribed bool `json:"isSubscribed"`
IsTrial bool `json:"isTrial"`
PremiumUntil int64 `json:"premiumUntil"`
Lang string `json:"lang"`
FidelityPoints int `json:"fidelityPoints"`
LimitedHostersQuotas map[string]int `json:"limitedHostersQuotas"`
Notifications []string `json:"notifications"`
} `json:"user"`
} `json:"data"`
}

View File

@@ -1,41 +1,79 @@
package debrid_link
package debridlink
import (
"bytes"
"encoding/json"
"fmt"
"time"
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/logger"
"github.com/sirrobot01/decypharr/internal/request"
"github.com/sirrobot01/decypharr/internal/utils"
"github.com/sirrobot01/decypharr/pkg/debrid/account"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
"strconv"
"time"
"go.uber.org/ratelimit"
"net/http"
"strings"
)
type DebridLink struct {
Name string
name string
Host string `json:"host"`
APIKey string
accounts map[string]types.Account
accountsManager *account.Manager
DownloadUncached bool
client *request.Client
autoExpiresLinksAfter time.Duration
MountPath string
logger zerolog.Logger
checkCached bool
addSamples bool
Profile *types.Profile `json:"profile,omitempty"`
}
func (dl *DebridLink) GetName() string {
return dl.Name
func New(dc config.Debrid, ratelimits map[string]ratelimit.Limiter) (*DebridLink, error) {
headers := map[string]string{
"Authorization": fmt.Sprintf("Bearer %s", dc.APIKey),
"Content-Type": "application/json",
}
_log := logger.New(dc.Name)
client := request.New(
request.WithHeaders(headers),
request.WithLogger(_log),
request.WithRateLimiter(ratelimits["main"]),
request.WithProxy(dc.Proxy),
)
autoExpiresLinksAfter, err := time.ParseDuration(dc.AutoExpireLinksAfter)
if autoExpiresLinksAfter == 0 || err != nil {
autoExpiresLinksAfter = 48 * time.Hour
}
return &DebridLink{
name: "debridlink",
Host: "https://debrid-link.com/api/v2",
APIKey: dc.APIKey,
accountsManager: account.NewManager(dc, ratelimits["download"], _log),
DownloadUncached: dc.DownloadUncached,
autoExpiresLinksAfter: autoExpiresLinksAfter,
client: client,
MountPath: dc.Folder,
logger: logger.New(dc.Name),
checkCached: dc.CheckCached,
addSamples: dc.AddSamples,
}, nil
}
func (dl *DebridLink) GetLogger() zerolog.Logger {
func (dl *DebridLink) Name() string {
return dl.name
}
func (dl *DebridLink) Logger() zerolog.Logger {
return dl.logger
}
@@ -68,13 +106,13 @@ func (dl *DebridLink) IsAvailable(hashes []string) map[string]bool {
req, _ := http.NewRequest(http.MethodGet, url, nil)
resp, err := dl.client.MakeRequest(req)
if err != nil {
dl.logger.Info().Msgf("Error checking availability: %v", err)
dl.logger.Error().Err(err).Msgf("Error checking availability")
return result
}
var data AvailableResponse
err = json.Unmarshal(resp, &data)
if err != nil {
dl.logger.Info().Msgf("Error marshalling availability: %v", err)
dl.logger.Error().Err(err).Msgf("Error marshalling availability")
return result
}
if data.Value == nil {
@@ -121,7 +159,7 @@ func (dl *DebridLink) GetTorrent(torrentId string) (*types.Torrent, error) {
Filename: name,
OriginalFilename: name,
MountPath: dl.MountPath,
Debrid: dl.Name,
Debrid: dl.name,
Added: time.Unix(t.Created, 0).Format(time.RFC3339),
}
cfg := config.Get()
@@ -135,14 +173,7 @@ func (dl *DebridLink) GetTorrent(torrentId string) (*types.Torrent, error) {
Name: f.Name,
Size: f.Size,
Path: f.Name,
DownloadLink: &types.DownloadLink{
Filename: f.Name,
Link: f.DownloadURL,
DownloadLink: f.DownloadURL,
Generated: time.Now(),
AccountId: "0",
},
Link: f.DownloadURL,
Link: f.DownloadURL,
}
torrent.Files[file.Name] = file
}
@@ -191,6 +222,7 @@ func (dl *DebridLink) UpdateTorrent(t *types.Torrent) error {
t.OriginalFilename = name
t.Added = time.Unix(data.Created, 0).Format(time.RFC3339)
cfg := config.Get()
now := time.Now()
for _, f := range data.Files {
if !cfg.IsSizeAllowed(f.Size) {
continue
@@ -201,17 +233,21 @@ func (dl *DebridLink) UpdateTorrent(t *types.Torrent) error {
Name: f.Name,
Size: f.Size,
Path: f.Name,
DownloadLink: &types.DownloadLink{
Filename: f.Name,
Link: f.DownloadURL,
DownloadLink: f.DownloadURL,
Generated: time.Now(),
AccountId: "0",
},
Link: f.DownloadURL,
Link: f.DownloadURL,
}
link := types.DownloadLink{
Token: dl.APIKey,
Filename: f.Name,
Link: f.DownloadURL,
DownloadLink: f.DownloadURL,
Generated: now,
ExpiresAt: now.Add(dl.autoExpiresLinksAfter),
}
file.DownloadLink = link
t.Files[f.Name] = file
dl.accountsManager.StoreDownloadLink(link)
}
return nil
}
@@ -246,8 +282,9 @@ func (dl *DebridLink) SubmitMagnet(t *types.Torrent) (*types.Torrent, error) {
t.Filename = name
t.OriginalFilename = name
t.MountPath = dl.MountPath
t.Debrid = dl.Name
t.Debrid = dl.name
t.Added = time.Unix(data.Created, 0).Format(time.RFC3339)
now := time.Now()
for _, f := range data.Files {
file := types.File{
TorrentId: t.Id,
@@ -256,22 +293,25 @@ func (dl *DebridLink) SubmitMagnet(t *types.Torrent) (*types.Torrent, error) {
Size: f.Size,
Path: f.Name,
Link: f.DownloadURL,
DownloadLink: &types.DownloadLink{
Filename: f.Name,
Link: f.DownloadURL,
DownloadLink: f.DownloadURL,
Generated: time.Now(),
AccountId: "0",
},
Generated: time.Now(),
Generated: now,
}
link := types.DownloadLink{
Token: dl.APIKey,
Filename: f.Name,
Link: f.DownloadURL,
DownloadLink: f.DownloadURL,
Generated: now,
ExpiresAt: now.Add(dl.autoExpiresLinksAfter),
}
file.DownloadLink = link
t.Files[f.Name] = file
dl.accountsManager.StoreDownloadLink(link)
}
return t, nil
}
func (dl *DebridLink) CheckStatus(torrent *types.Torrent, isSymlink bool) (*types.Torrent, error) {
func (dl *DebridLink) CheckStatus(torrent *types.Torrent) (*types.Torrent, error) {
for {
err := dl.UpdateTorrent(torrent)
if err != nil || torrent == nil {
@@ -280,11 +320,7 @@ func (dl *DebridLink) CheckStatus(torrent *types.Torrent, isSymlink bool) (*type
status := torrent.Status
if status == "downloaded" {
dl.logger.Info().Msgf("Torrent: %s downloaded", torrent.Name)
err = dl.GenerateDownloadLinks(torrent)
if err != nil {
return torrent, err
}
break
return torrent, nil
} else if utils.Contains(dl.GetDownloadingStatus(), status) {
if !torrent.DownloadUncached {
return torrent, fmt.Errorf("torrent: %s not cached", torrent.Name)
@@ -297,7 +333,6 @@ func (dl *DebridLink) CheckStatus(torrent *types.Torrent, isSymlink bool) (*type
}
}
return torrent, nil
}
func (dl *DebridLink) DeleteTorrent(torrentId string) error {
@@ -310,69 +345,27 @@ func (dl *DebridLink) DeleteTorrent(torrentId string) error {
return nil
}
func (dl *DebridLink) GenerateDownloadLinks(t *types.Torrent) error {
func (dl *DebridLink) GetFileDownloadLinks(t *types.Torrent) error {
// Download links are already generated
return nil
}
func (dl *DebridLink) GetDownloads() (map[string]types.DownloadLink, error) {
return nil, nil
func (dl *DebridLink) RefreshDownloadLinks() error {
return nil
}
func (dl *DebridLink) GetDownloadLink(t *types.Torrent, file *types.File) (*types.DownloadLink, error) {
return file.DownloadLink, nil
func (dl *DebridLink) GetDownloadLink(t *types.Torrent, file *types.File) (types.DownloadLink, error) {
return dl.accountsManager.GetDownloadLink(file.Link)
}
func (dl *DebridLink) GetDownloadingStatus() []string {
return []string{"downloading"}
}
func (dl *DebridLink) GetCheckCached() bool {
return dl.checkCached
}
func (dl *DebridLink) GetDownloadUncached() bool {
return dl.DownloadUncached
}
func New(dc config.Debrid) *DebridLink {
rl := request.ParseRateLimit(dc.RateLimit)
headers := map[string]string{
"Authorization": fmt.Sprintf("Bearer %s", dc.APIKey),
"Content-Type": "application/json",
}
_log := logger.New(dc.Name)
client := request.New(
request.WithHeaders(headers),
request.WithLogger(_log),
request.WithRateLimiter(rl),
request.WithProxy(dc.Proxy),
)
accounts := make(map[string]types.Account)
for idx, key := range dc.DownloadAPIKeys {
id := strconv.Itoa(idx)
accounts[id] = types.Account{
Name: key,
ID: id,
Token: key,
}
}
return &DebridLink{
Name: "debridlink",
Host: "https://debrid-link.com/api/v2",
APIKey: dc.APIKey,
accounts: accounts,
DownloadUncached: dc.DownloadUncached,
client: client,
MountPath: dc.Folder,
logger: logger.New(dc.Name),
checkCached: dc.CheckCached,
addSamples: dc.AddSamples,
}
}
func (dl *DebridLink) GetTorrents() ([]*types.Torrent, error) {
page := 0
perPage := 100
@@ -402,7 +395,7 @@ func (dl *DebridLink) getTorrents(page, perPage int) ([]*types.Torrent, error) {
var res torrentInfo
err = json.Unmarshal(resp, &res)
if err != nil {
dl.logger.Info().Msgf("Error unmarshalling torrent info: %s", err)
dl.logger.Error().Err(err).Msgf("Error unmarshalling torrent info")
return torrents, err
}
@@ -424,11 +417,12 @@ func (dl *DebridLink) getTorrents(page, perPage int) ([]*types.Torrent, error) {
OriginalFilename: t.Name,
InfoHash: t.HashString,
Files: make(map[string]types.File),
Debrid: dl.Name,
Debrid: dl.name,
MountPath: dl.MountPath,
Added: time.Unix(t.Created, 0).Format(time.RFC3339),
}
cfg := config.Get()
now := time.Now()
for _, f := range t.Files {
if !cfg.IsSizeAllowed(f.Size) {
continue
@@ -439,19 +433,23 @@ func (dl *DebridLink) getTorrents(page, perPage int) ([]*types.Torrent, error) {
Name: f.Name,
Size: f.Size,
Path: f.Name,
DownloadLink: &types.DownloadLink{
Filename: f.Name,
Link: f.DownloadURL,
DownloadLink: f.DownloadURL,
Generated: time.Now(),
AccountId: "0",
},
Link: f.DownloadURL,
Link: f.DownloadURL,
}
link := types.DownloadLink{
Token: dl.APIKey,
Filename: f.Name,
Link: f.DownloadURL,
DownloadLink: f.DownloadURL,
Generated: now,
ExpiresAt: now.Add(dl.autoExpiresLinksAfter),
}
file.DownloadLink = link
torrent.Files[f.Name] = file
dl.accountsManager.StoreDownloadLink(link)
}
torrents = append(torrents, torrent)
}
return torrents, nil
}
@@ -463,12 +461,60 @@ func (dl *DebridLink) GetMountPath() string {
return dl.MountPath
}
func (dl *DebridLink) DisableAccount(accountId string) {
func (dl *DebridLink) GetAvailableSlots() (int, error) {
//TODO: Implement the logic to check available slots for DebridLink
return 0, fmt.Errorf("GetAvailableSlots not implemented for DebridLink")
}
func (dl *DebridLink) ResetActiveDownloadKeys() {
func (dl *DebridLink) GetProfile() (*types.Profile, error) {
if dl.Profile != nil {
return dl.Profile, nil
}
url := fmt.Sprintf("%s/account/infos", dl.Host)
req, err := http.NewRequest(http.MethodGet, url, nil)
if err != nil {
return nil, err
}
resp, err := dl.client.MakeRequest(req)
if err != nil {
return nil, err
}
var res UserInfo
err = json.Unmarshal(resp, &res)
if err != nil {
dl.logger.Error().Err(err).Msgf("Error unmarshalling user info")
return nil, err
}
if !res.Success || res.Value == nil {
return nil, fmt.Errorf("error getting user info")
}
data := *res.Value
expiration := time.Unix(data.PremiumLeft, 0)
profile := &types.Profile{
Id: 1,
Username: data.Username,
Name: dl.name,
Email: data.Email,
Points: data.Points,
Premium: data.PremiumLeft,
Expiration: expiration,
}
if expiration.IsZero() {
profile.Expiration = time.Now().AddDate(1, 0, 0) // Default to 1 year if no expiration
}
if data.PremiumLeft > 0 {
profile.Type = "premium"
} else {
profile.Type = "free"
}
dl.Profile = profile
return profile, nil
}
func (dl *DebridLink) DeleteDownloadLink(linkId string) error {
func (dl *DebridLink) AccountManager() *account.Manager {
return dl.accountsManager
}
func (dl *DebridLink) SyncAccounts() error {
return nil
}

View File

@@ -1,4 +1,4 @@
package debrid_link
package debridlink
type APIResponse[T any] struct {
Success bool `json:"success"`
@@ -43,3 +43,12 @@ type _torrentInfo struct {
type torrentInfo APIResponse[[]_torrentInfo]
type SubmitTorrentInfo APIResponse[_torrentInfo]
type UserInfo APIResponse[struct {
Username string `json:"username"`
Email string `json:"email"`
AccountType int `json:"accountType"`
PremiumLeft int64 `json:"premiumLeft"`
Points int `json:"pts"`
Trafficshare int `json:"trafficshare"`
}]

View File

@@ -0,0 +1 @@
package realdebrid

File diff suppressed because it is too large Load Diff

View File

@@ -139,3 +139,27 @@ type ErrorResponse struct {
Error string `json:"error"`
ErrorCode int `json:"error_code"`
}
type profileResponse struct {
Id int64 `json:"id"`
Username string `json:"username"`
Email string `json:"email"`
Points int `json:"points"`
Locale string `json:"locale"`
Avatar string `json:"avatar"`
Type string `json:"type"`
Premium int64 `json:"premium"`
Expiration time.Time `json:"expiration"`
}
type AvailableSlotsResponse struct {
ActiveSlots int `json:"nb"`
TotalSlots int `json:"limit"`
}
type hostData struct {
Host map[string]int64 `json:"host"`
Bytes int64 `json:"bytes"`
}
type TrafficResponse map[string]hostData

View File

@@ -4,13 +4,6 @@ import (
"bytes"
"encoding/json"
"fmt"
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/logger"
"github.com/sirrobot01/decypharr/internal/request"
"github.com/sirrobot01/decypharr/internal/utils"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
"github.com/sirrobot01/decypharr/pkg/version"
"mime/multipart"
"net/http"
gourl "net/url"
@@ -21,13 +14,25 @@ import (
"strings"
"sync"
"time"
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/logger"
"github.com/sirrobot01/decypharr/internal/request"
"github.com/sirrobot01/decypharr/internal/utils"
"github.com/sirrobot01/decypharr/pkg/debrid/account"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
"github.com/sirrobot01/decypharr/pkg/version"
"go.uber.org/ratelimit"
)
type Torbox struct {
Name string
Host string `json:"host"`
APIKey string
accounts map[string]types.Account
name string
Host string `json:"host"`
APIKey string
accountsManager *account.Manager
autoExpiresLinksAfter time.Duration
DownloadUncached bool
client *request.Client
@@ -37,8 +42,7 @@ type Torbox struct {
addSamples bool
}
func New(dc config.Debrid) *Torbox {
rl := request.ParseRateLimit(dc.RateLimit)
func New(dc config.Debrid, ratelimits map[string]ratelimit.Limiter) (*Torbox, error) {
headers := map[string]string{
"Authorization": fmt.Sprintf("Bearer %s", dc.APIKey),
@@ -47,40 +51,35 @@ func New(dc config.Debrid) *Torbox {
_log := logger.New(dc.Name)
client := request.New(
request.WithHeaders(headers),
request.WithRateLimiter(rl),
request.WithRateLimiter(ratelimits["main"]),
request.WithLogger(_log),
request.WithProxy(dc.Proxy),
)
accounts := make(map[string]types.Account)
for idx, key := range dc.DownloadAPIKeys {
id := strconv.Itoa(idx)
accounts[id] = types.Account{
Name: key,
ID: id,
Token: key,
}
autoExpiresLinksAfter, err := time.ParseDuration(dc.AutoExpireLinksAfter)
if autoExpiresLinksAfter == 0 || err != nil {
autoExpiresLinksAfter = 48 * time.Hour
}
return &Torbox{
Name: "torbox",
Host: "https://api.torbox.app/v1",
APIKey: dc.APIKey,
accounts: accounts,
DownloadUncached: dc.DownloadUncached,
client: client,
MountPath: dc.Folder,
logger: _log,
checkCached: dc.CheckCached,
addSamples: dc.AddSamples,
}
name: "torbox",
Host: "https://api.torbox.app/v1",
APIKey: dc.APIKey,
accountsManager: account.NewManager(dc, ratelimits["download"], _log),
DownloadUncached: dc.DownloadUncached,
autoExpiresLinksAfter: autoExpiresLinksAfter,
client: client,
MountPath: dc.Folder,
logger: _log,
checkCached: dc.CheckCached,
addSamples: dc.AddSamples,
}, nil
}
func (tb *Torbox) GetName() string {
return tb.Name
func (tb *Torbox) Name() string {
return tb.name
}
func (tb *Torbox) GetLogger() zerolog.Logger {
func (tb *Torbox) Logger() zerolog.Logger {
return tb.logger
}
@@ -113,13 +112,13 @@ func (tb *Torbox) IsAvailable(hashes []string) map[string]bool {
req, _ := http.NewRequest(http.MethodGet, url, nil)
resp, err := tb.client.MakeRequest(req)
if err != nil {
tb.logger.Info().Msgf("Error checking availability: %v", err)
tb.logger.Error().Err(err).Msgf("Error checking availability")
return result
}
var res AvailableResponse
err = json.Unmarshal(resp, &res)
if err != nil {
tb.logger.Info().Msgf("Error marshalling availability: %v", err)
tb.logger.Error().Err(err).Msgf("Error marshalling availability")
return result
}
if res.Data == nil {
@@ -140,6 +139,9 @@ func (tb *Torbox) SubmitMagnet(torrent *types.Torrent) (*types.Torrent, error) {
payload := &bytes.Buffer{}
writer := multipart.NewWriter(payload)
_ = writer.WriteField("magnet", torrent.Magnet.Link)
if !torrent.DownloadUncached {
_ = writer.WriteField("add_only_if_cached", "true")
}
err := writer.Close()
if err != nil {
return nil, err
@@ -162,12 +164,13 @@ func (tb *Torbox) SubmitMagnet(torrent *types.Torrent) (*types.Torrent, error) {
torrentId := strconv.Itoa(dt.Id)
torrent.Id = torrentId
torrent.MountPath = tb.MountPath
torrent.Debrid = tb.Name
torrent.Debrid = tb.name
torrent.Added = time.Now().Format(time.RFC3339)
return torrent, nil
}
func getTorboxStatus(status string, finished bool) string {
func (tb *Torbox) getTorboxStatus(status string, finished bool) string {
if finished {
return "downloaded"
}
@@ -175,12 +178,16 @@ func getTorboxStatus(status string, finished bool) string {
"checkingResumeData", "metaDL", "pausedUP", "queuedUP", "checkingUP",
"forcedUP", "allocating", "downloading", "metaDL", "pausedDL",
"queuedDL", "checkingDL", "forcedDL", "checkingResumeData", "moving"}
var determinedStatus string
switch {
case utils.Contains(downloading, status):
return "downloading"
determinedStatus = "downloading"
default:
return "error"
determinedStatus = "error"
}
return determinedStatus
}
func (tb *Torbox) GetTorrent(torrentId string) (*types.Torrent, error) {
@@ -205,39 +212,71 @@ func (tb *Torbox) GetTorrent(torrentId string) (*types.Torrent, error) {
Bytes: data.Size,
Folder: data.Name,
Progress: data.Progress * 100,
Status: getTorboxStatus(data.DownloadState, data.DownloadFinished),
Status: tb.getTorboxStatus(data.DownloadState, data.DownloadFinished),
Speed: data.DownloadSpeed,
Seeders: data.Seeds,
Filename: data.Name,
OriginalFilename: data.Name,
MountPath: tb.MountPath,
Debrid: tb.Name,
Debrid: tb.name,
Files: make(map[string]types.File),
Added: data.CreatedAt.Format(time.RFC3339),
}
cfg := config.Get()
totalFiles := 0
skippedSamples := 0
skippedFileType := 0
skippedSize := 0
validFiles := 0
filesWithLinks := 0
for _, f := range data.Files {
totalFiles++
fileName := filepath.Base(f.Name)
if !tb.addSamples && utils.IsSampleFile(f.AbsolutePath) {
// Skip sample files
skippedSamples++
continue
}
if !cfg.IsAllowedFile(fileName) {
skippedFileType++
continue
}
if !cfg.IsSizeAllowed(f.Size) {
skippedSize++
continue
}
validFiles++
file := types.File{
TorrentId: t.Id,
Id: strconv.Itoa(f.Id),
Name: fileName,
Size: f.Size,
Path: fileName,
Path: f.Name,
}
// For downloaded torrents, set a placeholder link to indicate file is available
if data.DownloadFinished {
file.Link = fmt.Sprintf("torbox://%s/%d", t.Id, f.Id)
filesWithLinks++
}
t.Files[fileName] = file
}
// Log summary only if there are issues or for debugging
tb.logger.Debug().
Str("torrent_id", t.Id).
Str("torrent_name", t.Name).
Bool("download_finished", data.DownloadFinished).
Str("status", t.Status).
Int("total_files", totalFiles).
Int("valid_files", validFiles).
Int("final_file_count", len(t.Files)).
Msg("Torrent file processing completed")
var cleanPath string
if len(t.Files) > 0 {
cleanPath = path.Clean(data.Files[0].Name)
@@ -246,7 +285,7 @@ func (tb *Torbox) GetTorrent(torrentId string) (*types.Torrent, error) {
}
t.OriginalFilename = strings.Split(cleanPath, "/")[0]
t.Debrid = tb.Name
t.Debrid = tb.name
return t, nil
}
@@ -265,24 +304,33 @@ func (tb *Torbox) UpdateTorrent(t *types.Torrent) error {
}
data := res.Data
name := data.Name
t.Name = name
t.Bytes = data.Size
t.Folder = name
t.Progress = data.Progress * 100
t.Status = getTorboxStatus(data.DownloadState, data.DownloadFinished)
t.Status = tb.getTorboxStatus(data.DownloadState, data.DownloadFinished)
t.Speed = data.DownloadSpeed
t.Seeders = data.Seeds
t.Filename = name
t.OriginalFilename = name
t.MountPath = tb.MountPath
t.Debrid = tb.Name
t.Debrid = tb.name
// Clear existing files map to rebuild it
t.Files = make(map[string]types.File)
cfg := config.Get()
validFiles := 0
filesWithLinks := 0
for _, f := range data.Files {
fileName := filepath.Base(f.Name)
if !tb.addSamples && utils.IsSampleFile(f.AbsolutePath) {
// Skip sample files
continue
}
if !cfg.IsAllowedFile(fileName) {
continue
}
@@ -290,6 +338,8 @@ func (tb *Torbox) UpdateTorrent(t *types.Torrent) error {
if !cfg.IsSizeAllowed(f.Size) {
continue
}
validFiles++
file := types.File{
TorrentId: t.Id,
Id: strconv.Itoa(f.Id),
@@ -297,8 +347,16 @@ func (tb *Torbox) UpdateTorrent(t *types.Torrent) error {
Size: f.Size,
Path: fileName,
}
// For downloaded torrents, set a placeholder link to indicate file is available
if data.DownloadFinished {
file.Link = fmt.Sprintf("torbox://%s/%s", t.Id, strconv.Itoa(f.Id))
filesWithLinks++
}
t.Files[fileName] = file
}
var cleanPath string
if len(t.Files) > 0 {
cleanPath = path.Clean(data.Files[0].Name)
@@ -307,11 +365,11 @@ func (tb *Torbox) UpdateTorrent(t *types.Torrent) error {
}
t.OriginalFilename = strings.Split(cleanPath, "/")[0]
t.Debrid = tb.Name
t.Debrid = tb.name
return nil
}
func (tb *Torbox) CheckStatus(torrent *types.Torrent, isSymlink bool) (*types.Torrent, error) {
func (tb *Torbox) CheckStatus(torrent *types.Torrent) (*types.Torrent, error) {
for {
err := tb.UpdateTorrent(torrent)
@@ -321,13 +379,7 @@ func (tb *Torbox) CheckStatus(torrent *types.Torrent, isSymlink bool) (*types.To
status := torrent.Status
if status == "downloaded" {
tb.logger.Info().Msgf("Torrent: %s downloaded", torrent.Name)
if !isSymlink {
err = tb.GenerateDownloadLinks(torrent)
if err != nil {
return torrent, err
}
}
break
return torrent, nil
} else if utils.Contains(tb.GetDownloadingStatus(), status) {
if !torrent.DownloadUncached {
return torrent, fmt.Errorf("torrent: %s not cached", torrent.Name)
@@ -340,7 +392,6 @@ func (tb *Torbox) CheckStatus(torrent *types.Torrent, isSymlink bool) (*types.To
}
}
return torrent, nil
}
func (tb *Torbox) DeleteTorrent(torrentId string) error {
@@ -355,8 +406,9 @@ func (tb *Torbox) DeleteTorrent(torrentId string) error {
return nil
}
func (tb *Torbox) GenerateDownloadLinks(t *types.Torrent) error {
func (tb *Torbox) GetFileDownloadLinks(t *types.Torrent) error {
filesCh := make(chan types.File, len(t.Files))
linkCh := make(chan types.DownloadLink)
errCh := make(chan error, len(t.Files))
var wg sync.WaitGroup
@@ -369,13 +421,17 @@ func (tb *Torbox) GenerateDownloadLinks(t *types.Torrent) error {
errCh <- err
return
}
file.DownloadLink = link
if link.DownloadLink != "" {
linkCh <- link
file.DownloadLink = link
}
filesCh <- file
}()
}
go func() {
wg.Wait()
close(filesCh)
close(linkCh)
close(errCh)
}()
@@ -396,56 +452,182 @@ func (tb *Torbox) GenerateDownloadLinks(t *types.Torrent) error {
return nil
}
func (tb *Torbox) GetDownloadLink(t *types.Torrent, file *types.File) (*types.DownloadLink, error) {
func (tb *Torbox) GetDownloadLink(t *types.Torrent, file *types.File) (types.DownloadLink, error) {
url := fmt.Sprintf("%s/api/torrents/requestdl/", tb.Host)
query := gourl.Values{}
query.Add("torrent_id", t.Id)
query.Add("token", tb.APIKey)
query.Add("file_id", file.Id)
url += "?" + query.Encode()
req, _ := http.NewRequest(http.MethodGet, url, nil)
resp, err := tb.client.MakeRequest(req)
if err != nil {
return nil, err
tb.logger.Error().
Err(err).
Str("torrent_id", t.Id).
Str("file_id", file.Id).
Msg("Failed to make request to Torbox API")
return types.DownloadLink{}, err
}
var data DownloadLinksResponse
if err = json.Unmarshal(resp, &data); err != nil {
return nil, err
tb.logger.Error().
Err(err).
Str("torrent_id", t.Id).
Str("file_id", file.Id).
Msg("Failed to unmarshal Torbox API response")
return types.DownloadLink{}, err
}
if data.Data == nil {
return nil, fmt.Errorf("error getting download links")
tb.logger.Error().
Str("torrent_id", t.Id).
Str("file_id", file.Id).
Bool("success", data.Success).
Interface("error", data.Error).
Str("detail", data.Detail).
Msg("Torbox API returned no data")
return types.DownloadLink{}, fmt.Errorf("error getting download links")
}
link := *data.Data
if link == "" {
return nil, fmt.Errorf("error getting download links")
tb.logger.Error().
Str("torrent_id", t.Id).
Str("file_id", file.Id).
Msg("Torbox API returned empty download link")
return types.DownloadLink{}, fmt.Errorf("error getting download links")
}
return &types.DownloadLink{
now := time.Now()
dl := types.DownloadLink{
Token: tb.APIKey,
Link: file.Link,
DownloadLink: link,
Id: file.Id,
AccountId: "0",
Generated: time.Now(),
}, nil
Generated: now,
ExpiresAt: now.Add(tb.autoExpiresLinksAfter),
}
tb.accountsManager.StoreDownloadLink(dl)
return dl, nil
}
func (tb *Torbox) GetDownloadingStatus() []string {
return []string{"downloading"}
}
func (tb *Torbox) GetCheckCached() bool {
return tb.checkCached
func (tb *Torbox) GetTorrents() ([]*types.Torrent, error) {
offset := 0
allTorrents := make([]*types.Torrent, 0)
for {
torrents, err := tb.getTorrents(offset)
if err != nil {
break
}
if len(torrents) == 0 {
break
}
allTorrents = append(allTorrents, torrents...)
offset += len(torrents)
}
return allTorrents, nil
}
func (tb *Torbox) GetTorrents() ([]*types.Torrent, error) {
return nil, nil
func (tb *Torbox) getTorrents(offset int) ([]*types.Torrent, error) {
url := fmt.Sprintf("%s/api/torrents/mylist?offset=%d", tb.Host, offset)
req, _ := http.NewRequest(http.MethodGet, url, nil)
resp, err := tb.client.MakeRequest(req)
if err != nil {
return nil, err
}
var res TorrentsListResponse
err = json.Unmarshal(resp, &res)
if err != nil {
return nil, err
}
if !res.Success || res.Data == nil {
return nil, fmt.Errorf("torbox API error: %v", res.Error)
}
torrents := make([]*types.Torrent, 0, len(*res.Data))
cfg := config.Get()
for _, data := range *res.Data {
t := &types.Torrent{
Id: strconv.Itoa(data.Id),
Name: data.Name,
Bytes: data.Size,
Folder: data.Name,
Progress: data.Progress * 100,
Status: tb.getTorboxStatus(data.DownloadState, data.DownloadFinished),
Speed: data.DownloadSpeed,
Seeders: data.Seeds,
Filename: data.Name,
OriginalFilename: data.Name,
MountPath: tb.MountPath,
Debrid: tb.name,
Files: make(map[string]types.File),
Added: data.CreatedAt.Format(time.RFC3339),
InfoHash: data.Hash,
}
// Process files
for _, f := range data.Files {
fileName := filepath.Base(f.Name)
if !tb.addSamples && utils.IsSampleFile(f.AbsolutePath) {
// Skip sample files
continue
}
if !cfg.IsAllowedFile(fileName) {
continue
}
if !cfg.IsSizeAllowed(f.Size) {
continue
}
file := types.File{
TorrentId: t.Id,
Id: strconv.Itoa(f.Id),
Name: fileName,
Size: f.Size,
Path: f.Name,
}
// For downloaded torrents, set a placeholder link to indicate file is available
if data.DownloadFinished {
file.Link = fmt.Sprintf("torbox://%s/%d", t.Id, f.Id)
}
t.Files[fileName] = file
}
// Set original filename based on first file or torrent name
var cleanPath string
if len(t.Files) > 0 {
cleanPath = path.Clean(data.Files[0].Name)
} else {
cleanPath = path.Clean(data.Name)
}
t.OriginalFilename = strings.Split(cleanPath, "/")[0]
torrents = append(torrents, t)
}
return torrents, nil
}
func (tb *Torbox) GetDownloadUncached() bool {
return tb.DownloadUncached
}
func (tb *Torbox) GetDownloads() (map[string]types.DownloadLink, error) {
return nil, nil
func (tb *Torbox) RefreshDownloadLinks() error {
return nil
}
func (tb *Torbox) CheckLink(link string) error {
@@ -456,13 +638,19 @@ func (tb *Torbox) GetMountPath() string {
return tb.MountPath
}
func (tb *Torbox) DisableAccount(accountId string) {
func (tb *Torbox) GetAvailableSlots() (int, error) {
//TODO: Implement the logic to check available slots for Torbox
return 0, fmt.Errorf("not implemented")
}
func (tb *Torbox) ResetActiveDownloadKeys() {
func (tb *Torbox) GetProfile() (*types.Profile, error) {
return nil, nil
}
func (tb *Torbox) DeleteDownloadLink(linkId string) error {
func (tb *Torbox) AccountManager() *account.Manager {
return tb.accountsManager
}
func (tb *Torbox) SyncAccounts() error {
return nil
}

View File

@@ -57,7 +57,7 @@ type torboxInfo struct {
} `json:"files"`
DownloadPath string `json:"download_path"`
InactiveCheck int `json:"inactive_check"`
Availability int `json:"availability"`
Availability float64 `json:"availability"`
DownloadFinished bool `json:"download_finished"`
Tracker interface{} `json:"tracker"`
TotalUploaded int `json:"total_uploaded"`
@@ -73,3 +73,5 @@ type torboxInfo struct {
type InfoResponse APIResponse[torboxInfo]
type DownloadLinksResponse APIResponse[string]
type TorrentsListResponse APIResponse[[]torboxInfo]

View File

@@ -1,800 +0,0 @@
package realdebrid
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/logger"
"github.com/sirrobot01/decypharr/internal/request"
"github.com/sirrobot01/decypharr/internal/utils"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
"io"
"net/http"
gourl "net/url"
"path/filepath"
"sort"
"strconv"
"strings"
"sync"
"time"
)
// RealDebrid is a debrid-service client for the Real-Debrid REST API.
// It holds two HTTP clients: `client`, authenticated with the main API key,
// and `downloadClient`, authenticated with the currently selected download
// key (download keys can be rotated when an account hits a bandwidth limit).
type RealDebrid struct {
	Name string
	Host string `json:"host"`
	APIKey string
	currentDownloadKey string // token currently used by downloadClient; cleared when an account is disabled
	accounts map[string]types.Account // download accounts keyed by numeric index rendered as a string
	accountsMutex sync.RWMutex // guards accounts (and iteration over it)
	DownloadUncached bool
	client *request.Client // main API client (Bearer APIKey)
	downloadClient *request.Client // client for unrestrict/download endpoints (Bearer download key)
	MountPath string
	logger zerolog.Logger
	checkCached bool
	addSamples bool // when false, sample files are filtered out of torrent file lists
}
// New builds a RealDebrid client from the given debrid configuration.
// It creates one account entry per configured download API key and wires
// up two rate-limited HTTP clients (main API + download/unrestrict).
func New(dc config.Debrid) *RealDebrid {
	rl := request.ParseRateLimit(dc.RateLimit)
	headers := map[string]string{
		"Authorization": fmt.Sprintf("Bearer %s", dc.APIKey),
	}
	_log := logger.New(dc.Name)
	accounts := make(map[string]types.Account, len(dc.DownloadAPIKeys))
	// Guard against an empty key list: indexing dc.DownloadAPIKeys[0]
	// unconditionally would panic. An empty currentDownloadKey is recovered
	// lazily in GetDownloadLink, which falls back to the first active account.
	currentDownloadKey := ""
	if len(dc.DownloadAPIKeys) > 0 {
		currentDownloadKey = dc.DownloadAPIKeys[0]
	}
	for idx, key := range dc.DownloadAPIKeys {
		id := strconv.Itoa(idx)
		accounts[id] = types.Account{
			Name:  key,
			ID:    id,
			Token: key,
		}
	}
	downloadHeaders := map[string]string{
		"Authorization": fmt.Sprintf("Bearer %s", currentDownloadKey),
	}
	return &RealDebrid{
		Name:             "realdebrid",
		Host:             "https://api.real-debrid.com/rest/1.0",
		APIKey:           dc.APIKey,
		accounts:         accounts,
		DownloadUncached: dc.DownloadUncached,
		client: request.New(
			request.WithHeaders(headers),
			request.WithRateLimiter(rl),
			request.WithLogger(_log),
			request.WithMaxRetries(5),
			request.WithRetryableStatus(429, 502),
			request.WithProxy(dc.Proxy),
		),
		downloadClient: request.New(
			request.WithHeaders(downloadHeaders),
			request.WithLogger(_log),
			request.WithMaxRetries(10),
			request.WithRetryableStatus(429, 447, 502),
			request.WithProxy(dc.Proxy),
		),
		currentDownloadKey: currentDownloadKey,
		MountPath:          dc.Folder,
		logger:             _log, // reuse the logger built above instead of constructing a second one
		checkCached:        dc.CheckCached,
		addSamples:         dc.AddSamples,
	}
}
// GetName returns the provider name ("realdebrid").
func (r *RealDebrid) GetName() string {
	return r.Name
}
// GetLogger returns the client's logger.
func (r *RealDebrid) GetLogger() zerolog.Logger {
	return r.logger
}
// getSelectedFiles pairs the torrent's selected files with their download
// links. Real-Debrid returns one link per selected file, in selection order,
// so the i-th selected file receives data.Links[i]. Selected files beyond
// the number of available links are dropped.
func getSelectedFiles(t *types.Torrent, data torrentInfo) map[string]types.File {
	out := make(map[string]types.File)
	linkIdx := 0
	for _, df := range data.Files {
		if df.Selected != 1 {
			continue
		}
		if linkIdx >= len(data.Links) {
			// No link available for this (or any later) selected file.
			break
		}
		base := filepath.Base(df.Path)
		out[base] = types.File{
			TorrentId: t.Id,
			Name:      base,
			Path:      base,
			Size:      df.Bytes,
			Id:        strconv.Itoa(df.ID),
			Link:      data.Links[linkIdx],
		}
		linkIdx++
	}
	return out
}
// getTorrentFiles returns the torrent's files that pass the configured
// filters (sample files, allowed extensions, size limits). Unlike
// getSelectedFiles it does not attach download links; it is used while the
// torrent is still waiting for file selection.
// Note: the previous version kept a write-only `idx` counter and its doc
// comment described a `validate` parameter that does not exist; both removed.
func (r *RealDebrid) getTorrentFiles(t *types.Torrent, data torrentInfo) map[string]types.File {
	files := make(map[string]types.File)
	cfg := config.Get()
	for _, f := range data.Files {
		name := filepath.Base(f.Path)
		if !r.addSamples && utils.IsSampleFile(f.Path) {
			// Skip sample files unless explicitly enabled
			continue
		}
		if !cfg.IsAllowedFile(name) {
			continue
		}
		if !cfg.IsSizeAllowed(f.Bytes) {
			continue
		}
		files[name] = types.File{
			TorrentId: t.Id,
			Name:      name,
			Path:      name,
			Size:      f.Bytes,
			Id:        strconv.Itoa(f.ID),
		}
	}
	return files
}
// IsAvailable checks which of the given infohashes are instantly available
// on Real-Debrid via the instantAvailability endpoint. Hashes are checked
// in batches; a hash maps to true only when at least one RD hoster variant
// exists for it. On a request or decode error the partial result collected
// so far is returned (best-effort, errors are only logged).
func (r *RealDebrid) IsAvailable(hashes []string) map[string]bool {
	// Check if the infohashes are available in the local cache
	result := make(map[string]bool)
	// Process hashes in batches of 200 (the loop advances by 200 per request)
	for i := 0; i < len(hashes); i += 200 {
		end := i + 200
		if end > len(hashes) {
			end = len(hashes)
		}
		// Filter out empty strings
		validHashes := make([]string, 0, end-i)
		for _, hash := range hashes[i:end] {
			if hash != "" {
				validHashes = append(validHashes, hash)
			}
		}
		// If no valid hashes in this batch, continue to the next batch
		if len(validHashes) == 0 {
			continue
		}
		hashStr := strings.Join(validHashes, "/")
		url := fmt.Sprintf("%s/torrents/instantAvailability/%s", r.Host, hashStr)
		req, _ := http.NewRequest(http.MethodGet, url, nil)
		resp, err := r.client.MakeRequest(req)
		if err != nil {
			r.logger.Info().Msgf("Error checking availability: %v", err)
			return result
		}
		var data AvailabilityResponse
		err = json.Unmarshal(resp, &data)
		if err != nil {
			r.logger.Info().Msgf("Error marshalling availability: %v", err)
			return result
		}
		// The API keys availability by lowercase hash; preserve the caller's
		// original casing in the result map.
		for _, h := range hashes[i:end] {
			hosters, exists := data[strings.ToLower(h)]
			if exists && len(hosters.Rd) > 0 {
				result[h] = true
			}
		}
	}
	return result
}
// SubmitMagnet adds the torrent to Real-Debrid: it uploads the raw .torrent
// file when one is attached, and otherwise submits the magnet link.
func (r *RealDebrid) SubmitMagnet(t *types.Torrent) (*types.Torrent, error) {
	if t.Magnet.IsTorrent() {
		return r.addTorrent(t)
	}
	return r.addMagnet(t)
}
// addTorrent uploads the raw torrent file bytes via PUT /torrents/addTorrent
// and stamps the torrent with the returned id, the provider name, and the
// configured mount path. The torrent is mutated in place and also returned.
func (r *RealDebrid) addTorrent(t *types.Torrent) (*types.Torrent, error) {
	url := fmt.Sprintf("%s/torrents/addTorrent", r.Host)
	var data AddMagnetSchema
	req, err := http.NewRequest(http.MethodPut, url, bytes.NewReader(t.Magnet.File))
	if err != nil {
		return nil, err
	}
	req.Header.Add("Content-Type", "application/x-bittorrent")
	resp, err := r.client.MakeRequest(req)
	if err != nil {
		return nil, err
	}
	if err = json.Unmarshal(resp, &data); err != nil {
		return nil, err
	}
	t.Id = data.Id
	t.Debrid = r.Name
	t.MountPath = r.MountPath
	return t, nil
}
// addMagnet submits a magnet link via POST /torrents/addMagnet and stamps
// the torrent with the returned id, provider name, and mount path.
// The torrent is mutated in place and also returned.
func (r *RealDebrid) addMagnet(t *types.Torrent) (*types.Torrent, error) {
	url := fmt.Sprintf("%s/torrents/addMagnet", r.Host)
	payload := gourl.Values{
		"magnet": {t.Magnet.Link},
	}
	var data AddMagnetSchema
	req, err := http.NewRequest(http.MethodPost, url, strings.NewReader(payload.Encode()))
	if err != nil {
		return nil, err
	}
	// The sibling addTorrent sets an explicit Content-Type; do the same here
	// so the form body is parsed as URL-encoded data.
	req.Header.Add("Content-Type", "application/x-www-form-urlencoded")
	resp, err := r.client.MakeRequest(req)
	if err != nil {
		return nil, err
	}
	if err = json.Unmarshal(resp, &data); err != nil {
		return nil, err
	}
	t.Id = data.Id
	t.Debrid = r.Name
	t.MountPath = r.MountPath
	return t, nil
}
// GetTorrent fetches full info for a single torrent from /torrents/info/{id}
// and converts it into a types.Torrent. A 404 is mapped to
// request.TorrentNotFoundError so callers can detect deletions; any other
// non-200 status is returned as an error carrying the response body.
func (r *RealDebrid) GetTorrent(torrentId string) (*types.Torrent, error) {
	url := fmt.Sprintf("%s/torrents/info/%s", r.Host, torrentId)
	req, _ := http.NewRequest(http.MethodGet, url, nil)
	resp, err := r.client.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	bodyBytes, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("reading response body: %w", err)
	}
	if resp.StatusCode != http.StatusOK {
		if resp.StatusCode == http.StatusNotFound {
			return nil, request.TorrentNotFoundError
		}
		return nil, fmt.Errorf("realdebrid API error: Status: %d || Body: %s", resp.StatusCode, string(bodyBytes))
	}
	var data torrentInfo
	err = json.Unmarshal(bodyBytes, &data)
	if err != nil {
		return nil, err
	}
	t := &types.Torrent{
		Id:               data.ID,
		Name:             data.Filename,
		Bytes:            data.Bytes,
		Folder:           data.OriginalFilename,
		Progress:         data.Progress,
		Speed:            data.Speed,
		Seeders:          data.Seeders,
		Added:            data.Added,
		Status:           data.Status,
		Filename:         data.Filename,
		OriginalFilename: data.OriginalFilename,
		Links:            data.Links,
		Debrid:           r.Name,
		MountPath:        r.MountPath,
	}
	// Populate files through the configured filters (samples/extensions/size).
	t.Files = r.getTorrentFiles(t, data) // Get selected files
	return t, nil
}
// UpdateTorrent refreshes the given torrent in place from
// /torrents/info/{id}. A 404 is mapped to request.TorrentNotFoundError.
// NOTE(review): unlike GetTorrent (which filters via getTorrentFiles), this
// uses getSelectedFiles and therefore only keeps files that already have
// download links — presumably intentional for already-selected torrents;
// confirm against callers.
func (r *RealDebrid) UpdateTorrent(t *types.Torrent) error {
	url := fmt.Sprintf("%s/torrents/info/%s", r.Host, t.Id)
	req, _ := http.NewRequest(http.MethodGet, url, nil)
	resp, err := r.client.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	bodyBytes, err := io.ReadAll(resp.Body)
	if err != nil {
		return fmt.Errorf("reading response body: %w", err)
	}
	if resp.StatusCode != http.StatusOK {
		if resp.StatusCode == http.StatusNotFound {
			return request.TorrentNotFoundError
		}
		return fmt.Errorf("realdebrid API error: Status: %d || Body: %s", resp.StatusCode, string(bodyBytes))
	}
	var data torrentInfo
	err = json.Unmarshal(bodyBytes, &data)
	if err != nil {
		return err
	}
	t.Name = data.Filename
	t.Bytes = data.Bytes
	t.Folder = data.OriginalFilename
	t.Progress = data.Progress
	t.Status = data.Status
	t.Speed = data.Speed
	t.Seeders = data.Seeders
	t.Filename = data.Filename
	t.OriginalFilename = data.OriginalFilename
	t.Links = data.Links
	t.MountPath = r.MountPath
	t.Debrid = r.Name
	t.Added = data.Added
	t.Files = getSelectedFiles(t, data) // Get selected files
	return nil
}
// CheckStatus drives a torrent through Real-Debrid's state machine:
//   - "waiting_files_selection": filter the files, POST the selection, and
//     loop to re-check the status;
//   - "downloaded": attach the selected files (and, unless isSymlink,
//     generate direct download links) and return;
//   - a "downloading"-family status: return nil only if the torrent was
//     submitted as DownloadUncached, otherwise report it as not cached;
//   - anything else is treated as a terminal error status.
// The torrent is mutated in place on every iteration and is always returned,
// even alongside an error.
func (r *RealDebrid) CheckStatus(t *types.Torrent, isSymlink bool) (*types.Torrent, error) {
	url := fmt.Sprintf("%s/torrents/info/%s", r.Host, t.Id)
	// The same GET request object is reused for each poll of the loop.
	req, _ := http.NewRequest(http.MethodGet, url, nil)
	for {
		resp, err := r.client.MakeRequest(req)
		if err != nil {
			r.logger.Info().Msgf("ERROR Checking file: %v", err)
			return t, err
		}
		var data torrentInfo
		if err = json.Unmarshal(resp, &data); err != nil {
			return t, err
		}
		status := data.Status
		t.Name = data.Filename // Important because some magnet changes the name
		t.Folder = data.OriginalFilename
		t.Filename = data.Filename
		t.OriginalFilename = data.OriginalFilename
		t.Bytes = data.Bytes
		t.Progress = data.Progress
		t.Speed = data.Speed
		t.Seeders = data.Seeders
		t.Links = data.Links
		t.Status = status
		t.Debrid = r.Name
		t.MountPath = r.MountPath
		if status == "waiting_files_selection" {
			t.Files = r.getTorrentFiles(t, data)
			if len(t.Files) == 0 {
				return t, fmt.Errorf("no video files found")
			}
			filesId := make([]string, 0)
			for _, f := range t.Files {
				filesId = append(filesId, f.Id)
			}
			p := gourl.Values{
				"files": {strings.Join(filesId, ",")},
			}
			payload := strings.NewReader(p.Encode())
			// Shadowed req: a fresh POST for the selection call only.
			req, _ := http.NewRequest(http.MethodPost, fmt.Sprintf("%s/torrents/selectFiles/%s", r.Host, t.Id), payload)
			res, err := r.client.Do(req)
			if err != nil {
				return t, err
			}
			if res.StatusCode != http.StatusNoContent {
				return t, fmt.Errorf("realdebrid API error: Status: %d", res.StatusCode)
			}
		} else if status == "downloaded" {
			t.Files = getSelectedFiles(t, data) // Get selected files
			r.logger.Info().Msgf("Torrent: %s downloaded to RD", t.Name)
			if !isSymlink {
				err = r.GenerateDownloadLinks(t)
				if err != nil {
					return t, err
				}
			}
			break
		} else if utils.Contains(r.GetDownloadingStatus(), status) {
			if !t.DownloadUncached {
				return t, fmt.Errorf("torrent: %s not cached", t.Name)
			}
			return t, nil
		} else {
			return t, fmt.Errorf("torrent: %s has error: %s", t.Name, status)
		}
	}
	return t, nil
}
// DeleteTorrent removes a torrent from the Real-Debrid account and logs
// the deletion on success.
func (r *RealDebrid) DeleteTorrent(torrentId string) error {
	endpoint := fmt.Sprintf("%s/torrents/delete/%s", r.Host, torrentId)
	req, _ := http.NewRequest(http.MethodDelete, endpoint, nil)
	_, err := r.client.MakeRequest(req)
	if err != nil {
		return err
	}
	r.logger.Info().Msgf("Torrent: %s deleted from RD", torrentId)
	return nil
}
// GenerateDownloadLinks unrestricts every file of the torrent concurrently
// (one goroutine per file) and replaces t.Files with the linked results.
// If any file fails, the first error is returned and t.Files is left
// untouched. Both channels are buffered to len(t.Files) so the workers
// never block.
func (r *RealDebrid) GenerateDownloadLinks(t *types.Torrent) error {
	filesCh := make(chan types.File, len(t.Files))
	errCh := make(chan error, len(t.Files))
	var wg sync.WaitGroup
	wg.Add(len(t.Files))
	for _, f := range t.Files {
		// Each worker gets its own copy of the file value.
		go func(file types.File) {
			defer wg.Done()
			link, err := r.GetDownloadLink(t, &file)
			if err != nil {
				errCh <- err
				return
			}
			file.DownloadLink = link
			filesCh <- file
		}(f)
	}
	// Close both channels once all workers have finished, so the range
	// loops below terminate.
	go func() {
		wg.Wait()
		close(filesCh)
		close(errCh)
	}()
	// Collect results
	files := make(map[string]types.File, len(t.Files))
	for file := range filesCh {
		files[file.Name] = file
	}
	// Check for errors
	for err := range errCh {
		if err != nil {
			return err // Return the first error encountered
		}
	}
	t.Files = files
	return nil
}
// CheckLink asks Real-Debrid whether a hoster link is still valid via
// POST /unrestrict/check. A 404 means the file has been removed and
// request.HosterUnavailableError is returned; any other outcome is treated
// as available (nil).
func (r *RealDebrid) CheckLink(link string) error {
	url := fmt.Sprintf("%s/unrestrict/check", r.Host)
	payload := gourl.Values{
		"link": {link},
	}
	req, _ := http.NewRequest(http.MethodPost, url, strings.NewReader(payload.Encode()))
	resp, err := r.client.Do(req)
	if err != nil {
		return err
	}
	// The body is not inspected, but it must be drained and closed so the
	// transport can reuse the connection (the previous version leaked it).
	defer resp.Body.Close()
	_, _ = io.Copy(io.Discard, resp.Body)
	if resp.StatusCode == http.StatusNotFound {
		return request.HosterUnavailableError // File has been removed
	}
	return nil
}
// _getDownloadLink performs a single POST /unrestrict/link call with the
// download client's current key. Known Real-Debrid error codes are mapped
// to sentinel errors:
//   - 19, 24, 35 -> request.HosterUnavailableError (file removed / link nerfed)
//   - 23, 34, 36 -> request.TrafficExceededError (bandwidth limit)
// Any other code or status is returned as a formatted error.
func (r *RealDebrid) _getDownloadLink(file *types.File) (*types.DownloadLink, error) {
	url := fmt.Sprintf("%s/unrestrict/link/", r.Host)
	payload := gourl.Values{
		"link": {file.Link},
	}
	req, _ := http.NewRequest(http.MethodPost, url, strings.NewReader(payload.Encode()))
	resp, err := r.downloadClient.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		// Read the response body to get the error message
		b, err := io.ReadAll(resp.Body)
		if err != nil {
			return nil, err
		}
		var data ErrorResponse
		if err = json.Unmarshal(b, &data); err != nil {
			return nil, fmt.Errorf("error unmarshalling %d || %s \n %s", resp.StatusCode, err, string(b))
		}
		switch data.ErrorCode {
		case 19:
			return nil, request.HosterUnavailableError // File has been removed
		case 23:
			return nil, request.TrafficExceededError
		case 24:
			return nil, request.HosterUnavailableError // Link has been nerfed
		case 34:
			return nil, request.TrafficExceededError // traffic exceeded
		case 35:
			return nil, request.HosterUnavailableError
		case 36:
			return nil, request.TrafficExceededError // traffic exceeded
		default:
			return nil, fmt.Errorf("realdebrid API error: Status: %d || Code: %d", resp.StatusCode, data.ErrorCode)
		}
	}
	b, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	var data UnrestrictResponse
	if err = json.Unmarshal(b, &data); err != nil {
		return nil, fmt.Errorf("realdebrid API error: Error unmarshalling response: %w", err)
	}
	if data.Download == "" {
		return nil, fmt.Errorf("realdebrid API error: download link not found")
	}
	return &types.DownloadLink{
		Filename:     data.Filename,
		Size:         data.Filesize,
		Link:         data.Link,
		DownloadLink: data.Download,
		Generated:    time.Now(),
	}, nil
}
// GetDownloadLink unrestricts a file's hoster link into a direct download
// link using the current download key. If the key has exceeded its traffic
// limit, the call is retried up to 5 times with exponential backoff.
//
// Fixes over the previous version: the retry loop never decremented its
// counter (so a persistently exhausted key retried forever), and when
// retries ran out it returned a nil link with a nil error.
func (r *RealDebrid) GetDownloadLink(t *types.Torrent, file *types.File) (*types.DownloadLink, error) {
	if r.currentDownloadKey == "" {
		// If no download key is set, use the first active one
		accounts := r.getActiveAccounts()
		if len(accounts) < 1 {
			// No active download keys. It's likely that the key has reached bandwidth limit
			return nil, fmt.Errorf("no active download keys")
		}
		r.currentDownloadKey = accounts[0].Token
	}
	r.downloadClient.SetHeader("Authorization", fmt.Sprintf("Bearer %s", r.currentDownloadKey))
	downloadLink, err := r._getDownloadLink(file)
	if err == nil {
		return downloadLink, nil
	}
	if !errors.Is(err, request.TrafficExceededError) {
		// Non-traffic errors are not retryable here.
		return nil, err
	}
	// Traffic exceeded: retry a bounded number of times with exponential backoff.
	backOff := 1 * time.Second
	for retries := 5; retries > 0; retries-- {
		time.Sleep(backOff)
		backOff *= 2 // Exponential backoff
		downloadLink, err = r._getDownloadLink(file)
		if err == nil {
			return downloadLink, nil
		}
		if !errors.Is(err, request.TrafficExceededError) {
			return nil, err
		}
	}
	// Retries exhausted: surface the last traffic error instead of a nil,nil pair.
	return nil, err
}
// GetCheckCached reports whether instant-availability checking is enabled.
func (r *RealDebrid) GetCheckCached() bool {
	return r.checkCached
}
// getTorrents fetches one page of the account's torrents from /torrents,
// returning the total item count reported by the X-Total-Count header along
// with the page. Torrents not yet in the "downloaded" state are skipped.
//
// Fixes over the previous version: a write-only `filenames` map was removed,
// and the response body is now closed on the 204 No Content path (it was
// leaked before — only the 200 and error paths closed it).
func (r *RealDebrid) getTorrents(offset int, limit int) (int, []*types.Torrent, error) {
	url := fmt.Sprintf("%s/torrents?limit=%d", r.Host, limit)
	torrents := make([]*types.Torrent, 0)
	if offset > 0 {
		url = fmt.Sprintf("%s&offset=%d", url, offset)
	}
	req, _ := http.NewRequest(http.MethodGet, url, nil)
	resp, err := r.client.Do(req)
	if err != nil {
		return 0, torrents, err
	}
	defer resp.Body.Close()
	if resp.StatusCode == http.StatusNoContent {
		// No torrents at this offset.
		return 0, torrents, nil
	}
	if resp.StatusCode != http.StatusOK {
		return 0, torrents, fmt.Errorf("realdebrid API error: %d", resp.StatusCode)
	}
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return 0, torrents, err
	}
	totalItems, _ := strconv.Atoi(resp.Header.Get("X-Total-Count"))
	var data []TorrentsResponse
	if err = json.Unmarshal(body, &data); err != nil {
		return 0, torrents, err
	}
	for _, t := range data {
		if t.Status != "downloaded" {
			continue
		}
		torrents = append(torrents, &types.Torrent{
			Id:               t.Id,
			Name:             t.Filename,
			Bytes:            t.Bytes,
			Progress:         t.Progress,
			Status:           t.Status,
			Filename:         t.Filename,
			OriginalFilename: t.Filename,
			Links:            t.Links,
			Files:            make(map[string]types.File),
			InfoHash:         t.Hash,
			Debrid:           r.Name,
			MountPath:        r.MountPath,
			Added:            t.Added.Format(time.RFC3339),
		})
	}
	return totalItems, torrents, nil
}
// GetTorrents pages through the account's completed torrents until an empty
// page is returned, accumulating all results. The first page error aborts
// the walk and is returned to the caller.
func (r *RealDebrid) GetTorrents() ([]*types.Torrent, error) {
	const pageSize = 5000
	all := make([]*types.Torrent, 0)
	for offset := 0; ; {
		_, page, err := r.getTorrents(offset, pageSize)
		if err != nil {
			return nil, err
		}
		if len(page) == 0 {
			// Exhausted: no more torrents past this offset.
			return all, nil
		}
		all = append(all, page...)
		offset += len(page)
	}
}
// GetDownloads pages through the account's existing unrestricted downloads
// (using the first active account's token) and returns them keyed by the
// original hoster link. Duplicate links keep their first occurrence — the
// list is ordered by date, so the first one seen is the most recent.
// Page-level errors end the walk early with the results collected so far.
func (r *RealDebrid) GetDownloads() (map[string]types.DownloadLink, error) {
	links := make(map[string]types.DownloadLink)
	offset := 0
	limit := 1000
	accounts := r.getActiveAccounts()
	if len(accounts) < 1 {
		// No active download keys. It's likely that the key has reached bandwidth limit
		return nil, fmt.Errorf("no active download keys")
	}
	r.downloadClient.SetHeader("Authorization", fmt.Sprintf("Bearer %s", accounts[0].Token))
	for {
		dl, err := r._getDownloads(offset, limit)
		if err != nil {
			break
		}
		if len(dl) == 0 {
			break
		}
		for _, d := range dl {
			if _, exists := links[d.Link]; exists {
				// Already recorded; newer entries come first, so keep the existing one.
				continue
			}
			links[d.Link] = d
		}
		offset += len(dl)
	}
	return links, nil
}
// _getDownloads fetches a single page of the user's unrestricted downloads
// from /downloads and converts each entry into a types.DownloadLink.
func (r *RealDebrid) _getDownloads(offset int, limit int) ([]types.DownloadLink, error) {
	endpoint := fmt.Sprintf("%s/downloads?limit=%d", r.Host, limit)
	if offset > 0 {
		endpoint = fmt.Sprintf("%s&offset=%d", endpoint, offset)
	}
	req, _ := http.NewRequest(http.MethodGet, endpoint, nil)
	body, err := r.downloadClient.MakeRequest(req)
	if err != nil {
		return nil, err
	}
	var items []DownloadsResponse
	if err = json.Unmarshal(body, &items); err != nil {
		return nil, err
	}
	out := make([]types.DownloadLink, 0, len(items))
	for _, item := range items {
		out = append(out, types.DownloadLink{
			Filename:     item.Filename,
			Size:         item.Filesize,
			Link:         item.Link,
			DownloadLink: item.Download,
			Generated:    item.Generated,
			Id:           item.Id,
		})
	}
	return out, nil
}
// GetDownloadingStatus lists the Real-Debrid statuses that count as
// "still in progress" for CheckStatus.
func (r *RealDebrid) GetDownloadingStatus() []string {
	return []string{"downloading", "magnet_conversion", "queued", "compressing", "uploading"}
}
// GetDownloadUncached reports whether uncached torrents may be downloaded.
func (r *RealDebrid) GetDownloadUncached() bool {
	return r.DownloadUncached
}
// GetMountPath returns the configured mount folder for this provider.
func (r *RealDebrid) GetMountPath() string {
	return r.MountPath
}
// DisableAccount marks the given download account as disabled (e.g. after a
// bandwidth-limit error) and clears the current download key so the next
// GetDownloadLink picks a fresh active account. The last remaining account
// is never disabled.
// NOTE(review): currentDownloadKey is cleared even when accountId does not
// exist in the map — presumably to force a re-pick; confirm this is intended.
func (r *RealDebrid) DisableAccount(accountId string) {
	r.accountsMutex.Lock()
	defer r.accountsMutex.Unlock()
	if len(r.accounts) == 1 {
		r.logger.Info().Msgf("Cannot disable last account: %s", accountId)
		return
	}
	r.currentDownloadKey = ""
	if value, ok := r.accounts[accountId]; ok {
		value.Disabled = true
		r.accounts[accountId] = value
		r.logger.Info().Msgf("Disabled account Index: %s", value.ID)
	}
}
// ResetActiveDownloadKeys re-enables every download account, typically after
// a bandwidth window has reset.
func (r *RealDebrid) ResetActiveDownloadKeys() {
	r.accountsMutex.Lock()
	defer r.accountsMutex.Unlock()
	for id, acct := range r.accounts {
		acct.Disabled = false
		r.accounts[id] = acct
	}
}
// getActiveAccounts returns a snapshot of the non-disabled download
// accounts, sorted by ID so account selection is deterministic.
func (r *RealDebrid) getActiveAccounts() []types.Account {
	r.accountsMutex.RLock()
	defer r.accountsMutex.RUnlock()
	active := make([]types.Account, 0, len(r.accounts))
	for _, acct := range r.accounts {
		if !acct.Disabled {
			active = append(active, acct)
		}
	}
	sort.Slice(active, func(a, b int) bool {
		return active[a].ID < active[b].ID
	})
	return active
}
// DeleteDownloadLink removes an unrestricted download entry by id via
// DELETE /downloads/delete/{id}.
func (r *RealDebrid) DeleteDownloadLink(linkId string) error {
	endpoint := fmt.Sprintf("%s/downloads/delete/%s", r.Host, linkId)
	req, _ := http.NewRequest(http.MethodDelete, endpoint, nil)
	_, err := r.downloadClient.MakeRequest(req)
	return err
}

View File

@@ -1,11 +1,13 @@
package debrid
package store
import (
"bufio"
"cmp"
"context"
"crypto/tls"
"errors"
"fmt"
"net/http"
"os"
"path"
"path/filepath"
@@ -16,13 +18,20 @@ import (
"sync/atomic"
"time"
"github.com/puzpuzpuz/xsync/v4"
"github.com/sirrobot01/decypharr/pkg/debrid/common"
"github.com/sirrobot01/decypharr/pkg/rclone"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
"encoding/json"
_ "time/tzdata"
"github.com/go-co-op/gocron/v2"
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/logger"
"github.com/sirrobot01/decypharr/internal/utils"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
)
type WebDavFolderNaming string
@@ -68,19 +77,17 @@ type RepairRequest struct {
type Cache struct {
dir string
client types.Client
client common.Client
logger zerolog.Logger
torrents *torrentCache
downloadLinks *downloadLinkCache
invalidDownloadLinks sync.Map
folderNaming WebDavFolderNaming
torrents *torrentCache
folderNaming WebDavFolderNaming
listingDebouncer *utils.Debouncer[bool]
// monitors
repairRequest sync.Map
failedToReinsert sync.Map
downloadLinkRequests sync.Map
invalidDownloadLinks *xsync.Map[string, string]
repairRequest *xsync.Map[string, *reInsertRequest]
failedToReinsert *xsync.Map[string, struct{}]
// repair
repairChan chan RepairRequest
@@ -89,10 +96,9 @@ type Cache struct {
ready chan struct{}
// config
workers int
torrentRefreshInterval string
downloadLinksRefreshInterval string
autoExpiresLinksAfterDuration time.Duration
workers int
torrentRefreshInterval string
downloadLinksRefreshInterval string
// refresh mutex
downloadLinksRefreshMu sync.RWMutex // for refreshing download links
@@ -105,18 +111,34 @@ type Cache struct {
config config.Debrid
customFolders []string
mounter *rclone.Mount
httpClient *http.Client
}
func New(dc config.Debrid, client types.Client) *Cache {
func NewDebridCache(dc config.Debrid, client common.Client, mounter *rclone.Mount) *Cache {
cfg := config.Get()
cet, _ := time.LoadLocation("CET")
cetSc, _ := gocron.NewScheduler(gocron.WithLocation(cet))
scheduler, _ := gocron.NewScheduler(gocron.WithLocation(time.Local))
autoExpiresLinksAfter, err := time.ParseDuration(dc.AutoExpireLinksAfter)
if autoExpiresLinksAfter == 0 || err != nil {
autoExpiresLinksAfter = 48 * time.Hour
cet, err := time.LoadLocation("CET")
if err != nil {
cet, err = time.LoadLocation("Europe/Berlin") // Fallback to Berlin if CET fails
if err != nil {
cet = time.FixedZone("CET", 1*60*60) // Fallback to a fixed CET zone
}
}
cetSc, err := gocron.NewScheduler(gocron.WithLocation(cet))
if err != nil {
// If we can't create a CET scheduler, fallback to local time
cetSc, _ = gocron.NewScheduler(gocron.WithLocation(time.Local), gocron.WithGlobalJobOptions(
gocron.WithTags("decypharr-"+dc.Name)))
}
scheduler, err := gocron.NewScheduler(
gocron.WithLocation(time.Local),
gocron.WithGlobalJobOptions(
gocron.WithTags("decypharr-"+dc.Name)))
if err != nil {
// If we can't create a local scheduler, fallback to CET
scheduler = cetSc
}
var customFolders []string
dirFilters := map[string][]directoryFilter{}
for name, value := range dc.Directories {
@@ -135,27 +157,43 @@ func New(dc config.Debrid, client types.Client) *Cache {
customFolders = append(customFolders, name)
}
_log := logger.New(fmt.Sprintf("%s-webdav", client.GetName()))
_log := logger.New(fmt.Sprintf("%s-webdav", client.Name()))
transport := &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
TLSHandshakeTimeout: 10 * time.Second,
ResponseHeaderTimeout: 30 * time.Second,
MaxIdleConns: 10,
MaxIdleConnsPerHost: 2,
}
httpClient := &http.Client{
Transport: transport,
Timeout: 0,
}
c := &Cache{
dir: filepath.Join(cfg.Path, "cache", dc.Name), // path to save cache files
torrents: newTorrentCache(dirFilters),
client: client,
logger: _log,
workers: dc.Workers,
downloadLinks: newDownloadLinkCache(),
torrentRefreshInterval: dc.TorrentsRefreshInterval,
downloadLinksRefreshInterval: dc.DownloadLinksRefreshInterval,
folderNaming: WebDavFolderNaming(dc.FolderNaming),
autoExpiresLinksAfterDuration: autoExpiresLinksAfter,
saveSemaphore: make(chan struct{}, 50),
cetScheduler: cetSc,
scheduler: scheduler,
torrents: newTorrentCache(dirFilters),
client: client,
logger: _log,
workers: dc.Workers,
torrentRefreshInterval: dc.TorrentsRefreshInterval,
downloadLinksRefreshInterval: dc.DownloadLinksRefreshInterval,
folderNaming: WebDavFolderNaming(dc.FolderNaming),
saveSemaphore: make(chan struct{}, 50),
cetScheduler: cetSc,
scheduler: scheduler,
config: dc,
customFolders: customFolders,
mounter: mounter,
ready: make(chan struct{}),
ready: make(chan struct{}),
httpClient: httpClient,
invalidDownloadLinks: xsync.NewMap[string, string](),
repairRequest: xsync.NewMap[string, *reInsertRequest](),
failedToReinsert: xsync.NewMap[string, struct{}](),
repairChan: make(chan RepairRequest, 100), // Initialize the repair channel, max 100 requests buffered
}
c.listingDebouncer = utils.NewDebouncer[bool](100*time.Millisecond, func(refreshRclone bool) {
@@ -177,6 +215,15 @@ func (c *Cache) StreamWithRclone() bool {
// and before you discard the instance on a restart.
func (c *Cache) Reset() {
// Unmount first
if c.mounter != nil && c.mounter.IsMounted() {
if err := c.mounter.Unmount(); err != nil {
c.logger.Error().Err(err).Msgf("Failed to unmount %s", c.config.Name)
} else {
c.logger.Info().Msgf("Unmounted %s", c.config.Name)
}
}
if err := c.scheduler.StopJobs(); err != nil {
c.logger.Error().Err(err).Msg("Failed to stop scheduler jobs")
}
@@ -189,19 +236,17 @@ func (c *Cache) Reset() {
c.listingDebouncer.Stop()
// Close the repair channel
close(c.repairChan)
if c.repairChan != nil {
close(c.repairChan)
}
// 1. Reset torrent storage
c.torrents.reset()
// 2. Reset download-link cache
c.downloadLinks.reset()
// 3. Clear any sync.Maps
c.invalidDownloadLinks = sync.Map{}
c.repairRequest = sync.Map{}
c.failedToReinsert = sync.Map{}
c.downloadLinkRequests = sync.Map{}
c.invalidDownloadLinks = xsync.NewMap[string, string]()
c.repairRequest = xsync.NewMap[string, *reInsertRequest]()
c.failedToReinsert = xsync.NewMap[string, struct{}]()
// 5. Rebuild the listing debouncer
c.listingDebouncer = utils.NewDebouncer[bool](
@@ -213,6 +258,9 @@ func (c *Cache) Reset() {
// 6. Reset repair channel so the next Start() can spin it up
c.repairChan = make(chan RepairRequest, 100)
// Reset the ready channel
c.ready = make(chan struct{})
}
func (c *Cache) Start(ctx context.Context) error {
@@ -220,31 +268,31 @@ func (c *Cache) Start(ctx context.Context) error {
return fmt.Errorf("failed to create cache directory: %w", err)
}
c.logger.Info().Msgf("Started indexing...")
if err := c.Sync(ctx); err != nil {
return fmt.Errorf("failed to sync cache: %w", err)
}
// Fire the ready channel
close(c.ready)
c.logger.Info().Msgf("Indexing complete, %d torrents loaded", len(c.torrents.getAll()))
// initial download links
go c.refreshDownloadLinks(ctx)
if err := c.StartSchedule(ctx); err != nil {
c.logger.Error().Err(err).Msg("Failed to start cache worker")
}
c.repairChan = make(chan RepairRequest, 100)
go c.repairWorker(ctx)
// Fire the ready channel
close(c.ready)
cfg := config.Get()
name := c.client.GetName()
name := c.client.Name()
addr := cfg.BindAddress + ":" + cfg.Port + cfg.URLBase + "webdav/" + name + "/"
c.logger.Info().Msgf("%s WebDav server running at %s", name, addr)
<-ctx.Done()
c.logger.Info().Msgf("Stopping %s WebDav server", name)
c.Reset()
if c.mounter != nil {
if err := c.mounter.Mount(ctx); err != nil {
c.logger.Error().Err(err).Msgf("Failed to mount %s", c.config.Name)
}
} else {
c.logger.Warn().Msgf("Mounting is disabled for %s", c.config.Name)
}
return nil
}
@@ -307,10 +355,10 @@ func (c *Cache) load(ctx context.Context) (map[string]CachedTorrent, error) {
}
isComplete := true
if len(ct.Files) != 0 {
if len(ct.GetFiles()) != 0 {
// Check if all files are valid, if not, delete the file.json and remove from cache.
fs := make(map[string]types.File, len(ct.Files))
for _, f := range ct.Files {
fs := make(map[string]types.File, len(ct.GetFiles()))
for _, f := range ct.GetFiles() {
if f.Link == "" {
isComplete = false
break
@@ -368,7 +416,7 @@ func (c *Cache) Sync(ctx context.Context) error {
totalTorrents := len(torrents)
c.logger.Info().Msgf("%d torrents found from %s", totalTorrents, c.client.GetName())
c.logger.Info().Msgf("%d torrents found from %s", totalTorrents, c.client.Name())
newTorrents := make([]*types.Torrent, 0)
idStore := make(map[string]struct{}, totalTorrents)
@@ -390,9 +438,11 @@ func (c *Cache) Sync(ctx context.Context) error {
if len(deletedTorrents) > 0 {
c.logger.Info().Msgf("Found %d deleted torrents", len(deletedTorrents))
for _, id := range deletedTorrents {
if _, ok := cachedTorrents[id]; ok {
c.deleteTorrent(id, false) // delete from cache
}
// Remove from cache and debrid service
delete(cachedTorrents, id)
// Remove the json file from disk
c.removeFile(id, false)
}
}
@@ -504,10 +554,10 @@ func (c *Cache) setTorrent(t CachedTorrent, callback func(torrent CachedTorrent)
mergedFiles := mergeFiles(o, updatedTorrent) // Useful for merging files across multiple torrents, while keeping the most recent
updatedTorrent.Files = mergedFiles
}
c.torrents.set(torrentName, t, updatedTorrent)
c.SaveTorrent(t)
c.torrents.set(torrentName, t)
go c.SaveTorrent(t)
if callback != nil {
callback(updatedTorrent)
go callback(updatedTorrent)
}
}
@@ -520,7 +570,7 @@ func (c *Cache) setTorrents(torrents map[string]CachedTorrent, callback func())
mergedFiles := mergeFiles(o, updatedTorrent)
updatedTorrent.Files = mergedFiles
}
c.torrents.set(torrentName, t, updatedTorrent)
c.torrents.set(torrentName, t)
}
c.SaveTorrents()
if callback != nil {
@@ -550,6 +600,10 @@ func (c *Cache) GetTorrents() map[string]CachedTorrent {
return c.torrents.getAll()
}
func (c *Cache) TotalTorrents() int {
return c.torrents.getAllCount()
}
func (c *Cache) GetTorrentByName(name string) *CachedTorrent {
if torrent, ok := c.torrents.getByName(name); ok {
return &torrent
@@ -557,6 +611,10 @@ func (c *Cache) GetTorrentByName(name string) *CachedTorrent {
return nil
}
func (c *Cache) GetTorrentsName() map[string]CachedTorrent {
return c.torrents.getAllByName()
}
func (c *Cache) GetTorrent(torrentId string) *CachedTorrent {
if torrent, ok := c.torrents.getByID(torrentId); ok {
return &torrent
@@ -665,8 +723,13 @@ func (c *Cache) ProcessTorrent(t *types.Torrent) error {
}
if !isComplete(t.Files) {
c.logger.Debug().Msgf("Torrent %s is still not complete. Triggering a reinsert(disabled)", t.Id)
c.logger.Debug().
Str("torrent_id", t.Id).
Str("torrent_name", t.Name).
Int("total_files", len(t.Files)).
Msg("Torrent still not complete after refresh, marking as bad")
} else {
addedOn, err := time.Parse(time.RFC3339, t.Added)
if err != nil {
addedOn = time.Now()
@@ -683,8 +746,9 @@ func (c *Cache) ProcessTorrent(t *types.Torrent) error {
return nil
}
func (c *Cache) AddTorrent(t *types.Torrent) error {
func (c *Cache) Add(t *types.Torrent) error {
if len(t.Files) == 0 {
c.logger.Warn().Msgf("Torrent %s has no files to add. Refreshing", t.Id)
if err := c.client.UpdateTorrent(t); err != nil {
return fmt.Errorf("failed to update torrent: %w", err)
}
@@ -701,12 +765,12 @@ func (c *Cache) AddTorrent(t *types.Torrent) error {
c.setTorrent(ct, func(tor CachedTorrent) {
c.RefreshListings(true)
})
go c.GenerateDownloadLinks(ct)
go c.GetFileDownloadLinks(ct)
return nil
}
func (c *Cache) GetClient() types.Client {
func (c *Cache) Client() common.Client {
return c.client
}
@@ -744,19 +808,19 @@ func (c *Cache) deleteTorrent(id string, removeFromDebrid bool) bool {
if torrent, ok := c.torrents.getByID(id); ok {
c.torrents.removeId(id) // Delete id from cache
defer func() {
c.removeFromDB(id)
c.removeFile(id, false)
if removeFromDebrid {
_ = c.client.DeleteTorrent(id) // Skip error handling, we don't care if it fails
}
}() // defer delete from debrid
torrentName := torrent.Name
torrentName := c.GetTorrentFolder(torrent.Torrent)
if t, ok := c.torrents.getByName(torrentName); ok {
newFiles := map[string]types.File{}
newId := ""
for _, file := range t.Files {
for _, file := range t.GetFiles() {
if file.TorrentId != "" && file.TorrentId != id {
if newId == "" && file.TorrentId != "" {
newId = file.TorrentId
@@ -787,7 +851,7 @@ func (c *Cache) DeleteTorrents(ids []string) {
c.listingDebouncer.Call(true)
}
func (c *Cache) removeFromDB(torrentId string) {
func (c *Cache) removeFile(torrentId string, moveToTrash bool) {
// Moves the torrent file to the trash
filePath := filepath.Join(c.dir, torrentId+".json")
@@ -796,6 +860,14 @@ func (c *Cache) removeFromDB(torrentId string) {
return
}
if !moveToTrash {
// If not moving to trash, delete the file directly
if err := os.Remove(filePath); err != nil {
c.logger.Error().Err(err).Msgf("Failed to remove file: %s", filePath)
return
}
return
}
// Move the file to the trash
trashPath := filepath.Join(c.dir, "trash", torrentId+".json")
if err := os.MkdirAll(filepath.Dir(trashPath), 0755); err != nil {
@@ -815,6 +887,44 @@ func (c *Cache) OnRemove(torrentId string) {
}
}
func (c *Cache) GetLogger() zerolog.Logger {
// RemoveFile removes a file from the torrent cache
// TODO sends a re-insert that removes the file from debrid
//
// The file is soft-deleted (its Deleted flag is set and the copy is
// written back into the Files map). If no live files remain afterwards
// the whole torrent is deleted; otherwise the updated torrent is
// re-stored and the folder listing refresh is debounced.
func (c *Cache) RemoveFile(torrentId string, filename string) error {
	c.logger.Debug().Str("torrent_id", torrentId).Msgf("Removing file %s", filename)
	torrent, ok := c.torrents.getByID(torrentId)
	if !ok {
		return fmt.Errorf("torrent %s not found", torrentId)
	}
	file, ok := torrent.GetFile(filename)
	if !ok {
		return fmt.Errorf("file %s not found in torrent %s", filename, torrentId)
	}
	// GetFile returns a copy, so mutate it and write it back.
	file.Deleted = true
	torrent.Files[filename] = file
	// If the torrent has no files left, delete it
	// (GetFiles presumably excludes Deleted entries — the count drops to
	// zero once the last live file is removed).
	if len(torrent.GetFiles()) == 0 {
		c.logger.Debug().Msgf("Torrent %s has no files left, deleting it", torrentId)
		if err := c.DeleteTorrent(torrentId); err != nil {
			return fmt.Errorf("failed to delete torrent %s: %w", torrentId, err)
		}
		return nil
	}
	c.setTorrent(torrent, func(torrent CachedTorrent) {
		c.listingDebouncer.Call(true)
	}) // Update the torrent in the cache
	return nil
}
// Logger returns the cache's scoped zerolog logger.
func (c *Cache) Logger() zerolog.Logger {
	return c.logger
}

// GetConfig returns the debrid configuration this cache was built with.
func (c *Cache) GetConfig() config.Debrid {
	return c.config
}

// Download executes req using the cache's shared HTTP client.
func (c *Cache) Download(req *http.Request) (*http.Response, error) {
	return c.httpClient.Do(req)
}

View File

@@ -0,0 +1,192 @@
package store
import (
"errors"
"fmt"
"github.com/sirrobot01/decypharr/internal/utils"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
)
// downloadLinkRequest coordinates concurrent waiters on a single
// download-link fetch: one goroutine resolves the request via Complete,
// while any number of callers block in Wait for the outcome.
type downloadLinkRequest struct {
	result string
	err    error
	done   chan struct{}
}

// newDownloadLinkRequest returns a request ready for exactly one
// Complete call.
func newDownloadLinkRequest() *downloadLinkRequest {
	return &downloadLinkRequest{done: make(chan struct{})}
}

// Complete records the outcome and releases every waiter. It must be
// called exactly once; a second call would panic on the closed channel.
func (r *downloadLinkRequest) Complete(result string, err error) {
	r.result, r.err = result, err
	close(r.done)
}

// Wait blocks until Complete has been called, then returns the
// recorded result and error.
func (r *downloadLinkRequest) Wait() (string, error) {
	<-r.done
	return r.result, r.err
}
// GetDownloadLink returns a usable download link for the given file,
// serving from the cached-link store when possible and fetching a fresh
// link from the debrid client otherwise. An empty resulting link is
// reported as an error.
func (c *Cache) GetDownloadLink(torrentName, filename, fileLink string) (types.DownloadLink, error) {
	// Fast path: a cached, still-valid link.
	if cached, err := c.checkDownloadLink(fileLink); err == nil && !cached.Empty() {
		return cached, nil
	}

	fetched, err := c.fetchDownloadLink(torrentName, filename, fileLink)
	switch {
	case err != nil:
		return types.DownloadLink{}, err
	case fetched.Empty():
		return types.DownloadLink{}, fmt.Errorf("download link is empty for %s in torrent %s", filename, torrentName)
	default:
		return fetched, nil
	}
}
// fetchDownloadLink resolves a fresh download link for filename within
// torrentName. When the file's restricted link is missing it refreshes
// the torrent from the debrid; if still missing it re-inserts the
// torrent. A HosterUnavailableError triggers one re-insert + retry
// cycle. TrafficExceededError is returned unwrapped so callers can
// react to the fair-usage limit.
func (c *Cache) fetchDownloadLink(torrentName, filename, fileLink string) (types.DownloadLink, error) {
	emptyDownloadLink := types.DownloadLink{}
	ct := c.GetTorrentByName(torrentName)
	if ct == nil {
		return emptyDownloadLink, fmt.Errorf("torrent not found")
	}
	file, ok := ct.GetFile(filename)
	if !ok {
		return emptyDownloadLink, fmt.Errorf("file %s not found in torrent %s", filename, torrentName)
	}
	if file.Link == "" {
		// file link is empty, refresh the torrent to get restricted links
		ct = c.refreshTorrent(file.TorrentId) // Refresh the torrent from the debrid
		if ct == nil {
			return emptyDownloadLink, fmt.Errorf("failed to refresh torrent")
		}
		file, ok = ct.GetFile(filename)
		if !ok {
			return emptyDownloadLink, fmt.Errorf("file %s not found in refreshed torrent %s", filename, torrentName)
		}
	}
	// If file.Link is still empty after the refresh, re-insert the torrent.
	if file.Link == "" {
		newCt, err := c.reInsertTorrent(ct)
		if err != nil {
			return emptyDownloadLink, fmt.Errorf("failed to reinsert torrent. %w", err)
		}
		ct = newCt
		file, ok = ct.GetFile(filename)
		if !ok {
			return emptyDownloadLink, fmt.Errorf("file %s not found in reinserted torrent %s", filename, torrentName)
		}
	}
	c.logger.Trace().Msgf("Getting download link for %s(%s)", filename, file.Link)
	downloadLink, err := c.client.GetDownloadLink(ct.Torrent, &file)
	if err != nil {
		if errors.Is(err, utils.HosterUnavailableError) {
			// downloadLink is the zero value here (the call above failed),
			// so there is no token worth logging yet.
			c.logger.Trace().
				Str("filename", filename).
				Str("torrent_id", ct.Id).
				Msg("Hoster unavailable, attempting to reinsert torrent")
			newCt, err := c.reInsertTorrent(ct)
			if err != nil {
				return emptyDownloadLink, fmt.Errorf("failed to reinsert torrent: %w", err)
			}
			ct = newCt
			file, ok = ct.GetFile(filename)
			if !ok {
				return emptyDownloadLink, fmt.Errorf("file %s not found in reinserted torrent %s", filename, torrentName)
			}
			// Retry getting the download link
			downloadLink, err = c.client.GetDownloadLink(ct.Torrent, &file)
			if err != nil {
				return emptyDownloadLink, fmt.Errorf("retry failed to get download link: %w", err)
			}
			if downloadLink.Empty() {
				return emptyDownloadLink, fmt.Errorf("download link is empty after retry")
			}
			// BUG FIX: the successful retry previously fell through to an
			// unconditional error return, making the retry path useless.
			return downloadLink, nil
		} else if errors.Is(err, utils.TrafficExceededError) {
			// This is likely a fair usage limit error
			return emptyDownloadLink, err
		}
		return emptyDownloadLink, fmt.Errorf("failed to get download link: %w", err)
	}
	if downloadLink.Empty() {
		return emptyDownloadLink, fmt.Errorf("download link is empty")
	}
	return downloadLink, nil
}
// GetFileDownloadLinks asks the debrid client to generate download
// links for every file in the torrent. Failures are logged, not
// returned.
func (c *Cache) GetFileDownloadLinks(t CachedTorrent) {
	err := c.client.GetFileDownloadLinks(t.Torrent)
	if err == nil {
		return
	}
	c.logger.Error().Err(err).Str("torrent", t.Name).Msg("Failed to generate download links")
}
// checkDownloadLink looks link up in the account manager's link cache
// and returns it only if it has not been marked invalid.
func (c *Cache) checkDownloadLink(link string) (types.DownloadLink, error) {
	dl, err := c.client.AccountManager().GetDownloadLink(link)
	if err != nil {
		return dl, err
	}
	if c.downloadLinkIsInvalid(dl.DownloadLink) {
		// Cached but previously flagged bad — treat as a miss.
		return types.DownloadLink{}, fmt.Errorf("download link not found for %s", link)
	}
	return dl, nil
}
// MarkDownloadLinkAsInvalid records downloadLink as unusable with the
// given reason. When the reason is "bandwidth_exceeded", the owning
// account is also disabled so it drops out of rotation.
func (c *Cache) MarkDownloadLinkAsInvalid(downloadLink types.DownloadLink, reason string) {
	c.invalidDownloadLinks.Store(downloadLink.DownloadLink, reason)
	if reason != "bandwidth_exceeded" {
		return
	}
	// The account hit its fair-usage cap; disable it.
	accountManager := c.client.AccountManager()
	account, err := accountManager.GetAccount(downloadLink.Token)
	switch {
	case err != nil:
		c.logger.Error().Err(err).Str("token", utils.Mask(downloadLink.Token)).Msg("Failed to get account to disable")
	case account == nil:
		c.logger.Error().Str("token", utils.Mask(downloadLink.Token)).Msg("Account not found to disable")
	default:
		accountManager.Disable(account)
	}
}
// downloadLinkIsInvalid reports whether downloadLink was previously
// marked invalid, logging the stored reason when it was.
func (c *Cache) downloadLinkIsInvalid(downloadLink string) bool {
	reason, ok := c.invalidDownloadLinks.Load(downloadLink)
	if !ok {
		return false
	}
	c.logger.Debug().Msgf("Download link %s is invalid: %s", downloadLink, reason)
	return true
}
// GetDownloadByteRange returns the cached byte range for filename in
// torrentName, or an error when the torrent or file is unknown.
//
// Previously this indexed ct.Files directly, so a missing file silently
// yielded (nil, nil); it now uses GetFile like the rest of the package
// and reports the miss explicitly.
func (c *Cache) GetDownloadByteRange(torrentName, filename string) (*[2]int64, error) {
	ct := c.GetTorrentByName(torrentName)
	if ct == nil {
		return nil, fmt.Errorf("torrent not found")
	}
	file, ok := ct.GetFile(filename)
	if !ok {
		return nil, fmt.Errorf("file %s not found in torrent %s", filename, torrentName)
	}
	return file.ByteRange, nil
}
// GetTotalActiveDownloadLinks sums the download-link counts across all
// currently active accounts.
func (c *Cache) GetTotalActiveDownloadLinks() int {
	var total int
	for _, account := range c.client.AccountManager().Active() {
		total += account.DownloadLinksCount()
	}
	return total
}

View File

@@ -1,4 +1,4 @@
package debrid
package store
import (
"github.com/sirrobot01/decypharr/pkg/debrid/types"
@@ -19,9 +19,24 @@ func mergeFiles(torrents ...CachedTorrent) map[string]types.File {
})
for _, torrent := range torrents {
for _, file := range torrent.Files {
for _, file := range torrent.GetFiles() {
merged[file.Name] = file
}
}
return merged
}
// GetIngests snapshots every cached torrent as IngestData tagged with
// this debrid's name. The error result is always nil today and is kept
// for interface compatibility.
func (c *Cache) GetIngests() ([]types.IngestData, error) {
	torrents := c.GetTorrents()
	debridName := c.client.Name()
	// Pre-size: one entry per torrent, avoiding repeated growth copies.
	ingests := make([]types.IngestData, 0, len(torrents))
	for _, torrent := range torrents {
		ingests = append(ingests, types.IngestData{
			Debrid: debridName,
			Name:   torrent.Filename,
			Hash:   torrent.InfoHash,
			Size:   torrent.Bytes,
		})
	}
	return ingests, nil
}

View File

@@ -1,15 +1,16 @@
package debrid
package store
import (
"context"
"fmt"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
"io"
"net/http"
"os"
"strings"
"sync"
"time"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
)
type fileInfo struct {
@@ -120,83 +121,85 @@ func (c *Cache) refreshTorrents(ctx context.Context) {
close(workChan)
wg.Wait()
c.listingDebouncer.Call(false)
c.listingDebouncer.Call(true)
c.logger.Debug().Msgf("Processed %d new torrents", counter)
}
func (c *Cache) refreshRclone() error {
cfg := c.config
if cfg.RcUrl == "" {
return nil
}
if cfg.RcUrl == "" {
return nil
}
client := &http.Client{
Timeout: 10 * time.Second,
Transport: &http.Transport{
MaxIdleConns: 10,
IdleConnTimeout: 30 * time.Second,
DisableCompression: false,
MaxIdleConnsPerHost: 5,
},
}
// Create form data
data := ""
dirs := strings.FieldsFunc(cfg.RcRefreshDirs, func(r rune) bool {
return r == ',' || r == '&'
})
if len(dirs) == 0 {
data = "dir=__all__"
} else {
for index, dir := range dirs {
if dir != "" {
if index == 0 {
data += "dir=" + dir
} else {
data += "&dir" + fmt.Sprint(index+1) + "=" + dir
}
}
}
dirs = []string{"__all__"}
}
if c.mounter != nil {
return c.mounter.RefreshDir(dirs)
} else {
return c.refreshRcloneWithRC(dirs)
}
}
sendRequest := func(endpoint string) error {
req, err := http.NewRequest("POST", fmt.Sprintf("%s/%s", cfg.RcUrl, endpoint), strings.NewReader(data))
if err != nil {
return err
}
func (c *Cache) refreshRcloneWithRC(dirs []string) error {
cfg := c.config
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
if cfg.RcUser != "" && cfg.RcPass != "" {
req.SetBasicAuth(cfg.RcUser, cfg.RcPass)
}
resp, err := client.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
if resp.StatusCode != 200 {
body, _ := io.ReadAll(io.LimitReader(resp.Body, 1024))
return fmt.Errorf("failed to perform %s: %s - %s", endpoint, resp.Status, string(body))
}
_, _ = io.Copy(io.Discard, resp.Body)
if cfg.RcUrl == "" {
return nil
}
if err := sendRequest("vfs/forget"); err != nil {
return err
client := http.DefaultClient
// Create form data
data := c.buildRcloneRequestData(dirs)
if err := c.sendRcloneRequest(client, "vfs/forget", data); err != nil {
c.logger.Error().Err(err).Msg("Failed to send rclone vfs/forget request")
}
if err := sendRequest("vfs/refresh"); err != nil {
if err := c.sendRcloneRequest(client, "vfs/refresh", data); err != nil {
c.logger.Error().Err(err).Msg("Failed to send rclone vfs/refresh request")
}
return nil
}
// buildRcloneRequestData encodes dirs as rclone rc form data in the
// dir=...&dir2=...&dir3=... convention, skipping empty entries.
//
// Parameter numbering is based on how many directories have actually
// been written rather than the slice index, so a blank entry no longer
// produces a leading "&" or a gap in the dir2/dir3 sequence.
func (c *Cache) buildRcloneRequestData(dirs []string) string {
	var data strings.Builder
	written := 0
	for _, dir := range dirs {
		if dir == "" {
			continue
		}
		if written == 0 {
			data.WriteString("dir=" + dir)
		} else {
			data.WriteString("&dir" + fmt.Sprint(written+1) + "=" + dir)
		}
		written++
	}
	return data.String()
}
// sendRcloneRequest POSTs form-encoded data to the rclone rc endpoint
// (e.g. "vfs/refresh") at c.config.RcUrl, using basic auth when
// RcUser/RcPass are configured. Non-200 responses are returned as an
// error that includes up to 1 KiB of the response body.
func (c *Cache) sendRcloneRequest(client *http.Client, endpoint, data string) error {
	req, err := http.NewRequest("POST", fmt.Sprintf("%s/%s", c.config.RcUrl, endpoint), strings.NewReader(data))
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
	if c.config.RcUser != "" && c.config.RcPass != "" {
		req.SetBasicAuth(c.config.RcUser, c.config.RcPass)
	}
	resp, err := client.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		// Include a bounded slice of the body for diagnostics.
		body, _ := io.ReadAll(io.LimitReader(resp.Body, 1024))
		return fmt.Errorf("failed to perform %s: %s - %s", endpoint, resp.Status, string(body))
	}
	// Drain the body so the transport can reuse the connection.
	_, _ = io.Copy(io.Discard, resp.Body)
	return nil
}
@@ -241,27 +244,10 @@ func (c *Cache) refreshDownloadLinks(ctx context.Context) {
}
defer c.downloadLinksRefreshMu.Unlock()
downloadLinks, err := c.client.GetDownloads()
if err != nil {
if err := c.client.RefreshDownloadLinks(); err != nil {
c.logger.Error().Err(err).Msg("Failed to get download links")
return
}
for k, v := range downloadLinks {
// if link is generated in the last 24 hours, add it to cache
timeSince := time.Since(v.Generated)
if timeSince < c.autoExpiresLinksAfterDuration {
c.downloadLinks.Store(k, linkCache{
Id: v.Id,
accountId: v.AccountId,
link: v.DownloadLink,
expiresAt: v.Generated.Add(c.autoExpiresLinksAfterDuration - timeSince),
})
} else {
c.downloadLinks.Delete(k)
}
}
c.logger.Trace().Msgf("Refreshed %d download links", len(downloadLinks))
c.logger.Debug().Msgf("Refreshed download links")
}

View File

@@ -1,14 +1,16 @@
package debrid
package store
import (
"context"
"errors"
"fmt"
"github.com/sirrobot01/decypharr/internal/request"
"github.com/sirrobot01/decypharr/internal/utils"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
"sync"
"time"
"github.com/puzpuzpuz/xsync/v4"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/utils"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
)
type reInsertRequest struct {
@@ -59,11 +61,12 @@ func (c *Cache) markAsSuccessfullyReinserted(torrentId string) {
}
}
func (c *Cache) IsTorrentBroken(t *CachedTorrent, filenames []string) bool {
// Check torrent files
isBroken := false
// GetBrokenFiles checks the files in the torrent for broken links.
// It also attempts to reinsert the torrent if any files are broken.
func (c *Cache) GetBrokenFiles(t *CachedTorrent, filenames []string) []string {
files := make(map[string]types.File)
repairStrategy := config.Get().Repair.Strategy
brokenFiles := make([]string, 0)
if len(filenames) > 0 {
for name, f := range t.Files {
if utils.Contains(filenames, name) {
@@ -73,8 +76,6 @@ func (c *Cache) IsTorrentBroken(t *CachedTorrent, filenames []string) bool {
} else {
files = t.Files
}
// Check empty links
for _, f := range files {
// Check if file is missing
if f.Link == "" {
@@ -83,44 +84,92 @@ func (c *Cache) IsTorrentBroken(t *CachedTorrent, filenames []string) bool {
t = newT
} else {
c.logger.Error().Str("torrentId", t.Torrent.Id).Msg("Failed to refresh torrent")
return true
return filenames // Return original filenames if refresh fails(torrent is somehow botched)
}
}
}
if t.Torrent == nil {
c.logger.Error().Str("torrentId", t.Torrent.Id).Msg("Failed to refresh torrent")
return true
return filenames // Return original filenames if refresh fails(torrent is somehow botched)
}
files = t.Files
var wg sync.WaitGroup
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// Use a mutex to protect brokenFiles slice and torrent-wide failure flag
var mu sync.Mutex
torrentWideFailed := false
wg.Add(len(files))
for _, f := range files {
// Check if file link is still missing
if f.Link == "" {
isBroken = true
break
} else {
// Check if file.Link not in the downloadLink Cache
go func(f types.File) {
defer wg.Done()
select {
case <-ctx.Done():
return
default:
}
if f.Link == "" {
mu.Lock()
if repairStrategy == config.RepairStrategyPerTorrent {
torrentWideFailed = true
mu.Unlock()
cancel() // Signal all other goroutines to stop
return
} else {
// per_file strategy - only mark this file as broken
brokenFiles = append(brokenFiles, f.Name)
}
mu.Unlock()
return
}
if err := c.client.CheckLink(f.Link); err != nil {
if errors.Is(err, request.HosterUnavailableError) {
isBroken = true
break
if errors.Is(err, utils.HosterUnavailableError) {
mu.Lock()
if repairStrategy == config.RepairStrategyPerTorrent {
torrentWideFailed = true
mu.Unlock()
cancel() // Signal all other goroutines to stop
return
} else {
// per_file strategy - only mark this file as broken
brokenFiles = append(brokenFiles, f.Name)
}
mu.Unlock()
}
}
}(f)
}
wg.Wait()
// Handle the result based on strategy
if repairStrategy == config.RepairStrategyPerTorrent && torrentWideFailed {
// Mark all files as broken for per_torrent strategy
for _, f := range files {
brokenFiles = append(brokenFiles, f.Name)
}
}
// For per_file strategy, brokenFiles already contains only the broken ones
// Try to reinsert the torrent if it's broken
if isBroken && t.Torrent != nil {
if len(brokenFiles) > 0 && t.Torrent != nil {
// Check if the torrent is already in progress
if _, err := c.reInsertTorrent(t); err != nil {
c.logger.Error().Err(err).Str("torrentId", t.Torrent.Id).Msg("Failed to reinsert torrent")
return true
return brokenFiles // Return broken files if reinsert fails
}
return false
return nil // Return nil if the torrent was successfully reinserted
}
return isBroken
return brokenFiles
}
func (c *Cache) repairWorker(ctx context.Context) {
@@ -172,8 +221,7 @@ func (c *Cache) reInsertTorrent(ct *CachedTorrent) (*CachedTorrent, error) {
if _, ok := c.failedToReinsert.Load(oldID); ok {
return ct, fmt.Errorf("can't retry re-insert for %s", torrent.Id)
}
if reqI, inFlight := c.repairRequest.Load(oldID); inFlight {
req := reqI.(*reInsertRequest)
if req, inFlight := c.repairRequest.Load(oldID); inFlight {
c.logger.Debug().Msgf("Waiting for existing reinsert request to complete for torrent %s", oldID)
return req.Wait()
}
@@ -187,12 +235,13 @@ func (c *Cache) reInsertTorrent(ct *CachedTorrent) (*CachedTorrent, error) {
// Submit the magnet to the debrid service
newTorrent := &types.Torrent{
Name: torrent.Name,
Magnet: utils.ConstructMagnet(torrent.InfoHash, torrent.Name),
InfoHash: torrent.InfoHash,
Size: torrent.Size,
Files: make(map[string]types.File),
Arr: torrent.Arr,
Name: torrent.Name,
Magnet: utils.ConstructMagnet(torrent.InfoHash, torrent.Name),
InfoHash: torrent.InfoHash,
Size: torrent.Size,
Files: make(map[string]types.File),
Arr: torrent.Arr,
DownloadUncached: false,
}
var err error
newTorrent, err = c.client.SubmitMagnet(newTorrent)
@@ -208,14 +257,14 @@ func (c *Cache) reInsertTorrent(ct *CachedTorrent) (*CachedTorrent, error) {
return ct, fmt.Errorf("failed to submit magnet: empty torrent")
}
newTorrent.DownloadUncached = false // Set to false, avoid re-downloading
newTorrent, err = c.client.CheckStatus(newTorrent, true)
newTorrent, err = c.client.CheckStatus(newTorrent)
if err != nil {
if newTorrent != nil && newTorrent.Id != "" {
// Delete the torrent if it was not downloaded
_ = c.client.DeleteTorrent(newTorrent.Id)
}
c.markAsFailedToReinsert(oldID)
return ct, err
return ct, fmt.Errorf("failed to check torrent: %w", err)
}
// Update the torrent in the cache
@@ -223,7 +272,7 @@ func (c *Cache) reInsertTorrent(ct *CachedTorrent) (*CachedTorrent, error) {
if err != nil {
addedOn = time.Now()
}
for _, f := range newTorrent.Files {
for _, f := range newTorrent.GetFiles() {
if f.Link == "" {
c.markAsFailedToReinsert(oldID)
return ct, fmt.Errorf("failed to reinsert torrent: empty link")
@@ -248,7 +297,7 @@ func (c *Cache) reInsertTorrent(ct *CachedTorrent) (*CachedTorrent, error) {
}
}
req.Complete(ct, err)
req.Complete(ct, nil)
c.markAsSuccessfullyReinserted(oldID)
c.logger.Debug().Str("torrentId", torrent.Id).Msg("Torrent successfully reinserted")
@@ -256,7 +305,10 @@ func (c *Cache) reInsertTorrent(ct *CachedTorrent) (*CachedTorrent, error) {
return ct, nil
}
func (c *Cache) resetInvalidLinks() {
c.invalidDownloadLinks = sync.Map{}
c.client.ResetActiveDownloadKeys() // Reset the active download keys
// resetInvalidLinks clears all invalid-link markers, resets the account
// manager's active download keys, and then re-primes the download-link
// cache from the debrid service.
func (c *Cache) resetInvalidLinks(ctx context.Context) {
	c.logger.Debug().Msgf("Resetting accounts")
	// Replace rather than clear: readers holding the old map see a
	// consistent (stale) view while the new one takes over.
	c.invalidDownloadLinks = xsync.NewMap[string, string]()
	c.client.AccountManager().Reset() // Reset the active download keys
	// Refresh the download links
	c.refreshDownloadLinks(ctx)
}

476
pkg/debrid/store/torrent.go Normal file
View File

@@ -0,0 +1,476 @@
package store
import (
"fmt"
"os"
"regexp"
"sort"
"strings"
"sync"
"sync/atomic"
"time"
)
// Directory-filter type identifiers as they appear in configuration.
const (
	filterByInclude       string = "include"
	filterByExclude       string = "exclude"
	filterByStartsWith    string = "starts_with"
	filterByEndsWith      string = "ends_with"
	filterByNotStartsWith string = "not_starts_with"
	filterByNotEndsWith   string = "not_ends_with"
	filterByRegex         string = "regex"
	filterByNotRegex      string = "not_regex"
	filterByExactMatch    string = "exact_match"
	filterByNotExactMatch string = "not_exact_match"
	filterBySizeGT        string = "size_gt"
	filterBySizeLT        string = "size_lt"
	filterBLastAdded      string = "last_added"
)

// directoryFilter is one compiled predicate applied to a torrent when
// building a virtual directory view.
type directoryFilter struct {
	filterType    string
	value         string
	regex         *regexp.Regexp // only for regex/not_regex
	sizeThreshold int64          // only for size_gt/size_lt
	ageThreshold  time.Duration  // only for last_added
}

// folders guards the per-folder (virtual directory) listings.
type folders struct {
	sync.RWMutex
	listing map[string][]os.FileInfo // folder name to file listing
}

// CachedTorrentEntry wraps a CachedTorrent with a tombstone flag so
// deletions avoid shifting the backing slice.
type CachedTorrentEntry struct {
	CachedTorrent
	deleted bool // Tombstone flag
}

// torrentCache is an in-memory torrent store with ID and name indices
// into a tombstoned slice, a lazily rebuilt sorted listing, and
// filter-driven per-folder views.
type torrentCache struct {
	mu       sync.RWMutex
	torrents []CachedTorrentEntry // Changed to store entries with tombstone

	// Lookup indices (positions into torrents; never point at tombstones)
	idIndex   map[string]int
	nameIndex map[string]int

	// Compaction tracking
	deletedCount     atomic.Int32
	compactThreshold int // Trigger compaction when deletedCount exceeds this

	listing            atomic.Value // []os.FileInfo, the sorted root listing
	folders            folders
	directoriesFilters map[string][]directoryFilter
	sortNeeded         atomic.Bool // set when listing must be rebuilt
}

// sortableFile is a lightweight projection of a torrent used while
// sorting and building listings.
type sortableFile struct {
	id      string
	name    string
	modTime time.Time
	size    int64
	bad     bool
}
// newTorrentCache builds an empty torrentCache wired with the given
// per-directory filters. Compaction of tombstoned entries is triggered
// once more than 100 deletions accumulate.
func newTorrentCache(dirFilters map[string][]directoryFilter) *torrentCache {
	tc := &torrentCache{
		torrents:           make([]CachedTorrentEntry, 0),
		idIndex:            map[string]int{},
		nameIndex:          map[string]int{},
		compactThreshold:   100, // compact once 100+ tombstones pile up
		folders:            folders{listing: map[string][]os.FileInfo{}},
		directoriesFilters: dirFilters,
	}
	// Prime the atomics so readers never see an unset value.
	tc.sortNeeded.Store(false)
	tc.listing.Store(make([]os.FileInfo, 0))
	return tc
}
// reset drops every torrent, index, and cached listing, returning the
// cache to its freshly-constructed state. The torrent slice keeps its
// capacity for reuse.
func (tc *torrentCache) reset() {
	tc.mu.Lock()
	tc.torrents = tc.torrents[:0]
	tc.idIndex = map[string]int{}
	tc.nameIndex = map[string]int{}
	tc.deletedCount.Store(0)
	tc.mu.Unlock()

	// Invalidate the flat sorted listing.
	tc.sortNeeded.Store(false)
	tc.listing.Store(make([]os.FileInfo, 0))

	// Drop every per-folder view.
	tc.folders.Lock()
	tc.folders.listing = map[string][]os.FileInfo{}
	tc.folders.Unlock()
}
// getByID returns the live (non-tombstoned) torrent for id, if any.
func (tc *torrentCache) getByID(id string) (CachedTorrent, bool) {
	tc.mu.RLock()
	defer tc.mu.RUnlock()
	index, exists := tc.idIndex[id]
	if !exists || index >= len(tc.torrents) {
		return CachedTorrent{}, false
	}
	if entry := tc.torrents[index]; !entry.deleted {
		return entry.CachedTorrent, true
	}
	return CachedTorrent{}, false
}
// getByName returns the live (non-tombstoned) torrent stored under
// name, if any.
func (tc *torrentCache) getByName(name string) (CachedTorrent, bool) {
	tc.mu.RLock()
	defer tc.mu.RUnlock()
	index, exists := tc.nameIndex[name]
	if !exists || index >= len(tc.torrents) {
		return CachedTorrent{}, false
	}
	if entry := tc.torrents[index]; !entry.deleted {
		return entry.CachedTorrent, true
	}
	return CachedTorrent{}, false
}
// set inserts torrent under name, or updates it in place when an entry
// with the same ID is already live. Either way the sorted listing is
// marked stale.
//
// NOTE(review): the update path does not touch nameIndex, so if a
// torrent's display name changes between calls, the old name keeps
// pointing at this entry and the new name is never indexed — confirm
// callers never rename an existing ID.
func (tc *torrentCache) set(name string, torrent CachedTorrent) {
	tc.mu.Lock()
	defer tc.mu.Unlock()
	// Check if this torrent already exists (update case)
	if existingIndex, exists := tc.idIndex[torrent.Id]; exists && existingIndex < len(tc.torrents) {
		if !tc.torrents[existingIndex].deleted {
			// Update existing entry
			tc.torrents[existingIndex].CachedTorrent = torrent
			tc.sortNeeded.Store(true)
			return
		}
	}
	// Add new torrent
	entry := CachedTorrentEntry{
		CachedTorrent: torrent,
		deleted:       false,
	}
	tc.torrents = append(tc.torrents, entry)
	index := len(tc.torrents) - 1
	tc.idIndex[torrent.Id] = index
	tc.nameIndex[name] = index
	tc.sortNeeded.Store(true)
}
// removeId tombstones the torrent with the given id: the entry is
// flagged deleted in place (the slice is not shifted) and both indices
// drop their references. Compaction runs asynchronously once enough
// tombstones accumulate.
func (tc *torrentCache) removeId(id string) {
	tc.mu.Lock()
	defer tc.mu.Unlock()
	if index, exists := tc.idIndex[id]; exists && index < len(tc.torrents) {
		if !tc.torrents[index].deleted {
			// Mark as deleted (tombstone)
			tc.torrents[index].deleted = true
			tc.deletedCount.Add(1)
			// Remove from indices
			delete(tc.idIndex, id)
			// Find and remove from name index. O(n) reverse lookup: the
			// entry does not record the name it was stored under.
			for name, idx := range tc.nameIndex {
				if idx == index {
					delete(tc.nameIndex, name)
					break
				}
			}
			tc.sortNeeded.Store(true)
			// Trigger compaction if threshold exceeded. Must run in a
			// goroutine: compact() takes tc.mu, which we still hold here.
			if tc.deletedCount.Load() > int32(tc.compactThreshold) {
				go tc.compact()
			}
		}
	}
}
// remove tombstones the torrent stored under name. Mirror of removeId,
// but keyed by name: the ID for the id-index removal is read from the
// entry itself, so no reverse scan is needed here.
func (tc *torrentCache) remove(name string) {
	tc.mu.Lock()
	defer tc.mu.Unlock()
	if index, exists := tc.nameIndex[name]; exists && index < len(tc.torrents) {
		if !tc.torrents[index].deleted {
			// Mark as deleted (tombstone)
			torrentID := tc.torrents[index].CachedTorrent.Id
			tc.torrents[index].deleted = true
			tc.deletedCount.Add(1)
			// Remove from indices
			delete(tc.nameIndex, name)
			delete(tc.idIndex, torrentID)
			tc.sortNeeded.Store(true)
			// Trigger compaction if threshold exceeded; asynchronous
			// because compact() re-acquires tc.mu.
			if tc.deletedCount.Load() > int32(tc.compactThreshold) {
				go tc.compact()
			}
		}
	}
}
// Compact removes tombstoned entries and rebuilds indices.
// Cost is O(n*m) in the worst case because each surviving entry does a
// reverse scan of nameIndex to recover its name key.
func (tc *torrentCache) compact() {
	tc.mu.Lock()
	defer tc.mu.Unlock()
	deletedCount := tc.deletedCount.Load()
	if deletedCount == 0 {
		return // Nothing to compact
	}
	// Create new slice with only non-deleted entries
	newTorrents := make([]CachedTorrentEntry, 0, len(tc.torrents)-int(deletedCount))
	newIdIndex := make(map[string]int, len(tc.idIndex))
	newNameIndex := make(map[string]int, len(tc.nameIndex))
	// Copy non-deleted entries
	for oldIndex, entry := range tc.torrents {
		if !entry.deleted {
			newIndex := len(newTorrents)
			newTorrents = append(newTorrents, entry)
			// Find the name for this torrent (reverse lookup)
			for name, nameIndex := range tc.nameIndex {
				if nameIndex == oldIndex {
					newNameIndex[name] = newIndex
					break
				}
			}
			newIdIndex[entry.CachedTorrent.Id] = newIndex
		}
	}
	// Replace old data with compacted data
	tc.torrents = newTorrents
	tc.idIndex = newIdIndex
	tc.nameIndex = newNameIndex
	tc.deletedCount.Store(0)
	tc.sortNeeded.Store(true)
}
// ForceCompact runs compaction immediately, regardless of the tombstone
// threshold.
func (tc *torrentCache) ForceCompact() {
	tc.compact()
}
// GetStats reports the backing-slice length (total), the live entry
// count (active), and the tombstone count (deleted).
func (tc *torrentCache) GetStats() (total, active, deleted int) {
	tc.mu.RLock()
	defer tc.mu.RUnlock()
	deleted = int(tc.deletedCount.Load())
	total = len(tc.torrents)
	active = total - deleted
	return total, active, deleted
}
// refreshListing rebuilds the sorted root listing, the synthetic
// "__bad__" folder view, and every filter-driven directory view from
// the current live entries. The snapshot is taken under RLock; each
// view is then built in its own goroutine and published via tc.listing
// or tc.folders before the function returns.
func (tc *torrentCache) refreshListing() {
	tc.mu.RLock()
	// Snapshot live (non-tombstoned) entries as lightweight records.
	all := make([]sortableFile, 0, len(tc.nameIndex))
	for name, index := range tc.nameIndex {
		if index < len(tc.torrents) && !tc.torrents[index].deleted {
			t := tc.torrents[index].CachedTorrent
			all = append(all, sortableFile{t.Id, name, t.AddedOn, t.Bytes, t.Bad})
		}
	}
	// Clear the dirty flag while still under the lock: a set() that
	// lands after this point re-marks the listing stale.
	tc.sortNeeded.Store(false)
	tc.mu.RUnlock()
	// Sort by name, tie-breaking on added time.
	sort.Slice(all, func(i, j int) bool {
		if all[i].name != all[j].name {
			return all[i].name < all[j].name
		}
		return all[i].modTime.Before(all[j].modTime)
	})
	wg := sync.WaitGroup{}
	wg.Add(1) // for all listing
	go func() {
		defer wg.Done()
		// Root listing: one directory entry per torrent.
		listing := make([]os.FileInfo, len(all))
		for i, sf := range all {
			listing[i] = &fileInfo{sf.id, sf.name, sf.size, 0755 | os.ModeDir, sf.modTime, true}
		}
		tc.listing.Store(listing)
	}()
	wg.Add(1)
	// For __bad__
	go func() {
		defer wg.Done()
		// Torrents flagged bad appear under "__bad__", named
		// "<name> || <id>" so the ID is visible in the listing.
		listing := make([]os.FileInfo, 0)
		for _, sf := range all {
			if sf.bad {
				listing = append(listing, &fileInfo{
					id:      sf.id,
					name:    fmt.Sprintf("%s || %s", sf.name, sf.id),
					size:    sf.size,
					mode:    0755 | os.ModeDir,
					modTime: sf.modTime,
					isDir:   true,
				})
			}
		}
		tc.folders.Lock()
		if len(listing) > 0 {
			tc.folders.listing["__bad__"] = listing
		} else {
			delete(tc.folders.listing, "__bad__")
		}
		tc.folders.Unlock()
	}()
	now := time.Now()
	// directoriesFilters is read without tc.mu here — presumably it is
	// immutable after construction; confirm no writer mutates it.
	wg.Add(len(tc.directoriesFilters)) // for each directory filter
	for dir, filters := range tc.directoriesFilters {
		go func(dir string, filters []directoryFilter) {
			defer wg.Done()
			var matched []os.FileInfo
			for _, sf := range all {
				if tc.torrentMatchDirectory(filters, sf, now) {
					matched = append(matched, &fileInfo{
						id:   sf.id,
						name: sf.name, size: sf.size,
						mode: 0755 | os.ModeDir, modTime: sf.modTime, isDir: true,
					})
				}
			}
			tc.folders.Lock()
			if len(matched) > 0 {
				tc.folders.listing[dir] = matched
			} else {
				delete(tc.folders.listing, dir)
			}
			tc.folders.Unlock()
		}(dir, filters)
	}
	wg.Wait()
}
// getListing returns the flat sorted root listing, rebuilding it first
// when the cache has changed since the last sort. Concurrent callers
// may both take the slow path and rebuild; the result is the same.
func (tc *torrentCache) getListing() []os.FileInfo {
	// Fast path: if we have a sorted list and no changes since last sort
	if !tc.sortNeeded.Load() {
		return tc.listing.Load().([]os.FileInfo)
	}
	// Slow path: need to sort
	tc.refreshListing()
	return tc.listing.Load().([]os.FileInfo)
}
// getFolderListing returns the cached listing for folderName; the empty
// name means the root listing. Unknown folders yield an empty slice.
//
// BUG FIX: the root case previously called getListing while still
// holding folders.RLock. When the listing needed a refresh,
// refreshListing would then try to take folders.Lock on the same
// RWMutex and deadlock. The root case is now handled before the read
// lock is taken.
func (tc *torrentCache) getFolderListing(folderName string) []os.FileInfo {
	if folderName == "" {
		return tc.getListing()
	}
	tc.folders.RLock()
	defer tc.folders.RUnlock()
	if folder, ok := tc.folders.listing[folderName]; ok {
		return folder
	}
	// If folder not found, return empty slice
	return []os.FileInfo{}
}
// torrentMatchDirectory reports whether file satisfies every filter in
// filters (filters are AND-ed: the first miss short-circuits to false).
func (tc *torrentCache) torrentMatchDirectory(filters []directoryFilter, file sortableFile, now time.Time) bool {
	name := strings.ToLower(file.name)
	for _, f := range filters {
		ok := false
		switch f.filterType {
		case filterByInclude:
			ok = strings.Contains(name, f.value)
		case filterByExclude:
			ok = !strings.Contains(name, f.value)
		case filterByStartsWith:
			ok = strings.HasPrefix(name, f.value)
		case filterByNotStartsWith:
			ok = !strings.HasPrefix(name, f.value)
		case filterByEndsWith:
			ok = strings.HasSuffix(name, f.value)
		case filterByNotEndsWith:
			ok = !strings.HasSuffix(name, f.value)
		case filterByExactMatch:
			ok = name == f.value
		case filterByNotExactMatch:
			ok = name != f.value
		case filterByRegex:
			ok = f.regex.MatchString(name)
		case filterByNotRegex:
			ok = !f.regex.MatchString(name)
		case filterBySizeGT:
			ok = file.size > f.sizeThreshold
		case filterBySizeLT:
			ok = file.size < f.sizeThreshold
		case filterBLastAdded:
			// Matches files modified within the configured age window.
			ok = file.modTime.After(now.Add(-f.ageThreshold))
		}
		if !ok {
			return false // all filters must match
		}
	}
	// Every filter matched (vacuously true for an empty filter list).
	return true
}
// getAll returns a snapshot map of all live (non-deleted) torrents keyed by id.
func (tc *torrentCache) getAll() map[string]CachedTorrent {
	tc.mu.RLock()
	defer tc.mu.RUnlock()
	out := make(map[string]CachedTorrent, len(tc.torrents))
	for i := range tc.torrents {
		if tc.torrents[i].deleted {
			continue
		}
		out[tc.torrents[i].CachedTorrent.Id] = tc.torrents[i].CachedTorrent
	}
	return out
}
// getAllCount returns the number of live torrents: total stored entries
// minus the running tally of soft-deleted ones.
func (tc *torrentCache) getAllCount() int {
	tc.mu.RLock()
	total := len(tc.torrents)
	tc.mu.RUnlock()
	return total - int(tc.deletedCount.Load())
}
// getAllByName returns a snapshot of live torrents keyed by name, resolved
// through nameIndex. Stale (out-of-range) or deleted entries are skipped.
func (tc *torrentCache) getAllByName() map[string]CachedTorrent {
	tc.mu.RLock()
	defer tc.mu.RUnlock()
	out := make(map[string]CachedTorrent, len(tc.nameIndex))
	for name, idx := range tc.nameIndex {
		if idx >= len(tc.torrents) || tc.torrents[idx].deleted {
			continue
		}
		out[name] = tc.torrents[idx].CachedTorrent
	}
	return out
}
// getIdMaps returns the set of ids of live torrents as a map usable for
// O(1) membership checks.
func (tc *torrentCache) getIdMaps() map[string]struct{} {
	tc.mu.RLock()
	defer tc.mu.RUnlock()
	ids := make(map[string]struct{}, len(tc.idIndex))
	for id, idx := range tc.idIndex {
		if idx >= len(tc.torrents) || tc.torrents[idx].deleted {
			continue
		}
		ids[id] = struct{}{}
	}
	return ids
}

View File

@@ -1,4 +1,4 @@
package debrid
package store
import (
"context"
@@ -6,9 +6,12 @@ import (
"github.com/sirrobot01/decypharr/internal/utils"
)
func (c *Cache) StartSchedule(ctx context.Context) error {
func (c *Cache) StartWorker(ctx context.Context) error {
// For now, we just want to refresh the listing and download links
// Stop any existing jobs before starting new ones
c.scheduler.RemoveByTags("decypharr-%s", c.GetConfig().Name)
// Schedule download link refresh job
if jd, err := utils.ConvertToJobDef(c.downloadLinksRefreshInterval); err != nil {
c.logger.Error().Err(err).Msg("Failed to convert download link refresh interval to job definition")
@@ -45,7 +48,7 @@ func (c *Cache) StartSchedule(ctx context.Context) error {
} else {
// Schedule the job
if _, err := c.cetScheduler.NewJob(jd, gocron.NewTask(func() {
c.resetInvalidLinks()
c.resetInvalidLinks(ctx)
}), gocron.WithContext(ctx)); err != nil {
c.logger.Error().Err(err).Msg("Failed to create link reset job")
} else {

1
pkg/debrid/store/xml.go Normal file
View File

@@ -0,0 +1 @@
package store

View File

@@ -1,28 +0,0 @@
package types
import (
"github.com/rs/zerolog"
)
type Client interface {
SubmitMagnet(tr *Torrent) (*Torrent, error)
CheckStatus(tr *Torrent, isSymlink bool) (*Torrent, error)
GenerateDownloadLinks(tr *Torrent) error
GetDownloadLink(tr *Torrent, file *File) (*DownloadLink, error)
DeleteTorrent(torrentId string) error
IsAvailable(infohashes []string) map[string]bool
GetCheckCached() bool
GetDownloadUncached() bool
UpdateTorrent(torrent *Torrent) error
GetTorrent(torrentId string) (*Torrent, error)
GetTorrents() ([]*Torrent, error)
GetName() string
GetLogger() zerolog.Logger
GetDownloadingStatus() []string
GetDownloads() (map[string]DownloadLink, error)
CheckLink(link string) error
GetMountPath() string
DisableAccount(string)
ResetActiveDownloadKeys()
DeleteDownloadLink(linkId string) error
}

30
pkg/debrid/types/error.go Normal file
View File

@@ -0,0 +1,30 @@
package types
// Error is a debrid error carrying a human-readable message plus a stable
// machine-readable code for programmatic matching.
type Error struct {
	Message string `json:"message"`
	Code    string `json:"code"`
}

// Error implements the error interface by returning the message text.
func (e *Error) Error() string {
	return e.Message
}
// Sentinel errors shared by debrid clients; compare with errors.Is.
// NOTE(review): naming mixes the XxxError and ErrXxx styles — Go convention
// is ErrXxx, but renaming would break callers, so it is only flagged here.

// NoActiveAccountsError signals that no usable account is available.
var NoActiveAccountsError = &Error{
	Message: "No active accounts",
	Code:    "no_active_accounts",
}

// ErrDownloadLinkNotFound signals that no download link could be resolved.
var ErrDownloadLinkNotFound = &Error{
	Message: "No download link found",
	Code:    "no_download_link",
}

// DownloadLinkExpiredError signals that a previously issued link has expired.
var DownloadLinkExpiredError = &Error{
	Message: "Download link expired",
	Code:    "download_link_expired",
}

// EmptyDownloadLinkError signals a link record with an empty URL.
var EmptyDownloadLinkError = &Error{
	Message: "Download link is empty",
	Code:    "empty_download_link",
}

View File

@@ -2,13 +2,15 @@ package types
import (
"fmt"
"github.com/sirrobot01/decypharr/internal/logger"
"github.com/sirrobot01/decypharr/internal/utils"
"github.com/sirrobot01/decypharr/pkg/arr"
"net/url"
"os"
"path/filepath"
"sync"
"time"
"github.com/sirrobot01/decypharr/internal/logger"
"github.com/sirrobot01/decypharr/internal/utils"
"github.com/sirrobot01/decypharr/pkg/arr"
)
type Torrent struct {
@@ -29,27 +31,48 @@ type Torrent struct {
Seeders int `json:"seeders"`
Links []string `json:"links"`
MountPath string `json:"mount_path"`
DeletedFiles []string `json:"deleted_files"`
Debrid string `json:"debrid"`
Arr *arr.Arr `json:"arr"`
Mu sync.Mutex `json:"-"`
SizeDownloaded int64 `json:"-"` // This is used for local download
DownloadUncached bool `json:"-"`
Arr *arr.Arr `json:"arr"`
SizeDownloaded int64 `json:"-"` // This is used for local download
DownloadUncached bool `json:"-"`
sync.Mutex
}
type DownloadLink struct {
Filename string `json:"filename"`
Link string `json:"link"`
DownloadLink string `json:"download_link"`
Generated time.Time `json:"generated"`
Size int64 `json:"size"`
Id string `json:"id"`
AccountId string `json:"account_id"`
}
func (t *Torrent) Copy() *Torrent {
t.Lock()
defer t.Unlock()
func (d *DownloadLink) String() string {
return d.DownloadLink
newFiles := make(map[string]File, len(t.Files))
for k, v := range t.Files {
newFiles[k] = v
}
return &Torrent{
Id: t.Id,
InfoHash: t.InfoHash,
Name: t.Name,
Folder: t.Folder,
Filename: t.Filename,
OriginalFilename: t.OriginalFilename,
Size: t.Size,
Bytes: t.Bytes,
Magnet: t.Magnet,
Files: newFiles,
Status: t.Status,
Added: t.Added,
Progress: t.Progress,
Speed: t.Speed,
Seeders: t.Seeders,
Links: append([]string{}, t.Links...),
MountPath: t.MountPath,
Debrid: t.Debrid,
Arr: t.Arr,
}
}
func (t *Torrent) GetSymlinkFolder(parent string) string {
@@ -75,16 +98,37 @@ func (t *Torrent) GetMountFolder(rClonePath string) (string, error) {
return "", fmt.Errorf("no path found")
}
func (t *Torrent) GetFile(filename string) (File, bool) {
f, ok := t.Files[filename]
if !ok {
return File{}, false
}
return f, !f.Deleted
}
func (t *Torrent) GetFiles() []File {
files := make([]File, 0, len(t.Files))
for _, f := range t.Files {
if !f.Deleted {
files = append(files, f)
}
}
return files
}
type File struct {
TorrentId string `json:"torrent_id"`
Id string `json:"id"`
Name string `json:"name"`
Size int64 `json:"size"`
Path string `json:"path"`
Link string `json:"link"`
DownloadLink *DownloadLink `json:"-"`
AccountId string `json:"account_id"`
Generated time.Time `json:"generated"`
TorrentId string `json:"torrent_id"`
Id string `json:"id"`
Name string `json:"name"`
Size int64 `json:"size"`
IsRar bool `json:"is_rar"`
ByteRange *[2]int64 `json:"byte_range,omitempty"`
Path string `json:"path"`
Link string `json:"link"`
AccountId string `json:"account_id"`
Generated time.Time `json:"generated"`
Deleted bool `json:"deleted"`
DownloadLink DownloadLink `json:"-"`
}
func (t *Torrent) Cleanup(remove bool) {
@@ -96,18 +140,69 @@ func (t *Torrent) Cleanup(remove bool) {
}
}
func (t *Torrent) GetFile(id string) *File {
for _, f := range t.Files {
if f.Id == id {
return &f
}
type IngestData struct {
Debrid string `json:"debrid"`
Name string `json:"name"`
Hash string `json:"hash"`
Size int64 `json:"size"`
}
type LibraryStats struct {
Total int `json:"total"`
Bad int `json:"bad"`
ActiveLinks int `json:"active_links"`
}
type Stats struct {
Profile *Profile `json:"profile"`
Library LibraryStats `json:"library"`
Accounts []map[string]any `json:"accounts"`
}
type Profile struct {
Name string `json:"name"`
Id int64 `json:"id"`
Username string `json:"username"`
Email string `json:"email"`
Points int `json:"points"`
Type string `json:"type"`
Premium int64 `json:"premium"`
Expiration time.Time `json:"expiration"`
}
type DownloadLink struct {
Debrid string `json:"debrid"`
Token string `json:"token"`
Filename string `json:"filename"`
Link string `json:"link"`
DownloadLink string `json:"download_link"`
Generated time.Time `json:"generated"`
Size int64 `json:"size"`
Id string `json:"id"`
ExpiresAt time.Time
}
func isValidURL(str string) bool {
u, err := url.Parse(str)
// A valid URL should parse without error, and have a non-empty scheme and host.
return err == nil && u.Scheme != "" && u.Host != ""
}
func (dl *DownloadLink) Valid() error {
if dl.Empty() {
return EmptyDownloadLinkError
}
// Check if the link is actually a valid URL
if !isValidURL(dl.DownloadLink) {
return ErrDownloadLinkNotFound
}
return nil
}
type Account struct {
ID string `json:"id"`
Disabled bool `json:"disabled"`
Name string `json:"name"`
Token string `json:"token"`
func (dl *DownloadLink) Empty() bool {
return dl.DownloadLink == ""
}
func (dl *DownloadLink) String() string {
return dl.DownloadLink
}

282
pkg/qbit/context.go Normal file
View File

@@ -0,0 +1,282 @@
package qbit
import (
"context"
"crypto/sha256"
"encoding/base64"
"fmt"
"net/http"
"net/url"
"strings"
"github.com/go-chi/chi/v5"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/pkg/arr"
"github.com/sirrobot01/decypharr/pkg/wire"
"golang.org/x/crypto/bcrypt"
)
// contextKey is a private type for request-context keys, preventing
// collisions with context values set by other packages.
type contextKey string

const (
	categoryKey contextKey = "category" // qBittorrent category name
	hashesKey   contextKey = "hashes"   // torrent info-hashes from URL/form
	arrKey      contextKey = "arr"      // authenticated *arr.Arr for the request
)
// validateServiceURL checks that urlStr is either a full http(s) URL or a
// bare host:port pair. It returns nil when the value is acceptable and a
// descriptive error otherwise.
func validateServiceURL(urlStr string) error {
	if urlStr == "" {
		return fmt.Errorf("URL cannot be empty")
	}

	// Case 1: a complete URL with both scheme and host present.
	if parsed, err := url.Parse(urlStr); err == nil && parsed.Scheme != "" && parsed.Host != "" {
		switch parsed.Scheme {
		case "http", "https":
			return nil
		default:
			return fmt.Errorf("URL scheme must be http or https")
		}
	}

	// Case 2: host:port with no scheme — validate by prefixing "http://".
	if strings.Contains(urlStr, ":") && !strings.Contains(urlStr, "://") {
		parsed, err := url.Parse("http://" + urlStr)
		if err != nil {
			return fmt.Errorf("invalid host:port format: %w", err)
		}
		if parsed.Host == "" {
			return fmt.Errorf("host is required in host:port format")
		}
		if parsed.Port() == "" {
			return fmt.Errorf("port is required in host:port format")
		}
		return nil
	}

	return fmt.Errorf("invalid URL format: %s", urlStr)
}
// getCategory pulls the qBittorrent category out of the request context,
// returning "" when none was stored.
func getCategory(ctx context.Context) string {
	category, _ := ctx.Value(categoryKey).(string)
	return category
}
// getHashes pulls the info-hash list out of the request context, returning
// nil when none was stored.
func getHashes(ctx context.Context) []string {
	hashes, _ := ctx.Value(hashesKey).([]string)
	return hashes
}
// getArrFromContext pulls the authenticated Arr instance out of the request
// context, returning nil when none was stored.
func getArrFromContext(ctx context.Context) *arr.Arr {
	a, _ := ctx.Value(arrKey).(*arr.Arr)
	return a
}
func decodeAuthHeader(header string) (string, string, error) {
encodedTokens := strings.Split(header, " ")
if len(encodedTokens) != 2 {
return "", "", nil
}
encodedToken := encodedTokens[1]
bytes, err := base64.StdEncoding.DecodeString(encodedToken)
if err != nil {
return "", "", err
}
bearer := string(bytes)
colonIndex := strings.LastIndex(bearer, ":")
username := bearer[:colonIndex]
password := bearer[colonIndex+1:]
if username == "" || password == "" {
return username, password, fmt.Errorf("empty username or password")
}
return strings.TrimSpace(username), strings.TrimSpace(password), nil
}
// categoryContext is middleware that extracts the torrent category from the
// query string, the urlencoded form, or the multipart form (in that order),
// trims it, and stores it in the request context under categoryKey.
func (q *QBit) categoryContext(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		category := r.URL.Query().Get("category")
		if category == "" {
			_ = r.ParseForm()
			category = r.Form.Get("category")
		}
		if category == "" {
			_ = r.ParseMultipartForm(32 << 20)
			category = r.FormValue("category")
		}
		ctx := context.WithValue(r.Context(), categoryKey, strings.TrimSpace(category))
		next.ServeHTTP(w, r.WithContext(ctx))
	})
}
// authContext creates a middleware that extracts the Arr host and token from
// the Authorization header (or session cookie) and adds the authenticated
// Arr instance to the request context. This is used to identify the Arr
// instance for the request; only a valid host and token are added to the
// context/config, the rest are manual.
func (q *QBit) authContext(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		username, password, err := getUsernameAndPassword(r)
		if err != nil {
			http.Error(w, err.Error(), http.StatusUnauthorized)
			return
		}
		a, err := q.authenticate(getCategory(r.Context()), username, password)
		if err != nil {
			http.Error(w, err.Error(), http.StatusUnauthorized)
			return
		}
		ctx := context.WithValue(r.Context(), arrKey, a)
		next.ServeHTTP(w, r.WithContext(ctx))
	})
}
// getUsernameAndPassword extracts credentials for a request, trying the
// Basic Authorization header first and falling back to the qBittorrent
// session cookie ("sid", then "SID").
//
// NOTE(review): when neither source yields credentials this returns
// ("", "", nil) — callers must treat empty credentials as unauthenticated
// rather than relying on a non-nil error.
func getUsernameAndPassword(r *http.Request) (string, string, error) {
	// Try to get from authorization header
	username, password, err := decodeAuthHeader(r.Header.Get("Authorization"))
	if err == nil && username != "" {
		return username, password, err
	}
	// Try to get from cookie
	sid, err := r.Cookie("sid")
	if err != nil {
		// try SID
		sid, err = r.Cookie("SID")
	}
	if err == nil {
		// A present-but-invalid SID is a hard failure.
		username, password, err = extractFromSID(sid.Value)
		if err != nil {
			return "", "", err
		}
	}
	return username, password, nil
}
// authenticate resolves (or lazily creates) the Arr instance for category
// and binds the supplied credentials to it as Host/Token — the caller passes
// the Arr host as username and its API token as password (see authContext).
// When global auth is enabled, the pair must validate either against the Arr
// itself or against the configured UI credentials. The (possibly new) Arr is
// persisted via AddOrUpdate before returning.
func (q *QBit) authenticate(category, username, password string) (*arr.Arr, error) {
	cfg := config.Get()
	arrs := wire.Get().Arr()
	// Check if arr exists
	a := arrs.Get(category)
	if a == nil {
		// Arr is not configured, create a new one
		downloadUncached := false
		a = arr.New(category, "", "", false, false, &downloadUncached, "", "auto")
	}
	a.Host = username
	a.Token = password
	if cfg.UseAuth {
		if a.Host == "" || a.Token == "" {
			return nil, fmt.Errorf("unauthorized: Host and token are required for authentication(you've enabled authentication)")
		}
		// try to use either Arr validate, or user auth validation
		if err := a.Validate(); err != nil {
			// If this failed, try to use user auth validation
			if !verifyAuth(username, password) {
				return nil, fmt.Errorf("unauthorized: invalid credentials")
			}
		}
	}
	// Mark as auto-registered and persist.
	a.Source = "auto"
	arrs.AddOrUpdate(a)
	return a, nil
}
// createSID builds a session id: base64("username|password|sig") where sig
// is a truncated SHA-256 over the credentials plus the server secret, making
// the cookie tamper-evident. Reversed/verified by extractFromSID.
func createSID(username, password string) string {
	combined := fmt.Sprintf("%s|%s", username, password)
	sum := sha256.Sum256([]byte(combined + config.Get().SecretKey()))
	sig := fmt.Sprintf("%x", sum)[:16] // first 16 hex chars
	return base64.URLEncoding.EncodeToString([]byte(combined + "|" + sig))
}
// extractFromSID reverses createSID: it base64-decodes the session id,
// splits it into username|password|signature, and verifies the signature
// against the server secret before returning the credentials.
func extractFromSID(sid string) (string, string, error) {
	decoded, err := base64.URLEncoding.DecodeString(sid)
	if err != nil {
		return "", "", fmt.Errorf("invalid SID format")
	}
	parts := strings.Split(string(decoded), "|")
	if len(parts) != 3 {
		// NOTE(review): a username or password containing '|' breaks this
		// scheme; such SIDs are rejected here.
		return "", "", fmt.Errorf("invalid SID structure")
	}
	username, password, provided := parts[0], parts[1], parts[2]
	payload := fmt.Sprintf("%s|%s", username, password)
	sum := sha256.Sum256([]byte(payload + config.Get().SecretKey()))
	if provided != fmt.Sprintf("%x", sum)[:16] {
		return "", "", fmt.Errorf("invalid SID signature")
	}
	return username, password, nil
}
// hashesContext is middleware that collects torrent info-hashes from the
// "hashes" URL parameter ("|"-separated) or, failing that, from the request
// form, trims each entry, and stores the slice in the request context.
func hashesContext(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		var hashes []string
		if raw := chi.URLParam(r, "hashes"); raw != "" {
			hashes = strings.Split(raw, "|")
		}
		if hashes == nil {
			// Fall back to form values.
			_ = r.ParseForm()
			hashes = r.Form["hashes"]
		}
		for i := range hashes {
			hashes[i] = strings.TrimSpace(hashes[i])
		}
		ctx := context.WithValue(r.Context(), hashesKey, hashes)
		next.ServeHTTP(w, r.WithContext(ctx))
	})
}
// verifyAuth checks username/password against the configured UI credentials;
// the stored password is a bcrypt hash.
func verifyAuth(username, password string) bool {
	if username == "" {
		return false
	}
	auth := config.Get().GetAuth()
	switch {
	case auth == nil:
		return false
	case username != auth.Username:
		return false
	}
	return bcrypt.CompareHashAndPassword([]byte(auth.Password), []byte(password)) == nil
}

View File

@@ -1,357 +0,0 @@
package qbit
import (
"fmt"
"github.com/cavaliergopher/grab/v3"
"github.com/sirrobot01/decypharr/internal/utils"
debridTypes "github.com/sirrobot01/decypharr/pkg/debrid/types"
"io"
"net/http"
"os"
"path/filepath"
"sync"
"time"
)
func Download(client *grab.Client, url, filename string, progressCallback func(int64, int64)) error {
req, err := grab.NewRequest(filename, url)
if err != nil {
return err
}
resp := client.Do(req)
t := time.NewTicker(time.Second * 2)
defer t.Stop()
var lastReported int64
Loop:
for {
select {
case <-t.C:
current := resp.BytesComplete()
speed := int64(resp.BytesPerSecond())
if current != lastReported {
if progressCallback != nil {
progressCallback(current-lastReported, speed)
}
lastReported = current
}
case <-resp.Done:
break Loop
}
}
// Report final bytes
if progressCallback != nil {
progressCallback(resp.BytesComplete()-lastReported, 0)
}
return resp.Err()
}
func (q *QBit) ProcessManualFile(torrent *Torrent) (string, error) {
debridTorrent := torrent.DebridTorrent
q.logger.Info().Msgf("Downloading %d files...", len(debridTorrent.Files))
torrentPath := filepath.Join(q.DownloadFolder, debridTorrent.Arr.Name, utils.RemoveExtension(debridTorrent.OriginalFilename))
torrentPath = utils.RemoveInvalidChars(torrentPath)
err := os.MkdirAll(torrentPath, os.ModePerm)
if err != nil {
// add previous error to the error and return
return "", fmt.Errorf("failed to create directory: %s: %v", torrentPath, err)
}
q.downloadFiles(torrent, torrentPath)
return torrentPath, nil
}
func (q *QBit) downloadFiles(torrent *Torrent, parent string) {
debridTorrent := torrent.DebridTorrent
var wg sync.WaitGroup
totalSize := int64(0)
for _, file := range debridTorrent.Files {
totalSize += file.Size
}
debridTorrent.Mu.Lock()
debridTorrent.SizeDownloaded = 0 // Reset downloaded bytes
debridTorrent.Progress = 0 // Reset progress
debridTorrent.Mu.Unlock()
progressCallback := func(downloaded int64, speed int64) {
debridTorrent.Mu.Lock()
defer debridTorrent.Mu.Unlock()
torrent.Mu.Lock()
defer torrent.Mu.Unlock()
// Update total downloaded bytes
debridTorrent.SizeDownloaded += downloaded
debridTorrent.Speed = speed
// Calculate overall progress
if totalSize > 0 {
debridTorrent.Progress = float64(debridTorrent.SizeDownloaded) / float64(totalSize) * 100
}
q.UpdateTorrentMin(torrent, debridTorrent)
}
client := &grab.Client{
UserAgent: "Decypharr[QBitTorrent]",
HTTPClient: &http.Client{
Transport: &http.Transport{
Proxy: http.ProxyFromEnvironment,
},
},
}
errChan := make(chan error, len(debridTorrent.Files))
for _, file := range debridTorrent.Files {
if file.DownloadLink == nil {
q.logger.Info().Msgf("No download link found for %s", file.Name)
continue
}
wg.Add(1)
q.downloadSemaphore <- struct{}{}
go func(file debridTypes.File) {
defer wg.Done()
defer func() { <-q.downloadSemaphore }()
filename := file.Name
err := Download(
client,
file.DownloadLink.DownloadLink,
filepath.Join(parent, filename),
progressCallback,
)
if err != nil {
q.logger.Error().Msgf("Failed to download %s: %v", filename, err)
errChan <- err
} else {
q.logger.Info().Msgf("Downloaded %s", filename)
}
}(file)
}
wg.Wait()
close(errChan)
var errors []error
for err := range errChan {
if err != nil {
errors = append(errors, err)
}
}
if len(errors) > 0 {
q.logger.Error().Msgf("Errors occurred during download: %v", errors)
return
}
q.logger.Info().Msgf("Downloaded all files for %s", debridTorrent.Name)
}
func (q *QBit) ProcessSymlink(torrent *Torrent) (string, error) {
debridTorrent := torrent.DebridTorrent
files := debridTorrent.Files
if len(files) == 0 {
return "", fmt.Errorf("no video files found")
}
q.logger.Info().Msgf("Checking symlinks for %d files...", len(files))
rCloneBase := debridTorrent.MountPath
torrentPath, err := q.getTorrentPath(rCloneBase, debridTorrent) // /MyTVShow/
// This returns filename.ext for alldebrid instead of the parent folder filename/
torrentFolder := torrentPath
if err != nil {
return "", fmt.Errorf("failed to get torrent path: %v", err)
}
// Check if the torrent path is a file
torrentRclonePath := filepath.Join(rCloneBase, torrentPath) // leave it as is
if debridTorrent.Debrid == "alldebrid" && utils.IsMediaFile(torrentPath) {
// Alldebrid hotfix for single file torrents
torrentFolder = utils.RemoveExtension(torrentFolder)
torrentRclonePath = rCloneBase // /mnt/rclone/magnets/ // Remove the filename since it's in the root folder
}
torrentSymlinkPath := filepath.Join(q.DownloadFolder, debridTorrent.Arr.Name, torrentFolder) // /mnt/symlinks/{category}/MyTVShow/
err = os.MkdirAll(torrentSymlinkPath, os.ModePerm)
if err != nil {
return "", fmt.Errorf("failed to create directory: %s: %v", torrentSymlinkPath, err)
}
pending := make(map[string]debridTypes.File)
filePaths := make([]string, 0, len(files))
for _, file := range files {
pending[file.Path] = file
}
ticker := time.NewTicker(200 * time.Millisecond)
defer ticker.Stop()
timeout := time.After(30 * time.Minute) // Adjust timeout duration as needed
for len(pending) > 0 {
select {
case <-ticker.C:
for path, file := range pending {
fullFilePath := filepath.Join(torrentRclonePath, file.Path)
if _, err := os.Stat(fullFilePath); !os.IsNotExist(err) {
fileSymlinkPath := filepath.Join(torrentSymlinkPath, file.Name)
if err := os.Symlink(fullFilePath, fileSymlinkPath); err != nil && !os.IsExist(err) {
q.logger.Debug().Msgf("Failed to create symlink: %s: %v", fileSymlinkPath, err)
} else {
filePaths = append(filePaths, fileSymlinkPath)
delete(pending, path)
q.logger.Info().Msgf("File is ready: %s", file.Name)
}
}
}
case <-timeout:
q.logger.Warn().Msgf("Timeout waiting for files, %d files still pending", len(pending))
return torrentSymlinkPath, fmt.Errorf("timeout waiting for files: %d files still pending", len(pending))
}
}
if q.SkipPreCache {
return torrentSymlinkPath, nil
}
go func() {
if err := q.preCacheFile(debridTorrent.Name, filePaths); err != nil {
q.logger.Error().Msgf("Failed to pre-cache file: %s", err)
} else {
q.logger.Trace().Msgf("Pre-cached %d files", len(filePaths))
}
}()
return torrentSymlinkPath, nil
}
func (q *QBit) createSymlinksWebdav(debridTorrent *debridTypes.Torrent, rclonePath, torrentFolder string) (string, error) {
files := debridTorrent.Files
symlinkPath := filepath.Join(q.DownloadFolder, debridTorrent.Arr.Name, torrentFolder) // /mnt/symlinks/{category}/MyTVShow/
err := os.MkdirAll(symlinkPath, os.ModePerm)
if err != nil {
return "", fmt.Errorf("failed to create directory: %s: %v", symlinkPath, err)
}
remainingFiles := make(map[string]debridTypes.File)
for _, file := range files {
remainingFiles[file.Name] = file
}
ticker := time.NewTicker(100 * time.Millisecond)
defer ticker.Stop()
timeout := time.After(30 * time.Minute)
filePaths := make([]string, 0, len(files))
for len(remainingFiles) > 0 {
select {
case <-ticker.C:
entries, err := os.ReadDir(rclonePath)
if err != nil {
continue
}
// Check which files exist in this batch
for _, entry := range entries {
filename := entry.Name()
if file, exists := remainingFiles[filename]; exists {
fullFilePath := filepath.Join(rclonePath, filename)
fileSymlinkPath := filepath.Join(symlinkPath, file.Name)
if err := os.Symlink(fullFilePath, fileSymlinkPath); err != nil && !os.IsExist(err) {
q.logger.Debug().Msgf("Failed to create symlink: %s: %v", fileSymlinkPath, err)
} else {
filePaths = append(filePaths, fileSymlinkPath)
delete(remainingFiles, filename)
q.logger.Info().Msgf("File is ready: %s", file.Name)
}
}
}
case <-timeout:
q.logger.Warn().Msgf("Timeout waiting for files, %d files still pending", len(remainingFiles))
return symlinkPath, fmt.Errorf("timeout waiting for files")
}
}
if q.SkipPreCache {
return symlinkPath, nil
}
go func() {
if err := q.preCacheFile(debridTorrent.Name, filePaths); err != nil {
q.logger.Error().Msgf("Failed to pre-cache file: %s", err)
} else {
q.logger.Debug().Msgf("Pre-cached %d files", len(filePaths))
}
}() // Pre-cache the files in the background
// Pre-cache the first 256KB and 1MB of the file
return symlinkPath, nil
}
func (q *QBit) getTorrentPath(rclonePath string, debridTorrent *debridTypes.Torrent) (string, error) {
for {
torrentPath, err := debridTorrent.GetMountFolder(rclonePath)
if err == nil {
q.logger.Debug().Msgf("Found torrent path: %s", torrentPath)
return torrentPath, err
}
time.Sleep(100 * time.Millisecond)
}
}
func (q *QBit) preCacheFile(name string, filePaths []string) error {
q.logger.Trace().Msgf("Pre-caching torrent: %s", name)
if len(filePaths) == 0 {
return fmt.Errorf("no file paths provided")
}
for _, filePath := range filePaths {
err := func(f string) error {
file, err := os.Open(f)
if err != nil {
if os.IsNotExist(err) {
// File has probably been moved by arr, return silently
return nil
}
return fmt.Errorf("failed to open file: %s: %v", f, err)
}
defer file.Close()
// Pre-cache the file header (first 256KB) using 16KB chunks.
if err := q.readSmallChunks(file, 0, 256*1024, 16*1024); err != nil {
return err
}
if err := q.readSmallChunks(file, 1024*1024, 64*1024, 16*1024); err != nil {
return err
}
return nil
}(filePath)
if err != nil {
return err
}
}
return nil
}
func (q *QBit) readSmallChunks(file *os.File, startPos int64, totalToRead int, chunkSize int) error {
_, err := file.Seek(startPos, 0)
if err != nil {
return err
}
buf := make([]byte, chunkSize)
bytesRemaining := totalToRead
for bytesRemaining > 0 {
toRead := chunkSize
if bytesRemaining < chunkSize {
toRead = bytesRemaining
}
n, err := file.Read(buf[:toRead])
if err != nil {
if err == io.EOF {
break
}
return err
}
bytesRemaining -= n
}
return nil
}

View File

@@ -1,114 +1,33 @@
package qbit
import (
"context"
"encoding/base64"
"github.com/go-chi/chi/v5"
"github.com/sirrobot01/decypharr/internal/request"
"github.com/sirrobot01/decypharr/pkg/arr"
"github.com/sirrobot01/decypharr/pkg/service"
"net/http"
"path/filepath"
"strings"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/request"
"github.com/sirrobot01/decypharr/pkg/arr"
)
func decodeAuthHeader(header string) (string, string, error) {
encodedTokens := strings.Split(header, " ")
if len(encodedTokens) != 2 {
return "", "", nil
}
encodedToken := encodedTokens[1]
bytes, err := base64.StdEncoding.DecodeString(encodedToken)
if err != nil {
return "", "", err
}
bearer := string(bytes)
colonIndex := strings.LastIndex(bearer, ":")
host := bearer[:colonIndex]
token := bearer[colonIndex+1:]
return host, token, nil
}
func (q *QBit) CategoryContext(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
category := strings.Trim(r.URL.Query().Get("category"), "")
if category == "" {
// Get from form
_ = r.ParseForm()
category = r.Form.Get("category")
if category == "" {
// Get from multipart form
_ = r.ParseMultipartForm(32 << 20)
category = r.FormValue("category")
}
}
ctx := context.WithValue(r.Context(), "category", strings.TrimSpace(category))
next.ServeHTTP(w, r.WithContext(ctx))
})
}
func (q *QBit) authContext(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
host, token, err := decodeAuthHeader(r.Header.Get("Authorization"))
category := r.Context().Value("category").(string)
svc := service.GetService()
// Check if arr exists
a := svc.Arr.Get(category)
if a == nil {
downloadUncached := false
a = arr.New(category, "", "", false, false, &downloadUncached)
}
if err == nil {
host = strings.TrimSpace(host)
if host != "" {
a.Host = host
}
token = strings.TrimSpace(token)
if token != "" {
a.Token = token
}
}
svc.Arr.AddOrUpdate(a)
ctx := context.WithValue(r.Context(), "arr", a)
next.ServeHTTP(w, r.WithContext(ctx))
})
}
func HashesCtx(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
_hashes := chi.URLParam(r, "hashes")
var hashes []string
if _hashes != "" {
hashes = strings.Split(_hashes, "|")
}
if hashes == nil {
// Get hashes from form
_ = r.ParseForm()
hashes = r.Form["hashes"]
}
for i, hash := range hashes {
hashes[i] = strings.TrimSpace(hash)
}
ctx := context.WithValue(r.Context(), "hashes", hashes)
next.ServeHTTP(w, r.WithContext(ctx))
})
}
func (q *QBit) handleLogin(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
_arr := ctx.Value("arr").(*arr.Arr)
if _arr == nil {
// No arr
_, _ = w.Write([]byte("Ok."))
cfg := config.Get()
username := r.FormValue("username")
password := r.FormValue("password")
a, err := q.authenticate(getCategory(ctx), username, password)
if err != nil {
http.Error(w, err.Error(), http.StatusUnauthorized)
return
}
if err := _arr.Validate(); err != nil {
q.logger.Info().Msgf("Error validating arr: %v", err)
if cfg.UseAuth {
cookie := &http.Cookie{
Name: "sid",
Value: createSID(a.Host, a.Token),
Path: "/",
SameSite: http.SameSiteNoneMode,
}
http.SetCookie(w, cookie)
}
_, _ = w.Write([]byte("Ok."))
}
@@ -122,7 +41,7 @@ func (q *QBit) handleWebAPIVersion(w http.ResponseWriter, r *http.Request) {
}
func (q *QBit) handlePreferences(w http.ResponseWriter, r *http.Request) {
preferences := NewAppPreferences()
preferences := getAppPreferences()
preferences.WebUiUsername = q.Username
preferences.SavePath = q.DownloadFolder
@@ -150,10 +69,10 @@ func (q *QBit) handleShutdown(w http.ResponseWriter, r *http.Request) {
func (q *QBit) handleTorrentsInfo(w http.ResponseWriter, r *http.Request) {
//log all url params
ctx := r.Context()
category := ctx.Value("category").(string)
category := getCategory(ctx)
filter := strings.Trim(r.URL.Query().Get("filter"), "")
hashes, _ := ctx.Value("hashes").([]string)
torrents := q.Storage.GetAllSorted(category, filter, hashes, "added_on", false)
hashes := getHashes(ctx)
torrents := q.storage.GetAllSorted(category, filter, hashes, "added_on", false)
request.JSONResponse(w, torrents, http.StatusOK)
}
@@ -164,13 +83,13 @@ func (q *QBit) handleTorrentsAdd(w http.ResponseWriter, r *http.Request) {
contentType := r.Header.Get("Content-Type")
if strings.Contains(contentType, "multipart/form-data") {
if err := r.ParseMultipartForm(32 << 20); err != nil {
q.logger.Info().Msgf("Error parsing multipart form: %v", err)
q.logger.Error().Err(err).Msgf("Error parsing multipart form")
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
} else if strings.Contains(contentType, "application/x-www-form-urlencoded") {
if err := r.ParseForm(); err != nil {
q.logger.Info().Msgf("Error parsing form: %v", err)
q.logger.Error().Err(err).Msgf("Error parsing form")
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
@@ -179,10 +98,18 @@ func (q *QBit) handleTorrentsAdd(w http.ResponseWriter, r *http.Request) {
return
}
isSymlink := strings.ToLower(r.FormValue("sequentialDownload")) != "true"
action := "symlink"
if strings.ToLower(r.FormValue("sequentialDownload")) == "true" {
action = "download"
}
debridName := r.FormValue("debrid")
category := r.FormValue("category")
_arr := getArrFromContext(ctx)
if _arr == nil {
// Arr is not in context
_arr = arr.New(category, "", "", false, false, nil, "", "")
}
atleastOne := false
ctx = context.WithValue(ctx, "isSymlink", isSymlink)
// Handle magnet URLs
if urls := r.FormValue("urls"); urls != "" {
@@ -191,8 +118,8 @@ func (q *QBit) handleTorrentsAdd(w http.ResponseWriter, r *http.Request) {
urlList = append(urlList, strings.TrimSpace(u))
}
for _, url := range urlList {
if err := q.AddMagnet(ctx, url, category); err != nil {
q.logger.Info().Msgf("Error adding magnet: %v", err)
if err := q.addMagnet(ctx, url, _arr, debridName, action); err != nil {
q.logger.Debug().Msgf("Error adding magnet: %s", err.Error())
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
@@ -204,8 +131,8 @@ func (q *QBit) handleTorrentsAdd(w http.ResponseWriter, r *http.Request) {
if r.MultipartForm != nil && r.MultipartForm.File != nil {
if files := r.MultipartForm.File["torrents"]; len(files) > 0 {
for _, fileHeader := range files {
if err := q.AddTorrent(ctx, fileHeader, category); err != nil {
q.logger.Info().Msgf("Error adding torrent: %v", err)
if err := q.addTorrent(ctx, fileHeader, _arr, debridName, action); err != nil {
q.logger.Debug().Err(err).Msgf("Error adding torrent")
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
@@ -224,14 +151,14 @@ func (q *QBit) handleTorrentsAdd(w http.ResponseWriter, r *http.Request) {
func (q *QBit) handleTorrentsDelete(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
hashes, _ := ctx.Value("hashes").([]string)
hashes := getHashes(ctx)
if len(hashes) == 0 {
http.Error(w, "No hashes provided", http.StatusBadRequest)
return
}
category := ctx.Value("category").(string)
category := getCategory(ctx)
for _, hash := range hashes {
q.Storage.Delete(hash, category, false)
q.storage.Delete(hash, category, false)
}
w.WriteHeader(http.StatusOK)
@@ -239,10 +166,10 @@ func (q *QBit) handleTorrentsDelete(w http.ResponseWriter, r *http.Request) {
func (q *QBit) handleTorrentsPause(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
hashes, _ := ctx.Value("hashes").([]string)
category := ctx.Value("category").(string)
hashes := getHashes(ctx)
category := getCategory(ctx)
for _, hash := range hashes {
torrent := q.Storage.Get(hash, category)
torrent := q.storage.Get(hash, category)
if torrent == nil {
continue
}
@@ -254,10 +181,10 @@ func (q *QBit) handleTorrentsPause(w http.ResponseWriter, r *http.Request) {
func (q *QBit) handleTorrentsResume(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
hashes, _ := ctx.Value("hashes").([]string)
category := ctx.Value("category").(string)
hashes := getHashes(ctx)
category := getCategory(ctx)
for _, hash := range hashes {
torrent := q.Storage.Get(hash, category)
torrent := q.storage.Get(hash, category)
if torrent == nil {
continue
}
@@ -269,10 +196,10 @@ func (q *QBit) handleTorrentsResume(w http.ResponseWriter, r *http.Request) {
func (q *QBit) handleTorrentRecheck(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
hashes, _ := ctx.Value("hashes").([]string)
category := ctx.Value("category").(string)
hashes := getHashes(ctx)
category := getCategory(ctx)
for _, hash := range hashes {
torrent := q.Storage.Get(hash, category)
torrent := q.storage.Get(hash, category)
if torrent == nil {
continue
}
@@ -315,7 +242,7 @@ func (q *QBit) handleCreateCategory(w http.ResponseWriter, r *http.Request) {
func (q *QBit) handleTorrentProperties(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
hash := r.URL.Query().Get("hash")
torrent := q.Storage.Get(hash, ctx.Value("category").(string))
torrent := q.storage.Get(hash, getCategory(ctx))
properties := q.GetTorrentProperties(torrent)
request.JSONResponse(w, properties, http.StatusOK)
@@ -324,22 +251,21 @@ func (q *QBit) handleTorrentProperties(w http.ResponseWriter, r *http.Request) {
func (q *QBit) handleTorrentFiles(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
hash := r.URL.Query().Get("hash")
torrent := q.Storage.Get(hash, ctx.Value("category").(string))
torrent := q.storage.Get(hash, getCategory(ctx))
if torrent == nil {
return
}
files := q.GetTorrentFiles(torrent)
request.JSONResponse(w, files, http.StatusOK)
request.JSONResponse(w, torrent.Files, http.StatusOK)
}
func (q *QBit) handleSetCategory(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
category := ctx.Value("category").(string)
hashes, _ := ctx.Value("hashes").([]string)
torrents := q.Storage.GetAll("", "", hashes)
category := getCategory(ctx)
hashes := getHashes(ctx)
torrents := q.storage.GetAll("", "", hashes)
for _, torrent := range torrents {
torrent.Category = category
q.Storage.AddOrUpdate(torrent)
q.storage.AddOrUpdate(torrent)
}
request.JSONResponse(w, nil, http.StatusOK)
}
@@ -351,14 +277,14 @@ func (q *QBit) handleAddTorrentTags(w http.ResponseWriter, r *http.Request) {
return
}
ctx := r.Context()
hashes, _ := ctx.Value("hashes").([]string)
hashes := getHashes(ctx)
tags := strings.Split(r.FormValue("tags"), ",")
for i, tag := range tags {
tags[i] = strings.TrimSpace(tag)
}
torrents := q.Storage.GetAll("", "", hashes)
torrents := q.storage.GetAll("", "", hashes)
for _, t := range torrents {
q.SetTorrentTags(t, tags)
q.setTorrentTags(t, tags)
}
request.JSONResponse(w, nil, http.StatusOK)
}
@@ -370,14 +296,14 @@ func (q *QBit) handleRemoveTorrentTags(w http.ResponseWriter, r *http.Request) {
return
}
ctx := r.Context()
hashes, _ := ctx.Value("hashes").([]string)
hashes := getHashes(ctx)
tags := strings.Split(r.FormValue("tags"), ",")
for i, tag := range tags {
tags[i] = strings.TrimSpace(tag)
}
torrents := q.Storage.GetAll("", "", hashes)
torrents := q.storage.GetAll("", "", hashes)
for _, torrent := range torrents {
q.RemoveTorrentTags(torrent, tags)
q.removeTorrentTags(torrent, tags)
}
request.JSONResponse(w, nil, http.StatusOK)
@@ -397,6 +323,6 @@ func (q *QBit) handleCreateTags(w http.ResponseWriter, r *http.Request) {
for i, tag := range tags {
tags[i] = strings.TrimSpace(tag)
}
q.AddTags(tags)
q.addTags(tags)
request.JSONResponse(w, nil, http.StatusOK)
}

View File

@@ -1,80 +0,0 @@
package qbit
import (
"github.com/sirrobot01/decypharr/internal/utils"
"github.com/sirrobot01/decypharr/pkg/debrid/debrid"
"github.com/sirrobot01/decypharr/pkg/service"
"time"
"github.com/google/uuid"
"github.com/sirrobot01/decypharr/pkg/arr"
)
// ImportRequest tracks a single manual torrent-import job through its
// lifecycle: created pending, then marked Failed (with a reason and
// timestamp) or Completed. It is JSON-serializable.
type ImportRequest struct {
	ID     string        `json:"id"`     // unique request id (uuid, set by NewImportRequest)
	Path   string        `json:"path"`   // presumably the import destination path — not set in this file, confirm against callers
	Magnet *utils.Magnet `json:"magnet"` // parsed magnet/torrent metadata to import
	Arr    *arr.Arr      `json:"arr"`    // target arr instance to notify/refresh
	// IsSymlink selects symlink handling for the downloaded files
	// (as opposed to a real download) when the request is processed.
	IsSymlink bool `json:"isSymlink"`
	// NOTE(review): the JSON tag is "series", not "seriesId" — kept as-is
	// since external consumers may depend on it.
	SeriesId int      `json:"series"`
	Seasons  []int    `json:"seasons"`  // seasons to import — presumably for multi-season imports, confirm
	Episodes []string `json:"episodes"` // episodes to import; string-typed, format set by callers
	// DownloadUncached is forwarded to the debrid provider to allow
	// downloading torrents that are not already cached.
	DownloadUncached bool      `json:"downloadUncached"`
	Failed           bool      `json:"failed"`      // set by Fail
	FailedAt         time.Time `json:"failedAt"`    // when Fail was called
	Reason           string    `json:"reason"`      // failure reason recorded by Fail
	Completed        bool      `json:"completed"`   // set by Complete
	CompletedAt      time.Time `json:"completedAt"` // when Complete was called
	Async            bool      `json:"async"`       // always false here; semantics set by callers — confirm
}
// ManualImportResponseSchema is the JSON shape returned for a manual
// import request. The field set (Priority/Status/Trigger/Queued, command
// id) presumably mirrors a Sonarr/Radarr "command" response so arr
// clients can consume it — confirm against the arr API before changing.
type ManualImportResponseSchema struct {
	Priority            string    `json:"priority"`
	Status              string    `json:"status"`
	Result              string    `json:"result"`
	Queued              time.Time `json:"queued"` // when the command was queued
	Trigger             string    `json:"trigger"`
	SendUpdatesToClient bool      `json:"sendUpdatesToClient"`
	UpdateScheduledTask bool      `json:"updateScheduledTask"`
	Id                  int       `json:"id"` // command id reported back to the caller
}
// NewImportRequest constructs a pending ImportRequest with a freshly
// generated UUID. The returned request is not yet failed, completed, or
// async; those flags change as the request is processed.
func NewImportRequest(magnet *utils.Magnet, arr *arr.Arr, isSymlink, downloadUncached bool) *ImportRequest {
	req := &ImportRequest{
		ID:               uuid.NewString(),
		Magnet:           magnet,
		Arr:              arr,
		IsSymlink:        isSymlink,
		DownloadUncached: downloadUncached,
	}
	// Failed, Completed and Async are intentionally left at their zero
	// value (false) — the original spelled them out explicitly.
	return req
}
// Fail marks the request as failed, recording the human-readable reason
// and the time of failure.
func (i *ImportRequest) Fail(reason string) {
	i.Reason = reason
	i.FailedAt = time.Now()
	i.Failed = true
}
// Complete marks the request as successfully finished and stamps the
// completion time.
func (i *ImportRequest) Complete() {
	i.CompletedAt = time.Now()
	i.Completed = true
}
// Process submits the request's magnet to the configured debrid service
// and registers the resulting torrent in qbit's storage. File handling
// (symlinking or downloading) then continues asynchronously via
// ProcessFiles so the caller is not blocked.
//
// Returns an error if the debrid service rejects the magnet or yields no
// torrent; on success the request is handed off and nil is returned.
func (i *ImportRequest) Process(q *QBit) (err error) {
	// Use this for now.
	// This sends the torrent to the arr
	svc := service.GetService()
	torrent := createTorrentFromMagnet(i.Magnet, i.Arr.Name, "manual")
	debridTorrent, err := debrid.ProcessTorrent(svc.Debrid, i.Magnet, i.Arr, i.IsSymlink, i.DownloadUncached)
	if err != nil {
		return err
	}
	// ProcessTorrent can return (nil, nil); the handler-path Process in
	// this package guards the same call with `debridTorrent == nil`.
	// Without this check ProcessFiles would dereference a nil torrent.
	if debridTorrent == nil {
		return errors.New("failed to process torrent")
	}
	torrent = q.UpdateTorrentMin(torrent, debridTorrent)
	q.Storage.AddOrUpdate(torrent)
	// Async on purpose: file processing must not delay the response.
	go q.ProcessFiles(torrent, debridTorrent, i.Arr, i.IsSymlink)
	return nil
}

View File

@@ -1,52 +1,38 @@
package qbit
import (
"cmp"
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/logger"
"os"
"path/filepath"
"github.com/sirrobot01/decypharr/pkg/wire"
)
type QBit struct {
Username string `json:"username"`
Password string `json:"password"`
Port string `json:"port"`
DownloadFolder string `json:"download_folder"`
Categories []string `json:"categories"`
Storage *TorrentStorage
logger zerolog.Logger
Tags []string
RefreshInterval int
SkipPreCache bool
downloadSemaphore chan struct{}
Username string
Password string
DownloadFolder string
Categories []string
storage *wire.TorrentStorage
logger zerolog.Logger
Tags []string
}
func New() *QBit {
_cfg := config.Get()
cfg := _cfg.QBitTorrent
port := cmp.Or(_cfg.Port, os.Getenv("QBIT_PORT"), "8282")
refreshInterval := cmp.Or(cfg.RefreshInterval, 10)
return &QBit{
Username: cfg.Username,
Password: cfg.Password,
Port: port,
DownloadFolder: cfg.DownloadFolder,
Categories: cfg.Categories,
Storage: NewTorrentStorage(filepath.Join(_cfg.Path, "torrents.json")),
logger: logger.New("qbit"),
RefreshInterval: refreshInterval,
SkipPreCache: cfg.SkipPreCache,
downloadSemaphore: make(chan struct{}, cmp.Or(cfg.MaxDownloads, 5)),
Username: cfg.Username,
Password: cfg.Password,
DownloadFolder: cfg.DownloadFolder,
Categories: cfg.Categories,
storage: wire.Get().Torrents(),
logger: logger.New("qbit"),
}
}
func (q *QBit) Reset() {
if q.Storage != nil {
q.Storage.Reset()
if q.storage != nil {
q.storage.Reset()
}
q.Tags = nil
close(q.downloadSemaphore)
}

View File

@@ -1,33 +1,50 @@
package qbit
import (
"github.com/go-chi/chi/v5"
"net/http"
"github.com/go-chi/chi/v5"
)
func (q *QBit) Routes() http.Handler {
r := chi.NewRouter()
r.Use(q.CategoryContext)
r.Use(q.categoryContext)
r.Group(func(r chi.Router) {
r.Use(q.authContext)
r.Post("/auth/login", q.handleLogin)
r.Route("/torrents", func(r chi.Router) {
r.Use(HashesCtx)
r.Use(q.authContext)
r.Use(hashesContext)
r.Get("/info", q.handleTorrentsInfo)
r.Post("/info", q.handleTorrentsInfo)
r.Post("/add", q.handleTorrentsAdd)
r.Post("/delete", q.handleTorrentsDelete)
r.Get("/categories", q.handleCategories)
r.Post("/categories", q.handleCategories)
r.Post("/createCategory", q.handleCreateCategory)
r.Post("/setCategory", q.handleSetCategory)
r.Post("/addTags", q.handleAddTorrentTags)
r.Post("/removeTags", q.handleRemoveTorrentTags)
r.Post("/createTags", q.handleCreateTags)
r.Get("/tags", q.handleGetTags)
r.Get("/pause", q.handleTorrentsPause)
r.Get("/resume", q.handleTorrentsResume)
r.Get("/recheck", q.handleTorrentRecheck)
r.Get("/properties", q.handleTorrentProperties)
r.Get("/files", q.handleTorrentFiles)
// Create POST equivalents for pause, resume, recheck
r.Post("/tags", q.handleGetTags)
r.Post("/pause", q.handleTorrentsPause)
r.Post("/resume", q.handleTorrentsResume)
r.Post("/recheck", q.handleTorrentRecheck)
r.Post("/properties", q.handleTorrentProperties)
r.Post("/files", q.handleTorrentFiles)
})
r.Route("/app", func(r chi.Router) {

View File

@@ -1,38 +1,35 @@
package qbit
import (
"cmp"
"context"
"fmt"
"github.com/sirrobot01/decypharr/internal/request"
"github.com/sirrobot01/decypharr/internal/utils"
"github.com/sirrobot01/decypharr/pkg/arr"
"github.com/sirrobot01/decypharr/pkg/debrid/debrid"
debridTypes "github.com/sirrobot01/decypharr/pkg/debrid/types"
"github.com/sirrobot01/decypharr/pkg/service"
"github.com/sirrobot01/decypharr/pkg/wire"
"io"
"mime/multipart"
"os"
"path/filepath"
"strings"
"time"
)
// All torrent related helpers goes here
func (q *QBit) AddMagnet(ctx context.Context, url, category string) error {
// All torrent-related helpers goes here
func (q *QBit) addMagnet(ctx context.Context, url string, arr *arr.Arr, debrid string, action string) error {
magnet, err := utils.GetMagnetFromUrl(url)
if err != nil {
return fmt.Errorf("error parsing magnet link: %w", err)
}
err = q.Process(ctx, magnet, category)
_store := wire.Get()
importReq := wire.NewImportRequest(debrid, q.DownloadFolder, magnet, arr, action, false, "", wire.ImportTypeQBitTorrent)
err = _store.AddTorrent(ctx, importReq)
if err != nil {
return fmt.Errorf("failed to process torrent: %w", err)
}
return nil
}
func (q *QBit) AddTorrent(ctx context.Context, fileHeader *multipart.FileHeader, category string) error {
func (q *QBit) addTorrent(ctx context.Context, fileHeader *multipart.FileHeader, arr *arr.Arr, debrid string, action string) error {
file, _ := fileHeader.Open()
defer file.Close()
var reader io.Reader = file
@@ -40,226 +37,28 @@ func (q *QBit) AddTorrent(ctx context.Context, fileHeader *multipart.FileHeader,
if err != nil {
return fmt.Errorf("error reading file: %s \n %w", fileHeader.Filename, err)
}
err = q.Process(ctx, magnet, category)
_store := wire.Get()
importReq := wire.NewImportRequest(debrid, q.DownloadFolder, magnet, arr, action, false, "", wire.ImportTypeQBitTorrent)
err = _store.AddTorrent(ctx, importReq)
if err != nil {
return fmt.Errorf("failed to process torrent: %w", err)
}
return nil
}
func (q *QBit) Process(ctx context.Context, magnet *utils.Magnet, category string) error {
svc := service.GetService()
torrent := createTorrentFromMagnet(magnet, category, "auto")
a, ok := ctx.Value("arr").(*arr.Arr)
if !ok {
return fmt.Errorf("arr not found in context")
}
isSymlink := ctx.Value("isSymlink").(bool)
debridTorrent, err := debrid.ProcessTorrent(svc.Debrid, magnet, a, isSymlink, false)
if err != nil || debridTorrent == nil {
if err == nil {
err = fmt.Errorf("failed to process torrent")
}
return err
}
torrent = q.UpdateTorrentMin(torrent, debridTorrent)
q.Storage.AddOrUpdate(torrent)
go q.ProcessFiles(torrent, debridTorrent, a, isSymlink) // We can send async for file processing not to delay the response
return nil
}
func (q *QBit) ProcessFiles(torrent *Torrent, debridTorrent *debridTypes.Torrent, arr *arr.Arr, isSymlink bool) {
svc := service.GetService()
client := svc.Debrid.GetClient(debridTorrent.Debrid)
downloadingStatuses := client.GetDownloadingStatus()
for debridTorrent.Status != "downloaded" {
q.logger.Debug().Msgf("%s <- (%s) Download Progress: %.2f%%", debridTorrent.Debrid, debridTorrent.Name, debridTorrent.Progress)
dbT, err := client.CheckStatus(debridTorrent, isSymlink)
if err != nil {
if dbT != nil && dbT.Id != "" {
// Delete the torrent if it was not downloaded
go func() {
_ = client.DeleteTorrent(dbT.Id)
}()
}
q.logger.Error().Msgf("Error checking status: %v", err)
q.MarkAsFailed(torrent)
go func() {
if err := arr.Refresh(); err != nil {
q.logger.Error().Msgf("Error refreshing arr: %v", err)
}
}()
return
}
debridTorrent = dbT
torrent = q.UpdateTorrentMin(torrent, debridTorrent)
// Exit the loop for downloading statuses to prevent memory buildup
if debridTorrent.Status == "downloaded" || !utils.Contains(downloadingStatuses, debridTorrent.Status) {
break
}
if !utils.Contains(client.GetDownloadingStatus(), debridTorrent.Status) {
break
}
time.Sleep(time.Duration(q.RefreshInterval) * time.Second)
}
var torrentSymlinkPath string
var err error
debridTorrent.Arr = arr
// Check if debrid supports webdav by checking cache
if isSymlink {
timer := time.Now()
cache, useWebdav := svc.Debrid.Caches[debridTorrent.Debrid]
if useWebdav {
q.logger.Info().Msgf("Using internal webdav for %s", debridTorrent.Debrid)
// Use webdav to download the file
if err := cache.AddTorrent(debridTorrent); err != nil {
q.logger.Error().Msgf("Error adding torrent to cache: %v", err)
q.MarkAsFailed(torrent)
return
}
rclonePath := filepath.Join(debridTorrent.MountPath, cache.GetTorrentFolder(debridTorrent)) // /mnt/remote/realdebrid/MyTVShow
torrentFolderNoExt := utils.RemoveExtension(debridTorrent.Name)
torrentSymlinkPath, err = q.createSymlinksWebdav(debridTorrent, rclonePath, torrentFolderNoExt) // /mnt/symlinks/{category}/MyTVShow/
} else {
// User is using either zurg or debrid webdav
torrentSymlinkPath, err = q.ProcessSymlink(torrent) // /mnt/symlinks/{category}/MyTVShow/
}
q.logger.Info().Msgf("Adding %s took %s", debridTorrent.Name, time.Since(timer))
} else {
torrentSymlinkPath, err = q.ProcessManualFile(torrent)
}
if err != nil {
q.MarkAsFailed(torrent)
go func() {
_ = client.DeleteTorrent(debridTorrent.Id)
}()
q.logger.Info().Msgf("Error: %v", err)
return
}
torrent.TorrentPath = torrentSymlinkPath
q.UpdateTorrent(torrent, debridTorrent)
go func() {
if err := request.SendDiscordMessage("download_complete", "success", torrent.discordContext()); err != nil {
q.logger.Error().Msgf("Error sending discord message: %v", err)
}
}()
if err := arr.Refresh(); err != nil {
q.logger.Error().Msgf("Error refreshing arr: %v", err)
}
}
func (q *QBit) MarkAsFailed(t *Torrent) *Torrent {
t.State = "error"
q.Storage.AddOrUpdate(t)
go func() {
if err := request.SendDiscordMessage("download_failed", "error", t.discordContext()); err != nil {
q.logger.Error().Msgf("Error sending discord message: %v", err)
}
}()
return t
}
func (q *QBit) UpdateTorrentMin(t *Torrent, debridTorrent *debridTypes.Torrent) *Torrent {
if debridTorrent == nil {
return t
}
addedOn, err := time.Parse(time.RFC3339, debridTorrent.Added)
if err != nil {
addedOn = time.Now()
}
totalSize := debridTorrent.Bytes
progress := (cmp.Or(debridTorrent.Progress, 0.0)) / 100.0
sizeCompleted := int64(float64(totalSize) * progress)
var speed int64
if debridTorrent.Speed != 0 {
speed = debridTorrent.Speed
}
var eta int
if speed != 0 {
eta = int((totalSize - sizeCompleted) / speed)
}
t.ID = debridTorrent.Id
t.Name = debridTorrent.Name
t.AddedOn = addedOn.Unix()
t.DebridTorrent = debridTorrent
t.Debrid = debridTorrent.Debrid
t.Size = totalSize
t.Completed = sizeCompleted
t.Downloaded = sizeCompleted
t.DownloadedSession = sizeCompleted
t.Uploaded = sizeCompleted
t.UploadedSession = sizeCompleted
t.AmountLeft = totalSize - sizeCompleted
t.Progress = progress
t.Eta = eta
t.Dlspeed = speed
t.Upspeed = speed
t.SavePath = filepath.Join(q.DownloadFolder, t.Category) + string(os.PathSeparator)
t.ContentPath = filepath.Join(t.SavePath, t.Name) + string(os.PathSeparator)
return t
}
func (q *QBit) UpdateTorrent(t *Torrent, debridTorrent *debridTypes.Torrent) *Torrent {
if debridTorrent == nil {
return t
}
if debridClient := service.GetDebrid().GetClient(debridTorrent.Debrid); debridClient != nil {
if debridTorrent.Status != "downloaded" {
_ = debridClient.UpdateTorrent(debridTorrent)
}
}
t = q.UpdateTorrentMin(t, debridTorrent)
t.ContentPath = t.TorrentPath + string(os.PathSeparator)
if t.IsReady() {
t.State = "pausedUP"
q.Storage.Update(t)
return t
}
ticker := time.NewTicker(100 * time.Millisecond)
defer ticker.Stop()
for {
select {
case <-ticker.C:
if t.IsReady() {
t.State = "pausedUP"
q.Storage.Update(t)
return t
}
updatedT := q.UpdateTorrent(t, debridTorrent)
t = updatedT
case <-time.After(10 * time.Minute): // Add a timeout
return t
}
}
}
func (q *QBit) ResumeTorrent(t *Torrent) bool {
func (q *QBit) ResumeTorrent(t *wire.Torrent) bool {
return true
}
func (q *QBit) PauseTorrent(t *Torrent) bool {
func (q *QBit) PauseTorrent(t *wire.Torrent) bool {
return true
}
func (q *QBit) RefreshTorrent(t *Torrent) bool {
func (q *QBit) RefreshTorrent(t *wire.Torrent) bool {
return true
}
func (q *QBit) GetTorrentProperties(t *Torrent) *TorrentProperties {
func (q *QBit) GetTorrentProperties(t *wire.Torrent) *TorrentProperties {
return &TorrentProperties{
AdditionDate: t.AddedOn,
Comment: "Debrid Blackhole <https://github.com/sirrobot01/decypharr>",
@@ -284,21 +83,7 @@ func (q *QBit) GetTorrentProperties(t *Torrent) *TorrentProperties {
}
}
func (q *QBit) GetTorrentFiles(t *Torrent) []*TorrentFile {
files := make([]*TorrentFile, 0)
if t.DebridTorrent == nil {
return files
}
for _, file := range t.DebridTorrent.Files {
files = append(files, &TorrentFile{
Name: file.Path,
Size: file.Size,
})
}
return files
}
func (q *QBit) SetTorrentTags(t *Torrent, tags []string) bool {
func (q *QBit) setTorrentTags(t *wire.Torrent, tags []string) bool {
torrentTags := strings.Split(t.Tags, ",")
for _, tag := range tags {
if tag == "" {
@@ -312,20 +97,20 @@ func (q *QBit) SetTorrentTags(t *Torrent, tags []string) bool {
}
}
t.Tags = strings.Join(torrentTags, ",")
q.Storage.Update(t)
q.storage.Update(t)
return true
}
func (q *QBit) RemoveTorrentTags(t *Torrent, tags []string) bool {
func (q *QBit) removeTorrentTags(t *wire.Torrent, tags []string) bool {
torrentTags := strings.Split(t.Tags, ",")
newTorrentTags := utils.RemoveItem(torrentTags, tags...)
q.Tags = utils.RemoveItem(q.Tags, tags...)
t.Tags = strings.Join(newTorrentTags, ",")
q.Storage.Update(t)
q.storage.Update(t)
return true
}
func (q *QBit) AddTags(tags []string) bool {
func (q *QBit) addTags(tags []string) bool {
for _, tag := range tags {
if tag == "" {
continue
@@ -336,8 +121,3 @@ func (q *QBit) AddTags(tags []string) bool {
}
return true
}
func (q *QBit) RemoveTags(tags []string) bool {
q.Tags = utils.RemoveItem(q.Tags, tags...)
return true
}

Some files were not shown because too many files have changed in this diff Show More