Compare commits
76 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
a0e9f7f553 | ||
|
|
4be4f6b293 | ||
|
|
6c8949b831 | ||
|
|
0dd1efb07c | ||
|
|
3aeb806033 | ||
|
|
7c8156eacf | ||
|
|
d8a963f77f | ||
|
|
27e7bc8f47 | ||
|
|
1d243dd12b | ||
|
|
b4efa22bfd | ||
|
|
6f9fafd7d8 | ||
|
|
eba24c9d63 | ||
|
|
c620ba3d56 | ||
|
|
fab3a7e4f7 | ||
|
|
01615cb51e | ||
|
|
cb63fc69f5 | ||
|
|
40755fbdde | ||
|
|
d0ae839617 | ||
|
|
ce972779c3 | ||
|
|
139249a1f3 | ||
|
|
a60d93677f | ||
|
|
9c31ad266e | ||
|
|
3d2fcf5656 | ||
|
|
afe577bf2f | ||
|
|
604402250e | ||
|
|
74615a80ff | ||
|
|
b901bd5175 | ||
|
|
8c56e59107 | ||
|
|
b8b9e76753 | ||
|
|
6fb54d322e | ||
|
|
cf61546bec | ||
|
|
c72867ff57 | ||
|
|
fa6920f94a | ||
|
|
dba5604d79 | ||
|
|
f656b7e4e2 | ||
|
|
c7b07137c5 | ||
|
|
c0aa4eaeba | ||
|
|
2c90e518aa | ||
|
|
dec7d93272 | ||
|
|
8d092615db | ||
|
|
a4ee0973cc | ||
|
|
ab12610346 | ||
|
|
1d19be9013 | ||
|
|
cee0e20fe1 | ||
|
|
a3e698e04f | ||
|
|
e123a2fd5e | ||
|
|
817051589e | ||
|
|
705de2d2bc | ||
|
|
54c421a480 | ||
|
|
1b98b994b7 | ||
|
|
06096c3748 | ||
|
|
7474011ef0 | ||
|
|
086aa3b1ff | ||
|
|
c15e9d8f70 | ||
|
|
b2e99585f7 | ||
|
|
5661b05ec1 | ||
|
|
b7226b21ec | ||
|
|
605d5b81c2 | ||
|
|
8d87c602b9 | ||
|
|
7cf25f53e7 | ||
|
|
22280f15cf | ||
|
|
a539aa53bd | ||
|
|
3efda45304 | ||
|
|
5bf1dab5e6 | ||
|
|
84603b084b | ||
|
|
dfcf8708f1 | ||
|
|
30a1dd74a7 | ||
|
|
f041ef47a7 | ||
|
|
349a13468b | ||
|
|
9c6c44d785 | ||
|
|
1cd09239f9 | ||
|
|
f9c49cbbef | ||
|
|
60b8d87f1c | ||
|
|
fbd6cd5038 | ||
|
|
87bf8d0574 | ||
|
|
7f25599b60 |
10
.air.toml
@@ -7,16 +7,16 @@ tmp_dir = "tmp"
|
|||||||
bin = "./tmp/main"
|
bin = "./tmp/main"
|
||||||
cmd = "bash -c 'go build -ldflags \"-X github.com/sirrobot01/decypharr/pkg/version.Version=0.0.0 -X github.com/sirrobot01/decypharr/pkg/version.Channel=dev\" -o ./tmp/main .'"
|
cmd = "bash -c 'go build -ldflags \"-X github.com/sirrobot01/decypharr/pkg/version.Version=0.0.0 -X github.com/sirrobot01/decypharr/pkg/version.Channel=dev\" -o ./tmp/main .'"
|
||||||
delay = 1000
|
delay = 1000
|
||||||
exclude_dir = ["assets", "tmp", "vendor", "testdata", "data"]
|
exclude_dir = ["tmp", "vendor", "testdata", "data", "logs", "docs", "dist", "node_modules", ".ven"]
|
||||||
exclude_file = []
|
exclude_file = []
|
||||||
exclude_regex = ["_test.go"]
|
exclude_regex = ["_test.go"]
|
||||||
exclude_unchanged = false
|
exclude_unchanged = false
|
||||||
follow_symlink = false
|
follow_symlink = false
|
||||||
full_bin = ""
|
full_bin = ""
|
||||||
include_dir = []
|
include_dir = []
|
||||||
include_ext = ["go", "tpl", "tmpl", "html", ".json"]
|
include_ext = ["go", "tpl", "tmpl", "html", ".json", ".js", ".css"]
|
||||||
include_file = []
|
include_file = []
|
||||||
kill_delay = "0s"
|
kill_delay = "1s"
|
||||||
log = "build-errors.log"
|
log = "build-errors.log"
|
||||||
poll = false
|
poll = false
|
||||||
poll_interval = 0
|
poll_interval = 0
|
||||||
@@ -24,8 +24,8 @@ tmp_dir = "tmp"
|
|||||||
pre_cmd = []
|
pre_cmd = []
|
||||||
rerun = false
|
rerun = false
|
||||||
rerun_delay = 500
|
rerun_delay = 500
|
||||||
send_interrupt = false
|
send_interrupt = true
|
||||||
stop_on_error = false
|
stop_on_error = true
|
||||||
|
|
||||||
[color]
|
[color]
|
||||||
app = ""
|
app = ""
|
||||||
|
|||||||
@@ -11,3 +11,23 @@ torrents.json
|
|||||||
*.json
|
*.json
|
||||||
.ven/**
|
.ven/**
|
||||||
docs/**
|
docs/**
|
||||||
|
|
||||||
|
# Don't copy node modules
|
||||||
|
node_modules/
|
||||||
|
|
||||||
|
# Don't copy development files
|
||||||
|
.git/
|
||||||
|
.gitignore
|
||||||
|
*.md
|
||||||
|
.env*
|
||||||
|
*.log
|
||||||
|
|
||||||
|
# Build artifacts
|
||||||
|
decypharr
|
||||||
|
healthcheck
|
||||||
|
*.exe
|
||||||
|
.venv/
|
||||||
|
data/**
|
||||||
|
|
||||||
|
.stignore
|
||||||
|
.stfolder/**
|
||||||
76
.github/ISSUE_TEMPLATE/bug_report.yml
vendored
Normal file
@@ -0,0 +1,76 @@
|
|||||||
|
name: Bug Report
|
||||||
|
description: 'Report a new bug'
|
||||||
|
labels: ['Type: Bug', 'Status: Needs Triage']
|
||||||
|
body:
|
||||||
|
- type: checkboxes
|
||||||
|
attributes:
|
||||||
|
label: Is there an existing issue for this?
|
||||||
|
description: Please search to see if an open or closed issue already exists for the bug you encountered. If a bug exists and is closed note that it may only be fixed in an unstable branch.
|
||||||
|
options:
|
||||||
|
- label: I have searched the existing open and closed issues
|
||||||
|
required: true
|
||||||
|
- type: textarea
|
||||||
|
attributes:
|
||||||
|
label: Current Behavior
|
||||||
|
description: A concise description of what you're experiencing.
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
- type: textarea
|
||||||
|
attributes:
|
||||||
|
label: Expected Behavior
|
||||||
|
description: A concise description of what you expected to happen.
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
- type: textarea
|
||||||
|
attributes:
|
||||||
|
label: Steps To Reproduce
|
||||||
|
description: Steps to reproduce the behavior.
|
||||||
|
placeholder: |
|
||||||
|
1. In this environment...
|
||||||
|
2. With this config...
|
||||||
|
3. Run '...'
|
||||||
|
4. See error...
|
||||||
|
validations:
|
||||||
|
required: false
|
||||||
|
- type: textarea
|
||||||
|
attributes:
|
||||||
|
label: Environment
|
||||||
|
description: |
|
||||||
|
examples:
|
||||||
|
- **OS**: Ubuntu 20.04
|
||||||
|
- **Version**: v1.0.0
|
||||||
|
- **Docker Install**: Yes
|
||||||
|
- **Browser**: Firefox 90 (If UI related)
|
||||||
|
value: |
|
||||||
|
- OS:
|
||||||
|
- Version:
|
||||||
|
- Docker Install:
|
||||||
|
- Browser:
|
||||||
|
render: markdown
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
- type: dropdown
|
||||||
|
attributes:
|
||||||
|
label: What branch are you running?
|
||||||
|
options:
|
||||||
|
- Main/Latest
|
||||||
|
- Beta
|
||||||
|
- Experimental
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
- type: textarea
|
||||||
|
attributes:
|
||||||
|
label: Trace Logs? **Not Optional**
|
||||||
|
description: |
|
||||||
|
Trace Logs
|
||||||
|
- are **required** for bug reports
|
||||||
|
- are not optional
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
- type: checkboxes
|
||||||
|
attributes:
|
||||||
|
label: Trace Logs have been provided as applicable
|
||||||
|
description: Trace logs are **generally required** and are not optional for all bug reports and contain `trace`. Info logs are invalid for bug reports and do not contain `debug` nor `trace`
|
||||||
|
options:
|
||||||
|
- label: I have read and followed the steps in the documentation link and provided the required trace logs - the logs contain `trace` - that are relevant and show this issue.
|
||||||
|
required: true
|
||||||
38
.github/ISSUE_TEMPLATE/feature_request.yml
vendored
Normal file
@@ -0,0 +1,38 @@
|
|||||||
|
name: Feature Request
|
||||||
|
description: 'Suggest an idea for Decypharr'
|
||||||
|
labels: ['Type: Feature Request', 'Status: Needs Triage']
|
||||||
|
body:
|
||||||
|
- type: checkboxes
|
||||||
|
attributes:
|
||||||
|
label: Is there an existing issue for this?
|
||||||
|
description: Please search to see if an open or closed issue already exists for the feature you are requesting. If a request exists and is closed note that it may only be fixed in an unstable branch.
|
||||||
|
options:
|
||||||
|
- label: I have searched the existing open and closed issues
|
||||||
|
required: true
|
||||||
|
- type: textarea
|
||||||
|
attributes:
|
||||||
|
label: Is your feature request related to a problem? Please describe
|
||||||
|
description: A clear and concise description of what the problem is.
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
- type: textarea
|
||||||
|
attributes:
|
||||||
|
label: Describe the solution you'd like
|
||||||
|
description: A clear and concise description of what you want to happen.
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
- type: textarea
|
||||||
|
attributes:
|
||||||
|
label: Describe alternatives you've considered
|
||||||
|
description: A clear and concise description of any alternative solutions or features you've considered.
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
- type: textarea
|
||||||
|
attributes:
|
||||||
|
label: Anything else?
|
||||||
|
description: |
|
||||||
|
Links? References? Mockups? Anything that will give us more context about the feature you are encountering!
|
||||||
|
|
||||||
|
Tip: You can attach images or log files by clicking this area to highlight it and then dragging files in.
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
2
.github/workflows/deploy-docs.yml
vendored
@@ -24,5 +24,5 @@ jobs:
|
|||||||
path: .cache
|
path: .cache
|
||||||
restore-keys: |
|
restore-keys: |
|
||||||
mkdocs-material-
|
mkdocs-material-
|
||||||
- run: pip install mkdocs-material
|
- run: cd docs && pip install -r requirements.txt
|
||||||
- run: cd docs && mkdocs gh-deploy --force
|
- run: cd docs && mkdocs gh-deploy --force
|
||||||
7
.gitignore
vendored
@@ -12,4 +12,9 @@ tmp/**
|
|||||||
torrents.json
|
torrents.json
|
||||||
logs/**
|
logs/**
|
||||||
auth.json
|
auth.json
|
||||||
.ven/
|
.ven/
|
||||||
|
.env
|
||||||
|
node_modules/
|
||||||
|
.venv/
|
||||||
|
.stignore
|
||||||
|
.stfolder/**
|
||||||
37
Dockerfile
@@ -29,38 +29,37 @@ RUN --mount=type=cache,target=/go/pkg/mod \
|
|||||||
go build -trimpath -ldflags="-w -s" \
|
go build -trimpath -ldflags="-w -s" \
|
||||||
-o /healthcheck cmd/healthcheck/main.go
|
-o /healthcheck cmd/healthcheck/main.go
|
||||||
|
|
||||||
# Stage 2: Create directory structure
|
# Stage 2: Final image
|
||||||
FROM alpine:3.19 as dirsetup
|
FROM alpine:latest
|
||||||
RUN mkdir -p /app/logs && \
|
|
||||||
mkdir -p /app/cache && \
|
|
||||||
chmod 777 /app/logs && \
|
|
||||||
touch /app/logs/decypharr.log && \
|
|
||||||
chmod 666 /app/logs/decypharr.log
|
|
||||||
|
|
||||||
# Stage 3: Final image
|
ARG VERSION=0.0.0
|
||||||
FROM gcr.io/distroless/static-debian12:nonroot
|
ARG CHANNEL=dev
|
||||||
|
|
||||||
LABEL version = "${VERSION}-${CHANNEL}"
|
LABEL version = "${VERSION}-${CHANNEL}"
|
||||||
|
|
||||||
LABEL org.opencontainers.image.source = "https://github.com/sirrobot01/decypharr"
|
LABEL org.opencontainers.image.source = "https://github.com/sirrobot01/decypharr"
|
||||||
LABEL org.opencontainers.image.title = "decypharr"
|
LABEL org.opencontainers.image.title = "decypharr"
|
||||||
LABEL org.opencontainers.image.authors = "sirrobot01"
|
LABEL org.opencontainers.image.authors = "sirrobot01"
|
||||||
LABEL org.opencontainers.image.documentation = "https://github.com/sirrobot01/decypharr/blob/main/README.md"
|
LABEL org.opencontainers.image.documentation = "https://github.com/sirrobot01/decypharr/blob/main/README.md"
|
||||||
|
|
||||||
# Copy binaries
|
# Install dependencies including rclone
|
||||||
COPY --from=builder --chown=nonroot:nonroot /decypharr /usr/bin/decypharr
|
RUN apk add --no-cache fuse3 ca-certificates su-exec shadow rclone && \
|
||||||
COPY --from=builder --chown=nonroot:nonroot /healthcheck /usr/bin/healthcheck
|
echo "user_allow_other" >> /etc/fuse.conf
|
||||||
|
|
||||||
# Copy pre-made directory structure
|
# Copy binaries and entrypoint
|
||||||
COPY --from=dirsetup --chown=nonroot:nonroot /app /app
|
COPY --from=builder /decypharr /usr/bin/decypharr
|
||||||
|
COPY --from=builder /healthcheck /usr/bin/healthcheck
|
||||||
|
COPY scripts/entrypoint.sh /entrypoint.sh
|
||||||
|
RUN chmod +x /entrypoint.sh
|
||||||
|
|
||||||
|
# Set environment variables
|
||||||
# Metadata
|
ENV PUID=1000
|
||||||
|
ENV PGID=1000
|
||||||
ENV LOG_PATH=/app/logs
|
ENV LOG_PATH=/app/logs
|
||||||
|
|
||||||
EXPOSE 8282
|
EXPOSE 8282
|
||||||
VOLUME ["/app"]
|
VOLUME ["/app"]
|
||||||
USER nonroot:nonroot
|
|
||||||
|
|
||||||
HEALTHCHECK --interval=3s --retries=10 CMD ["/usr/bin/healthcheck", "--config", "/app"]
|
HEALTHCHECK --interval=10s --retries=10 CMD ["/usr/bin/healthcheck", "--config", "/app", "--basic"]
|
||||||
|
|
||||||
|
ENTRYPOINT ["/entrypoint.sh"]
|
||||||
CMD ["/usr/bin/decypharr", "--config", "/app"]
|
CMD ["/usr/bin/decypharr", "--config", "/app"]
|
||||||
52
README.md
@@ -6,16 +6,16 @@
|
|||||||
|
|
||||||
## What is Decypharr?
|
## What is Decypharr?
|
||||||
|
|
||||||
Decypharr combines the power of QBittorrent with popular Debrid services to enhance your media management. It provides a familiar interface for Sonarr, Radarr, and other \*Arr applications while leveraging the capabilities of Debrid providers.
|
Decypharr combines the power of QBittorrent with popular Debrid services to enhance your media management. It provides a familiar interface for Sonarr, Radarr, and other \*Arr applications.
|
||||||
|
|
||||||
## Features
|
## Features
|
||||||
|
|
||||||
- 🔄 Mock Qbittorent API that supports the Arrs (Sonarr, Radarr, Lidarr etc)
|
- Mock Qbittorent API that supports the Arrs (Sonarr, Radarr, Lidarr etc)
|
||||||
- 🖥️ Full-fledged UI for managing torrents
|
- Full-fledged UI for managing torrents
|
||||||
- 🛡️ Proxy support for filtering out un-cached Debrid torrents
|
- Multiple Debrid providers support
|
||||||
- 🔌 Multiple Debrid providers support
|
- WebDAV server support for each debrid provider
|
||||||
- 📁 WebDAV server support for each debrid provider
|
- Optional mounting of WebDAV to your system(using [Rclone](https://rclone.org/))
|
||||||
- 🔧 Repair Worker for missing files
|
- Repair Worker for missing files
|
||||||
|
|
||||||
## Supported Debrid Providers
|
## Supported Debrid Providers
|
||||||
|
|
||||||
@@ -29,22 +29,22 @@ Decypharr combines the power of QBittorrent with popular Debrid services to enha
|
|||||||
### Docker (Recommended)
|
### Docker (Recommended)
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
version: '3.7'
|
|
||||||
services:
|
services:
|
||||||
decypharr:
|
decypharr:
|
||||||
image: cy01/blackhole:latest # or cy01/blackhole:beta
|
image: cy01/blackhole:latest
|
||||||
container_name: decypharr
|
container_name: decypharr
|
||||||
ports:
|
ports:
|
||||||
- "8282:8282" # qBittorrent
|
- "8282:8282"
|
||||||
user: "1000:1000"
|
|
||||||
volumes:
|
volumes:
|
||||||
- /mnt/:/mnt
|
- /mnt/:/mnt:rshared
|
||||||
- ./configs/:/app # config.json must be in this directory
|
- ./configs/:/app # config.json must be in this directory
|
||||||
environment:
|
|
||||||
- PUID=1000
|
|
||||||
- PGID=1000
|
|
||||||
- UMASK=002
|
|
||||||
restart: unless-stopped
|
restart: unless-stopped
|
||||||
|
devices:
|
||||||
|
- /dev/fuse:/dev/fuse:rwm
|
||||||
|
cap_add:
|
||||||
|
- SYS_ADMIN
|
||||||
|
security_opt:
|
||||||
|
- apparmor:unconfined
|
||||||
```
|
```
|
||||||
|
|
||||||
## Documentation
|
## Documentation
|
||||||
@@ -62,25 +62,7 @@ The documentation includes:
|
|||||||
|
|
||||||
## Basic Configuration
|
## Basic Configuration
|
||||||
|
|
||||||
```json
|
You can configure Decypharr through the Web UI or by editing the `config.json` file directly.
|
||||||
{
|
|
||||||
"debrids": [
|
|
||||||
{
|
|
||||||
"name": "realdebrid",
|
|
||||||
"api_key": "your_api_key_here",
|
|
||||||
"folder": "/mnt/remote/realdebrid/__all__/",
|
|
||||||
"use_webdav": true
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"qbittorrent": {
|
|
||||||
"download_folder": "/mnt/symlinks/",
|
|
||||||
"categories": ["sonarr", "radarr"]
|
|
||||||
},
|
|
||||||
"use_auth": false,
|
|
||||||
"log_level": "info",
|
|
||||||
"port": "8282"
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
## Contributing
|
## Contributing
|
||||||
|
|
||||||
|
|||||||
@@ -7,11 +7,10 @@ import (
|
|||||||
"github.com/sirrobot01/decypharr/internal/logger"
|
"github.com/sirrobot01/decypharr/internal/logger"
|
||||||
"github.com/sirrobot01/decypharr/pkg/qbit"
|
"github.com/sirrobot01/decypharr/pkg/qbit"
|
||||||
"github.com/sirrobot01/decypharr/pkg/server"
|
"github.com/sirrobot01/decypharr/pkg/server"
|
||||||
"github.com/sirrobot01/decypharr/pkg/service"
|
"github.com/sirrobot01/decypharr/pkg/store"
|
||||||
"github.com/sirrobot01/decypharr/pkg/version"
|
"github.com/sirrobot01/decypharr/pkg/version"
|
||||||
"github.com/sirrobot01/decypharr/pkg/web"
|
"github.com/sirrobot01/decypharr/pkg/web"
|
||||||
"github.com/sirrobot01/decypharr/pkg/webdav"
|
"github.com/sirrobot01/decypharr/pkg/webdav"
|
||||||
"github.com/sirrobot01/decypharr/pkg/worker"
|
|
||||||
"net/http"
|
"net/http"
|
||||||
"os"
|
"os"
|
||||||
"runtime"
|
"runtime"
|
||||||
@@ -62,7 +61,7 @@ func Start(ctx context.Context) error {
|
|||||||
qb := qbit.New()
|
qb := qbit.New()
|
||||||
wd := webdav.New()
|
wd := webdav.New()
|
||||||
|
|
||||||
ui := web.New(qb).Routes()
|
ui := web.New().Routes()
|
||||||
webdavRoutes := wd.Routes()
|
webdavRoutes := wd.Routes()
|
||||||
qbitRoutes := qb.Routes()
|
qbitRoutes := qb.Routes()
|
||||||
|
|
||||||
@@ -74,9 +73,17 @@ func Start(ctx context.Context) error {
|
|||||||
}
|
}
|
||||||
srv := server.New(handlers)
|
srv := server.New(handlers)
|
||||||
|
|
||||||
|
reset := func() {
|
||||||
|
// Reset the store and services
|
||||||
|
qb.Reset()
|
||||||
|
store.Reset()
|
||||||
|
// refresh GC
|
||||||
|
runtime.GC()
|
||||||
|
}
|
||||||
|
|
||||||
done := make(chan struct{})
|
done := make(chan struct{})
|
||||||
go func(ctx context.Context) {
|
go func(ctx context.Context) {
|
||||||
if err := startServices(ctx, wd, srv); err != nil {
|
if err := startServices(ctx, cancelSvc, wd, srv); err != nil {
|
||||||
_log.Error().Err(err).Msg("Error starting services")
|
_log.Error().Err(err).Msg("Error starting services")
|
||||||
cancelSvc()
|
cancelSvc()
|
||||||
}
|
}
|
||||||
@@ -88,27 +95,23 @@ func Start(ctx context.Context) error {
|
|||||||
// graceful shutdown
|
// graceful shutdown
|
||||||
cancelSvc() // propagate to services
|
cancelSvc() // propagate to services
|
||||||
<-done // wait for them to finish
|
<-done // wait for them to finish
|
||||||
|
_log.Info().Msg("Decypharr has been stopped gracefully.")
|
||||||
|
reset() // reset store and services
|
||||||
return nil
|
return nil
|
||||||
|
|
||||||
case <-restartCh:
|
case <-restartCh:
|
||||||
cancelSvc() // tell existing services to shut down
|
cancelSvc() // tell existing services to shut down
|
||||||
_log.Info().Msg("Restarting Decypharr...")
|
_log.Info().Msg("Restarting Decypharr...")
|
||||||
<-done // wait for them to finish
|
<-done // wait for them to finish
|
||||||
qb.Reset()
|
_log.Info().Msg("Decypharr has been restarted.")
|
||||||
service.Reset()
|
reset() // reset store and services
|
||||||
|
|
||||||
// rebuild svcCtx off the original parent
|
// rebuild svcCtx off the original parent
|
||||||
svcCtx, cancelSvc = context.WithCancel(ctx)
|
svcCtx, cancelSvc = context.WithCancel(ctx)
|
||||||
runtime.GC()
|
|
||||||
|
|
||||||
config.Reload()
|
|
||||||
service.Reset()
|
|
||||||
// loop will restart services automatically
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func startServices(ctx context.Context, wd *webdav.WebDav, srv *server.Server) error {
|
func startServices(ctx context.Context, cancelSvc context.CancelFunc, wd *webdav.WebDav, srv *server.Server) error {
|
||||||
var wg sync.WaitGroup
|
var wg sync.WaitGroup
|
||||||
errChan := make(chan error)
|
errChan := make(chan error)
|
||||||
|
|
||||||
@@ -145,12 +148,17 @@ func startServices(ctx context.Context, wd *webdav.WebDav, srv *server.Server) e
|
|||||||
return srv.Start(ctx)
|
return srv.Start(ctx)
|
||||||
})
|
})
|
||||||
|
|
||||||
|
// Start rclone RC server if enabled
|
||||||
safeGo(func() error {
|
safeGo(func() error {
|
||||||
return worker.Start(ctx)
|
rcManager := store.Get().RcloneManager()
|
||||||
|
if rcManager == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return rcManager.Start(ctx)
|
||||||
})
|
})
|
||||||
|
|
||||||
safeGo(func() error {
|
safeGo(func() error {
|
||||||
arr := service.GetService().Arr
|
arr := store.Get().Arr()
|
||||||
if arr == nil {
|
if arr == nil {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@@ -159,9 +167,9 @@ func startServices(ctx context.Context, wd *webdav.WebDav, srv *server.Server) e
|
|||||||
|
|
||||||
if cfg := config.Get(); cfg.Repair.Enabled {
|
if cfg := config.Get(); cfg.Repair.Enabled {
|
||||||
safeGo(func() error {
|
safeGo(func() error {
|
||||||
r := service.GetService().Repair
|
repair := store.Get().Repair()
|
||||||
if r != nil {
|
if repair != nil {
|
||||||
if err := r.Start(ctx); err != nil {
|
if err := repair.Start(ctx); err != nil {
|
||||||
_log.Error().Err(err).Msg("repair failed")
|
_log.Error().Err(err).Msg("repair failed")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -169,6 +177,10 @@ func startServices(ctx context.Context, wd *webdav.WebDav, srv *server.Server) e
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
safeGo(func() error {
|
||||||
|
return store.Get().StartQueueSchedule(ctx)
|
||||||
|
})
|
||||||
|
|
||||||
go func() {
|
go func() {
|
||||||
wg.Wait()
|
wg.Wait()
|
||||||
close(errChan)
|
close(errChan)
|
||||||
@@ -178,7 +190,11 @@ func startServices(ctx context.Context, wd *webdav.WebDav, srv *server.Server) e
|
|||||||
for err := range errChan {
|
for err := range errChan {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
_log.Error().Err(err).Msg("Service error detected")
|
_log.Error().Err(err).Msg("Service error detected")
|
||||||
// Don't shut down the whole app
|
// If the error is critical, return it to stop the main loop
|
||||||
|
if ctx.Err() == nil {
|
||||||
|
_log.Error().Msg("Stopping services due to error")
|
||||||
|
cancelSvc() // Cancel the service context to stop all services
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|||||||
@@ -22,8 +22,14 @@ type HealthStatus struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func main() {
|
func main() {
|
||||||
var configPath string
|
var (
|
||||||
|
configPath string
|
||||||
|
isBasicCheck bool
|
||||||
|
debug bool
|
||||||
|
)
|
||||||
flag.StringVar(&configPath, "config", "/data", "path to the data folder")
|
flag.StringVar(&configPath, "config", "/data", "path to the data folder")
|
||||||
|
flag.BoolVar(&isBasicCheck, "basic", false, "perform basic health check without WebDAV")
|
||||||
|
flag.BoolVar(&debug, "debug", false, "enable debug mode for detailed output")
|
||||||
flag.Parse()
|
flag.Parse()
|
||||||
config.SetConfigPath(configPath)
|
config.SetConfigPath(configPath)
|
||||||
cfg := config.Get()
|
cfg := config.Get()
|
||||||
@@ -63,16 +69,17 @@ func main() {
|
|||||||
status.WebUI = true
|
status.WebUI = true
|
||||||
}
|
}
|
||||||
|
|
||||||
// Check WebDAV if enabled
|
if isBasicCheck {
|
||||||
if webdavPath != "" {
|
status.WebDAVService = checkBaseWebdav(ctx, baseUrl, port)
|
||||||
if checkWebDAV(ctx, baseUrl, port, webdavPath) {
|
} else {
|
||||||
|
// If not a basic check, check WebDAV with debrid path
|
||||||
|
if webdavPath != "" {
|
||||||
|
status.WebDAVService = checkDebridWebDAV(ctx, baseUrl, port, webdavPath)
|
||||||
|
} else {
|
||||||
|
// If no WebDAV path is set, consider it healthy
|
||||||
status.WebDAVService = true
|
status.WebDAVService = true
|
||||||
}
|
}
|
||||||
} else {
|
|
||||||
// If WebDAV is not enabled, consider it healthy
|
|
||||||
status.WebDAVService = true
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Determine overall status
|
// Determine overall status
|
||||||
// Consider the application healthy if core services are running
|
// Consider the application healthy if core services are running
|
||||||
status.OverallStatus = status.QbitAPI && status.WebUI
|
status.OverallStatus = status.QbitAPI && status.WebUI
|
||||||
@@ -81,7 +88,7 @@ func main() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Optional: output health status as JSON for logging
|
// Optional: output health status as JSON for logging
|
||||||
if os.Getenv("DEBUG") == "true" {
|
if debug {
|
||||||
statusJSON, _ := json.MarshalIndent(status, "", " ")
|
statusJSON, _ := json.MarshalIndent(status, "", " ")
|
||||||
fmt.Println(string(statusJSON))
|
fmt.Println(string(statusJSON))
|
||||||
}
|
}
|
||||||
@@ -132,7 +139,24 @@ func checkWebUI(ctx context.Context, baseUrl, port string) bool {
|
|||||||
return resp.StatusCode == http.StatusOK
|
return resp.StatusCode == http.StatusOK
|
||||||
}
|
}
|
||||||
|
|
||||||
func checkWebDAV(ctx context.Context, baseUrl, port, path string) bool {
|
func checkBaseWebdav(ctx context.Context, baseUrl, port string) bool {
|
||||||
|
url := fmt.Sprintf("http://localhost:%s%swebdav/", port, baseUrl)
|
||||||
|
req, err := http.NewRequestWithContext(ctx, "PROPFIND", url, nil)
|
||||||
|
if err != nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
resp, err := http.DefaultClient.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
return resp.StatusCode == http.StatusMultiStatus ||
|
||||||
|
resp.StatusCode == http.StatusOK
|
||||||
|
}
|
||||||
|
|
||||||
|
func checkDebridWebDAV(ctx context.Context, baseUrl, port, path string) bool {
|
||||||
url := fmt.Sprintf("http://localhost:%s%swebdav/%s", port, baseUrl, path)
|
url := fmt.Sprintf("http://localhost:%s%swebdav/%s", port, baseUrl, path)
|
||||||
req, err := http.NewRequestWithContext(ctx, "PROPFIND", url, nil)
|
req, err := http.NewRequestWithContext(ctx, "PROPFIND", url, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -145,5 +169,7 @@ func checkWebDAV(ctx context.Context, baseUrl, port, path string) bool {
|
|||||||
}
|
}
|
||||||
defer resp.Body.Close()
|
defer resp.Body.Close()
|
||||||
|
|
||||||
return resp.StatusCode == 207 || resp.StatusCode == http.StatusOK
|
return resp.StatusCode == http.StatusMultiStatus ||
|
||||||
|
resp.StatusCode == http.StatusOK
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|||||||
418
docs/docs/api-spec.yaml
Normal file
@@ -0,0 +1,418 @@
|
|||||||
|
openapi: 3.0.3
|
||||||
|
info:
|
||||||
|
title: Decypharr API
|
||||||
|
description: QbitTorrent with Debrid Support API
|
||||||
|
version: 1.0.0
|
||||||
|
contact:
|
||||||
|
name: Decypharr
|
||||||
|
url: https://github.com/sirrobot01/decypharr
|
||||||
|
|
||||||
|
servers:
|
||||||
|
- url: /api
|
||||||
|
description: API endpoints
|
||||||
|
|
||||||
|
security:
|
||||||
|
- cookieAuth: []
|
||||||
|
- bearerAuth: []
|
||||||
|
|
||||||
|
paths:
|
||||||
|
/arrs:
|
||||||
|
get:
|
||||||
|
summary: Get all configured Arrs
|
||||||
|
description: Retrieve a list of all configured Arr applications (Sonarr, Radarr, etc.)
|
||||||
|
tags:
|
||||||
|
- Arrs
|
||||||
|
responses:
|
||||||
|
'200':
|
||||||
|
description: Successfully retrieved Arrs
|
||||||
|
content:
|
||||||
|
application/json:
|
||||||
|
schema:
|
||||||
|
type: array
|
||||||
|
items:
|
||||||
|
$ref: '#/components/schemas/Arr'
|
||||||
|
|
||||||
|
/add:
|
||||||
|
post:
|
||||||
|
summary: Add content for processing
|
||||||
|
description: Add torrent files or magnet links for processing through debrid services
|
||||||
|
tags:
|
||||||
|
- Content
|
||||||
|
requestBody:
|
||||||
|
content:
|
||||||
|
multipart/form-data:
|
||||||
|
schema:
|
||||||
|
type: object
|
||||||
|
properties:
|
||||||
|
arr:
|
||||||
|
type: string
|
||||||
|
description: Name of the Arr application
|
||||||
|
action:
|
||||||
|
type: string
|
||||||
|
description: Action to perform
|
||||||
|
debrid:
|
||||||
|
type: string
|
||||||
|
description: Debrid service to use
|
||||||
|
callbackUrl:
|
||||||
|
type: string
|
||||||
|
description: Optional callback URL
|
||||||
|
downloadFolder:
|
||||||
|
type: string
|
||||||
|
description: Download folder path
|
||||||
|
downloadUncached:
|
||||||
|
type: boolean
|
||||||
|
description: Whether to download uncached content
|
||||||
|
urls:
|
||||||
|
type: string
|
||||||
|
description: Newline-separated URLs or magnet links
|
||||||
|
files:
|
||||||
|
type: array
|
||||||
|
items:
|
||||||
|
type: string
|
||||||
|
format: binary
|
||||||
|
description: Torrent files to upload
|
||||||
|
responses:
|
||||||
|
'200':
|
||||||
|
description: Content added successfully
|
||||||
|
content:
|
||||||
|
application/json:
|
||||||
|
schema:
|
||||||
|
type: object
|
||||||
|
properties:
|
||||||
|
results:
|
||||||
|
type: array
|
||||||
|
items:
|
||||||
|
$ref: '#/components/schemas/ImportRequest'
|
||||||
|
errors:
|
||||||
|
type: array
|
||||||
|
items:
|
||||||
|
type: string
|
||||||
|
'400':
|
||||||
|
description: Bad request
|
||||||
|
|
||||||
|
/repair:
|
||||||
|
post:
|
||||||
|
summary: Repair media
|
||||||
|
description: Start a repair process for specified media items
|
||||||
|
tags:
|
||||||
|
- Repair
|
||||||
|
requestBody:
|
||||||
|
required: true
|
||||||
|
content:
|
||||||
|
application/json:
|
||||||
|
schema:
|
||||||
|
$ref: '#/components/schemas/RepairRequest'
|
||||||
|
responses:
|
||||||
|
'200':
|
||||||
|
description: Repair started or completed
|
||||||
|
content:
|
||||||
|
application/json:
|
||||||
|
schema:
|
||||||
|
type: string
|
||||||
|
'400':
|
||||||
|
description: Bad request
|
||||||
|
'404':
|
||||||
|
description: Arr not found
|
||||||
|
'500':
|
||||||
|
description: Internal server error
|
||||||
|
|
||||||
|
/repair/jobs:
|
||||||
|
get:
|
||||||
|
summary: Get repair jobs
|
||||||
|
description: Retrieve all repair jobs
|
||||||
|
tags:
|
||||||
|
- Repair
|
||||||
|
responses:
|
||||||
|
'200':
|
||||||
|
description: Successfully retrieved repair jobs
|
||||||
|
content:
|
||||||
|
application/json:
|
||||||
|
schema:
|
||||||
|
type: array
|
||||||
|
items:
|
||||||
|
$ref: '#/components/schemas/RepairJob'
|
||||||
|
delete:
|
||||||
|
summary: Delete repair jobs
|
||||||
|
description: Delete multiple repair jobs by IDs
|
||||||
|
tags:
|
||||||
|
- Repair
|
||||||
|
requestBody:
|
||||||
|
required: true
|
||||||
|
content:
|
||||||
|
application/json:
|
||||||
|
schema:
|
||||||
|
type: object
|
||||||
|
properties:
|
||||||
|
ids:
|
||||||
|
type: array
|
||||||
|
items:
|
||||||
|
type: string
|
||||||
|
required:
|
||||||
|
- ids
|
||||||
|
responses:
|
||||||
|
'200':
|
||||||
|
description: Jobs deleted successfully
|
||||||
|
'400':
|
||||||
|
description: Bad request
|
||||||
|
|
||||||
|
/repair/jobs/{id}/process:
|
||||||
|
post:
|
||||||
|
summary: Process repair job
|
||||||
|
description: Process a specific repair job by ID
|
||||||
|
tags:
|
||||||
|
- Repair
|
||||||
|
parameters:
|
||||||
|
- name: id
|
||||||
|
in: path
|
||||||
|
required: true
|
||||||
|
schema:
|
||||||
|
type: string
|
||||||
|
description: Job ID
|
||||||
|
responses:
|
||||||
|
'200':
|
||||||
|
description: Job processed successfully
|
||||||
|
'400':
|
||||||
|
description: Bad request
|
||||||
|
|
||||||
|
/repair/jobs/{id}/stop:
|
||||||
|
post:
|
||||||
|
summary: Stop repair job
|
||||||
|
description: Stop a running repair job by ID
|
||||||
|
tags:
|
||||||
|
- Repair
|
||||||
|
parameters:
|
||||||
|
- name: id
|
||||||
|
in: path
|
||||||
|
required: true
|
||||||
|
schema:
|
||||||
|
type: string
|
||||||
|
description: Job ID
|
||||||
|
responses:
|
||||||
|
'200':
|
||||||
|
description: Job stopped successfully
|
||||||
|
'400':
|
||||||
|
description: Bad request
|
||||||
|
'500':
|
||||||
|
description: Internal server error
|
||||||
|
|
||||||
|
/torrents:
|
||||||
|
get:
|
||||||
|
summary: Get all torrents
|
||||||
|
description: Retrieve all torrents sorted by added date
|
||||||
|
tags:
|
||||||
|
- Torrents
|
||||||
|
responses:
|
||||||
|
'200':
|
||||||
|
description: Successfully retrieved torrents
|
||||||
|
content:
|
||||||
|
application/json:
|
||||||
|
schema:
|
||||||
|
type: array
|
||||||
|
items:
|
||||||
|
$ref: '#/components/schemas/Torrent'
|
||||||
|
delete:
|
||||||
|
summary: Delete multiple torrents
|
||||||
|
description: Delete multiple torrents by hash list
|
||||||
|
tags:
|
||||||
|
- Torrents
|
||||||
|
parameters:
|
||||||
|
- name: hashes
|
||||||
|
in: query
|
||||||
|
required: true
|
||||||
|
schema:
|
||||||
|
type: string
|
||||||
|
description: Comma-separated list of torrent hashes
|
||||||
|
- name: removeFromDebrid
|
||||||
|
in: query
|
||||||
|
schema:
|
||||||
|
type: boolean
|
||||||
|
default: false
|
||||||
|
description: Whether to remove from debrid service
|
||||||
|
responses:
|
||||||
|
'200':
|
||||||
|
description: Torrents deleted successfully
|
||||||
|
'400':
|
||||||
|
description: Bad request
|
||||||
|
|
||||||
|
/torrents/{category}/{hash}:
|
||||||
|
delete:
|
||||||
|
summary: Delete single torrent
|
||||||
|
description: Delete a specific torrent by category and hash
|
||||||
|
tags:
|
||||||
|
- Torrents
|
||||||
|
parameters:
|
||||||
|
- name: category
|
||||||
|
in: path
|
||||||
|
required: true
|
||||||
|
schema:
|
||||||
|
type: string
|
||||||
|
description: Torrent category
|
||||||
|
- name: hash
|
||||||
|
in: path
|
||||||
|
required: true
|
||||||
|
schema:
|
||||||
|
type: string
|
||||||
|
description: Torrent hash
|
||||||
|
- name: removeFromDebrid
|
||||||
|
in: query
|
||||||
|
schema:
|
||||||
|
type: boolean
|
||||||
|
default: false
|
||||||
|
description: Whether to remove from debrid service
|
||||||
|
responses:
|
||||||
|
'200':
|
||||||
|
description: Torrent deleted successfully
|
||||||
|
'400':
|
||||||
|
description: Bad request
|
||||||
|
|
||||||
|
components:
|
||||||
|
securitySchemes:
|
||||||
|
cookieAuth:
|
||||||
|
type: apiKey
|
||||||
|
in: cookie
|
||||||
|
name: auth-session
|
||||||
|
bearerAuth:
|
||||||
|
type: http
|
||||||
|
scheme: bearer
|
||||||
|
bearerFormat: token
|
||||||
|
description: API token for authentication
|
||||||
|
|
||||||
|
schemas:
|
||||||
|
Arr:
|
||||||
|
type: object
|
||||||
|
properties:
|
||||||
|
name:
|
||||||
|
type: string
|
||||||
|
description: Name of the Arr application
|
||||||
|
host:
|
||||||
|
type: string
|
||||||
|
description: Host URL of the Arr application
|
||||||
|
token:
|
||||||
|
type: string
|
||||||
|
description: API token for the Arr application
|
||||||
|
cleanup:
|
||||||
|
type: boolean
|
||||||
|
description: Whether to cleanup after processing
|
||||||
|
skipRepair:
|
||||||
|
type: boolean
|
||||||
|
description: Whether to skip repair operations
|
||||||
|
downloadUncached:
|
||||||
|
type: boolean
|
||||||
|
description: Whether to download uncached content
|
||||||
|
selectedDebrid:
|
||||||
|
type: string
|
||||||
|
description: Selected debrid service
|
||||||
|
source:
|
||||||
|
type: string
|
||||||
|
description: Source of the Arr configuration
|
||||||
|
|
||||||
|
ImportRequest:
|
||||||
|
type: object
|
||||||
|
properties:
|
||||||
|
debridName:
|
||||||
|
type: string
|
||||||
|
description: Name of the debrid service
|
||||||
|
downloadFolder:
|
||||||
|
type: string
|
||||||
|
description: Download folder path
|
||||||
|
magnet:
|
||||||
|
type: string
|
||||||
|
description: Magnet link
|
||||||
|
arr:
|
||||||
|
$ref: '#/components/schemas/Arr'
|
||||||
|
action:
|
||||||
|
type: string
|
||||||
|
description: Action to perform
|
||||||
|
downloadUncached:
|
||||||
|
type: boolean
|
||||||
|
description: Whether to download uncached content
|
||||||
|
callbackUrl:
|
||||||
|
type: string
|
||||||
|
description: Callback URL
|
||||||
|
importType:
|
||||||
|
type: string
|
||||||
|
description: Type of import (API, etc.)
|
||||||
|
|
||||||
|
RepairRequest:
|
||||||
|
type: object
|
||||||
|
properties:
|
||||||
|
arrName:
|
||||||
|
type: string
|
||||||
|
description: Name of the Arr application
|
||||||
|
mediaIds:
|
||||||
|
type: array
|
||||||
|
items:
|
||||||
|
type: string
|
||||||
|
description: List of media IDs to repair
|
||||||
|
autoProcess:
|
||||||
|
type: boolean
|
||||||
|
description: Whether to auto-process the repair
|
||||||
|
async:
|
||||||
|
type: boolean
|
||||||
|
description: Whether to run repair asynchronously
|
||||||
|
required:
|
||||||
|
- arrName
|
||||||
|
|
||||||
|
RepairJob:
|
||||||
|
type: object
|
||||||
|
properties:
|
||||||
|
id:
|
||||||
|
type: string
|
||||||
|
description: Job ID
|
||||||
|
status:
|
||||||
|
type: string
|
||||||
|
description: Job status
|
||||||
|
arrName:
|
||||||
|
type: string
|
||||||
|
description: Associated Arr application
|
||||||
|
mediaIds:
|
||||||
|
type: array
|
||||||
|
items:
|
||||||
|
type: string
|
||||||
|
description: Media IDs being repaired
|
||||||
|
createdAt:
|
||||||
|
type: string
|
||||||
|
format: date-time
|
||||||
|
description: Job creation timestamp
|
||||||
|
|
||||||
|
Torrent:
|
||||||
|
type: object
|
||||||
|
properties:
|
||||||
|
hash:
|
||||||
|
type: string
|
||||||
|
description: Torrent hash
|
||||||
|
name:
|
||||||
|
type: string
|
||||||
|
description: Torrent name
|
||||||
|
category:
|
||||||
|
type: string
|
||||||
|
description: Torrent category
|
||||||
|
addedOn:
|
||||||
|
type: string
|
||||||
|
format: date-time
|
||||||
|
description: Date when torrent was added
|
||||||
|
size:
|
||||||
|
type: integer
|
||||||
|
description: Torrent size in bytes
|
||||||
|
progress:
|
||||||
|
type: number
|
||||||
|
format: float
|
||||||
|
description: Download progress (0-1)
|
||||||
|
status:
|
||||||
|
type: string
|
||||||
|
description: Torrent status
|
||||||
|
|
||||||
|
|
||||||
|
tags:
|
||||||
|
- name: Arrs
|
||||||
|
description: Arr application management
|
||||||
|
- name: Content
|
||||||
|
description: Content addition and processing
|
||||||
|
- name: Repair
|
||||||
|
description: Media repair operations
|
||||||
|
- name: Torrents
|
||||||
|
description: Torrent management
|
||||||
|
- name: Configuration
|
||||||
|
description: Application configuration
|
||||||
|
- name: Authentication
|
||||||
|
description: API token management
|
||||||
90
docs/docs/api.md
Normal file
@@ -0,0 +1,90 @@
|
|||||||
|
# API Documentation
|
||||||
|
|
||||||
|
Decypharr provides a RESTful API for managing torrents, debrid services, and Arr integrations. The API requires authentication and all endpoints are prefixed with `/api`.
|
||||||
|
|
||||||
|
## Authentication
|
||||||
|
|
||||||
|
The API supports two authentication methods:
|
||||||
|
|
||||||
|
### 1. Session-based Authentication (Cookies)
|
||||||
|
Log in through the web interface (`/login`) to establish an authenticated session. The session cookie (`auth-session`) will be automatically included in subsequent API requests from the same browser session.
|
||||||
|
|
||||||
|
### 2. API Token Authentication (Bearer Token)
|
||||||
|
Use API tokens for programmatic access. Include the token in the `Authorization` header for each request:
|
||||||
|
|
||||||
|
- `Authorization: Bearer <your-token>`
|
||||||
|
|
||||||
|
## Interactive API Documentation
|
||||||
|
|
||||||
|
<swagger-ui src="api-spec.yaml"/>
|
||||||
|
|
||||||
|
## API Endpoints Overview
|
||||||
|
|
||||||
|
### Arrs Management
|
||||||
|
- `GET /api/arrs` - Get all configured Arr applications (Sonarr, Radarr, etc.)
|
||||||
|
|
||||||
|
### Content Management
|
||||||
|
- `POST /api/add` - Add torrent files or magnet links for processing through debrid services
|
||||||
|
|
||||||
|
### Repair Operations
|
||||||
|
- `POST /api/repair` - Start repair process for media items
|
||||||
|
- `GET /api/repair/jobs` - Get all repair jobs
|
||||||
|
- `POST /api/repair/jobs/{id}/process` - Process a specific repair job
|
||||||
|
- `POST /api/repair/jobs/{id}/stop` - Stop a running repair job
|
||||||
|
- `DELETE /api/repair/jobs` - Delete multiple repair jobs
|
||||||
|
|
||||||
|
### Torrent Management
|
||||||
|
- `GET /api/torrents` - Get all torrents
|
||||||
|
- `DELETE /api/torrents/{category}/{hash}` - Delete a specific torrent
|
||||||
|
- `DELETE /api/torrents` - Delete multiple torrents
|
||||||
|
|
||||||
|
## Usage Examples
|
||||||
|
|
||||||
|
### Adding Content via API
|
||||||
|
|
||||||
|
#### Using API Token:
|
||||||
|
```bash
|
||||||
|
curl -H "Authorization: Bearer $API_TOKEN" -X POST http://localhost:8080/api/add \
|
||||||
|
-F "arr=sonarr" \
|
||||||
|
-F "debrid=realdebrid" \
|
||||||
|
-F "urls=magnet:?xt=urn:btih:..." \
|
||||||
|
-F "downloadUncached=true" \
|
||||||
|
-F "file=@/path/to/torrent/file.torrent" \
|
||||||
|
-F "callbackUrl=http://your.callback.url/endpoint"
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Using Session Cookies:
|
||||||
|
```bash
|
||||||
|
# Login first (this sets the session cookie)
|
||||||
|
curl -c cookies.txt -X POST http://localhost:8080/login \
|
||||||
|
-H "Content-Type: application/json" \
|
||||||
|
-d '{"username": "your_username", "password": "your_password"}'
|
||||||
|
|
||||||
|
# Then use the session cookie for API calls
|
||||||
|
curl -b cookies.txt -X POST http://localhost:8080/api/add \
|
||||||
|
-F "arr=sonarr" \
|
||||||
|
-F "debrid=realdebrid" \
|
||||||
|
-F "urls=magnet:?xt=urn:btih:..." \
|
||||||
|
-F "downloadUncached=true"
|
||||||
|
```
|
||||||
|
|
||||||
|
### Getting Torrents
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# With API token
|
||||||
|
curl -H "Authorization: Bearer $API_TOKEN" -X GET http://localhost:8080/api/torrents
|
||||||
|
```
|
||||||
|
|
||||||
|
### Starting a Repair Job
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# With API token
|
||||||
|
curl -H "Authorization: Bearer $API_TOKEN" -X POST http://localhost:8080/api/repair \
|
||||||
|
-H "Content-Type: application/json" \
|
||||||
|
-d '{
|
||||||
|
"arrName": "sonarr",
|
||||||
|
"mediaIds": ["123", "456"],
|
||||||
|
"autoProcess": true,
|
||||||
|
"async": true
|
||||||
|
}'
|
||||||
|
```
|
||||||
@@ -1,186 +0,0 @@
|
|||||||
# Changelog
|
|
||||||
|
|
||||||
## 1.0.0
|
|
||||||
|
|
||||||
- Add WebDAV support for debrid providers
|
|
||||||
- Some refactoring and code cleanup
|
|
||||||
- Fixes
|
|
||||||
- Fix Alldebrid not downloading torrents
|
|
||||||
- Fix Alldebrid not downloading uncached torrents
|
|
||||||
- Fix uncached torrents not being downloaded for RealDebrid
|
|
||||||
- Add support for multiple download API keys for debrid providers
|
|
||||||
- Add support for editable config.json via the UI
|
|
||||||
- Fix downloading timeout
|
|
||||||
- Fix UMASK for Windows
|
|
||||||
- Retries 50x(except 503) errors for RD
|
|
||||||
|
|
||||||
|
|
||||||
## 0.5.0
|
|
||||||
|
|
||||||
- A more refined repair worker (with more control)
|
|
||||||
- UI Improvements
|
|
||||||
- Pagination for torrents
|
|
||||||
- Dark mode
|
|
||||||
- Ordered torrents table
|
|
||||||
- Fix Arr API flaky behavior
|
|
||||||
- Discord Notifications
|
|
||||||
- Minor bug fixes
|
|
||||||
- Add Tautulli support
|
|
||||||
- playback_failed event triggers a repair
|
|
||||||
- Miscellaneous improvements
|
|
||||||
- Add an option to skip the repair worker for a specific arr
|
|
||||||
- Arr specific uncached downloading option
|
|
||||||
- Option to download uncached torrents from UI
|
|
||||||
- Remove QbitTorrent Log level (Use the global log level)
|
|
||||||
|
|
||||||
## 0.4.2
|
|
||||||
|
|
||||||
- Hotfixes
|
|
||||||
- Fix saving torrents error
|
|
||||||
- Fix bugs with the UI
|
|
||||||
- Speed improvements
|
|
||||||
|
|
||||||
## 0.4.1
|
|
||||||
|
|
||||||
- Adds optional UI authentication
|
|
||||||
- Downloaded Torrents persist on restart
|
|
||||||
- Fixes
|
|
||||||
- Fix Alldebrid struggling to find the correct file
|
|
||||||
- Minor bug fixes or speed-gains
|
|
||||||
- A new cleanup worker to clean up ARR queues
|
|
||||||
|
|
||||||
## 0.4.0
|
|
||||||
|
|
||||||
- Add support for multiple debrid providers
|
|
||||||
- A full-fledged UI for adding torrents, repairing files, viewing config and managing torrents
|
|
||||||
- Fix issues with Alldebrid
|
|
||||||
- Fix file traversal bug
|
|
||||||
- Fix files with no parent directory
|
|
||||||
- Logging
|
|
||||||
- Add a more robust logging system
|
|
||||||
- Add logging to a file
|
|
||||||
- Add logging to the UI
|
|
||||||
- Qbittorrent
|
|
||||||
- Add support for tags (creating, deleting, listing)
|
|
||||||
- Add support for categories (creating, deleting, listing)
|
|
||||||
- Fix issues with arr sending torrents using a different content type
|
|
||||||
|
|
||||||
## 0.3.3
|
|
||||||
|
|
||||||
- Add AllDebrid Support
|
|
||||||
- Fix Torbox not downloading uncached torrents
|
|
||||||
- Fix Rar files being downloaded
|
|
||||||
|
|
||||||
## 0.3.2
|
|
||||||
|
|
||||||
- Fix DebridLink not downloading
|
|
||||||
- Fix Torbox with uncached torrents
|
|
||||||
- Add new /internal/cached endpoint to check if a hash is cached
|
|
||||||
- Implement per-debrid local cache
|
|
||||||
- Fix file check for torbox
|
|
||||||
- Other minor bug fixes
|
|
||||||
|
|
||||||
## 0.3.1
|
|
||||||
|
|
||||||
- Add DebridLink Support
|
|
||||||
- Refactor error handling
|
|
||||||
|
|
||||||
## 0.3.0
|
|
||||||
|
|
||||||
- Add UI for adding torrents
|
|
||||||
- Refactoring of the code
|
|
||||||
- Fix Torbox bug
|
|
||||||
- Update CI/CD
|
|
||||||
- Update Readme
|
|
||||||
|
|
||||||
## 0.2.7
|
|
||||||
|
|
||||||
- Add support for multiple debrid providers
|
|
||||||
- Add Torbox support
|
|
||||||
- Add support for configurable debrid cache checks
|
|
||||||
- Add support for configurable debrid download uncached torrents
|
|
||||||
|
|
||||||
## 0.2.6
|
|
||||||
|
|
||||||
- Delete torrent for empty matched files
|
|
||||||
- Update Readme
|
|
||||||
|
|
||||||
## 0.2.5
|
|
||||||
|
|
||||||
- Fix ContentPath not being set prior
|
|
||||||
- Rewrote Readme
|
|
||||||
- Cleaned up the code
|
|
||||||
|
|
||||||
## 0.2.4
|
|
||||||
|
|
||||||
- Add file download support (Sequential Download)
|
|
||||||
- Fix http handler error
|
|
||||||
- Fix *arrs map failing concurrently
|
|
||||||
- Fix cache not being updated
|
|
||||||
|
|
||||||
## 0.2.3
|
|
||||||
|
|
||||||
- Delete uncached items from RD
|
|
||||||
- Fail if the torrent is not cached (optional)
|
|
||||||
- Fix cache not being updated
|
|
||||||
|
|
||||||
## 0.2.2
|
|
||||||
|
|
||||||
- Fix name mismatch in the cache
|
|
||||||
- Fix directory mapping with mounts
|
|
||||||
- Add Support for refreshing the *arrs
|
|
||||||
|
|
||||||
## 0.2.1
|
|
||||||
|
|
||||||
- Fix Uncached torrents not being downloaded/downloaded
|
|
||||||
- Minor bug fixed
|
|
||||||
- Fix Race condition in the cache and file system
|
|
||||||
|
|
||||||
## 0.2.0
|
|
||||||
|
|
||||||
- Implement 0.2.0-beta changes
|
|
||||||
- Removed Blackhole
|
|
||||||
- Added QbitTorrent API
|
|
||||||
- Cleaned up the code
|
|
||||||
|
|
||||||
## 0.2.0-beta
|
|
||||||
|
|
||||||
- Switch to QbitTorrent API instead of Blackhole
|
|
||||||
- Rewrote the whole codebase
|
|
||||||
|
|
||||||
## 0.1.4
|
|
||||||
|
|
||||||
- Rewrote Report log
|
|
||||||
- Fix YTS, 1337x not grabbing infohash
|
|
||||||
- Fix Torrent symlink bug
|
|
||||||
|
|
||||||
## 0.1.3
|
|
||||||
|
|
||||||
- Searching for infohashes in the xml description/summary/comments
|
|
||||||
- Added local cache support
|
|
||||||
- Added max cache size
|
|
||||||
- Rewrite blackhole.go
|
|
||||||
- Bug fixes
|
|
||||||
- Fixed indexer getting disabled
|
|
||||||
- Fixed blackhole not working
|
|
||||||
|
|
||||||
## 0.1.2
|
|
||||||
|
|
||||||
- Bug fixes
|
|
||||||
- Code cleanup
|
|
||||||
- Get available hashes at once
|
|
||||||
|
|
||||||
## 0.1.1
|
|
||||||
|
|
||||||
- Added support for "No Blackhole" for Arrs
|
|
||||||
- Added support for "Cached Only" for Proxy
|
|
||||||
- Bug Fixes
|
|
||||||
|
|
||||||
## 0.1.0
|
|
||||||
|
|
||||||
- Initial Release
|
|
||||||
- Added Real Debrid Support
|
|
||||||
- Added Arrs Support
|
|
||||||
- Added Proxy Support
|
|
||||||
- Added Basic Authentication for Proxy
|
|
||||||
- Added Rate Limiting for Debrid Providers
|
|
||||||
@@ -1,77 +0,0 @@
|
|||||||
# Arr Applications Configuration
|
|
||||||
|
|
||||||
Decypharr can integrate directly with Sonarr, Radarr, and other Arr applications. This section explains how to configure the Arr integration in your `config.json` file.
|
|
||||||
|
|
||||||
## Basic Configuration
|
|
||||||
|
|
||||||
The Arr applications are configured under the `arrs` key:
|
|
||||||
|
|
||||||
```json
|
|
||||||
"arrs": [
|
|
||||||
{
|
|
||||||
"name": "sonarr",
|
|
||||||
"host": "http://sonarr:8989",
|
|
||||||
"token": "your-sonarr-api-key",
|
|
||||||
"cleanup": true
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "radarr",
|
|
||||||
"host": "http://radarr:7878",
|
|
||||||
"token": "your-radarr-api-key",
|
|
||||||
"cleanup": true
|
|
||||||
}
|
|
||||||
]
|
|
||||||
```
|
|
||||||
|
|
||||||
### Note
|
|
||||||
This configuration is optional if you've already set up the qBittorrent client in your Arr applications with the correct host and token information. It's particularly useful for the Repair Worker functionality.
|
|
||||||
|
|
||||||
|
|
||||||
### Configuration Options
|
|
||||||
Each Arr application supports the following options:
|
|
||||||
|
|
||||||
- `name`: The name of the Arr application, which should match the category in qBittorrent
|
|
||||||
- `host`: The host URL of the Arr application, including protocol and port
|
|
||||||
- `token`: The API token/key of the Arr application
|
|
||||||
- `cleanup`: Whether to clean up the Arr queue (removes completed downloads). This is only useful for Sonarr.
|
|
||||||
- `skip_repair`: Automated repair will be skipped for this *arr.
|
|
||||||
- `download_uncached`: Whether to download uncached torrents (defaults to debrid/manual setting)
|
|
||||||
|
|
||||||
### Finding Your API Key
|
|
||||||
#### Sonarr/Radarr/Lidarr
|
|
||||||
|
|
||||||
1. Go to Sonarr > Settings > General
|
|
||||||
2. Look for "API Key" in the "Security" section
|
|
||||||
3. Copy the API key
|
|
||||||
|
|
||||||
### Multiple Arr Applications
|
|
||||||
You can configure multiple Arr applications by adding more entries to the arrs array:
|
|
||||||
|
|
||||||
```json
|
|
||||||
"arrs": [
|
|
||||||
{
|
|
||||||
"name": "sonarr",
|
|
||||||
"host": "http://sonarr:8989",
|
|
||||||
"token": "your-sonarr-api-key",
|
|
||||||
"cleanup": true
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "sonarr-anime",
|
|
||||||
"host": "http://sonarr-anime:8989",
|
|
||||||
"token": "your-sonarr-anime-api-key",
|
|
||||||
"cleanup": true
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "radarr",
|
|
||||||
"host": "http://radarr:7878",
|
|
||||||
"token": "your-radarr-api-key",
|
|
||||||
"cleanup": false
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "lidarr",
|
|
||||||
"host": "http://lidarr:8686",
|
|
||||||
"token": "your-lidarr-api-key",
|
|
||||||
"cleanup": false
|
|
||||||
}
|
|
||||||
]
|
|
||||||
```
|
|
||||||
@@ -1,131 +0,0 @@
|
|||||||
|
|
||||||
# Debrid Providers Configuration
|
|
||||||
|
|
||||||
Decypharr supports multiple Debrid providers. This section explains how to configure each provider in your `config.json` file.
|
|
||||||
|
|
||||||
## Basic Configuration
|
|
||||||
|
|
||||||
Each Debrid provider is configured in the `debrids` array:
|
|
||||||
|
|
||||||
```json
|
|
||||||
"debrids": [
|
|
||||||
{
|
|
||||||
"name": "realdebrid",
|
|
||||||
"api_key": "your-api-key",
|
|
||||||
"folder": "/mnt/remote/realdebrid/__all__/"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "alldebrid",
|
|
||||||
"api_key": "your-api-key",
|
|
||||||
"folder": "/mnt/remote/alldebrid/downloads/"
|
|
||||||
}
|
|
||||||
]
|
|
||||||
```
|
|
||||||
|
|
||||||
### Provider Options
|
|
||||||
|
|
||||||
Each Debrid provider accepts the following configuration options:
|
|
||||||
|
|
||||||
|
|
||||||
#### Basic(Required) Options
|
|
||||||
|
|
||||||
- `name`: The name of the Debrid provider (realdebrid, alldebrid, debridlink, torbox)
|
|
||||||
- `host`: The API endpoint of the Debrid provider
|
|
||||||
- `api_key`: Your API key for the Debrid service (can be comma-separated for multiple keys)
|
|
||||||
- `folder`: The folder where your Debrid content is mounted (via webdav, rclone, zurg, etc.)
|
|
||||||
|
|
||||||
#### Advanced Options
|
|
||||||
|
|
||||||
- `rate_limit`: Rate limit for API requests (null by default)
|
|
||||||
- `download_uncached`: Whether to download uncached torrents (disabled by default)
|
|
||||||
- `check_cached`: Whether to check if torrents are cached (disabled by default)
|
|
||||||
- `use_webdav`: Whether to create a WebDAV server for this Debrid provider (disabled by default)
|
|
||||||
- `proxy`: Proxy URL for the Debrid provider (optional)
|
|
||||||
|
|
||||||
#### WebDAV and Rclone Options
|
|
||||||
- `torrents_refresh_interval`: Interval for refreshing torrent data (e.g., `15s`, `1m`, `1h`).
|
|
||||||
- `download_links_refresh_interval`: Interval for refreshing download links (e.g., `40m`, `1h`).
|
|
||||||
- `workers`: Number of concurrent workers for processing requests.
|
|
||||||
- `serve_from_rclone`: Whether to serve files directly from Rclone (disabled by default)
|
|
||||||
- `add_samples`: Whether to add sample files when adding torrents to debrid (disabled by default)
|
|
||||||
- `folder_naming`: Naming convention for folders:
|
|
||||||
- `original_no_ext`: Original file name without extension
|
|
||||||
- `original`: Original file name with extension
|
|
||||||
- `filename`: Torrent filename
|
|
||||||
- `filename_no_ext`: Torrent filename without extension
|
|
||||||
- `id`: Torrent ID
|
|
||||||
- `hash`: Torrent hash
|
|
||||||
- `auto_expire_links_after`: Time after which download links will expire (e.g., `3d`, `1w`).
|
|
||||||
- `rc_url`, `rc_user`, `rc_pass`, `rc_refresh_dirs`: Rclone RC configuration for VFS refreshes
|
|
||||||
- `directories`: A map of virtual folders to serve via the WebDAV server. The key is the virtual folder name, and the value is a map of filters and their values
|
|
||||||
|
|
||||||
#### Example of `directories` configuration
|
|
||||||
```json
|
|
||||||
"directories": {
|
|
||||||
"Newly Added": {
|
|
||||||
"filters": {
|
|
||||||
"exclude": "9-1-1",
|
|
||||||
"last_added": "20h"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"Spiderman Collection": {
|
|
||||||
"filters": {
|
|
||||||
"regex": "(?i)spider[-\\s]?man(\\s+collection|\\s+\\d|\\s+trilogy|\\s+complete|\\s+ultimate|\\s+box\\s+set|:?\\s+homecoming|:?\\s+far\\s+from\\s+home|:?\\s+no\\s+way\\s+home)"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
### Example Configuration
|
|
||||||
|
|
||||||
#### Real Debrid
|
|
||||||
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"name": "realdebrid",
|
|
||||||
"api_key": "your-api-key",
|
|
||||||
"folder": "/mnt/remote/realdebrid/__all__/",
|
|
||||||
"rate_limit": null,
|
|
||||||
"download_uncached": false,
|
|
||||||
"use_webdav": true
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
#### All Debrid
|
|
||||||
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"name": "alldebrid",
|
|
||||||
"api_key": "your-api-key",
|
|
||||||
"folder": "/mnt/remote/alldebrid/torrents/",
|
|
||||||
"rate_limit": null,
|
|
||||||
"download_uncached": false,
|
|
||||||
"use_webdav": true
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Debrid Link
|
|
||||||
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"name": "debridlink",
|
|
||||||
"api_key": "your-api-key",
|
|
||||||
"folder": "/mnt/remote/debridlink/torrents/",
|
|
||||||
"rate_limit": null,
|
|
||||||
"download_uncached": false,
|
|
||||||
"use_webdav": true
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Torbox
|
|
||||||
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"name": "torbox",
|
|
||||||
"api_key": "your-api-key",
|
|
||||||
"folder": "/mnt/remote/torbox/torrents/",
|
|
||||||
"rate_limit": null,
|
|
||||||
"download_uncached": false,
|
|
||||||
"use_webdav": true
|
|
||||||
}
|
|
||||||
```
|
|
||||||
@@ -1,81 +0,0 @@
|
|||||||
# General Configuration
|
|
||||||
|
|
||||||
This section covers the basic configuration options for Decypharr that apply to the entire application.
|
|
||||||
|
|
||||||
## Basic Settings
|
|
||||||
|
|
||||||
Here are the fundamental configuration options:
|
|
||||||
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"use_auth": false,
|
|
||||||
"port": 8282,
|
|
||||||
"log_level": "info",
|
|
||||||
"discord_webhook_url": "",
|
|
||||||
"min_file_size": 0,
|
|
||||||
"max_file_size": 0,
|
|
||||||
"allowed_file_types": [".mp4", ".mkv", ".avi", ...]
|
|
||||||
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
### Configuration Options
|
|
||||||
|
|
||||||
#### Log Level
|
|
||||||
The `log_level` setting determines how verbose the application logs will be:
|
|
||||||
|
|
||||||
- `debug`: Detailed information, useful for troubleshooting
|
|
||||||
- `info`: General operational information (default)
|
|
||||||
- `warn`: Warning messages
|
|
||||||
- `error`: Error messages only
|
|
||||||
- `trace`: Very detailed information, including all requests and responses
|
|
||||||
|
|
||||||
#### Port
|
|
||||||
|
|
||||||
The `port` setting specifies the port on which Decypharr will run. The default is `8282`. You can change this to any available port on your server.
|
|
||||||
|
|
||||||
Ensure this port:
|
|
||||||
|
|
||||||
- Is not used by other applications
|
|
||||||
- Is accessible to your Arr applications
|
|
||||||
- Is properly exposed if using Docker (see the Docker Compose example in the Installation guide)
|
|
||||||
|
|
||||||
|
|
||||||
#### Authentication
|
|
||||||
The `use_auth` option enables basic authentication for the UI:
|
|
||||||
|
|
||||||
```json
|
|
||||||
"use_auth": true
|
|
||||||
```
|
|
||||||
|
|
||||||
When enabled, you'll need to provide a username and password to access the Decypharr interface.
|
|
||||||
|
|
||||||
|
|
||||||
#### File Size Limits
|
|
||||||
|
|
||||||
You can set minimum and maximum file size limits for torrents:
|
|
||||||
```json
|
|
||||||
"min_file_size": 0, // Minimum file size in bytes (0 = no minimum)
|
|
||||||
"max_file_size": 0 // Maximum file size in bytes (0 = no maximum)
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Allowed File Types
|
|
||||||
You can restrict the types of files that Decypharr will process by specifying allowed file extensions. This is useful for filtering out unwanted file types.
|
|
||||||
|
|
||||||
```json
|
|
||||||
"allowed_file_types": [
|
|
||||||
".mp4", ".mkv", ".avi", ".mov",
|
|
||||||
".m4v", ".mpg", ".mpeg", ".wmv",
|
|
||||||
".m4a", ".mp3", ".flac", ".wav"
|
|
||||||
]
|
|
||||||
```
|
|
||||||
|
|
||||||
If not specified, all movie, TV show, and music file types are allowed by default.
|
|
||||||
|
|
||||||
|
|
||||||
#### Discord Notifications
|
|
||||||
To receive notifications on Discord, add your webhook URL:
|
|
||||||
```json
|
|
||||||
"discord_webhook_url": "https://discord.com/api/webhooks/..."
|
|
||||||
```
|
|
||||||
This will send notifications for various events, such as successful downloads or errors.
|
|
||||||
@@ -1,44 +0,0 @@
|
|||||||
# Configuration Overview
|
|
||||||
|
|
||||||
Decypharr uses a JSON configuration file to manage its settings. This file should be named `config.json` and placed in your configured directory.
|
|
||||||
|
|
||||||
## Basic Configuration
|
|
||||||
|
|
||||||
Here's a minimal configuration to get started:
|
|
||||||
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"debrids": [
|
|
||||||
{
|
|
||||||
"name": "realdebrid",
|
|
||||||
"api_key": "realdebrid_key",
|
|
||||||
"folder": "/mnt/remote/realdebrid/__all__/",
|
|
||||||
"use_webdav": true
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"qbittorrent": {
|
|
||||||
"port": "8282",
|
|
||||||
"download_folder": "/mnt/symlinks/",
|
|
||||||
"categories": ["sonarr", "radarr"]
|
|
||||||
},
|
|
||||||
"repair": {
|
|
||||||
"enabled": false,
|
|
||||||
"interval": "12h",
|
|
||||||
"run_on_start": false
|
|
||||||
},
|
|
||||||
"use_auth": false,
|
|
||||||
"log_level": "info"
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
### Configuration Sections
|
|
||||||
|
|
||||||
Decypharr's configuration is divided into several sections:
|
|
||||||
|
|
||||||
- [General Configuration](general.md) - Basic settings like logging and authentication
|
|
||||||
- [Debrid Providers](debrid.md) - Configure one or more Debrid services
|
|
||||||
- [qBittorrent Settings](qbittorrent.md) - Settings for the qBittorrent API
|
|
||||||
- [Arr Integration](arrs.md) - Configuration for Sonarr, Radarr, etc.
|
|
||||||
|
|
||||||
### Full Configuration Example
|
|
||||||
For a complete configuration file with all available options, see our [full configuration example](../extras/config.full.json).
|
|
||||||
@@ -1,61 +0,0 @@
|
|||||||
# qBittorrent Configuration
|
|
||||||
|
|
||||||
Decypharr emulates a qBittorrent instance to integrate with Arr applications. This section explains how to configure the qBittorrent settings in your `config.json` file.
|
|
||||||
|
|
||||||
## Basic Configuration
|
|
||||||
|
|
||||||
The qBittorrent functionality is configured under the `qbittorrent` key:
|
|
||||||
|
|
||||||
```json
|
|
||||||
"qbittorrent": {
|
|
||||||
"download_folder": "/mnt/symlinks/",
|
|
||||||
"categories": ["sonarr", "radarr", "lidarr"],
|
|
||||||
"refresh_interval": 5
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
### Configuration Options
|
|
||||||
#### Required Settings
|
|
||||||
|
|
||||||
- `download_folder`: The folder where symlinks or downloaded files will be placed
|
|
||||||
- `categories`: An array of categories to organize downloads (usually matches your Arr applications)
|
|
||||||
|
|
||||||
#### Advanced Settings
|
|
||||||
|
|
||||||
- `refresh_interval`: How often (in seconds) to refresh the Arrs Monitored Downloads (default: 5)
|
|
||||||
- `max_downloads`: The maximum number of concurrent downloads. This is only for downloading real files(Not symlinks). If you set this to 0, it will download all files at once. This is not recommended for most users.(default: 5)
|
|
||||||
- `skip_pre_cache`: This option disables the process of pre-caching files. This caches a small portion of the file to speed up your *arrs import process.
|
|
||||||
|
|
||||||
#### Categories
|
|
||||||
Categories help organize your downloads and match them to specific Arr applications. Typically, you'll want to configure categories that match your Sonarr, Radarr, or other Arr applications:
|
|
||||||
|
|
||||||
```json
|
|
||||||
"categories": ["sonarr", "radarr", "lidarr", "readarr"]
|
|
||||||
```
|
|
||||||
|
|
||||||
When setting up your Arr applications to connect to Decypharr, you'll specify these same category names.
|
|
||||||
|
|
||||||
#### Download Folder
|
|
||||||
|
|
||||||
The `download_folder` setting specifies where Decypharr will place downloaded files or create symlinks:
|
|
||||||
|
|
||||||
```json
|
|
||||||
"download_folder": "/mnt/symlinks/"
|
|
||||||
```
|
|
||||||
|
|
||||||
This folder should be:
|
|
||||||
|
|
||||||
- Accessible to Decypharr
|
|
||||||
- Accessible to your Arr applications
|
|
||||||
- Have sufficient space if downloading files locally
|
|
||||||
|
|
||||||
|
|
||||||
#### Refresh Interval
|
|
||||||
The refresh_interval setting controls how often Decypharr checks for updates from your Arr applications:
|
|
||||||
|
|
||||||
```json
|
|
||||||
"refresh_interval": 5
|
|
||||||
```
|
|
||||||
|
|
||||||
|
|
||||||
This value is in seconds. Lower values provide more responsive updates but may increase CPU usage.
|
|
||||||
@@ -1,88 +0,0 @@
|
|||||||
{
|
|
||||||
"debrids": [
|
|
||||||
{
|
|
||||||
"name": "realdebrid",
|
|
||||||
"api_key": "realdebrid_key",
|
|
||||||
"folder": "/mnt/remote/realdebrid/__all__/",
|
|
||||||
"download_api_keys": [],
|
|
||||||
"proxy": "",
|
|
||||||
"rate_limit": "250/minute",
|
|
||||||
"download_uncached": false,
|
|
||||||
"use_webdav": true,
|
|
||||||
"torrents_refresh_interval": "15s",
|
|
||||||
"folder_naming": "original_no_ext",
|
|
||||||
"auto_expire_links_after": "3d",
|
|
||||||
"rc_url": "http://your-ip-address:9990",
|
|
||||||
"rc_user": "your_rclone_rc_user",
|
|
||||||
"rc_pass": "your_rclone_rc_pass"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "torbox",
|
|
||||||
"api_key": "torbox_api_key",
|
|
||||||
"folder": "/mnt/remote/torbox/torrents/",
|
|
||||||
"rate_limit": "250/minute",
|
|
||||||
"download_uncached": false
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "debridlink",
|
|
||||||
"api_key": "debridlink_key",
|
|
||||||
"folder": "/mnt/remote/debridlink/torrents/",
|
|
||||||
"rate_limit": "250/minute",
|
|
||||||
"download_uncached": false
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "alldebrid",
|
|
||||||
"api_key": "alldebrid_key",
|
|
||||||
"folder": "/mnt/remote/alldebrid/magnet/",
|
|
||||||
"rate_limit": "600/minute",
|
|
||||||
"download_uncached": false,
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"max_cache_size": 1000,
|
|
||||||
"qbittorrent": {
|
|
||||||
"port": "8282",
|
|
||||||
"download_folder": "/mnt/symlinks/",
|
|
||||||
"categories": ["sonarr", "radarr"],
|
|
||||||
"refresh_interval": 5,
|
|
||||||
"skip_pre_cache": false
|
|
||||||
},
|
|
||||||
"arrs": [
|
|
||||||
{
|
|
||||||
"name": "sonarr",
|
|
||||||
"host": "http://sonarr:8989",
|
|
||||||
"token": "arr_key",
|
|
||||||
"cleanup": true,
|
|
||||||
"skip_repair": true,
|
|
||||||
"download_uncached": false
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "radarr",
|
|
||||||
"host": "http://radarr:7878",
|
|
||||||
"token": "arr_key",
|
|
||||||
"cleanup": false,
|
|
||||||
"download_uncached": false
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "lidarr",
|
|
||||||
"host": "http://lidarr:8686",
|
|
||||||
"token": "arr_key",
|
|
||||||
"cleanup": false,
|
|
||||||
"skip_repair": true,
|
|
||||||
"download_uncached": false
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"repair": {
|
|
||||||
"enabled": false,
|
|
||||||
"interval": "12h",
|
|
||||||
"run_on_start": false,
|
|
||||||
"zurg_url": "",
|
|
||||||
"use_webdav": false,
|
|
||||||
"auto_process": false
|
|
||||||
},
|
|
||||||
"log_level": "info",
|
|
||||||
"min_file_size": "",
|
|
||||||
"max_file_size": "",
|
|
||||||
"allowed_file_types": [],
|
|
||||||
"use_auth": false,
|
|
||||||
"discord_webhook_url": "https://discord.com/api/webhooks/..."
|
|
||||||
}
|
|
||||||
@@ -1,5 +0,0 @@
|
|||||||
[decypharr]
|
|
||||||
type = webdav
|
|
||||||
url = http://decypharr:8282/webdav/realdebrid
|
|
||||||
vendor = other
|
|
||||||
pacer_min_sleep = 0
|
|
||||||
@@ -25,8 +25,10 @@ The Decypharr user interface provides:
|
|||||||
|
|
||||||
Decypharr includes several advanced features that extend its capabilities:
|
Decypharr includes several advanced features that extend its capabilities:
|
||||||
|
|
||||||
- [Repair Worker](repair-worker.md): Identifies and fixes issues with your media files
|
- [Repair Support](repair-worker.md): Identifies and fixes issues with your media files
|
||||||
- [WebDAV Server](webdav.md): Provides direct access to your Debrid files
|
- WebDav Server: Provides direct access to your Debrid files
|
||||||
|
- Mounting Support: Allows you to mount Debrid services using [rclone](https://rclone.org), making it easy to access your files directly from your system
|
||||||
|
- Multiple Debrid Providers: Supports Real Debrid, Torbox, Debrid Link, and All Debrid, allowing you to choose the best service for your needs
|
||||||
|
|
||||||
## Supported Debrid Providers
|
## Supported Debrid Providers
|
||||||
|
|
||||||
@@ -36,5 +38,7 @@ Decypharr supports multiple Debrid providers:
|
|||||||
- Torbox
|
- Torbox
|
||||||
- Debrid Link
|
- Debrid Link
|
||||||
- All Debrid
|
- All Debrid
|
||||||
|
- Premiumize (Coming Soon)
|
||||||
|
- Usenet (Coming Soon)
|
||||||
|
|
||||||
Each provider can be configured separately, allowing you to use one or multiple services simultaneously.
|
Each provider can be configured separately, allowing you to use one or multiple services simultaneously.
|
||||||
@@ -1,5 +1,7 @@
|
|||||||
# Repair Worker
|
# Repair Worker
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
The Repair Worker is a powerful feature that helps maintain the health of your media library by scanning for and fixing issues with files.
|
The Repair Worker is a powerful feature that helps maintain the health of your media library by scanning for and fixing issues with files.
|
||||||
|
|
||||||
## What It Does
|
## What It Does
|
||||||
@@ -13,29 +15,4 @@ The Repair Worker performs the following tasks:
|
|||||||
|
|
||||||
## Configuration
|
## Configuration
|
||||||
|
|
||||||
To enable and configure the Repair Worker, add the following to your `config.json`:
|
You can enable and configure the Repair Worker in the Decypharr settings. It can be set to run at regular intervals, such as every 12 hours or daily.
|
||||||
|
|
||||||
```json
|
|
||||||
"repair": {
|
|
||||||
"enabled": true,
|
|
||||||
"interval": "12h",
|
|
||||||
"run_on_start": false,
|
|
||||||
"use_webdav": false,
|
|
||||||
"zurg_url": "http://localhost:9999",
|
|
||||||
"auto_process": true
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
### Configuration Options
|
|
||||||
|
|
||||||
- `enabled`: Set to `true` to enable the Repair Worker.
|
|
||||||
- `interval`: The time interval for the Repair Worker to run (e.g., `12h`, `1d`).
|
|
||||||
- `run_on_start`: If set to `true`, the Repair Worker will run immediately after Decypharr starts.
|
|
||||||
- `use_webdav`: If set to `true`, the Repair Worker will use WebDAV for file operations.
|
|
||||||
- `zurg_url`: The URL for the Zurg service (if using).
|
|
||||||
- `auto_process`: If set to `true`, the Repair Worker will automatically process files that it finds issues with.
|
|
||||||
|
|
||||||
|
|
||||||
### Performance Tips
|
|
||||||
- For users of the WebDAV server, enable `use_webdav` for exponentially faster repair processes
|
|
||||||
- If using Zurg, set the `zurg_url` parameter to greatly improve repair speed
|
|
||||||
@@ -1,72 +0,0 @@
|
|||||||
# WebDAV Server
|
|
||||||
|
|
||||||
Decypharr includes a built-in WebDAV server that provides direct access to your Debrid files, making them easily accessible to media players and other applications.
|
|
||||||
|
|
||||||
|
|
||||||
## Overview
|
|
||||||
|
|
||||||
While most Debrid providers have their own WebDAV servers, Decypharr's implementation offers faster access and additional features.
|
|
||||||
|
|
||||||
## Accessing the WebDAV Server
|
|
||||||
|
|
||||||
- URL: `http://localhost:8282/webdav` or `http://<your-server-ip>:8282/webdav`
|
|
||||||
|
|
||||||
## Configuration
|
|
||||||
|
|
||||||
You can configure WebDAV settings either globally or per-Debrid provider in your `config.json`:
|
|
||||||
|
|
||||||
```json
|
|
||||||
"webdav": {
|
|
||||||
"torrents_refresh_interval": "15s",
|
|
||||||
"download_links_refresh_interval": "40m",
|
|
||||||
"folder_naming": "original_no_ext",
|
|
||||||
"auto_expire_links_after": "3d",
|
|
||||||
"rc_url": "http://localhost:5572",
|
|
||||||
"rc_user": "username",
|
|
||||||
"rc_pass": "password",
|
|
||||||
"serve_from_rclone": false,
|
|
||||||
"directories": {
|
|
||||||
"Newly Added": {
|
|
||||||
"filters": {
|
|
||||||
"exclude": "9-1-1",
|
|
||||||
"last_added": "20h"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
### Configuration Options
|
|
||||||
|
|
||||||
- `torrents_refresh_interval`: Interval for refreshing torrent data (e.g., `15s`, `1m`, `1h`).
|
|
||||||
- `download_links_refresh_interval`: Interval for refreshing download links (e.g., `40m`, `1h`).
|
|
||||||
- `workers`: Number of concurrent workers for processing requests.
|
|
||||||
- folder_naming: Naming convention for folders:
|
|
||||||
- `original_no_ext`: Original file name without extension
|
|
||||||
- `original`: Original file name with extension
|
|
||||||
- `filename`: Torrent filename
|
|
||||||
- `filename_no_ext`: Torrent filename without extension
|
|
||||||
- `id`: Torrent ID
|
|
||||||
- `auto_expire_links_after`: Time after which download links will expire (e.g., `3d`, `1w`).
|
|
||||||
- `rc_url`, `rc_user`, `rc_pass`: Rclone RC configuration for VFS refreshes
|
|
||||||
- `directories`: A map of virtual folders to serve via the WebDAV server. The key is the virtual folder name, and the values are a map of filters and their values.
|
|
||||||
- `serve_from_rclone`: Whether to serve files directly from Rclone (disabled by default).
|
|
||||||
|
|
||||||
### Using with Media Players
|
|
||||||
The WebDAV server works well with media players like:
|
|
||||||
|
|
||||||
- Infuse
|
|
||||||
- VidHub
|
|
||||||
- Plex, Emby, Jellyfin (with rclone, Check [this guide](../guides/rclone.md))
|
|
||||||
- Kodi
|
|
||||||
|
|
||||||
### Mounting with Rclone
|
|
||||||
You can mount the WebDAV server locally using Rclone. Example configuration:
|
|
||||||
|
|
||||||
```conf
|
|
||||||
[decypharr]
|
|
||||||
type = webdav
|
|
||||||
url = http://localhost:8282/webdav/realdebrid
|
|
||||||
vendor = other
|
|
||||||
```
|
|
||||||
For a complete Rclone configuration example, see our [sample rclone.conf](../extras/rclone.conf).
|
|
||||||
26
docs/docs/guides/downloading.md
Normal file
@@ -0,0 +1,26 @@
|
|||||||
|
### Downloading with Decypharr
|
||||||
|
|
||||||
|
While Decypharr provides a qBittorrent-compatible API for integration with media management applications, it also allows you to manually download torrents directly through its interface. This guide will walk you through the process of downloading torrents using Decypharr.
|
||||||
|
|
||||||
|
- You can either use the Decypharr UI to add torrents manually or use its [API](../api.md) to automate the process.
|
||||||
|
|
||||||
|
## Manual Downloading
|
||||||
|
|
||||||
|

|
||||||
|
To manually download a torrent using Decypharr, follow these steps:
|
||||||
|
1. Navigate to the "Download" section in the Decypharr UI.
|
||||||
|
2. You can either upload torrent file(s) or paste magnet links directly into the input fields
|
||||||
|
3. Select the action(defaults to Symlink)
|
||||||
|
|
||||||
|
4. Add any additional options, such as:
|
||||||
|
- *Download Folder*: Specify the folder where the downloaded files will be saved.
|
||||||
|
- *Arr Category*: Choose the category for the download, which helps in organizing files in your media management applications.
|
||||||
|
- **Post Download Action**: Select what to do after the download completes:
|
||||||
|
- **Create Symlink**: Create a symlink to the downloaded files in the mount folder(default)
|
||||||
|
- **Download**: Download the file directly.
|
||||||
|
- **No Action**: Do nothing after the download completes.
|
||||||
|
- **Debrid Provider**: Choose which Debrid service to use for the download(if you have multiple)
|
||||||
|
- **Download Uncached**: If enabled, Decypharr will attempt to download uncached files from the Debrid service.
|
||||||
|
|
||||||
|
Note:
|
||||||
|
- If you use an arr category, your download will go into **{download_folder}/{arr}**
|
||||||
@@ -1,4 +1,4 @@
|
|||||||
# Guides for setting up Decypharr
|
# Guides for setting up Decypharr
|
||||||
|
|
||||||
|
- [Manual Downloading with Decypharr](downloading.md)
|
||||||
- [Setting up with Rclone](rclone.md)
|
- [Internal Mounting](internal-mounting.md)
|
||||||
81
docs/docs/guides/internal-mounting.md
Normal file
@@ -0,0 +1,81 @@
|
|||||||
|
# Internal Mounting
|
||||||
|
|
||||||
|
This guide explains how to use Decypharr's internal mounting feature to eliminate the need for external rclone setup.
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
Instead of requiring users to install and configure rclone separately, Decypharr can now mount your WebDAV endpoints internally using rclone as a library dependency. This provides a seamless experience where files appear as regular filesystem paths without any external dependencies.
|
||||||
|
|
||||||
|
## Prerequisites
|
||||||
|
|
||||||
|
- **Docker users**: FUSE support may need to be enabled in the container depending on your Docker setup
|
||||||
|
- **macOS users**: May need [macFUSE](https://osxfuse.github.io/) installed for mounting functionality
|
||||||
|
- **Linux users**: FUSE should be available by default on most distributions
|
||||||
|
- **Windows users**: Mounting functionality may be limited
|
||||||
|
|
||||||
|
### Configuration Options
|
||||||
|
|
||||||
|
You can set the options in the Web UI or directly in the configuration file:
|
||||||
|
|
||||||
|
#### Note:
|
||||||
|
Check the Rclone documentation for more details on the available options: [Rclone Mount Options](https://rclone.org/commands/rclone_mount/).
|
||||||
|
|
||||||
|
## How It Works
|
||||||
|
|
||||||
|
1. **WebDAV Server**: Decypharr starts its internal WebDAV server for enabled providers
|
||||||
|
2. **Internal Mount**: Rclone is used internally to mount the WebDAV endpoint to a local filesystem path
|
||||||
|
3. **File Access**: Your applications can access files using regular filesystem paths like `/mnt/decypharr/realdebrid/__all__/MyMovie/`
|
||||||
|
|
||||||
|
## Benefits
|
||||||
|
|
||||||
|
- **Automatic Setup**: Mounting is handled automatically by Decypharr using internal rclone rcd
|
||||||
|
- **Filesystem Access**: Files appear as regular directories and files
|
||||||
|
- **Seamless Integration**: Works with existing media servers without changes
|
||||||
|
|
||||||
|
## Docker Compose
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
version: '3.8'
|
||||||
|
services:
|
||||||
|
decypharr:
|
||||||
|
image: sirrobot01/decypharr:latest
|
||||||
|
container_name: decypharr
|
||||||
|
ports:
|
||||||
|
- "8282:8282"
|
||||||
|
volumes:
|
||||||
|
- ./config:/config
|
||||||
|
- /mnt:/mnt:rshared # Important: use 'rshared' for mount propagation
|
||||||
|
devices:
|
||||||
|
- /dev/fuse:/dev/fuse:rwm
|
||||||
|
cap_add:
|
||||||
|
- SYS_ADMIN
|
||||||
|
environment:
|
||||||
|
- UMASK=002
|
||||||
|
```
|
||||||
|
|
||||||
|
**Important Docker Notes:**
|
||||||
|
- Mount volumes with `:rshared` to allow mount propagation
|
||||||
|
- Include `/dev/fuse` device for FUSE mounting
|
||||||
|
|
||||||
|
## Troubleshooting
|
||||||
|
|
||||||
|
### Mount Failures
|
||||||
|
|
||||||
|
If mounting fails, check:
|
||||||
|
|
||||||
|
1. **FUSE Installation**:
|
||||||
|
- **macOS**: Install macFUSE from https://osxfuse.github.io/
|
||||||
|
- **Linux**: Install fuse package (`apt install fuse` or `yum install fuse`)
|
||||||
|
- **Docker**: Fuse is already included in the container, but ensure the host supports it
|
||||||
|
2. **Permissions**: Ensure the application has sufficient privileges
|
||||||
|
|
||||||
|
### No Mount Methods Available
|
||||||
|
|
||||||
|
If you see "no mount method available" errors:
|
||||||
|
|
||||||
|
1. **Check Platform Support**: Some platforms have limited FUSE support
|
||||||
|
2. **Install Dependencies**: Ensure FUSE libraries are installed
|
||||||
|
3. **Use WebDAV Directly**: Access files via `http://localhost:8282/webdav/provider/`
|
||||||
|
4. **External Mounting**: Use OS-native WebDAV mounting as fallback
|
||||||
@@ -1,142 +0,0 @@
|
|||||||
# Setting up Decypharr with Rclone
|
|
||||||
|
|
||||||
This guide will help you set up Decypharr with Rclone, allowing you to use your Debrid providers as a remote storage solution.
|
|
||||||
|
|
||||||
#### Rclone
|
|
||||||
Make sure you have Rclone installed and configured on your system. You can follow the [Rclone installation guide](https://rclone.org/install/) for instructions.
|
|
||||||
|
|
||||||
It's recommended to use docker version of Rclone, as it provides a consistent environment across different platforms.
|
|
||||||
|
|
||||||
|
|
||||||
### Steps
|
|
||||||
|
|
||||||
We'll be using docker compose to set up Rclone and Decypharr together.
|
|
||||||
|
|
||||||
#### Note
|
|
||||||
This guide assumes you have a basic understanding of Docker and Docker Compose. If you're new to Docker, consider checking out the [Docker documentation](https://docs.docker.com/get-started/) for more information.
|
|
||||||
|
|
||||||
Also, ensure you have Docker and Docker Compose installed on your system. You can find installation instructions in the [Docker documentation](https://docs.docker.com/get-docker/) and [Docker Compose documentation](https://docs.docker.com/compose/install/).
|
|
||||||
|
|
||||||
|
|
||||||
Create a directory for your Decypharr and Rclone setup:
|
|
||||||
```bash
|
|
||||||
mkdir -p /opt/decypharr
|
|
||||||
mkdir -p /opt/rclone
|
|
||||||
mkdir -p /mnt/remote/realdebrid
|
|
||||||
|
|
||||||
# Set permissions
|
|
||||||
chown -R $USER:$USER /opt/decypharr
|
|
||||||
chown -R $USER:$USER /opt/rclone
|
|
||||||
chown -R $USER:$USER /mnt/remote/realdebrid
|
|
||||||
```
|
|
||||||
|
|
||||||
Create a `rclone.conf` file in `/opt/rclone/` with your Rclone configuration.
|
|
||||||
|
|
||||||
```conf
|
|
||||||
[decypharr]
|
|
||||||
type = webdav
|
|
||||||
url = https://your-ip-or-domain:8282/webdav/realdebrid
|
|
||||||
vendor = other
|
|
||||||
pacer_min_sleep = 0
|
|
||||||
```
|
|
||||||
|
|
||||||
Create a `config.json` file in `/opt/decypharr/` with your Decypharr configuration.
|
|
||||||
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"debrids": [
|
|
||||||
{
|
|
||||||
"name": "realdebrid",
|
|
||||||
"api_key": "realdebrid_key",
|
|
||||||
"folder": "/mnt/remote/realdebrid/__all__/",
|
|
||||||
"rate_limit": "250/minute",
|
|
||||||
"use_webdav": true,
|
|
||||||
"rc_url": "http://your-ip-address:5572" // Rclone RC URL
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"qbittorrent": {
|
|
||||||
"download_folder": "data/media/symlinks/",
|
|
||||||
"refresh_interval": 10
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
```
|
|
||||||
|
|
||||||
Create a `docker-compose.yml` file with the following content:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
services:
|
|
||||||
decypharr:
|
|
||||||
image: cy01/blackhole:latest
|
|
||||||
container_name: decypharr
|
|
||||||
user: "1000:1000"
|
|
||||||
volumes:
|
|
||||||
- /mnt/:/mnt
|
|
||||||
- /opt/decypharr/:/app
|
|
||||||
environment:
|
|
||||||
- PUID=1000
|
|
||||||
- PGID=1000
|
|
||||||
- UMASK=002
|
|
||||||
ports:
|
|
||||||
- "8282:8282/tcp"
|
|
||||||
restart: unless-stopped
|
|
||||||
|
|
||||||
rclone:
|
|
||||||
image: rclone/rclone:latest
|
|
||||||
container_name: rclone
|
|
||||||
restart: unless-stopped
|
|
||||||
environment:
|
|
||||||
TZ: UTC
|
|
||||||
PUID: 1000
|
|
||||||
PGID: 1000
|
|
||||||
ports:
|
|
||||||
- 5572:5572
|
|
||||||
volumes:
|
|
||||||
- /mnt/remote/realdebrid:/data:rshared
|
|
||||||
- /opt/rclone/rclone.conf:/config/rclone/rclone.conf
|
|
||||||
- /mnt:/mnt
|
|
||||||
cap_add:
|
|
||||||
- SYS_ADMIN
|
|
||||||
security_opt:
|
|
||||||
- apparmor:unconfined
|
|
||||||
devices:
|
|
||||||
- /dev/fuse:/dev/fuse:rwm
|
|
||||||
depends_on:
|
|
||||||
decypharr:
|
|
||||||
condition: service_healthy
|
|
||||||
restart: true
|
|
||||||
command: "mount decypharr: /data --allow-non-empty --allow-other --uid=1000 --gid=1000 --umask=002 --dir-cache-time 10s --rc --rc-addr :5572 --rc-no-auth "
|
|
||||||
```
|
|
||||||
|
|
||||||
Start the containers:
|
|
||||||
```bash
|
|
||||||
docker-compose up -d
|
|
||||||
```
|
|
||||||
|
|
||||||
Access the Decypharr web interface at `http://your-ip-address:8282` and configure your settings as needed.
|
|
||||||
|
|
||||||
- Access your webdav server at `http://your-ip-address:8282/webdav` to see your files.
|
|
||||||
- You should be able to see your files in the `/mnt/remote/realdebrid/__all__/` directory.
|
|
||||||
- You can now use your Debrid provider as a remote storage solution with Rclone and Decypharr.
|
|
||||||
- You can also use the Rclone mount command to mount your Debrid provider locally. For example:
|
|
||||||
|
|
||||||
|
|
||||||
### Notes
|
|
||||||
|
|
||||||
- Make sure to replace `your-ip-address` with the actual IP address of your server.
|
|
||||||
- You can use multiple Debrid providers by adding them to the `debrids` array in the `config.json` file.
|
|
||||||
|
|
||||||
For each provider, you'll need a different rclone. OR you can change your `rclone.conf`
|
|
||||||
|
|
||||||
|
|
||||||
```apache
|
|
||||||
[decypharr]
|
|
||||||
type = webdav
|
|
||||||
url = https://your-ip-or-domain:8282/webdav/
|
|
||||||
vendor = other
|
|
||||||
pacer_min_sleep = 0
|
|
||||||
```
|
|
||||||
|
|
||||||
You'll still be able to access the directories via `/mnt/remote/realdebrid, /mnt/remote/alldebrid` etc
|
|
||||||
|
|
||||||
|
|
||||||
BIN
docs/docs/images/download.png
Normal file
|
After Width: | Height: | Size: 293 KiB |
BIN
docs/docs/images/main-light.png
Normal file
|
After Width: | Height: | Size: 431 KiB |
|
Before Width: | Height: | Size: 188 KiB After Width: | Height: | Size: 417 KiB |
BIN
docs/docs/images/repair.png
Normal file
|
After Width: | Height: | Size: 286 KiB |
|
Before Width: | Height: | Size: 264 KiB After Width: | Height: | Size: 264 KiB |
BIN
docs/docs/images/settings/debrid.png
Normal file
|
After Width: | Height: | Size: 264 KiB |
BIN
docs/docs/images/settings/qbittorent.png
Normal file
|
After Width: | Height: | Size: 169 KiB |
BIN
docs/docs/images/settings/rclone.png
Normal file
|
After Width: | Height: | Size: 364 KiB |
BIN
docs/docs/images/settings/repair.png
Normal file
|
After Width: | Height: | Size: 216 KiB |
BIN
docs/docs/images/webdav.png
Normal file
|
After Width: | Height: | Size: 62 KiB |
@@ -1,20 +1,19 @@
|
|||||||
# Decypharr
|
# Decypharr
|
||||||
|
{: .light-mode-image}
|
||||||

|
{: .dark-mode-image}
|
||||||
|
|
||||||
**Decypharr** is an implementation of qBittorrent with **multiple Debrid service support**, written in Go.
|
**Decypharr** is an implementation of qBittorrent with **multiple Debrid service support**, written in Go.
|
||||||
|
|
||||||
## What is Decypharr?
|
## What is Decypharr?
|
||||||
|
|
||||||
TLDR; Decypharr is a self-hosted, open-source torrent client that integrates with multiple Debrid services. It provides a user-friendly interface for managing torrents and supports popular media management applications like Sonarr and Radarr.
|
**TLDR**; Decypharr is a self-hosted, open-source download client that integrates with multiple Debrid services. It provides a user-friendly interface for managing files and supports popular media management applications like Sonarr and Radarr.
|
||||||
|
|
||||||
|
|
||||||
## Key Features
|
## Key Features
|
||||||
|
|
||||||
- Mock qBittorrent API that supports Sonarr, Radarr, Lidarr, and other Arr applications
|
- Mock qBittorrent API that supports Sonarr, Radarr, Lidarr, and other Arr applications
|
||||||
- Full-fledged UI for managing torrents
|
|
||||||
- Multiple Debrid providers support
|
- Multiple Debrid providers support
|
||||||
- WebDAV server support for each Debrid provider
|
- WebDAV server support for each Debrid provider with an optional mounting feature(using [rclone](https://rclone.org))
|
||||||
- Repair Worker for missing files, symlinks etc
|
- Repair Worker for missing files, symlinks etc
|
||||||
|
|
||||||
## Supported Debrid Providers
|
## Supported Debrid Providers
|
||||||
|
|||||||
@@ -18,7 +18,6 @@ You can use either Docker Hub or GitHub Container Registry to pull the image:
|
|||||||
- `latest`: The latest stable release
|
- `latest`: The latest stable release
|
||||||
- `beta`: The latest beta release
|
- `beta`: The latest beta release
|
||||||
- `vX.Y.Z`: A specific version (e.g., `v0.1.0`)
|
- `vX.Y.Z`: A specific version (e.g., `v0.1.0`)
|
||||||
- `nightly`: The latest nightly build (usually unstable)
|
|
||||||
- `experimental`: The latest experimental build (highly unstable)
|
- `experimental`: The latest experimental build (highly unstable)
|
||||||
|
|
||||||
### Docker CLI Setup
|
### Docker CLI Setup
|
||||||
@@ -31,12 +30,13 @@ Run the Docker container:
|
|||||||
```bash
|
```bash
|
||||||
docker run -d \
|
docker run -d \
|
||||||
--name decypharr \
|
--name decypharr \
|
||||||
|
--restart unless-stopped \
|
||||||
-p 8282:8282 \
|
-p 8282:8282 \
|
||||||
-v /mnt/:/mnt \
|
-v /mnt/:/mnt:rshared \
|
||||||
-v ./config/:/app \
|
-v ./config/:/app \
|
||||||
-e PUID=1000 \
|
--device /dev/fuse:/dev/fuse:rwm \
|
||||||
-e PGID=1000 \
|
--cap-add SYS_ADMIN \
|
||||||
-e UMASK=002 \
|
--security-opt apparmor:unconfined \
|
||||||
cy01/blackhole:latest
|
cy01/blackhole:latest
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -45,23 +45,22 @@ docker run -d \
|
|||||||
Create a `docker-compose.yml` file with the following content:
|
Create a `docker-compose.yml` file with the following content:
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
version: '3.7'
|
|
||||||
services:
|
services:
|
||||||
decypharr:
|
decypharr:
|
||||||
image: cy01/blackhole:latest
|
image: cy01/blackhole:latest
|
||||||
container_name: decypharr
|
container_name: decypharr
|
||||||
ports:
|
ports:
|
||||||
- "8282:8282"
|
- "8282:8282"
|
||||||
user: "1000:1000"
|
|
||||||
volumes:
|
volumes:
|
||||||
- /mnt/:/mnt # Mount your media directory
|
- /mnt/:/mnt:rshared
|
||||||
- ./config/:/app # config.json must be in this directory
|
- ./config/:/app
|
||||||
environment:
|
|
||||||
- PUID=1000
|
|
||||||
- PGID=1000
|
|
||||||
- UMASK=002
|
|
||||||
- QBIT_PORT=8282 # qBittorrent Port (optional)
|
|
||||||
restart: unless-stopped
|
restart: unless-stopped
|
||||||
|
devices:
|
||||||
|
- /dev/fuse:/dev/fuse:rwm
|
||||||
|
cap_add:
|
||||||
|
- SYS_ADMIN
|
||||||
|
security_opt:
|
||||||
|
- apparmor:unconfined
|
||||||
```
|
```
|
||||||
|
|
||||||
Run the Docker Compose setup:
|
Run the Docker Compose setup:
|
||||||
@@ -73,44 +72,36 @@ docker-compose up -d
|
|||||||
## Binary Installation
|
## Binary Installation
|
||||||
If you prefer not to use Docker, you can download and run the binary directly.
|
If you prefer not to use Docker, you can download and run the binary directly.
|
||||||
|
|
||||||
Download the binary from the releases page
|
Download your OS-specific release from the [release page](https://github.com/sirrobot01/decypharr/releases).
|
||||||
Create a configuration file (see Configuration)
|
Create a configuration file (see Configuration)
|
||||||
Run the binary:
|
Run the binary:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
chmod +x decypharr
|
chmod +x decypharr
|
||||||
./decypharr --config /path/to/config/folder
|
./decypharr --config /path/to/config/folder
|
||||||
```
|
```
|
||||||
|
|
||||||
The config directory should contain your config.json file.
|
### Notes for Docker Users
|
||||||
|
|
||||||
## config.json
|
- Ensure that the `/mnt/` directory is mounted correctly to access your media files.
|
||||||
|
- You can adjust the `PUID` and `PGID` environment variables to match your user and group IDs for proper file permissions.
|
||||||
|
- The `UMASK` environment variable can be set to control file permissions created by Decypharr.
|
||||||
|
|
||||||
The `config.json` file is where you configure Decypharr. You can find a sample configuration file in the `configs` directory of the repository.
|
##### Health Checks
|
||||||
|
- Health checks are disabled by default. You can enable them by adding a `healthcheck` section in your `docker-compose.yml` file.
|
||||||
|
- Health checks verify the availability of several parts of the application:
|
||||||
|
- The main web interface
|
||||||
|
- The qBittorrent API
|
||||||
|
- The WebDAV server (if enabled). You should disable health checks for the initial indexes as they can take a long time to complete.
|
||||||
|
|
||||||
You can also configure Decypharr through the web interface, but it's recommended to start with the config file for initial setup.
|
```yaml
|
||||||
|
services:
|
||||||
```json
|
decypharr:
|
||||||
{
|
...
|
||||||
"debrids": [
|
...
|
||||||
{
|
healthcheck:
|
||||||
"name": "realdebrid",
|
test: ["CMD", "/usr/bin/healthcheck", "--config", "/app/"]
|
||||||
"api_key": "your_api_key_here",
|
interval: 10s
|
||||||
"folder": "/mnt/remote/realdebrid/__all__/",
|
timeout: 10s
|
||||||
"use_webdav": true
|
retries: 3
|
||||||
}
|
|
||||||
],
|
|
||||||
"qbittorrent": {
|
|
||||||
"download_folder": "/mnt/symlinks/",
|
|
||||||
"categories": ["sonarr", "radarr"]
|
|
||||||
},
|
|
||||||
"use_auth": false,
|
|
||||||
"log_level": "info",
|
|
||||||
"port": "8282"
|
|
||||||
}
|
|
||||||
```
|
```
|
||||||
|
|
||||||
### Few Notes
|
|
||||||
|
|
||||||
- Make sure decypharr has access to the directories specified in the configuration file.
|
|
||||||
- Ensure decypharr have write permissions to the qbittorrent download folder.
|
|
||||||
- Make sure decypharr can write to the `./config/` directory.
|
|
||||||
24
docs/docs/styles/styles.css
Normal file
@@ -0,0 +1,24 @@
|
|||||||
|
/* Light mode image - visible by default */
|
||||||
|
.light-mode-image {
|
||||||
|
display: block;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Dark mode image - hidden by default */
|
||||||
|
.dark-mode-image {
|
||||||
|
display: none;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* When dark theme (slate) is active */
|
||||||
|
[data-md-color-scheme="slate"] .light-mode-image {
|
||||||
|
display: none;
|
||||||
|
}
|
||||||
|
|
||||||
|
[data-md-color-scheme="slate"] .dark-mode-image {
|
||||||
|
display: block;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Optional: smooth transition */
|
||||||
|
.light-mode-image,
|
||||||
|
.dark-mode-image {
|
||||||
|
transition: opacity 0.2s ease-in-out;
|
||||||
|
}
|
||||||
@@ -2,15 +2,35 @@
|
|||||||
|
|
||||||
This guide will help you get started with Decypharr after installation.
|
This guide will help you get started with Decypharr after installation.
|
||||||
|
|
||||||
## Basic Setup
|
After installing Decypharr, you can access the web interface at `http://localhost:8282` or your configured host/port.
|
||||||
|
|
||||||
1. Create your `config.json` file (see [Configuration](configuration/index.md) for details)
|
### Initial Configuration
|
||||||
2. Start the Decypharr service using Docker or binary
|
If this is the first time you're accessing the UI, you will be prompted to set up your credentials. You can skip this step if you don't want to enable authentication. If you choose to set up credentials, enter a username, a password, and the password confirmation, then click **Save**. You will then be redirected to the settings page.
|
||||||
3. Access the UI at `http://localhost:8282` (or your configured host/port)
|
|
||||||
4. Connect your Arr applications (Sonarr, Radarr, etc.)
|
|
||||||
|
|
||||||
## Connecting to Sonarr/Radarr
|
### Debrid Configuration
|
||||||
|

|
||||||
|
- Click on **Debrid** in the tab
|
||||||
|
- Add your desired Debrid services (Real Debrid, Torbox, Debrid Link, All Debrid) by entering the required API keys or tokens.
|
||||||
|
- Set the **Mount/Rclone Folder**. This is where decypharr will look for added torrents to symlink them to your media library.
|
||||||
|
- If you're using internal webdav, do not forget the `/__all__` suffix
|
||||||
|
- Enable WebDAV
|
||||||
|
- You can leave the remaining settings as default for now.
|
||||||
|
|
||||||
|
### qBittorrent Configuration
|
||||||
|

|
||||||
|
|
||||||
|
- Click on **Qbittorrent** in the tab
|
||||||
|
- Set the **Download Folder** to where you want Decypharr to save downloaded files. These files will be symlinked to the mount folder you configured earlier.
|
||||||
|
You can leave the remaining settings as default for now.
|
||||||
|
|
||||||
|
### Arrs Configuration
|
||||||
|
|
||||||
|
You can skip Arr configuration for now. Decypharr will auto-add them when you connect to Sonarr or Radarr later.
|
||||||
|
|
||||||
|
|
||||||
|
#### Connecting to Sonarr/Radarr
|
||||||
|
|
||||||
|

|
||||||
To connect Decypharr to your Sonarr or Radarr instance:
|
To connect Decypharr to your Sonarr or Radarr instance:
|
||||||
|
|
||||||
1. In Sonarr/Radarr, go to **Settings → Download Client → Add Client → qBittorrent**
|
1. In Sonarr/Radarr, go to **Settings → Download Client → Add Client → qBittorrent**
|
||||||
@@ -18,22 +38,38 @@ To connect Decypharr to your Sonarr or Radarr instance:
|
|||||||
- **Host**: `localhost` (or the IP of your Decypharr server)
|
- **Host**: `localhost` (or the IP of your Decypharr server)
|
||||||
- **Port**: `8282` (or your configured qBittorrent port)
|
- **Port**: `8282` (or your configured qBittorrent port)
|
||||||
- **Username**: `http://sonarr:8989` (your Arr host with http/https)
|
- **Username**: `http://sonarr:8989` (your Arr host with http/https)
|
||||||
- **Password**: `sonarr_token` (your Arr API token)
|
- **Password**: `sonarr_token` (your Arr API token, you can get this from Sonarr/Radarr settings)
|
||||||
- **Category**: e.g., `sonarr`, `radarr` (match what you configured in Decypharr)
|
- **Category**: e.g., `sonarr`, `radarr` (match what you configured in Decypharr)
|
||||||
- **Use SSL**: `No`
|
- **Use SSL**: `No`
|
||||||
- **Sequential Download**: `No` or `Yes` (if you want to download torrents locally instead of symlink)
|
- **Sequential Download**: `No` or `Yes` (if you want to download torrents locally instead of symlink)
|
||||||
3. Click **Test** to verify the connection
|
3. Click **Test** to verify the connection
|
||||||
4. Click **Save** to add the download client
|
4. Click **Save** to add the download client
|
||||||
|
|
||||||

|
|
||||||
|
|
||||||
## Using the UI
|
### Rclone Configuration
|
||||||
|
|
||||||
The Decypharr UI provides a familiar qBittorrent-like interface with additional features for Debrid services:
|

|
||||||
|
|
||||||
- Add new torrents
|
If you want Decypharr to automatically mount WebDAV folders using Rclone, you need to set up Rclone first:
|
||||||
- Monitor download status
|
|
||||||
- Access WebDAV functionality
|
|
||||||
- Edit your configuration
|
|
||||||
|
|
||||||
Access the UI at `http://localhost:8282` or your configured host/port.
|
If you're using Docker, the rclone binary is already included in the container. If you're running Decypharr directly, make sure Rclone is installed on your system.
|
||||||
|
|
||||||
|
Enable **Mount**
|
||||||
|
- **Global Mount Path**: Set the path where you want to mount the WebDAV folders (e.g., `/mnt/remote`). Decypharr will create subfolders for each Debrid service. For example, if you set `/mnt/remote`, it will create `/mnt/remote/realdebrid`, `/mnt/remote/torbox`, etc. This should be the grandparent of your mount folder set in the Debrid configuration.
|
||||||
|
- **User ID**: Set the user ID for Rclone mounts (defaults to the value of the environment variable `PUID`).
|
||||||
|
- **Group ID**: Set the group ID for Rclone mounts (defaults to the value of the environment variable `PGID`).
|
||||||
|
- **Buffer Size**: Set the buffer size for Rclone mounts.
|
||||||
|
|
||||||
|
You should set other options based on your use case. If you don't know what you're doing, leave it as defaults. Checkout the [Rclone documentation](https://rclone.org/commands/rclone_mount/) for more details.
|
||||||
|
|
||||||
|
### Repair Configuration
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
Repair is an optional feature that allows you to fix missing files, symlinks, and other issues in your media library.
|
||||||
|
- Click on **Repair** in the tab
|
||||||
|
- Enable **Scheduled Repair** if you want Decypharr to automatically check for missing files at your specified interval.
|
||||||
|
- Set the **Repair Interval** to how often you want Decypharr to check for missing files (e.g., 1h, 6h, 12h, 24h; you can also use cron syntax like `0 0 * * *` for daily checks).
|
||||||
|
- Enable **WebDav** (you should enable this if you enabled WebDav in the Debrid configuration).
|
||||||
|
- **Auto Process**: Enable this if you want Decypharr to automatically process repair jobs when they are done. This could delete the original files and symlinks, so be wary!
|
||||||
|
- **Worker Threads**: Set the number of worker threads for processing repair jobs. More threads can speed up the process but may consume more resources.
|
||||||
@@ -6,6 +6,9 @@ repo_name: sirrobot01/decypharr
|
|||||||
edit_uri: blob/main/docs
|
edit_uri: blob/main/docs
|
||||||
|
|
||||||
|
|
||||||
|
extra_css:
|
||||||
|
- styles/styles.css
|
||||||
|
|
||||||
theme:
|
theme:
|
||||||
name: material
|
name: material
|
||||||
logo: images/logo.png
|
logo: images/logo.png
|
||||||
@@ -59,22 +62,17 @@ nav:
|
|||||||
- Home: index.md
|
- Home: index.md
|
||||||
- Installation: installation.md
|
- Installation: installation.md
|
||||||
- Usage: usage.md
|
- Usage: usage.md
|
||||||
- Configuration:
|
- API Documentation: api.md
|
||||||
- Overview: configuration/index.md
|
|
||||||
- General: configuration/general.md
|
|
||||||
- Debrid Providers: configuration/debrid.md
|
|
||||||
- qBittorrent: configuration/qbittorrent.md
|
|
||||||
- Arr Integration: configuration/arrs.md
|
|
||||||
- Features:
|
- Features:
|
||||||
- Overview: features/index.md
|
- Overview: features/index.md
|
||||||
- Repair Worker: features/repair-worker.md
|
- Repair Worker: features/repair-worker.md
|
||||||
- WebDAV: features/webdav.md
|
|
||||||
- Guides:
|
- Guides:
|
||||||
- Overview: guides/index.md
|
- Overview: guides/index.md
|
||||||
- Setting Up with Rclone: guides/rclone.md
|
- Manual Downloading: guides/downloading.md
|
||||||
- Changelog: changelog.md
|
- Internal Mounting: guides/internal-mounting.md
|
||||||
|
|
||||||
|
|
||||||
plugins:
|
plugins:
|
||||||
- search
|
- search
|
||||||
- tags
|
- tags
|
||||||
|
- swagger-ui-tag
|
||||||
3
docs/requirements.txt
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
mkdocs==1.6.1
|
||||||
|
mkdocs-material==9.6.16
|
||||||
|
mkdocs-swagger-ui-tag==0.6.10
|
||||||
19
go.mod
@@ -7,32 +7,33 @@ toolchain go1.24.3
|
|||||||
require (
|
require (
|
||||||
github.com/anacrolix/torrent v1.55.0
|
github.com/anacrolix/torrent v1.55.0
|
||||||
github.com/cavaliergopher/grab/v3 v3.0.1
|
github.com/cavaliergopher/grab/v3 v3.0.1
|
||||||
github.com/go-chi/chi/v5 v5.1.0
|
github.com/go-chi/chi/v5 v5.2.2
|
||||||
github.com/go-co-op/gocron/v2 v2.16.1
|
github.com/go-co-op/gocron/v2 v2.16.1
|
||||||
github.com/google/uuid v1.6.0
|
github.com/google/uuid v1.6.0
|
||||||
github.com/gorilla/sessions v1.4.0
|
github.com/gorilla/sessions v1.4.0
|
||||||
github.com/robfig/cron/v3 v3.0.1
|
github.com/robfig/cron/v3 v3.0.1
|
||||||
github.com/rs/zerolog v1.33.0
|
github.com/rs/zerolog v1.33.0
|
||||||
github.com/stanNthe5/stringbuf v0.0.3
|
github.com/stanNthe5/stringbuf v0.0.3
|
||||||
golang.org/x/crypto v0.33.0
|
go.uber.org/ratelimit v0.3.1
|
||||||
golang.org/x/net v0.35.0
|
golang.org/x/crypto v0.39.0
|
||||||
golang.org/x/sync v0.12.0
|
golang.org/x/net v0.41.0
|
||||||
golang.org/x/time v0.8.0
|
golang.org/x/sync v0.15.0
|
||||||
gopkg.in/natefinch/lumberjack.v2 v2.2.1
|
gopkg.in/natefinch/lumberjack.v2 v2.2.1
|
||||||
)
|
)
|
||||||
|
|
||||||
require (
|
require (
|
||||||
github.com/anacrolix/missinggo v1.3.0 // indirect
|
github.com/anacrolix/missinggo v1.3.0 // indirect
|
||||||
github.com/anacrolix/missinggo/v2 v2.7.3 // indirect
|
github.com/anacrolix/missinggo/v2 v2.7.3 // indirect
|
||||||
|
github.com/benbjohnson/clock v1.3.0 // indirect
|
||||||
github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8 // indirect
|
github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8 // indirect
|
||||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||||
github.com/google/go-cmp v0.6.0 // indirect
|
github.com/google/go-cmp v0.7.0 // indirect
|
||||||
github.com/gorilla/securecookie v1.1.2 // indirect
|
github.com/gorilla/securecookie v1.1.2 // indirect
|
||||||
github.com/huandu/xstrings v1.3.2 // indirect
|
github.com/huandu/xstrings v1.3.2 // indirect
|
||||||
github.com/jonboulle/clockwork v0.5.0 // indirect
|
github.com/jonboulle/clockwork v0.5.0 // indirect
|
||||||
github.com/mattn/go-colorable v0.1.13 // indirect
|
github.com/mattn/go-colorable v0.1.14 // indirect
|
||||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
||||||
github.com/rogpeppe/go-internal v1.13.1 // indirect
|
github.com/rogpeppe/go-internal v1.14.1 // indirect
|
||||||
golang.org/x/sys v0.30.0 // indirect
|
golang.org/x/sys v0.33.0 // indirect
|
||||||
)
|
)
|
||||||
|
|||||||
39
go.sum
@@ -36,6 +36,8 @@ github.com/anacrolix/tagflag v1.1.0/go.mod h1:Scxs9CV10NQatSmbyjqmqmeQNwGzlNe0CM
|
|||||||
github.com/anacrolix/torrent v1.55.0 h1:s9yh/YGdPmbN9dTa+0Inh2dLdrLQRvEAj1jdFW/Hdd8=
|
github.com/anacrolix/torrent v1.55.0 h1:s9yh/YGdPmbN9dTa+0Inh2dLdrLQRvEAj1jdFW/Hdd8=
|
||||||
github.com/anacrolix/torrent v1.55.0/go.mod h1:sBdZHBSZNj4de0m+EbYg7vvs/G/STubxu/GzzNbojsE=
|
github.com/anacrolix/torrent v1.55.0/go.mod h1:sBdZHBSZNj4de0m+EbYg7vvs/G/STubxu/GzzNbojsE=
|
||||||
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
|
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
|
||||||
|
github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A=
|
||||||
|
github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
|
||||||
github.com/benbjohnson/immutable v0.2.0/go.mod h1:uc6OHo6PN2++n98KHLxW8ef4W42ylHiQSENghE1ezxI=
|
github.com/benbjohnson/immutable v0.2.0/go.mod h1:uc6OHo6PN2++n98KHLxW8ef4W42ylHiQSENghE1ezxI=
|
||||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||||
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||||||
@@ -68,8 +70,8 @@ github.com/glycerine/go-unsnap-stream v0.0.0-20190901134440-81cf024a9e0a/go.mod
|
|||||||
github.com/glycerine/goconvey v0.0.0-20180728074245-46e3a41ad493/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24=
|
github.com/glycerine/goconvey v0.0.0-20180728074245-46e3a41ad493/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24=
|
||||||
github.com/glycerine/goconvey v0.0.0-20190315024820-982ee783a72e/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24=
|
github.com/glycerine/goconvey v0.0.0-20190315024820-982ee783a72e/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24=
|
||||||
github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24=
|
github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24=
|
||||||
github.com/go-chi/chi/v5 v5.1.0 h1:acVI1TYaD+hhedDJ3r54HyA6sExp3HfXq7QWEEY/xMw=
|
github.com/go-chi/chi/v5 v5.2.2 h1:CMwsvRVTbXVytCk1Wd72Zy1LAsAh9GxMmSNWLHCG618=
|
||||||
github.com/go-chi/chi/v5 v5.1.0/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8=
|
github.com/go-chi/chi/v5 v5.2.2/go.mod h1:L2yAIGWB3H+phAw1NxKwWM+7eUH/lU8pOMm5hHcoops=
|
||||||
github.com/go-co-op/gocron/v2 v2.16.1 h1:ux/5zxVRveCaCuTtNI3DiOk581KC1KpJbpJFYUEVYwo=
|
github.com/go-co-op/gocron/v2 v2.16.1 h1:ux/5zxVRveCaCuTtNI3DiOk581KC1KpJbpJFYUEVYwo=
|
||||||
github.com/go-co-op/gocron/v2 v2.16.1/go.mod h1:opexeOFy5BplhsKdA7bzY9zeYih8I8/WNJ4arTIFPVc=
|
github.com/go-co-op/gocron/v2 v2.16.1/go.mod h1:opexeOFy5BplhsKdA7bzY9zeYih8I8/WNJ4arTIFPVc=
|
||||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||||
@@ -100,8 +102,8 @@ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5a
|
|||||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||||
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
|
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
||||||
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
|
||||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||||
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
|
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
|
||||||
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||||
@@ -141,8 +143,9 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
|||||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||||
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
|
|
||||||
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
|
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
|
||||||
|
github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
|
||||||
|
github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
|
||||||
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
|
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
|
||||||
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||||
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
||||||
@@ -186,8 +189,8 @@ github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4
|
|||||||
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
|
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
|
||||||
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
|
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
|
||||||
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
|
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
|
||||||
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
|
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
|
||||||
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
|
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
|
||||||
github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
|
github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
|
||||||
github.com/rs/zerolog v1.33.0 h1:1cU2KZkvPxNyfgEmhHAz/1A9Bz+llsdYzklWFzgp0r8=
|
github.com/rs/zerolog v1.33.0 h1:1cU2KZkvPxNyfgEmhHAz/1A9Bz+llsdYzklWFzgp0r8=
|
||||||
github.com/rs/zerolog v1.33.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss=
|
github.com/rs/zerolog v1.33.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss=
|
||||||
@@ -216,12 +219,16 @@ github.com/willf/bitset v1.1.10/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPy
|
|||||||
go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
|
go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
|
||||||
go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
|
go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
|
||||||
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||||
|
go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
|
||||||
|
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
|
||||||
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
||||||
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
|
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
|
||||||
|
go.uber.org/ratelimit v0.3.1 h1:K4qVE+byfv/B3tC+4nYWP7v/6SimcO7HzHekoMNBma0=
|
||||||
|
go.uber.org/ratelimit v0.3.1/go.mod h1:6euWsTB6U/Nb3X++xEUXA8ciPJvr19Q/0h1+oDcJhRk=
|
||||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||||
golang.org/x/crypto v0.33.0 h1:IOBPskki6Lysi0lo9qQvbxiQ+FvsCC/YWOecCHAixus=
|
golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM=
|
||||||
golang.org/x/crypto v0.33.0/go.mod h1:bVdXmD7IV/4GdElGPozy6U7lWdRXA4qyRVGJV57uQ5M=
|
golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U=
|
||||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||||
@@ -237,8 +244,8 @@ golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73r
|
|||||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||||
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
golang.org/x/net v0.35.0 h1:T5GQRQb2y08kTAByq9L4/bz8cipCdA8FbRTXewonqY8=
|
golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw=
|
||||||
golang.org/x/net v0.35.0/go.mod h1:EglIi67kWsHKlRzzVMUD93VMSWGFOMSZgxFjparz1Qk=
|
golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA=
|
||||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
@@ -246,8 +253,8 @@ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJ
|
|||||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
|
golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8=
|
||||||
golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
|
golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
|
||||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
@@ -262,12 +269,10 @@ golang.org/x/sys v0.0.0-20200413165638-669c56c373c4/go.mod h1:h1NjWce9XRLGQEsW7w
|
|||||||
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
|
golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw=
|
||||||
golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
||||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||||
golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg=
|
|
||||||
golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
|
||||||
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
|
|||||||
@@ -2,6 +2,8 @@ package config
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"cmp"
|
"cmp"
|
||||||
|
"crypto/rand"
|
||||||
|
"encoding/hex"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
@@ -12,6 +14,13 @@ import (
|
|||||||
"sync"
|
"sync"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
type RepairStrategy string
|
||||||
|
|
||||||
|
const (
|
||||||
|
RepairStrategyPerFile RepairStrategy = "per_file"
|
||||||
|
RepairStrategyPerTorrent RepairStrategy = "per_torrent"
|
||||||
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
instance *Config
|
instance *Config
|
||||||
once sync.Once
|
once sync.Once
|
||||||
@@ -19,15 +28,20 @@ var (
|
|||||||
)
|
)
|
||||||
|
|
||||||
type Debrid struct {
|
type Debrid struct {
|
||||||
Name string `json:"name,omitempty"`
|
Name string `json:"name,omitempty"`
|
||||||
APIKey string `json:"api_key,omitempty"`
|
APIKey string `json:"api_key,omitempty"`
|
||||||
DownloadAPIKeys []string `json:"download_api_keys,omitempty"`
|
DownloadAPIKeys []string `json:"download_api_keys,omitempty"`
|
||||||
Folder string `json:"folder,omitempty"`
|
Folder string `json:"folder,omitempty"`
|
||||||
DownloadUncached bool `json:"download_uncached,omitempty"`
|
DownloadUncached bool `json:"download_uncached,omitempty"`
|
||||||
CheckCached bool `json:"check_cached,omitempty"`
|
CheckCached bool `json:"check_cached,omitempty"`
|
||||||
RateLimit string `json:"rate_limit,omitempty"` // 200/minute or 10/second
|
RateLimit string `json:"rate_limit,omitempty"` // 200/minute or 10/second
|
||||||
Proxy string `json:"proxy,omitempty"`
|
RepairRateLimit string `json:"repair_rate_limit,omitempty"`
|
||||||
AddSamples bool `json:"add_samples,omitempty"`
|
DownloadRateLimit string `json:"download_rate_limit,omitempty"`
|
||||||
|
Proxy string `json:"proxy,omitempty"`
|
||||||
|
UnpackRar bool `json:"unpack_rar,omitempty"`
|
||||||
|
AddSamples bool `json:"add_samples,omitempty"`
|
||||||
|
MinimumFreeSlot int `json:"minimum_free_slot,omitempty"` // Minimum active pots to use this debrid
|
||||||
|
Limit int `json:"limit,omitempty"` // Maximum number of total torrents
|
||||||
|
|
||||||
UseWebDav bool `json:"use_webdav,omitempty"`
|
UseWebDav bool `json:"use_webdav,omitempty"`
|
||||||
WebDav
|
WebDav
|
||||||
@@ -51,22 +65,58 @@ type Arr struct {
|
|||||||
Cleanup bool `json:"cleanup,omitempty"`
|
Cleanup bool `json:"cleanup,omitempty"`
|
||||||
SkipRepair bool `json:"skip_repair,omitempty"`
|
SkipRepair bool `json:"skip_repair,omitempty"`
|
||||||
DownloadUncached *bool `json:"download_uncached,omitempty"`
|
DownloadUncached *bool `json:"download_uncached,omitempty"`
|
||||||
|
SelectedDebrid string `json:"selected_debrid,omitempty"`
|
||||||
|
Source string `json:"source,omitempty"` // The source of the arr, e.g. "auto", "config", "". Auto means it was automatically detected from the arr
|
||||||
}
|
}
|
||||||
|
|
||||||
type Repair struct {
|
type Repair struct {
|
||||||
Enabled bool `json:"enabled,omitempty"`
|
Enabled bool `json:"enabled,omitempty"`
|
||||||
Interval string `json:"interval,omitempty"`
|
Interval string `json:"interval,omitempty"`
|
||||||
RunOnStart bool `json:"run_on_start,omitempty"`
|
ZurgURL string `json:"zurg_url,omitempty"`
|
||||||
ZurgURL string `json:"zurg_url,omitempty"`
|
AutoProcess bool `json:"auto_process,omitempty"`
|
||||||
AutoProcess bool `json:"auto_process,omitempty"`
|
UseWebDav bool `json:"use_webdav,omitempty"`
|
||||||
UseWebDav bool `json:"use_webdav,omitempty"`
|
Workers int `json:"workers,omitempty"`
|
||||||
Workers int `json:"workers,omitempty"`
|
ReInsert bool `json:"reinsert,omitempty"`
|
||||||
ReInsert bool `json:"reinsert,omitempty"`
|
Strategy RepairStrategy `json:"strategy,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
type Auth struct {
|
type Auth struct {
|
||||||
Username string `json:"username,omitempty"`
|
Username string `json:"username,omitempty"`
|
||||||
Password string `json:"password,omitempty"`
|
Password string `json:"password,omitempty"`
|
||||||
|
APIToken string `json:"api_token,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type Rclone struct {
|
||||||
|
// Global mount folder where all providers will be mounted as subfolders
|
||||||
|
Enabled bool `json:"enabled,omitempty"`
|
||||||
|
MountPath string `json:"mount_path,omitempty"`
|
||||||
|
|
||||||
|
// Cache settings
|
||||||
|
CacheDir string `json:"cache_dir,omitempty"`
|
||||||
|
|
||||||
|
// VFS settings
|
||||||
|
VfsCacheMode string `json:"vfs_cache_mode,omitempty"` // off, minimal, writes, full
|
||||||
|
VfsCacheMaxAge string `json:"vfs_cache_max_age,omitempty"` // Maximum age of objects in the cache (default 1h)
|
||||||
|
VfsCacheMaxSize string `json:"vfs_cache_max_size,omitempty"` // Maximum size of the cache (default off)
|
||||||
|
VfsCachePollInterval string `json:"vfs_cache_poll_interval,omitempty"` // How often to poll for changes (default 1m)
|
||||||
|
VfsReadChunkSize string `json:"vfs_read_chunk_size,omitempty"` // Read chunk size (default 128M)
|
||||||
|
VfsReadChunkSizeLimit string `json:"vfs_read_chunk_size_limit,omitempty"` // Max chunk size (default off)
|
||||||
|
VfsReadAhead string `json:"vfs_read_ahead,omitempty"` // read ahead size
|
||||||
|
VfsPollInterval string `json:"vfs_poll_interval,omitempty"` // How often to rclone cleans the cache (default 1m)
|
||||||
|
BufferSize string `json:"buffer_size,omitempty"` // Buffer size for reading files (default 16M)
|
||||||
|
|
||||||
|
// File system settings
|
||||||
|
UID uint32 `json:"uid,omitempty"` // User ID for mounted files
|
||||||
|
GID uint32 `json:"gid,omitempty"` // Group ID for mounted files
|
||||||
|
Umask string `json:"umask,omitempty"`
|
||||||
|
|
||||||
|
// Timeout settings
|
||||||
|
AttrTimeout string `json:"attr_timeout,omitempty"` // Attribute cache timeout (default 1s)
|
||||||
|
DirCacheTime string `json:"dir_cache_time,omitempty"` // Directory cache time (default 5m)
|
||||||
|
|
||||||
|
// Performance settings
|
||||||
|
NoModTime bool `json:"no_modtime,omitempty"` // Don't read/write modification time
|
||||||
|
NoChecksum bool `json:"no_checksum,omitempty"` // Don't checksum files on upload
|
||||||
}
|
}
|
||||||
|
|
||||||
type Config struct {
|
type Config struct {
|
||||||
@@ -75,19 +125,21 @@ type Config struct {
|
|||||||
URLBase string `json:"url_base,omitempty"`
|
URLBase string `json:"url_base,omitempty"`
|
||||||
Port string `json:"port,omitempty"`
|
Port string `json:"port,omitempty"`
|
||||||
|
|
||||||
LogLevel string `json:"log_level,omitempty"`
|
LogLevel string `json:"log_level,omitempty"`
|
||||||
Debrids []Debrid `json:"debrids,omitempty"`
|
Debrids []Debrid `json:"debrids,omitempty"`
|
||||||
QBitTorrent QBitTorrent `json:"qbittorrent,omitempty"`
|
QBitTorrent QBitTorrent `json:"qbittorrent,omitempty"`
|
||||||
Arrs []Arr `json:"arrs,omitempty"`
|
Arrs []Arr `json:"arrs,omitempty"`
|
||||||
Repair Repair `json:"repair,omitempty"`
|
Repair Repair `json:"repair,omitempty"`
|
||||||
WebDav WebDav `json:"webdav,omitempty"`
|
WebDav WebDav `json:"webdav,omitempty"`
|
||||||
AllowedExt []string `json:"allowed_file_types,omitempty"`
|
Rclone Rclone `json:"rclone,omitempty"`
|
||||||
MinFileSize string `json:"min_file_size,omitempty"` // Minimum file size to download, 10MB, 1GB, etc
|
AllowedExt []string `json:"allowed_file_types,omitempty"`
|
||||||
MaxFileSize string `json:"max_file_size,omitempty"` // Maximum file size to download (0 means no limit)
|
MinFileSize string `json:"min_file_size,omitempty"` // Minimum file size to download, 10MB, 1GB, etc
|
||||||
Path string `json:"-"` // Path to save the config file
|
MaxFileSize string `json:"max_file_size,omitempty"` // Maximum file size to download (0 means no limit)
|
||||||
UseAuth bool `json:"use_auth,omitempty"`
|
Path string `json:"-"` // Path to save the config file
|
||||||
Auth *Auth `json:"-"`
|
UseAuth bool `json:"use_auth,omitempty"`
|
||||||
DiscordWebhook string `json:"discord_webhook_url,omitempty"`
|
Auth *Auth `json:"-"`
|
||||||
|
DiscordWebhook string `json:"discord_webhook_url,omitempty"`
|
||||||
|
RemoveStalledAfter string `json:"remove_stalled_after,omitzero"`
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *Config) JsonFile() string {
|
func (c *Config) JsonFile() string {
|
||||||
@@ -97,6 +149,10 @@ func (c *Config) AuthFile() string {
|
|||||||
return filepath.Join(c.Path, "auth.json")
|
return filepath.Join(c.Path, "auth.json")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (c *Config) TorrentsFile() string {
|
||||||
|
return filepath.Join(c.Path, "torrents.json")
|
||||||
|
}
|
||||||
|
|
||||||
func (c *Config) loadConfig() error {
|
func (c *Config) loadConfig() error {
|
||||||
// Load the config file
|
// Load the config file
|
||||||
if configPath == "" {
|
if configPath == "" {
|
||||||
@@ -179,6 +235,15 @@ func ValidateConfig(config *Config) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// generateAPIToken creates a new random API token
|
||||||
|
func generateAPIToken() (string, error) {
|
||||||
|
bytes := make([]byte, 32) // 256-bit token
|
||||||
|
if _, err := rand.Read(bytes); err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
return hex.EncodeToString(bytes), nil
|
||||||
|
}
|
||||||
|
|
||||||
func SetConfigPath(path string) {
|
func SetConfigPath(path string) {
|
||||||
configPath = path
|
configPath = path
|
||||||
}
|
}
|
||||||
@@ -271,16 +336,22 @@ func (c *Config) updateDebrid(d Debrid) Debrid {
|
|||||||
workers := runtime.NumCPU() * 50
|
workers := runtime.NumCPU() * 50
|
||||||
perDebrid := workers / len(c.Debrids)
|
perDebrid := workers / len(c.Debrids)
|
||||||
|
|
||||||
if len(d.DownloadAPIKeys) == 0 {
|
var downloadKeys []string
|
||||||
d.DownloadAPIKeys = append(d.DownloadAPIKeys, d.APIKey)
|
|
||||||
|
if len(d.DownloadAPIKeys) > 0 {
|
||||||
|
downloadKeys = d.DownloadAPIKeys
|
||||||
|
} else {
|
||||||
|
// If no download API keys are specified, use the main API key
|
||||||
|
downloadKeys = []string{d.APIKey}
|
||||||
}
|
}
|
||||||
|
d.DownloadAPIKeys = downloadKeys
|
||||||
|
|
||||||
if !d.UseWebDav {
|
if !d.UseWebDav {
|
||||||
return d
|
return d
|
||||||
}
|
}
|
||||||
|
|
||||||
if d.TorrentsRefreshInterval == "" {
|
if d.TorrentsRefreshInterval == "" {
|
||||||
d.TorrentsRefreshInterval = cmp.Or(c.WebDav.TorrentsRefreshInterval, "15s") // 15 seconds
|
d.TorrentsRefreshInterval = cmp.Or(c.WebDav.TorrentsRefreshInterval, "45s") // 45 seconds
|
||||||
}
|
}
|
||||||
if d.WebDav.DownloadLinksRefreshInterval == "" {
|
if d.WebDav.DownloadLinksRefreshInterval == "" {
|
||||||
d.DownloadLinksRefreshInterval = cmp.Or(c.WebDav.DownloadLinksRefreshInterval, "40m") // 40 minutes
|
d.DownloadLinksRefreshInterval = cmp.Or(c.WebDav.DownloadLinksRefreshInterval, "40m") // 40 minutes
|
||||||
@@ -336,8 +407,46 @@ func (c *Config) setDefaults() {
|
|||||||
c.URLBase += "/"
|
c.URLBase += "/"
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Set repair defaults
|
||||||
|
if c.Repair.Strategy == "" {
|
||||||
|
c.Repair.Strategy = RepairStrategyPerTorrent
|
||||||
|
}
|
||||||
|
|
||||||
|
// Rclone defaults
|
||||||
|
if c.Rclone.Enabled {
|
||||||
|
c.Rclone.VfsCacheMode = cmp.Or(c.Rclone.VfsCacheMode, "off")
|
||||||
|
if c.Rclone.UID == 0 {
|
||||||
|
c.Rclone.UID = uint32(os.Getuid())
|
||||||
|
}
|
||||||
|
if c.Rclone.GID == 0 {
|
||||||
|
if runtime.GOOS == "windows" {
|
||||||
|
// On Windows, we use the current user's SID as GID
|
||||||
|
c.Rclone.GID = uint32(os.Getuid()) // Windows does not have GID, using UID instead
|
||||||
|
} else {
|
||||||
|
c.Rclone.GID = uint32(os.Getgid())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if c.Rclone.VfsCacheMode != "off" {
|
||||||
|
c.Rclone.VfsCachePollInterval = cmp.Or(c.Rclone.VfsCachePollInterval, "1m") // Clean cache every minute
|
||||||
|
}
|
||||||
|
c.Rclone.DirCacheTime = cmp.Or(c.Rclone.DirCacheTime, "5m")
|
||||||
|
}
|
||||||
// Load the auth file
|
// Load the auth file
|
||||||
c.Auth = c.GetAuth()
|
c.Auth = c.GetAuth()
|
||||||
|
|
||||||
|
// Generate API token if auth is enabled and no token exists
|
||||||
|
if c.UseAuth {
|
||||||
|
if c.Auth == nil {
|
||||||
|
c.Auth = &Auth{}
|
||||||
|
}
|
||||||
|
if c.Auth.APIToken == "" {
|
||||||
|
if token, err := generateAPIToken(); err == nil {
|
||||||
|
c.Auth.APIToken = token
|
||||||
|
// Save the updated auth config
|
||||||
|
_ = c.SaveAuth(c.Auth)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *Config) Save() error {
|
func (c *Config) Save() error {
|
||||||
@@ -379,3 +488,7 @@ func Reload() {
|
|||||||
instance = nil
|
instance = nil
|
||||||
once = sync.Once{}
|
once = sync.Once{}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func DefaultFreeSlot() int {
|
||||||
|
return 10
|
||||||
|
}
|
||||||
|
|||||||
@@ -24,7 +24,7 @@ func (c *Config) IsAllowedFile(filename string) bool {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func getDefaultExtensions() []string {
|
func getDefaultExtensions() []string {
|
||||||
videoExts := strings.Split("webm,m4v,3gp,nsv,ty,strm,rm,rmvb,m3u,ifo,mov,qt,divx,xvid,bivx,nrg,pva,wmv,asf,asx,ogm,ogv,m2v,avi,bin,dat,dvr-ms,mpg,mpeg,mp4,avc,vp3,svq3,nuv,viv,dv,fli,flv,wpl,img,iso,vob,mkv,mk3d,ts,wtv,m2ts'", ",")
|
videoExts := strings.Split("webm,m4v,3gp,nsv,ty,strm,rm,rmvb,m3u,ifo,mov,qt,divx,xvid,bivx,nrg,pva,wmv,asf,asx,ogm,ogv,m2v,avi,bin,dat,dvr-ms,mpg,mpeg,mp4,avc,vp3,svq3,nuv,viv,dv,fli,flv,wpl,vob,mkv,mk3d,ts,wtv,m2ts", ",")
|
||||||
musicExts := strings.Split("MP3,WAV,FLAC,OGG,WMA,AIFF,ALAC,M4A,APE,AC3,DTS,M4P,MID,MIDI,MKA,MP2,MPA,RA,VOC,WV,AMR", ",")
|
musicExts := strings.Split("MP3,WAV,FLAC,OGG,WMA,AIFF,ALAC,M4A,APE,AC3,DTS,M4P,MID,MIDI,MKA,MP2,MPA,RA,VOC,WV,AMR", ",")
|
||||||
|
|
||||||
// Combine both slices
|
// Combine both slices
|
||||||
|
|||||||
@@ -2,7 +2,6 @@ package request
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"compress/gzip"
|
|
||||||
"context"
|
"context"
|
||||||
"crypto/tls"
|
"crypto/tls"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
@@ -10,10 +9,9 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"github.com/rs/zerolog"
|
"github.com/rs/zerolog"
|
||||||
"github.com/sirrobot01/decypharr/internal/logger"
|
"github.com/sirrobot01/decypharr/internal/logger"
|
||||||
|
"go.uber.org/ratelimit"
|
||||||
"golang.org/x/net/proxy"
|
"golang.org/x/net/proxy"
|
||||||
"golang.org/x/time/rate"
|
|
||||||
"io"
|
"io"
|
||||||
"math"
|
|
||||||
"math/rand"
|
"math/rand"
|
||||||
"net"
|
"net"
|
||||||
"net/http"
|
"net/http"
|
||||||
@@ -53,7 +51,7 @@ type ClientOption func(*Client)
|
|||||||
// Client represents an HTTP client with additional capabilities
|
// Client represents an HTTP client with additional capabilities
|
||||||
type Client struct {
|
type Client struct {
|
||||||
client *http.Client
|
client *http.Client
|
||||||
rateLimiter *rate.Limiter
|
rateLimiter ratelimit.Limiter
|
||||||
headers map[string]string
|
headers map[string]string
|
||||||
headersMu sync.RWMutex
|
headersMu sync.RWMutex
|
||||||
maxRetries int
|
maxRetries int
|
||||||
@@ -85,7 +83,7 @@ func WithRedirectPolicy(policy func(req *http.Request, via []*http.Request) erro
|
|||||||
}
|
}
|
||||||
|
|
||||||
// WithRateLimiter sets a rate limiter
|
// WithRateLimiter sets a rate limiter
|
||||||
func WithRateLimiter(rl *rate.Limiter) ClientOption {
|
func WithRateLimiter(rl ratelimit.Limiter) ClientOption {
|
||||||
return func(c *Client) {
|
return func(c *Client) {
|
||||||
c.rateLimiter = rl
|
c.rateLimiter = rl
|
||||||
}
|
}
|
||||||
@@ -137,9 +135,11 @@ func WithProxy(proxyURL string) ClientOption {
|
|||||||
// doRequest performs a single HTTP request with rate limiting
|
// doRequest performs a single HTTP request with rate limiting
|
||||||
func (c *Client) doRequest(req *http.Request) (*http.Response, error) {
|
func (c *Client) doRequest(req *http.Request) (*http.Response, error) {
|
||||||
if c.rateLimiter != nil {
|
if c.rateLimiter != nil {
|
||||||
err := c.rateLimiter.Wait(req.Context())
|
select {
|
||||||
if err != nil {
|
case <-req.Context().Done():
|
||||||
return nil, fmt.Errorf("rate limiter wait: %w", err)
|
return nil, req.Context().Err()
|
||||||
|
default:
|
||||||
|
c.rateLimiter.Take()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -340,7 +340,10 @@ func New(options ...ClientOption) *Client {
|
|||||||
return client
|
return client
|
||||||
}
|
}
|
||||||
|
|
||||||
func ParseRateLimit(rateStr string) *rate.Limiter {
|
func ParseRateLimit(rateStr string) ratelimit.Limiter {
|
||||||
|
if rateStr == "" {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
parts := strings.SplitN(rateStr, "/", 2)
|
parts := strings.SplitN(rateStr, "/", 2)
|
||||||
if len(parts) != 2 {
|
if len(parts) != 2 {
|
||||||
return nil
|
return nil
|
||||||
@@ -352,23 +355,21 @@ func ParseRateLimit(rateStr string) *rate.Limiter {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Set slack size to 10%
|
||||||
|
slackSize := count / 10
|
||||||
|
|
||||||
// normalize unit
|
// normalize unit
|
||||||
unit := strings.ToLower(strings.TrimSpace(parts[1]))
|
unit := strings.ToLower(strings.TrimSpace(parts[1]))
|
||||||
unit = strings.TrimSuffix(unit, "s")
|
unit = strings.TrimSuffix(unit, "s")
|
||||||
burstSize := int(math.Ceil(float64(count) * 0.1))
|
|
||||||
if burstSize < 1 {
|
|
||||||
burstSize = 1
|
|
||||||
}
|
|
||||||
if burstSize > count {
|
|
||||||
burstSize = count
|
|
||||||
}
|
|
||||||
switch unit {
|
switch unit {
|
||||||
case "minute", "min":
|
case "minute", "min":
|
||||||
return rate.NewLimiter(rate.Limit(float64(count)/60.0), burstSize)
|
return ratelimit.New(count, ratelimit.Per(time.Minute), ratelimit.WithSlack(slackSize))
|
||||||
case "second", "sec":
|
case "second", "sec":
|
||||||
return rate.NewLimiter(rate.Limit(float64(count)), burstSize)
|
return ratelimit.New(count, ratelimit.Per(time.Second), ratelimit.WithSlack(slackSize))
|
||||||
case "hour", "hr":
|
case "hour", "hr":
|
||||||
return rate.NewLimiter(rate.Limit(float64(count)/3600.0), burstSize)
|
return ratelimit.New(count, ratelimit.Per(time.Hour), ratelimit.WithSlack(slackSize))
|
||||||
|
case "day", "d":
|
||||||
|
return ratelimit.New(count, ratelimit.Per(24*time.Hour), ratelimit.WithSlack(slackSize))
|
||||||
default:
|
default:
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@@ -383,31 +384,6 @@ func JSONResponse(w http.ResponseWriter, data interface{}, code int) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func Gzip(body []byte) []byte {
|
|
||||||
if len(body) == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check if the pool is nil
|
|
||||||
buf := bytes.NewBuffer(make([]byte, 0, len(body)))
|
|
||||||
|
|
||||||
gz, err := gzip.NewWriterLevel(buf, gzip.BestSpeed)
|
|
||||||
if err != nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, err := gz.Write(body); err != nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
if err := gz.Close(); err != nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
result := make([]byte, buf.Len())
|
|
||||||
copy(result, buf.Bytes())
|
|
||||||
|
|
||||||
return result
|
|
||||||
}
|
|
||||||
|
|
||||||
func Default() *Client {
|
func Default() *Client {
|
||||||
once.Do(func() {
|
once.Do(func() {
|
||||||
instance = New()
|
instance = New()
|
||||||
@@ -435,7 +411,7 @@ func isRetryableError(err error) bool {
|
|||||||
var netErr net.Error
|
var netErr net.Error
|
||||||
if errors.As(err, &netErr) {
|
if errors.As(err, &netErr) {
|
||||||
// Retry on timeout errors and temporary errors
|
// Retry on timeout errors and temporary errors
|
||||||
return netErr.Timeout() || netErr.Temporary()
|
return netErr.Timeout()
|
||||||
}
|
}
|
||||||
|
|
||||||
// Not a retryable error
|
// Not a retryable error
|
||||||
|
|||||||
@@ -1,4 +1,6 @@
|
|||||||
package request
|
package utils
|
||||||
|
|
||||||
|
import "errors"
|
||||||
|
|
||||||
type HTTPError struct {
|
type HTTPError struct {
|
||||||
StatusCode int
|
StatusCode int
|
||||||
@@ -33,3 +35,13 @@ var TorrentNotFoundError = &HTTPError{
|
|||||||
Message: "Torrent not found",
|
Message: "Torrent not found",
|
||||||
Code: "torrent_not_found",
|
Code: "torrent_not_found",
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var TooManyActiveDownloadsError = &HTTPError{
|
||||||
|
StatusCode: 509,
|
||||||
|
Message: "Too many active downloads",
|
||||||
|
Code: "too_many_active_downloads",
|
||||||
|
}
|
||||||
|
|
||||||
|
func IsTooManyActiveDownloadsError(err error) bool {
|
||||||
|
return errors.As(err, &TooManyActiveDownloadsError)
|
||||||
|
}
|
||||||
@@ -1,7 +1,10 @@
|
|||||||
package utils
|
package utils
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
"net/url"
|
"net/url"
|
||||||
|
"os"
|
||||||
"strings"
|
"strings"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -19,3 +22,65 @@ func PathUnescape(path string) string {
|
|||||||
|
|
||||||
return unescapedPath
|
return unescapedPath
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func PreCacheFile(filePaths []string) error {
|
||||||
|
if len(filePaths) == 0 {
|
||||||
|
return fmt.Errorf("no file paths provided")
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, filePath := range filePaths {
|
||||||
|
err := func(f string) error {
|
||||||
|
|
||||||
|
file, err := os.Open(f)
|
||||||
|
if err != nil {
|
||||||
|
if os.IsNotExist(err) {
|
||||||
|
// File has probably been moved by arr, return silently
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return fmt.Errorf("failed to open file: %s: %v", f, err)
|
||||||
|
}
|
||||||
|
defer file.Close()
|
||||||
|
|
||||||
|
// Pre-cache the file header (first 256KB) using 16KB chunks.
|
||||||
|
if err := readSmallChunks(file, 0, 256*1024, 16*1024); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := readSmallChunks(file, 1024*1024, 64*1024, 16*1024); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}(filePath)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func readSmallChunks(file *os.File, startPos int64, totalToRead int, chunkSize int) error {
|
||||||
|
_, err := file.Seek(startPos, 0)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
buf := make([]byte, chunkSize)
|
||||||
|
bytesRemaining := totalToRead
|
||||||
|
|
||||||
|
for bytesRemaining > 0 {
|
||||||
|
toRead := chunkSize
|
||||||
|
if bytesRemaining < chunkSize {
|
||||||
|
toRead = bytesRemaining
|
||||||
|
}
|
||||||
|
|
||||||
|
n, err := file.Read(buf[:toRead])
|
||||||
|
if err != nil {
|
||||||
|
if err == io.EOF {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
bytesRemaining -= n
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|||||||
@@ -25,11 +25,11 @@ var (
|
|||||||
)
|
)
|
||||||
|
|
||||||
type Magnet struct {
|
type Magnet struct {
|
||||||
Name string
|
Name string `json:"name"`
|
||||||
InfoHash string
|
InfoHash string `json:"infoHash"`
|
||||||
Size int64
|
Size int64 `json:"size"`
|
||||||
Link string
|
Link string `json:"link"`
|
||||||
File []byte
|
File []byte `json:"-"`
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *Magnet) IsTorrent() bool {
|
func (m *Magnet) IsTorrent() bool {
|
||||||
@@ -83,7 +83,6 @@ func GetMagnetFromBytes(torrentData []byte) (*Magnet, error) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
log.Println("InfoHash: ", infoHash)
|
|
||||||
magnet := &Magnet{
|
magnet := &Magnet{
|
||||||
InfoHash: infoHash,
|
InfoHash: infoHash,
|
||||||
Name: info.Name,
|
Name: info.Name,
|
||||||
|
|||||||
@@ -7,7 +7,7 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
videoMatch = "(?i)(\\.)(webm|m4v|3gp|nsv|ty|strm|rm|rmvb|m3u|ifo|mov|qt|divx|xvid|bivx|nrg|pva|wmv|asf|asx|ogm|ogv|m2v|avi|bin|dat|dvr-ms|mpg|mpeg|mp4|avc|vp3|svq3|nuv|viv|dv|fli|flv|wpl|img|iso|vob|mkv|mk3d|ts|wtv|m2ts)$"
|
videoMatch = "(?i)(\\.)(webm|m4v|3gp|nsv|ty|strm|rm|rmvb|m3u|ifo|mov|qt|divx|xvid|bivx|nrg|pva|wmv|asf|asx|ogm|ogv|m2v|avi|bin|dat|dvr-ms|mpg|mpeg|mp4|avc|vp3|svq3|nuv|viv|dv|fli|flv|wpl|vob|mkv|mk3d|ts|wtv|m2ts)$"
|
||||||
musicMatch = "(?i)(\\.)(mp2|mp3|m4a|m4b|m4p|ogg|oga|opus|wma|wav|wv|flac|ape|aif|aiff|aifc)$"
|
musicMatch = "(?i)(\\.)(mp2|mp3|m4a|m4b|m4p|ogg|oga|opus|wma|wav|wv|flac|ape|aif|aiff|aifc)$"
|
||||||
sampleMatch = `(?i)(^|[\s/\\])(sample|trailer|thumb|special|extras?)s?[-/]|(\((sample|trailer|thumb|special|extras?)s?\))|(-\s*(sample|trailer|thumb|special|extras?)s?)`
|
sampleMatch = `(?i)(^|[\s/\\])(sample|trailer|thumb|special|extras?)s?[-/]|(\((sample|trailer|thumb|special|extras?)s?\))|(-\s*(sample|trailer|thumb|special|extras?)s?)`
|
||||||
)
|
)
|
||||||
@@ -40,12 +40,10 @@ func RemoveInvalidChars(value string) string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func RemoveExtension(value string) string {
|
func RemoveExtension(value string) string {
|
||||||
loc := mediaRegex.FindStringIndex(value)
|
if loc := mediaRegex.FindStringIndex(value); loc != nil {
|
||||||
if loc != nil {
|
|
||||||
return value[:loc[0]]
|
return value[:loc[0]]
|
||||||
} else {
|
|
||||||
return value
|
|
||||||
}
|
}
|
||||||
|
return value
|
||||||
}
|
}
|
||||||
|
|
||||||
func IsMediaFile(path string) bool {
|
func IsMediaFile(path string) bool {
|
||||||
@@ -53,7 +51,8 @@ func IsMediaFile(path string) bool {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func IsSampleFile(path string) bool {
|
func IsSampleFile(path string) bool {
|
||||||
if strings.HasSuffix(strings.ToLower(path), "sample.mkv") {
|
filename := filepath.Base(path)
|
||||||
|
if strings.HasSuffix(strings.ToLower(filename), "sample.mkv") {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
return RegexMatch(sampleRegex, path)
|
return RegexMatch(sampleRegex, path)
|
||||||
|
|||||||
1624
package-lock.json
generated
Normal file
19
package.json
Normal file
@@ -0,0 +1,19 @@
|
|||||||
|
{
|
||||||
|
"name": "decypharr",
|
||||||
|
"version": "1.0.0",
|
||||||
|
"description": "Media management tool",
|
||||||
|
"scripts": {
|
||||||
|
"build-css": "tailwindcss -i ./pkg/web/assets/styles.css -o ./pkg/web/assets/build/css/styles.css --minify",
|
||||||
|
"minify-js": "node scripts/minify-js.js",
|
||||||
|
"download-assets": "node scripts/download-assets.js",
|
||||||
|
"build": "npm run build-css && npm run minify-js",
|
||||||
|
"build-all": "npm run download-assets && npm run build",
|
||||||
|
"dev": "npm run build && air"
|
||||||
|
},
|
||||||
|
"devDependencies": {
|
||||||
|
"tailwindcss": "^3.4.0",
|
||||||
|
"daisyui": "^4.12.10",
|
||||||
|
"terser": "^5.24.0",
|
||||||
|
"clean-css": "^5.3.3"
|
||||||
|
}
|
||||||
|
}
|
||||||
103
pkg/arr/arr.go
@@ -3,6 +3,7 @@ package arr
|
|||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"context"
|
"context"
|
||||||
|
"crypto/tls"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"github.com/rs/zerolog"
|
"github.com/rs/zerolog"
|
||||||
@@ -11,7 +12,6 @@ import (
|
|||||||
"github.com/sirrobot01/decypharr/internal/request"
|
"github.com/sirrobot01/decypharr/internal/request"
|
||||||
"io"
|
"io"
|
||||||
"net/http"
|
"net/http"
|
||||||
"strconv"
|
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
@@ -20,6 +20,13 @@ import (
|
|||||||
// Type is a type of arr
|
// Type is a type of arr
|
||||||
type Type string
|
type Type string
|
||||||
|
|
||||||
|
var sharedClient = &http.Client{
|
||||||
|
Transport: &http.Transport{
|
||||||
|
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
|
||||||
|
},
|
||||||
|
Timeout: 60 * time.Second,
|
||||||
|
}
|
||||||
|
|
||||||
const (
|
const (
|
||||||
Sonarr Type = "sonarr"
|
Sonarr Type = "sonarr"
|
||||||
Radarr Type = "radarr"
|
Radarr Type = "radarr"
|
||||||
@@ -35,10 +42,11 @@ type Arr struct {
|
|||||||
Cleanup bool `json:"cleanup"`
|
Cleanup bool `json:"cleanup"`
|
||||||
SkipRepair bool `json:"skip_repair"`
|
SkipRepair bool `json:"skip_repair"`
|
||||||
DownloadUncached *bool `json:"download_uncached"`
|
DownloadUncached *bool `json:"download_uncached"`
|
||||||
client *request.Client
|
SelectedDebrid string `json:"selected_debrid,omitempty"` // The debrid service selected for this arr
|
||||||
|
Source string `json:"source,omitempty"` // The source of the arr, e.g. "auto", "manual". Auto means it was automatically detected from the arr
|
||||||
}
|
}
|
||||||
|
|
||||||
func New(name, host, token string, cleanup, skipRepair bool, downloadUncached *bool) *Arr {
|
func New(name, host, token string, cleanup, skipRepair bool, downloadUncached *bool, selectedDebrid, source string) *Arr {
|
||||||
return &Arr{
|
return &Arr{
|
||||||
Name: name,
|
Name: name,
|
||||||
Host: host,
|
Host: host,
|
||||||
@@ -47,7 +55,8 @@ func New(name, host, token string, cleanup, skipRepair bool, downloadUncached *b
|
|||||||
Cleanup: cleanup,
|
Cleanup: cleanup,
|
||||||
SkipRepair: skipRepair,
|
SkipRepair: skipRepair,
|
||||||
DownloadUncached: downloadUncached,
|
DownloadUncached: downloadUncached,
|
||||||
client: request.New(),
|
SelectedDebrid: selectedDebrid,
|
||||||
|
Source: source,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -74,14 +83,11 @@ func (a *Arr) Request(method, endpoint string, payload interface{}) (*http.Respo
|
|||||||
}
|
}
|
||||||
req.Header.Set("Content-Type", "application/json")
|
req.Header.Set("Content-Type", "application/json")
|
||||||
req.Header.Set("X-Api-Key", a.Token)
|
req.Header.Set("X-Api-Key", a.Token)
|
||||||
if a.client == nil {
|
|
||||||
a.client = request.New()
|
|
||||||
}
|
|
||||||
|
|
||||||
var resp *http.Response
|
var resp *http.Response
|
||||||
|
|
||||||
for attempts := 0; attempts < 5; attempts++ {
|
for attempts := 0; attempts < 5; attempts++ {
|
||||||
resp, err = a.client.Do(req)
|
resp, err = sharedClient.Do(req)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -103,14 +109,16 @@ func (a *Arr) Request(method, endpoint string, payload interface{}) (*http.Respo
|
|||||||
|
|
||||||
func (a *Arr) Validate() error {
|
func (a *Arr) Validate() error {
|
||||||
if a.Token == "" || a.Host == "" {
|
if a.Token == "" || a.Host == "" {
|
||||||
return nil
|
return fmt.Errorf("arr not configured: %s", a.Name)
|
||||||
}
|
}
|
||||||
resp, err := a.Request("GET", "/api/v3/health", nil)
|
resp, err := a.Request("GET", "/api/v3/health", nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if resp.StatusCode != http.StatusOK {
|
defer resp.Body.Close()
|
||||||
return fmt.Errorf("arr test failed: %s", resp.Status)
|
// If response is not 200 or 404(this is the case for Lidarr, etc), return an error
|
||||||
|
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNotFound {
|
||||||
|
return fmt.Errorf("failed to validate arr %s: %s", a.Name, resp.Status)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@@ -121,10 +129,10 @@ type Storage struct {
|
|||||||
logger zerolog.Logger
|
logger zerolog.Logger
|
||||||
}
|
}
|
||||||
|
|
||||||
func (as *Storage) Cleanup() {
|
func (s *Storage) Cleanup() {
|
||||||
as.mu.Lock()
|
s.mu.Lock()
|
||||||
defer as.mu.Unlock()
|
defer s.mu.Unlock()
|
||||||
as.Arrs = make(map[string]*Arr)
|
s.Arrs = make(map[string]*Arr)
|
||||||
}
|
}
|
||||||
|
|
||||||
func InferType(host, name string) Type {
|
func InferType(host, name string) Type {
|
||||||
@@ -145,8 +153,11 @@ func InferType(host, name string) Type {
|
|||||||
func NewStorage() *Storage {
|
func NewStorage() *Storage {
|
||||||
arrs := make(map[string]*Arr)
|
arrs := make(map[string]*Arr)
|
||||||
for _, a := range config.Get().Arrs {
|
for _, a := range config.Get().Arrs {
|
||||||
|
if a.Host == "" || a.Token == "" || a.Name == "" {
|
||||||
|
continue // Skip if host or token is not set
|
||||||
|
}
|
||||||
name := a.Name
|
name := a.Name
|
||||||
arrs[name] = New(name, a.Host, a.Token, a.Cleanup, a.SkipRepair, a.DownloadUncached)
|
arrs[name] = New(name, a.Host, a.Token, a.Cleanup, a.SkipRepair, a.DownloadUncached, a.SelectedDebrid, a.Source)
|
||||||
}
|
}
|
||||||
return &Storage{
|
return &Storage{
|
||||||
Arrs: arrs,
|
Arrs: arrs,
|
||||||
@@ -154,46 +165,38 @@ func NewStorage() *Storage {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (as *Storage) AddOrUpdate(arr *Arr) {
|
func (s *Storage) AddOrUpdate(arr *Arr) {
|
||||||
as.mu.Lock()
|
s.mu.Lock()
|
||||||
defer as.mu.Unlock()
|
defer s.mu.Unlock()
|
||||||
if arr.Name == "" {
|
if arr.Host == "" || arr.Token == "" || arr.Name == "" {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
as.Arrs[arr.Name] = arr
|
s.Arrs[arr.Name] = arr
|
||||||
}
|
}
|
||||||
|
|
||||||
func (as *Storage) Get(name string) *Arr {
|
func (s *Storage) Get(name string) *Arr {
|
||||||
as.mu.Lock()
|
s.mu.Lock()
|
||||||
defer as.mu.Unlock()
|
defer s.mu.Unlock()
|
||||||
return as.Arrs[name]
|
return s.Arrs[name]
|
||||||
}
|
}
|
||||||
|
|
||||||
func (as *Storage) GetAll() []*Arr {
|
func (s *Storage) GetAll() []*Arr {
|
||||||
as.mu.Lock()
|
s.mu.Lock()
|
||||||
defer as.mu.Unlock()
|
defer s.mu.Unlock()
|
||||||
arrs := make([]*Arr, 0, len(as.Arrs))
|
arrs := make([]*Arr, 0, len(s.Arrs))
|
||||||
for _, arr := range as.Arrs {
|
for _, arr := range s.Arrs {
|
||||||
if arr.Host != "" && arr.Token != "" {
|
arrs = append(arrs, arr)
|
||||||
arrs = append(arrs, arr)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
return arrs
|
return arrs
|
||||||
}
|
}
|
||||||
|
|
||||||
func (as *Storage) Clear() {
|
func (s *Storage) StartSchedule(ctx context.Context) error {
|
||||||
as.mu.Lock()
|
|
||||||
defer as.mu.Unlock()
|
|
||||||
as.Arrs = make(map[string]*Arr)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (as *Storage) StartSchedule(ctx context.Context) error {
|
|
||||||
|
|
||||||
ticker := time.NewTicker(10 * time.Second)
|
ticker := time.NewTicker(10 * time.Second)
|
||||||
|
|
||||||
select {
|
select {
|
||||||
case <-ticker.C:
|
case <-ticker.C:
|
||||||
as.cleanupArrsQueue()
|
s.cleanupArrsQueue()
|
||||||
case <-ctx.Done():
|
case <-ctx.Done():
|
||||||
ticker.Stop()
|
ticker.Stop()
|
||||||
return nil
|
return nil
|
||||||
@@ -201,9 +204,9 @@ func (as *Storage) StartSchedule(ctx context.Context) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (as *Storage) cleanupArrsQueue() {
|
func (s *Storage) cleanupArrsQueue() {
|
||||||
arrs := make([]*Arr, 0)
|
arrs := make([]*Arr, 0)
|
||||||
for _, arr := range as.Arrs {
|
for _, arr := range s.Arrs {
|
||||||
if !arr.Cleanup {
|
if !arr.Cleanup {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
@@ -212,26 +215,18 @@ func (as *Storage) cleanupArrsQueue() {
|
|||||||
if len(arrs) > 0 {
|
if len(arrs) > 0 {
|
||||||
for _, arr := range arrs {
|
for _, arr := range arrs {
|
||||||
if err := arr.CleanupQueue(); err != nil {
|
if err := arr.CleanupQueue(); err != nil {
|
||||||
as.logger.Error().Err(err).Msgf("Failed to cleanup arr %s", arr.Name)
|
s.logger.Error().Err(err).Msgf("Failed to cleanup arr %s", arr.Name)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (a *Arr) Refresh() error {
|
func (a *Arr) Refresh() {
|
||||||
payload := struct {
|
payload := struct {
|
||||||
Name string `json:"name"`
|
Name string `json:"name"`
|
||||||
}{
|
}{
|
||||||
Name: "RefreshMonitoredDownloads",
|
Name: "RefreshMonitoredDownloads",
|
||||||
}
|
}
|
||||||
|
|
||||||
resp, err := a.Request(http.MethodPost, "api/v3/command", payload)
|
_, _ = a.Request(http.MethodPost, "api/v3/command", payload)
|
||||||
if err == nil && resp != nil {
|
|
||||||
statusOk := strconv.Itoa(resp.StatusCode)[0] == '2'
|
|
||||||
if statusOk {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return fmt.Errorf("failed to refresh: %v", err)
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -105,6 +105,7 @@ func (a *Arr) GetMedia(mediaId string) ([]Content, error) {
|
|||||||
Id: d.Id,
|
Id: d.Id,
|
||||||
EpisodeId: eId,
|
EpisodeId: eId,
|
||||||
SeasonNumber: file.SeasonNumber,
|
SeasonNumber: file.SeasonNumber,
|
||||||
|
Size: file.Size,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
if len(files) == 0 {
|
if len(files) == 0 {
|
||||||
@@ -148,6 +149,7 @@ func GetMovies(a *Arr, tvId string) ([]Content, error) {
|
|||||||
FileId: movie.MovieFile.Id,
|
FileId: movie.MovieFile.Id,
|
||||||
Id: movie.Id,
|
Id: movie.Id,
|
||||||
Path: movie.MovieFile.Path,
|
Path: movie.MovieFile.Path,
|
||||||
|
Size: movie.MovieFile.Size,
|
||||||
})
|
})
|
||||||
ct.Files = files
|
ct.Files = files
|
||||||
contents = append(contents, ct)
|
contents = append(contents, ct)
|
||||||
|
|||||||
@@ -205,5 +205,4 @@ func (a *Arr) Import(path string, seriesId int, seasons []int) (io.ReadCloser, e
|
|||||||
}
|
}
|
||||||
defer resp.Body.Close()
|
defer resp.Body.Close()
|
||||||
return resp.Body, nil
|
return resp.Body, nil
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -11,6 +11,7 @@ type Movie struct {
|
|||||||
RelativePath string `json:"relativePath"`
|
RelativePath string `json:"relativePath"`
|
||||||
Path string `json:"path"`
|
Path string `json:"path"`
|
||||||
Id int `json:"id"`
|
Id int `json:"id"`
|
||||||
|
Size int64 `json:"size"`
|
||||||
} `json:"movieFile"`
|
} `json:"movieFile"`
|
||||||
Id int `json:"id"`
|
Id int `json:"id"`
|
||||||
}
|
}
|
||||||
@@ -25,6 +26,8 @@ type ContentFile struct {
|
|||||||
IsSymlink bool `json:"isSymlink"`
|
IsSymlink bool `json:"isSymlink"`
|
||||||
IsBroken bool `json:"isBroken"`
|
IsBroken bool `json:"isBroken"`
|
||||||
SeasonNumber int `json:"seasonNumber"`
|
SeasonNumber int `json:"seasonNumber"`
|
||||||
|
Processed bool `json:"processed"`
|
||||||
|
Size int64 `json:"size"`
|
||||||
}
|
}
|
||||||
|
|
||||||
func (file *ContentFile) Delete() {
|
func (file *ContentFile) Delete() {
|
||||||
@@ -44,4 +47,5 @@ type seriesFile struct {
|
|||||||
SeasonNumber int `json:"seasonNumber"`
|
SeasonNumber int `json:"seasonNumber"`
|
||||||
Path string `json:"path"`
|
Path string `json:"path"`
|
||||||
Id int `json:"id"`
|
Id int `json:"id"`
|
||||||
|
Size int64 `json:"size"`
|
||||||
}
|
}
|
||||||
|
|||||||
269
pkg/debrid/debrid.go
Normal file
@@ -0,0 +1,269 @@
|
|||||||
|
package debrid
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"github.com/sirrobot01/decypharr/internal/config"
|
||||||
|
"github.com/sirrobot01/decypharr/internal/logger"
|
||||||
|
"github.com/sirrobot01/decypharr/internal/utils"
|
||||||
|
"github.com/sirrobot01/decypharr/pkg/arr"
|
||||||
|
"github.com/sirrobot01/decypharr/pkg/debrid/providers/alldebrid"
|
||||||
|
"github.com/sirrobot01/decypharr/pkg/debrid/providers/debrid_link"
|
||||||
|
"github.com/sirrobot01/decypharr/pkg/debrid/providers/realdebrid"
|
||||||
|
"github.com/sirrobot01/decypharr/pkg/debrid/providers/torbox"
|
||||||
|
debridStore "github.com/sirrobot01/decypharr/pkg/debrid/store"
|
||||||
|
"github.com/sirrobot01/decypharr/pkg/debrid/types"
|
||||||
|
"github.com/sirrobot01/decypharr/pkg/rclone"
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
type Debrid struct {
|
||||||
|
cache *debridStore.Cache // Could be nil if not using WebDAV
|
||||||
|
client types.Client // HTTP client for making requests to the debrid service
|
||||||
|
}
|
||||||
|
|
||||||
|
func (de *Debrid) Client() types.Client {
|
||||||
|
return de.client
|
||||||
|
}
|
||||||
|
|
||||||
|
func (de *Debrid) Cache() *debridStore.Cache {
|
||||||
|
return de.cache
|
||||||
|
}
|
||||||
|
|
||||||
|
func (de *Debrid) Reset() {
|
||||||
|
if de.cache != nil {
|
||||||
|
de.cache.Reset()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type Storage struct {
|
||||||
|
debrids map[string]*Debrid
|
||||||
|
mu sync.RWMutex
|
||||||
|
lastUsed string
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewStorage(rcManager *rclone.Manager) *Storage {
|
||||||
|
cfg := config.Get()
|
||||||
|
|
||||||
|
_logger := logger.Default()
|
||||||
|
|
||||||
|
debrids := make(map[string]*Debrid)
|
||||||
|
|
||||||
|
bindAddress := cfg.BindAddress
|
||||||
|
if bindAddress == "" {
|
||||||
|
bindAddress = "localhost"
|
||||||
|
}
|
||||||
|
webdavUrl := fmt.Sprintf("http://%s:%s%s/webdav", bindAddress, cfg.Port, cfg.URLBase)
|
||||||
|
|
||||||
|
for _, dc := range cfg.Debrids {
|
||||||
|
client, err := createDebridClient(dc)
|
||||||
|
if err != nil {
|
||||||
|
_logger.Error().Err(err).Str("Debrid", dc.Name).Msg("failed to connect to debrid client")
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
var (
|
||||||
|
cache *debridStore.Cache
|
||||||
|
mounter *rclone.Mount
|
||||||
|
)
|
||||||
|
_log := client.Logger()
|
||||||
|
if dc.UseWebDav {
|
||||||
|
if cfg.Rclone.Enabled && rcManager != nil {
|
||||||
|
mounter = rclone.NewMount(dc.Name, webdavUrl, rcManager)
|
||||||
|
}
|
||||||
|
cache = debridStore.NewDebridCache(dc, client, mounter)
|
||||||
|
_log.Info().Msg("Debrid Service started with WebDAV")
|
||||||
|
} else {
|
||||||
|
_log.Info().Msg("Debrid Service started")
|
||||||
|
}
|
||||||
|
debrids[dc.Name] = &Debrid{
|
||||||
|
cache: cache,
|
||||||
|
client: client,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
d := &Storage{
|
||||||
|
debrids: debrids,
|
||||||
|
lastUsed: "",
|
||||||
|
}
|
||||||
|
return d
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Storage) Debrid(name string) *Debrid {
|
||||||
|
d.mu.RLock()
|
||||||
|
defer d.mu.RUnlock()
|
||||||
|
if debrid, exists := d.debrids[name]; exists {
|
||||||
|
return debrid
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Storage) Debrids() map[string]*Debrid {
|
||||||
|
d.mu.RLock()
|
||||||
|
defer d.mu.RUnlock()
|
||||||
|
debridsCopy := make(map[string]*Debrid)
|
||||||
|
for name, debrid := range d.debrids {
|
||||||
|
if debrid != nil {
|
||||||
|
debridsCopy[name] = debrid
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return debridsCopy
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Storage) Client(name string) types.Client {
|
||||||
|
d.mu.RLock()
|
||||||
|
defer d.mu.RUnlock()
|
||||||
|
if client, exists := d.debrids[name]; exists {
|
||||||
|
return client.client
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Storage) Reset() {
|
||||||
|
d.mu.Lock()
|
||||||
|
defer d.mu.Unlock()
|
||||||
|
|
||||||
|
// Reset all debrid clients and caches
|
||||||
|
for _, debrid := range d.debrids {
|
||||||
|
if debrid != nil {
|
||||||
|
debrid.Reset()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Reinitialize the debrids map
|
||||||
|
d.debrids = make(map[string]*Debrid)
|
||||||
|
d.lastUsed = ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Storage) Clients() map[string]types.Client {
|
||||||
|
d.mu.RLock()
|
||||||
|
defer d.mu.RUnlock()
|
||||||
|
clientsCopy := make(map[string]types.Client)
|
||||||
|
for name, debrid := range d.debrids {
|
||||||
|
if debrid != nil && debrid.client != nil {
|
||||||
|
clientsCopy[name] = debrid.client
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return clientsCopy
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Storage) Caches() map[string]*debridStore.Cache {
|
||||||
|
d.mu.RLock()
|
||||||
|
defer d.mu.RUnlock()
|
||||||
|
cachesCopy := make(map[string]*debridStore.Cache)
|
||||||
|
for name, debrid := range d.debrids {
|
||||||
|
if debrid != nil && debrid.cache != nil {
|
||||||
|
cachesCopy[name] = debrid.cache
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return cachesCopy
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Storage) FilterClients(filter func(types.Client) bool) map[string]types.Client {
|
||||||
|
d.mu.Lock()
|
||||||
|
defer d.mu.Unlock()
|
||||||
|
filteredClients := make(map[string]types.Client)
|
||||||
|
for name, client := range d.debrids {
|
||||||
|
if client != nil && filter(client.client) {
|
||||||
|
filteredClients[name] = client.client
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return filteredClients
|
||||||
|
}
|
||||||
|
|
||||||
|
func createDebridClient(dc config.Debrid) (types.Client, error) {
|
||||||
|
switch dc.Name {
|
||||||
|
case "realdebrid":
|
||||||
|
return realdebrid.New(dc)
|
||||||
|
case "torbox":
|
||||||
|
return torbox.New(dc)
|
||||||
|
case "debridlink":
|
||||||
|
return debrid_link.New(dc)
|
||||||
|
case "alldebrid":
|
||||||
|
return alldebrid.New(dc)
|
||||||
|
default:
|
||||||
|
return realdebrid.New(dc)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func Process(ctx context.Context, store *Storage, selectedDebrid string, magnet *utils.Magnet, a *arr.Arr, action string, overrideDownloadUncached bool) (*types.Torrent, error) {
|
||||||
|
|
||||||
|
debridTorrent := &types.Torrent{
|
||||||
|
InfoHash: magnet.InfoHash,
|
||||||
|
Magnet: magnet,
|
||||||
|
Name: magnet.Name,
|
||||||
|
Arr: a,
|
||||||
|
Size: magnet.Size,
|
||||||
|
Files: make(map[string]types.File),
|
||||||
|
}
|
||||||
|
|
||||||
|
clients := store.FilterClients(func(c types.Client) bool {
|
||||||
|
if selectedDebrid != "" && c.Name() != selectedDebrid {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
})
|
||||||
|
|
||||||
|
if len(clients) == 0 {
|
||||||
|
return nil, fmt.Errorf("no debrid clients available")
|
||||||
|
}
|
||||||
|
|
||||||
|
errs := make([]error, 0, len(clients))
|
||||||
|
|
||||||
|
// Override first, arr second, debrid third
|
||||||
|
|
||||||
|
if overrideDownloadUncached {
|
||||||
|
debridTorrent.DownloadUncached = true
|
||||||
|
} else if a.DownloadUncached != nil {
|
||||||
|
// Arr cached is set
|
||||||
|
debridTorrent.DownloadUncached = *a.DownloadUncached
|
||||||
|
} else {
|
||||||
|
debridTorrent.DownloadUncached = false
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, db := range clients {
|
||||||
|
_logger := db.Logger()
|
||||||
|
_logger.Info().
|
||||||
|
Str("Debrid", db.Name()).
|
||||||
|
Str("Arr", a.Name).
|
||||||
|
Str("Hash", debridTorrent.InfoHash).
|
||||||
|
Str("Name", debridTorrent.Name).
|
||||||
|
Str("Action", action).
|
||||||
|
Msg("Processing torrent")
|
||||||
|
|
||||||
|
if !overrideDownloadUncached && a.DownloadUncached == nil {
|
||||||
|
debridTorrent.DownloadUncached = db.GetDownloadUncached()
|
||||||
|
}
|
||||||
|
|
||||||
|
dbt, err := db.SubmitMagnet(debridTorrent)
|
||||||
|
if err != nil || dbt == nil || dbt.Id == "" {
|
||||||
|
errs = append(errs, err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
dbt.Arr = a
|
||||||
|
_logger.Info().Str("id", dbt.Id).Msgf("Torrent: %s submitted to %s", dbt.Name, db.Name())
|
||||||
|
store.lastUsed = db.Name()
|
||||||
|
|
||||||
|
torrent, err := db.CheckStatus(dbt)
|
||||||
|
if err != nil && torrent != nil && torrent.Id != "" {
|
||||||
|
// Delete the torrent if it was not downloaded
|
||||||
|
go func(id string) {
|
||||||
|
_ = db.DeleteTorrent(id)
|
||||||
|
}(torrent.Id)
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
errs = append(errs, err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if torrent == nil {
|
||||||
|
errs = append(errs, fmt.Errorf("torrent %s returned nil after checking status", dbt.Name))
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
return torrent, nil
|
||||||
|
}
|
||||||
|
if len(errs) == 0 {
|
||||||
|
return nil, fmt.Errorf("failed to process torrent: no clients available")
|
||||||
|
}
|
||||||
|
joinedErrors := errors.Join(errs...)
|
||||||
|
return nil, fmt.Errorf("failed to process torrent: %w", joinedErrors)
|
||||||
|
}
|
||||||
@@ -1,103 +0,0 @@
|
|||||||
package debrid
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"github.com/sirrobot01/decypharr/internal/config"
|
|
||||||
"github.com/sirrobot01/decypharr/internal/utils"
|
|
||||||
"github.com/sirrobot01/decypharr/pkg/arr"
|
|
||||||
"github.com/sirrobot01/decypharr/pkg/debrid/alldebrid"
|
|
||||||
"github.com/sirrobot01/decypharr/pkg/debrid/debrid_link"
|
|
||||||
"github.com/sirrobot01/decypharr/pkg/debrid/realdebrid"
|
|
||||||
"github.com/sirrobot01/decypharr/pkg/debrid/torbox"
|
|
||||||
"github.com/sirrobot01/decypharr/pkg/debrid/types"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
func createDebridClient(dc config.Debrid) types.Client {
|
|
||||||
switch dc.Name {
|
|
||||||
case "realdebrid":
|
|
||||||
return realdebrid.New(dc)
|
|
||||||
case "torbox":
|
|
||||||
return torbox.New(dc)
|
|
||||||
case "debridlink":
|
|
||||||
return debrid_link.New(dc)
|
|
||||||
case "alldebrid":
|
|
||||||
return alldebrid.New(dc)
|
|
||||||
default:
|
|
||||||
return realdebrid.New(dc)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func ProcessTorrent(d *Engine, magnet *utils.Magnet, a *arr.Arr, isSymlink, overrideDownloadUncached bool) (*types.Torrent, error) {
|
|
||||||
|
|
||||||
debridTorrent := &types.Torrent{
|
|
||||||
InfoHash: magnet.InfoHash,
|
|
||||||
Magnet: magnet,
|
|
||||||
Name: magnet.Name,
|
|
||||||
Arr: a,
|
|
||||||
Size: magnet.Size,
|
|
||||||
Files: make(map[string]types.File),
|
|
||||||
}
|
|
||||||
|
|
||||||
errs := make([]error, 0, len(d.Clients))
|
|
||||||
|
|
||||||
// Override first, arr second, debrid third
|
|
||||||
|
|
||||||
if overrideDownloadUncached {
|
|
||||||
debridTorrent.DownloadUncached = true
|
|
||||||
} else if a.DownloadUncached != nil {
|
|
||||||
// Arr cached is set
|
|
||||||
debridTorrent.DownloadUncached = *a.DownloadUncached
|
|
||||||
} else {
|
|
||||||
debridTorrent.DownloadUncached = false
|
|
||||||
}
|
|
||||||
|
|
||||||
for index, db := range d.Clients {
|
|
||||||
logger := db.GetLogger()
|
|
||||||
logger.Info().Str("Debrid", db.GetName()).Str("Hash", debridTorrent.InfoHash).Msg("Processing torrent")
|
|
||||||
|
|
||||||
if !overrideDownloadUncached && a.DownloadUncached == nil {
|
|
||||||
debridTorrent.DownloadUncached = db.GetDownloadUncached()
|
|
||||||
}
|
|
||||||
|
|
||||||
//if db.GetCheckCached() {
|
|
||||||
// hash, exists := db.IsAvailable([]string{debridTorrent.InfoHash})[debridTorrent.InfoHash]
|
|
||||||
// if !exists || !hash {
|
|
||||||
// logger.Info().Msgf("Torrent: %s is not cached", debridTorrent.Name)
|
|
||||||
// continue
|
|
||||||
// } else {
|
|
||||||
// logger.Info().Msgf("Torrent: %s is cached(or downloading)", debridTorrent.Name)
|
|
||||||
// }
|
|
||||||
//}
|
|
||||||
|
|
||||||
dbt, err := db.SubmitMagnet(debridTorrent)
|
|
||||||
if err != nil || dbt == nil || dbt.Id == "" {
|
|
||||||
errs = append(errs, err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
dbt.Arr = a
|
|
||||||
logger.Info().Str("id", dbt.Id).Msgf("Torrent: %s submitted to %s", dbt.Name, db.GetName())
|
|
||||||
d.LastUsed = index
|
|
||||||
|
|
||||||
torrent, err := db.CheckStatus(dbt, isSymlink)
|
|
||||||
if err != nil && torrent != nil && torrent.Id != "" {
|
|
||||||
// Delete the torrent if it was not downloaded
|
|
||||||
go func(id string) {
|
|
||||||
_ = db.DeleteTorrent(id)
|
|
||||||
}(torrent.Id)
|
|
||||||
}
|
|
||||||
return torrent, err
|
|
||||||
}
|
|
||||||
if len(errs) == 0 {
|
|
||||||
return nil, fmt.Errorf("failed to process torrent: no clients available")
|
|
||||||
}
|
|
||||||
if len(errs) == 1 {
|
|
||||||
return nil, fmt.Errorf("failed to process torrent: %w", errs[0])
|
|
||||||
} else {
|
|
||||||
errStrings := make([]string, 0, len(errs))
|
|
||||||
for _, err := range errs {
|
|
||||||
errStrings = append(errStrings, err.Error())
|
|
||||||
}
|
|
||||||
return nil, fmt.Errorf("failed to process torrent: %s", strings.Join(errStrings, ", "))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,224 +0,0 @@
|
|||||||
package debrid
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"github.com/sirrobot01/decypharr/internal/request"
|
|
||||||
"github.com/sirrobot01/decypharr/pkg/debrid/types"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
type linkCache struct {
|
|
||||||
Id string
|
|
||||||
link string
|
|
||||||
accountId string
|
|
||||||
expiresAt time.Time
|
|
||||||
}
|
|
||||||
|
|
||||||
type downloadLinkCache struct {
|
|
||||||
data map[string]linkCache
|
|
||||||
mu sync.Mutex
|
|
||||||
}
|
|
||||||
|
|
||||||
func newDownloadLinkCache() *downloadLinkCache {
|
|
||||||
return &downloadLinkCache{
|
|
||||||
data: make(map[string]linkCache),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *downloadLinkCache) reset() {
|
|
||||||
c.mu.Lock()
|
|
||||||
c.data = make(map[string]linkCache)
|
|
||||||
c.mu.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *downloadLinkCache) Load(key string) (linkCache, bool) {
|
|
||||||
c.mu.Lock()
|
|
||||||
defer c.mu.Unlock()
|
|
||||||
dl, ok := c.data[key]
|
|
||||||
return dl, ok
|
|
||||||
}
|
|
||||||
func (c *downloadLinkCache) Store(key string, value linkCache) {
|
|
||||||
c.mu.Lock()
|
|
||||||
defer c.mu.Unlock()
|
|
||||||
c.data[key] = value
|
|
||||||
}
|
|
||||||
func (c *downloadLinkCache) Delete(key string) {
|
|
||||||
c.mu.Lock()
|
|
||||||
defer c.mu.Unlock()
|
|
||||||
delete(c.data, key)
|
|
||||||
}
|
|
||||||
|
|
||||||
type downloadLinkRequest struct {
|
|
||||||
result string
|
|
||||||
err error
|
|
||||||
done chan struct{}
|
|
||||||
}
|
|
||||||
|
|
||||||
func newDownloadLinkRequest() *downloadLinkRequest {
|
|
||||||
return &downloadLinkRequest{
|
|
||||||
done: make(chan struct{}),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *downloadLinkRequest) Complete(result string, err error) {
|
|
||||||
r.result = result
|
|
||||||
r.err = err
|
|
||||||
close(r.done)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *downloadLinkRequest) Wait() (string, error) {
|
|
||||||
<-r.done
|
|
||||||
return r.result, r.err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Cache) GetDownloadLink(torrentName, filename, fileLink string) (string, error) {
|
|
||||||
// Check link cache
|
|
||||||
if dl := c.checkDownloadLink(fileLink); dl != "" {
|
|
||||||
return dl, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if req, inFlight := c.downloadLinkRequests.Load(fileLink); inFlight {
|
|
||||||
// Wait for the other request to complete and use its result
|
|
||||||
result := req.(*downloadLinkRequest)
|
|
||||||
return result.Wait()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create a new request object
|
|
||||||
req := newDownloadLinkRequest()
|
|
||||||
c.downloadLinkRequests.Store(fileLink, req)
|
|
||||||
|
|
||||||
downloadLink, err := c.fetchDownloadLink(torrentName, filename, fileLink)
|
|
||||||
|
|
||||||
// Complete the request and remove it from the map
|
|
||||||
req.Complete(downloadLink, err)
|
|
||||||
c.downloadLinkRequests.Delete(fileLink)
|
|
||||||
|
|
||||||
return downloadLink, err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Cache) fetchDownloadLink(torrentName, filename, fileLink string) (string, error) {
|
|
||||||
ct := c.GetTorrentByName(torrentName)
|
|
||||||
if ct == nil {
|
|
||||||
return "", fmt.Errorf("torrent not found")
|
|
||||||
}
|
|
||||||
file := ct.Files[filename]
|
|
||||||
|
|
||||||
if file.Link == "" {
|
|
||||||
// file link is empty, refresh the torrent to get restricted links
|
|
||||||
ct = c.refreshTorrent(file.TorrentId) // Refresh the torrent from the debrid
|
|
||||||
if ct == nil {
|
|
||||||
return "", fmt.Errorf("failed to refresh torrent")
|
|
||||||
} else {
|
|
||||||
file = ct.Files[filename]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// If file.Link is still empty, return
|
|
||||||
if file.Link == "" {
|
|
||||||
// Try to reinsert the torrent?
|
|
||||||
newCt, err := c.reInsertTorrent(ct)
|
|
||||||
if err != nil {
|
|
||||||
return "", fmt.Errorf("failed to reinsert torrent. %w", err)
|
|
||||||
}
|
|
||||||
ct = newCt
|
|
||||||
file = ct.Files[filename]
|
|
||||||
}
|
|
||||||
|
|
||||||
c.logger.Trace().Msgf("Getting download link for %s(%s)", filename, file.Link)
|
|
||||||
downloadLink, err := c.client.GetDownloadLink(ct.Torrent, &file)
|
|
||||||
if err != nil {
|
|
||||||
if errors.Is(err, request.HosterUnavailableError) {
|
|
||||||
newCt, err := c.reInsertTorrent(ct)
|
|
||||||
if err != nil {
|
|
||||||
return "", fmt.Errorf("failed to reinsert torrent: %w", err)
|
|
||||||
}
|
|
||||||
ct = newCt
|
|
||||||
file = ct.Files[filename]
|
|
||||||
// Retry getting the download link
|
|
||||||
downloadLink, err = c.client.GetDownloadLink(ct.Torrent, &file)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
if downloadLink == nil {
|
|
||||||
return "", fmt.Errorf("download link is empty for")
|
|
||||||
}
|
|
||||||
c.updateDownloadLink(downloadLink)
|
|
||||||
return "", nil
|
|
||||||
} else if errors.Is(err, request.TrafficExceededError) {
|
|
||||||
// This is likely a fair usage limit error
|
|
||||||
return "", err
|
|
||||||
} else {
|
|
||||||
return "", fmt.Errorf("failed to get download link: %w", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if downloadLink == nil {
|
|
||||||
return "", fmt.Errorf("download link is empty")
|
|
||||||
}
|
|
||||||
c.updateDownloadLink(downloadLink)
|
|
||||||
return downloadLink.DownloadLink, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Cache) GenerateDownloadLinks(t CachedTorrent) {
|
|
||||||
if err := c.client.GenerateDownloadLinks(t.Torrent); err != nil {
|
|
||||||
c.logger.Error().Err(err).Str("torrent", t.Name).Msg("Failed to generate download links")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
for _, file := range t.Files {
|
|
||||||
if file.DownloadLink != nil {
|
|
||||||
c.updateDownloadLink(file.DownloadLink)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
c.setTorrent(t, nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Cache) updateDownloadLink(dl *types.DownloadLink) {
|
|
||||||
c.downloadLinks.Store(dl.Link, linkCache{
|
|
||||||
Id: dl.Id,
|
|
||||||
link: dl.DownloadLink,
|
|
||||||
expiresAt: time.Now().Add(c.autoExpiresLinksAfterDuration),
|
|
||||||
accountId: dl.AccountId,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Cache) checkDownloadLink(link string) string {
|
|
||||||
if dl, ok := c.downloadLinks.Load(link); ok {
|
|
||||||
if dl.expiresAt.After(time.Now()) && !c.IsDownloadLinkInvalid(dl.link) {
|
|
||||||
return dl.link
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Cache) MarkDownloadLinkAsInvalid(link, downloadLink, reason string) {
|
|
||||||
c.invalidDownloadLinks.Store(downloadLink, reason)
|
|
||||||
// Remove the download api key from active
|
|
||||||
if reason == "bandwidth_exceeded" {
|
|
||||||
if dl, ok := c.downloadLinks.Load(link); ok {
|
|
||||||
if dl.accountId != "" && dl.link == downloadLink {
|
|
||||||
c.client.DisableAccount(dl.accountId)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
c.removeDownloadLink(link)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Cache) removeDownloadLink(link string) {
|
|
||||||
if dl, ok := c.downloadLinks.Load(link); ok {
|
|
||||||
// Delete dl from cache
|
|
||||||
c.downloadLinks.Delete(link)
|
|
||||||
// Delete dl from debrid
|
|
||||||
if dl.Id != "" {
|
|
||||||
_ = c.client.DeleteDownloadLink(dl.Id)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Cache) IsDownloadLinkInvalid(downloadLink string) bool {
|
|
||||||
if reason, ok := c.invalidDownloadLinks.Load(downloadLink); ok {
|
|
||||||
c.logger.Debug().Msgf("Download link %s is invalid: %s", downloadLink, reason)
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
@@ -1,61 +0,0 @@
|
|||||||
package debrid
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/sirrobot01/decypharr/internal/config"
|
|
||||||
"github.com/sirrobot01/decypharr/pkg/debrid/types"
|
|
||||||
"sync"
|
|
||||||
)
|
|
||||||
|
|
||||||
type Engine struct {
|
|
||||||
Clients map[string]types.Client
|
|
||||||
clientsMu sync.Mutex
|
|
||||||
Caches map[string]*Cache
|
|
||||||
CacheMu sync.Mutex
|
|
||||||
LastUsed string
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewEngine() *Engine {
|
|
||||||
cfg := config.Get()
|
|
||||||
clients := make(map[string]types.Client)
|
|
||||||
|
|
||||||
caches := make(map[string]*Cache)
|
|
||||||
|
|
||||||
for _, dc := range cfg.Debrids {
|
|
||||||
client := createDebridClient(dc)
|
|
||||||
logger := client.GetLogger()
|
|
||||||
if dc.UseWebDav {
|
|
||||||
caches[dc.Name] = New(dc, client)
|
|
||||||
logger.Info().Msg("Debrid Service started with WebDAV")
|
|
||||||
} else {
|
|
||||||
logger.Info().Msg("Debrid Service started")
|
|
||||||
}
|
|
||||||
clients[dc.Name] = client
|
|
||||||
}
|
|
||||||
|
|
||||||
d := &Engine{
|
|
||||||
Clients: clients,
|
|
||||||
LastUsed: "",
|
|
||||||
Caches: caches,
|
|
||||||
}
|
|
||||||
return d
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Engine) GetClient(name string) types.Client {
|
|
||||||
d.clientsMu.Lock()
|
|
||||||
defer d.clientsMu.Unlock()
|
|
||||||
return d.Clients[name]
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Engine) Reset() {
|
|
||||||
d.clientsMu.Lock()
|
|
||||||
d.Clients = make(map[string]types.Client)
|
|
||||||
d.clientsMu.Unlock()
|
|
||||||
|
|
||||||
d.CacheMu.Lock()
|
|
||||||
d.Caches = make(map[string]*Cache)
|
|
||||||
d.CacheMu.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Engine) GetDebrids() map[string]types.Client {
|
|
||||||
return d.Clients
|
|
||||||
}
|
|
||||||
@@ -1 +0,0 @@
|
|||||||
package debrid
|
|
||||||
@@ -18,20 +18,26 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
type AllDebrid struct {
|
type AllDebrid struct {
|
||||||
Name string
|
name string
|
||||||
Host string `json:"host"`
|
Host string `json:"host"`
|
||||||
APIKey string
|
APIKey string
|
||||||
accounts map[string]types.Account
|
accounts *types.Accounts
|
||||||
DownloadUncached bool
|
autoExpiresLinksAfter time.Duration
|
||||||
client *request.Client
|
DownloadUncached bool
|
||||||
|
client *request.Client
|
||||||
|
|
||||||
MountPath string
|
MountPath string
|
||||||
logger zerolog.Logger
|
logger zerolog.Logger
|
||||||
checkCached bool
|
checkCached bool
|
||||||
addSamples bool
|
addSamples bool
|
||||||
|
minimumFreeSlot int
|
||||||
}
|
}
|
||||||
|
|
||||||
func New(dc config.Debrid) *AllDebrid {
|
func (ad *AllDebrid) GetProfile() (*types.Profile, error) {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func New(dc config.Debrid) (*AllDebrid, error) {
|
||||||
rl := request.ParseRateLimit(dc.RateLimit)
|
rl := request.ParseRateLimit(dc.RateLimit)
|
||||||
|
|
||||||
headers := map[string]string{
|
headers := map[string]string{
|
||||||
@@ -45,34 +51,31 @@ func New(dc config.Debrid) *AllDebrid {
|
|||||||
request.WithProxy(dc.Proxy),
|
request.WithProxy(dc.Proxy),
|
||||||
)
|
)
|
||||||
|
|
||||||
accounts := make(map[string]types.Account)
|
autoExpiresLinksAfter, err := time.ParseDuration(dc.AutoExpireLinksAfter)
|
||||||
for idx, key := range dc.DownloadAPIKeys {
|
if autoExpiresLinksAfter == 0 || err != nil {
|
||||||
id := strconv.Itoa(idx)
|
autoExpiresLinksAfter = 48 * time.Hour
|
||||||
accounts[id] = types.Account{
|
|
||||||
Name: key,
|
|
||||||
ID: id,
|
|
||||||
Token: key,
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
return &AllDebrid{
|
return &AllDebrid{
|
||||||
Name: "alldebrid",
|
name: "alldebrid",
|
||||||
Host: "http://api.alldebrid.com/v4.1",
|
Host: "http://api.alldebrid.com/v4.1",
|
||||||
APIKey: dc.APIKey,
|
APIKey: dc.APIKey,
|
||||||
accounts: accounts,
|
accounts: types.NewAccounts(dc),
|
||||||
DownloadUncached: dc.DownloadUncached,
|
DownloadUncached: dc.DownloadUncached,
|
||||||
client: client,
|
autoExpiresLinksAfter: autoExpiresLinksAfter,
|
||||||
MountPath: dc.Folder,
|
client: client,
|
||||||
logger: logger.New(dc.Name),
|
MountPath: dc.Folder,
|
||||||
checkCached: dc.CheckCached,
|
logger: logger.New(dc.Name),
|
||||||
addSamples: dc.AddSamples,
|
checkCached: dc.CheckCached,
|
||||||
}
|
addSamples: dc.AddSamples,
|
||||||
|
minimumFreeSlot: dc.MinimumFreeSlot,
|
||||||
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ad *AllDebrid) GetName() string {
|
func (ad *AllDebrid) Name() string {
|
||||||
return ad.Name
|
return ad.name
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ad *AllDebrid) GetLogger() zerolog.Logger {
|
func (ad *AllDebrid) Logger() zerolog.Logger {
|
||||||
return ad.logger
|
return ad.logger
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -186,7 +189,7 @@ func (ad *AllDebrid) GetTorrent(torrentId string) (*types.Torrent, error) {
|
|||||||
var res TorrentInfoResponse
|
var res TorrentInfoResponse
|
||||||
err = json.Unmarshal(resp, &res)
|
err = json.Unmarshal(resp, &res)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
ad.logger.Info().Msgf("Error unmarshalling torrent info: %s", err)
|
ad.logger.Error().Err(err).Msgf("Error unmarshalling torrent info")
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
data := res.Data.Magnets
|
data := res.Data.Magnets
|
||||||
@@ -200,7 +203,7 @@ func (ad *AllDebrid) GetTorrent(torrentId string) (*types.Torrent, error) {
|
|||||||
OriginalFilename: name,
|
OriginalFilename: name,
|
||||||
Files: make(map[string]types.File),
|
Files: make(map[string]types.File),
|
||||||
InfoHash: data.Hash,
|
InfoHash: data.Hash,
|
||||||
Debrid: ad.Name,
|
Debrid: ad.name,
|
||||||
MountPath: ad.MountPath,
|
MountPath: ad.MountPath,
|
||||||
Added: time.Unix(data.CompletionDate, 0).Format(time.RFC3339),
|
Added: time.Unix(data.CompletionDate, 0).Format(time.RFC3339),
|
||||||
}
|
}
|
||||||
@@ -228,7 +231,7 @@ func (ad *AllDebrid) UpdateTorrent(t *types.Torrent) error {
|
|||||||
var res TorrentInfoResponse
|
var res TorrentInfoResponse
|
||||||
err = json.Unmarshal(resp, &res)
|
err = json.Unmarshal(resp, &res)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
ad.logger.Info().Msgf("Error unmarshalling torrent info: %s", err)
|
ad.logger.Error().Err(err).Msgf("Error unmarshalling torrent info")
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
data := res.Data.Magnets
|
data := res.Data.Magnets
|
||||||
@@ -240,7 +243,7 @@ func (ad *AllDebrid) UpdateTorrent(t *types.Torrent) error {
|
|||||||
t.OriginalFilename = name
|
t.OriginalFilename = name
|
||||||
t.Folder = name
|
t.Folder = name
|
||||||
t.MountPath = ad.MountPath
|
t.MountPath = ad.MountPath
|
||||||
t.Debrid = ad.Name
|
t.Debrid = ad.name
|
||||||
t.Bytes = data.Size
|
t.Bytes = data.Size
|
||||||
t.Seeders = data.Seeders
|
t.Seeders = data.Seeders
|
||||||
t.Added = time.Unix(data.CompletionDate, 0).Format(time.RFC3339)
|
t.Added = time.Unix(data.CompletionDate, 0).Format(time.RFC3339)
|
||||||
@@ -256,7 +259,7 @@ func (ad *AllDebrid) UpdateTorrent(t *types.Torrent) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ad *AllDebrid) CheckStatus(torrent *types.Torrent, isSymlink bool) (*types.Torrent, error) {
|
func (ad *AllDebrid) CheckStatus(torrent *types.Torrent) (*types.Torrent, error) {
|
||||||
for {
|
for {
|
||||||
err := ad.UpdateTorrent(torrent)
|
err := ad.UpdateTorrent(torrent)
|
||||||
|
|
||||||
@@ -266,13 +269,7 @@ func (ad *AllDebrid) CheckStatus(torrent *types.Torrent, isSymlink bool) (*types
|
|||||||
status := torrent.Status
|
status := torrent.Status
|
||||||
if status == "downloaded" {
|
if status == "downloaded" {
|
||||||
ad.logger.Info().Msgf("Torrent: %s downloaded", torrent.Name)
|
ad.logger.Info().Msgf("Torrent: %s downloaded", torrent.Name)
|
||||||
if !isSymlink {
|
return torrent, nil
|
||||||
err = ad.GenerateDownloadLinks(torrent)
|
|
||||||
if err != nil {
|
|
||||||
return torrent, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
break
|
|
||||||
} else if utils.Contains(ad.GetDownloadingStatus(), status) {
|
} else if utils.Contains(ad.GetDownloadingStatus(), status) {
|
||||||
if !torrent.DownloadUncached {
|
if !torrent.DownloadUncached {
|
||||||
return torrent, fmt.Errorf("torrent: %s not cached", torrent.Name)
|
return torrent, fmt.Errorf("torrent: %s not cached", torrent.Name)
|
||||||
@@ -285,7 +282,6 @@ func (ad *AllDebrid) CheckStatus(torrent *types.Torrent, isSymlink bool) (*types
|
|||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
return torrent, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ad *AllDebrid) DeleteTorrent(torrentId string) error {
|
func (ad *AllDebrid) DeleteTorrent(torrentId string) error {
|
||||||
@@ -298,8 +294,9 @@ func (ad *AllDebrid) DeleteTorrent(torrentId string) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ad *AllDebrid) GenerateDownloadLinks(t *types.Torrent) error {
|
func (ad *AllDebrid) GetFileDownloadLinks(t *types.Torrent) error {
|
||||||
filesCh := make(chan types.File, len(t.Files))
|
filesCh := make(chan types.File, len(t.Files))
|
||||||
|
linksCh := make(chan *types.DownloadLink, len(t.Files))
|
||||||
errCh := make(chan error, len(t.Files))
|
errCh := make(chan error, len(t.Files))
|
||||||
|
|
||||||
var wg sync.WaitGroup
|
var wg sync.WaitGroup
|
||||||
@@ -312,17 +309,19 @@ func (ad *AllDebrid) GenerateDownloadLinks(t *types.Torrent) error {
|
|||||||
errCh <- err
|
errCh <- err
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
file.DownloadLink = link
|
if link == nil {
|
||||||
if link != nil {
|
|
||||||
errCh <- fmt.Errorf("download link is empty")
|
errCh <- fmt.Errorf("download link is empty")
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
linksCh <- link
|
||||||
|
file.DownloadLink = link
|
||||||
filesCh <- file
|
filesCh <- file
|
||||||
}(file)
|
}(file)
|
||||||
}
|
}
|
||||||
go func() {
|
go func() {
|
||||||
wg.Wait()
|
wg.Wait()
|
||||||
close(filesCh)
|
close(filesCh)
|
||||||
|
close(linksCh)
|
||||||
close(errCh)
|
close(errCh)
|
||||||
}()
|
}()
|
||||||
files := make(map[string]types.File, len(t.Files))
|
files := make(map[string]types.File, len(t.Files))
|
||||||
@@ -330,10 +329,22 @@ func (ad *AllDebrid) GenerateDownloadLinks(t *types.Torrent) error {
|
|||||||
files[file.Name] = file
|
files[file.Name] = file
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Collect download links
|
||||||
|
links := make(map[string]*types.DownloadLink, len(t.Files))
|
||||||
|
|
||||||
|
for link := range linksCh {
|
||||||
|
if link == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
links[link.Link] = link
|
||||||
|
}
|
||||||
|
// Update the files with download links
|
||||||
|
ad.accounts.SetDownloadLinks(links)
|
||||||
|
|
||||||
// Check for errors
|
// Check for errors
|
||||||
for err := range errCh {
|
for err := range errCh {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err // Return the first error encountered
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -363,21 +374,18 @@ func (ad *AllDebrid) GetDownloadLink(t *types.Torrent, file *types.File) (*types
|
|||||||
if link == "" {
|
if link == "" {
|
||||||
return nil, fmt.Errorf("download link is empty")
|
return nil, fmt.Errorf("download link is empty")
|
||||||
}
|
}
|
||||||
|
now := time.Now()
|
||||||
return &types.DownloadLink{
|
return &types.DownloadLink{
|
||||||
Link: file.Link,
|
Link: file.Link,
|
||||||
DownloadLink: link,
|
DownloadLink: link,
|
||||||
Id: data.Data.Id,
|
Id: data.Data.Id,
|
||||||
Size: file.Size,
|
Size: file.Size,
|
||||||
Filename: file.Name,
|
Filename: file.Name,
|
||||||
Generated: time.Now(),
|
Generated: now,
|
||||||
AccountId: "0",
|
ExpiresAt: now.Add(ad.autoExpiresLinksAfter),
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ad *AllDebrid) GetCheckCached() bool {
|
|
||||||
return ad.checkCached
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ad *AllDebrid) GetTorrents() ([]*types.Torrent, error) {
|
func (ad *AllDebrid) GetTorrents() ([]*types.Torrent, error) {
|
||||||
url := fmt.Sprintf("%s/magnet/status?status=ready", ad.Host)
|
url := fmt.Sprintf("%s/magnet/status?status=ready", ad.Host)
|
||||||
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
||||||
@@ -389,7 +397,7 @@ func (ad *AllDebrid) GetTorrents() ([]*types.Torrent, error) {
|
|||||||
var res TorrentsListResponse
|
var res TorrentsListResponse
|
||||||
err = json.Unmarshal(resp, &res)
|
err = json.Unmarshal(resp, &res)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
ad.logger.Info().Msgf("Error unmarshalling torrent info: %s", err)
|
ad.logger.Error().Err(err).Msgf("Error unmarshalling torrent info")
|
||||||
return torrents, err
|
return torrents, err
|
||||||
}
|
}
|
||||||
for _, magnet := range res.Data.Magnets {
|
for _, magnet := range res.Data.Magnets {
|
||||||
@@ -402,7 +410,7 @@ func (ad *AllDebrid) GetTorrents() ([]*types.Torrent, error) {
|
|||||||
OriginalFilename: magnet.Filename,
|
OriginalFilename: magnet.Filename,
|
||||||
Files: make(map[string]types.File),
|
Files: make(map[string]types.File),
|
||||||
InfoHash: magnet.Hash,
|
InfoHash: magnet.Hash,
|
||||||
Debrid: ad.Name,
|
Debrid: ad.name,
|
||||||
MountPath: ad.MountPath,
|
MountPath: ad.MountPath,
|
||||||
Added: time.Unix(magnet.CompletionDate, 0).Format(time.RFC3339),
|
Added: time.Unix(magnet.CompletionDate, 0).Format(time.RFC3339),
|
||||||
})
|
})
|
||||||
@@ -411,7 +419,7 @@ func (ad *AllDebrid) GetTorrents() ([]*types.Torrent, error) {
|
|||||||
return torrents, nil
|
return torrents, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ad *AllDebrid) GetDownloads() (map[string]types.DownloadLink, error) {
|
func (ad *AllDebrid) GetDownloadLinks() (map[string]*types.DownloadLink, error) {
|
||||||
return nil, nil
|
return nil, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -431,12 +439,16 @@ func (ad *AllDebrid) GetMountPath() string {
|
|||||||
return ad.MountPath
|
return ad.MountPath
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ad *AllDebrid) DisableAccount(accountId string) {
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ad *AllDebrid) ResetActiveDownloadKeys() {
|
|
||||||
|
|
||||||
}
|
|
||||||
func (ad *AllDebrid) DeleteDownloadLink(linkId string) error {
|
func (ad *AllDebrid) DeleteDownloadLink(linkId string) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (ad *AllDebrid) GetAvailableSlots() (int, error) {
|
||||||
|
// This function is a placeholder for AllDebrid
|
||||||
|
//TODO: Implement the logic to check available slots for AllDebrid
|
||||||
|
return 0, fmt.Errorf("GetAvailableSlots not implemented for AllDebrid")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ad *AllDebrid) Accounts() *types.Accounts {
|
||||||
|
return ad.accounts
|
||||||
|
}
|
||||||
@@ -1,5 +1,10 @@
|
|||||||
package alldebrid
|
package alldebrid
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
)
|
||||||
|
|
||||||
type errorResponse struct {
|
type errorResponse struct {
|
||||||
Code string `json:"code"`
|
Code string `json:"code"`
|
||||||
Message string `json:"message"`
|
Message string `json:"message"`
|
||||||
@@ -32,6 +37,8 @@ type magnetInfo struct {
|
|||||||
Files []MagnetFile `json:"files"`
|
Files []MagnetFile `json:"files"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type Magnets []magnetInfo
|
||||||
|
|
||||||
type TorrentInfoResponse struct {
|
type TorrentInfoResponse struct {
|
||||||
Status string `json:"status"`
|
Status string `json:"status"`
|
||||||
Data struct {
|
Data struct {
|
||||||
@@ -43,7 +50,7 @@ type TorrentInfoResponse struct {
|
|||||||
type TorrentsListResponse struct {
|
type TorrentsListResponse struct {
|
||||||
Status string `json:"status"`
|
Status string `json:"status"`
|
||||||
Data struct {
|
Data struct {
|
||||||
Magnets []magnetInfo `json:"magnets"`
|
Magnets Magnets `json:"magnets"`
|
||||||
} `json:"data"`
|
} `json:"data"`
|
||||||
Error *errorResponse `json:"error"`
|
Error *errorResponse `json:"error"`
|
||||||
}
|
}
|
||||||
@@ -81,3 +88,27 @@ type DownloadLink struct {
|
|||||||
} `json:"data"`
|
} `json:"data"`
|
||||||
Error *errorResponse `json:"error"`
|
Error *errorResponse `json:"error"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// UnmarshalJSON implements custom unmarshaling for Magnets type
|
||||||
|
// It can handle both an array of magnetInfo objects or a map with string keys.
|
||||||
|
// If the input is an array, it will be unmarshaled directly into the Magnets slice.
|
||||||
|
// If the input is a map, it will extract the values and append them to the Magnets slice.
|
||||||
|
// If the input is neither, it will return an error.
|
||||||
|
func (m *Magnets) UnmarshalJSON(data []byte) error {
|
||||||
|
// Try to unmarshal as array
|
||||||
|
var arr []magnetInfo
|
||||||
|
if err := json.Unmarshal(data, &arr); err == nil {
|
||||||
|
*m = arr
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Try to unmarshal as map
|
||||||
|
var obj map[string]magnetInfo
|
||||||
|
if err := json.Unmarshal(data, &obj); err == nil {
|
||||||
|
for _, v := range obj {
|
||||||
|
*m = append(*m, v)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return fmt.Errorf("magnets: unsupported JSON format")
|
||||||
|
}
|
||||||
@@ -10,7 +10,6 @@ import (
|
|||||||
"github.com/sirrobot01/decypharr/internal/request"
|
"github.com/sirrobot01/decypharr/internal/request"
|
||||||
"github.com/sirrobot01/decypharr/internal/utils"
|
"github.com/sirrobot01/decypharr/internal/utils"
|
||||||
"github.com/sirrobot01/decypharr/pkg/debrid/types"
|
"github.com/sirrobot01/decypharr/pkg/debrid/types"
|
||||||
"strconv"
|
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"net/http"
|
"net/http"
|
||||||
@@ -18,24 +17,64 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
type DebridLink struct {
|
type DebridLink struct {
|
||||||
Name string
|
name string
|
||||||
Host string `json:"host"`
|
Host string `json:"host"`
|
||||||
APIKey string
|
APIKey string
|
||||||
accounts map[string]types.Account
|
accounts *types.Accounts
|
||||||
DownloadUncached bool
|
DownloadUncached bool
|
||||||
client *request.Client
|
client *request.Client
|
||||||
|
|
||||||
|
autoExpiresLinksAfter time.Duration
|
||||||
|
|
||||||
MountPath string
|
MountPath string
|
||||||
logger zerolog.Logger
|
logger zerolog.Logger
|
||||||
checkCached bool
|
checkCached bool
|
||||||
addSamples bool
|
addSamples bool
|
||||||
}
|
}
|
||||||
|
|
||||||
func (dl *DebridLink) GetName() string {
|
func New(dc config.Debrid) (*DebridLink, error) {
|
||||||
return dl.Name
|
rl := request.ParseRateLimit(dc.RateLimit)
|
||||||
|
|
||||||
|
headers := map[string]string{
|
||||||
|
"Authorization": fmt.Sprintf("Bearer %s", dc.APIKey),
|
||||||
|
"Content-Type": "application/json",
|
||||||
|
}
|
||||||
|
_log := logger.New(dc.Name)
|
||||||
|
client := request.New(
|
||||||
|
request.WithHeaders(headers),
|
||||||
|
request.WithLogger(_log),
|
||||||
|
request.WithRateLimiter(rl),
|
||||||
|
request.WithProxy(dc.Proxy),
|
||||||
|
)
|
||||||
|
|
||||||
|
autoExpiresLinksAfter, err := time.ParseDuration(dc.AutoExpireLinksAfter)
|
||||||
|
if autoExpiresLinksAfter == 0 || err != nil {
|
||||||
|
autoExpiresLinksAfter = 48 * time.Hour
|
||||||
|
}
|
||||||
|
return &DebridLink{
|
||||||
|
name: "debridlink",
|
||||||
|
Host: "https://debrid-link.com/api/v2",
|
||||||
|
APIKey: dc.APIKey,
|
||||||
|
accounts: types.NewAccounts(dc),
|
||||||
|
DownloadUncached: dc.DownloadUncached,
|
||||||
|
autoExpiresLinksAfter: autoExpiresLinksAfter,
|
||||||
|
client: client,
|
||||||
|
MountPath: dc.Folder,
|
||||||
|
logger: logger.New(dc.Name),
|
||||||
|
checkCached: dc.CheckCached,
|
||||||
|
addSamples: dc.AddSamples,
|
||||||
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (dl *DebridLink) GetLogger() zerolog.Logger {
|
func (dl *DebridLink) GetProfile() (*types.Profile, error) {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (dl *DebridLink) Name() string {
|
||||||
|
return dl.name
|
||||||
|
}
|
||||||
|
|
||||||
|
func (dl *DebridLink) Logger() zerolog.Logger {
|
||||||
return dl.logger
|
return dl.logger
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -68,13 +107,13 @@ func (dl *DebridLink) IsAvailable(hashes []string) map[string]bool {
|
|||||||
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
||||||
resp, err := dl.client.MakeRequest(req)
|
resp, err := dl.client.MakeRequest(req)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
dl.logger.Info().Msgf("Error checking availability: %v", err)
|
dl.logger.Error().Err(err).Msgf("Error checking availability")
|
||||||
return result
|
return result
|
||||||
}
|
}
|
||||||
var data AvailableResponse
|
var data AvailableResponse
|
||||||
err = json.Unmarshal(resp, &data)
|
err = json.Unmarshal(resp, &data)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
dl.logger.Info().Msgf("Error marshalling availability: %v", err)
|
dl.logger.Error().Err(err).Msgf("Error marshalling availability")
|
||||||
return result
|
return result
|
||||||
}
|
}
|
||||||
if data.Value == nil {
|
if data.Value == nil {
|
||||||
@@ -121,7 +160,7 @@ func (dl *DebridLink) GetTorrent(torrentId string) (*types.Torrent, error) {
|
|||||||
Filename: name,
|
Filename: name,
|
||||||
OriginalFilename: name,
|
OriginalFilename: name,
|
||||||
MountPath: dl.MountPath,
|
MountPath: dl.MountPath,
|
||||||
Debrid: dl.Name,
|
Debrid: dl.name,
|
||||||
Added: time.Unix(t.Created, 0).Format(time.RFC3339),
|
Added: time.Unix(t.Created, 0).Format(time.RFC3339),
|
||||||
}
|
}
|
||||||
cfg := config.Get()
|
cfg := config.Get()
|
||||||
@@ -135,14 +174,7 @@ func (dl *DebridLink) GetTorrent(torrentId string) (*types.Torrent, error) {
|
|||||||
Name: f.Name,
|
Name: f.Name,
|
||||||
Size: f.Size,
|
Size: f.Size,
|
||||||
Path: f.Name,
|
Path: f.Name,
|
||||||
DownloadLink: &types.DownloadLink{
|
Link: f.DownloadURL,
|
||||||
Filename: f.Name,
|
|
||||||
Link: f.DownloadURL,
|
|
||||||
DownloadLink: f.DownloadURL,
|
|
||||||
Generated: time.Now(),
|
|
||||||
AccountId: "0",
|
|
||||||
},
|
|
||||||
Link: f.DownloadURL,
|
|
||||||
}
|
}
|
||||||
torrent.Files[file.Name] = file
|
torrent.Files[file.Name] = file
|
||||||
}
|
}
|
||||||
@@ -191,6 +223,8 @@ func (dl *DebridLink) UpdateTorrent(t *types.Torrent) error {
|
|||||||
t.OriginalFilename = name
|
t.OriginalFilename = name
|
||||||
t.Added = time.Unix(data.Created, 0).Format(time.RFC3339)
|
t.Added = time.Unix(data.Created, 0).Format(time.RFC3339)
|
||||||
cfg := config.Get()
|
cfg := config.Get()
|
||||||
|
links := make(map[string]*types.DownloadLink)
|
||||||
|
now := time.Now()
|
||||||
for _, f := range data.Files {
|
for _, f := range data.Files {
|
||||||
if !cfg.IsSizeAllowed(f.Size) {
|
if !cfg.IsSizeAllowed(f.Size) {
|
||||||
continue
|
continue
|
||||||
@@ -201,17 +235,21 @@ func (dl *DebridLink) UpdateTorrent(t *types.Torrent) error {
|
|||||||
Name: f.Name,
|
Name: f.Name,
|
||||||
Size: f.Size,
|
Size: f.Size,
|
||||||
Path: f.Name,
|
Path: f.Name,
|
||||||
DownloadLink: &types.DownloadLink{
|
Link: f.DownloadURL,
|
||||||
Filename: f.Name,
|
|
||||||
Link: f.DownloadURL,
|
|
||||||
DownloadLink: f.DownloadURL,
|
|
||||||
Generated: time.Now(),
|
|
||||||
AccountId: "0",
|
|
||||||
},
|
|
||||||
Link: f.DownloadURL,
|
|
||||||
}
|
}
|
||||||
|
link := &types.DownloadLink{
|
||||||
|
Filename: f.Name,
|
||||||
|
Link: f.DownloadURL,
|
||||||
|
DownloadLink: f.DownloadURL,
|
||||||
|
Generated: now,
|
||||||
|
ExpiresAt: now.Add(dl.autoExpiresLinksAfter),
|
||||||
|
}
|
||||||
|
links[file.Link] = link
|
||||||
|
file.DownloadLink = link
|
||||||
t.Files[f.Name] = file
|
t.Files[f.Name] = file
|
||||||
}
|
}
|
||||||
|
|
||||||
|
dl.accounts.SetDownloadLinks(links)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -246,8 +284,11 @@ func (dl *DebridLink) SubmitMagnet(t *types.Torrent) (*types.Torrent, error) {
|
|||||||
t.Filename = name
|
t.Filename = name
|
||||||
t.OriginalFilename = name
|
t.OriginalFilename = name
|
||||||
t.MountPath = dl.MountPath
|
t.MountPath = dl.MountPath
|
||||||
t.Debrid = dl.Name
|
t.Debrid = dl.name
|
||||||
t.Added = time.Unix(data.Created, 0).Format(time.RFC3339)
|
t.Added = time.Unix(data.Created, 0).Format(time.RFC3339)
|
||||||
|
|
||||||
|
links := make(map[string]*types.DownloadLink)
|
||||||
|
now := time.Now()
|
||||||
for _, f := range data.Files {
|
for _, f := range data.Files {
|
||||||
file := types.File{
|
file := types.File{
|
||||||
TorrentId: t.Id,
|
TorrentId: t.Id,
|
||||||
@@ -256,22 +297,26 @@ func (dl *DebridLink) SubmitMagnet(t *types.Torrent) (*types.Torrent, error) {
|
|||||||
Size: f.Size,
|
Size: f.Size,
|
||||||
Path: f.Name,
|
Path: f.Name,
|
||||||
Link: f.DownloadURL,
|
Link: f.DownloadURL,
|
||||||
DownloadLink: &types.DownloadLink{
|
Generated: now,
|
||||||
Filename: f.Name,
|
|
||||||
Link: f.DownloadURL,
|
|
||||||
DownloadLink: f.DownloadURL,
|
|
||||||
Generated: time.Now(),
|
|
||||||
AccountId: "0",
|
|
||||||
},
|
|
||||||
Generated: time.Now(),
|
|
||||||
}
|
}
|
||||||
|
link := &types.DownloadLink{
|
||||||
|
Filename: f.Name,
|
||||||
|
Link: f.DownloadURL,
|
||||||
|
DownloadLink: f.DownloadURL,
|
||||||
|
Generated: now,
|
||||||
|
ExpiresAt: now.Add(dl.autoExpiresLinksAfter),
|
||||||
|
}
|
||||||
|
links[file.Link] = link
|
||||||
|
file.DownloadLink = link
|
||||||
t.Files[f.Name] = file
|
t.Files[f.Name] = file
|
||||||
}
|
}
|
||||||
|
|
||||||
|
dl.accounts.SetDownloadLinks(links)
|
||||||
|
|
||||||
return t, nil
|
return t, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (dl *DebridLink) CheckStatus(torrent *types.Torrent, isSymlink bool) (*types.Torrent, error) {
|
func (dl *DebridLink) CheckStatus(torrent *types.Torrent) (*types.Torrent, error) {
|
||||||
for {
|
for {
|
||||||
err := dl.UpdateTorrent(torrent)
|
err := dl.UpdateTorrent(torrent)
|
||||||
if err != nil || torrent == nil {
|
if err != nil || torrent == nil {
|
||||||
@@ -280,11 +325,7 @@ func (dl *DebridLink) CheckStatus(torrent *types.Torrent, isSymlink bool) (*type
|
|||||||
status := torrent.Status
|
status := torrent.Status
|
||||||
if status == "downloaded" {
|
if status == "downloaded" {
|
||||||
dl.logger.Info().Msgf("Torrent: %s downloaded", torrent.Name)
|
dl.logger.Info().Msgf("Torrent: %s downloaded", torrent.Name)
|
||||||
err = dl.GenerateDownloadLinks(torrent)
|
return torrent, nil
|
||||||
if err != nil {
|
|
||||||
return torrent, err
|
|
||||||
}
|
|
||||||
break
|
|
||||||
} else if utils.Contains(dl.GetDownloadingStatus(), status) {
|
} else if utils.Contains(dl.GetDownloadingStatus(), status) {
|
||||||
if !torrent.DownloadUncached {
|
if !torrent.DownloadUncached {
|
||||||
return torrent, fmt.Errorf("torrent: %s not cached", torrent.Name)
|
return torrent, fmt.Errorf("torrent: %s not cached", torrent.Name)
|
||||||
@@ -297,7 +338,6 @@ func (dl *DebridLink) CheckStatus(torrent *types.Torrent, isSymlink bool) (*type
|
|||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
return torrent, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (dl *DebridLink) DeleteTorrent(torrentId string) error {
|
func (dl *DebridLink) DeleteTorrent(torrentId string) error {
|
||||||
@@ -310,69 +350,27 @@ func (dl *DebridLink) DeleteTorrent(torrentId string) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (dl *DebridLink) GenerateDownloadLinks(t *types.Torrent) error {
|
func (dl *DebridLink) GetFileDownloadLinks(t *types.Torrent) error {
|
||||||
// Download links are already generated
|
// Download links are already generated
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (dl *DebridLink) GetDownloads() (map[string]types.DownloadLink, error) {
|
func (dl *DebridLink) GetDownloadLinks() (map[string]*types.DownloadLink, error) {
|
||||||
return nil, nil
|
return nil, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (dl *DebridLink) GetDownloadLink(t *types.Torrent, file *types.File) (*types.DownloadLink, error) {
|
func (dl *DebridLink) GetDownloadLink(t *types.Torrent, file *types.File) (*types.DownloadLink, error) {
|
||||||
return file.DownloadLink, nil
|
return dl.accounts.GetDownloadLink(file.Link)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (dl *DebridLink) GetDownloadingStatus() []string {
|
func (dl *DebridLink) GetDownloadingStatus() []string {
|
||||||
return []string{"downloading"}
|
return []string{"downloading"}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (dl *DebridLink) GetCheckCached() bool {
|
|
||||||
return dl.checkCached
|
|
||||||
}
|
|
||||||
|
|
||||||
func (dl *DebridLink) GetDownloadUncached() bool {
|
func (dl *DebridLink) GetDownloadUncached() bool {
|
||||||
return dl.DownloadUncached
|
return dl.DownloadUncached
|
||||||
}
|
}
|
||||||
|
|
||||||
func New(dc config.Debrid) *DebridLink {
|
|
||||||
rl := request.ParseRateLimit(dc.RateLimit)
|
|
||||||
|
|
||||||
headers := map[string]string{
|
|
||||||
"Authorization": fmt.Sprintf("Bearer %s", dc.APIKey),
|
|
||||||
"Content-Type": "application/json",
|
|
||||||
}
|
|
||||||
_log := logger.New(dc.Name)
|
|
||||||
client := request.New(
|
|
||||||
request.WithHeaders(headers),
|
|
||||||
request.WithLogger(_log),
|
|
||||||
request.WithRateLimiter(rl),
|
|
||||||
request.WithProxy(dc.Proxy),
|
|
||||||
)
|
|
||||||
|
|
||||||
accounts := make(map[string]types.Account)
|
|
||||||
for idx, key := range dc.DownloadAPIKeys {
|
|
||||||
id := strconv.Itoa(idx)
|
|
||||||
accounts[id] = types.Account{
|
|
||||||
Name: key,
|
|
||||||
ID: id,
|
|
||||||
Token: key,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return &DebridLink{
|
|
||||||
Name: "debridlink",
|
|
||||||
Host: "https://debrid-link.com/api/v2",
|
|
||||||
APIKey: dc.APIKey,
|
|
||||||
accounts: accounts,
|
|
||||||
DownloadUncached: dc.DownloadUncached,
|
|
||||||
client: client,
|
|
||||||
MountPath: dc.Folder,
|
|
||||||
logger: logger.New(dc.Name),
|
|
||||||
checkCached: dc.CheckCached,
|
|
||||||
addSamples: dc.AddSamples,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (dl *DebridLink) GetTorrents() ([]*types.Torrent, error) {
|
func (dl *DebridLink) GetTorrents() ([]*types.Torrent, error) {
|
||||||
page := 0
|
page := 0
|
||||||
perPage := 100
|
perPage := 100
|
||||||
@@ -402,11 +400,12 @@ func (dl *DebridLink) getTorrents(page, perPage int) ([]*types.Torrent, error) {
|
|||||||
var res torrentInfo
|
var res torrentInfo
|
||||||
err = json.Unmarshal(resp, &res)
|
err = json.Unmarshal(resp, &res)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
dl.logger.Info().Msgf("Error unmarshalling torrent info: %s", err)
|
dl.logger.Error().Err(err).Msgf("Error unmarshalling torrent info")
|
||||||
return torrents, err
|
return torrents, err
|
||||||
}
|
}
|
||||||
|
|
||||||
data := *res.Value
|
data := *res.Value
|
||||||
|
links := make(map[string]*types.DownloadLink)
|
||||||
|
|
||||||
if len(data) == 0 {
|
if len(data) == 0 {
|
||||||
return torrents, nil
|
return torrents, nil
|
||||||
@@ -424,11 +423,12 @@ func (dl *DebridLink) getTorrents(page, perPage int) ([]*types.Torrent, error) {
|
|||||||
OriginalFilename: t.Name,
|
OriginalFilename: t.Name,
|
||||||
InfoHash: t.HashString,
|
InfoHash: t.HashString,
|
||||||
Files: make(map[string]types.File),
|
Files: make(map[string]types.File),
|
||||||
Debrid: dl.Name,
|
Debrid: dl.name,
|
||||||
MountPath: dl.MountPath,
|
MountPath: dl.MountPath,
|
||||||
Added: time.Unix(t.Created, 0).Format(time.RFC3339),
|
Added: time.Unix(t.Created, 0).Format(time.RFC3339),
|
||||||
}
|
}
|
||||||
cfg := config.Get()
|
cfg := config.Get()
|
||||||
|
now := time.Now()
|
||||||
for _, f := range t.Files {
|
for _, f := range t.Files {
|
||||||
if !cfg.IsSizeAllowed(f.Size) {
|
if !cfg.IsSizeAllowed(f.Size) {
|
||||||
continue
|
continue
|
||||||
@@ -439,19 +439,23 @@ func (dl *DebridLink) getTorrents(page, perPage int) ([]*types.Torrent, error) {
|
|||||||
Name: f.Name,
|
Name: f.Name,
|
||||||
Size: f.Size,
|
Size: f.Size,
|
||||||
Path: f.Name,
|
Path: f.Name,
|
||||||
DownloadLink: &types.DownloadLink{
|
Link: f.DownloadURL,
|
||||||
Filename: f.Name,
|
|
||||||
Link: f.DownloadURL,
|
|
||||||
DownloadLink: f.DownloadURL,
|
|
||||||
Generated: time.Now(),
|
|
||||||
AccountId: "0",
|
|
||||||
},
|
|
||||||
Link: f.DownloadURL,
|
|
||||||
}
|
}
|
||||||
|
link := &types.DownloadLink{
|
||||||
|
Filename: f.Name,
|
||||||
|
Link: f.DownloadURL,
|
||||||
|
DownloadLink: f.DownloadURL,
|
||||||
|
Generated: now,
|
||||||
|
ExpiresAt: now.Add(dl.autoExpiresLinksAfter),
|
||||||
|
}
|
||||||
|
links[file.Link] = link
|
||||||
|
file.DownloadLink = link
|
||||||
torrent.Files[f.Name] = file
|
torrent.Files[f.Name] = file
|
||||||
}
|
}
|
||||||
torrents = append(torrents, torrent)
|
torrents = append(torrents, torrent)
|
||||||
}
|
}
|
||||||
|
dl.accounts.SetDownloadLinks(links)
|
||||||
|
|
||||||
return torrents, nil
|
return torrents, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -463,12 +467,15 @@ func (dl *DebridLink) GetMountPath() string {
|
|||||||
return dl.MountPath
|
return dl.MountPath
|
||||||
}
|
}
|
||||||
|
|
||||||
func (dl *DebridLink) DisableAccount(accountId string) {
|
|
||||||
}
|
|
||||||
|
|
||||||
func (dl *DebridLink) ResetActiveDownloadKeys() {
|
|
||||||
}
|
|
||||||
|
|
||||||
func (dl *DebridLink) DeleteDownloadLink(linkId string) error {
|
func (dl *DebridLink) DeleteDownloadLink(linkId string) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (dl *DebridLink) GetAvailableSlots() (int, error) {
|
||||||
|
//TODO: Implement the logic to check available slots for DebridLink
|
||||||
|
return 0, fmt.Errorf("GetAvailableSlots not implemented for DebridLink")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (dl *DebridLink) Accounts() *types.Accounts {
|
||||||
|
return dl.accounts
|
||||||
|
}
|
||||||
1
pkg/debrid/providers/realdebrid/misc.go
Normal file
@@ -0,0 +1 @@
|
|||||||
|
package realdebrid
|
||||||
@@ -2,129 +2,238 @@ package realdebrid
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
|
"cmp"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"github.com/rs/zerolog"
|
|
||||||
"github.com/sirrobot01/decypharr/internal/config"
|
|
||||||
"github.com/sirrobot01/decypharr/internal/logger"
|
|
||||||
"github.com/sirrobot01/decypharr/internal/request"
|
|
||||||
"github.com/sirrobot01/decypharr/internal/utils"
|
|
||||||
"github.com/sirrobot01/decypharr/pkg/debrid/types"
|
"github.com/sirrobot01/decypharr/pkg/debrid/types"
|
||||||
"io"
|
"io"
|
||||||
"net/http"
|
"net/http"
|
||||||
gourl "net/url"
|
gourl "net/url"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"sort"
|
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/rs/zerolog"
|
||||||
|
"github.com/sirrobot01/decypharr/internal/config"
|
||||||
|
"github.com/sirrobot01/decypharr/internal/logger"
|
||||||
|
"github.com/sirrobot01/decypharr/internal/request"
|
||||||
|
"github.com/sirrobot01/decypharr/internal/utils"
|
||||||
|
"github.com/sirrobot01/decypharr/pkg/rar"
|
||||||
)
|
)
|
||||||
|
|
||||||
type RealDebrid struct {
|
type RealDebrid struct {
|
||||||
Name string
|
name string
|
||||||
Host string `json:"host"`
|
Host string `json:"host"`
|
||||||
|
|
||||||
APIKey string
|
APIKey string
|
||||||
currentDownloadKey string
|
accounts *types.Accounts
|
||||||
accounts map[string]types.Account
|
|
||||||
accountsMutex sync.RWMutex
|
|
||||||
|
|
||||||
DownloadUncached bool
|
DownloadUncached bool
|
||||||
client *request.Client
|
client *request.Client
|
||||||
downloadClient *request.Client
|
downloadClient *request.Client
|
||||||
|
repairClient *request.Client
|
||||||
|
autoExpiresLinksAfter time.Duration
|
||||||
|
|
||||||
MountPath string
|
MountPath string
|
||||||
logger zerolog.Logger
|
logger zerolog.Logger
|
||||||
checkCached bool
|
UnpackRar bool
|
||||||
addSamples bool
|
|
||||||
|
rarSemaphore chan struct{}
|
||||||
|
checkCached bool
|
||||||
|
addSamples bool
|
||||||
|
Profile *types.Profile
|
||||||
|
minimumFreeSlot int // Minimum number of active pots to maintain (used for cached stuffs, etc.)
|
||||||
|
limit int
|
||||||
}
|
}
|
||||||
|
|
||||||
func New(dc config.Debrid) *RealDebrid {
|
func New(dc config.Debrid) (*RealDebrid, error) {
|
||||||
rl := request.ParseRateLimit(dc.RateLimit)
|
rl := request.ParseRateLimit(dc.RateLimit)
|
||||||
|
repairRl := request.ParseRateLimit(cmp.Or(dc.RepairRateLimit, dc.RateLimit))
|
||||||
|
downloadRl := request.ParseRateLimit(cmp.Or(dc.DownloadRateLimit, dc.RateLimit))
|
||||||
|
|
||||||
headers := map[string]string{
|
headers := map[string]string{
|
||||||
"Authorization": fmt.Sprintf("Bearer %s", dc.APIKey),
|
"Authorization": fmt.Sprintf("Bearer %s", dc.APIKey),
|
||||||
}
|
}
|
||||||
_log := logger.New(dc.Name)
|
_log := logger.New(dc.Name)
|
||||||
|
|
||||||
accounts := make(map[string]types.Account)
|
autoExpiresLinksAfter, err := time.ParseDuration(dc.AutoExpireLinksAfter)
|
||||||
currentDownloadKey := dc.DownloadAPIKeys[0]
|
if autoExpiresLinksAfter == 0 || err != nil {
|
||||||
for idx, key := range dc.DownloadAPIKeys {
|
autoExpiresLinksAfter = 48 * time.Hour
|
||||||
id := strconv.Itoa(idx)
|
|
||||||
accounts[id] = types.Account{
|
|
||||||
Name: key,
|
|
||||||
ID: id,
|
|
||||||
Token: key,
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
downloadHeaders := map[string]string{
|
r := &RealDebrid{
|
||||||
"Authorization": fmt.Sprintf("Bearer %s", currentDownloadKey),
|
name: "realdebrid",
|
||||||
}
|
Host: "https://api.real-debrid.com/rest/1.0",
|
||||||
|
APIKey: dc.APIKey,
|
||||||
return &RealDebrid{
|
accounts: types.NewAccounts(dc),
|
||||||
Name: "realdebrid",
|
DownloadUncached: dc.DownloadUncached,
|
||||||
Host: "https://api.real-debrid.com/rest/1.0",
|
autoExpiresLinksAfter: autoExpiresLinksAfter,
|
||||||
APIKey: dc.APIKey,
|
UnpackRar: dc.UnpackRar,
|
||||||
accounts: accounts,
|
|
||||||
DownloadUncached: dc.DownloadUncached,
|
|
||||||
client: request.New(
|
client: request.New(
|
||||||
request.WithHeaders(headers),
|
request.WithHeaders(headers),
|
||||||
request.WithRateLimiter(rl),
|
request.WithRateLimiter(rl),
|
||||||
request.WithLogger(_log),
|
request.WithLogger(_log),
|
||||||
request.WithMaxRetries(5),
|
request.WithMaxRetries(10),
|
||||||
request.WithRetryableStatus(429, 502),
|
request.WithRetryableStatus(429, 502),
|
||||||
request.WithProxy(dc.Proxy),
|
request.WithProxy(dc.Proxy),
|
||||||
),
|
),
|
||||||
downloadClient: request.New(
|
downloadClient: request.New(
|
||||||
request.WithHeaders(downloadHeaders),
|
request.WithRateLimiter(downloadRl),
|
||||||
request.WithLogger(_log),
|
request.WithLogger(_log),
|
||||||
request.WithMaxRetries(10),
|
request.WithMaxRetries(10),
|
||||||
request.WithRetryableStatus(429, 447, 502),
|
request.WithRetryableStatus(429, 447, 502),
|
||||||
request.WithProxy(dc.Proxy),
|
request.WithProxy(dc.Proxy),
|
||||||
),
|
),
|
||||||
currentDownloadKey: currentDownloadKey,
|
repairClient: request.New(
|
||||||
MountPath: dc.Folder,
|
request.WithRateLimiter(repairRl),
|
||||||
logger: logger.New(dc.Name),
|
request.WithHeaders(headers),
|
||||||
checkCached: dc.CheckCached,
|
request.WithLogger(_log),
|
||||||
addSamples: dc.AddSamples,
|
request.WithMaxRetries(4),
|
||||||
|
request.WithRetryableStatus(429, 502),
|
||||||
|
request.WithProxy(dc.Proxy),
|
||||||
|
),
|
||||||
|
MountPath: dc.Folder,
|
||||||
|
logger: logger.New(dc.Name),
|
||||||
|
rarSemaphore: make(chan struct{}, 2),
|
||||||
|
checkCached: dc.CheckCached,
|
||||||
|
addSamples: dc.AddSamples,
|
||||||
|
minimumFreeSlot: dc.MinimumFreeSlot,
|
||||||
|
limit: dc.Limit,
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, err := r.GetProfile(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
} else {
|
||||||
|
return r, nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *RealDebrid) GetName() string {
|
func (r *RealDebrid) Name() string {
|
||||||
return r.Name
|
return r.name
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *RealDebrid) GetLogger() zerolog.Logger {
|
func (r *RealDebrid) Logger() zerolog.Logger {
|
||||||
return r.logger
|
return r.logger
|
||||||
}
|
}
|
||||||
|
|
||||||
func getSelectedFiles(t *types.Torrent, data torrentInfo) map[string]types.File {
|
func (r *RealDebrid) getSelectedFiles(t *types.Torrent, data torrentInfo) (map[string]types.File, error) {
|
||||||
|
files := make(map[string]types.File)
|
||||||
selectedFiles := make([]types.File, 0)
|
selectedFiles := make([]types.File, 0)
|
||||||
|
|
||||||
for _, f := range data.Files {
|
for _, f := range data.Files {
|
||||||
if f.Selected == 1 {
|
if f.Selected == 1 {
|
||||||
name := filepath.Base(f.Path)
|
selectedFiles = append(selectedFiles, types.File{
|
||||||
file := types.File{
|
|
||||||
TorrentId: t.Id,
|
TorrentId: t.Id,
|
||||||
Name: name,
|
Name: filepath.Base(f.Path),
|
||||||
Path: name,
|
Path: filepath.Base(f.Path),
|
||||||
Size: f.Bytes,
|
Size: f.Bytes,
|
||||||
Id: strconv.Itoa(f.ID),
|
Id: strconv.Itoa(f.ID),
|
||||||
}
|
})
|
||||||
selectedFiles = append(selectedFiles, file)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if len(selectedFiles) == 0 {
|
||||||
|
return files, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle RARed torrents (single link, multiple files)
|
||||||
|
if len(data.Links) == 1 && len(selectedFiles) > 1 {
|
||||||
|
return r.handleRarArchive(t, data, selectedFiles)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Standard case - map files to links
|
||||||
|
if len(selectedFiles) > len(data.Links) {
|
||||||
|
r.logger.Warn().Msgf("More files than links available: %d files, %d links for %s", len(selectedFiles), len(data.Links), t.Name)
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, f := range selectedFiles {
|
||||||
|
if i < len(data.Links) {
|
||||||
|
f.Link = data.Links[i]
|
||||||
|
files[f.Name] = f
|
||||||
|
} else {
|
||||||
|
r.logger.Warn().Str("file", f.Name).Msg("No link available for file")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return files, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// handleRarArchive processes RAR archives with multiple files
|
||||||
|
func (r *RealDebrid) handleRarArchive(t *types.Torrent, data torrentInfo, selectedFiles []types.File) (map[string]types.File, error) {
|
||||||
|
// This will block if 2 RAR operations are already in progress
|
||||||
|
r.rarSemaphore <- struct{}{}
|
||||||
|
defer func() {
|
||||||
|
<-r.rarSemaphore
|
||||||
|
}()
|
||||||
|
|
||||||
files := make(map[string]types.File)
|
files := make(map[string]types.File)
|
||||||
for index, f := range selectedFiles {
|
|
||||||
if index >= len(data.Links) {
|
if !r.UnpackRar {
|
||||||
break
|
r.logger.Debug().Msgf("RAR file detected, but unpacking is disabled: %s", t.Name)
|
||||||
|
// Create a single file representing the RAR archive
|
||||||
|
file := types.File{
|
||||||
|
TorrentId: t.Id,
|
||||||
|
Id: "0",
|
||||||
|
Name: t.Name + ".rar",
|
||||||
|
Size: 0,
|
||||||
|
IsRar: true,
|
||||||
|
ByteRange: nil,
|
||||||
|
Path: t.Name + ".rar",
|
||||||
|
Link: data.Links[0],
|
||||||
|
Generated: time.Now(),
|
||||||
}
|
}
|
||||||
f.Link = data.Links[index]
|
files[file.Name] = file
|
||||||
files[f.Name] = f
|
return files, nil
|
||||||
}
|
}
|
||||||
return files
|
|
||||||
|
r.logger.Info().Msgf("RAR file detected, unpacking: %s", t.Name)
|
||||||
|
linkFile := &types.File{TorrentId: t.Id, Link: data.Links[0]}
|
||||||
|
downloadLinkObj, err := r.GetDownloadLink(t, linkFile)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to get download link for RAR file: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
dlLink := downloadLinkObj.DownloadLink
|
||||||
|
reader, err := rar.NewReader(dlLink)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to create RAR reader: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
rarFiles, err := reader.GetFiles()
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to read RAR files: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create lookup map for faster matching
|
||||||
|
fileMap := make(map[string]*types.File)
|
||||||
|
for i := range selectedFiles {
|
||||||
|
// RD converts special chars to '_' for RAR file paths
|
||||||
|
// @TODO: there might be more special chars to replace
|
||||||
|
safeName := strings.NewReplacer("|", "_", "\"", "_", "\\", "_", "?", "_", "*", "_", ":", "_", "<", "_", ">", "_").Replace(selectedFiles[i].Name)
|
||||||
|
fileMap[safeName] = &selectedFiles[i]
|
||||||
|
}
|
||||||
|
|
||||||
|
now := time.Now()
|
||||||
|
|
||||||
|
for _, rarFile := range rarFiles {
|
||||||
|
if file, exists := fileMap[rarFile.Name()]; exists {
|
||||||
|
file.IsRar = true
|
||||||
|
file.ByteRange = rarFile.ByteRange()
|
||||||
|
file.Link = data.Links[0]
|
||||||
|
file.Generated = now
|
||||||
|
files[file.Name] = *file
|
||||||
|
} else if !rarFile.IsDirectory {
|
||||||
|
r.logger.Warn().Msgf("RAR file %s not found in torrent files", rarFile.Name())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return files, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// getTorrentFiles returns a list of torrent files from the torrent info
|
// getTorrentFiles returns a list of torrent files from the torrent info
|
||||||
@@ -191,13 +300,13 @@ func (r *RealDebrid) IsAvailable(hashes []string) map[string]bool {
|
|||||||
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
||||||
resp, err := r.client.MakeRequest(req)
|
resp, err := r.client.MakeRequest(req)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
r.logger.Info().Msgf("Error checking availability: %v", err)
|
r.logger.Error().Err(err).Msgf("Error checking availability")
|
||||||
return result
|
return result
|
||||||
}
|
}
|
||||||
var data AvailabilityResponse
|
var data AvailabilityResponse
|
||||||
err = json.Unmarshal(resp, &data)
|
err = json.Unmarshal(resp, &data)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
r.logger.Info().Msgf("Error marshalling availability: %v", err)
|
r.logger.Error().Err(err).Msgf("Error marshalling availability")
|
||||||
return result
|
return result
|
||||||
}
|
}
|
||||||
for _, h := range hashes[i:end] {
|
for _, h := range hashes[i:end] {
|
||||||
@@ -226,15 +335,30 @@ func (r *RealDebrid) addTorrent(t *types.Torrent) (*types.Torrent, error) {
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
req.Header.Add("Content-Type", "application/x-bittorrent")
|
req.Header.Add("Content-Type", "application/x-bittorrent")
|
||||||
resp, err := r.client.MakeRequest(req)
|
resp, err := r.client.Do(req)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
if err = json.Unmarshal(resp, &data); err != nil {
|
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusCreated {
|
||||||
|
// Handle multiple_downloads
|
||||||
|
|
||||||
|
if resp.StatusCode == 509 {
|
||||||
|
return nil, utils.TooManyActiveDownloadsError
|
||||||
|
}
|
||||||
|
|
||||||
|
bodyBytes, _ := io.ReadAll(resp.Body)
|
||||||
|
return nil, fmt.Errorf("realdebrid API error: Status: %d || Body: %s", resp.StatusCode, string(bodyBytes))
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
bodyBytes, err := io.ReadAll(resp.Body)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("reading response body: %w", err)
|
||||||
|
}
|
||||||
|
if err = json.Unmarshal(bodyBytes, &data); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
t.Id = data.Id
|
t.Id = data.Id
|
||||||
t.Debrid = r.Name
|
t.Debrid = r.name
|
||||||
t.MountPath = r.MountPath
|
t.MountPath = r.MountPath
|
||||||
return t, nil
|
return t, nil
|
||||||
}
|
}
|
||||||
@@ -246,15 +370,30 @@ func (r *RealDebrid) addMagnet(t *types.Torrent) (*types.Torrent, error) {
|
|||||||
}
|
}
|
||||||
var data AddMagnetSchema
|
var data AddMagnetSchema
|
||||||
req, _ := http.NewRequest(http.MethodPost, url, strings.NewReader(payload.Encode()))
|
req, _ := http.NewRequest(http.MethodPost, url, strings.NewReader(payload.Encode()))
|
||||||
resp, err := r.client.MakeRequest(req)
|
resp, err := r.client.Do(req)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
if err = json.Unmarshal(resp, &data); err != nil {
|
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusCreated {
|
||||||
|
// Handle multiple_downloads
|
||||||
|
|
||||||
|
if resp.StatusCode == 509 {
|
||||||
|
return nil, utils.TooManyActiveDownloadsError
|
||||||
|
}
|
||||||
|
|
||||||
|
bodyBytes, _ := io.ReadAll(resp.Body)
|
||||||
|
return nil, fmt.Errorf("realdebrid API error: Status: %d || Body: %s", resp.StatusCode, string(bodyBytes))
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
bodyBytes, err := io.ReadAll(resp.Body)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("reading response body: %w", err)
|
||||||
|
}
|
||||||
|
if err = json.Unmarshal(bodyBytes, &data); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
t.Id = data.Id
|
t.Id = data.Id
|
||||||
t.Debrid = r.Name
|
t.Debrid = r.name
|
||||||
t.MountPath = r.MountPath
|
t.MountPath = r.MountPath
|
||||||
return t, nil
|
return t, nil
|
||||||
}
|
}
|
||||||
@@ -273,7 +412,7 @@ func (r *RealDebrid) GetTorrent(torrentId string) (*types.Torrent, error) {
|
|||||||
}
|
}
|
||||||
if resp.StatusCode != http.StatusOK {
|
if resp.StatusCode != http.StatusOK {
|
||||||
if resp.StatusCode == http.StatusNotFound {
|
if resp.StatusCode == http.StatusNotFound {
|
||||||
return nil, request.TorrentNotFoundError
|
return nil, utils.TorrentNotFoundError
|
||||||
}
|
}
|
||||||
return nil, fmt.Errorf("realdebrid API error: Status: %d || Body: %s", resp.StatusCode, string(bodyBytes))
|
return nil, fmt.Errorf("realdebrid API error: Status: %d || Body: %s", resp.StatusCode, string(bodyBytes))
|
||||||
}
|
}
|
||||||
@@ -295,7 +434,7 @@ func (r *RealDebrid) GetTorrent(torrentId string) (*types.Torrent, error) {
|
|||||||
Filename: data.Filename,
|
Filename: data.Filename,
|
||||||
OriginalFilename: data.OriginalFilename,
|
OriginalFilename: data.OriginalFilename,
|
||||||
Links: data.Links,
|
Links: data.Links,
|
||||||
Debrid: r.Name,
|
Debrid: r.name,
|
||||||
MountPath: r.MountPath,
|
MountPath: r.MountPath,
|
||||||
}
|
}
|
||||||
t.Files = r.getTorrentFiles(t, data) // Get selected files
|
t.Files = r.getTorrentFiles(t, data) // Get selected files
|
||||||
@@ -316,7 +455,7 @@ func (r *RealDebrid) UpdateTorrent(t *types.Torrent) error {
|
|||||||
}
|
}
|
||||||
if resp.StatusCode != http.StatusOK {
|
if resp.StatusCode != http.StatusOK {
|
||||||
if resp.StatusCode == http.StatusNotFound {
|
if resp.StatusCode == http.StatusNotFound {
|
||||||
return request.TorrentNotFoundError
|
return utils.TorrentNotFoundError
|
||||||
}
|
}
|
||||||
return fmt.Errorf("realdebrid API error: Status: %d || Body: %s", resp.StatusCode, string(bodyBytes))
|
return fmt.Errorf("realdebrid API error: Status: %d || Body: %s", resp.StatusCode, string(bodyBytes))
|
||||||
}
|
}
|
||||||
@@ -336,13 +475,14 @@ func (r *RealDebrid) UpdateTorrent(t *types.Torrent) error {
|
|||||||
t.OriginalFilename = data.OriginalFilename
|
t.OriginalFilename = data.OriginalFilename
|
||||||
t.Links = data.Links
|
t.Links = data.Links
|
||||||
t.MountPath = r.MountPath
|
t.MountPath = r.MountPath
|
||||||
t.Debrid = r.Name
|
t.Debrid = r.name
|
||||||
t.Added = data.Added
|
t.Added = data.Added
|
||||||
t.Files = getSelectedFiles(t, data) // Get selected files
|
t.Files, _ = r.getSelectedFiles(t, data) // Get selected files
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *RealDebrid) CheckStatus(t *types.Torrent, isSymlink bool) (*types.Torrent, error) {
|
func (r *RealDebrid) CheckStatus(t *types.Torrent) (*types.Torrent, error) {
|
||||||
url := fmt.Sprintf("%s/torrents/info/%s", r.Host, t.Id)
|
url := fmt.Sprintf("%s/torrents/info/%s", r.Host, t.Id)
|
||||||
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
||||||
for {
|
for {
|
||||||
@@ -366,12 +506,12 @@ func (r *RealDebrid) CheckStatus(t *types.Torrent, isSymlink bool) (*types.Torre
|
|||||||
t.Seeders = data.Seeders
|
t.Seeders = data.Seeders
|
||||||
t.Links = data.Links
|
t.Links = data.Links
|
||||||
t.Status = status
|
t.Status = status
|
||||||
t.Debrid = r.Name
|
t.Debrid = r.name
|
||||||
t.MountPath = r.MountPath
|
t.MountPath = r.MountPath
|
||||||
if status == "waiting_files_selection" {
|
if status == "waiting_files_selection" {
|
||||||
t.Files = r.getTorrentFiles(t, data)
|
t.Files = r.getTorrentFiles(t, data)
|
||||||
if len(t.Files) == 0 {
|
if len(t.Files) == 0 {
|
||||||
return t, fmt.Errorf("no video files found")
|
return t, fmt.Errorf("no valid files found")
|
||||||
}
|
}
|
||||||
filesId := make([]string, 0)
|
filesId := make([]string, 0)
|
||||||
for _, f := range t.Files {
|
for _, f := range t.Files {
|
||||||
@@ -387,18 +527,19 @@ func (r *RealDebrid) CheckStatus(t *types.Torrent, isSymlink bool) (*types.Torre
|
|||||||
return t, err
|
return t, err
|
||||||
}
|
}
|
||||||
if res.StatusCode != http.StatusNoContent {
|
if res.StatusCode != http.StatusNoContent {
|
||||||
|
if res.StatusCode == 509 {
|
||||||
|
return nil, utils.TooManyActiveDownloadsError
|
||||||
|
}
|
||||||
return t, fmt.Errorf("realdebrid API error: Status: %d", res.StatusCode)
|
return t, fmt.Errorf("realdebrid API error: Status: %d", res.StatusCode)
|
||||||
}
|
}
|
||||||
} else if status == "downloaded" {
|
} else if status == "downloaded" {
|
||||||
t.Files = getSelectedFiles(t, data) // Get selected files
|
t.Files, err = r.getSelectedFiles(t, data) // Get selected files
|
||||||
r.logger.Info().Msgf("Torrent: %s downloaded to RD", t.Name)
|
if err != nil {
|
||||||
if !isSymlink {
|
return t, err
|
||||||
err = r.GenerateDownloadLinks(t)
|
|
||||||
if err != nil {
|
|
||||||
return t, err
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
break
|
|
||||||
|
r.logger.Info().Msgf("Torrent: %s downloaded to RD", t.Name)
|
||||||
|
return t, nil
|
||||||
} else if utils.Contains(r.GetDownloadingStatus(), status) {
|
} else if utils.Contains(r.GetDownloadingStatus(), status) {
|
||||||
if !t.DownloadUncached {
|
if !t.DownloadUncached {
|
||||||
return t, fmt.Errorf("torrent: %s not cached", t.Name)
|
return t, fmt.Errorf("torrent: %s not cached", t.Name)
|
||||||
@@ -409,7 +550,6 @@ func (r *RealDebrid) CheckStatus(t *types.Torrent, isSymlink bool) (*types.Torre
|
|||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
return t, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *RealDebrid) DeleteTorrent(torrentId string) error {
|
func (r *RealDebrid) DeleteTorrent(torrentId string) error {
|
||||||
@@ -422,46 +562,56 @@ func (r *RealDebrid) DeleteTorrent(torrentId string) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *RealDebrid) GenerateDownloadLinks(t *types.Torrent) error {
|
func (r *RealDebrid) GetFileDownloadLinks(t *types.Torrent) error {
|
||||||
filesCh := make(chan types.File, len(t.Files))
|
|
||||||
errCh := make(chan error, len(t.Files))
|
|
||||||
|
|
||||||
var wg sync.WaitGroup
|
var wg sync.WaitGroup
|
||||||
wg.Add(len(t.Files))
|
var mu sync.Mutex
|
||||||
for _, f := range t.Files {
|
var firstErr error
|
||||||
|
|
||||||
|
files := make(map[string]types.File)
|
||||||
|
links := make(map[string]*types.DownloadLink)
|
||||||
|
|
||||||
|
_files := t.GetFiles()
|
||||||
|
wg.Add(len(_files))
|
||||||
|
|
||||||
|
for _, f := range _files {
|
||||||
go func(file types.File) {
|
go func(file types.File) {
|
||||||
defer wg.Done()
|
defer wg.Done()
|
||||||
|
|
||||||
link, err := r.GetDownloadLink(t, &file)
|
link, err := r.GetDownloadLink(t, &file)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
errCh <- err
|
mu.Lock()
|
||||||
|
if firstErr == nil {
|
||||||
|
firstErr = err
|
||||||
|
}
|
||||||
|
mu.Unlock()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if link == nil {
|
||||||
|
mu.Lock()
|
||||||
|
if firstErr == nil {
|
||||||
|
firstErr = fmt.Errorf("realdebrid API error: download link not found for file %s", file.Name)
|
||||||
|
}
|
||||||
|
mu.Unlock()
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
file.DownloadLink = link
|
file.DownloadLink = link
|
||||||
filesCh <- file
|
|
||||||
|
mu.Lock()
|
||||||
|
files[file.Name] = file
|
||||||
|
links[link.Link] = link
|
||||||
|
mu.Unlock()
|
||||||
}(f)
|
}(f)
|
||||||
}
|
}
|
||||||
|
|
||||||
go func() {
|
wg.Wait()
|
||||||
wg.Wait()
|
|
||||||
close(filesCh)
|
|
||||||
close(errCh)
|
|
||||||
}()
|
|
||||||
|
|
||||||
// Collect results
|
if firstErr != nil {
|
||||||
files := make(map[string]types.File, len(t.Files))
|
return firstErr
|
||||||
for file := range filesCh {
|
|
||||||
files[file.Name] = file
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check for errors
|
|
||||||
for err := range errCh {
|
|
||||||
if err != nil {
|
|
||||||
return err // Return the first error encountered
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Add links to cache
|
||||||
|
r.accounts.SetDownloadLinks(links)
|
||||||
t.Files = files
|
t.Files = files
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@@ -472,20 +622,24 @@ func (r *RealDebrid) CheckLink(link string) error {
|
|||||||
"link": {link},
|
"link": {link},
|
||||||
}
|
}
|
||||||
req, _ := http.NewRequest(http.MethodPost, url, strings.NewReader(payload.Encode()))
|
req, _ := http.NewRequest(http.MethodPost, url, strings.NewReader(payload.Encode()))
|
||||||
resp, err := r.client.Do(req)
|
resp, err := r.repairClient.Do(req)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if resp.StatusCode == http.StatusNotFound {
|
if resp.StatusCode == http.StatusNotFound {
|
||||||
return request.HosterUnavailableError // File has been removed
|
return utils.HosterUnavailableError // File has been removed
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *RealDebrid) _getDownloadLink(file *types.File) (*types.DownloadLink, error) {
|
func (r *RealDebrid) _getDownloadLink(file *types.File) (*types.DownloadLink, error) {
|
||||||
url := fmt.Sprintf("%s/unrestrict/link/", r.Host)
|
url := fmt.Sprintf("%s/unrestrict/link/", r.Host)
|
||||||
|
_link := file.Link
|
||||||
|
if strings.HasPrefix(file.Link, "https://real-debrid.com/d/") && len(file.Link) > 39 {
|
||||||
|
_link = file.Link[0:39]
|
||||||
|
}
|
||||||
payload := gourl.Values{
|
payload := gourl.Values{
|
||||||
"link": {file.Link},
|
"link": {_link},
|
||||||
}
|
}
|
||||||
req, _ := http.NewRequest(http.MethodPost, url, strings.NewReader(payload.Encode()))
|
req, _ := http.NewRequest(http.MethodPost, url, strings.NewReader(payload.Encode()))
|
||||||
resp, err := r.downloadClient.Do(req)
|
resp, err := r.downloadClient.Do(req)
|
||||||
@@ -506,17 +660,17 @@ func (r *RealDebrid) _getDownloadLink(file *types.File) (*types.DownloadLink, er
|
|||||||
}
|
}
|
||||||
switch data.ErrorCode {
|
switch data.ErrorCode {
|
||||||
case 19:
|
case 19:
|
||||||
return nil, request.HosterUnavailableError // File has been removed
|
return nil, utils.HosterUnavailableError // File has been removed
|
||||||
case 23:
|
case 23:
|
||||||
return nil, request.TrafficExceededError
|
return nil, utils.TrafficExceededError
|
||||||
case 24:
|
case 24:
|
||||||
return nil, request.HosterUnavailableError // Link has been nerfed
|
return nil, utils.HosterUnavailableError // Link has been nerfed
|
||||||
case 34:
|
case 34:
|
||||||
return nil, request.TrafficExceededError // traffic exceeded
|
return nil, utils.TrafficExceededError // traffic exceeded
|
||||||
case 35:
|
case 35:
|
||||||
return nil, request.HosterUnavailableError
|
return nil, utils.HosterUnavailableError
|
||||||
case 36:
|
case 36:
|
||||||
return nil, request.TrafficExceededError // traffic exceeded
|
return nil, utils.TrafficExceededError // traffic exceeded
|
||||||
default:
|
default:
|
||||||
return nil, fmt.Errorf("realdebrid API error: Status: %d || Code: %d", resp.StatusCode, data.ErrorCode)
|
return nil, fmt.Errorf("realdebrid API error: Status: %d || Code: %d", resp.StatusCode, data.ErrorCode)
|
||||||
}
|
}
|
||||||
@@ -532,58 +686,54 @@ func (r *RealDebrid) _getDownloadLink(file *types.File) (*types.DownloadLink, er
|
|||||||
if data.Download == "" {
|
if data.Download == "" {
|
||||||
return nil, fmt.Errorf("realdebrid API error: download link not found")
|
return nil, fmt.Errorf("realdebrid API error: download link not found")
|
||||||
}
|
}
|
||||||
|
now := time.Now()
|
||||||
return &types.DownloadLink{
|
return &types.DownloadLink{
|
||||||
Filename: data.Filename,
|
Filename: data.Filename,
|
||||||
Size: data.Filesize,
|
Size: data.Filesize,
|
||||||
Link: data.Link,
|
Link: data.Link,
|
||||||
DownloadLink: data.Download,
|
DownloadLink: data.Download,
|
||||||
Generated: time.Now(),
|
Generated: now,
|
||||||
|
ExpiresAt: now.Add(r.autoExpiresLinksAfter),
|
||||||
}, nil
|
}, nil
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *RealDebrid) GetDownloadLink(t *types.Torrent, file *types.File) (*types.DownloadLink, error) {
|
func (r *RealDebrid) GetDownloadLink(t *types.Torrent, file *types.File) (*types.DownloadLink, error) {
|
||||||
|
|
||||||
if r.currentDownloadKey == "" {
|
accounts := r.accounts.All()
|
||||||
// If no download key is set, use the first one
|
|
||||||
accounts := r.getActiveAccounts()
|
|
||||||
if len(accounts) < 1 {
|
|
||||||
// No active download keys. It's likely that the key has reached bandwidth limit
|
|
||||||
return nil, fmt.Errorf("no active download keys")
|
|
||||||
}
|
|
||||||
r.currentDownloadKey = accounts[0].Token
|
|
||||||
}
|
|
||||||
|
|
||||||
r.downloadClient.SetHeader("Authorization", fmt.Sprintf("Bearer %s", r.currentDownloadKey))
|
for _, account := range accounts {
|
||||||
downloadLink, err := r._getDownloadLink(file)
|
r.downloadClient.SetHeader("Authorization", fmt.Sprintf("Bearer %s", account.Token))
|
||||||
retries := 0
|
downloadLink, err := r._getDownloadLink(file)
|
||||||
if err != nil {
|
|
||||||
if errors.Is(err, request.TrafficExceededError) {
|
if err == nil {
|
||||||
|
return downloadLink, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
retries := 0
|
||||||
|
if errors.Is(err, utils.TrafficExceededError) {
|
||||||
// Retries generating
|
// Retries generating
|
||||||
retries = 5
|
retries = 5
|
||||||
} else {
|
} else {
|
||||||
// If the error is not traffic exceeded, return the error
|
// If the error is not traffic exceeded, return the error
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
}
|
backOff := 1 * time.Second
|
||||||
backOff := 1 * time.Second
|
for retries > 0 {
|
||||||
for retries > 0 {
|
downloadLink, err = r._getDownloadLink(file)
|
||||||
downloadLink, err = r._getDownloadLink(file)
|
if err == nil {
|
||||||
if err == nil {
|
return downloadLink, nil
|
||||||
return downloadLink, nil
|
}
|
||||||
|
if !errors.Is(err, utils.TrafficExceededError) {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
// Add a delay before retrying
|
||||||
|
time.Sleep(backOff)
|
||||||
|
backOff *= 2 // Exponential backoff
|
||||||
|
retries--
|
||||||
}
|
}
|
||||||
if !errors.Is(err, request.TrafficExceededError) {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
// Add a delay before retrying
|
|
||||||
time.Sleep(backOff)
|
|
||||||
backOff *= 2 // Exponential backoff
|
|
||||||
}
|
}
|
||||||
return downloadLink, nil
|
return nil, fmt.Errorf("realdebrid API error: download link not found")
|
||||||
}
|
|
||||||
|
|
||||||
func (r *RealDebrid) GetCheckCached() bool {
|
|
||||||
return r.checkCached
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *RealDebrid) getTorrents(offset int, limit int) (int, []*types.Torrent, error) {
|
func (r *RealDebrid) getTorrents(offset int, limit int) (int, []*types.Torrent, error) {
|
||||||
@@ -634,7 +784,7 @@ func (r *RealDebrid) getTorrents(offset int, limit int) (int, []*types.Torrent,
|
|||||||
Links: t.Links,
|
Links: t.Links,
|
||||||
Files: make(map[string]types.File),
|
Files: make(map[string]types.File),
|
||||||
InfoHash: t.Hash,
|
InfoHash: t.Hash,
|
||||||
Debrid: r.Name,
|
Debrid: r.name,
|
||||||
MountPath: r.MountPath,
|
MountPath: r.MountPath,
|
||||||
Added: t.Added.Format(time.RFC3339),
|
Added: t.Added.Format(time.RFC3339),
|
||||||
})
|
})
|
||||||
@@ -645,6 +795,10 @@ func (r *RealDebrid) getTorrents(offset int, limit int) (int, []*types.Torrent,
|
|||||||
|
|
||||||
func (r *RealDebrid) GetTorrents() ([]*types.Torrent, error) {
|
func (r *RealDebrid) GetTorrents() ([]*types.Torrent, error) {
|
||||||
limit := 5000
|
limit := 5000
|
||||||
|
if r.limit != 0 {
|
||||||
|
limit = r.limit
|
||||||
|
}
|
||||||
|
hardLimit := r.limit
|
||||||
|
|
||||||
// Get first batch and total count
|
// Get first batch and total count
|
||||||
allTorrents := make([]*types.Torrent, 0)
|
allTorrents := make([]*types.Torrent, 0)
|
||||||
@@ -663,6 +817,10 @@ func (r *RealDebrid) GetTorrents() ([]*types.Torrent, error) {
|
|||||||
}
|
}
|
||||||
allTorrents = append(allTorrents, torrents...)
|
allTorrents = append(allTorrents, torrents...)
|
||||||
offset += totalTorrents
|
offset += totalTorrents
|
||||||
|
if hardLimit != 0 && len(allTorrents) >= hardLimit {
|
||||||
|
// If hard limit is set, stop fetching more torrents
|
||||||
|
break
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if fetchError != nil {
|
if fetchError != nil {
|
||||||
@@ -672,18 +830,19 @@ func (r *RealDebrid) GetTorrents() ([]*types.Torrent, error) {
|
|||||||
return allTorrents, nil
|
return allTorrents, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *RealDebrid) GetDownloads() (map[string]types.DownloadLink, error) {
|
func (r *RealDebrid) GetDownloadLinks() (map[string]*types.DownloadLink, error) {
|
||||||
links := make(map[string]types.DownloadLink)
|
links := make(map[string]*types.DownloadLink)
|
||||||
offset := 0
|
offset := 0
|
||||||
limit := 1000
|
limit := 1000
|
||||||
|
|
||||||
accounts := r.getActiveAccounts()
|
accounts := r.accounts.All()
|
||||||
|
|
||||||
if len(accounts) < 1 {
|
if len(accounts) < 1 {
|
||||||
// No active download keys. It's likely that the key has reached bandwidth limit
|
// No active download keys. It's likely that the key has reached bandwidth limit
|
||||||
return nil, fmt.Errorf("no active download keys")
|
return links, fmt.Errorf("no active download keys")
|
||||||
}
|
}
|
||||||
r.downloadClient.SetHeader("Authorization", fmt.Sprintf("Bearer %s", accounts[0].Token))
|
activeAccount := accounts[0]
|
||||||
|
r.downloadClient.SetHeader("Authorization", fmt.Sprintf("Bearer %s", activeAccount.Token))
|
||||||
for {
|
for {
|
||||||
dl, err := r._getDownloads(offset, limit)
|
dl, err := r._getDownloads(offset, limit)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -698,11 +857,12 @@ func (r *RealDebrid) GetDownloads() (map[string]types.DownloadLink, error) {
|
|||||||
// This is ordered by date, so we can skip the rest
|
// This is ordered by date, so we can skip the rest
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
links[d.Link] = d
|
links[d.Link] = &d
|
||||||
}
|
}
|
||||||
|
|
||||||
offset += len(dl)
|
offset += len(dl)
|
||||||
}
|
}
|
||||||
|
|
||||||
return links, nil
|
return links, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -728,6 +888,7 @@ func (r *RealDebrid) _getDownloads(offset int, limit int) ([]types.DownloadLink,
|
|||||||
Link: d.Link,
|
Link: d.Link,
|
||||||
DownloadLink: d.Download,
|
DownloadLink: d.Download,
|
||||||
Generated: d.Generated,
|
Generated: d.Generated,
|
||||||
|
ExpiresAt: d.Generated.Add(r.autoExpiresLinksAfter),
|
||||||
Id: d.Id,
|
Id: d.Id,
|
||||||
})
|
})
|
||||||
|
|
||||||
@@ -747,49 +908,6 @@ func (r *RealDebrid) GetMountPath() string {
|
|||||||
return r.MountPath
|
return r.MountPath
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *RealDebrid) DisableAccount(accountId string) {
|
|
||||||
r.accountsMutex.Lock()
|
|
||||||
defer r.accountsMutex.Unlock()
|
|
||||||
if len(r.accounts) == 1 {
|
|
||||||
r.logger.Info().Msgf("Cannot disable last account: %s", accountId)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
r.currentDownloadKey = ""
|
|
||||||
if value, ok := r.accounts[accountId]; ok {
|
|
||||||
value.Disabled = true
|
|
||||||
r.accounts[accountId] = value
|
|
||||||
r.logger.Info().Msgf("Disabled account Index: %s", value.ID)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *RealDebrid) ResetActiveDownloadKeys() {
|
|
||||||
r.accountsMutex.Lock()
|
|
||||||
defer r.accountsMutex.Unlock()
|
|
||||||
for key, value := range r.accounts {
|
|
||||||
value.Disabled = false
|
|
||||||
r.accounts[key] = value
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *RealDebrid) getActiveAccounts() []types.Account {
|
|
||||||
r.accountsMutex.RLock()
|
|
||||||
defer r.accountsMutex.RUnlock()
|
|
||||||
accounts := make([]types.Account, 0)
|
|
||||||
|
|
||||||
for _, value := range r.accounts {
|
|
||||||
if value.Disabled {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
accounts = append(accounts, value)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sort accounts by ID
|
|
||||||
sort.Slice(accounts, func(i, j int) bool {
|
|
||||||
return accounts[i].ID < accounts[j].ID
|
|
||||||
})
|
|
||||||
return accounts
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *RealDebrid) DeleteDownloadLink(linkId string) error {
|
func (r *RealDebrid) DeleteDownloadLink(linkId string) error {
|
||||||
url := fmt.Sprintf("%s/downloads/delete/%s", r.Host, linkId)
|
url := fmt.Sprintf("%s/downloads/delete/%s", r.Host, linkId)
|
||||||
req, _ := http.NewRequest(http.MethodDelete, url, nil)
|
req, _ := http.NewRequest(http.MethodDelete, url, nil)
|
||||||
@@ -798,3 +916,49 @@ func (r *RealDebrid) DeleteDownloadLink(linkId string) error {
|
|||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (r *RealDebrid) GetProfile() (*types.Profile, error) {
|
||||||
|
if r.Profile != nil {
|
||||||
|
return r.Profile, nil
|
||||||
|
}
|
||||||
|
url := fmt.Sprintf("%s/user", r.Host)
|
||||||
|
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
||||||
|
|
||||||
|
resp, err := r.client.MakeRequest(req)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
var data profileResponse
|
||||||
|
if json.Unmarshal(resp, &data) != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
profile := &types.Profile{
|
||||||
|
Id: data.Id,
|
||||||
|
Username: data.Username,
|
||||||
|
Email: data.Email,
|
||||||
|
Points: data.Points,
|
||||||
|
Premium: data.Premium,
|
||||||
|
Expiration: data.Expiration,
|
||||||
|
Type: data.Type,
|
||||||
|
}
|
||||||
|
r.Profile = profile
|
||||||
|
return profile, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *RealDebrid) GetAvailableSlots() (int, error) {
|
||||||
|
url := fmt.Sprintf("%s/torrents/activeCount", r.Host)
|
||||||
|
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
||||||
|
resp, err := r.client.MakeRequest(req)
|
||||||
|
if err != nil {
|
||||||
|
return 0, nil
|
||||||
|
}
|
||||||
|
var data AvailableSlotsResponse
|
||||||
|
if json.Unmarshal(resp, &data) != nil {
|
||||||
|
return 0, fmt.Errorf("error unmarshalling available slots response: %w", err)
|
||||||
|
}
|
||||||
|
return data.TotalSlots - data.ActiveSlots - r.minimumFreeSlot, nil // Ensure we maintain minimum active pots
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *RealDebrid) Accounts() *types.Accounts {
|
||||||
|
return r.accounts
|
||||||
|
}
|
||||||
@@ -139,3 +139,20 @@ type ErrorResponse struct {
|
|||||||
Error string `json:"error"`
|
Error string `json:"error"`
|
||||||
ErrorCode int `json:"error_code"`
|
ErrorCode int `json:"error_code"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type profileResponse struct {
|
||||||
|
Id int64 `json:"id"`
|
||||||
|
Username string `json:"username"`
|
||||||
|
Email string `json:"email"`
|
||||||
|
Points int64 `json:"points"`
|
||||||
|
Locale string `json:"locale"`
|
||||||
|
Avatar string `json:"avatar"`
|
||||||
|
Type string `json:"type"`
|
||||||
|
Premium int `json:"premium"`
|
||||||
|
Expiration time.Time `json:"expiration"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type AvailableSlotsResponse struct {
|
||||||
|
ActiveSlots int `json:"nb"`
|
||||||
|
TotalSlots int `json:"limit"`
|
||||||
|
}
|
||||||
@@ -4,13 +4,6 @@ import (
|
|||||||
"bytes"
|
"bytes"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"github.com/rs/zerolog"
|
|
||||||
"github.com/sirrobot01/decypharr/internal/config"
|
|
||||||
"github.com/sirrobot01/decypharr/internal/logger"
|
|
||||||
"github.com/sirrobot01/decypharr/internal/request"
|
|
||||||
"github.com/sirrobot01/decypharr/internal/utils"
|
|
||||||
"github.com/sirrobot01/decypharr/pkg/debrid/types"
|
|
||||||
"github.com/sirrobot01/decypharr/pkg/version"
|
|
||||||
"mime/multipart"
|
"mime/multipart"
|
||||||
"net/http"
|
"net/http"
|
||||||
gourl "net/url"
|
gourl "net/url"
|
||||||
@@ -21,13 +14,23 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/rs/zerolog"
|
||||||
|
"github.com/sirrobot01/decypharr/internal/config"
|
||||||
|
"github.com/sirrobot01/decypharr/internal/logger"
|
||||||
|
"github.com/sirrobot01/decypharr/internal/request"
|
||||||
|
"github.com/sirrobot01/decypharr/internal/utils"
|
||||||
|
"github.com/sirrobot01/decypharr/pkg/debrid/types"
|
||||||
|
"github.com/sirrobot01/decypharr/pkg/version"
|
||||||
)
|
)
|
||||||
|
|
||||||
type Torbox struct {
|
type Torbox struct {
|
||||||
Name string
|
name string
|
||||||
Host string `json:"host"`
|
Host string `json:"host"`
|
||||||
APIKey string
|
APIKey string
|
||||||
accounts map[string]types.Account
|
accounts *types.Accounts
|
||||||
|
autoExpiresLinksAfter time.Duration
|
||||||
|
|
||||||
DownloadUncached bool
|
DownloadUncached bool
|
||||||
client *request.Client
|
client *request.Client
|
||||||
|
|
||||||
@@ -37,7 +40,11 @@ type Torbox struct {
|
|||||||
addSamples bool
|
addSamples bool
|
||||||
}
|
}
|
||||||
|
|
||||||
func New(dc config.Debrid) *Torbox {
|
func (tb *Torbox) GetProfile() (*types.Profile, error) {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func New(dc config.Debrid) (*Torbox, error) {
|
||||||
rl := request.ParseRateLimit(dc.RateLimit)
|
rl := request.ParseRateLimit(dc.RateLimit)
|
||||||
|
|
||||||
headers := map[string]string{
|
headers := map[string]string{
|
||||||
@@ -51,36 +58,31 @@ func New(dc config.Debrid) *Torbox {
|
|||||||
request.WithLogger(_log),
|
request.WithLogger(_log),
|
||||||
request.WithProxy(dc.Proxy),
|
request.WithProxy(dc.Proxy),
|
||||||
)
|
)
|
||||||
|
autoExpiresLinksAfter, err := time.ParseDuration(dc.AutoExpireLinksAfter)
|
||||||
accounts := make(map[string]types.Account)
|
if autoExpiresLinksAfter == 0 || err != nil {
|
||||||
for idx, key := range dc.DownloadAPIKeys {
|
autoExpiresLinksAfter = 48 * time.Hour
|
||||||
id := strconv.Itoa(idx)
|
|
||||||
accounts[id] = types.Account{
|
|
||||||
Name: key,
|
|
||||||
ID: id,
|
|
||||||
Token: key,
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return &Torbox{
|
return &Torbox{
|
||||||
Name: "torbox",
|
name: "torbox",
|
||||||
Host: "https://api.torbox.app/v1",
|
Host: "https://api.torbox.app/v1",
|
||||||
APIKey: dc.APIKey,
|
APIKey: dc.APIKey,
|
||||||
accounts: accounts,
|
accounts: types.NewAccounts(dc),
|
||||||
DownloadUncached: dc.DownloadUncached,
|
DownloadUncached: dc.DownloadUncached,
|
||||||
client: client,
|
autoExpiresLinksAfter: autoExpiresLinksAfter,
|
||||||
MountPath: dc.Folder,
|
client: client,
|
||||||
logger: _log,
|
MountPath: dc.Folder,
|
||||||
checkCached: dc.CheckCached,
|
logger: _log,
|
||||||
addSamples: dc.AddSamples,
|
checkCached: dc.CheckCached,
|
||||||
}
|
addSamples: dc.AddSamples,
|
||||||
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (tb *Torbox) GetName() string {
|
func (tb *Torbox) Name() string {
|
||||||
return tb.Name
|
return tb.name
|
||||||
}
|
}
|
||||||
|
|
||||||
func (tb *Torbox) GetLogger() zerolog.Logger {
|
func (tb *Torbox) Logger() zerolog.Logger {
|
||||||
return tb.logger
|
return tb.logger
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -113,13 +115,13 @@ func (tb *Torbox) IsAvailable(hashes []string) map[string]bool {
|
|||||||
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
||||||
resp, err := tb.client.MakeRequest(req)
|
resp, err := tb.client.MakeRequest(req)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
tb.logger.Info().Msgf("Error checking availability: %v", err)
|
tb.logger.Error().Err(err).Msgf("Error checking availability")
|
||||||
return result
|
return result
|
||||||
}
|
}
|
||||||
var res AvailableResponse
|
var res AvailableResponse
|
||||||
err = json.Unmarshal(resp, &res)
|
err = json.Unmarshal(resp, &res)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
tb.logger.Info().Msgf("Error marshalling availability: %v", err)
|
tb.logger.Error().Err(err).Msgf("Error marshalling availability")
|
||||||
return result
|
return result
|
||||||
}
|
}
|
||||||
if res.Data == nil {
|
if res.Data == nil {
|
||||||
@@ -162,12 +164,12 @@ func (tb *Torbox) SubmitMagnet(torrent *types.Torrent) (*types.Torrent, error) {
|
|||||||
torrentId := strconv.Itoa(dt.Id)
|
torrentId := strconv.Itoa(dt.Id)
|
||||||
torrent.Id = torrentId
|
torrent.Id = torrentId
|
||||||
torrent.MountPath = tb.MountPath
|
torrent.MountPath = tb.MountPath
|
||||||
torrent.Debrid = tb.Name
|
torrent.Debrid = tb.name
|
||||||
|
|
||||||
return torrent, nil
|
return torrent, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func getTorboxStatus(status string, finished bool) string {
|
func (tb *Torbox) getTorboxStatus(status string, finished bool) string {
|
||||||
if finished {
|
if finished {
|
||||||
return "downloaded"
|
return "downloaded"
|
||||||
}
|
}
|
||||||
@@ -175,12 +177,16 @@ func getTorboxStatus(status string, finished bool) string {
|
|||||||
"checkingResumeData", "metaDL", "pausedUP", "queuedUP", "checkingUP",
|
"checkingResumeData", "metaDL", "pausedUP", "queuedUP", "checkingUP",
|
||||||
"forcedUP", "allocating", "downloading", "metaDL", "pausedDL",
|
"forcedUP", "allocating", "downloading", "metaDL", "pausedDL",
|
||||||
"queuedDL", "checkingDL", "forcedDL", "checkingResumeData", "moving"}
|
"queuedDL", "checkingDL", "forcedDL", "checkingResumeData", "moving"}
|
||||||
|
|
||||||
|
var determinedStatus string
|
||||||
switch {
|
switch {
|
||||||
case utils.Contains(downloading, status):
|
case utils.Contains(downloading, status):
|
||||||
return "downloading"
|
determinedStatus = "downloading"
|
||||||
default:
|
default:
|
||||||
return "error"
|
determinedStatus = "error"
|
||||||
}
|
}
|
||||||
|
|
||||||
|
return determinedStatus
|
||||||
}
|
}
|
||||||
|
|
||||||
func (tb *Torbox) GetTorrent(torrentId string) (*types.Torrent, error) {
|
func (tb *Torbox) GetTorrent(torrentId string) (*types.Torrent, error) {
|
||||||
@@ -205,39 +211,71 @@ func (tb *Torbox) GetTorrent(torrentId string) (*types.Torrent, error) {
|
|||||||
Bytes: data.Size,
|
Bytes: data.Size,
|
||||||
Folder: data.Name,
|
Folder: data.Name,
|
||||||
Progress: data.Progress * 100,
|
Progress: data.Progress * 100,
|
||||||
Status: getTorboxStatus(data.DownloadState, data.DownloadFinished),
|
Status: tb.getTorboxStatus(data.DownloadState, data.DownloadFinished),
|
||||||
Speed: data.DownloadSpeed,
|
Speed: data.DownloadSpeed,
|
||||||
Seeders: data.Seeds,
|
Seeders: data.Seeds,
|
||||||
Filename: data.Name,
|
Filename: data.Name,
|
||||||
OriginalFilename: data.Name,
|
OriginalFilename: data.Name,
|
||||||
MountPath: tb.MountPath,
|
MountPath: tb.MountPath,
|
||||||
Debrid: tb.Name,
|
Debrid: tb.name,
|
||||||
Files: make(map[string]types.File),
|
Files: make(map[string]types.File),
|
||||||
Added: data.CreatedAt.Format(time.RFC3339),
|
Added: data.CreatedAt.Format(time.RFC3339),
|
||||||
}
|
}
|
||||||
cfg := config.Get()
|
cfg := config.Get()
|
||||||
|
|
||||||
|
totalFiles := 0
|
||||||
|
skippedSamples := 0
|
||||||
|
skippedFileType := 0
|
||||||
|
skippedSize := 0
|
||||||
|
validFiles := 0
|
||||||
|
filesWithLinks := 0
|
||||||
|
|
||||||
for _, f := range data.Files {
|
for _, f := range data.Files {
|
||||||
|
totalFiles++
|
||||||
fileName := filepath.Base(f.Name)
|
fileName := filepath.Base(f.Name)
|
||||||
|
|
||||||
if !tb.addSamples && utils.IsSampleFile(f.AbsolutePath) {
|
if !tb.addSamples && utils.IsSampleFile(f.AbsolutePath) {
|
||||||
// Skip sample files
|
skippedSamples++
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if !cfg.IsAllowedFile(fileName) {
|
if !cfg.IsAllowedFile(fileName) {
|
||||||
|
skippedFileType++
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
if !cfg.IsSizeAllowed(f.Size) {
|
if !cfg.IsSizeAllowed(f.Size) {
|
||||||
|
skippedSize++
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
|
validFiles++
|
||||||
file := types.File{
|
file := types.File{
|
||||||
TorrentId: t.Id,
|
TorrentId: t.Id,
|
||||||
Id: strconv.Itoa(f.Id),
|
Id: strconv.Itoa(f.Id),
|
||||||
Name: fileName,
|
Name: fileName,
|
||||||
Size: f.Size,
|
Size: f.Size,
|
||||||
Path: fileName,
|
Path: f.Name,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// For downloaded torrents, set a placeholder link to indicate file is available
|
||||||
|
if data.DownloadFinished {
|
||||||
|
file.Link = fmt.Sprintf("torbox://%s/%d", t.Id, f.Id)
|
||||||
|
filesWithLinks++
|
||||||
|
}
|
||||||
|
|
||||||
t.Files[fileName] = file
|
t.Files[fileName] = file
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Log summary only if there are issues or for debugging
|
||||||
|
tb.logger.Debug().
|
||||||
|
Str("torrent_id", t.Id).
|
||||||
|
Str("torrent_name", t.Name).
|
||||||
|
Bool("download_finished", data.DownloadFinished).
|
||||||
|
Str("status", t.Status).
|
||||||
|
Int("total_files", totalFiles).
|
||||||
|
Int("valid_files", validFiles).
|
||||||
|
Int("final_file_count", len(t.Files)).
|
||||||
|
Msg("Torrent file processing completed")
|
||||||
var cleanPath string
|
var cleanPath string
|
||||||
if len(t.Files) > 0 {
|
if len(t.Files) > 0 {
|
||||||
cleanPath = path.Clean(data.Files[0].Name)
|
cleanPath = path.Clean(data.Files[0].Name)
|
||||||
@@ -246,7 +284,7 @@ func (tb *Torbox) GetTorrent(torrentId string) (*types.Torrent, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
t.OriginalFilename = strings.Split(cleanPath, "/")[0]
|
t.OriginalFilename = strings.Split(cleanPath, "/")[0]
|
||||||
t.Debrid = tb.Name
|
t.Debrid = tb.name
|
||||||
|
|
||||||
return t, nil
|
return t, nil
|
||||||
}
|
}
|
||||||
@@ -265,24 +303,33 @@ func (tb *Torbox) UpdateTorrent(t *types.Torrent) error {
|
|||||||
}
|
}
|
||||||
data := res.Data
|
data := res.Data
|
||||||
name := data.Name
|
name := data.Name
|
||||||
|
|
||||||
t.Name = name
|
t.Name = name
|
||||||
t.Bytes = data.Size
|
t.Bytes = data.Size
|
||||||
t.Folder = name
|
t.Folder = name
|
||||||
t.Progress = data.Progress * 100
|
t.Progress = data.Progress * 100
|
||||||
t.Status = getTorboxStatus(data.DownloadState, data.DownloadFinished)
|
t.Status = tb.getTorboxStatus(data.DownloadState, data.DownloadFinished)
|
||||||
t.Speed = data.DownloadSpeed
|
t.Speed = data.DownloadSpeed
|
||||||
t.Seeders = data.Seeds
|
t.Seeders = data.Seeds
|
||||||
t.Filename = name
|
t.Filename = name
|
||||||
t.OriginalFilename = name
|
t.OriginalFilename = name
|
||||||
t.MountPath = tb.MountPath
|
t.MountPath = tb.MountPath
|
||||||
t.Debrid = tb.Name
|
t.Debrid = tb.name
|
||||||
|
|
||||||
|
// Clear existing files map to rebuild it
|
||||||
|
t.Files = make(map[string]types.File)
|
||||||
|
|
||||||
cfg := config.Get()
|
cfg := config.Get()
|
||||||
|
validFiles := 0
|
||||||
|
filesWithLinks := 0
|
||||||
|
|
||||||
for _, f := range data.Files {
|
for _, f := range data.Files {
|
||||||
fileName := filepath.Base(f.Name)
|
fileName := filepath.Base(f.Name)
|
||||||
|
|
||||||
if !tb.addSamples && utils.IsSampleFile(f.AbsolutePath) {
|
if !tb.addSamples && utils.IsSampleFile(f.AbsolutePath) {
|
||||||
// Skip sample files
|
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
if !cfg.IsAllowedFile(fileName) {
|
if !cfg.IsAllowedFile(fileName) {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
@@ -290,6 +337,8 @@ func (tb *Torbox) UpdateTorrent(t *types.Torrent) error {
|
|||||||
if !cfg.IsSizeAllowed(f.Size) {
|
if !cfg.IsSizeAllowed(f.Size) {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
|
validFiles++
|
||||||
file := types.File{
|
file := types.File{
|
||||||
TorrentId: t.Id,
|
TorrentId: t.Id,
|
||||||
Id: strconv.Itoa(f.Id),
|
Id: strconv.Itoa(f.Id),
|
||||||
@@ -297,8 +346,16 @@ func (tb *Torbox) UpdateTorrent(t *types.Torrent) error {
|
|||||||
Size: f.Size,
|
Size: f.Size,
|
||||||
Path: fileName,
|
Path: fileName,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// For downloaded torrents, set a placeholder link to indicate file is available
|
||||||
|
if data.DownloadFinished {
|
||||||
|
file.Link = fmt.Sprintf("torbox://%s/%s", t.Id, strconv.Itoa(f.Id))
|
||||||
|
filesWithLinks++
|
||||||
|
}
|
||||||
|
|
||||||
t.Files[fileName] = file
|
t.Files[fileName] = file
|
||||||
}
|
}
|
||||||
|
|
||||||
var cleanPath string
|
var cleanPath string
|
||||||
if len(t.Files) > 0 {
|
if len(t.Files) > 0 {
|
||||||
cleanPath = path.Clean(data.Files[0].Name)
|
cleanPath = path.Clean(data.Files[0].Name)
|
||||||
@@ -307,11 +364,11 @@ func (tb *Torbox) UpdateTorrent(t *types.Torrent) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
t.OriginalFilename = strings.Split(cleanPath, "/")[0]
|
t.OriginalFilename = strings.Split(cleanPath, "/")[0]
|
||||||
t.Debrid = tb.Name
|
t.Debrid = tb.name
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (tb *Torbox) CheckStatus(torrent *types.Torrent, isSymlink bool) (*types.Torrent, error) {
|
func (tb *Torbox) CheckStatus(torrent *types.Torrent) (*types.Torrent, error) {
|
||||||
for {
|
for {
|
||||||
err := tb.UpdateTorrent(torrent)
|
err := tb.UpdateTorrent(torrent)
|
||||||
|
|
||||||
@@ -321,13 +378,7 @@ func (tb *Torbox) CheckStatus(torrent *types.Torrent, isSymlink bool) (*types.To
|
|||||||
status := torrent.Status
|
status := torrent.Status
|
||||||
if status == "downloaded" {
|
if status == "downloaded" {
|
||||||
tb.logger.Info().Msgf("Torrent: %s downloaded", torrent.Name)
|
tb.logger.Info().Msgf("Torrent: %s downloaded", torrent.Name)
|
||||||
if !isSymlink {
|
return torrent, nil
|
||||||
err = tb.GenerateDownloadLinks(torrent)
|
|
||||||
if err != nil {
|
|
||||||
return torrent, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
break
|
|
||||||
} else if utils.Contains(tb.GetDownloadingStatus(), status) {
|
} else if utils.Contains(tb.GetDownloadingStatus(), status) {
|
||||||
if !torrent.DownloadUncached {
|
if !torrent.DownloadUncached {
|
||||||
return torrent, fmt.Errorf("torrent: %s not cached", torrent.Name)
|
return torrent, fmt.Errorf("torrent: %s not cached", torrent.Name)
|
||||||
@@ -340,7 +391,6 @@ func (tb *Torbox) CheckStatus(torrent *types.Torrent, isSymlink bool) (*types.To
|
|||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
return torrent, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (tb *Torbox) DeleteTorrent(torrentId string) error {
|
func (tb *Torbox) DeleteTorrent(torrentId string) error {
|
||||||
@@ -355,8 +405,9 @@ func (tb *Torbox) DeleteTorrent(torrentId string) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (tb *Torbox) GenerateDownloadLinks(t *types.Torrent) error {
|
func (tb *Torbox) GetFileDownloadLinks(t *types.Torrent) error {
|
||||||
filesCh := make(chan types.File, len(t.Files))
|
filesCh := make(chan types.File, len(t.Files))
|
||||||
|
linkCh := make(chan *types.DownloadLink)
|
||||||
errCh := make(chan error, len(t.Files))
|
errCh := make(chan error, len(t.Files))
|
||||||
|
|
||||||
var wg sync.WaitGroup
|
var wg sync.WaitGroup
|
||||||
@@ -369,13 +420,17 @@ func (tb *Torbox) GenerateDownloadLinks(t *types.Torrent) error {
|
|||||||
errCh <- err
|
errCh <- err
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
file.DownloadLink = link
|
if link != nil {
|
||||||
|
linkCh <- link
|
||||||
|
file.DownloadLink = link
|
||||||
|
}
|
||||||
filesCh <- file
|
filesCh <- file
|
||||||
}()
|
}()
|
||||||
}
|
}
|
||||||
go func() {
|
go func() {
|
||||||
wg.Wait()
|
wg.Wait()
|
||||||
close(filesCh)
|
close(filesCh)
|
||||||
|
close(linkCh)
|
||||||
close(errCh)
|
close(errCh)
|
||||||
}()
|
}()
|
||||||
|
|
||||||
@@ -385,6 +440,13 @@ func (tb *Torbox) GenerateDownloadLinks(t *types.Torrent) error {
|
|||||||
files[file.Name] = file
|
files[file.Name] = file
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Collect download links
|
||||||
|
for link := range linkCh {
|
||||||
|
if link != nil {
|
||||||
|
tb.accounts.SetDownloadLink(link.Link, link)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// Check for errors
|
// Check for errors
|
||||||
for err := range errCh {
|
for err := range errCh {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -403,48 +465,153 @@ func (tb *Torbox) GetDownloadLink(t *types.Torrent, file *types.File) (*types.Do
|
|||||||
query.Add("token", tb.APIKey)
|
query.Add("token", tb.APIKey)
|
||||||
query.Add("file_id", file.Id)
|
query.Add("file_id", file.Id)
|
||||||
url += "?" + query.Encode()
|
url += "?" + query.Encode()
|
||||||
|
|
||||||
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
||||||
resp, err := tb.client.MakeRequest(req)
|
resp, err := tb.client.MakeRequest(req)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
tb.logger.Error().
|
||||||
|
Err(err).
|
||||||
|
Str("torrent_id", t.Id).
|
||||||
|
Str("file_id", file.Id).
|
||||||
|
Msg("Failed to make request to Torbox API")
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
var data DownloadLinksResponse
|
var data DownloadLinksResponse
|
||||||
if err = json.Unmarshal(resp, &data); err != nil {
|
if err = json.Unmarshal(resp, &data); err != nil {
|
||||||
|
tb.logger.Error().
|
||||||
|
Err(err).
|
||||||
|
Str("torrent_id", t.Id).
|
||||||
|
Str("file_id", file.Id).
|
||||||
|
Msg("Failed to unmarshal Torbox API response")
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
if data.Data == nil {
|
if data.Data == nil {
|
||||||
|
tb.logger.Error().
|
||||||
|
Str("torrent_id", t.Id).
|
||||||
|
Str("file_id", file.Id).
|
||||||
|
Bool("success", data.Success).
|
||||||
|
Interface("error", data.Error).
|
||||||
|
Str("detail", data.Detail).
|
||||||
|
Msg("Torbox API returned no data")
|
||||||
return nil, fmt.Errorf("error getting download links")
|
return nil, fmt.Errorf("error getting download links")
|
||||||
}
|
}
|
||||||
|
|
||||||
link := *data.Data
|
link := *data.Data
|
||||||
if link == "" {
|
if link == "" {
|
||||||
|
tb.logger.Error().
|
||||||
|
Str("torrent_id", t.Id).
|
||||||
|
Str("file_id", file.Id).
|
||||||
|
Msg("Torbox API returned empty download link")
|
||||||
return nil, fmt.Errorf("error getting download links")
|
return nil, fmt.Errorf("error getting download links")
|
||||||
}
|
}
|
||||||
return &types.DownloadLink{
|
|
||||||
|
now := time.Now()
|
||||||
|
downloadLink := &types.DownloadLink{
|
||||||
Link: file.Link,
|
Link: file.Link,
|
||||||
DownloadLink: link,
|
DownloadLink: link,
|
||||||
Id: file.Id,
|
Id: file.Id,
|
||||||
AccountId: "0",
|
Generated: now,
|
||||||
Generated: time.Now(),
|
ExpiresAt: now.Add(tb.autoExpiresLinksAfter),
|
||||||
}, nil
|
}
|
||||||
|
|
||||||
|
return downloadLink, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (tb *Torbox) GetDownloadingStatus() []string {
|
func (tb *Torbox) GetDownloadingStatus() []string {
|
||||||
return []string{"downloading"}
|
return []string{"downloading"}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (tb *Torbox) GetCheckCached() bool {
|
|
||||||
return tb.checkCached
|
|
||||||
}
|
|
||||||
|
|
||||||
func (tb *Torbox) GetTorrents() ([]*types.Torrent, error) {
|
func (tb *Torbox) GetTorrents() ([]*types.Torrent, error) {
|
||||||
return nil, nil
|
url := fmt.Sprintf("%s/api/torrents/mylist", tb.Host)
|
||||||
|
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
||||||
|
resp, err := tb.client.MakeRequest(req)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var res TorrentsListResponse
|
||||||
|
err = json.Unmarshal(resp, &res)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if !res.Success || res.Data == nil {
|
||||||
|
return nil, fmt.Errorf("torbox API error: %v", res.Error)
|
||||||
|
}
|
||||||
|
|
||||||
|
torrents := make([]*types.Torrent, 0, len(*res.Data))
|
||||||
|
cfg := config.Get()
|
||||||
|
|
||||||
|
for _, data := range *res.Data {
|
||||||
|
t := &types.Torrent{
|
||||||
|
Id: strconv.Itoa(data.Id),
|
||||||
|
Name: data.Name,
|
||||||
|
Bytes: data.Size,
|
||||||
|
Folder: data.Name,
|
||||||
|
Progress: data.Progress * 100,
|
||||||
|
Status: tb.getTorboxStatus(data.DownloadState, data.DownloadFinished),
|
||||||
|
Speed: data.DownloadSpeed,
|
||||||
|
Seeders: data.Seeds,
|
||||||
|
Filename: data.Name,
|
||||||
|
OriginalFilename: data.Name,
|
||||||
|
MountPath: tb.MountPath,
|
||||||
|
Debrid: tb.name,
|
||||||
|
Files: make(map[string]types.File),
|
||||||
|
Added: data.CreatedAt.Format(time.RFC3339),
|
||||||
|
InfoHash: data.Hash,
|
||||||
|
}
|
||||||
|
|
||||||
|
// Process files
|
||||||
|
for _, f := range data.Files {
|
||||||
|
fileName := filepath.Base(f.Name)
|
||||||
|
if !tb.addSamples && utils.IsSampleFile(f.AbsolutePath) {
|
||||||
|
// Skip sample files
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if !cfg.IsAllowedFile(fileName) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if !cfg.IsSizeAllowed(f.Size) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
file := types.File{
|
||||||
|
TorrentId: t.Id,
|
||||||
|
Id: strconv.Itoa(f.Id),
|
||||||
|
Name: fileName,
|
||||||
|
Size: f.Size,
|
||||||
|
Path: f.Name,
|
||||||
|
}
|
||||||
|
|
||||||
|
// For downloaded torrents, set a placeholder link to indicate file is available
|
||||||
|
if data.DownloadFinished {
|
||||||
|
file.Link = fmt.Sprintf("torbox://%s/%d", t.Id, f.Id)
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Files[fileName] = file
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set original filename based on first file or torrent name
|
||||||
|
var cleanPath string
|
||||||
|
if len(t.Files) > 0 {
|
||||||
|
cleanPath = path.Clean(data.Files[0].Name)
|
||||||
|
} else {
|
||||||
|
cleanPath = path.Clean(data.Name)
|
||||||
|
}
|
||||||
|
t.OriginalFilename = strings.Split(cleanPath, "/")[0]
|
||||||
|
|
||||||
|
torrents = append(torrents, t)
|
||||||
|
}
|
||||||
|
|
||||||
|
return torrents, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (tb *Torbox) GetDownloadUncached() bool {
|
func (tb *Torbox) GetDownloadUncached() bool {
|
||||||
return tb.DownloadUncached
|
return tb.DownloadUncached
|
||||||
}
|
}
|
||||||
|
|
||||||
func (tb *Torbox) GetDownloads() (map[string]types.DownloadLink, error) {
|
func (tb *Torbox) GetDownloadLinks() (map[string]*types.DownloadLink, error) {
|
||||||
return nil, nil
|
return nil, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -456,13 +623,15 @@ func (tb *Torbox) GetMountPath() string {
|
|||||||
return tb.MountPath
|
return tb.MountPath
|
||||||
}
|
}
|
||||||
|
|
||||||
func (tb *Torbox) DisableAccount(accountId string) {
|
|
||||||
}
|
|
||||||
|
|
||||||
func (tb *Torbox) ResetActiveDownloadKeys() {
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
func (tb *Torbox) DeleteDownloadLink(linkId string) error {
|
func (tb *Torbox) DeleteDownloadLink(linkId string) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (tb *Torbox) GetAvailableSlots() (int, error) {
|
||||||
|
//TODO: Implement the logic to check available slots for Torbox
|
||||||
|
return 0, fmt.Errorf("not implemented")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (tb *Torbox) Accounts() *types.Accounts {
|
||||||
|
return tb.accounts
|
||||||
|
}
|
||||||
@@ -57,7 +57,7 @@ type torboxInfo struct {
|
|||||||
} `json:"files"`
|
} `json:"files"`
|
||||||
DownloadPath string `json:"download_path"`
|
DownloadPath string `json:"download_path"`
|
||||||
InactiveCheck int `json:"inactive_check"`
|
InactiveCheck int `json:"inactive_check"`
|
||||||
Availability int `json:"availability"`
|
Availability float64 `json:"availability"`
|
||||||
DownloadFinished bool `json:"download_finished"`
|
DownloadFinished bool `json:"download_finished"`
|
||||||
Tracker interface{} `json:"tracker"`
|
Tracker interface{} `json:"tracker"`
|
||||||
TotalUploaded int `json:"total_uploaded"`
|
TotalUploaded int `json:"total_uploaded"`
|
||||||
@@ -73,3 +73,5 @@ type torboxInfo struct {
|
|||||||
type InfoResponse APIResponse[torboxInfo]
|
type InfoResponse APIResponse[torboxInfo]
|
||||||
|
|
||||||
type DownloadLinksResponse APIResponse[string]
|
type DownloadLinksResponse APIResponse[string]
|
||||||
|
|
||||||
|
type TorrentsListResponse APIResponse[[]torboxInfo]
|
||||||
@@ -1,4 +1,4 @@
|
|||||||
package debrid
|
package store
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bufio"
|
"bufio"
|
||||||
@@ -6,6 +6,7 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"github.com/sirrobot01/decypharr/pkg/rclone"
|
||||||
"os"
|
"os"
|
||||||
"path"
|
"path"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
@@ -16,13 +17,16 @@ import (
|
|||||||
"sync/atomic"
|
"sync/atomic"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/sirrobot01/decypharr/pkg/debrid/types"
|
||||||
|
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
|
_ "time/tzdata"
|
||||||
|
|
||||||
"github.com/go-co-op/gocron/v2"
|
"github.com/go-co-op/gocron/v2"
|
||||||
"github.com/rs/zerolog"
|
"github.com/rs/zerolog"
|
||||||
"github.com/sirrobot01/decypharr/internal/config"
|
"github.com/sirrobot01/decypharr/internal/config"
|
||||||
"github.com/sirrobot01/decypharr/internal/logger"
|
"github.com/sirrobot01/decypharr/internal/logger"
|
||||||
"github.com/sirrobot01/decypharr/internal/utils"
|
"github.com/sirrobot01/decypharr/internal/utils"
|
||||||
"github.com/sirrobot01/decypharr/pkg/debrid/types"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
type WebDavFolderNaming string
|
type WebDavFolderNaming string
|
||||||
@@ -72,7 +76,6 @@ type Cache struct {
|
|||||||
logger zerolog.Logger
|
logger zerolog.Logger
|
||||||
|
|
||||||
torrents *torrentCache
|
torrents *torrentCache
|
||||||
downloadLinks *downloadLinkCache
|
|
||||||
invalidDownloadLinks sync.Map
|
invalidDownloadLinks sync.Map
|
||||||
folderNaming WebDavFolderNaming
|
folderNaming WebDavFolderNaming
|
||||||
|
|
||||||
@@ -89,10 +92,9 @@ type Cache struct {
|
|||||||
ready chan struct{}
|
ready chan struct{}
|
||||||
|
|
||||||
// config
|
// config
|
||||||
workers int
|
workers int
|
||||||
torrentRefreshInterval string
|
torrentRefreshInterval string
|
||||||
downloadLinksRefreshInterval string
|
downloadLinksRefreshInterval string
|
||||||
autoExpiresLinksAfterDuration time.Duration
|
|
||||||
|
|
||||||
// refresh mutex
|
// refresh mutex
|
||||||
downloadLinksRefreshMu sync.RWMutex // for refreshing download links
|
downloadLinksRefreshMu sync.RWMutex // for refreshing download links
|
||||||
@@ -105,18 +107,29 @@ type Cache struct {
|
|||||||
|
|
||||||
config config.Debrid
|
config config.Debrid
|
||||||
customFolders []string
|
customFolders []string
|
||||||
|
mounter *rclone.Mount
|
||||||
}
|
}
|
||||||
|
|
||||||
func New(dc config.Debrid, client types.Client) *Cache {
|
func NewDebridCache(dc config.Debrid, client types.Client, mounter *rclone.Mount) *Cache {
|
||||||
cfg := config.Get()
|
cfg := config.Get()
|
||||||
cet, _ := time.LoadLocation("CET")
|
cet, err := time.LoadLocation("CET")
|
||||||
cetSc, _ := gocron.NewScheduler(gocron.WithLocation(cet))
|
if err != nil {
|
||||||
scheduler, _ := gocron.NewScheduler(gocron.WithLocation(time.Local))
|
cet, err = time.LoadLocation("Europe/Berlin") // Fallback to Berlin if CET fails
|
||||||
|
if err != nil {
|
||||||
autoExpiresLinksAfter, err := time.ParseDuration(dc.AutoExpireLinksAfter)
|
cet = time.FixedZone("CET", 1*60*60) // Fallback to a fixed CET zone
|
||||||
if autoExpiresLinksAfter == 0 || err != nil {
|
}
|
||||||
autoExpiresLinksAfter = 48 * time.Hour
|
|
||||||
}
|
}
|
||||||
|
cetSc, err := gocron.NewScheduler(gocron.WithLocation(cet))
|
||||||
|
if err != nil {
|
||||||
|
// If we can't create a CET scheduler, fallback to local time
|
||||||
|
cetSc, _ = gocron.NewScheduler(gocron.WithLocation(time.Local))
|
||||||
|
}
|
||||||
|
scheduler, err := gocron.NewScheduler(gocron.WithLocation(time.Local))
|
||||||
|
if err != nil {
|
||||||
|
// If we can't create a local scheduler, fallback to CET
|
||||||
|
scheduler = cetSc
|
||||||
|
}
|
||||||
|
|
||||||
var customFolders []string
|
var customFolders []string
|
||||||
dirFilters := map[string][]directoryFilter{}
|
dirFilters := map[string][]directoryFilter{}
|
||||||
for name, value := range dc.Directories {
|
for name, value := range dc.Directories {
|
||||||
@@ -135,25 +148,24 @@ func New(dc config.Debrid, client types.Client) *Cache {
|
|||||||
customFolders = append(customFolders, name)
|
customFolders = append(customFolders, name)
|
||||||
|
|
||||||
}
|
}
|
||||||
_log := logger.New(fmt.Sprintf("%s-webdav", client.GetName()))
|
_log := logger.New(fmt.Sprintf("%s-webdav", client.Name()))
|
||||||
c := &Cache{
|
c := &Cache{
|
||||||
dir: filepath.Join(cfg.Path, "cache", dc.Name), // path to save cache files
|
dir: filepath.Join(cfg.Path, "cache", dc.Name), // path to save cache files
|
||||||
|
|
||||||
torrents: newTorrentCache(dirFilters),
|
torrents: newTorrentCache(dirFilters),
|
||||||
client: client,
|
client: client,
|
||||||
logger: _log,
|
logger: _log,
|
||||||
workers: dc.Workers,
|
workers: dc.Workers,
|
||||||
downloadLinks: newDownloadLinkCache(),
|
torrentRefreshInterval: dc.TorrentsRefreshInterval,
|
||||||
torrentRefreshInterval: dc.TorrentsRefreshInterval,
|
downloadLinksRefreshInterval: dc.DownloadLinksRefreshInterval,
|
||||||
downloadLinksRefreshInterval: dc.DownloadLinksRefreshInterval,
|
folderNaming: WebDavFolderNaming(dc.FolderNaming),
|
||||||
folderNaming: WebDavFolderNaming(dc.FolderNaming),
|
saveSemaphore: make(chan struct{}, 50),
|
||||||
autoExpiresLinksAfterDuration: autoExpiresLinksAfter,
|
cetScheduler: cetSc,
|
||||||
saveSemaphore: make(chan struct{}, 50),
|
scheduler: scheduler,
|
||||||
cetScheduler: cetSc,
|
|
||||||
scheduler: scheduler,
|
|
||||||
|
|
||||||
config: dc,
|
config: dc,
|
||||||
customFolders: customFolders,
|
customFolders: customFolders,
|
||||||
|
mounter: mounter,
|
||||||
|
|
||||||
ready: make(chan struct{}),
|
ready: make(chan struct{}),
|
||||||
}
|
}
|
||||||
@@ -177,6 +189,15 @@ func (c *Cache) StreamWithRclone() bool {
|
|||||||
// and before you discard the instance on a restart.
|
// and before you discard the instance on a restart.
|
||||||
func (c *Cache) Reset() {
|
func (c *Cache) Reset() {
|
||||||
|
|
||||||
|
// Unmount first
|
||||||
|
if c.mounter != nil && c.mounter.IsMounted() {
|
||||||
|
if err := c.mounter.Unmount(); err != nil {
|
||||||
|
c.logger.Error().Err(err).Msgf("Failed to unmount %s", c.config.Name)
|
||||||
|
} else {
|
||||||
|
c.logger.Info().Msgf("Unmounted %s", c.config.Name)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
if err := c.scheduler.StopJobs(); err != nil {
|
if err := c.scheduler.StopJobs(); err != nil {
|
||||||
c.logger.Error().Err(err).Msg("Failed to stop scheduler jobs")
|
c.logger.Error().Err(err).Msg("Failed to stop scheduler jobs")
|
||||||
}
|
}
|
||||||
@@ -189,14 +210,13 @@ func (c *Cache) Reset() {
|
|||||||
c.listingDebouncer.Stop()
|
c.listingDebouncer.Stop()
|
||||||
|
|
||||||
// Close the repair channel
|
// Close the repair channel
|
||||||
close(c.repairChan)
|
if c.repairChan != nil {
|
||||||
|
close(c.repairChan)
|
||||||
|
}
|
||||||
|
|
||||||
// 1. Reset torrent storage
|
// 1. Reset torrent storage
|
||||||
c.torrents.reset()
|
c.torrents.reset()
|
||||||
|
|
||||||
// 2. Reset download-link cache
|
|
||||||
c.downloadLinks.reset()
|
|
||||||
|
|
||||||
// 3. Clear any sync.Maps
|
// 3. Clear any sync.Maps
|
||||||
c.invalidDownloadLinks = sync.Map{}
|
c.invalidDownloadLinks = sync.Map{}
|
||||||
c.repairRequest = sync.Map{}
|
c.repairRequest = sync.Map{}
|
||||||
@@ -213,6 +233,9 @@ func (c *Cache) Reset() {
|
|||||||
|
|
||||||
// 6. Reset repair channel so the next Start() can spin it up
|
// 6. Reset repair channel so the next Start() can spin it up
|
||||||
c.repairChan = make(chan RepairRequest, 100)
|
c.repairChan = make(chan RepairRequest, 100)
|
||||||
|
|
||||||
|
// Reset the ready channel
|
||||||
|
c.ready = make(chan struct{})
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *Cache) Start(ctx context.Context) error {
|
func (c *Cache) Start(ctx context.Context) error {
|
||||||
@@ -220,9 +243,14 @@ func (c *Cache) Start(ctx context.Context) error {
|
|||||||
return fmt.Errorf("failed to create cache directory: %w", err)
|
return fmt.Errorf("failed to create cache directory: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
c.logger.Info().Msgf("Started indexing...")
|
||||||
|
|
||||||
if err := c.Sync(ctx); err != nil {
|
if err := c.Sync(ctx); err != nil {
|
||||||
return fmt.Errorf("failed to sync cache: %w", err)
|
return fmt.Errorf("failed to sync cache: %w", err)
|
||||||
}
|
}
|
||||||
|
// Fire the ready channel
|
||||||
|
close(c.ready)
|
||||||
|
c.logger.Info().Msgf("Indexing complete, %d torrents loaded", len(c.torrents.getAll()))
|
||||||
|
|
||||||
// initial download links
|
// initial download links
|
||||||
go c.refreshDownloadLinks(ctx)
|
go c.refreshDownloadLinks(ctx)
|
||||||
@@ -231,20 +259,21 @@ func (c *Cache) Start(ctx context.Context) error {
|
|||||||
c.logger.Error().Err(err).Msg("Failed to start cache worker")
|
c.logger.Error().Err(err).Msg("Failed to start cache worker")
|
||||||
}
|
}
|
||||||
|
|
||||||
c.repairChan = make(chan RepairRequest, 100)
|
c.repairChan = make(chan RepairRequest, 100) // Initialize the repair channel, max 100 requests buffered
|
||||||
go c.repairWorker(ctx)
|
go c.repairWorker(ctx)
|
||||||
|
|
||||||
// Fire the ready channel
|
|
||||||
close(c.ready)
|
|
||||||
cfg := config.Get()
|
cfg := config.Get()
|
||||||
name := c.client.GetName()
|
name := c.client.Name()
|
||||||
addr := cfg.BindAddress + ":" + cfg.Port + cfg.URLBase + "webdav/" + name + "/"
|
addr := cfg.BindAddress + ":" + cfg.Port + cfg.URLBase + "webdav/" + name + "/"
|
||||||
c.logger.Info().Msgf("%s WebDav server running at %s", name, addr)
|
c.logger.Info().Msgf("%s WebDav server running at %s", name, addr)
|
||||||
|
|
||||||
<-ctx.Done()
|
if c.mounter != nil {
|
||||||
c.logger.Info().Msgf("Stopping %s WebDav server", name)
|
if err := c.mounter.Mount(ctx); err != nil {
|
||||||
c.Reset()
|
c.logger.Error().Err(err).Msgf("Failed to mount %s", c.config.Name)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
c.logger.Warn().Msgf("Mounting is disabled for %s", c.config.Name)
|
||||||
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -307,10 +336,10 @@ func (c *Cache) load(ctx context.Context) (map[string]CachedTorrent, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
isComplete := true
|
isComplete := true
|
||||||
if len(ct.Files) != 0 {
|
if len(ct.GetFiles()) != 0 {
|
||||||
// Check if all files are valid, if not, delete the file.json and remove from cache.
|
// Check if all files are valid, if not, delete the file.json and remove from cache.
|
||||||
fs := make(map[string]types.File, len(ct.Files))
|
fs := make(map[string]types.File, len(ct.GetFiles()))
|
||||||
for _, f := range ct.Files {
|
for _, f := range ct.GetFiles() {
|
||||||
if f.Link == "" {
|
if f.Link == "" {
|
||||||
isComplete = false
|
isComplete = false
|
||||||
break
|
break
|
||||||
@@ -368,7 +397,7 @@ func (c *Cache) Sync(ctx context.Context) error {
|
|||||||
|
|
||||||
totalTorrents := len(torrents)
|
totalTorrents := len(torrents)
|
||||||
|
|
||||||
c.logger.Info().Msgf("%d torrents found from %s", totalTorrents, c.client.GetName())
|
c.logger.Info().Msgf("%d torrents found from %s", totalTorrents, c.client.Name())
|
||||||
|
|
||||||
newTorrents := make([]*types.Torrent, 0)
|
newTorrents := make([]*types.Torrent, 0)
|
||||||
idStore := make(map[string]struct{}, totalTorrents)
|
idStore := make(map[string]struct{}, totalTorrents)
|
||||||
@@ -390,9 +419,11 @@ func (c *Cache) Sync(ctx context.Context) error {
|
|||||||
if len(deletedTorrents) > 0 {
|
if len(deletedTorrents) > 0 {
|
||||||
c.logger.Info().Msgf("Found %d deleted torrents", len(deletedTorrents))
|
c.logger.Info().Msgf("Found %d deleted torrents", len(deletedTorrents))
|
||||||
for _, id := range deletedTorrents {
|
for _, id := range deletedTorrents {
|
||||||
if _, ok := cachedTorrents[id]; ok {
|
// Remove from cache and debrid service
|
||||||
c.deleteTorrent(id, false) // delete from cache
|
delete(cachedTorrents, id)
|
||||||
}
|
// Remove the json file from disk
|
||||||
|
c.removeFile(id, false)
|
||||||
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -505,9 +536,9 @@ func (c *Cache) setTorrent(t CachedTorrent, callback func(torrent CachedTorrent)
|
|||||||
updatedTorrent.Files = mergedFiles
|
updatedTorrent.Files = mergedFiles
|
||||||
}
|
}
|
||||||
c.torrents.set(torrentName, t, updatedTorrent)
|
c.torrents.set(torrentName, t, updatedTorrent)
|
||||||
c.SaveTorrent(t)
|
go c.SaveTorrent(t)
|
||||||
if callback != nil {
|
if callback != nil {
|
||||||
callback(updatedTorrent)
|
go callback(updatedTorrent)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -550,6 +581,10 @@ func (c *Cache) GetTorrents() map[string]CachedTorrent {
|
|||||||
return c.torrents.getAll()
|
return c.torrents.getAll()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (c *Cache) TotalTorrents() int {
|
||||||
|
return c.torrents.getAllCount()
|
||||||
|
}
|
||||||
|
|
||||||
func (c *Cache) GetTorrentByName(name string) *CachedTorrent {
|
func (c *Cache) GetTorrentByName(name string) *CachedTorrent {
|
||||||
if torrent, ok := c.torrents.getByName(name); ok {
|
if torrent, ok := c.torrents.getByName(name); ok {
|
||||||
return &torrent
|
return &torrent
|
||||||
@@ -557,6 +592,10 @@ func (c *Cache) GetTorrentByName(name string) *CachedTorrent {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (c *Cache) GetTorrentsName() map[string]CachedTorrent {
|
||||||
|
return c.torrents.getAllByName()
|
||||||
|
}
|
||||||
|
|
||||||
func (c *Cache) GetTorrent(torrentId string) *CachedTorrent {
|
func (c *Cache) GetTorrent(torrentId string) *CachedTorrent {
|
||||||
if torrent, ok := c.torrents.getByID(torrentId); ok {
|
if torrent, ok := c.torrents.getByID(torrentId); ok {
|
||||||
return &torrent
|
return &torrent
|
||||||
@@ -665,8 +704,13 @@ func (c *Cache) ProcessTorrent(t *types.Torrent) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if !isComplete(t.Files) {
|
if !isComplete(t.Files) {
|
||||||
c.logger.Debug().Msgf("Torrent %s is still not complete. Triggering a reinsert(disabled)", t.Id)
|
c.logger.Debug().
|
||||||
|
Str("torrent_id", t.Id).
|
||||||
|
Str("torrent_name", t.Name).
|
||||||
|
Int("total_files", len(t.Files)).
|
||||||
|
Msg("Torrent still not complete after refresh")
|
||||||
} else {
|
} else {
|
||||||
|
|
||||||
addedOn, err := time.Parse(time.RFC3339, t.Added)
|
addedOn, err := time.Parse(time.RFC3339, t.Added)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
addedOn = time.Now()
|
addedOn = time.Now()
|
||||||
@@ -683,8 +727,9 @@ func (c *Cache) ProcessTorrent(t *types.Torrent) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *Cache) AddTorrent(t *types.Torrent) error {
|
func (c *Cache) Add(t *types.Torrent) error {
|
||||||
if len(t.Files) == 0 {
|
if len(t.Files) == 0 {
|
||||||
|
c.logger.Warn().Msgf("Torrent %s has no files to add. Refreshing", t.Id)
|
||||||
if err := c.client.UpdateTorrent(t); err != nil {
|
if err := c.client.UpdateTorrent(t); err != nil {
|
||||||
return fmt.Errorf("failed to update torrent: %w", err)
|
return fmt.Errorf("failed to update torrent: %w", err)
|
||||||
}
|
}
|
||||||
@@ -701,12 +746,12 @@ func (c *Cache) AddTorrent(t *types.Torrent) error {
|
|||||||
c.setTorrent(ct, func(tor CachedTorrent) {
|
c.setTorrent(ct, func(tor CachedTorrent) {
|
||||||
c.RefreshListings(true)
|
c.RefreshListings(true)
|
||||||
})
|
})
|
||||||
go c.GenerateDownloadLinks(ct)
|
go c.GetFileDownloadLinks(ct)
|
||||||
return nil
|
return nil
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *Cache) GetClient() types.Client {
|
func (c *Cache) Client() types.Client {
|
||||||
return c.client
|
return c.client
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -744,19 +789,19 @@ func (c *Cache) deleteTorrent(id string, removeFromDebrid bool) bool {
|
|||||||
if torrent, ok := c.torrents.getByID(id); ok {
|
if torrent, ok := c.torrents.getByID(id); ok {
|
||||||
c.torrents.removeId(id) // Delete id from cache
|
c.torrents.removeId(id) // Delete id from cache
|
||||||
defer func() {
|
defer func() {
|
||||||
c.removeFromDB(id)
|
c.removeFile(id, false)
|
||||||
if removeFromDebrid {
|
if removeFromDebrid {
|
||||||
_ = c.client.DeleteTorrent(id) // Skip error handling, we don't care if it fails
|
_ = c.client.DeleteTorrent(id) // Skip error handling, we don't care if it fails
|
||||||
}
|
}
|
||||||
}() // defer delete from debrid
|
}() // defer delete from debrid
|
||||||
|
|
||||||
torrentName := torrent.Name
|
torrentName := c.GetTorrentFolder(torrent.Torrent)
|
||||||
|
|
||||||
if t, ok := c.torrents.getByName(torrentName); ok {
|
if t, ok := c.torrents.getByName(torrentName); ok {
|
||||||
|
|
||||||
newFiles := map[string]types.File{}
|
newFiles := map[string]types.File{}
|
||||||
newId := ""
|
newId := ""
|
||||||
for _, file := range t.Files {
|
for _, file := range t.GetFiles() {
|
||||||
if file.TorrentId != "" && file.TorrentId != id {
|
if file.TorrentId != "" && file.TorrentId != id {
|
||||||
if newId == "" && file.TorrentId != "" {
|
if newId == "" && file.TorrentId != "" {
|
||||||
newId = file.TorrentId
|
newId = file.TorrentId
|
||||||
@@ -787,7 +832,7 @@ func (c *Cache) DeleteTorrents(ids []string) {
|
|||||||
c.listingDebouncer.Call(true)
|
c.listingDebouncer.Call(true)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *Cache) removeFromDB(torrentId string) {
|
func (c *Cache) removeFile(torrentId string, moveToTrash bool) {
|
||||||
// Moves the torrent file to the trash
|
// Moves the torrent file to the trash
|
||||||
filePath := filepath.Join(c.dir, torrentId+".json")
|
filePath := filepath.Join(c.dir, torrentId+".json")
|
||||||
|
|
||||||
@@ -796,6 +841,14 @@ func (c *Cache) removeFromDB(torrentId string) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if !moveToTrash {
|
||||||
|
// If not moving to trash, delete the file directly
|
||||||
|
if err := os.Remove(filePath); err != nil {
|
||||||
|
c.logger.Error().Err(err).Msgf("Failed to remove file: %s", filePath)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
// Move the file to the trash
|
// Move the file to the trash
|
||||||
trashPath := filepath.Join(c.dir, "trash", torrentId+".json")
|
trashPath := filepath.Join(c.dir, "trash", torrentId+".json")
|
||||||
if err := os.MkdirAll(filepath.Dir(trashPath), 0755); err != nil {
|
if err := os.MkdirAll(filepath.Dir(trashPath), 0755); err != nil {
|
||||||
@@ -815,6 +868,40 @@ func (c *Cache) OnRemove(torrentId string) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *Cache) GetLogger() zerolog.Logger {
|
// RemoveFile removes a file from the torrent cache
|
||||||
|
// TODO sends a re-insert that removes the file from debrid
|
||||||
|
func (c *Cache) RemoveFile(torrentId string, filename string) error {
|
||||||
|
c.logger.Debug().Str("torrent_id", torrentId).Msgf("Removing file %s", filename)
|
||||||
|
torrent, ok := c.torrents.getByID(torrentId)
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("torrent %s not found", torrentId)
|
||||||
|
}
|
||||||
|
file, ok := torrent.GetFile(filename)
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("file %s not found in torrent %s", filename, torrentId)
|
||||||
|
}
|
||||||
|
file.Deleted = true
|
||||||
|
torrent.Files[filename] = file
|
||||||
|
|
||||||
|
// If the torrent has no files left, delete it
|
||||||
|
if len(torrent.GetFiles()) == 0 {
|
||||||
|
c.logger.Debug().Msgf("Torrent %s has no files left, deleting it", torrentId)
|
||||||
|
if err := c.DeleteTorrent(torrentId); err != nil {
|
||||||
|
return fmt.Errorf("failed to delete torrent %s: %w", torrentId, err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
c.setTorrent(torrent, func(torrent CachedTorrent) {
|
||||||
|
c.listingDebouncer.Call(true)
|
||||||
|
}) // Update the torrent in the cache
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Cache) Logger() zerolog.Logger {
|
||||||
return c.logger
|
return c.logger
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (c *Cache) GetConfig() config.Debrid {
|
||||||
|
return c.config
|
||||||
|
}
|
||||||
198
pkg/debrid/store/download_link.go
Normal file
@@ -0,0 +1,198 @@
|
|||||||
|
package store
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"github.com/sirrobot01/decypharr/internal/utils"
|
||||||
|
"github.com/sirrobot01/decypharr/pkg/debrid/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
type downloadLinkRequest struct {
|
||||||
|
result string
|
||||||
|
err error
|
||||||
|
done chan struct{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func newDownloadLinkRequest() *downloadLinkRequest {
|
||||||
|
return &downloadLinkRequest{
|
||||||
|
done: make(chan struct{}),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *downloadLinkRequest) Complete(result string, err error) {
|
||||||
|
r.result = result
|
||||||
|
r.err = err
|
||||||
|
close(r.done)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *downloadLinkRequest) Wait() (string, error) {
|
||||||
|
<-r.done
|
||||||
|
return r.result, r.err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Cache) GetDownloadLink(torrentName, filename, fileLink string) (string, error) {
|
||||||
|
// Check link cache
|
||||||
|
if dl, err := c.checkDownloadLink(fileLink); dl != "" && err == nil {
|
||||||
|
return dl, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if req, inFlight := c.downloadLinkRequests.Load(fileLink); inFlight {
|
||||||
|
// Wait for the other request to complete and use its result
|
||||||
|
result := req.(*downloadLinkRequest)
|
||||||
|
return result.Wait()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create a new request object
|
||||||
|
req := newDownloadLinkRequest()
|
||||||
|
c.downloadLinkRequests.Store(fileLink, req)
|
||||||
|
|
||||||
|
dl, err := c.fetchDownloadLink(torrentName, filename, fileLink)
|
||||||
|
if err != nil {
|
||||||
|
req.Complete("", err)
|
||||||
|
c.downloadLinkRequests.Delete(fileLink)
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
if dl == nil || dl.DownloadLink == "" {
|
||||||
|
err = fmt.Errorf("download link is empty for %s in torrent %s", filename, torrentName)
|
||||||
|
req.Complete("", err)
|
||||||
|
c.downloadLinkRequests.Delete(fileLink)
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
req.Complete(dl.DownloadLink, err)
|
||||||
|
c.downloadLinkRequests.Delete(fileLink)
|
||||||
|
return dl.DownloadLink, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Cache) fetchDownloadLink(torrentName, filename, fileLink string) (*types.DownloadLink, error) {
|
||||||
|
ct := c.GetTorrentByName(torrentName)
|
||||||
|
if ct == nil {
|
||||||
|
return nil, fmt.Errorf("torrent not found")
|
||||||
|
}
|
||||||
|
file, ok := ct.GetFile(filename)
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("file %s not found in torrent %s", filename, torrentName)
|
||||||
|
}
|
||||||
|
|
||||||
|
if file.Link == "" {
|
||||||
|
// file link is empty, refresh the torrent to get restricted links
|
||||||
|
ct = c.refreshTorrent(file.TorrentId) // Refresh the torrent from the debrid
|
||||||
|
if ct == nil {
|
||||||
|
return nil, fmt.Errorf("failed to refresh torrent")
|
||||||
|
} else {
|
||||||
|
file, ok = ct.GetFile(filename)
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("file %s not found in refreshed torrent %s", filename, torrentName)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// If file.Link is still empty, return
|
||||||
|
if file.Link == "" {
|
||||||
|
// Try to reinsert the torrent?
|
||||||
|
newCt, err := c.reInsertTorrent(ct)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to reinsert torrent. %w", err)
|
||||||
|
}
|
||||||
|
ct = newCt
|
||||||
|
file, ok = ct.GetFile(filename)
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("file %s not found in reinserted torrent %s", filename, torrentName)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
c.logger.Trace().Msgf("Getting download link for %s(%s)", filename, file.Link)
|
||||||
|
downloadLink, err := c.client.GetDownloadLink(ct.Torrent, &file)
|
||||||
|
if err != nil {
|
||||||
|
if errors.Is(err, utils.HosterUnavailableError) {
|
||||||
|
c.logger.Trace().
|
||||||
|
Str("filename", filename).
|
||||||
|
Str("torrent_id", ct.Id).
|
||||||
|
Msg("Hoster unavailable, attempting to reinsert torrent")
|
||||||
|
|
||||||
|
newCt, err := c.reInsertTorrent(ct)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to reinsert torrent: %w", err)
|
||||||
|
}
|
||||||
|
ct = newCt
|
||||||
|
file, ok = ct.GetFile(filename)
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("file %s not found in reinserted torrent %s", filename, torrentName)
|
||||||
|
}
|
||||||
|
// Retry getting the download link
|
||||||
|
downloadLink, err = c.client.GetDownloadLink(ct.Torrent, &file)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("retry failed to get download link: %w", err)
|
||||||
|
}
|
||||||
|
if downloadLink == nil {
|
||||||
|
return nil, fmt.Errorf("download link is empty after retry")
|
||||||
|
}
|
||||||
|
return nil, nil
|
||||||
|
} else if errors.Is(err, utils.TrafficExceededError) {
|
||||||
|
// This is likely a fair usage limit error
|
||||||
|
return nil, err
|
||||||
|
} else {
|
||||||
|
return nil, fmt.Errorf("failed to get download link: %w", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if downloadLink == nil {
|
||||||
|
return nil, fmt.Errorf("download link is empty")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set link to cache
|
||||||
|
go c.client.Accounts().SetDownloadLink(fileLink, downloadLink)
|
||||||
|
return downloadLink, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Cache) GetFileDownloadLinks(t CachedTorrent) {
|
||||||
|
if err := c.client.GetFileDownloadLinks(t.Torrent); err != nil {
|
||||||
|
c.logger.Error().Err(err).Str("torrent", t.Name).Msg("Failed to generate download links")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Cache) checkDownloadLink(link string) (string, error) {
|
||||||
|
|
||||||
|
dl, err := c.client.Accounts().GetDownloadLink(link)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
if !c.downloadLinkIsInvalid(dl.DownloadLink) {
|
||||||
|
return dl.DownloadLink, nil
|
||||||
|
}
|
||||||
|
return "", fmt.Errorf("download link not found for %s", link)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Cache) MarkDownloadLinkAsInvalid(link, downloadLink, reason string) {
|
||||||
|
c.invalidDownloadLinks.Store(downloadLink, reason)
|
||||||
|
// Remove the download api key from active
|
||||||
|
if reason == "bandwidth_exceeded" {
|
||||||
|
// Disable the account
|
||||||
|
_, account, err := c.client.Accounts().GetDownloadLinkWithAccount(link)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
c.client.Accounts().Disable(account)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Cache) downloadLinkIsInvalid(downloadLink string) bool {
|
||||||
|
if reason, ok := c.invalidDownloadLinks.Load(downloadLink); ok {
|
||||||
|
c.logger.Debug().Msgf("Download link %s is invalid: %s", downloadLink, reason)
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Cache) GetDownloadByteRange(torrentName, filename string) (*[2]int64, error) {
|
||||||
|
ct := c.GetTorrentByName(torrentName)
|
||||||
|
if ct == nil {
|
||||||
|
return nil, fmt.Errorf("torrent not found")
|
||||||
|
}
|
||||||
|
file := ct.Files[filename]
|
||||||
|
return file.ByteRange, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Cache) GetTotalActiveDownloadLinks() int {
|
||||||
|
return c.client.Accounts().GetLinksCount()
|
||||||
|
}
|
||||||
@@ -1,4 +1,4 @@
|
|||||||
package debrid
|
package store
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"github.com/sirrobot01/decypharr/pkg/debrid/types"
|
"github.com/sirrobot01/decypharr/pkg/debrid/types"
|
||||||
@@ -19,9 +19,24 @@ func mergeFiles(torrents ...CachedTorrent) map[string]types.File {
|
|||||||
})
|
})
|
||||||
|
|
||||||
for _, torrent := range torrents {
|
for _, torrent := range torrents {
|
||||||
for _, file := range torrent.Files {
|
for _, file := range torrent.GetFiles() {
|
||||||
merged[file.Name] = file
|
merged[file.Name] = file
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return merged
|
return merged
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (c *Cache) GetIngests() ([]types.IngestData, error) {
|
||||||
|
torrents := c.GetTorrents()
|
||||||
|
debridName := c.client.Name()
|
||||||
|
var ingests []types.IngestData
|
||||||
|
for _, torrent := range torrents {
|
||||||
|
ingests = append(ingests, types.IngestData{
|
||||||
|
Debrid: debridName,
|
||||||
|
Name: torrent.Filename,
|
||||||
|
Hash: torrent.InfoHash,
|
||||||
|
Size: torrent.Bytes,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
return ingests, nil
|
||||||
|
}
|
||||||
@@ -1,4 +1,4 @@
|
|||||||
package debrid
|
package store
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
@@ -127,76 +127,78 @@ func (c *Cache) refreshTorrents(ctx context.Context) {
|
|||||||
|
|
||||||
func (c *Cache) refreshRclone() error {
|
func (c *Cache) refreshRclone() error {
|
||||||
cfg := c.config
|
cfg := c.config
|
||||||
|
|
||||||
if cfg.RcUrl == "" {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if cfg.RcUrl == "" {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
client := &http.Client{
|
|
||||||
Timeout: 10 * time.Second,
|
|
||||||
Transport: &http.Transport{
|
|
||||||
MaxIdleConns: 10,
|
|
||||||
IdleConnTimeout: 30 * time.Second,
|
|
||||||
DisableCompression: false,
|
|
||||||
MaxIdleConnsPerHost: 5,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
// Create form data
|
|
||||||
data := ""
|
|
||||||
dirs := strings.FieldsFunc(cfg.RcRefreshDirs, func(r rune) bool {
|
dirs := strings.FieldsFunc(cfg.RcRefreshDirs, func(r rune) bool {
|
||||||
return r == ',' || r == '&'
|
return r == ',' || r == '&'
|
||||||
})
|
})
|
||||||
if len(dirs) == 0 {
|
if len(dirs) == 0 {
|
||||||
data = "dir=__all__"
|
dirs = []string{"__all__"}
|
||||||
} else {
|
|
||||||
for index, dir := range dirs {
|
|
||||||
if dir != "" {
|
|
||||||
if index == 0 {
|
|
||||||
data += "dir=" + dir
|
|
||||||
} else {
|
|
||||||
data += "&dir" + fmt.Sprint(index+1) + "=" + dir
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
if c.mounter != nil {
|
||||||
|
return c.mounter.RefreshDir(dirs)
|
||||||
|
} else {
|
||||||
|
return c.refreshRcloneWithRC(dirs)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
sendRequest := func(endpoint string) error {
|
func (c *Cache) refreshRcloneWithRC(dirs []string) error {
|
||||||
req, err := http.NewRequest("POST", fmt.Sprintf("%s/%s", cfg.RcUrl, endpoint), strings.NewReader(data))
|
cfg := c.config
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
|
if cfg.RcUrl == "" {
|
||||||
|
|
||||||
if cfg.RcUser != "" && cfg.RcPass != "" {
|
|
||||||
req.SetBasicAuth(cfg.RcUser, cfg.RcPass)
|
|
||||||
}
|
|
||||||
resp, err := client.Do(req)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer resp.Body.Close()
|
|
||||||
|
|
||||||
if resp.StatusCode != 200 {
|
|
||||||
body, _ := io.ReadAll(io.LimitReader(resp.Body, 1024))
|
|
||||||
return fmt.Errorf("failed to perform %s: %s - %s", endpoint, resp.Status, string(body))
|
|
||||||
}
|
|
||||||
|
|
||||||
_, _ = io.Copy(io.Discard, resp.Body)
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := sendRequest("vfs/forget"); err != nil {
|
client := http.DefaultClient
|
||||||
return err
|
// Create form data
|
||||||
|
data := c.buildRcloneRequestData(dirs)
|
||||||
|
|
||||||
|
if err := c.sendRcloneRequest(client, "vfs/forget", data); err != nil {
|
||||||
|
c.logger.Error().Err(err).Msg("Failed to send rclone vfs/forget request")
|
||||||
}
|
}
|
||||||
if err := sendRequest("vfs/refresh"); err != nil {
|
|
||||||
|
if err := c.sendRcloneRequest(client, "vfs/refresh", data); err != nil {
|
||||||
|
c.logger.Error().Err(err).Msg("Failed to send rclone vfs/refresh request")
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Cache) buildRcloneRequestData(dirs []string) string {
|
||||||
|
var data strings.Builder
|
||||||
|
for index, dir := range dirs {
|
||||||
|
if dir != "" {
|
||||||
|
if index == 0 {
|
||||||
|
data.WriteString("dir=" + dir)
|
||||||
|
} else {
|
||||||
|
data.WriteString("&dir" + fmt.Sprint(index+1) + "=" + dir)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return data.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Cache) sendRcloneRequest(client *http.Client, endpoint, data string) error {
|
||||||
|
req, err := http.NewRequest("POST", fmt.Sprintf("%s/%s", c.config.RcUrl, endpoint), strings.NewReader(data))
|
||||||
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
|
||||||
|
|
||||||
|
if c.config.RcUser != "" && c.config.RcPass != "" {
|
||||||
|
req.SetBasicAuth(c.config.RcUser, c.config.RcPass)
|
||||||
|
}
|
||||||
|
resp, err := client.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
if resp.StatusCode != 200 {
|
||||||
|
body, _ := io.ReadAll(io.LimitReader(resp.Body, 1024))
|
||||||
|
return fmt.Errorf("failed to perform %s: %s - %s", endpoint, resp.Status, string(body))
|
||||||
|
}
|
||||||
|
|
||||||
|
_, _ = io.Copy(io.Discard, resp.Body)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -241,27 +243,14 @@ func (c *Cache) refreshDownloadLinks(ctx context.Context) {
|
|||||||
}
|
}
|
||||||
defer c.downloadLinksRefreshMu.Unlock()
|
defer c.downloadLinksRefreshMu.Unlock()
|
||||||
|
|
||||||
downloadLinks, err := c.client.GetDownloads()
|
links, err := c.client.GetDownloadLinks()
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
c.logger.Error().Err(err).Msg("Failed to get download links")
|
c.logger.Error().Err(err).Msg("Failed to get download links")
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
for k, v := range downloadLinks {
|
|
||||||
// if link is generated in the last 24 hours, add it to cache
|
|
||||||
timeSince := time.Since(v.Generated)
|
|
||||||
if timeSince < c.autoExpiresLinksAfterDuration {
|
|
||||||
c.downloadLinks.Store(k, linkCache{
|
|
||||||
Id: v.Id,
|
|
||||||
accountId: v.AccountId,
|
|
||||||
link: v.DownloadLink,
|
|
||||||
expiresAt: v.Generated.Add(c.autoExpiresLinksAfterDuration - timeSince),
|
|
||||||
})
|
|
||||||
} else {
|
|
||||||
c.downloadLinks.Delete(k)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
c.logger.Trace().Msgf("Refreshed %d download links", len(downloadLinks))
|
c.client.Accounts().SetDownloadLinks(links)
|
||||||
|
|
||||||
|
c.logger.Debug().Msgf("Refreshed download %d links", c.client.Accounts().GetLinksCount())
|
||||||
}
|
}
|
||||||
@@ -1,10 +1,10 @@
|
|||||||
package debrid
|
package store
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"github.com/sirrobot01/decypharr/internal/request"
|
"github.com/sirrobot01/decypharr/internal/config"
|
||||||
"github.com/sirrobot01/decypharr/internal/utils"
|
"github.com/sirrobot01/decypharr/internal/utils"
|
||||||
"github.com/sirrobot01/decypharr/pkg/debrid/types"
|
"github.com/sirrobot01/decypharr/pkg/debrid/types"
|
||||||
"sync"
|
"sync"
|
||||||
@@ -59,11 +59,10 @@ func (c *Cache) markAsSuccessfullyReinserted(torrentId string) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *Cache) IsTorrentBroken(t *CachedTorrent, filenames []string) bool {
|
func (c *Cache) GetBrokenFiles(t *CachedTorrent, filenames []string) []string {
|
||||||
// Check torrent files
|
|
||||||
|
|
||||||
isBroken := false
|
|
||||||
files := make(map[string]types.File)
|
files := make(map[string]types.File)
|
||||||
|
repairStrategy := config.Get().Repair.Strategy
|
||||||
|
brokenFiles := make([]string, 0)
|
||||||
if len(filenames) > 0 {
|
if len(filenames) > 0 {
|
||||||
for name, f := range t.Files {
|
for name, f := range t.Files {
|
||||||
if utils.Contains(filenames, name) {
|
if utils.Contains(filenames, name) {
|
||||||
@@ -73,8 +72,6 @@ func (c *Cache) IsTorrentBroken(t *CachedTorrent, filenames []string) bool {
|
|||||||
} else {
|
} else {
|
||||||
files = t.Files
|
files = t.Files
|
||||||
}
|
}
|
||||||
|
|
||||||
// Check empty links
|
|
||||||
for _, f := range files {
|
for _, f := range files {
|
||||||
// Check if file is missing
|
// Check if file is missing
|
||||||
if f.Link == "" {
|
if f.Link == "" {
|
||||||
@@ -83,44 +80,92 @@ func (c *Cache) IsTorrentBroken(t *CachedTorrent, filenames []string) bool {
|
|||||||
t = newT
|
t = newT
|
||||||
} else {
|
} else {
|
||||||
c.logger.Error().Str("torrentId", t.Torrent.Id).Msg("Failed to refresh torrent")
|
c.logger.Error().Str("torrentId", t.Torrent.Id).Msg("Failed to refresh torrent")
|
||||||
return true
|
return filenames // Return original filenames if refresh fails(torrent is somehow botched)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if t.Torrent == nil {
|
if t.Torrent == nil {
|
||||||
c.logger.Error().Str("torrentId", t.Torrent.Id).Msg("Failed to refresh torrent")
|
c.logger.Error().Str("torrentId", t.Torrent.Id).Msg("Failed to refresh torrent")
|
||||||
return true
|
return filenames // Return original filenames if refresh fails(torrent is somehow botched)
|
||||||
}
|
}
|
||||||
|
|
||||||
files = t.Files
|
files = t.Files
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
// Use a mutex to protect brokenFiles slice and torrent-wide failure flag
|
||||||
|
var mu sync.Mutex
|
||||||
|
torrentWideFailed := false
|
||||||
|
|
||||||
|
wg.Add(len(files))
|
||||||
|
|
||||||
for _, f := range files {
|
for _, f := range files {
|
||||||
// Check if file link is still missing
|
go func(f types.File) {
|
||||||
if f.Link == "" {
|
defer wg.Done()
|
||||||
isBroken = true
|
|
||||||
break
|
select {
|
||||||
} else {
|
case <-ctx.Done():
|
||||||
// Check if file.Link not in the downloadLink Cache
|
return
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
|
||||||
|
if f.Link == "" {
|
||||||
|
mu.Lock()
|
||||||
|
if repairStrategy == config.RepairStrategyPerTorrent {
|
||||||
|
torrentWideFailed = true
|
||||||
|
mu.Unlock()
|
||||||
|
cancel() // Signal all other goroutines to stop
|
||||||
|
return
|
||||||
|
} else {
|
||||||
|
// per_file strategy - only mark this file as broken
|
||||||
|
brokenFiles = append(brokenFiles, f.Name)
|
||||||
|
}
|
||||||
|
mu.Unlock()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
if err := c.client.CheckLink(f.Link); err != nil {
|
if err := c.client.CheckLink(f.Link); err != nil {
|
||||||
if errors.Is(err, request.HosterUnavailableError) {
|
if errors.Is(err, utils.HosterUnavailableError) {
|
||||||
isBroken = true
|
mu.Lock()
|
||||||
break
|
if repairStrategy == config.RepairStrategyPerTorrent {
|
||||||
|
torrentWideFailed = true
|
||||||
|
mu.Unlock()
|
||||||
|
cancel() // Signal all other goroutines to stop
|
||||||
|
return
|
||||||
|
} else {
|
||||||
|
// per_file strategy - only mark this file as broken
|
||||||
|
brokenFiles = append(brokenFiles, f.Name)
|
||||||
|
}
|
||||||
|
mu.Unlock()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
}(f)
|
||||||
|
}
|
||||||
|
|
||||||
|
wg.Wait()
|
||||||
|
|
||||||
|
// Handle the result based on strategy
|
||||||
|
if repairStrategy == config.RepairStrategyPerTorrent && torrentWideFailed {
|
||||||
|
// Mark all files as broken for per_torrent strategy
|
||||||
|
for _, f := range files {
|
||||||
|
brokenFiles = append(brokenFiles, f.Name)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
// For per_file strategy, brokenFiles already contains only the broken ones
|
||||||
|
|
||||||
// Try to reinsert the torrent if it's broken
|
// Try to reinsert the torrent if it's broken
|
||||||
if isBroken && t.Torrent != nil {
|
if len(brokenFiles) > 0 && t.Torrent != nil {
|
||||||
// Check if the torrent is already in progress
|
// Check if the torrent is already in progress
|
||||||
if _, err := c.reInsertTorrent(t); err != nil {
|
if _, err := c.reInsertTorrent(t); err != nil {
|
||||||
c.logger.Error().Err(err).Str("torrentId", t.Torrent.Id).Msg("Failed to reinsert torrent")
|
c.logger.Error().Err(err).Str("torrentId", t.Torrent.Id).Msg("Failed to reinsert torrent")
|
||||||
return true
|
return brokenFiles // Return broken files if reinsert fails
|
||||||
}
|
}
|
||||||
return false
|
return nil // Return nil if the torrent was successfully reinserted
|
||||||
}
|
}
|
||||||
|
|
||||||
return isBroken
|
return brokenFiles
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *Cache) repairWorker(ctx context.Context) {
|
func (c *Cache) repairWorker(ctx context.Context) {
|
||||||
@@ -208,7 +253,7 @@ func (c *Cache) reInsertTorrent(ct *CachedTorrent) (*CachedTorrent, error) {
|
|||||||
return ct, fmt.Errorf("failed to submit magnet: empty torrent")
|
return ct, fmt.Errorf("failed to submit magnet: empty torrent")
|
||||||
}
|
}
|
||||||
newTorrent.DownloadUncached = false // Set to false, avoid re-downloading
|
newTorrent.DownloadUncached = false // Set to false, avoid re-downloading
|
||||||
newTorrent, err = c.client.CheckStatus(newTorrent, true)
|
newTorrent, err = c.client.CheckStatus(newTorrent)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if newTorrent != nil && newTorrent.Id != "" {
|
if newTorrent != nil && newTorrent.Id != "" {
|
||||||
// Delete the torrent if it was not downloaded
|
// Delete the torrent if it was not downloaded
|
||||||
@@ -223,7 +268,7 @@ func (c *Cache) reInsertTorrent(ct *CachedTorrent) (*CachedTorrent, error) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
addedOn = time.Now()
|
addedOn = time.Now()
|
||||||
}
|
}
|
||||||
for _, f := range newTorrent.Files {
|
for _, f := range newTorrent.GetFiles() {
|
||||||
if f.Link == "" {
|
if f.Link == "" {
|
||||||
c.markAsFailedToReinsert(oldID)
|
c.markAsFailedToReinsert(oldID)
|
||||||
return ct, fmt.Errorf("failed to reinsert torrent: empty link")
|
return ct, fmt.Errorf("failed to reinsert torrent: empty link")
|
||||||
@@ -256,7 +301,11 @@ func (c *Cache) reInsertTorrent(ct *CachedTorrent) (*CachedTorrent, error) {
|
|||||||
return ct, nil
|
return ct, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *Cache) resetInvalidLinks() {
|
func (c *Cache) resetInvalidLinks(ctx context.Context) {
|
||||||
|
c.logger.Debug().Msgf("Resetting accounts")
|
||||||
c.invalidDownloadLinks = sync.Map{}
|
c.invalidDownloadLinks = sync.Map{}
|
||||||
c.client.ResetActiveDownloadKeys() // Reset the active download keys
|
c.client.Accounts().Reset() // Reset the active download keys
|
||||||
|
|
||||||
|
// Refresh the download links
|
||||||
|
c.refreshDownloadLinks(ctx)
|
||||||
}
|
}
|
||||||
@@ -1,4 +1,4 @@
|
|||||||
package debrid
|
package store
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
@@ -40,13 +40,22 @@ type directoryFilter struct {
|
|||||||
ageThreshold time.Duration // only for last_added
|
ageThreshold time.Duration // only for last_added
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type torrents struct {
|
||||||
|
sync.RWMutex
|
||||||
|
byID map[string]CachedTorrent
|
||||||
|
byName map[string]CachedTorrent
|
||||||
|
}
|
||||||
|
|
||||||
|
type folders struct {
|
||||||
|
sync.RWMutex
|
||||||
|
listing map[string][]os.FileInfo // folder name to file listing
|
||||||
|
}
|
||||||
|
|
||||||
type torrentCache struct {
|
type torrentCache struct {
|
||||||
mu sync.Mutex
|
torrents torrents
|
||||||
byID map[string]CachedTorrent
|
|
||||||
byName map[string]CachedTorrent
|
|
||||||
listing atomic.Value
|
listing atomic.Value
|
||||||
folderListing map[string][]os.FileInfo
|
folders folders
|
||||||
folderListingMu sync.RWMutex
|
|
||||||
directoriesFilters map[string][]directoryFilter
|
directoriesFilters map[string][]directoryFilter
|
||||||
sortNeeded atomic.Bool
|
sortNeeded atomic.Bool
|
||||||
}
|
}
|
||||||
@@ -62,9 +71,13 @@ type sortableFile struct {
|
|||||||
func newTorrentCache(dirFilters map[string][]directoryFilter) *torrentCache {
|
func newTorrentCache(dirFilters map[string][]directoryFilter) *torrentCache {
|
||||||
|
|
||||||
tc := &torrentCache{
|
tc := &torrentCache{
|
||||||
byID: make(map[string]CachedTorrent),
|
torrents: torrents{
|
||||||
byName: make(map[string]CachedTorrent),
|
byID: make(map[string]CachedTorrent),
|
||||||
folderListing: make(map[string][]os.FileInfo),
|
byName: make(map[string]CachedTorrent),
|
||||||
|
},
|
||||||
|
folders: folders{
|
||||||
|
listing: make(map[string][]os.FileInfo),
|
||||||
|
},
|
||||||
directoriesFilters: dirFilters,
|
directoriesFilters: dirFilters,
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -74,41 +87,42 @@ func newTorrentCache(dirFilters map[string][]directoryFilter) *torrentCache {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (tc *torrentCache) reset() {
|
func (tc *torrentCache) reset() {
|
||||||
tc.mu.Lock()
|
tc.torrents.Lock()
|
||||||
tc.byID = make(map[string]CachedTorrent)
|
tc.torrents.byID = make(map[string]CachedTorrent)
|
||||||
tc.byName = make(map[string]CachedTorrent)
|
tc.torrents.byName = make(map[string]CachedTorrent)
|
||||||
tc.mu.Unlock()
|
tc.torrents.Unlock()
|
||||||
|
|
||||||
// reset the sorted listing
|
// reset the sorted listing
|
||||||
tc.sortNeeded.Store(false)
|
tc.sortNeeded.Store(false)
|
||||||
tc.listing.Store(make([]os.FileInfo, 0))
|
tc.listing.Store(make([]os.FileInfo, 0))
|
||||||
|
|
||||||
// reset any per-folder views
|
// reset any per-folder views
|
||||||
tc.folderListingMu.Lock()
|
tc.folders.Lock()
|
||||||
tc.folderListing = make(map[string][]os.FileInfo)
|
tc.folders.listing = make(map[string][]os.FileInfo)
|
||||||
tc.folderListingMu.Unlock()
|
tc.folders.Unlock()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (tc *torrentCache) getByID(id string) (CachedTorrent, bool) {
|
func (tc *torrentCache) getByID(id string) (CachedTorrent, bool) {
|
||||||
tc.mu.Lock()
|
tc.torrents.RLock()
|
||||||
defer tc.mu.Unlock()
|
defer tc.torrents.RUnlock()
|
||||||
torrent, exists := tc.byID[id]
|
torrent, exists := tc.torrents.byID[id]
|
||||||
return torrent, exists
|
return torrent, exists
|
||||||
}
|
}
|
||||||
|
|
||||||
func (tc *torrentCache) getByName(name string) (CachedTorrent, bool) {
|
func (tc *torrentCache) getByName(name string) (CachedTorrent, bool) {
|
||||||
tc.mu.Lock()
|
tc.torrents.RLock()
|
||||||
defer tc.mu.Unlock()
|
defer tc.torrents.RUnlock()
|
||||||
torrent, exists := tc.byName[name]
|
torrent, exists := tc.torrents.byName[name]
|
||||||
return torrent, exists
|
return torrent, exists
|
||||||
}
|
}
|
||||||
|
|
||||||
func (tc *torrentCache) set(name string, torrent, newTorrent CachedTorrent) {
|
func (tc *torrentCache) set(name string, torrent, newTorrent CachedTorrent) {
|
||||||
tc.mu.Lock()
|
tc.torrents.Lock()
|
||||||
// Set the id first
|
// Set the id first
|
||||||
tc.byID[newTorrent.Id] = torrent // This is the unadulterated torrent
|
|
||||||
tc.byName[name] = newTorrent // This is likely the modified torrent
|
tc.torrents.byName[name] = torrent
|
||||||
tc.mu.Unlock()
|
tc.torrents.byID[torrent.Id] = torrent // This is the unadulterated torrent
|
||||||
|
tc.torrents.Unlock()
|
||||||
tc.sortNeeded.Store(true)
|
tc.sortNeeded.Store(true)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -124,12 +138,12 @@ func (tc *torrentCache) getListing() []os.FileInfo {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (tc *torrentCache) getFolderListing(folderName string) []os.FileInfo {
|
func (tc *torrentCache) getFolderListing(folderName string) []os.FileInfo {
|
||||||
tc.folderListingMu.RLock()
|
tc.folders.RLock()
|
||||||
defer tc.folderListingMu.RUnlock()
|
defer tc.folders.RUnlock()
|
||||||
if folderName == "" {
|
if folderName == "" {
|
||||||
return tc.getListing()
|
return tc.getListing()
|
||||||
}
|
}
|
||||||
if folder, ok := tc.folderListing[folderName]; ok {
|
if folder, ok := tc.folders.listing[folderName]; ok {
|
||||||
return folder
|
return folder
|
||||||
}
|
}
|
||||||
// If folder not found, return empty slice
|
// If folder not found, return empty slice
|
||||||
@@ -138,13 +152,13 @@ func (tc *torrentCache) getFolderListing(folderName string) []os.FileInfo {
|
|||||||
|
|
||||||
func (tc *torrentCache) refreshListing() {
|
func (tc *torrentCache) refreshListing() {
|
||||||
|
|
||||||
tc.mu.Lock()
|
tc.torrents.RLock()
|
||||||
all := make([]sortableFile, 0, len(tc.byName))
|
all := make([]sortableFile, 0, len(tc.torrents.byName))
|
||||||
for name, t := range tc.byName {
|
for name, t := range tc.torrents.byName {
|
||||||
all = append(all, sortableFile{t.Id, name, t.AddedOn, t.Bytes, t.Bad})
|
all = append(all, sortableFile{t.Id, name, t.AddedOn, t.Bytes, t.Bad})
|
||||||
}
|
}
|
||||||
tc.sortNeeded.Store(false)
|
tc.sortNeeded.Store(false)
|
||||||
tc.mu.Unlock()
|
tc.torrents.RUnlock()
|
||||||
|
|
||||||
sort.Slice(all, func(i, j int) bool {
|
sort.Slice(all, func(i, j int) bool {
|
||||||
if all[i].name != all[j].name {
|
if all[i].name != all[j].name {
|
||||||
@@ -157,17 +171,18 @@ func (tc *torrentCache) refreshListing() {
|
|||||||
|
|
||||||
wg.Add(1) // for all listing
|
wg.Add(1) // for all listing
|
||||||
go func() {
|
go func() {
|
||||||
|
defer wg.Done()
|
||||||
listing := make([]os.FileInfo, len(all))
|
listing := make([]os.FileInfo, len(all))
|
||||||
for i, sf := range all {
|
for i, sf := range all {
|
||||||
listing[i] = &fileInfo{sf.id, sf.name, sf.size, 0755 | os.ModeDir, sf.modTime, true}
|
listing[i] = &fileInfo{sf.id, sf.name, sf.size, 0755 | os.ModeDir, sf.modTime, true}
|
||||||
}
|
}
|
||||||
tc.listing.Store(listing)
|
tc.listing.Store(listing)
|
||||||
}()
|
}()
|
||||||
wg.Done()
|
|
||||||
|
|
||||||
wg.Add(1)
|
wg.Add(1)
|
||||||
// For __bad__
|
// For __bad__
|
||||||
go func() {
|
go func() {
|
||||||
|
defer wg.Done()
|
||||||
listing := make([]os.FileInfo, 0)
|
listing := make([]os.FileInfo, 0)
|
||||||
for _, sf := range all {
|
for _, sf := range all {
|
||||||
if sf.bad {
|
if sf.bad {
|
||||||
@@ -181,15 +196,14 @@ func (tc *torrentCache) refreshListing() {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
tc.folderListingMu.Lock()
|
tc.folders.Lock()
|
||||||
if len(listing) > 0 {
|
if len(listing) > 0 {
|
||||||
tc.folderListing["__bad__"] = listing
|
tc.folders.listing["__bad__"] = listing
|
||||||
} else {
|
} else {
|
||||||
delete(tc.folderListing, "__bad__")
|
delete(tc.folders.listing, "__bad__")
|
||||||
}
|
}
|
||||||
tc.folderListingMu.Unlock()
|
tc.folders.Unlock()
|
||||||
}()
|
}()
|
||||||
wg.Done()
|
|
||||||
|
|
||||||
now := time.Now()
|
now := time.Now()
|
||||||
wg.Add(len(tc.directoriesFilters)) // for each directory filter
|
wg.Add(len(tc.directoriesFilters)) // for each directory filter
|
||||||
@@ -207,13 +221,13 @@ func (tc *torrentCache) refreshListing() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
tc.folderListingMu.Lock()
|
tc.folders.Lock()
|
||||||
if len(matched) > 0 {
|
if len(matched) > 0 {
|
||||||
tc.folderListing[dir] = matched
|
tc.folders.listing[dir] = matched
|
||||||
} else {
|
} else {
|
||||||
delete(tc.folderListing, dir)
|
delete(tc.folders.listing, dir)
|
||||||
}
|
}
|
||||||
tc.folderListingMu.Unlock()
|
tc.folders.Unlock()
|
||||||
}(dir, filters)
|
}(dir, filters)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -264,35 +278,51 @@ func (tc *torrentCache) torrentMatchDirectory(filters []directoryFilter, file so
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (tc *torrentCache) getAll() map[string]CachedTorrent {
|
func (tc *torrentCache) getAll() map[string]CachedTorrent {
|
||||||
tc.mu.Lock()
|
tc.torrents.RLock()
|
||||||
defer tc.mu.Unlock()
|
defer tc.torrents.RUnlock()
|
||||||
result := make(map[string]CachedTorrent)
|
result := make(map[string]CachedTorrent, len(tc.torrents.byID))
|
||||||
for name, torrent := range tc.byID {
|
for name, torrent := range tc.torrents.byID {
|
||||||
result[name] = torrent
|
result[name] = torrent
|
||||||
}
|
}
|
||||||
return result
|
return result
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (tc *torrentCache) getAllCount() int {
|
||||||
|
tc.torrents.RLock()
|
||||||
|
defer tc.torrents.RUnlock()
|
||||||
|
return len(tc.torrents.byID)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (tc *torrentCache) getAllByName() map[string]CachedTorrent {
|
||||||
|
tc.torrents.RLock()
|
||||||
|
defer tc.torrents.RUnlock()
|
||||||
|
results := make(map[string]CachedTorrent, len(tc.torrents.byName))
|
||||||
|
for name, torrent := range tc.torrents.byName {
|
||||||
|
results[name] = torrent
|
||||||
|
}
|
||||||
|
return results
|
||||||
|
}
|
||||||
|
|
||||||
func (tc *torrentCache) getIdMaps() map[string]struct{} {
|
func (tc *torrentCache) getIdMaps() map[string]struct{} {
|
||||||
tc.mu.Lock()
|
tc.torrents.RLock()
|
||||||
defer tc.mu.Unlock()
|
defer tc.torrents.RUnlock()
|
||||||
res := make(map[string]struct{}, len(tc.byID))
|
res := make(map[string]struct{}, len(tc.torrents.byID))
|
||||||
for id := range tc.byID {
|
for id := range tc.torrents.byID {
|
||||||
res[id] = struct{}{}
|
res[id] = struct{}{}
|
||||||
}
|
}
|
||||||
return res
|
return res
|
||||||
}
|
}
|
||||||
|
|
||||||
func (tc *torrentCache) removeId(id string) {
|
func (tc *torrentCache) removeId(id string) {
|
||||||
tc.mu.Lock()
|
tc.torrents.Lock()
|
||||||
defer tc.mu.Unlock()
|
defer tc.torrents.Unlock()
|
||||||
delete(tc.byID, id)
|
delete(tc.torrents.byID, id)
|
||||||
tc.sortNeeded.Store(true)
|
tc.sortNeeded.Store(true)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (tc *torrentCache) remove(name string) {
|
func (tc *torrentCache) remove(name string) {
|
||||||
tc.mu.Lock()
|
tc.torrents.Lock()
|
||||||
defer tc.mu.Unlock()
|
defer tc.torrents.Unlock()
|
||||||
delete(tc.byName, name)
|
delete(tc.torrents.byName, name)
|
||||||
tc.sortNeeded.Store(true)
|
tc.sortNeeded.Store(true)
|
||||||
}
|
}
|
||||||
@@ -1,4 +1,4 @@
|
|||||||
package debrid
|
package store
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
@@ -9,6 +9,9 @@ import (
|
|||||||
func (c *Cache) StartSchedule(ctx context.Context) error {
|
func (c *Cache) StartSchedule(ctx context.Context) error {
|
||||||
// For now, we just want to refresh the listing and download links
|
// For now, we just want to refresh the listing and download links
|
||||||
|
|
||||||
|
// Stop any existing jobs before starting new ones
|
||||||
|
c.scheduler.RemoveByTags("decypharr")
|
||||||
|
|
||||||
// Schedule download link refresh job
|
// Schedule download link refresh job
|
||||||
if jd, err := utils.ConvertToJobDef(c.downloadLinksRefreshInterval); err != nil {
|
if jd, err := utils.ConvertToJobDef(c.downloadLinksRefreshInterval); err != nil {
|
||||||
c.logger.Error().Err(err).Msg("Failed to convert download link refresh interval to job definition")
|
c.logger.Error().Err(err).Msg("Failed to convert download link refresh interval to job definition")
|
||||||
@@ -16,7 +19,7 @@ func (c *Cache) StartSchedule(ctx context.Context) error {
|
|||||||
// Schedule the job
|
// Schedule the job
|
||||||
if _, err := c.scheduler.NewJob(jd, gocron.NewTask(func() {
|
if _, err := c.scheduler.NewJob(jd, gocron.NewTask(func() {
|
||||||
c.refreshDownloadLinks(ctx)
|
c.refreshDownloadLinks(ctx)
|
||||||
}), gocron.WithContext(ctx)); err != nil {
|
}), gocron.WithContext(ctx), gocron.WithTags("decypharr")); err != nil {
|
||||||
c.logger.Error().Err(err).Msg("Failed to create download link refresh job")
|
c.logger.Error().Err(err).Msg("Failed to create download link refresh job")
|
||||||
} else {
|
} else {
|
||||||
c.logger.Debug().Msgf("Download link refresh job scheduled for every %s", c.downloadLinksRefreshInterval)
|
c.logger.Debug().Msgf("Download link refresh job scheduled for every %s", c.downloadLinksRefreshInterval)
|
||||||
@@ -30,7 +33,7 @@ func (c *Cache) StartSchedule(ctx context.Context) error {
|
|||||||
// Schedule the job
|
// Schedule the job
|
||||||
if _, err := c.scheduler.NewJob(jd, gocron.NewTask(func() {
|
if _, err := c.scheduler.NewJob(jd, gocron.NewTask(func() {
|
||||||
c.refreshTorrents(ctx)
|
c.refreshTorrents(ctx)
|
||||||
}), gocron.WithContext(ctx)); err != nil {
|
}), gocron.WithContext(ctx), gocron.WithTags("decypharr")); err != nil {
|
||||||
c.logger.Error().Err(err).Msg("Failed to create torrent refresh job")
|
c.logger.Error().Err(err).Msg("Failed to create torrent refresh job")
|
||||||
} else {
|
} else {
|
||||||
c.logger.Debug().Msgf("Torrent refresh job scheduled for every %s", c.torrentRefreshInterval)
|
c.logger.Debug().Msgf("Torrent refresh job scheduled for every %s", c.torrentRefreshInterval)
|
||||||
@@ -45,8 +48,8 @@ func (c *Cache) StartSchedule(ctx context.Context) error {
|
|||||||
} else {
|
} else {
|
||||||
// Schedule the job
|
// Schedule the job
|
||||||
if _, err := c.cetScheduler.NewJob(jd, gocron.NewTask(func() {
|
if _, err := c.cetScheduler.NewJob(jd, gocron.NewTask(func() {
|
||||||
c.resetInvalidLinks()
|
c.resetInvalidLinks(ctx)
|
||||||
}), gocron.WithContext(ctx)); err != nil {
|
}), gocron.WithContext(ctx), gocron.WithTags("decypharr")); err != nil {
|
||||||
c.logger.Error().Err(err).Msg("Failed to create link reset job")
|
c.logger.Error().Err(err).Msg("Failed to create link reset job")
|
||||||
} else {
|
} else {
|
||||||
c.logger.Debug().Msgf("Link reset job scheduled for every midnight, CET")
|
c.logger.Debug().Msgf("Link reset job scheduled for every midnight, CET")
|
||||||
1
pkg/debrid/store/xml.go
Normal file
@@ -0,0 +1 @@
|
|||||||
|
package store
|
||||||
243
pkg/debrid/types/account.go
Normal file
@@ -0,0 +1,243 @@
|
|||||||
|
package types
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/sirrobot01/decypharr/internal/config"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
type Accounts struct {
|
||||||
|
current *Account
|
||||||
|
accounts []*Account
|
||||||
|
mu sync.RWMutex
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewAccounts(debridConf config.Debrid) *Accounts {
|
||||||
|
accounts := make([]*Account, 0)
|
||||||
|
for idx, token := range debridConf.DownloadAPIKeys {
|
||||||
|
if token == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
account := newAccount(debridConf.Name, token, idx)
|
||||||
|
accounts = append(accounts, account)
|
||||||
|
}
|
||||||
|
|
||||||
|
var current *Account
|
||||||
|
if len(accounts) > 0 {
|
||||||
|
current = accounts[0]
|
||||||
|
}
|
||||||
|
return &Accounts{
|
||||||
|
accounts: accounts,
|
||||||
|
current: current,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type Account struct {
|
||||||
|
Debrid string // e.g., "realdebrid", "torbox", etc.
|
||||||
|
Order int
|
||||||
|
Disabled bool
|
||||||
|
Token string
|
||||||
|
links map[string]*DownloadLink
|
||||||
|
mu sync.RWMutex
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *Accounts) All() []*Account {
|
||||||
|
a.mu.RLock()
|
||||||
|
defer a.mu.RUnlock()
|
||||||
|
activeAccounts := make([]*Account, 0)
|
||||||
|
for _, acc := range a.accounts {
|
||||||
|
if !acc.Disabled {
|
||||||
|
activeAccounts = append(activeAccounts, acc)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return activeAccounts
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *Accounts) Current() *Account {
|
||||||
|
a.mu.RLock()
|
||||||
|
if a.current != nil {
|
||||||
|
current := a.current
|
||||||
|
a.mu.RUnlock()
|
||||||
|
return current
|
||||||
|
}
|
||||||
|
a.mu.RUnlock()
|
||||||
|
|
||||||
|
a.mu.Lock()
|
||||||
|
defer a.mu.Unlock()
|
||||||
|
|
||||||
|
// Double-check after acquiring write lock
|
||||||
|
if a.current != nil {
|
||||||
|
return a.current
|
||||||
|
}
|
||||||
|
|
||||||
|
activeAccounts := make([]*Account, 0)
|
||||||
|
for _, acc := range a.accounts {
|
||||||
|
if !acc.Disabled {
|
||||||
|
activeAccounts = append(activeAccounts, acc)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(activeAccounts) > 0 {
|
||||||
|
a.current = activeAccounts[0]
|
||||||
|
}
|
||||||
|
return a.current
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *Accounts) Disable(account *Account) {
|
||||||
|
a.mu.Lock()
|
||||||
|
defer a.mu.Unlock()
|
||||||
|
account.disable()
|
||||||
|
|
||||||
|
if a.current == account {
|
||||||
|
var newCurrent *Account
|
||||||
|
for _, acc := range a.accounts {
|
||||||
|
if !acc.Disabled {
|
||||||
|
newCurrent = acc
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
a.current = newCurrent
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *Accounts) Reset() {
|
||||||
|
a.mu.Lock()
|
||||||
|
defer a.mu.Unlock()
|
||||||
|
for _, acc := range a.accounts {
|
||||||
|
acc.resetDownloadLinks()
|
||||||
|
acc.Disabled = false
|
||||||
|
}
|
||||||
|
if len(a.accounts) > 0 {
|
||||||
|
a.current = a.accounts[0]
|
||||||
|
} else {
|
||||||
|
a.current = nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *Accounts) GetDownloadLink(fileLink string) (*DownloadLink, error) {
|
||||||
|
if a.Current() == nil {
|
||||||
|
return nil, NoActiveAccountsError
|
||||||
|
}
|
||||||
|
dl, ok := a.Current().getLink(fileLink)
|
||||||
|
if !ok {
|
||||||
|
return nil, NoDownloadLinkError
|
||||||
|
}
|
||||||
|
if dl.ExpiresAt.IsZero() || dl.ExpiresAt.Before(time.Now()) {
|
||||||
|
return nil, DownloadLinkExpiredError
|
||||||
|
}
|
||||||
|
if dl.DownloadLink == "" {
|
||||||
|
return nil, EmptyDownloadLinkError
|
||||||
|
}
|
||||||
|
return dl, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *Accounts) GetDownloadLinkWithAccount(fileLink string) (*DownloadLink, *Account, error) {
|
||||||
|
currentAccount := a.Current()
|
||||||
|
if currentAccount == nil {
|
||||||
|
return nil, nil, NoActiveAccountsError
|
||||||
|
}
|
||||||
|
dl, ok := currentAccount.getLink(fileLink)
|
||||||
|
if !ok {
|
||||||
|
return nil, nil, NoDownloadLinkError
|
||||||
|
}
|
||||||
|
if dl.ExpiresAt.IsZero() || dl.ExpiresAt.Before(time.Now()) {
|
||||||
|
return nil, currentAccount, DownloadLinkExpiredError
|
||||||
|
}
|
||||||
|
if dl.DownloadLink == "" {
|
||||||
|
return nil, currentAccount, EmptyDownloadLinkError
|
||||||
|
}
|
||||||
|
return dl, currentAccount, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *Accounts) SetDownloadLink(fileLink string, dl *DownloadLink) {
|
||||||
|
if a.Current() == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
a.Current().setLink(fileLink, dl)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *Accounts) DeleteDownloadLink(fileLink string) {
|
||||||
|
if a.Current() == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
a.Current().deleteLink(fileLink)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *Accounts) GetLinksCount() int {
|
||||||
|
if a.Current() == nil {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
return a.Current().LinksCount()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *Accounts) SetDownloadLinks(links map[string]*DownloadLink) {
|
||||||
|
if a.Current() == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
a.Current().setLinks(links)
|
||||||
|
}
|
||||||
|
|
||||||
|
func newAccount(debridName, token string, index int) *Account {
|
||||||
|
return &Account{
|
||||||
|
Debrid: debridName,
|
||||||
|
Token: token,
|
||||||
|
Order: index,
|
||||||
|
links: make(map[string]*DownloadLink),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *Account) getLink(fileLink string) (*DownloadLink, bool) {
|
||||||
|
a.mu.RLock()
|
||||||
|
defer a.mu.RUnlock()
|
||||||
|
dl, ok := a.links[a.sliceFileLink(fileLink)]
|
||||||
|
return dl, ok
|
||||||
|
}
|
||||||
|
func (a *Account) setLink(fileLink string, dl *DownloadLink) {
|
||||||
|
a.mu.Lock()
|
||||||
|
defer a.mu.Unlock()
|
||||||
|
a.links[a.sliceFileLink(fileLink)] = dl
|
||||||
|
}
|
||||||
|
func (a *Account) deleteLink(fileLink string) {
|
||||||
|
a.mu.Lock()
|
||||||
|
defer a.mu.Unlock()
|
||||||
|
|
||||||
|
delete(a.links, a.sliceFileLink(fileLink))
|
||||||
|
}
|
||||||
|
func (a *Account) resetDownloadLinks() {
|
||||||
|
a.mu.Lock()
|
||||||
|
defer a.mu.Unlock()
|
||||||
|
a.links = make(map[string]*DownloadLink)
|
||||||
|
}
|
||||||
|
func (a *Account) LinksCount() int {
|
||||||
|
a.mu.RLock()
|
||||||
|
defer a.mu.RUnlock()
|
||||||
|
return len(a.links)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *Account) disable() {
|
||||||
|
a.Disabled = true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *Account) setLinks(links map[string]*DownloadLink) {
|
||||||
|
a.mu.Lock()
|
||||||
|
defer a.mu.Unlock()
|
||||||
|
now := time.Now()
|
||||||
|
for _, dl := range links {
|
||||||
|
if !dl.ExpiresAt.IsZero() && dl.ExpiresAt.Before(now) {
|
||||||
|
// Expired, continue
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
a.links[a.sliceFileLink(dl.Link)] = dl
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// slice download link
|
||||||
|
func (a *Account) sliceFileLink(fileLink string) string {
|
||||||
|
if a.Debrid != "realdebrid" {
|
||||||
|
return fileLink
|
||||||
|
}
|
||||||
|
if len(fileLink) < 39 {
|
||||||
|
return fileLink
|
||||||
|
}
|
||||||
|
return fileLink[0:39]
|
||||||
|
}
|
||||||
@@ -6,23 +6,23 @@ import (
|
|||||||
|
|
||||||
type Client interface {
|
type Client interface {
|
||||||
SubmitMagnet(tr *Torrent) (*Torrent, error)
|
SubmitMagnet(tr *Torrent) (*Torrent, error)
|
||||||
CheckStatus(tr *Torrent, isSymlink bool) (*Torrent, error)
|
CheckStatus(tr *Torrent) (*Torrent, error)
|
||||||
GenerateDownloadLinks(tr *Torrent) error
|
GetFileDownloadLinks(tr *Torrent) error
|
||||||
GetDownloadLink(tr *Torrent, file *File) (*DownloadLink, error)
|
GetDownloadLink(tr *Torrent, file *File) (*DownloadLink, error)
|
||||||
DeleteTorrent(torrentId string) error
|
DeleteTorrent(torrentId string) error
|
||||||
IsAvailable(infohashes []string) map[string]bool
|
IsAvailable(infohashes []string) map[string]bool
|
||||||
GetCheckCached() bool
|
|
||||||
GetDownloadUncached() bool
|
GetDownloadUncached() bool
|
||||||
UpdateTorrent(torrent *Torrent) error
|
UpdateTorrent(torrent *Torrent) error
|
||||||
GetTorrent(torrentId string) (*Torrent, error)
|
GetTorrent(torrentId string) (*Torrent, error)
|
||||||
GetTorrents() ([]*Torrent, error)
|
GetTorrents() ([]*Torrent, error)
|
||||||
GetName() string
|
Name() string
|
||||||
GetLogger() zerolog.Logger
|
Logger() zerolog.Logger
|
||||||
GetDownloadingStatus() []string
|
GetDownloadingStatus() []string
|
||||||
GetDownloads() (map[string]DownloadLink, error)
|
GetDownloadLinks() (map[string]*DownloadLink, error)
|
||||||
CheckLink(link string) error
|
CheckLink(link string) error
|
||||||
GetMountPath() string
|
GetMountPath() string
|
||||||
DisableAccount(string)
|
Accounts() *Accounts // Returns the active download account/token
|
||||||
ResetActiveDownloadKeys()
|
|
||||||
DeleteDownloadLink(linkId string) error
|
DeleteDownloadLink(linkId string) error
|
||||||
|
GetProfile() (*Profile, error)
|
||||||
|
GetAvailableSlots() (int, error)
|
||||||
}
|
}
|
||||||
|
|||||||
30
pkg/debrid/types/error.go
Normal file
@@ -0,0 +1,30 @@
|
|||||||
|
package types
|
||||||
|
|
||||||
|
type Error struct {
|
||||||
|
Message string `json:"message"`
|
||||||
|
Code string `json:"code"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *Error) Error() string {
|
||||||
|
return e.Message
|
||||||
|
}
|
||||||
|
|
||||||
|
var NoActiveAccountsError = &Error{
|
||||||
|
Message: "No active accounts",
|
||||||
|
Code: "no_active_accounts",
|
||||||
|
}
|
||||||
|
|
||||||
|
var NoDownloadLinkError = &Error{
|
||||||
|
Message: "No download link found",
|
||||||
|
Code: "no_download_link",
|
||||||
|
}
|
||||||
|
|
||||||
|
var DownloadLinkExpiredError = &Error{
|
||||||
|
Message: "Download link expired",
|
||||||
|
Code: "download_link_expired",
|
||||||
|
}
|
||||||
|
|
||||||
|
var EmptyDownloadLinkError = &Error{
|
||||||
|
Message: "Download link is empty",
|
||||||
|
Code: "empty_download_link",
|
||||||
|
}
|
||||||
@@ -2,13 +2,14 @@ package types
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"github.com/sirrobot01/decypharr/internal/logger"
|
|
||||||
"github.com/sirrobot01/decypharr/internal/utils"
|
|
||||||
"github.com/sirrobot01/decypharr/pkg/arr"
|
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/sirrobot01/decypharr/internal/logger"
|
||||||
|
"github.com/sirrobot01/decypharr/internal/utils"
|
||||||
|
"github.com/sirrobot01/decypharr/pkg/arr"
|
||||||
)
|
)
|
||||||
|
|
||||||
type Torrent struct {
|
type Torrent struct {
|
||||||
@@ -29,27 +30,16 @@ type Torrent struct {
|
|||||||
Seeders int `json:"seeders"`
|
Seeders int `json:"seeders"`
|
||||||
Links []string `json:"links"`
|
Links []string `json:"links"`
|
||||||
MountPath string `json:"mount_path"`
|
MountPath string `json:"mount_path"`
|
||||||
|
DeletedFiles []string `json:"deleted_files"`
|
||||||
|
|
||||||
Debrid string `json:"debrid"`
|
Debrid string `json:"debrid"`
|
||||||
|
|
||||||
Arr *arr.Arr `json:"arr"`
|
Arr *arr.Arr `json:"arr"`
|
||||||
Mu sync.Mutex `json:"-"`
|
|
||||||
SizeDownloaded int64 `json:"-"` // This is used for local download
|
|
||||||
DownloadUncached bool `json:"-"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type DownloadLink struct {
|
SizeDownloaded int64 `json:"-"` // This is used for local download
|
||||||
Filename string `json:"filename"`
|
DownloadUncached bool `json:"-"`
|
||||||
Link string `json:"link"`
|
|
||||||
DownloadLink string `json:"download_link"`
|
|
||||||
Generated time.Time `json:"generated"`
|
|
||||||
Size int64 `json:"size"`
|
|
||||||
Id string `json:"id"`
|
|
||||||
AccountId string `json:"account_id"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *DownloadLink) String() string {
|
sync.Mutex
|
||||||
return d.DownloadLink
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (t *Torrent) GetSymlinkFolder(parent string) string {
|
func (t *Torrent) GetSymlinkFolder(parent string) string {
|
||||||
@@ -75,16 +65,37 @@ func (t *Torrent) GetMountFolder(rClonePath string) (string, error) {
|
|||||||
return "", fmt.Errorf("no path found")
|
return "", fmt.Errorf("no path found")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (t *Torrent) GetFile(filename string) (File, bool) {
|
||||||
|
f, ok := t.Files[filename]
|
||||||
|
if !ok {
|
||||||
|
return File{}, false
|
||||||
|
}
|
||||||
|
return f, !f.Deleted
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *Torrent) GetFiles() []File {
|
||||||
|
files := make([]File, 0, len(t.Files))
|
||||||
|
for _, f := range t.Files {
|
||||||
|
if !f.Deleted {
|
||||||
|
files = append(files, f)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return files
|
||||||
|
}
|
||||||
|
|
||||||
type File struct {
|
type File struct {
|
||||||
TorrentId string `json:"torrent_id"`
|
TorrentId string `json:"torrent_id"`
|
||||||
Id string `json:"id"`
|
Id string `json:"id"`
|
||||||
Name string `json:"name"`
|
Name string `json:"name"`
|
||||||
Size int64 `json:"size"`
|
Size int64 `json:"size"`
|
||||||
|
IsRar bool `json:"is_rar"`
|
||||||
|
ByteRange *[2]int64 `json:"byte_range,omitempty"`
|
||||||
Path string `json:"path"`
|
Path string `json:"path"`
|
||||||
Link string `json:"link"`
|
Link string `json:"link"`
|
||||||
DownloadLink *DownloadLink `json:"-"`
|
|
||||||
AccountId string `json:"account_id"`
|
AccountId string `json:"account_id"`
|
||||||
Generated time.Time `json:"generated"`
|
Generated time.Time `json:"generated"`
|
||||||
|
Deleted bool `json:"deleted"`
|
||||||
|
DownloadLink *DownloadLink `json:"-"`
|
||||||
}
|
}
|
||||||
|
|
||||||
func (t *Torrent) Cleanup(remove bool) {
|
func (t *Torrent) Cleanup(remove bool) {
|
||||||
@@ -96,18 +107,38 @@ func (t *Torrent) Cleanup(remove bool) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (t *Torrent) GetFile(id string) *File {
|
type IngestData struct {
|
||||||
for _, f := range t.Files {
|
Debrid string `json:"debrid"`
|
||||||
if f.Id == id {
|
Name string `json:"name"`
|
||||||
return &f
|
Hash string `json:"hash"`
|
||||||
}
|
Size int64 `json:"size"`
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
type Account struct {
|
type Profile struct {
|
||||||
ID string `json:"id"`
|
Name string `json:"name"`
|
||||||
Disabled bool `json:"disabled"`
|
Id int64 `json:"id"`
|
||||||
Name string `json:"name"`
|
Username string `json:"username"`
|
||||||
Token string `json:"token"`
|
Email string `json:"email"`
|
||||||
|
Points int64 `json:"points"`
|
||||||
|
Type string `json:"type"`
|
||||||
|
Premium int `json:"premium"`
|
||||||
|
Expiration time.Time `json:"expiration"`
|
||||||
|
|
||||||
|
LibrarySize int `json:"library_size"`
|
||||||
|
BadTorrents int `json:"bad_torrents"`
|
||||||
|
ActiveLinks int `json:"active_links"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type DownloadLink struct {
|
||||||
|
Filename string `json:"filename"`
|
||||||
|
Link string `json:"link"`
|
||||||
|
DownloadLink string `json:"download_link"`
|
||||||
|
Generated time.Time `json:"generated"`
|
||||||
|
Size int64 `json:"size"`
|
||||||
|
Id string `json:"id"`
|
||||||
|
ExpiresAt time.Time
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *DownloadLink) String() string {
|
||||||
|
return d.DownloadLink
|
||||||
}
|
}
|
||||||
|
|||||||
207
pkg/qbit/context.go
Normal file
@@ -0,0 +1,207 @@
|
|||||||
|
package qbit
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"encoding/base64"
|
||||||
|
"fmt"
|
||||||
|
"github.com/go-chi/chi/v5"
|
||||||
|
"github.com/sirrobot01/decypharr/internal/config"
|
||||||
|
"github.com/sirrobot01/decypharr/pkg/arr"
|
||||||
|
"github.com/sirrobot01/decypharr/pkg/store"
|
||||||
|
"golang.org/x/crypto/bcrypt"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
type contextKey string
|
||||||
|
|
||||||
|
const (
|
||||||
|
categoryKey contextKey = "category"
|
||||||
|
hashesKey contextKey = "hashes"
|
||||||
|
arrKey contextKey = "arr"
|
||||||
|
)
|
||||||
|
|
||||||
|
func validateServiceURL(urlStr string) error {
|
||||||
|
if urlStr == "" {
|
||||||
|
return fmt.Errorf("URL cannot be empty")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Try parsing as full URL first
|
||||||
|
u, err := url.Parse(urlStr)
|
||||||
|
if err == nil && u.Scheme != "" && u.Host != "" {
|
||||||
|
// It's a full URL, validate scheme
|
||||||
|
if u.Scheme != "http" && u.Scheme != "https" {
|
||||||
|
return fmt.Errorf("URL scheme must be http or https")
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if it's a host:port format (no scheme)
|
||||||
|
if strings.Contains(urlStr, ":") && !strings.Contains(urlStr, "://") {
|
||||||
|
// Try parsing with http:// prefix
|
||||||
|
testURL := "http://" + urlStr
|
||||||
|
u, err := url.Parse(testURL)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("invalid host:port format: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if u.Host == "" {
|
||||||
|
return fmt.Errorf("host is required in host:port format")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Validate port number
|
||||||
|
if u.Port() == "" {
|
||||||
|
return fmt.Errorf("port is required in host:port format")
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return fmt.Errorf("invalid URL format: %s", urlStr)
|
||||||
|
}
|
||||||
|
|
||||||
|
func getCategory(ctx context.Context) string {
|
||||||
|
if category, ok := ctx.Value(categoryKey).(string); ok {
|
||||||
|
return category
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func getHashes(ctx context.Context) []string {
|
||||||
|
if hashes, ok := ctx.Value(hashesKey).([]string); ok {
|
||||||
|
return hashes
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func getArrFromContext(ctx context.Context) *arr.Arr {
|
||||||
|
if a, ok := ctx.Value(arrKey).(*arr.Arr); ok {
|
||||||
|
return a
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func decodeAuthHeader(header string) (string, string, error) {
|
||||||
|
encodedTokens := strings.Split(header, " ")
|
||||||
|
if len(encodedTokens) != 2 {
|
||||||
|
return "", "", nil
|
||||||
|
}
|
||||||
|
encodedToken := encodedTokens[1]
|
||||||
|
|
||||||
|
bytes, err := base64.StdEncoding.DecodeString(encodedToken)
|
||||||
|
if err != nil {
|
||||||
|
return "", "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
bearer := string(bytes)
|
||||||
|
|
||||||
|
colonIndex := strings.LastIndex(bearer, ":")
|
||||||
|
host := bearer[:colonIndex]
|
||||||
|
token := bearer[colonIndex+1:]
|
||||||
|
|
||||||
|
return host, token, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (q *QBit) categoryContext(next http.Handler) http.Handler {
|
||||||
|
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
category := strings.Trim(r.URL.Query().Get("category"), "")
|
||||||
|
if category == "" {
|
||||||
|
// Get from form
|
||||||
|
_ = r.ParseForm()
|
||||||
|
category = r.Form.Get("category")
|
||||||
|
if category == "" {
|
||||||
|
// Get from multipart form
|
||||||
|
_ = r.ParseMultipartForm(32 << 20)
|
||||||
|
category = r.FormValue("category")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
ctx := context.WithValue(r.Context(), categoryKey, strings.TrimSpace(category))
|
||||||
|
next.ServeHTTP(w, r.WithContext(ctx))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// authContext creates a middleware that extracts the Arr host and token from the Authorization header
|
||||||
|
// and adds it to the request context.
|
||||||
|
// This is used to identify the Arr instance for the request.
|
||||||
|
// Only a valid host and token will be added to the context/config. The rest are manual
|
||||||
|
func (q *QBit) authContext(next http.Handler) http.Handler {
|
||||||
|
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
cfg := config.Get()
|
||||||
|
host, token, err := decodeAuthHeader(r.Header.Get("Authorization"))
|
||||||
|
category := getCategory(r.Context())
|
||||||
|
arrs := store.Get().Arr()
|
||||||
|
// Check if arr exists
|
||||||
|
a := arrs.Get(category)
|
||||||
|
if a == nil {
|
||||||
|
// Arr is not configured, create a new one
|
||||||
|
downloadUncached := false
|
||||||
|
a = arr.New(category, "", "", false, false, &downloadUncached, "", "auto")
|
||||||
|
}
|
||||||
|
if err == nil {
|
||||||
|
host = strings.TrimSpace(host)
|
||||||
|
if host != "" {
|
||||||
|
a.Host = host
|
||||||
|
}
|
||||||
|
token = strings.TrimSpace(token)
|
||||||
|
if token != "" {
|
||||||
|
a.Token = token
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if cfg.NeedsAuth() {
|
||||||
|
if a.Host == "" || a.Token == "" {
|
||||||
|
http.Error(w, "Unauthorized: Host and token are required for authentication", http.StatusUnauthorized)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
// try to use either Arr validate, or user auth validation
|
||||||
|
if err := a.Validate(); err != nil {
|
||||||
|
// If this failed, try to use user auth validation
|
||||||
|
if !verifyAuth(host, token) {
|
||||||
|
http.Error(w, "Unauthorized: Invalid host or token", http.StatusUnauthorized)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
a.Source = "auto"
|
||||||
|
arrs.AddOrUpdate(a)
|
||||||
|
ctx := context.WithValue(r.Context(), arrKey, a)
|
||||||
|
next.ServeHTTP(w, r.WithContext(ctx))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func hashesContext(next http.Handler) http.Handler {
|
||||||
|
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
_hashes := chi.URLParam(r, "hashes")
|
||||||
|
var hashes []string
|
||||||
|
if _hashes != "" {
|
||||||
|
hashes = strings.Split(_hashes, "|")
|
||||||
|
}
|
||||||
|
if hashes == nil {
|
||||||
|
// Get hashes from form
|
||||||
|
_ = r.ParseForm()
|
||||||
|
hashes = r.Form["hashes"]
|
||||||
|
}
|
||||||
|
for i, hash := range hashes {
|
||||||
|
hashes[i] = strings.TrimSpace(hash)
|
||||||
|
}
|
||||||
|
ctx := context.WithValue(r.Context(), hashesKey, hashes)
|
||||||
|
next.ServeHTTP(w, r.WithContext(ctx))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func verifyAuth(username, password string) bool {
|
||||||
|
// If you're storing hashed password, use bcrypt to compare
|
||||||
|
if username == "" {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
auth := config.Get().GetAuth()
|
||||||
|
if auth == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if username != auth.Username {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
err := bcrypt.CompareHashAndPassword([]byte(auth.Password), []byte(password))
|
||||||
|
return err == nil
|
||||||
|
}
|
||||||
184
pkg/qbit/http.go
@@ -1,114 +1,25 @@
|
|||||||
package qbit
|
package qbit
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
|
||||||
"encoding/base64"
|
|
||||||
"github.com/go-chi/chi/v5"
|
|
||||||
"github.com/sirrobot01/decypharr/internal/request"
|
"github.com/sirrobot01/decypharr/internal/request"
|
||||||
"github.com/sirrobot01/decypharr/pkg/arr"
|
"github.com/sirrobot01/decypharr/pkg/arr"
|
||||||
"github.com/sirrobot01/decypharr/pkg/service"
|
|
||||||
"net/http"
|
"net/http"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"strings"
|
"strings"
|
||||||
)
|
)
|
||||||
|
|
||||||
func decodeAuthHeader(header string) (string, string, error) {
|
|
||||||
encodedTokens := strings.Split(header, " ")
|
|
||||||
if len(encodedTokens) != 2 {
|
|
||||||
return "", "", nil
|
|
||||||
}
|
|
||||||
encodedToken := encodedTokens[1]
|
|
||||||
|
|
||||||
bytes, err := base64.StdEncoding.DecodeString(encodedToken)
|
|
||||||
if err != nil {
|
|
||||||
return "", "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
bearer := string(bytes)
|
|
||||||
|
|
||||||
colonIndex := strings.LastIndex(bearer, ":")
|
|
||||||
host := bearer[:colonIndex]
|
|
||||||
token := bearer[colonIndex+1:]
|
|
||||||
|
|
||||||
return host, token, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (q *QBit) CategoryContext(next http.Handler) http.Handler {
|
|
||||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
|
||||||
category := strings.Trim(r.URL.Query().Get("category"), "")
|
|
||||||
if category == "" {
|
|
||||||
// Get from form
|
|
||||||
_ = r.ParseForm()
|
|
||||||
category = r.Form.Get("category")
|
|
||||||
if category == "" {
|
|
||||||
// Get from multipart form
|
|
||||||
_ = r.ParseMultipartForm(32 << 20)
|
|
||||||
category = r.FormValue("category")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
ctx := context.WithValue(r.Context(), "category", strings.TrimSpace(category))
|
|
||||||
next.ServeHTTP(w, r.WithContext(ctx))
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func (q *QBit) authContext(next http.Handler) http.Handler {
|
|
||||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
|
||||||
host, token, err := decodeAuthHeader(r.Header.Get("Authorization"))
|
|
||||||
category := r.Context().Value("category").(string)
|
|
||||||
svc := service.GetService()
|
|
||||||
// Check if arr exists
|
|
||||||
a := svc.Arr.Get(category)
|
|
||||||
if a == nil {
|
|
||||||
downloadUncached := false
|
|
||||||
a = arr.New(category, "", "", false, false, &downloadUncached)
|
|
||||||
}
|
|
||||||
if err == nil {
|
|
||||||
host = strings.TrimSpace(host)
|
|
||||||
if host != "" {
|
|
||||||
a.Host = host
|
|
||||||
}
|
|
||||||
token = strings.TrimSpace(token)
|
|
||||||
if token != "" {
|
|
||||||
a.Token = token
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
svc.Arr.AddOrUpdate(a)
|
|
||||||
ctx := context.WithValue(r.Context(), "arr", a)
|
|
||||||
next.ServeHTTP(w, r.WithContext(ctx))
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func HashesCtx(next http.Handler) http.Handler {
|
|
||||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
|
||||||
_hashes := chi.URLParam(r, "hashes")
|
|
||||||
var hashes []string
|
|
||||||
if _hashes != "" {
|
|
||||||
hashes = strings.Split(_hashes, "|")
|
|
||||||
}
|
|
||||||
if hashes == nil {
|
|
||||||
// Get hashes from form
|
|
||||||
_ = r.ParseForm()
|
|
||||||
hashes = r.Form["hashes"]
|
|
||||||
}
|
|
||||||
for i, hash := range hashes {
|
|
||||||
hashes[i] = strings.TrimSpace(hash)
|
|
||||||
}
|
|
||||||
ctx := context.WithValue(r.Context(), "hashes", hashes)
|
|
||||||
next.ServeHTTP(w, r.WithContext(ctx))
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func (q *QBit) handleLogin(w http.ResponseWriter, r *http.Request) {
|
func (q *QBit) handleLogin(w http.ResponseWriter, r *http.Request) {
|
||||||
ctx := r.Context()
|
ctx := r.Context()
|
||||||
_arr := ctx.Value("arr").(*arr.Arr)
|
_arr := getArrFromContext(ctx)
|
||||||
if _arr == nil {
|
if _arr == nil {
|
||||||
// No arr
|
// Arr not in context, return OK
|
||||||
_, _ = w.Write([]byte("Ok."))
|
_, _ = w.Write([]byte("Ok."))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if err := _arr.Validate(); err != nil {
|
if err := _arr.Validate(); err != nil {
|
||||||
q.logger.Info().Msgf("Error validating arr: %v", err)
|
q.logger.Error().Err(err).Msgf("Error validating arr")
|
||||||
|
http.Error(w, "Invalid arr configuration", http.StatusBadRequest)
|
||||||
|
return
|
||||||
}
|
}
|
||||||
_, _ = w.Write([]byte("Ok."))
|
_, _ = w.Write([]byte("Ok."))
|
||||||
}
|
}
|
||||||
@@ -122,7 +33,7 @@ func (q *QBit) handleWebAPIVersion(w http.ResponseWriter, r *http.Request) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (q *QBit) handlePreferences(w http.ResponseWriter, r *http.Request) {
|
func (q *QBit) handlePreferences(w http.ResponseWriter, r *http.Request) {
|
||||||
preferences := NewAppPreferences()
|
preferences := getAppPreferences()
|
||||||
|
|
||||||
preferences.WebUiUsername = q.Username
|
preferences.WebUiUsername = q.Username
|
||||||
preferences.SavePath = q.DownloadFolder
|
preferences.SavePath = q.DownloadFolder
|
||||||
@@ -150,10 +61,10 @@ func (q *QBit) handleShutdown(w http.ResponseWriter, r *http.Request) {
|
|||||||
func (q *QBit) handleTorrentsInfo(w http.ResponseWriter, r *http.Request) {
|
func (q *QBit) handleTorrentsInfo(w http.ResponseWriter, r *http.Request) {
|
||||||
//log all url params
|
//log all url params
|
||||||
ctx := r.Context()
|
ctx := r.Context()
|
||||||
category := ctx.Value("category").(string)
|
category := getCategory(ctx)
|
||||||
filter := strings.Trim(r.URL.Query().Get("filter"), "")
|
filter := strings.Trim(r.URL.Query().Get("filter"), "")
|
||||||
hashes, _ := ctx.Value("hashes").([]string)
|
hashes := getHashes(ctx)
|
||||||
torrents := q.Storage.GetAllSorted(category, filter, hashes, "added_on", false)
|
torrents := q.storage.GetAllSorted(category, filter, hashes, "added_on", false)
|
||||||
request.JSONResponse(w, torrents, http.StatusOK)
|
request.JSONResponse(w, torrents, http.StatusOK)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -164,13 +75,13 @@ func (q *QBit) handleTorrentsAdd(w http.ResponseWriter, r *http.Request) {
|
|||||||
contentType := r.Header.Get("Content-Type")
|
contentType := r.Header.Get("Content-Type")
|
||||||
if strings.Contains(contentType, "multipart/form-data") {
|
if strings.Contains(contentType, "multipart/form-data") {
|
||||||
if err := r.ParseMultipartForm(32 << 20); err != nil {
|
if err := r.ParseMultipartForm(32 << 20); err != nil {
|
||||||
q.logger.Info().Msgf("Error parsing multipart form: %v", err)
|
q.logger.Error().Err(err).Msgf("Error parsing multipart form")
|
||||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
} else if strings.Contains(contentType, "application/x-www-form-urlencoded") {
|
} else if strings.Contains(contentType, "application/x-www-form-urlencoded") {
|
||||||
if err := r.ParseForm(); err != nil {
|
if err := r.ParseForm(); err != nil {
|
||||||
q.logger.Info().Msgf("Error parsing form: %v", err)
|
q.logger.Error().Err(err).Msgf("Error parsing form")
|
||||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@@ -179,10 +90,18 @@ func (q *QBit) handleTorrentsAdd(w http.ResponseWriter, r *http.Request) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
isSymlink := strings.ToLower(r.FormValue("sequentialDownload")) != "true"
|
action := "symlink"
|
||||||
|
if strings.ToLower(r.FormValue("sequentialDownload")) == "true" {
|
||||||
|
action = "download"
|
||||||
|
}
|
||||||
|
debridName := r.FormValue("debrid")
|
||||||
category := r.FormValue("category")
|
category := r.FormValue("category")
|
||||||
|
_arr := getArrFromContext(ctx)
|
||||||
|
if _arr == nil {
|
||||||
|
// Arr is not in context
|
||||||
|
_arr = arr.New(category, "", "", false, false, nil, "", "")
|
||||||
|
}
|
||||||
atleastOne := false
|
atleastOne := false
|
||||||
ctx = context.WithValue(ctx, "isSymlink", isSymlink)
|
|
||||||
|
|
||||||
// Handle magnet URLs
|
// Handle magnet URLs
|
||||||
if urls := r.FormValue("urls"); urls != "" {
|
if urls := r.FormValue("urls"); urls != "" {
|
||||||
@@ -191,8 +110,8 @@ func (q *QBit) handleTorrentsAdd(w http.ResponseWriter, r *http.Request) {
|
|||||||
urlList = append(urlList, strings.TrimSpace(u))
|
urlList = append(urlList, strings.TrimSpace(u))
|
||||||
}
|
}
|
||||||
for _, url := range urlList {
|
for _, url := range urlList {
|
||||||
if err := q.AddMagnet(ctx, url, category); err != nil {
|
if err := q.addMagnet(ctx, url, _arr, debridName, action); err != nil {
|
||||||
q.logger.Info().Msgf("Error adding magnet: %v", err)
|
q.logger.Debug().Msgf("Error adding magnet: %s", err.Error())
|
||||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@@ -204,8 +123,8 @@ func (q *QBit) handleTorrentsAdd(w http.ResponseWriter, r *http.Request) {
|
|||||||
if r.MultipartForm != nil && r.MultipartForm.File != nil {
|
if r.MultipartForm != nil && r.MultipartForm.File != nil {
|
||||||
if files := r.MultipartForm.File["torrents"]; len(files) > 0 {
|
if files := r.MultipartForm.File["torrents"]; len(files) > 0 {
|
||||||
for _, fileHeader := range files {
|
for _, fileHeader := range files {
|
||||||
if err := q.AddTorrent(ctx, fileHeader, category); err != nil {
|
if err := q.addTorrent(ctx, fileHeader, _arr, debridName, action); err != nil {
|
||||||
q.logger.Info().Msgf("Error adding torrent: %v", err)
|
q.logger.Debug().Err(err).Msgf("Error adding torrent")
|
||||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@@ -224,14 +143,14 @@ func (q *QBit) handleTorrentsAdd(w http.ResponseWriter, r *http.Request) {
|
|||||||
|
|
||||||
func (q *QBit) handleTorrentsDelete(w http.ResponseWriter, r *http.Request) {
|
func (q *QBit) handleTorrentsDelete(w http.ResponseWriter, r *http.Request) {
|
||||||
ctx := r.Context()
|
ctx := r.Context()
|
||||||
hashes, _ := ctx.Value("hashes").([]string)
|
hashes := getHashes(ctx)
|
||||||
if len(hashes) == 0 {
|
if len(hashes) == 0 {
|
||||||
http.Error(w, "No hashes provided", http.StatusBadRequest)
|
http.Error(w, "No hashes provided", http.StatusBadRequest)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
category := ctx.Value("category").(string)
|
category := getCategory(ctx)
|
||||||
for _, hash := range hashes {
|
for _, hash := range hashes {
|
||||||
q.Storage.Delete(hash, category, false)
|
q.storage.Delete(hash, category, false)
|
||||||
}
|
}
|
||||||
|
|
||||||
w.WriteHeader(http.StatusOK)
|
w.WriteHeader(http.StatusOK)
|
||||||
@@ -239,10 +158,10 @@ func (q *QBit) handleTorrentsDelete(w http.ResponseWriter, r *http.Request) {
|
|||||||
|
|
||||||
func (q *QBit) handleTorrentsPause(w http.ResponseWriter, r *http.Request) {
|
func (q *QBit) handleTorrentsPause(w http.ResponseWriter, r *http.Request) {
|
||||||
ctx := r.Context()
|
ctx := r.Context()
|
||||||
hashes, _ := ctx.Value("hashes").([]string)
|
hashes := getHashes(ctx)
|
||||||
category := ctx.Value("category").(string)
|
category := getCategory(ctx)
|
||||||
for _, hash := range hashes {
|
for _, hash := range hashes {
|
||||||
torrent := q.Storage.Get(hash, category)
|
torrent := q.storage.Get(hash, category)
|
||||||
if torrent == nil {
|
if torrent == nil {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
@@ -254,10 +173,10 @@ func (q *QBit) handleTorrentsPause(w http.ResponseWriter, r *http.Request) {
|
|||||||
|
|
||||||
func (q *QBit) handleTorrentsResume(w http.ResponseWriter, r *http.Request) {
|
func (q *QBit) handleTorrentsResume(w http.ResponseWriter, r *http.Request) {
|
||||||
ctx := r.Context()
|
ctx := r.Context()
|
||||||
hashes, _ := ctx.Value("hashes").([]string)
|
hashes := getHashes(ctx)
|
||||||
category := ctx.Value("category").(string)
|
category := getCategory(ctx)
|
||||||
for _, hash := range hashes {
|
for _, hash := range hashes {
|
||||||
torrent := q.Storage.Get(hash, category)
|
torrent := q.storage.Get(hash, category)
|
||||||
if torrent == nil {
|
if torrent == nil {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
@@ -269,10 +188,10 @@ func (q *QBit) handleTorrentsResume(w http.ResponseWriter, r *http.Request) {
|
|||||||
|
|
||||||
func (q *QBit) handleTorrentRecheck(w http.ResponseWriter, r *http.Request) {
|
func (q *QBit) handleTorrentRecheck(w http.ResponseWriter, r *http.Request) {
|
||||||
ctx := r.Context()
|
ctx := r.Context()
|
||||||
hashes, _ := ctx.Value("hashes").([]string)
|
hashes := getHashes(ctx)
|
||||||
category := ctx.Value("category").(string)
|
category := getCategory(ctx)
|
||||||
for _, hash := range hashes {
|
for _, hash := range hashes {
|
||||||
torrent := q.Storage.Get(hash, category)
|
torrent := q.storage.Get(hash, category)
|
||||||
if torrent == nil {
|
if torrent == nil {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
@@ -315,7 +234,7 @@ func (q *QBit) handleCreateCategory(w http.ResponseWriter, r *http.Request) {
|
|||||||
func (q *QBit) handleTorrentProperties(w http.ResponseWriter, r *http.Request) {
|
func (q *QBit) handleTorrentProperties(w http.ResponseWriter, r *http.Request) {
|
||||||
ctx := r.Context()
|
ctx := r.Context()
|
||||||
hash := r.URL.Query().Get("hash")
|
hash := r.URL.Query().Get("hash")
|
||||||
torrent := q.Storage.Get(hash, ctx.Value("category").(string))
|
torrent := q.storage.Get(hash, getCategory(ctx))
|
||||||
|
|
||||||
properties := q.GetTorrentProperties(torrent)
|
properties := q.GetTorrentProperties(torrent)
|
||||||
request.JSONResponse(w, properties, http.StatusOK)
|
request.JSONResponse(w, properties, http.StatusOK)
|
||||||
@@ -324,22 +243,21 @@ func (q *QBit) handleTorrentProperties(w http.ResponseWriter, r *http.Request) {
|
|||||||
func (q *QBit) handleTorrentFiles(w http.ResponseWriter, r *http.Request) {
|
func (q *QBit) handleTorrentFiles(w http.ResponseWriter, r *http.Request) {
|
||||||
ctx := r.Context()
|
ctx := r.Context()
|
||||||
hash := r.URL.Query().Get("hash")
|
hash := r.URL.Query().Get("hash")
|
||||||
torrent := q.Storage.Get(hash, ctx.Value("category").(string))
|
torrent := q.storage.Get(hash, getCategory(ctx))
|
||||||
if torrent == nil {
|
if torrent == nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
files := q.GetTorrentFiles(torrent)
|
request.JSONResponse(w, torrent.Files, http.StatusOK)
|
||||||
request.JSONResponse(w, files, http.StatusOK)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (q *QBit) handleSetCategory(w http.ResponseWriter, r *http.Request) {
|
func (q *QBit) handleSetCategory(w http.ResponseWriter, r *http.Request) {
|
||||||
ctx := r.Context()
|
ctx := r.Context()
|
||||||
category := ctx.Value("category").(string)
|
category := getCategory(ctx)
|
||||||
hashes, _ := ctx.Value("hashes").([]string)
|
hashes := getHashes(ctx)
|
||||||
torrents := q.Storage.GetAll("", "", hashes)
|
torrents := q.storage.GetAll("", "", hashes)
|
||||||
for _, torrent := range torrents {
|
for _, torrent := range torrents {
|
||||||
torrent.Category = category
|
torrent.Category = category
|
||||||
q.Storage.AddOrUpdate(torrent)
|
q.storage.AddOrUpdate(torrent)
|
||||||
}
|
}
|
||||||
request.JSONResponse(w, nil, http.StatusOK)
|
request.JSONResponse(w, nil, http.StatusOK)
|
||||||
}
|
}
|
||||||
@@ -351,14 +269,14 @@ func (q *QBit) handleAddTorrentTags(w http.ResponseWriter, r *http.Request) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
ctx := r.Context()
|
ctx := r.Context()
|
||||||
hashes, _ := ctx.Value("hashes").([]string)
|
hashes := getHashes(ctx)
|
||||||
tags := strings.Split(r.FormValue("tags"), ",")
|
tags := strings.Split(r.FormValue("tags"), ",")
|
||||||
for i, tag := range tags {
|
for i, tag := range tags {
|
||||||
tags[i] = strings.TrimSpace(tag)
|
tags[i] = strings.TrimSpace(tag)
|
||||||
}
|
}
|
||||||
torrents := q.Storage.GetAll("", "", hashes)
|
torrents := q.storage.GetAll("", "", hashes)
|
||||||
for _, t := range torrents {
|
for _, t := range torrents {
|
||||||
q.SetTorrentTags(t, tags)
|
q.setTorrentTags(t, tags)
|
||||||
}
|
}
|
||||||
request.JSONResponse(w, nil, http.StatusOK)
|
request.JSONResponse(w, nil, http.StatusOK)
|
||||||
}
|
}
|
||||||
@@ -370,14 +288,14 @@ func (q *QBit) handleRemoveTorrentTags(w http.ResponseWriter, r *http.Request) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
ctx := r.Context()
|
ctx := r.Context()
|
||||||
hashes, _ := ctx.Value("hashes").([]string)
|
hashes := getHashes(ctx)
|
||||||
tags := strings.Split(r.FormValue("tags"), ",")
|
tags := strings.Split(r.FormValue("tags"), ",")
|
||||||
for i, tag := range tags {
|
for i, tag := range tags {
|
||||||
tags[i] = strings.TrimSpace(tag)
|
tags[i] = strings.TrimSpace(tag)
|
||||||
}
|
}
|
||||||
torrents := q.Storage.GetAll("", "", hashes)
|
torrents := q.storage.GetAll("", "", hashes)
|
||||||
for _, torrent := range torrents {
|
for _, torrent := range torrents {
|
||||||
q.RemoveTorrentTags(torrent, tags)
|
q.removeTorrentTags(torrent, tags)
|
||||||
|
|
||||||
}
|
}
|
||||||
request.JSONResponse(w, nil, http.StatusOK)
|
request.JSONResponse(w, nil, http.StatusOK)
|
||||||
@@ -397,6 +315,6 @@ func (q *QBit) handleCreateTags(w http.ResponseWriter, r *http.Request) {
|
|||||||
for i, tag := range tags {
|
for i, tag := range tags {
|
||||||
tags[i] = strings.TrimSpace(tag)
|
tags[i] = strings.TrimSpace(tag)
|
||||||
}
|
}
|
||||||
q.AddTags(tags)
|
q.addTags(tags)
|
||||||
request.JSONResponse(w, nil, http.StatusOK)
|
request.JSONResponse(w, nil, http.StatusOK)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,80 +0,0 @@
|
|||||||
package qbit
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/sirrobot01/decypharr/internal/utils"
|
|
||||||
"github.com/sirrobot01/decypharr/pkg/debrid/debrid"
|
|
||||||
"github.com/sirrobot01/decypharr/pkg/service"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/google/uuid"
|
|
||||||
"github.com/sirrobot01/decypharr/pkg/arr"
|
|
||||||
)
|
|
||||||
|
|
||||||
type ImportRequest struct {
|
|
||||||
ID string `json:"id"`
|
|
||||||
Path string `json:"path"`
|
|
||||||
Magnet *utils.Magnet `json:"magnet"`
|
|
||||||
Arr *arr.Arr `json:"arr"`
|
|
||||||
IsSymlink bool `json:"isSymlink"`
|
|
||||||
SeriesId int `json:"series"`
|
|
||||||
Seasons []int `json:"seasons"`
|
|
||||||
Episodes []string `json:"episodes"`
|
|
||||||
DownloadUncached bool `json:"downloadUncached"`
|
|
||||||
|
|
||||||
Failed bool `json:"failed"`
|
|
||||||
FailedAt time.Time `json:"failedAt"`
|
|
||||||
Reason string `json:"reason"`
|
|
||||||
Completed bool `json:"completed"`
|
|
||||||
CompletedAt time.Time `json:"completedAt"`
|
|
||||||
Async bool `json:"async"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type ManualImportResponseSchema struct {
|
|
||||||
Priority string `json:"priority"`
|
|
||||||
Status string `json:"status"`
|
|
||||||
Result string `json:"result"`
|
|
||||||
Queued time.Time `json:"queued"`
|
|
||||||
Trigger string `json:"trigger"`
|
|
||||||
SendUpdatesToClient bool `json:"sendUpdatesToClient"`
|
|
||||||
UpdateScheduledTask bool `json:"updateScheduledTask"`
|
|
||||||
Id int `json:"id"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewImportRequest(magnet *utils.Magnet, arr *arr.Arr, isSymlink, downloadUncached bool) *ImportRequest {
|
|
||||||
return &ImportRequest{
|
|
||||||
ID: uuid.NewString(),
|
|
||||||
Magnet: magnet,
|
|
||||||
Arr: arr,
|
|
||||||
Failed: false,
|
|
||||||
Completed: false,
|
|
||||||
Async: false,
|
|
||||||
IsSymlink: isSymlink,
|
|
||||||
DownloadUncached: downloadUncached,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (i *ImportRequest) Fail(reason string) {
|
|
||||||
i.Failed = true
|
|
||||||
i.FailedAt = time.Now()
|
|
||||||
i.Reason = reason
|
|
||||||
}
|
|
||||||
|
|
||||||
func (i *ImportRequest) Complete() {
|
|
||||||
i.Completed = true
|
|
||||||
i.CompletedAt = time.Now()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (i *ImportRequest) Process(q *QBit) (err error) {
|
|
||||||
// Use this for now.
|
|
||||||
// This sends the torrent to the arr
|
|
||||||
svc := service.GetService()
|
|
||||||
torrent := createTorrentFromMagnet(i.Magnet, i.Arr.Name, "manual")
|
|
||||||
debridTorrent, err := debrid.ProcessTorrent(svc.Debrid, i.Magnet, i.Arr, i.IsSymlink, i.DownloadUncached)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
torrent = q.UpdateTorrentMin(torrent, debridTorrent)
|
|
||||||
q.Storage.AddOrUpdate(torrent)
|
|
||||||
go q.ProcessFiles(torrent, debridTorrent, i.Arr, i.IsSymlink)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
@@ -1,52 +1,38 @@
|
|||||||
package qbit
|
package qbit
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"cmp"
|
|
||||||
"github.com/rs/zerolog"
|
"github.com/rs/zerolog"
|
||||||
"github.com/sirrobot01/decypharr/internal/config"
|
"github.com/sirrobot01/decypharr/internal/config"
|
||||||
"github.com/sirrobot01/decypharr/internal/logger"
|
"github.com/sirrobot01/decypharr/internal/logger"
|
||||||
"os"
|
"github.com/sirrobot01/decypharr/pkg/store"
|
||||||
"path/filepath"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
type QBit struct {
|
type QBit struct {
|
||||||
Username string `json:"username"`
|
Username string
|
||||||
Password string `json:"password"`
|
Password string
|
||||||
Port string `json:"port"`
|
DownloadFolder string
|
||||||
DownloadFolder string `json:"download_folder"`
|
Categories []string
|
||||||
Categories []string `json:"categories"`
|
storage *store.TorrentStorage
|
||||||
Storage *TorrentStorage
|
logger zerolog.Logger
|
||||||
logger zerolog.Logger
|
Tags []string
|
||||||
Tags []string
|
|
||||||
RefreshInterval int
|
|
||||||
SkipPreCache bool
|
|
||||||
|
|
||||||
downloadSemaphore chan struct{}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func New() *QBit {
|
func New() *QBit {
|
||||||
_cfg := config.Get()
|
_cfg := config.Get()
|
||||||
cfg := _cfg.QBitTorrent
|
cfg := _cfg.QBitTorrent
|
||||||
port := cmp.Or(_cfg.Port, os.Getenv("QBIT_PORT"), "8282")
|
|
||||||
refreshInterval := cmp.Or(cfg.RefreshInterval, 10)
|
|
||||||
return &QBit{
|
return &QBit{
|
||||||
Username: cfg.Username,
|
Username: cfg.Username,
|
||||||
Password: cfg.Password,
|
Password: cfg.Password,
|
||||||
Port: port,
|
DownloadFolder: cfg.DownloadFolder,
|
||||||
DownloadFolder: cfg.DownloadFolder,
|
Categories: cfg.Categories,
|
||||||
Categories: cfg.Categories,
|
storage: store.Get().Torrents(),
|
||||||
Storage: NewTorrentStorage(filepath.Join(_cfg.Path, "torrents.json")),
|
logger: logger.New("qbit"),
|
||||||
logger: logger.New("qbit"),
|
|
||||||
RefreshInterval: refreshInterval,
|
|
||||||
SkipPreCache: cfg.SkipPreCache,
|
|
||||||
downloadSemaphore: make(chan struct{}, cmp.Or(cfg.MaxDownloads, 5)),
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (q *QBit) Reset() {
|
func (q *QBit) Reset() {
|
||||||
if q.Storage != nil {
|
if q.storage != nil {
|
||||||
q.Storage.Reset()
|
q.storage.Reset()
|
||||||
}
|
}
|
||||||
q.Tags = nil
|
q.Tags = nil
|
||||||
close(q.downloadSemaphore)
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -7,12 +7,12 @@ import (
|
|||||||
|
|
||||||
func (q *QBit) Routes() http.Handler {
|
func (q *QBit) Routes() http.Handler {
|
||||||
r := chi.NewRouter()
|
r := chi.NewRouter()
|
||||||
r.Use(q.CategoryContext)
|
r.Use(q.categoryContext)
|
||||||
r.Group(func(r chi.Router) {
|
r.Group(func(r chi.Router) {
|
||||||
r.Use(q.authContext)
|
r.Use(q.authContext)
|
||||||
r.Post("/auth/login", q.handleLogin)
|
r.Post("/auth/login", q.handleLogin)
|
||||||
r.Route("/torrents", func(r chi.Router) {
|
r.Route("/torrents", func(r chi.Router) {
|
||||||
r.Use(HashesCtx)
|
r.Use(hashesContext)
|
||||||
r.Get("/info", q.handleTorrentsInfo)
|
r.Get("/info", q.handleTorrentsInfo)
|
||||||
r.Post("/add", q.handleTorrentsAdd)
|
r.Post("/add", q.handleTorrentsAdd)
|
||||||
r.Post("/delete", q.handleTorrentsDelete)
|
r.Post("/delete", q.handleTorrentsDelete)
|
||||||
|
|||||||
@@ -1,38 +1,35 @@
|
|||||||
package qbit
|
package qbit
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"cmp"
|
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"github.com/sirrobot01/decypharr/internal/request"
|
|
||||||
"github.com/sirrobot01/decypharr/internal/utils"
|
"github.com/sirrobot01/decypharr/internal/utils"
|
||||||
"github.com/sirrobot01/decypharr/pkg/arr"
|
"github.com/sirrobot01/decypharr/pkg/arr"
|
||||||
"github.com/sirrobot01/decypharr/pkg/debrid/debrid"
|
"github.com/sirrobot01/decypharr/pkg/store"
|
||||||
debridTypes "github.com/sirrobot01/decypharr/pkg/debrid/types"
|
|
||||||
"github.com/sirrobot01/decypharr/pkg/service"
|
|
||||||
"io"
|
"io"
|
||||||
"mime/multipart"
|
"mime/multipart"
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
// All torrent related helpers goes here
|
// All torrent-related helpers goes here
|
||||||
|
func (q *QBit) addMagnet(ctx context.Context, url string, arr *arr.Arr, debrid string, action string) error {
|
||||||
func (q *QBit) AddMagnet(ctx context.Context, url, category string) error {
|
|
||||||
magnet, err := utils.GetMagnetFromUrl(url)
|
magnet, err := utils.GetMagnetFromUrl(url)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("error parsing magnet link: %w", err)
|
return fmt.Errorf("error parsing magnet link: %w", err)
|
||||||
}
|
}
|
||||||
err = q.Process(ctx, magnet, category)
|
_store := store.Get()
|
||||||
|
|
||||||
|
importReq := store.NewImportRequest(debrid, q.DownloadFolder, magnet, arr, action, false, "", store.ImportTypeQBitTorrent)
|
||||||
|
|
||||||
|
err = _store.AddTorrent(ctx, importReq)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("failed to process torrent: %w", err)
|
return fmt.Errorf("failed to process torrent: %w", err)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (q *QBit) AddTorrent(ctx context.Context, fileHeader *multipart.FileHeader, category string) error {
|
func (q *QBit) addTorrent(ctx context.Context, fileHeader *multipart.FileHeader, arr *arr.Arr, debrid string, action string) error {
|
||||||
file, _ := fileHeader.Open()
|
file, _ := fileHeader.Open()
|
||||||
defer file.Close()
|
defer file.Close()
|
||||||
var reader io.Reader = file
|
var reader io.Reader = file
|
||||||
@@ -40,226 +37,28 @@ func (q *QBit) AddTorrent(ctx context.Context, fileHeader *multipart.FileHeader,
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("error reading file: %s \n %w", fileHeader.Filename, err)
|
return fmt.Errorf("error reading file: %s \n %w", fileHeader.Filename, err)
|
||||||
}
|
}
|
||||||
err = q.Process(ctx, magnet, category)
|
_store := store.Get()
|
||||||
|
importReq := store.NewImportRequest(debrid, q.DownloadFolder, magnet, arr, action, false, "", store.ImportTypeQBitTorrent)
|
||||||
|
err = _store.AddTorrent(ctx, importReq)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("failed to process torrent: %w", err)
|
return fmt.Errorf("failed to process torrent: %w", err)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (q *QBit) Process(ctx context.Context, magnet *utils.Magnet, category string) error {
|
func (q *QBit) ResumeTorrent(t *store.Torrent) bool {
|
||||||
svc := service.GetService()
|
|
||||||
torrent := createTorrentFromMagnet(magnet, category, "auto")
|
|
||||||
a, ok := ctx.Value("arr").(*arr.Arr)
|
|
||||||
if !ok {
|
|
||||||
return fmt.Errorf("arr not found in context")
|
|
||||||
}
|
|
||||||
isSymlink := ctx.Value("isSymlink").(bool)
|
|
||||||
debridTorrent, err := debrid.ProcessTorrent(svc.Debrid, magnet, a, isSymlink, false)
|
|
||||||
if err != nil || debridTorrent == nil {
|
|
||||||
if err == nil {
|
|
||||||
err = fmt.Errorf("failed to process torrent")
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
torrent = q.UpdateTorrentMin(torrent, debridTorrent)
|
|
||||||
q.Storage.AddOrUpdate(torrent)
|
|
||||||
go q.ProcessFiles(torrent, debridTorrent, a, isSymlink) // We can send async for file processing not to delay the response
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (q *QBit) ProcessFiles(torrent *Torrent, debridTorrent *debridTypes.Torrent, arr *arr.Arr, isSymlink bool) {
|
|
||||||
svc := service.GetService()
|
|
||||||
client := svc.Debrid.GetClient(debridTorrent.Debrid)
|
|
||||||
downloadingStatuses := client.GetDownloadingStatus()
|
|
||||||
for debridTorrent.Status != "downloaded" {
|
|
||||||
q.logger.Debug().Msgf("%s <- (%s) Download Progress: %.2f%%", debridTorrent.Debrid, debridTorrent.Name, debridTorrent.Progress)
|
|
||||||
dbT, err := client.CheckStatus(debridTorrent, isSymlink)
|
|
||||||
if err != nil {
|
|
||||||
if dbT != nil && dbT.Id != "" {
|
|
||||||
// Delete the torrent if it was not downloaded
|
|
||||||
go func() {
|
|
||||||
_ = client.DeleteTorrent(dbT.Id)
|
|
||||||
}()
|
|
||||||
}
|
|
||||||
q.logger.Error().Msgf("Error checking status: %v", err)
|
|
||||||
q.MarkAsFailed(torrent)
|
|
||||||
go func() {
|
|
||||||
if err := arr.Refresh(); err != nil {
|
|
||||||
q.logger.Error().Msgf("Error refreshing arr: %v", err)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
debridTorrent = dbT
|
|
||||||
torrent = q.UpdateTorrentMin(torrent, debridTorrent)
|
|
||||||
|
|
||||||
// Exit the loop for downloading statuses to prevent memory buildup
|
|
||||||
if debridTorrent.Status == "downloaded" || !utils.Contains(downloadingStatuses, debridTorrent.Status) {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
if !utils.Contains(client.GetDownloadingStatus(), debridTorrent.Status) {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
time.Sleep(time.Duration(q.RefreshInterval) * time.Second)
|
|
||||||
}
|
|
||||||
var torrentSymlinkPath string
|
|
||||||
var err error
|
|
||||||
debridTorrent.Arr = arr
|
|
||||||
|
|
||||||
// Check if debrid supports webdav by checking cache
|
|
||||||
if isSymlink {
|
|
||||||
timer := time.Now()
|
|
||||||
cache, useWebdav := svc.Debrid.Caches[debridTorrent.Debrid]
|
|
||||||
if useWebdav {
|
|
||||||
q.logger.Info().Msgf("Using internal webdav for %s", debridTorrent.Debrid)
|
|
||||||
|
|
||||||
// Use webdav to download the file
|
|
||||||
|
|
||||||
if err := cache.AddTorrent(debridTorrent); err != nil {
|
|
||||||
q.logger.Error().Msgf("Error adding torrent to cache: %v", err)
|
|
||||||
q.MarkAsFailed(torrent)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
rclonePath := filepath.Join(debridTorrent.MountPath, cache.GetTorrentFolder(debridTorrent)) // /mnt/remote/realdebrid/MyTVShow
|
|
||||||
torrentFolderNoExt := utils.RemoveExtension(debridTorrent.Name)
|
|
||||||
torrentSymlinkPath, err = q.createSymlinksWebdav(debridTorrent, rclonePath, torrentFolderNoExt) // /mnt/symlinks/{category}/MyTVShow/
|
|
||||||
|
|
||||||
} else {
|
|
||||||
// User is using either zurg or debrid webdav
|
|
||||||
torrentSymlinkPath, err = q.ProcessSymlink(torrent) // /mnt/symlinks/{category}/MyTVShow/
|
|
||||||
}
|
|
||||||
q.logger.Info().Msgf("Adding %s took %s", debridTorrent.Name, time.Since(timer))
|
|
||||||
} else {
|
|
||||||
torrentSymlinkPath, err = q.ProcessManualFile(torrent)
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
q.MarkAsFailed(torrent)
|
|
||||||
go func() {
|
|
||||||
_ = client.DeleteTorrent(debridTorrent.Id)
|
|
||||||
}()
|
|
||||||
q.logger.Info().Msgf("Error: %v", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
torrent.TorrentPath = torrentSymlinkPath
|
|
||||||
q.UpdateTorrent(torrent, debridTorrent)
|
|
||||||
go func() {
|
|
||||||
if err := request.SendDiscordMessage("download_complete", "success", torrent.discordContext()); err != nil {
|
|
||||||
q.logger.Error().Msgf("Error sending discord message: %v", err)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
if err := arr.Refresh(); err != nil {
|
|
||||||
q.logger.Error().Msgf("Error refreshing arr: %v", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (q *QBit) MarkAsFailed(t *Torrent) *Torrent {
|
|
||||||
t.State = "error"
|
|
||||||
q.Storage.AddOrUpdate(t)
|
|
||||||
go func() {
|
|
||||||
if err := request.SendDiscordMessage("download_failed", "error", t.discordContext()); err != nil {
|
|
||||||
q.logger.Error().Msgf("Error sending discord message: %v", err)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
return t
|
|
||||||
}
|
|
||||||
|
|
||||||
func (q *QBit) UpdateTorrentMin(t *Torrent, debridTorrent *debridTypes.Torrent) *Torrent {
|
|
||||||
if debridTorrent == nil {
|
|
||||||
return t
|
|
||||||
}
|
|
||||||
|
|
||||||
addedOn, err := time.Parse(time.RFC3339, debridTorrent.Added)
|
|
||||||
if err != nil {
|
|
||||||
addedOn = time.Now()
|
|
||||||
}
|
|
||||||
totalSize := debridTorrent.Bytes
|
|
||||||
progress := (cmp.Or(debridTorrent.Progress, 0.0)) / 100.0
|
|
||||||
sizeCompleted := int64(float64(totalSize) * progress)
|
|
||||||
|
|
||||||
var speed int64
|
|
||||||
if debridTorrent.Speed != 0 {
|
|
||||||
speed = debridTorrent.Speed
|
|
||||||
}
|
|
||||||
var eta int
|
|
||||||
if speed != 0 {
|
|
||||||
eta = int((totalSize - sizeCompleted) / speed)
|
|
||||||
}
|
|
||||||
t.ID = debridTorrent.Id
|
|
||||||
t.Name = debridTorrent.Name
|
|
||||||
t.AddedOn = addedOn.Unix()
|
|
||||||
t.DebridTorrent = debridTorrent
|
|
||||||
t.Debrid = debridTorrent.Debrid
|
|
||||||
t.Size = totalSize
|
|
||||||
t.Completed = sizeCompleted
|
|
||||||
t.Downloaded = sizeCompleted
|
|
||||||
t.DownloadedSession = sizeCompleted
|
|
||||||
t.Uploaded = sizeCompleted
|
|
||||||
t.UploadedSession = sizeCompleted
|
|
||||||
t.AmountLeft = totalSize - sizeCompleted
|
|
||||||
t.Progress = progress
|
|
||||||
t.Eta = eta
|
|
||||||
t.Dlspeed = speed
|
|
||||||
t.Upspeed = speed
|
|
||||||
t.SavePath = filepath.Join(q.DownloadFolder, t.Category) + string(os.PathSeparator)
|
|
||||||
t.ContentPath = filepath.Join(t.SavePath, t.Name) + string(os.PathSeparator)
|
|
||||||
return t
|
|
||||||
}
|
|
||||||
|
|
||||||
func (q *QBit) UpdateTorrent(t *Torrent, debridTorrent *debridTypes.Torrent) *Torrent {
|
|
||||||
if debridTorrent == nil {
|
|
||||||
return t
|
|
||||||
}
|
|
||||||
|
|
||||||
if debridClient := service.GetDebrid().GetClient(debridTorrent.Debrid); debridClient != nil {
|
|
||||||
if debridTorrent.Status != "downloaded" {
|
|
||||||
_ = debridClient.UpdateTorrent(debridTorrent)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
t = q.UpdateTorrentMin(t, debridTorrent)
|
|
||||||
t.ContentPath = t.TorrentPath + string(os.PathSeparator)
|
|
||||||
|
|
||||||
if t.IsReady() {
|
|
||||||
t.State = "pausedUP"
|
|
||||||
q.Storage.Update(t)
|
|
||||||
return t
|
|
||||||
}
|
|
||||||
|
|
||||||
ticker := time.NewTicker(100 * time.Millisecond)
|
|
||||||
defer ticker.Stop()
|
|
||||||
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case <-ticker.C:
|
|
||||||
if t.IsReady() {
|
|
||||||
t.State = "pausedUP"
|
|
||||||
q.Storage.Update(t)
|
|
||||||
return t
|
|
||||||
}
|
|
||||||
updatedT := q.UpdateTorrent(t, debridTorrent)
|
|
||||||
t = updatedT
|
|
||||||
|
|
||||||
case <-time.After(10 * time.Minute): // Add a timeout
|
|
||||||
return t
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (q *QBit) ResumeTorrent(t *Torrent) bool {
|
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
func (q *QBit) PauseTorrent(t *Torrent) bool {
|
func (q *QBit) PauseTorrent(t *store.Torrent) bool {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
func (q *QBit) RefreshTorrent(t *Torrent) bool {
|
func (q *QBit) RefreshTorrent(t *store.Torrent) bool {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
func (q *QBit) GetTorrentProperties(t *Torrent) *TorrentProperties {
|
func (q *QBit) GetTorrentProperties(t *store.Torrent) *TorrentProperties {
|
||||||
return &TorrentProperties{
|
return &TorrentProperties{
|
||||||
AdditionDate: t.AddedOn,
|
AdditionDate: t.AddedOn,
|
||||||
Comment: "Debrid Blackhole <https://github.com/sirrobot01/decypharr>",
|
Comment: "Debrid Blackhole <https://github.com/sirrobot01/decypharr>",
|
||||||
@@ -284,21 +83,7 @@ func (q *QBit) GetTorrentProperties(t *Torrent) *TorrentProperties {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (q *QBit) GetTorrentFiles(t *Torrent) []*TorrentFile {
|
func (q *QBit) setTorrentTags(t *store.Torrent, tags []string) bool {
|
||||||
files := make([]*TorrentFile, 0)
|
|
||||||
if t.DebridTorrent == nil {
|
|
||||||
return files
|
|
||||||
}
|
|
||||||
for _, file := range t.DebridTorrent.Files {
|
|
||||||
files = append(files, &TorrentFile{
|
|
||||||
Name: file.Path,
|
|
||||||
Size: file.Size,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
return files
|
|
||||||
}
|
|
||||||
|
|
||||||
func (q *QBit) SetTorrentTags(t *Torrent, tags []string) bool {
|
|
||||||
torrentTags := strings.Split(t.Tags, ",")
|
torrentTags := strings.Split(t.Tags, ",")
|
||||||
for _, tag := range tags {
|
for _, tag := range tags {
|
||||||
if tag == "" {
|
if tag == "" {
|
||||||
@@ -312,20 +97,20 @@ func (q *QBit) SetTorrentTags(t *Torrent, tags []string) bool {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
t.Tags = strings.Join(torrentTags, ",")
|
t.Tags = strings.Join(torrentTags, ",")
|
||||||
q.Storage.Update(t)
|
q.storage.Update(t)
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
func (q *QBit) RemoveTorrentTags(t *Torrent, tags []string) bool {
|
func (q *QBit) removeTorrentTags(t *store.Torrent, tags []string) bool {
|
||||||
torrentTags := strings.Split(t.Tags, ",")
|
torrentTags := strings.Split(t.Tags, ",")
|
||||||
newTorrentTags := utils.RemoveItem(torrentTags, tags...)
|
newTorrentTags := utils.RemoveItem(torrentTags, tags...)
|
||||||
q.Tags = utils.RemoveItem(q.Tags, tags...)
|
q.Tags = utils.RemoveItem(q.Tags, tags...)
|
||||||
t.Tags = strings.Join(newTorrentTags, ",")
|
t.Tags = strings.Join(newTorrentTags, ",")
|
||||||
q.Storage.Update(t)
|
q.storage.Update(t)
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
func (q *QBit) AddTags(tags []string) bool {
|
func (q *QBit) addTags(tags []string) bool {
|
||||||
for _, tag := range tags {
|
for _, tag := range tags {
|
||||||
if tag == "" {
|
if tag == "" {
|
||||||
continue
|
continue
|
||||||
@@ -336,8 +121,3 @@ func (q *QBit) AddTags(tags []string) bool {
|
|||||||
}
|
}
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
func (q *QBit) RemoveTags(tags []string) bool {
|
|
||||||
q.Tags = utils.RemoveItem(q.Tags, tags...)
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -1,11 +1,5 @@
|
|||||||
package qbit
|
package qbit
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"github.com/sirrobot01/decypharr/pkg/debrid/types"
|
|
||||||
"sync"
|
|
||||||
)
|
|
||||||
|
|
||||||
type BuildInfo struct {
|
type BuildInfo struct {
|
||||||
Libtorrent string `json:"libtorrent"`
|
Libtorrent string `json:"libtorrent"`
|
||||||
Bitness int `json:"bitness"`
|
Bitness int `json:"bitness"`
|
||||||
@@ -172,76 +166,6 @@ type TorrentCategory struct {
|
|||||||
SavePath string `json:"savePath"`
|
SavePath string `json:"savePath"`
|
||||||
}
|
}
|
||||||
|
|
||||||
type Torrent struct {
|
|
||||||
ID string `json:"id"`
|
|
||||||
DebridTorrent *types.Torrent `json:"-"`
|
|
||||||
Debrid string `json:"debrid"`
|
|
||||||
TorrentPath string `json:"-"`
|
|
||||||
|
|
||||||
AddedOn int64 `json:"added_on,omitempty"`
|
|
||||||
AmountLeft int64 `json:"amount_left"`
|
|
||||||
AutoTmm bool `json:"auto_tmm"`
|
|
||||||
Availability float64 `json:"availability,omitempty"`
|
|
||||||
Category string `json:"category,omitempty"`
|
|
||||||
Completed int64 `json:"completed"`
|
|
||||||
CompletionOn int `json:"completion_on,omitempty"`
|
|
||||||
ContentPath string `json:"content_path"`
|
|
||||||
DlLimit int `json:"dl_limit"`
|
|
||||||
Dlspeed int64 `json:"dlspeed"`
|
|
||||||
Downloaded int64 `json:"downloaded"`
|
|
||||||
DownloadedSession int64 `json:"downloaded_session"`
|
|
||||||
Eta int `json:"eta"`
|
|
||||||
FlPiecePrio bool `json:"f_l_piece_prio,omitempty"`
|
|
||||||
ForceStart bool `json:"force_start,omitempty"`
|
|
||||||
Hash string `json:"hash"`
|
|
||||||
LastActivity int64 `json:"last_activity,omitempty"`
|
|
||||||
MagnetUri string `json:"magnet_uri,omitempty"`
|
|
||||||
MaxRatio int `json:"max_ratio,omitempty"`
|
|
||||||
MaxSeedingTime int `json:"max_seeding_time,omitempty"`
|
|
||||||
Name string `json:"name,omitempty"`
|
|
||||||
NumComplete int `json:"num_complete,omitempty"`
|
|
||||||
NumIncomplete int `json:"num_incomplete,omitempty"`
|
|
||||||
NumLeechs int `json:"num_leechs,omitempty"`
|
|
||||||
NumSeeds int `json:"num_seeds,omitempty"`
|
|
||||||
Priority int `json:"priority,omitempty"`
|
|
||||||
Progress float64 `json:"progress"`
|
|
||||||
Ratio int `json:"ratio,omitempty"`
|
|
||||||
RatioLimit int `json:"ratio_limit,omitempty"`
|
|
||||||
SavePath string `json:"save_path"`
|
|
||||||
SeedingTimeLimit int `json:"seeding_time_limit,omitempty"`
|
|
||||||
SeenComplete int64 `json:"seen_complete,omitempty"`
|
|
||||||
SeqDl bool `json:"seq_dl"`
|
|
||||||
Size int64 `json:"size,omitempty"`
|
|
||||||
State string `json:"state,omitempty"`
|
|
||||||
SuperSeeding bool `json:"super_seeding"`
|
|
||||||
Tags string `json:"tags,omitempty"`
|
|
||||||
TimeActive int `json:"time_active,omitempty"`
|
|
||||||
TotalSize int64 `json:"total_size,omitempty"`
|
|
||||||
Tracker string `json:"tracker,omitempty"`
|
|
||||||
UpLimit int64 `json:"up_limit,omitempty"`
|
|
||||||
Uploaded int64 `json:"uploaded,omitempty"`
|
|
||||||
UploadedSession int64 `json:"uploaded_session,omitempty"`
|
|
||||||
Upspeed int64 `json:"upspeed,omitempty"`
|
|
||||||
Source string `json:"source,omitempty"`
|
|
||||||
|
|
||||||
Mu sync.Mutex `json:"-"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *Torrent) IsReady() bool {
|
|
||||||
return (t.AmountLeft <= 0 || t.Progress == 1) && t.TorrentPath != ""
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *Torrent) discordContext() string {
|
|
||||||
format := `
|
|
||||||
**Name:** %s
|
|
||||||
**Arr:** %s
|
|
||||||
**Hash:** %s
|
|
||||||
**MagnetURI:** %s
|
|
||||||
**Debrid:** %s
|
|
||||||
`
|
|
||||||
return fmt.Sprintf(format, t.Name, t.Category, t.Hash, t.MagnetUri, t.Debrid)
|
|
||||||
}
|
|
||||||
|
|
||||||
type TorrentProperties struct {
|
type TorrentProperties struct {
|
||||||
AdditionDate int64 `json:"addition_date,omitempty"`
|
AdditionDate int64 `json:"addition_date,omitempty"`
|
||||||
Comment string `json:"comment,omitempty"`
|
Comment string `json:"comment,omitempty"`
|
||||||
@@ -278,18 +202,7 @@ type TorrentProperties struct {
|
|||||||
UpSpeedAvg int `json:"up_speed_avg,omitempty"`
|
UpSpeedAvg int `json:"up_speed_avg,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
type TorrentFile struct {
|
func getAppPreferences() *AppPreferences {
|
||||||
Index int `json:"index,omitempty"`
|
|
||||||
Name string `json:"name,omitempty"`
|
|
||||||
Size int64 `json:"size,omitempty"`
|
|
||||||
Progress int `json:"progress,omitempty"`
|
|
||||||
Priority int `json:"priority,omitempty"`
|
|
||||||
IsSeed bool `json:"is_seed,omitempty"`
|
|
||||||
PieceRange []int `json:"piece_range,omitempty"`
|
|
||||||
Availability float64 `json:"availability,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewAppPreferences() *AppPreferences {
|
|
||||||
preferences := &AppPreferences{
|
preferences := &AppPreferences{
|
||||||
AddTrackers: "",
|
AddTrackers: "",
|
||||||
AddTrackersEnabled: false,
|
AddTrackersEnabled: false,
|
||||||
|
|||||||
701
pkg/rar/rarar.go
Normal file
@@ -0,0 +1,701 @@
|
|||||||
|
// Source: https://github.com/eliasbenb/RARAR.py
|
||||||
|
// Note that this code only translates the original Python for RAR3 (not RAR5) support.
|
||||||
|
|
||||||
|
package rar
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/binary"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"math/rand"
|
||||||
|
"net/http"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
"unicode/utf8"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Constants from the Python code
|
||||||
|
var (
|
||||||
|
// Chunk sizes
|
||||||
|
DefaultChunkSize = 4096
|
||||||
|
HttpChunkSize = 32768
|
||||||
|
MaxSearchSize = 1 << 20 // 1MB
|
||||||
|
|
||||||
|
// RAR marker and block types
|
||||||
|
Rar3Marker = []byte{0x52, 0x61, 0x72, 0x21, 0x1A, 0x07, 0x00}
|
||||||
|
BlockFile = byte(0x74)
|
||||||
|
BlockHeader = byte(0x73)
|
||||||
|
BlockMarker = byte(0x72)
|
||||||
|
BlockEnd = byte(0x7B)
|
||||||
|
|
||||||
|
// Header flags
|
||||||
|
FlagDirectory = 0xE0
|
||||||
|
FlagHasHighSize = 0x100
|
||||||
|
FlagHasUnicodeName = 0x200
|
||||||
|
FlagHasData = 0x8000
|
||||||
|
)
|
||||||
|
|
||||||
|
// Compression methods
|
||||||
|
var CompressionMethods = map[byte]string{
|
||||||
|
0x30: "Store",
|
||||||
|
0x31: "Fastest",
|
||||||
|
0x32: "Fast",
|
||||||
|
0x33: "Normal",
|
||||||
|
0x34: "Good",
|
||||||
|
0x35: "Best",
|
||||||
|
}
|
||||||
|
|
||||||
|
// Error definitions
|
||||||
|
var (
|
||||||
|
ErrMarkerNotFound = errors.New("RAR marker not found within search limit")
|
||||||
|
ErrInvalidFormat = errors.New("invalid RAR format")
|
||||||
|
ErrNetworkError = errors.New("network error")
|
||||||
|
ErrRangeRequestsNotSupported = errors.New("server does not support range requests")
|
||||||
|
ErrCompressionNotSupported = errors.New("compression method not supported")
|
||||||
|
ErrDirectoryExtractNotSupported = errors.New("directory extract not supported")
|
||||||
|
)
|
||||||
|
|
||||||
|
// Name returns the base filename of the file
|
||||||
|
func (f *File) Name() string {
|
||||||
|
if i := strings.LastIndexAny(f.Path, "\\/"); i >= 0 {
|
||||||
|
return f.Path[i+1:]
|
||||||
|
}
|
||||||
|
return f.Path
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *File) ByteRange() *[2]int64 {
|
||||||
|
return &[2]int64{f.DataOffset, f.DataOffset + f.CompressedSize - 1}
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewHttpFile(url string) (*HttpFile, error) {
|
||||||
|
client := &http.Client{}
|
||||||
|
file := &HttpFile{
|
||||||
|
URL: url,
|
||||||
|
Position: 0,
|
||||||
|
Client: client,
|
||||||
|
MaxRetries: 3,
|
||||||
|
RetryDelay: time.Second,
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get file size
|
||||||
|
size, err := file.getFileSize()
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to get file size: %w", err)
|
||||||
|
}
|
||||||
|
file.FileSize = size
|
||||||
|
|
||||||
|
return file, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *HttpFile) doWithRetry(operation func() (interface{}, error)) (interface{}, error) {
|
||||||
|
var lastErr error
|
||||||
|
for attempt := 0; attempt <= f.MaxRetries; attempt++ {
|
||||||
|
if attempt > 0 {
|
||||||
|
// Jitter + exponential backoff delay
|
||||||
|
delay := f.RetryDelay * time.Duration(1<<uint(attempt-1))
|
||||||
|
jitter := time.Duration(rand.Int63n(int64(delay / 4)))
|
||||||
|
time.Sleep(delay + jitter)
|
||||||
|
}
|
||||||
|
|
||||||
|
result, err := operation()
|
||||||
|
if err == nil {
|
||||||
|
return result, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
lastErr = err
|
||||||
|
// Only retry on network errors
|
||||||
|
if !errors.Is(err, ErrNetworkError) {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, fmt.Errorf("after %d retries: %w", f.MaxRetries, lastErr)
|
||||||
|
}
|
||||||
|
|
||||||
|
// getFileSize gets the total file size from the server
|
||||||
|
func (f *HttpFile) getFileSize() (int64, error) {
|
||||||
|
result, err := f.doWithRetry(func() (interface{}, error) {
|
||||||
|
resp, err := f.Client.Head(f.URL)
|
||||||
|
if err != nil {
|
||||||
|
return int64(0), fmt.Errorf("%w: %v", ErrNetworkError, err)
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
if resp.StatusCode != http.StatusOK {
|
||||||
|
return int64(0), fmt.Errorf("%w: unexpected status code: %d", ErrNetworkError, resp.StatusCode)
|
||||||
|
}
|
||||||
|
|
||||||
|
contentLength := resp.Header.Get("Content-Length")
|
||||||
|
if contentLength == "" {
|
||||||
|
return int64(0), fmt.Errorf("%w: content length not provided", ErrNetworkError)
|
||||||
|
}
|
||||||
|
|
||||||
|
var size int64
|
||||||
|
_, err = fmt.Sscanf(contentLength, "%d", &size)
|
||||||
|
if err != nil {
|
||||||
|
return int64(0), fmt.Errorf("%w: %v", ErrNetworkError, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return size, nil
|
||||||
|
})
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return result.(int64), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReadAt implements the io.ReaderAt interface
|
||||||
|
func (f *HttpFile) ReadAt(p []byte, off int64) (n int, err error) {
|
||||||
|
if len(p) == 0 {
|
||||||
|
return 0, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ensure we don't read past the end of the file
|
||||||
|
size := int64(len(p))
|
||||||
|
if f.FileSize > 0 {
|
||||||
|
remaining := f.FileSize - off
|
||||||
|
if remaining <= 0 {
|
||||||
|
return 0, io.EOF
|
||||||
|
}
|
||||||
|
if size > remaining {
|
||||||
|
size = remaining
|
||||||
|
p = p[:size]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
result, err := f.doWithRetry(func() (interface{}, error) {
|
||||||
|
// Create HTTP request with Range header
|
||||||
|
req, err := http.NewRequest("GET", f.URL, nil)
|
||||||
|
if err != nil {
|
||||||
|
return 0, fmt.Errorf("%w: %v", ErrNetworkError, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
end := off + size - 1
|
||||||
|
req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", off, end))
|
||||||
|
|
||||||
|
// Make the request
|
||||||
|
resp, err := f.Client.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
return 0, fmt.Errorf("%w: %v", ErrNetworkError, err)
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
// Handle response
|
||||||
|
switch resp.StatusCode {
|
||||||
|
case http.StatusPartialContent:
|
||||||
|
// Read the content
|
||||||
|
bytesRead, err := io.ReadFull(resp.Body, p)
|
||||||
|
return bytesRead, err
|
||||||
|
case http.StatusOK:
|
||||||
|
// Some servers return the full content instead of partial
|
||||||
|
fullData, err := io.ReadAll(resp.Body)
|
||||||
|
if err != nil {
|
||||||
|
return 0, fmt.Errorf("%w: %v", ErrNetworkError, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if int64(len(fullData)) <= off {
|
||||||
|
return 0, io.EOF
|
||||||
|
}
|
||||||
|
|
||||||
|
end = off + size
|
||||||
|
if int64(len(fullData)) < end {
|
||||||
|
end = int64(len(fullData))
|
||||||
|
}
|
||||||
|
|
||||||
|
copy(p, fullData[off:end])
|
||||||
|
return int(end - off), nil
|
||||||
|
case http.StatusRequestedRangeNotSatisfiable:
|
||||||
|
// We're at EOF
|
||||||
|
return 0, io.EOF
|
||||||
|
default:
|
||||||
|
return 0, fmt.Errorf("%w: unexpected status code: %d", ErrNetworkError, resp.StatusCode)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return result.(int), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewReader creates a new RAR3 reader
|
||||||
|
func NewReader(url string) (*Reader, error) {
|
||||||
|
file, err := NewHttpFile(url)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
reader := &Reader{
|
||||||
|
File: file,
|
||||||
|
ChunkSize: HttpChunkSize,
|
||||||
|
Files: make([]*File, 0),
|
||||||
|
}
|
||||||
|
|
||||||
|
// Find RAR marker
|
||||||
|
marker, err := reader.findMarker()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
reader.Marker = marker
|
||||||
|
pos := reader.Marker + int64(len(Rar3Marker)) // Skip marker block
|
||||||
|
|
||||||
|
headerData, err := reader.readBytes(pos, 7)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(headerData) < 7 {
|
||||||
|
return nil, ErrInvalidFormat
|
||||||
|
}
|
||||||
|
|
||||||
|
headType := headerData[2]
|
||||||
|
headSize := int(binary.LittleEndian.Uint16(headerData[5:7]))
|
||||||
|
|
||||||
|
if headType != BlockHeader {
|
||||||
|
return nil, ErrInvalidFormat
|
||||||
|
}
|
||||||
|
|
||||||
|
// Store the position after the archive header
|
||||||
|
reader.HeaderEndPos = pos + int64(headSize)
|
||||||
|
|
||||||
|
return reader, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// readBytes reads a range of bytes from the file
|
||||||
|
func (r *Reader) readBytes(start int64, length int) ([]byte, error) {
|
||||||
|
if length <= 0 {
|
||||||
|
return []byte{}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
data := make([]byte, length)
|
||||||
|
n, err := r.File.ReadAt(data, start)
|
||||||
|
if err != nil && err != io.EOF {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if n < length {
|
||||||
|
// Partial read, return what we got
|
||||||
|
return data[:n], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return data, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// findMarker finds the RAR marker in the file
|
||||||
|
func (r *Reader) findMarker() (int64, error) {
|
||||||
|
// First try to find marker in the first chunk
|
||||||
|
firstChunkSize := 8192 // 8KB
|
||||||
|
chunk, err := r.readBytes(0, firstChunkSize)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
markerPos := bytes.Index(chunk, Rar3Marker)
|
||||||
|
if markerPos != -1 {
|
||||||
|
return int64(markerPos), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// If not found, continue searching
|
||||||
|
position := int64(firstChunkSize - len(Rar3Marker) + 1)
|
||||||
|
maxSearch := int64(MaxSearchSize)
|
||||||
|
|
||||||
|
for position < maxSearch {
|
||||||
|
chunkSize := min(r.ChunkSize, int(maxSearch-position))
|
||||||
|
chunk, err := r.readBytes(position, chunkSize)
|
||||||
|
if err != nil || len(chunk) == 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
markerPos := bytes.Index(chunk, Rar3Marker)
|
||||||
|
if markerPos != -1 {
|
||||||
|
return position + int64(markerPos), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Move forward by chunk size minus the marker length
|
||||||
|
position += int64(max(1, len(chunk)-len(Rar3Marker)+1))
|
||||||
|
}
|
||||||
|
|
||||||
|
return 0, ErrMarkerNotFound
|
||||||
|
}
|
||||||
|
|
||||||
|
// decodeUnicode decodes the RAR3 compact Unicode filename encoding.
//
// RAR3 stores Unicode names as an ASCII fallback string plus a compressed
// "unicode data" stream: a flag byte introduces groups of 2-bit opcodes,
// each opcode saying how to produce the next output character (copy from
// the ASCII string, take a low byte with high byte 0, combine a low byte
// with the current high byte, or set a new high byte).
//
// Parameters:
//   - asciiStr: the ASCII portion of the name (before the NUL separator).
//   - unicodeData: the encoded stream following the NUL separator.
//
// Returns the decoded name; if unicodeData is empty, asciiStr is returned
// unchanged. Any ASCII characters not consumed by the opcode stream are
// appended at the end.
//
// NOTE(review): the 2-bit flags are consumed LSB-first here
// (flagBits >> (i*2)); the reference unrar decoder consumes them
// MSB-first — confirm against real RAR3 archives with non-ASCII names.
func decodeUnicode(asciiStr string, unicodeData []byte) string {
	if len(unicodeData) == 0 {
		return asciiStr
	}

	result := []rune{}
	asciiPos := 0   // next unread index into asciiStr
	dataPos := 0    // next unread index into unicodeData
	highByte := byte(0) // current high byte for case-2 opcodes

	for dataPos < len(unicodeData) {
		flags := unicodeData[dataPos]
		dataPos++

		// Determine the number of character positions this flag byte controls
		var flagBits uint
		var flagCount int
		var bitCount int

		if flags&0x80 != 0 {
			// Extended flag - controls up to 32 characters (16 bit pairs)
			// Additional flag bytes are folded in while the next marker
			// bit remains set and data is available.
			flagBits = uint(flags)
			bitCount = 1
			for (flagBits&(0x80>>bitCount) != 0) && dataPos < len(unicodeData) {
				flagBits = ((flagBits & ((0x80 >> bitCount) - 1)) << 8) | uint(unicodeData[dataPos])
				dataPos++
				bitCount++
			}
			flagCount = bitCount * 4
		} else {
			// Simple flag - controls 4 characters (4 bit pairs)
			flagBits = uint(flags)
			flagCount = 4
		}

		// Process each 2-bit flag
		for i := 0; i < flagCount; i++ {
			// Stop early once both inputs are exhausted.
			if asciiPos >= len(asciiStr) && dataPos >= len(unicodeData) {
				break
			}

			flagValue := (flagBits >> (i * 2)) & 0x03

			switch flagValue {
			case 0:
				// Use ASCII character
				if asciiPos < len(asciiStr) {
					result = append(result, rune(asciiStr[asciiPos]))
					asciiPos++
				}
			case 1:
				// Unicode character with high byte 0
				if dataPos < len(unicodeData) {
					result = append(result, rune(unicodeData[dataPos]))
					dataPos++
				}
			case 2:
				// Unicode character with current high byte
				if dataPos < len(unicodeData) {
					lowByte := uint(unicodeData[dataPos])
					dataPos++
					result = append(result, rune(lowByte|(uint(highByte)<<8)))
				}
			case 3:
				// Set new high byte
				if dataPos < len(unicodeData) {
					highByte = unicodeData[dataPos]
					dataPos++
				}
			}
		}
	}

	// Append any remaining ASCII characters
	for asciiPos < len(asciiStr) {
		result = append(result, rune(asciiStr[asciiPos]))
		asciiPos++
	}

	return string(result)
}
|
||||||
|
|
||||||
|
// readFiles reads all file entries in the archive
|
||||||
|
func (r *Reader) readFiles() error {
|
||||||
|
pos := r.Marker
|
||||||
|
pos += int64(len(Rar3Marker)) // Skip marker block
|
||||||
|
|
||||||
|
// Read archive header
|
||||||
|
headerData, err := r.readBytes(pos, 7)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(headerData) < 7 {
|
||||||
|
return ErrInvalidFormat
|
||||||
|
}
|
||||||
|
|
||||||
|
headType := headerData[2]
|
||||||
|
headSize := int(binary.LittleEndian.Uint16(headerData[5:7]))
|
||||||
|
|
||||||
|
if headType != BlockHeader {
|
||||||
|
return ErrInvalidFormat
|
||||||
|
}
|
||||||
|
|
||||||
|
pos += int64(headSize) // Skip archive header
|
||||||
|
|
||||||
|
// Track whether we've found the end marker
|
||||||
|
foundEndMarker := false
|
||||||
|
|
||||||
|
// Process file entries
|
||||||
|
for !foundEndMarker {
|
||||||
|
headerData, err := r.readBytes(pos, 7)
|
||||||
|
if err != nil {
|
||||||
|
// Don't stop on EOF, might be temporary network error
|
||||||
|
// For definitive errors, return the error
|
||||||
|
if !errors.Is(err, io.EOF) && !errors.Is(err, ErrNetworkError) {
|
||||||
|
return fmt.Errorf("error reading block header: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// If we get EOF or network error, retry a few times
|
||||||
|
retryCount := 0
|
||||||
|
maxRetries := 3
|
||||||
|
retryDelay := time.Second
|
||||||
|
|
||||||
|
for retryCount < maxRetries {
|
||||||
|
time.Sleep(retryDelay * time.Duration(1<<uint(retryCount)))
|
||||||
|
retryCount++
|
||||||
|
|
||||||
|
headerData, err = r.readBytes(pos, 7)
|
||||||
|
if err == nil && len(headerData) >= 7 {
|
||||||
|
break // Successfully got data
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(headerData) < 7 {
|
||||||
|
return fmt.Errorf("failed to read block header after retries: %w", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(headerData) < 7 {
|
||||||
|
return fmt.Errorf("incomplete block header at position %d", pos)
|
||||||
|
}
|
||||||
|
|
||||||
|
headType := headerData[2]
|
||||||
|
headFlags := int(binary.LittleEndian.Uint16(headerData[3:5]))
|
||||||
|
headSize := int(binary.LittleEndian.Uint16(headerData[5:7]))
|
||||||
|
|
||||||
|
if headType == BlockEnd {
|
||||||
|
// End of archive
|
||||||
|
foundEndMarker = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
if headType == BlockFile {
|
||||||
|
// Get complete header data
|
||||||
|
completeHeader, err := r.readBytes(pos, headSize)
|
||||||
|
if err != nil || len(completeHeader) < headSize {
|
||||||
|
// Retry logic for incomplete headers
|
||||||
|
retryCount := 0
|
||||||
|
maxRetries := 3
|
||||||
|
retryDelay := time.Second
|
||||||
|
|
||||||
|
for retryCount < maxRetries && (err != nil || len(completeHeader) < headSize) {
|
||||||
|
time.Sleep(retryDelay * time.Duration(1<<uint(retryCount)))
|
||||||
|
retryCount++
|
||||||
|
|
||||||
|
completeHeader, err = r.readBytes(pos, headSize)
|
||||||
|
if err == nil && len(completeHeader) >= headSize {
|
||||||
|
break // Successfully got data
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(completeHeader) < headSize {
|
||||||
|
return fmt.Errorf("failed to read complete file header after retries: %w", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fileInfo, err := r.parseFileHeader(completeHeader, pos)
|
||||||
|
if err == nil && fileInfo != nil {
|
||||||
|
r.Files = append(r.Files, fileInfo)
|
||||||
|
pos = fileInfo.NextOffset
|
||||||
|
} else {
|
||||||
|
pos += int64(headSize)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// Skip non-file block
|
||||||
|
pos += int64(headSize)
|
||||||
|
|
||||||
|
// Skip data if present
|
||||||
|
if headFlags&FlagHasData != 0 {
|
||||||
|
// Read data size
|
||||||
|
sizeData, err := r.readBytes(pos-4, 4)
|
||||||
|
if err != nil || len(sizeData) < 4 {
|
||||||
|
// Retry logic for data size read errors
|
||||||
|
retryCount := 0
|
||||||
|
maxRetries := 3
|
||||||
|
retryDelay := time.Second
|
||||||
|
|
||||||
|
for retryCount < maxRetries && (err != nil || len(sizeData) < 4) {
|
||||||
|
time.Sleep(retryDelay * time.Duration(1<<uint(retryCount)))
|
||||||
|
retryCount++
|
||||||
|
|
||||||
|
sizeData, err = r.readBytes(pos-4, 4)
|
||||||
|
if err == nil && len(sizeData) >= 4 {
|
||||||
|
break // Successfully got data
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(sizeData) < 4 {
|
||||||
|
return fmt.Errorf("failed to read data size after retries: %w", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
dataSize := int64(binary.LittleEndian.Uint32(sizeData))
|
||||||
|
pos += dataSize
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if !foundEndMarker {
|
||||||
|
return fmt.Errorf("end marker not found in archive")
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseFileHeader parses a file header and returns file info
|
||||||
|
func (r *Reader) parseFileHeader(headerData []byte, position int64) (*File, error) {
|
||||||
|
if len(headerData) < 7 {
|
||||||
|
return nil, fmt.Errorf("header data too short")
|
||||||
|
}
|
||||||
|
|
||||||
|
headType := headerData[2]
|
||||||
|
headFlags := int(binary.LittleEndian.Uint16(headerData[3:5]))
|
||||||
|
headSize := int(binary.LittleEndian.Uint16(headerData[5:7]))
|
||||||
|
|
||||||
|
if headType != BlockFile {
|
||||||
|
return nil, fmt.Errorf("not a file block")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if we have enough data
|
||||||
|
if len(headerData) < 32 {
|
||||||
|
return nil, fmt.Errorf("file header too short")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parse basic file header fields
|
||||||
|
packSize := binary.LittleEndian.Uint32(headerData[7:11])
|
||||||
|
unpackSize := binary.LittleEndian.Uint32(headerData[11:15])
|
||||||
|
// fileOS := headerData[15]
|
||||||
|
fileCRC := binary.LittleEndian.Uint32(headerData[16:20])
|
||||||
|
// fileTime := binary.LittleEndian.Uint32(headerData[20:24])
|
||||||
|
// unpVer := headerData[24]
|
||||||
|
method := headerData[25]
|
||||||
|
nameSize := binary.LittleEndian.Uint16(headerData[26:28])
|
||||||
|
// fileAttr := binary.LittleEndian.Uint32(headerData[28:32])
|
||||||
|
|
||||||
|
// Handle high pack/unp sizes
|
||||||
|
highPackSize := uint32(0)
|
||||||
|
highUnpSize := uint32(0)
|
||||||
|
|
||||||
|
offset := 32 // Start after basic header fields
|
||||||
|
|
||||||
|
if headFlags&FlagHasHighSize != 0 {
|
||||||
|
if offset+8 <= len(headerData) {
|
||||||
|
highPackSize = binary.LittleEndian.Uint32(headerData[offset : offset+4])
|
||||||
|
highUnpSize = binary.LittleEndian.Uint32(headerData[offset+4 : offset+8])
|
||||||
|
}
|
||||||
|
offset += 8
|
||||||
|
}
|
||||||
|
|
||||||
|
// Calculate actual sizes
|
||||||
|
fullPackSize := int64(packSize) + (int64(highPackSize) << 32)
|
||||||
|
fullUnpSize := int64(unpackSize) + (int64(highUnpSize) << 32)
|
||||||
|
|
||||||
|
// Read filename
|
||||||
|
var fileName string
|
||||||
|
if offset+int(nameSize) <= len(headerData) {
|
||||||
|
fileNameBytes := headerData[offset : offset+int(nameSize)]
|
||||||
|
|
||||||
|
if headFlags&FlagHasUnicodeName != 0 {
|
||||||
|
zeroPos := bytes.IndexByte(fileNameBytes, 0)
|
||||||
|
if zeroPos != -1 {
|
||||||
|
// Try UTF-8 first
|
||||||
|
asciiPart := fileNameBytes[:zeroPos]
|
||||||
|
if utf8.Valid(asciiPart) {
|
||||||
|
fileName = string(asciiPart)
|
||||||
|
} else {
|
||||||
|
// Fall back to custom decoder
|
||||||
|
asciiStr := string(asciiPart)
|
||||||
|
unicodePart := fileNameBytes[zeroPos+1:]
|
||||||
|
fileName = decodeUnicode(asciiStr, unicodePart)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// No null byte
|
||||||
|
if utf8.Valid(fileNameBytes) {
|
||||||
|
fileName = string(fileNameBytes)
|
||||||
|
} else {
|
||||||
|
fileName = string(fileNameBytes) // Last resort
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// Non-Unicode filename
|
||||||
|
if utf8.Valid(fileNameBytes) {
|
||||||
|
fileName = string(fileNameBytes)
|
||||||
|
} else {
|
||||||
|
fileName = string(fileNameBytes) // Fallback
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
fileName = fmt.Sprintf("UnknownFile%d", len(r.Files))
|
||||||
|
}
|
||||||
|
|
||||||
|
isDirectory := (headFlags & FlagDirectory) == FlagDirectory
|
||||||
|
|
||||||
|
// Calculate data offsets
|
||||||
|
dataOffset := position + int64(headSize)
|
||||||
|
nextOffset := dataOffset
|
||||||
|
|
||||||
|
// Only add data size if it's not a directory and has data
|
||||||
|
if !isDirectory && headFlags&FlagHasData != 0 {
|
||||||
|
nextOffset += fullPackSize
|
||||||
|
}
|
||||||
|
|
||||||
|
return &File{
|
||||||
|
Path: fileName,
|
||||||
|
Size: fullUnpSize,
|
||||||
|
CompressedSize: fullPackSize,
|
||||||
|
Method: method,
|
||||||
|
CRC: fileCRC,
|
||||||
|
IsDirectory: isDirectory,
|
||||||
|
DataOffset: dataOffset,
|
||||||
|
NextOffset: nextOffset,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetFiles returns all files in the archive
|
||||||
|
func (r *Reader) GetFiles() ([]*File, error) {
|
||||||
|
if len(r.Files) == 0 {
|
||||||
|
err := r.readFiles()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return r.Files, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExtractFile extracts a file from the archive
|
||||||
|
func (r *Reader) ExtractFile(file *File) ([]byte, error) {
|
||||||
|
if file.IsDirectory {
|
||||||
|
return nil, ErrDirectoryExtractNotSupported
|
||||||
|
}
|
||||||
|
|
||||||
|
// Only support "Store" method
|
||||||
|
if file.Method != 0x30 { // 0x30 = "Store"
|
||||||
|
return nil, ErrCompressionNotSupported
|
||||||
|
}
|
||||||
|
|
||||||
|
return r.readBytes(file.DataOffset, int(file.CompressedSize))
|
||||||
|
}
|
||||||
|
|
||||||
|
// min returns the smaller of two ints.
func min(a, b int) int {
	if b < a {
		return b
	}
	return a
}
|
||||||
|
|
||||||
|
// max returns the larger of two ints.
func max(a, b int) int {
	if b > a {
		return b
	}
	return a
}
|
||||||
37
pkg/rar/types.go
Normal file
@@ -0,0 +1,37 @@
|
|||||||
|
package rar
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net/http"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// File represents a single entry (file or directory) parsed from a RAR
// archive's file block headers.
type File struct {
	Path           string // name/path as decoded from the file header
	Size           int64  // uncompressed size in bytes (UNP_SIZE, 64-bit)
	CompressedSize int64  // packed size in bytes (PACK_SIZE, 64-bit)
	Method         byte   // compression method byte; 0x30 means "Store"
	CRC            uint32 // FILE_CRC field from the header
	IsDirectory    bool   // true when the directory flag is set
	DataOffset     int64  // absolute offset of the entry's data in the archive
	NextOffset     int64  // absolute offset of the next block header
}
|
||||||
|
|
||||||
|
// HttpFile is an access point for a RAR archive served through HTTP.
// It bundles the remote URL with read-position tracking and the retry
// policy used for HTTP reads.
type HttpFile struct {
	URL        string        // URL of the remote archive
	Position   int64         // current read position within the remote file
	Client     *http.Client  // HTTP client used for requests
	FileSize   int64         // total size of the remote file in bytes
	MaxRetries int           // maximum number of retries for failed requests
	RetryDelay time.Duration // base delay between retries
}
|
||||||
|
|
||||||
|
// Reader reads RAR3 format archives accessed through an HttpFile.
type Reader struct {
	File         *HttpFile // underlying HTTP-backed archive
	ChunkSize    int       // chunk size used when scanning for the marker
	Marker       int64     // absolute offset of the RAR3 marker block
	HeaderEndPos int64     // Position after the archive header
	Files        []*File   // parsed file entries, populated by readFiles
}
|
||||||
413
pkg/rclone/client.go
Normal file
@@ -0,0 +1,413 @@
|
|||||||
|
package rclone
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"os/exec"
|
||||||
|
"path/filepath"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/sirrobot01/decypharr/internal/config"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Mount creates a mount using the rclone RC API with retry logic
|
||||||
|
func (m *Manager) Mount(provider, webdavURL string) error {
|
||||||
|
return m.mountWithRetry(provider, webdavURL, 3)
|
||||||
|
}
|
||||||
|
|
||||||
|
// mountWithRetry attempts to mount with retry logic
|
||||||
|
func (m *Manager) mountWithRetry(provider, webdavURL string, maxRetries int) error {
|
||||||
|
if !m.IsReady() {
|
||||||
|
if err := m.WaitForReady(30 * time.Second); err != nil {
|
||||||
|
return fmt.Errorf("rclone RC server not ready: %w", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for attempt := 0; attempt <= maxRetries; attempt++ {
|
||||||
|
if attempt > 0 {
|
||||||
|
// Wait before retry
|
||||||
|
wait := time.Duration(attempt*2) * time.Second
|
||||||
|
m.logger.Debug().
|
||||||
|
Int("attempt", attempt).
|
||||||
|
Str("provider", provider).
|
||||||
|
Msg("Retrying mount operation")
|
||||||
|
time.Sleep(wait)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := m.performMount(provider, webdavURL); err != nil {
|
||||||
|
m.logger.Error().
|
||||||
|
Err(err).
|
||||||
|
Str("provider", provider).
|
||||||
|
Int("attempt", attempt+1).
|
||||||
|
Msg("Mount attempt failed")
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil // Success
|
||||||
|
}
|
||||||
|
return fmt.Errorf("mount failed for %s", provider)
|
||||||
|
}
|
||||||
|
|
||||||
|
// performMount performs a single mount attempt for provider: it prepares
// the mount point, registers an rclone WebDAV config, builds the
// mount/mount RC request (mount, VFS, and config options derived from
// the application config), issues it, and records the resulting mount in
// m.mounts. Returns nil when the provider is already mounted.
func (m *Manager) performMount(provider, webdavURL string) error {
	cfg := config.Get()
	// Each provider gets its own subdirectory under the configured roots.
	mountPath := filepath.Join(cfg.Rclone.MountPath, provider)
	cacheDir := ""
	if cfg.Rclone.CacheDir != "" {
		cacheDir = filepath.Join(cfg.Rclone.CacheDir, provider)
	}

	// Create mount directory
	if err := os.MkdirAll(mountPath, 0755); err != nil {
		return fmt.Errorf("failed to create mount directory %s: %w", mountPath, err)
	}

	// Check if already mounted
	m.mountsMutex.RLock()
	existingMount, exists := m.mounts[provider]
	m.mountsMutex.RUnlock()

	if exists && existingMount.Mounted {
		m.logger.Info().Str("provider", provider).Str("path", mountPath).Msg("Already mounted")
		return nil
	}

	// Clean up any stale mount first (known but marked unmounted).
	if exists && !existingMount.Mounted {
		m.forceUnmountPath(mountPath)
	}

	// Create rclone config for this provider
	configName := fmt.Sprintf("decypharr-%s", provider)
	if err := m.createConfig(configName, webdavURL); err != nil {
		return fmt.Errorf("failed to create rclone config: %w", err)
	}

	// Prepare mount arguments
	mountArgs := map[string]interface{}{
		"fs":         fmt.Sprintf("%s:", configName),
		"mountPoint": mountPath,
		"mountType":  "mount", // Use standard FUSE mount
		"mountOpt": map[string]interface{}{
			"AllowNonEmpty": true,
			"AllowOther":    true,
			"DebugFUSE":     false,
			"DeviceName":    fmt.Sprintf("decypharr-%s", provider),
			"VolumeName":    fmt.Sprintf("decypharr-%s", provider),
		},
	}

	configOpts := make(map[string]interface{})

	if cfg.Rclone.BufferSize != "" {
		configOpts["BufferSize"] = cfg.Rclone.BufferSize
	}

	if cacheDir != "" {
		// Create cache directory if specified; on failure the option is
		// simply omitted rather than failing the mount.
		if err := os.MkdirAll(cacheDir, 0755); err != nil {
			m.logger.Warn().Str("cacheDir", cacheDir).Msg("Failed to create cache directory")
		} else {
			configOpts["CacheDir"] = cacheDir
		}
	}

	if len(configOpts) > 0 {
		// Only add _config if there are options to set
		mountArgs["_config"] = configOpts
	}

	// Add VFS options if caching is enabled
	if cfg.Rclone.VfsCacheMode != "off" {
		vfsOpt := map[string]interface{}{
			"CacheMode": cfg.Rclone.VfsCacheMode,
		}

		// Each VFS option is forwarded only when configured.
		if cfg.Rclone.VfsCacheMaxAge != "" {
			vfsOpt["CacheMaxAge"] = cfg.Rclone.VfsCacheMaxAge
		}
		if cfg.Rclone.VfsCacheMaxSize != "" {
			vfsOpt["CacheMaxSize"] = cfg.Rclone.VfsCacheMaxSize
		}
		if cfg.Rclone.VfsCachePollInterval != "" {
			vfsOpt["CachePollInterval"] = cfg.Rclone.VfsCachePollInterval
		}
		if cfg.Rclone.VfsReadChunkSize != "" {
			vfsOpt["ChunkSize"] = cfg.Rclone.VfsReadChunkSize
		}
		if cfg.Rclone.VfsReadAhead != "" {
			vfsOpt["ReadAhead"] = cfg.Rclone.VfsReadAhead
		}
		if cfg.Rclone.NoChecksum {
			vfsOpt["NoChecksum"] = cfg.Rclone.NoChecksum
		}
		if cfg.Rclone.NoModTime {
			vfsOpt["NoModTime"] = cfg.Rclone.NoModTime
		}

		mountArgs["vfsOpt"] = vfsOpt
	}

	// Add mount options based on configuration
	if cfg.Rclone.UID != 0 {
		mountArgs["mountOpt"].(map[string]interface{})["UID"] = cfg.Rclone.UID
	}
	if cfg.Rclone.GID != 0 {
		mountArgs["mountOpt"].(map[string]interface{})["GID"] = cfg.Rclone.GID
	}
	if cfg.Rclone.AttrTimeout != "" {
		// Invalid durations are silently ignored and the option dropped.
		if attrTimeout, err := time.ParseDuration(cfg.Rclone.AttrTimeout); err == nil {
			mountArgs["mountOpt"].(map[string]interface{})["AttrTimeout"] = attrTimeout.String()
		}
	}
	// Make the mount request
	req := RCRequest{
		Command: "mount/mount",
		Args:    mountArgs,
	}

	_, err := m.makeRequest(req, true)
	if err != nil {
		// Clean up mount point on failure
		m.forceUnmountPath(mountPath)
		return fmt.Errorf("failed to create mount for %s: %w", provider, err)
	}

	// Store mount info
	mountInfo := &MountInfo{
		Provider:   provider,
		LocalPath:  mountPath,
		WebDAVURL:  webdavURL,
		Mounted:    true,
		MountedAt:  time.Now().Format(time.RFC3339),
		ConfigName: configName,
	}

	m.mountsMutex.Lock()
	m.mounts[provider] = mountInfo
	m.mountsMutex.Unlock()

	return nil
}
|
||||||
|
|
||||||
|
// Unmount unmounts a specific provider. It delegates to the internal
// unmount, which treats an unknown or already-unmounted provider as a
// no-op and returns nil.
func (m *Manager) Unmount(provider string) error {
	return m.unmount(provider)
}
|
||||||
|
|
||||||
|
// unmount is the internal unmount function. It first tries a clean
// unmount via the RC API (mount/unmount) and falls back to system-level
// force unmount when that fails. The mount's state is always updated to
// unmounted afterwards, recording any RC error in MountInfo.Error; the
// function itself returns nil in all non-panic paths.
//
// NOTE(review): when the RC server is not ready, rcErr stays nil, so the
// force-unmount fallback is skipped even though no RC unmount happened —
// confirm that is intended.
func (m *Manager) unmount(provider string) error {
	m.mountsMutex.RLock()
	mountInfo, exists := m.mounts[provider]
	m.mountsMutex.RUnlock()

	if !exists || !mountInfo.Mounted {
		m.logger.Info().Str("provider", provider).Msg("Mount not found or already unmounted")
		return nil
	}

	m.logger.Info().Str("provider", provider).Str("path", mountInfo.LocalPath).Msg("Unmounting")

	// Try RC unmount first
	req := RCRequest{
		Command: "mount/unmount",
		Args: map[string]interface{}{
			"mountPoint": mountInfo.LocalPath,
		},
	}

	var rcErr error
	if m.IsReady() {
		_, rcErr = m.makeRequest(req, true)
	}

	// If RC unmount fails or server is not ready, try force unmount
	if rcErr != nil {
		m.logger.Warn().Err(rcErr).Str("provider", provider).Msg("RC unmount failed, trying force unmount")
		if err := m.forceUnmountPath(mountInfo.LocalPath); err != nil {
			m.logger.Error().Err(err).Str("provider", provider).Msg("Force unmount failed")
			// Don't return error here, update the state anyway
		}
	}

	// Update mount info: mark unmounted and record the RC error, if any.
	m.mountsMutex.Lock()
	if info, exists := m.mounts[provider]; exists {
		info.Mounted = false
		info.Error = ""
		if rcErr != nil {
			info.Error = rcErr.Error()
		}
	}
	m.mountsMutex.Unlock()

	m.logger.Info().Str("provider", provider).Msg("Unmount completed")
	return nil
}
|
||||||
|
|
||||||
|
// UnmountAll unmounts all mounts
|
||||||
|
func (m *Manager) UnmountAll() error {
|
||||||
|
m.mountsMutex.RLock()
|
||||||
|
providers := make([]string, 0, len(m.mounts))
|
||||||
|
for provider, mount := range m.mounts {
|
||||||
|
if mount.Mounted {
|
||||||
|
providers = append(providers, provider)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
m.mountsMutex.RUnlock()
|
||||||
|
|
||||||
|
var lastError error
|
||||||
|
for _, provider := range providers {
|
||||||
|
if err := m.unmount(provider); err != nil {
|
||||||
|
lastError = err
|
||||||
|
m.logger.Error().Err(err).Str("provider", provider).Msg("Failed to unmount")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return lastError
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetMountInfo returns information about a specific mount
|
||||||
|
func (m *Manager) GetMountInfo(provider string) (*MountInfo, bool) {
|
||||||
|
m.mountsMutex.RLock()
|
||||||
|
defer m.mountsMutex.RUnlock()
|
||||||
|
|
||||||
|
info, exists := m.mounts[provider]
|
||||||
|
if !exists {
|
||||||
|
return nil, false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create a copy to avoid race conditions
|
||||||
|
mountInfo := *info
|
||||||
|
return &mountInfo, true
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetAllMounts returns information about all mounts
|
||||||
|
func (m *Manager) GetAllMounts() map[string]*MountInfo {
|
||||||
|
m.mountsMutex.RLock()
|
||||||
|
defer m.mountsMutex.RUnlock()
|
||||||
|
|
||||||
|
result := make(map[string]*MountInfo, len(m.mounts))
|
||||||
|
for provider, info := range m.mounts {
|
||||||
|
// Create a copy to avoid race conditions
|
||||||
|
mountInfo := *info
|
||||||
|
result[provider] = &mountInfo
|
||||||
|
}
|
||||||
|
|
||||||
|
return result
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsMounted checks if a provider is mounted
|
||||||
|
func (m *Manager) IsMounted(provider string) bool {
|
||||||
|
info, exists := m.GetMountInfo(provider)
|
||||||
|
return exists && info.Mounted
|
||||||
|
}
|
||||||
|
|
||||||
|
// RefreshDir refreshes directories in the VFS cache
|
||||||
|
func (m *Manager) RefreshDir(provider string, dirs []string) error {
|
||||||
|
if !m.IsReady() {
|
||||||
|
return fmt.Errorf("rclone RC server not ready")
|
||||||
|
}
|
||||||
|
|
||||||
|
mountInfo, exists := m.GetMountInfo(provider)
|
||||||
|
if !exists || !mountInfo.Mounted {
|
||||||
|
return fmt.Errorf("provider %s not mounted", provider)
|
||||||
|
}
|
||||||
|
|
||||||
|
// If no specific directories provided, refresh root
|
||||||
|
if len(dirs) == 0 {
|
||||||
|
dirs = []string{"/"}
|
||||||
|
}
|
||||||
|
args := map[string]interface{}{
|
||||||
|
"fs": fmt.Sprintf("decypharr-%s:", provider),
|
||||||
|
}
|
||||||
|
for i, dir := range dirs {
|
||||||
|
if dir != "" {
|
||||||
|
if i == 0 {
|
||||||
|
args["dir"] = dir
|
||||||
|
} else {
|
||||||
|
args[fmt.Sprintf("dir%d", i+1)] = dir
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
req := RCRequest{
|
||||||
|
Command: "vfs/forget",
|
||||||
|
Args: args,
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err := m.makeRequest(req, true)
|
||||||
|
if err != nil {
|
||||||
|
m.logger.Error().Err(err).
|
||||||
|
Str("provider", provider).
|
||||||
|
Msg("Failed to refresh directory")
|
||||||
|
return fmt.Errorf("failed to refresh directory %s for provider %s: %w", dirs, provider, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
req = RCRequest{
|
||||||
|
Command: "vfs/refresh",
|
||||||
|
Args: args,
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err = m.makeRequest(req, true)
|
||||||
|
if err != nil {
|
||||||
|
m.logger.Error().Err(err).
|
||||||
|
Str("provider", provider).
|
||||||
|
Msg("Failed to refresh directory")
|
||||||
|
return fmt.Errorf("failed to refresh directory %s for provider %s: %w", dirs, provider, err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// createConfig creates an rclone config entry for the provider
|
||||||
|
func (m *Manager) createConfig(configName, webdavURL string) error {
|
||||||
|
req := RCRequest{
|
||||||
|
Command: "config/create",
|
||||||
|
Args: map[string]interface{}{
|
||||||
|
"name": configName,
|
||||||
|
"type": "webdav",
|
||||||
|
"parameters": map[string]interface{}{
|
||||||
|
"url": webdavURL,
|
||||||
|
"vendor": "other",
|
||||||
|
"pacer_min_sleep": "0",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err := m.makeRequest(req, true)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to create config %s: %w", configName, err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// forceUnmountPath attempts to force unmount a path using system commands
|
||||||
|
func (m *Manager) forceUnmountPath(mountPath string) error {
|
||||||
|
methods := [][]string{
|
||||||
|
{"umount", mountPath},
|
||||||
|
{"umount", "-l", mountPath}, // lazy unmount
|
||||||
|
{"fusermount", "-uz", mountPath},
|
||||||
|
{"fusermount3", "-uz", mountPath},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, method := range methods {
|
||||||
|
if err := m.tryUnmountCommand(method...); err == nil {
|
||||||
|
m.logger.Info().
|
||||||
|
Strs("command", method).
|
||||||
|
Str("path", mountPath).
|
||||||
|
Msg("Successfully unmounted using system command")
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return fmt.Errorf("all force unmount attempts failed for %s", mountPath)
|
||||||
|
}
|
||||||
|
|
||||||
|
// tryUnmountCommand tries to run an unmount command
|
||||||
|
func (m *Manager) tryUnmountCommand(args ...string) error {
|
||||||
|
if len(args) == 0 {
|
||||||
|
return fmt.Errorf("no command provided")
|
||||||
|
}
|
||||||
|
|
||||||
|
cmd := exec.CommandContext(m.ctx, args[0], args[1:]...)
|
||||||
|
return cmd.Run()
|
||||||
|
}
|
||||||
140
pkg/rclone/health.go
Normal file
@@ -0,0 +1,140 @@
|
|||||||
|
package rclone
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// HealthCheck performs comprehensive health checks on the rclone system
|
||||||
|
func (m *Manager) HealthCheck() error {
|
||||||
|
if !m.serverStarted {
|
||||||
|
return fmt.Errorf("rclone RC server is not started")
|
||||||
|
}
|
||||||
|
|
||||||
|
if !m.IsReady() {
|
||||||
|
return fmt.Errorf("rclone RC server is not ready")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if we can communicate with the server
|
||||||
|
if !m.pingServer() {
|
||||||
|
return fmt.Errorf("rclone RC server is not responding")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check mounts health
|
||||||
|
m.mountsMutex.RLock()
|
||||||
|
unhealthyMounts := make([]string, 0)
|
||||||
|
for provider, mount := range m.mounts {
|
||||||
|
if mount.Mounted && !m.checkMountHealth(provider) {
|
||||||
|
unhealthyMounts = append(unhealthyMounts, provider)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
m.mountsMutex.RUnlock()
|
||||||
|
|
||||||
|
if len(unhealthyMounts) > 0 {
|
||||||
|
return fmt.Errorf("unhealthy mounts detected: %v", unhealthyMounts)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// checkMountHealth checks if a specific mount is healthy
|
||||||
|
func (m *Manager) checkMountHealth(provider string) bool {
|
||||||
|
// Try to list the root directory of the mount
|
||||||
|
req := RCRequest{
|
||||||
|
Command: "operations/list",
|
||||||
|
Args: map[string]interface{}{
|
||||||
|
"fs": fmt.Sprintf("decypharr-%s:", provider),
|
||||||
|
"remote": "/",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err := m.makeRequest(req, true)
|
||||||
|
return err == nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// RecoverMount attempts to recover a failed mount
|
||||||
|
func (m *Manager) RecoverMount(provider string) error {
|
||||||
|
m.mountsMutex.RLock()
|
||||||
|
mountInfo, exists := m.mounts[provider]
|
||||||
|
m.mountsMutex.RUnlock()
|
||||||
|
|
||||||
|
if !exists {
|
||||||
|
return fmt.Errorf("mount for provider %s does not exist", provider)
|
||||||
|
}
|
||||||
|
|
||||||
|
m.logger.Warn().Str("provider", provider).Msg("Attempting to recover mount")
|
||||||
|
|
||||||
|
// First try to unmount cleanly
|
||||||
|
if err := m.unmount(provider); err != nil {
|
||||||
|
m.logger.Error().Err(err).Str("provider", provider).Msg("Failed to unmount during recovery")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Wait a moment
|
||||||
|
time.Sleep(2 * time.Second)
|
||||||
|
|
||||||
|
// Try to remount
|
||||||
|
if err := m.Mount(provider, mountInfo.WebDAVURL); err != nil {
|
||||||
|
return fmt.Errorf("failed to recover mount for %s: %w", provider, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
m.logger.Info().Str("provider", provider).Msg("Successfully recovered mount")
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// MonitorMounts continuously monitors mount health and attempts recovery
|
||||||
|
func (m *Manager) MonitorMounts(ctx context.Context) {
|
||||||
|
if !m.serverStarted {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
ticker := time.NewTicker(30 * time.Second) // Check every 30 seconds
|
||||||
|
defer ticker.Stop()
|
||||||
|
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
m.logger.Debug().Msg("Mount monitoring stopped")
|
||||||
|
return
|
||||||
|
case <-ticker.C:
|
||||||
|
m.performMountHealthCheck()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// performMountHealthCheck probes every active mount and, for each one
// that fails its health check, marks it unmounted with an error and
// spawns a goroutine that attempts recovery via RecoverMount.
//
// NOTE(review): recovery goroutines are unbounded and untracked; if a
// mount keeps failing, each monitoring tick may spawn another recovery
// attempt while earlier ones are still sleeping/remounting — confirm
// RecoverMount tolerates concurrent invocations for the same provider.
func (m *Manager) performMountHealthCheck() {
	if !m.IsReady() {
		return
	}

	// Snapshot the active providers so the probes run without the lock.
	m.mountsMutex.RLock()
	providers := make([]string, 0, len(m.mounts))
	for provider, mount := range m.mounts {
		if mount.Mounted {
			providers = append(providers, provider)
		}
	}
	m.mountsMutex.RUnlock()

	for _, provider := range providers {
		if !m.checkMountHealth(provider) {
			m.logger.Warn().Str("provider", provider).Msg("Mount health check failed, attempting recovery")

			// Mark mount as unhealthy
			m.mountsMutex.Lock()
			if mount, exists := m.mounts[provider]; exists {
				mount.Error = "Health check failed"
				mount.Mounted = false
			}
			m.mountsMutex.Unlock()

			// Attempt recovery asynchronously; the provider is passed as a
			// parameter so each goroutine gets its own copy.
			go func(provider string) {
				if err := m.RecoverMount(provider); err != nil {
					m.logger.Error().Err(err).Str("provider", provider).Msg("Failed to recover mount")
				}
			}(provider)
		}
	}
}
|
||||||
43
pkg/rclone/killed_unix.go
Normal file
@@ -0,0 +1,43 @@
|
|||||||
|
//go:build !windows
|
||||||
|
|
||||||
|
package rclone
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"os/exec"
|
||||||
|
"syscall"
|
||||||
|
)
|
||||||
|
|
||||||
|
// WasHardTerminated reports true iff the process was ended by SIGKILL or SIGTERM.
|
||||||
|
func WasHardTerminated(err error) bool {
|
||||||
|
var ee *exec.ExitError
|
||||||
|
if !errors.As(err, &ee) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
ws, ok := ee.Sys().(syscall.WaitStatus)
|
||||||
|
if !ok || !ws.Signaled() {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
sig := ws.Signal()
|
||||||
|
return sig == syscall.SIGKILL || sig == syscall.SIGTERM
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExitCode returns the numeric exit code when available.
|
||||||
|
func ExitCode(err error) (int, bool) {
|
||||||
|
var ee *exec.ExitError
|
||||||
|
if !errors.As(err, &ee) {
|
||||||
|
return 0, false
|
||||||
|
}
|
||||||
|
ws, ok := ee.Sys().(syscall.WaitStatus)
|
||||||
|
if !ok {
|
||||||
|
return 0, false
|
||||||
|
}
|
||||||
|
if ws.Exited() {
|
||||||
|
return ws.ExitStatus(), true
|
||||||
|
}
|
||||||
|
// Conventional shell “killed by signal” code is 128 + signal.
|
||||||
|
if ws.Signaled() {
|
||||||
|
return 128 + int(ws.Signal()), true
|
||||||
|
}
|
||||||
|
return 0, false
|
||||||
|
}
|
||||||
35
pkg/rclone/killed_windows.go
Normal file
@@ -0,0 +1,35 @@
|
|||||||
|
//go:build windows
|
||||||
|
|
||||||
|
package rclone
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"os/exec"
|
||||||
|
"syscall"
|
||||||
|
)
|
||||||
|
|
||||||
|
func WasHardTerminated(err error) bool {
|
||||||
|
var ee *exec.ExitError
|
||||||
|
if !errors.As(err, &ee) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
ws, ok := ee.Sys().(syscall.WaitStatus)
|
||||||
|
if !ok {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
// No Signaled() on Windows; consider "hard terminated" if not success.
|
||||||
|
return ws.ExitStatus() != 0 // Use the ExitStatus() method
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExitCode returns the process exit code when available.
|
||||||
|
func ExitCode(err error) (int, bool) {
|
||||||
|
var ee *exec.ExitError
|
||||||
|
if !errors.As(err, &ee) {
|
||||||
|
return 0, false
|
||||||
|
}
|
||||||
|
ws, ok := ee.Sys().(syscall.WaitStatus)
|
||||||
|
if !ok {
|
||||||
|
return 0, false
|
||||||
|
}
|
||||||
|
return ws.ExitStatus(), true // Use the ExitStatus() method
|
||||||
|
}
|
||||||
375
pkg/rclone/manager.go
Normal file
@@ -0,0 +1,375 @@
|
|||||||
|
package rclone
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
"os"
|
||||||
|
"os/exec"
|
||||||
|
"path/filepath"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/rs/zerolog"
|
||||||
|
"github.com/sirrobot01/decypharr/internal/config"
|
||||||
|
"github.com/sirrobot01/decypharr/internal/logger"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Manager handles the rclone RC server and provides mount operations.
// It owns the lifecycle of a single `rclone rcd` subprocess and tracks the
// mounts created through its remote-control HTTP API.
type Manager struct {
	cmd       *exec.Cmd // the running `rclone rcd` process; nil until Start succeeds
	rcPort    string    // TCP port the RC HTTP API listens on
	rcUser    string    // NOTE(review): never assigned in this file; server runs with --rc-no-auth — confirm before relying on it
	rcPass    string    // NOTE(review): never assigned in this file — see rcUser
	configDir string    // directory holding rclone.conf; also used as the subprocess working dir

	mounts      map[string]*MountInfo // per-provider mount state, keyed by provider name
	mountsMutex sync.RWMutex          // guards mounts

	logger zerolog.Logger

	ctx    context.Context    // manager-lifetime context used for RC HTTP requests
	cancel context.CancelFunc // cancels ctx during Stop

	httpClient *http.Client // shared client for RC API calls (30s timeout, see NewManager)

	// serverReady is closed after waitForServer returns. NOTE(review): it is
	// closed even when the server never answered (polling gave up), so
	// IsReady can report true for a dead server — confirm intended.
	serverReady   chan struct{}
	serverStarted bool         // set by Start, cleared by Stop
	mu            sync.RWMutex // guards serverStarted and start/stop transitions
}
|
||||||
|
|
||||||
|
// MountInfo describes the tracked state of a single provider's mount.
type MountInfo struct {
	Provider   string `json:"provider"`             // provider this mount belongs to (map key in Manager.mounts)
	LocalPath  string `json:"local_path"`           // filesystem path where the remote is mounted
	WebDAVURL  string `json:"webdav_url"`           // WebDAV endpoint backing the mount
	Mounted    bool   `json:"mounted"`              // true while the mount is believed healthy
	MountedAt  string `json:"mounted_at,omitempty"` // time of mounting; set elsewhere — exact format not visible here
	ConfigName string `json:"config_name"`          // rclone remote/config entry name
	Error      string `json:"error,omitempty"`      // last error text, e.g. "Health check failed"
}
|
||||||
|
|
||||||
|
// RCRequest is a command sent to the rclone remote-control API. Command
// forms the URL path (e.g. "core/stats"); Args is JSON-encoded as the
// request body (see makeRequest).
type RCRequest struct {
	Command string                 `json:"command"`
	Args    map[string]interface{} `json:"args,omitempty"`
}

// RCResponse is the generic shape of an RC reply; Error is non-empty when
// the server reports a failure.
type RCResponse struct {
	Result interface{} `json:"result,omitempty"`
	Error  string      `json:"error,omitempty"`
}
|
||||||
|
|
||||||
|
// NewManager creates a new rclone RC manager
|
||||||
|
func NewManager() *Manager {
|
||||||
|
cfg := config.Get()
|
||||||
|
|
||||||
|
rcPort := "5572"
|
||||||
|
configDir := filepath.Join(cfg.Path, "rclone")
|
||||||
|
|
||||||
|
// Ensure config directory exists
|
||||||
|
if err := os.MkdirAll(configDir, 0755); err != nil {
|
||||||
|
_logger := logger.New("rclone")
|
||||||
|
_logger.Error().Err(err).Msg("Failed to create rclone config directory")
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
|
||||||
|
return &Manager{
|
||||||
|
rcPort: rcPort,
|
||||||
|
configDir: configDir,
|
||||||
|
mounts: make(map[string]*MountInfo),
|
||||||
|
logger: logger.New("rclone"),
|
||||||
|
ctx: ctx,
|
||||||
|
cancel: cancel,
|
||||||
|
httpClient: &http.Client{Timeout: 30 * time.Second},
|
||||||
|
serverReady: make(chan struct{}),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Start launches the `rclone rcd` remote-control server as a subprocess.
//
// It is a no-op when the server is already started or when rclone is
// disabled in the configuration. The subprocess is supervised by a
// goroutine that waits for the RC API to answer, closes serverReady,
// starts mount monitoring, and finally waits for and classifies the
// process exit. Start itself returns as soon as the process has launched.
func (m *Manager) Start(ctx context.Context) error {
	m.mu.Lock()
	defer m.mu.Unlock()

	// Idempotent: a second Start while running does nothing.
	if m.serverStarted {
		return nil
	}

	cfg := config.Get()
	if !cfg.Rclone.Enabled {
		m.logger.Info().Msg("Rclone is disabled, skipping RC server startup")
		return nil
	}

	args := []string{
		"rcd",
		"--rc-addr", ":" + m.rcPort,
		"--rc-no-auth", // We'll handle auth at the application level
		"--config", filepath.Join(m.configDir, "rclone.conf"),
		"--log-level", "INFO",
	}
	m.cmd = exec.CommandContext(ctx, "rclone", args...)
	m.cmd.Dir = m.configDir

	// Capture output for debugging.
	// NOTE(review): these buffers grow without bound for a long-running
	// rcd process at INFO level and are never read or logged anywhere —
	// consider streaming to the logger or discarding instead.
	var stdout, stderr bytes.Buffer
	m.cmd.Stdout = &stdout
	m.cmd.Stderr = &stderr

	if err := m.cmd.Start(); err != nil {
		return fmt.Errorf("failed to start rclone RC server: %w", err)
	}

	m.serverStarted = true

	// Supervise the subprocess in a goroutine so Start can return promptly.
	go func() {
		defer func() {
			if r := recover(); r != nil {
				m.logger.Error().Interface("panic", r).Msg("Panic in rclone RC server monitor")
			}
		}()

		// NOTE(review): serverReady is closed even when waitForServer gave
		// up after its retry budget, so IsReady may report true for a dead
		// server — confirm this is intended.
		m.waitForServer()
		close(m.serverReady)

		// Start mount monitoring once the server is (believed) ready.
		go func() {
			defer func() {
				if r := recover(); r != nil {
					m.logger.Error().Interface("panic", r).Msg("Panic in mount monitor")
				}
			}()
			m.MonitorMounts(ctx)
		}()

		// Block until the subprocess exits, then classify the exit reason.
		err := m.cmd.Wait()
		switch {
		case err == nil:
			m.logger.Info().Msg("Rclone RC server exited normally")

		case errors.Is(err, context.Canceled):
			m.logger.Info().Msg("Rclone RC server terminated: context canceled")

		case WasHardTerminated(err): // SIGKILL on *nix; non-zero exit on Windows
			m.logger.Info().Msg("Rclone RC server hard-terminated")

		default:
			if code, ok := ExitCode(err); ok {
				m.logger.Debug().Int("exit_code", code).Err(err).
					Msg("Rclone RC server error")
			} else {
				m.logger.Debug().Err(err).Msg("Rclone RC server error (no exit code)")
			}
		}
	}()
	return nil
}
|
||||||
|
|
||||||
|
// Stop stops the rclone RC server and unmounts all mounts.
//
// Shutdown sequence: unmount all active mounts in parallel (30s budget),
// cancel the manager context, ask the subprocess to exit (interrupt first,
// kill after 10s, then a final 5s grace period), and finally try to remove
// now-empty mount directories. Returns an error only when the process
// cannot be killed.
func (m *Manager) Stop() error {
	m.mu.Lock()
	defer m.mu.Unlock()

	if !m.serverStarted {
		return nil
	}

	m.logger.Info().Msg("Stopping rclone RC server")

	// Unmount all mounts first: snapshot the active ones under the read lock.
	m.mountsMutex.RLock()
	mountList := make([]*MountInfo, 0, len(m.mounts))
	for _, mount := range m.mounts {
		if mount.Mounted {
			mountList = append(mountList, mount)
		}
	}
	m.mountsMutex.RUnlock()

	// Unmount in parallel.
	var wg sync.WaitGroup
	for _, mount := range mountList {
		wg.Add(1)
		go func(mount *MountInfo) {
			defer wg.Done()
			if err := m.unmount(mount.Provider); err != nil {
				m.logger.Error().Err(err).Str("provider", mount.Provider).Msg("Failed to unmount during shutdown")
			}
		}(mount)
	}

	// Wait for unmounts with timeout.
	done := make(chan struct{})
	go func() {
		wg.Wait()
		close(done)
	}()

	select {
	case <-done:
		m.logger.Info().Msg("All mounts unmounted successfully")
	case <-time.After(30 * time.Second):
		m.logger.Warn().Msg("Timeout waiting for mounts to unmount, proceeding with shutdown")
	}

	// Cancel context and stop process.
	m.cancel()

	if m.cmd != nil && m.cmd.Process != nil {
		// Try graceful shutdown first.
		// NOTE(review): os.Interrupt is not deliverable on Windows, so this
		// always falls through to Kill there.
		if err := m.cmd.Process.Signal(os.Interrupt); err != nil {
			m.logger.Warn().Err(err).Msg("Failed to send interrupt signal, using kill")
			if killErr := m.cmd.Process.Kill(); killErr != nil {
				m.logger.Error().Err(killErr).Msg("Failed to kill rclone process")
				return killErr
			}
		}

		// Wait for process to exit with timeout.
		// NOTE(review): Start's supervisor goroutine also calls cmd.Wait;
		// a second Wait on the same Cmd returns an error — confirm which
		// call observes the real exit status.
		done := make(chan error, 1) // intentionally shadows the unmount-phase channel; scoped to this block
		go func() {
			done <- m.cmd.Wait()
		}()

		select {
		case err := <-done:
			if err != nil && !errors.Is(err, context.Canceled) && !WasHardTerminated(err) {
				m.logger.Warn().Err(err).Msg("Rclone process exited with error")
			}
		case <-time.After(10 * time.Second):
			m.logger.Warn().Msg("Timeout waiting for rclone to exit, force killing")
			if err := m.cmd.Process.Kill(); err != nil {
				m.logger.Error().Err(err).Msg("Failed to force kill rclone process")
				return err
			}
			// Wait a bit more for the kill to take effect.
			select {
			case <-done:
				m.logger.Info().Msg("Rclone process killed successfully")
			case <-time.After(5 * time.Second):
				m.logger.Error().Msg("Process may still be running after kill")
			}
		}
	}

	// Clean up any remaining mount directories.
	cfg := config.Get()
	if cfg.Rclone.MountPath != "" {
		m.cleanupMountDirectories(cfg.Rclone.MountPath)
	}

	m.serverStarted = false
	m.logger.Info().Msg("Rclone RC server stopped")
	return nil
}
|
||||||
|
|
||||||
|
// cleanupMountDirectories removes empty mount directories
|
||||||
|
func (m *Manager) cleanupMountDirectories(_ string) {
|
||||||
|
m.mountsMutex.RLock()
|
||||||
|
defer m.mountsMutex.RUnlock()
|
||||||
|
|
||||||
|
for _, mount := range m.mounts {
|
||||||
|
if mount.LocalPath != "" {
|
||||||
|
// Try to remove the directory if it's empty
|
||||||
|
if err := os.Remove(mount.LocalPath); err == nil {
|
||||||
|
m.logger.Debug().Str("path", mount.LocalPath).Msg("Removed empty mount directory")
|
||||||
|
}
|
||||||
|
// Don't log errors here as the directory might not be empty, which is fine
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// waitForServer waits for the RC server to become available
|
||||||
|
func (m *Manager) waitForServer() {
|
||||||
|
maxAttempts := 30
|
||||||
|
for i := 0; i < maxAttempts; i++ {
|
||||||
|
if m.ctx.Err() != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if m.pingServer() {
|
||||||
|
m.logger.Info().Msg("Rclone RC server is ready")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
time.Sleep(time.Second)
|
||||||
|
}
|
||||||
|
|
||||||
|
m.logger.Error().Msg("Rclone RC server not responding - mount operations will be disabled")
|
||||||
|
}
|
||||||
|
|
||||||
|
// pingServer checks if the RC server is responding
|
||||||
|
func (m *Manager) pingServer() bool {
|
||||||
|
req := RCRequest{Command: "core/version"}
|
||||||
|
_, err := m.makeRequest(req, true)
|
||||||
|
return err == nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Manager) makeRequest(req RCRequest, close bool) (*http.Response, error) {
|
||||||
|
reqBody, err := json.Marshal(req.Args)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to marshal request: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
url := fmt.Sprintf("http://localhost:%s/%s", m.rcPort, req.Command)
|
||||||
|
httpReq, err := http.NewRequestWithContext(m.ctx, "POST", url, bytes.NewBuffer(reqBody))
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to create request: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
httpReq.Header.Set("Content-Type", "application/json")
|
||||||
|
|
||||||
|
resp, err := m.httpClient.Do(httpReq)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to make request: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if resp.StatusCode != http.StatusOK {
|
||||||
|
// Read the response body to get more details
|
||||||
|
defer resp.Body.Close()
|
||||||
|
var errorResp RCResponse
|
||||||
|
if err := json.NewDecoder(resp.Body).Decode(&errorResp); err != nil {
|
||||||
|
return nil, fmt.Errorf("request failed with status %s, but could not decode error response: %w", resp.Status, err)
|
||||||
|
}
|
||||||
|
if errorResp.Error != "" {
|
||||||
|
return nil, fmt.Errorf("%s", errorResp.Error)
|
||||||
|
} else {
|
||||||
|
return nil, fmt.Errorf("request failed with status %s and no error message", resp.Status)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if close {
|
||||||
|
defer func() {
|
||||||
|
if err := resp.Body.Close(); err != nil {
|
||||||
|
m.logger.Debug().Err(err).Msg("Failed to close response body")
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
|
||||||
|
return resp, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsReady returns true if the RC server is ready
|
||||||
|
func (m *Manager) IsReady() bool {
|
||||||
|
select {
|
||||||
|
case <-m.serverReady:
|
||||||
|
return true
|
||||||
|
default:
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WaitForReady waits for the RC server to be ready
|
||||||
|
func (m *Manager) WaitForReady(timeout time.Duration) error {
|
||||||
|
select {
|
||||||
|
case <-m.serverReady:
|
||||||
|
return nil
|
||||||
|
case <-time.After(timeout):
|
||||||
|
return fmt.Errorf("timeout waiting for rclone RC server to be ready")
|
||||||
|
case <-m.ctx.Done():
|
||||||
|
return m.ctx.Err()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetLogger returns the manager's logger so Mount instances can share it.
// NOTE(review): Go convention would name this Logger() (no Get prefix), but
// renaming would break existing callers.
func (m *Manager) GetLogger() zerolog.Logger {
	return m.logger
}
|
||||||
120
pkg/rclone/mount.go
Normal file
@@ -0,0 +1,120 @@
|
|||||||
|
package rclone
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"github.com/rs/zerolog"
|
||||||
|
"github.com/sirrobot01/decypharr/internal/config"
|
||||||
|
"net/url"
|
||||||
|
"path/filepath"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Mount represents a mount using the rclone RC client. It is a thin
// per-provider wrapper that delegates all operations to the shared Manager.
type Mount struct {
	Provider  string // provider name; also the path component under the mount root
	LocalPath string // local mount point: <Rclone.MountPath>/<Provider>
	WebDAVURL string // WebDAV endpoint for this provider: <base URL>/<Provider>
	logger    zerolog.Logger
	rcManager *Manager // shared RC manager; nil-checked by every method
}
|
||||||
|
|
||||||
|
// NewMount creates a new RC-based mount
|
||||||
|
func NewMount(provider, webdavURL string, rcManager *Manager) *Mount {
|
||||||
|
cfg := config.Get()
|
||||||
|
mountPath := filepath.Join(cfg.Rclone.MountPath, provider)
|
||||||
|
|
||||||
|
_url, err := url.JoinPath(webdavURL, provider)
|
||||||
|
if err != nil {
|
||||||
|
_url = fmt.Sprintf("%s/%s", webdavURL, provider)
|
||||||
|
}
|
||||||
|
|
||||||
|
return &Mount{
|
||||||
|
Provider: provider,
|
||||||
|
LocalPath: mountPath,
|
||||||
|
WebDAVURL: _url,
|
||||||
|
rcManager: rcManager,
|
||||||
|
logger: rcManager.GetLogger(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Mount creates the mount using rclone RC
|
||||||
|
func (m *Mount) Mount(ctx context.Context) error {
|
||||||
|
if m.rcManager == nil {
|
||||||
|
return fmt.Errorf("rclone manager is not available")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if already mounted
|
||||||
|
if m.rcManager.IsMounted(m.Provider) {
|
||||||
|
m.logger.Info().Msgf("Mount %s is already mounted at %s", m.Provider, m.LocalPath)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
m.logger.Info().
|
||||||
|
Str("provider", m.Provider).
|
||||||
|
Str("webdav_url", m.WebDAVURL).
|
||||||
|
Str("mount_path", m.LocalPath).
|
||||||
|
Msg("Creating mount via RC")
|
||||||
|
|
||||||
|
if err := m.rcManager.Mount(m.Provider, m.WebDAVURL); err != nil {
|
||||||
|
m.logger.Error().Str("provider", m.Provider).Msg("Mount operation failed")
|
||||||
|
return fmt.Errorf("mount failed for %s", m.Provider)
|
||||||
|
}
|
||||||
|
|
||||||
|
m.logger.Info().Msgf("Successfully mounted %s WebDAV at %s via RC", m.Provider, m.LocalPath)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unmount removes the mount using rclone RC
|
||||||
|
func (m *Mount) Unmount() error {
|
||||||
|
if m.rcManager == nil {
|
||||||
|
m.logger.Warn().Msg("Rclone manager is not available, skipping unmount")
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if !m.rcManager.IsMounted(m.Provider) {
|
||||||
|
m.logger.Info().Msgf("Mount %s is not mounted, skipping unmount", m.Provider)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
m.logger.Info().Str("provider", m.Provider).Msg("Unmounting via RC")
|
||||||
|
|
||||||
|
if err := m.rcManager.Unmount(m.Provider); err != nil {
|
||||||
|
return fmt.Errorf("failed to unmount %s via RC: %w", m.Provider, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
m.logger.Info().Msgf("Successfully unmounted %s", m.Provider)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsMounted checks if the mount is active via RC
|
||||||
|
func (m *Mount) IsMounted() bool {
|
||||||
|
if m.rcManager == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return m.rcManager.IsMounted(m.Provider)
|
||||||
|
}
|
||||||
|
|
||||||
|
// RefreshDir refreshes directories in the mount
|
||||||
|
func (m *Mount) RefreshDir(dirs []string) error {
|
||||||
|
if m.rcManager == nil {
|
||||||
|
return fmt.Errorf("rclone manager is not available")
|
||||||
|
}
|
||||||
|
|
||||||
|
if !m.IsMounted() {
|
||||||
|
return fmt.Errorf("provider %s not properly mounted. Skipping refreshes", m.Provider)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := m.rcManager.RefreshDir(m.Provider, dirs); err != nil {
|
||||||
|
return fmt.Errorf("failed to refresh directories for %s: %w", m.Provider, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetMountInfo returns mount information
|
||||||
|
func (m *Mount) GetMountInfo() (*MountInfo, bool) {
|
||||||
|
if m.rcManager == nil {
|
||||||
|
return nil, false
|
||||||
|
}
|
||||||
|
return m.rcManager.GetMountInfo(m.Provider)
|
||||||
|
}
|
||||||
184
pkg/rclone/stats.go
Normal file
@@ -0,0 +1,184 @@
|
|||||||
|
package rclone
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TransferringStat is one in-flight transfer as reported by core/stats.
type TransferringStat struct {
	Bytes    int64   `json:"bytes"`    // bytes transferred so far
	ETA      int64   `json:"eta"`      // presumably seconds remaining — confirm against rclone RC docs
	Name     string  `json:"name"`     // file being transferred
	Speed    float64 `json:"speed"`    // current transfer speed
	Size     int64   `json:"size"`     // total size of the file
	Progress float64 `json:"progress"` // fraction or percent complete — units not visible here
}

// VersionResponse mirrors the core/version RC reply.
type VersionResponse struct {
	Arch    string `json:"arch"`
	Version string `json:"version"`
	OS      string `json:"os"`
}

// CoreStatsResponse mirrors the core/stats RC reply (aggregate transfer
// statistics since the server started).
type CoreStatsResponse struct {
	Bytes          int64              `json:"bytes"`
	Checks         int                `json:"checks"`
	DeletedDirs    int                `json:"deletedDirs"`
	Deletes        int                `json:"deletes"`
	ElapsedTime    float64            `json:"elapsedTime"`
	Errors         int                `json:"errors"`
	Eta            int                `json:"eta"`
	Speed          float64            `json:"speed"`
	TotalBytes     int64              `json:"totalBytes"`
	TotalChecks    int                `json:"totalChecks"`
	TotalTransfers int                `json:"totalTransfers"`
	TransferTime   float64            `json:"transferTime"`
	Transfers      int                `json:"transfers"`
	Transferring   []TransferringStat `json:"transferring,omitempty"` // in-flight transfers, if any
}

// MemoryStats is the subset of the core/memstats RC reply this app uses
// (field names match Go's runtime.MemStats JSON casing).
type MemoryStats struct {
	Sys        int   `json:"Sys"`
	TotalAlloc int64 `json:"TotalAlloc"`
}

// BandwidthStats mirrors the core/bwlimit RC reply.
type BandwidthStats struct {
	BytesPerSecond int64  `json:"bytesPerSecond"`
	Rate           string `json:"rate"`
}
|
||||||
|
|
||||||
|
// Stats represents rclone statistics aggregated for consumers: core
// transfer stats, memory, bandwidth, version info, and per-provider mounts.
type Stats struct {
	Enabled   bool                  `json:"enabled"`      // always set true by GetStats
	Ready     bool                  `json:"server_ready"` // Manager.IsReady at collection time
	Core      CoreStatsResponse     `json:"core"`
	Memory    MemoryStats           `json:"memory"`
	Mount     map[string]*MountInfo `json:"mount"` // keyed by provider name
	Bandwidth BandwidthStats        `json:"bandwidth"`
	Version   VersionResponse       `json:"version"`
}
|
||||||
|
|
||||||
|
// GetStats retrieves statistics from the rclone RC server
|
||||||
|
func (m *Manager) GetStats() (*Stats, error) {
|
||||||
|
stats := &Stats{}
|
||||||
|
stats.Ready = m.IsReady()
|
||||||
|
stats.Enabled = true
|
||||||
|
|
||||||
|
coreStats, err := m.GetCoreStats()
|
||||||
|
if err == nil {
|
||||||
|
stats.Core = *coreStats
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get memory usage
|
||||||
|
memStats, err := m.GetMemoryUsage()
|
||||||
|
if err == nil {
|
||||||
|
stats.Memory = *memStats
|
||||||
|
}
|
||||||
|
// Get bandwidth stats
|
||||||
|
bwStats, err := m.GetBandwidthStats()
|
||||||
|
if err == nil {
|
||||||
|
stats.Bandwidth = *bwStats
|
||||||
|
} else {
|
||||||
|
fmt.Println("Failed to get rclone stats", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get version info
|
||||||
|
versionResp, err := m.GetVersion()
|
||||||
|
if err == nil {
|
||||||
|
stats.Version = *versionResp
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get mount info
|
||||||
|
stats.Mount = m.GetAllMounts()
|
||||||
|
return stats, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Manager) GetCoreStats() (*CoreStatsResponse, error) {
|
||||||
|
if !m.IsReady() {
|
||||||
|
return nil, fmt.Errorf("rclone RC server not ready")
|
||||||
|
}
|
||||||
|
|
||||||
|
req := RCRequest{
|
||||||
|
Command: "core/stats",
|
||||||
|
}
|
||||||
|
|
||||||
|
resp, err := m.makeRequest(req, false)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to get core stats: %w", err)
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
var coreStats CoreStatsResponse
|
||||||
|
if err := json.NewDecoder(resp.Body).Decode(&coreStats); err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to decode core stats response: %w", err)
|
||||||
|
}
|
||||||
|
return &coreStats, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetMemoryUsage returns memory usage statistics
|
||||||
|
func (m *Manager) GetMemoryUsage() (*MemoryStats, error) {
|
||||||
|
if !m.IsReady() {
|
||||||
|
return nil, fmt.Errorf("rclone RC server not ready")
|
||||||
|
}
|
||||||
|
|
||||||
|
req := RCRequest{
|
||||||
|
Command: "core/memstats",
|
||||||
|
}
|
||||||
|
|
||||||
|
resp, err := m.makeRequest(req, false)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to get memory stats: %w", err)
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
var memStats MemoryStats
|
||||||
|
|
||||||
|
if err := json.NewDecoder(resp.Body).Decode(&memStats); err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to decode memory stats response: %w", err)
|
||||||
|
}
|
||||||
|
return &memStats, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetBandwidthStats returns bandwidth usage for all transfers
|
||||||
|
func (m *Manager) GetBandwidthStats() (*BandwidthStats, error) {
|
||||||
|
if !m.IsReady() {
|
||||||
|
return nil, fmt.Errorf("rclone RC server not ready")
|
||||||
|
}
|
||||||
|
|
||||||
|
req := RCRequest{
|
||||||
|
Command: "core/bwlimit",
|
||||||
|
}
|
||||||
|
|
||||||
|
resp, err := m.makeRequest(req, false)
|
||||||
|
if err != nil {
|
||||||
|
// Bandwidth stats might not be available, return empty
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
var bwStats BandwidthStats
|
||||||
|
if err := json.NewDecoder(resp.Body).Decode(&bwStats); err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to decode bandwidth stats response: %w", err)
|
||||||
|
}
|
||||||
|
return &bwStats, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetVersion returns rclone version information
|
||||||
|
func (m *Manager) GetVersion() (*VersionResponse, error) {
|
||||||
|
if !m.IsReady() {
|
||||||
|
return nil, fmt.Errorf("rclone RC server not ready")
|
||||||
|
}
|
||||||
|
|
||||||
|
req := RCRequest{
|
||||||
|
Command: "core/version",
|
||||||
|
}
|
||||||
|
|
||||||
|
resp, err := m.makeRequest(req, false)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to get version: %w", err)
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
var versionResp VersionResponse
|
||||||
|
if err := json.NewDecoder(resp.Body).Decode(&versionResp); err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to decode version response: %w", err)
|
||||||
|
}
|
||||||
|
return &versionResp, nil
|
||||||
|
}
|
||||||