Compare commits
282 Commits
.air.toml (new file, 52 lines)
@@ -0,0 +1,52 @@
root = "."
testdata_dir = "testdata"
tmp_dir = "tmp"

[build]
args_bin = ["--config", "data/"]
bin = "./tmp/main"
cmd = "bash -c 'go build -ldflags \"-X github.com/sirrobot01/decypharr/pkg/version.Version=0.0.0 -X github.com/sirrobot01/decypharr/pkg/version.Channel=dev\" -o ./tmp/main .'"
delay = 1000
exclude_dir = ["tmp", "vendor", "testdata", "data", "logs", "docs", "dist", "node_modules", ".ven"]
exclude_file = []
exclude_regex = ["_test.go"]
exclude_unchanged = false
follow_symlink = false
full_bin = ""
include_dir = []
include_ext = ["go", "tpl", "tmpl", "html", ".json", ".js", ".css"]
include_file = []
kill_delay = "1s"
log = "build-errors.log"
poll = false
poll_interval = 0
post_cmd = []
pre_cmd = []
rerun = false
rerun_delay = 500
send_interrupt = true
stop_on_error = true

[color]
app = ""
build = "yellow"
main = "magenta"
runner = "green"
watcher = "cyan"

[log]
main_only = false
silent = false
time = false

[misc]
clean_on_exit = false

[proxy]
app_port = 0
enabled = false
proxy_port = 0

[screen]
clear_on_rebuild = false
keep_scroll = true
@@ -5,4 +5,29 @@ docker-compose.yml
.DS_Store
**/.idea/
*.magnet
**.torrent
**.torrent
torrents.json
**/dist/
*.json
.ven/**
docs/**

# Don't copy node modules
node_modules/

# Don't copy development files
.git/
.gitignore
*.md
.env*
*.log

# Build artifacts
decypharr
healthcheck
*.exe
.venv/
data/**

.stignore
.stfolder/**
.github/FUNDING.yml (vendored, new file, 2 lines)
@@ -0,0 +1,2 @@
github: sirrobot01
buy_me_a_coffee: sirrobot01
.github/ISSUE_TEMPLATE/bug_report.yml (vendored, new file, 76 lines)
@@ -0,0 +1,76 @@
name: Bug Report
description: 'Report a new bug'
labels: ['Type: Bug', 'Status: Needs Triage']
body:
  - type: checkboxes
    attributes:
      label: Is there an existing issue for this?
      description: Please search to see if an open or closed issue already exists for the bug you encountered. If a bug exists and is closed note that it may only be fixed in an unstable branch.
      options:
        - label: I have searched the existing open and closed issues
          required: true
  - type: textarea
    attributes:
      label: Current Behavior
      description: A concise description of what you're experiencing.
    validations:
      required: true
  - type: textarea
    attributes:
      label: Expected Behavior
      description: A concise description of what you expected to happen.
    validations:
      required: true
  - type: textarea
    attributes:
      label: Steps To Reproduce
      description: Steps to reproduce the behavior.
      placeholder: |
        1. In this environment...
        2. With this config...
        3. Run '...'
        4. See error...
    validations:
      required: false
  - type: textarea
    attributes:
      label: Environment
      description: |
        examples:
          - **OS**: Ubuntu 20.04
          - **Version**: v1.0.0
          - **Docker Install**: Yes
          - **Browser**: Firefox 90 (If UI related)
      value: |
        - OS:
        - Version:
        - Docker Install:
        - Browser:
      render: markdown
    validations:
      required: true
  - type: dropdown
    attributes:
      label: What branch are you running?
      options:
        - Main/Latest
        - Beta
        - Experimental
    validations:
      required: true
  - type: textarea
    attributes:
      label: Trace Logs? **Not Optional**
      description: |
        Trace Logs
        - are **required** for bug reports
        - are not optional
    validations:
      required: true
  - type: checkboxes
    attributes:
      label: Trace Logs have been provided as applicable
      description: Trace logs are **generally required** and are not optional for all bug reports and contain `trace`. Info logs are invalid for bug reports and do not contain `debug` nor `trace`
      options:
        - label: I have read and followed the steps in the documentation link and provided the required trace logs - the logs contain `trace` - that are relevant and show this issue.
          required: true
.github/ISSUE_TEMPLATE/feature_request.yml (vendored, new file, 38 lines)
@@ -0,0 +1,38 @@
name: Feature Request
description: 'Suggest an idea for Decypharr'
labels: ['Type: Feature Request', 'Status: Needs Triage']
body:
  - type: checkboxes
    attributes:
      label: Is there an existing issue for this?
      description: Please search to see if an open or closed issue already exists for the feature you are requesting. If a request exists and is closed note that it may only be fixed in an unstable branch.
      options:
        - label: I have searched the existing open and closed issues
          required: true
  - type: textarea
    attributes:
      label: Is your feature request related to a problem? Please describe
      description: A clear and concise description of what the problem is.
    validations:
      required: true
  - type: textarea
    attributes:
      label: Describe the solution you'd like
      description: A clear and concise description of what you want to happen.
    validations:
      required: true
  - type: textarea
    attributes:
      label: Describe alternatives you've considered
      description: A clear and concise description of any alternative solutions or features you've considered.
    validations:
      required: true
  - type: textarea
    attributes:
      label: Anything else?
      description: |
        Links? References? Mockups? Anything that will give us more context about the feature you are encountering!

        Tip: You can attach images or log files by clicking this area to highlight it and then dragging files in.
    validations:
      required: true
.github/workflows/beta-docker.yml (vendored, new file, 85 lines)
@@ -0,0 +1,85 @@
name: Beta Docker Build

on:
  push:
    branches:
      - beta

permissions:
  contents: read
  packages: write

jobs:
  docker:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Calculate beta version
        id: calculate_version
        run: |
          LATEST_TAG=$(git tag | grep -v 'beta' | sort -V | tail -n1)
          echo "Found latest tag: ${LATEST_TAG}"

          IFS='.' read -r -a VERSION_PARTS <<< "$LATEST_TAG"
          MAJOR="${VERSION_PARTS[0]}"
          MINOR="${VERSION_PARTS[1]}"
          PATCH="${VERSION_PARTS[2]}"

          NEW_PATCH=$((PATCH + 1))
          BETA_VERSION="${MAJOR}.${MINOR}.${NEW_PATCH}"

          echo "Calculated beta version: ${BETA_VERSION}"
          echo "beta_version=${BETA_VERSION}" >> $GITHUB_ENV

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Cache Docker layers
        uses: actions/cache@v3
        with:
          path: /tmp/.buildx-cache
          key: ${{ runner.os }}-buildx-${{ github.sha }}
          restore-keys: |
            ${{ runner.os }}-buildx-

      # Login to Docker Hub
      - name: Login to Docker Hub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      # Login to GitHub Container Registry
      - name: Login to GitHub Container Registry
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          username: ${{ github.repository_owner }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Build and push beta Docker image
        uses: docker/build-push-action@v5
        with:
          context: .
          platforms: linux/amd64,linux/arm64,linux/arm/v7
          push: true
          tags: |
            cy01/blackhole:beta
            ghcr.io/${{ github.repository_owner }}/decypharr:beta
          cache-from: type=local,src=/tmp/.buildx-cache
          cache-to: type=local,dest=/tmp/.buildx-cache-new,mode=max
          build-args: |
            VERSION=${{ env.beta_version }}
            CHANNEL=beta

      - name: Move cache
        run: |
          rm -rf /tmp/.buildx-cache
          mv /tmp/.buildx-cache-new /tmp/.buildx-cache
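The "Calculate beta version" step above bumps the patch component of the latest non-beta tag in shell. For illustration only, a minimal Go sketch of the same bump logic (the workflow itself does this with `IFS='.' read` and arithmetic expansion):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// nextBetaVersion mirrors the shell step above: split MAJOR.MINOR.PATCH and
// increment PATCH, so "0.2.5" becomes "0.2.6".
func nextBetaVersion(latestTag string) (string, error) {
	parts := strings.Split(latestTag, ".")
	if len(parts) != 3 {
		return "", fmt.Errorf("unexpected tag format: %q", latestTag)
	}
	patch, err := strconv.Atoi(parts[2])
	if err != nil {
		return "", err
	}
	return fmt.Sprintf("%s.%s.%d", parts[0], parts[1], patch+1), nil
}

func main() {
	v, _ := nextBetaVersion("0.2.5")
	fmt.Println(v) // 0.2.6
}
```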
.github/workflows/deploy-docs.yml (vendored, new file, 28 lines)
@@ -0,0 +1,28 @@
name: ci
on:
  push:
    branches:
      - main
permissions:
  contents: write
jobs:
  deploy:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Configure Git Credentials
        run: |
          git config user.name github-actions[bot]
          git config user.email 41898282+github-actions[bot]@users.noreply.github.com
      - uses: actions/setup-python@v5
        with:
          python-version: 3.x
      - run: echo "cache_id=$(date --utc '+%V')" >> $GITHUB_ENV
      - uses: actions/cache@v4
        with:
          key: mkdocs-material-${{ env.cache_id }}
          path: .cache
          restore-keys: |
            mkdocs-material-
      - run: cd docs && pip install -r requirements.txt
      - run: cd docs && mkdocs gh-deploy --force
.github/workflows/goreleaser.yml (vendored, new file, 33 lines)
@@ -0,0 +1,33 @@
name: GoReleaser

on:
  push:
    tags:
      - '*'

permissions:
  contents: write

jobs:
  goreleaser:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Set up Go
        uses: actions/setup-go@v4
        with:
          go-version: '1.24'

      - name: Run GoReleaser
        uses: goreleaser/goreleaser-action@v5
        with:
          distribution: goreleaser
          version: latest
          args: release --clean
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          RELEASE_CHANNEL: stable
.github/workflows/release-docker.yml (vendored, new file, 77 lines)
@@ -0,0 +1,77 @@
name: Release Docker Build

on:
  push:
    tags:
      - '*'

permissions:
  contents: read
  packages: write


jobs:
  docker:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 1

      - name: Get tag name
        id: get_tag
        run: |
          TAG_NAME=${GITHUB_REF#refs/tags/}
          echo "tag_name=${TAG_NAME}" >> $GITHUB_ENV

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Cache Docker layers
        uses: actions/cache@v3
        with:
          path: /tmp/.buildx-cache
          key: ${{ runner.os }}-buildx-${{ github.sha }}
          restore-keys: |
            ${{ runner.os }}-buildx-

      # Login to Docker Hub
      - name: Login to Docker Hub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      # Login to GitHub Container Registry
      - name: Login to GitHub Container Registry
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          username: ${{ github.repository_owner }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Build and push release Docker image
        uses: docker/build-push-action@v5
        with:
          context: .
          platforms: linux/amd64,linux/arm64,linux/arm/v7
          push: true
          tags: |
            cy01/blackhole:latest
            cy01/blackhole:${{ env.tag_name }}
            ghcr.io/${{ github.repository_owner }}/decypharr:latest
            ghcr.io/${{ github.repository_owner }}/decypharr:${{ env.tag_name }}
          cache-from: type=local,src=/tmp/.buildx-cache
          cache-to: type=local,dest=/tmp/.buildx-cache-new,mode=max
          build-args: |
            VERSION=${{ env.tag_name }}
            CHANNEL=stable

      - name: Move cache
        run: |
          rm -rf /tmp/.buildx-cache
          mv /tmp/.buildx-cache-new /tmp/.buildx-cache
.gitignore (vendored, 11 lines changed)
@@ -1,6 +1,5 @@
data/
config.json
docker-compose.yml
.idea/
.DS_Store
*.torrent
@@ -9,3 +8,13 @@ docker-compose.yml
*.log
*.log.*
dist/
tmp/**
torrents.json
logs/**
auth.json
.ven/
.env
node_modules/
.venv/
.stignore
.stfolder/**
@@ -1,8 +1,7 @@
version: 2
version: 1

before:
  hooks:
    # You may remove this if you don't use go modules.
    - go mod tidy

builds:
@@ -16,19 +15,22 @@ builds:
      - amd64
      - arm
      - arm64
    ldflags:
      - -s -w
      - -X github.com/sirrobot01/decypharr/pkg/version.Version={{.Version}}
      - -X github.com/sirrobot01/decypharr/pkg/version.Channel={{.Env.RELEASE_CHANNEL}}


archives:
  - format: tar.gz
    # this name template makes the OS and Arch compatible with the results of `uname`.
    name_template: >-
      {{ .ProjectName }}_
      decypharr_
      {{- title .Os }}_
      {{- if eq .Arch "amd64" }}x86_64
      {{- else if eq .Arch "386" }}i386
      {{- else }}{{ .Arch }}{{ end }}
      {{- if .Arm }}v{{ .Arm }}{{ end }}
    # use zip for windows archives
    format_overrides:
      - goos: windows
        format: zip

CHANGELOG.md (deleted, 75 lines)
@@ -1,75 +0,0 @@
#### 0.1.0
- Initial Release
- Added Real Debrid Support
- Added Arrs Support
- Added Proxy Support
- Added Basic Authentication for Proxy
- Added Rate Limiting for Debrid Providers

#### 0.1.1
- Added support for "No Blackhole" for Arrs
- Added support for "Cached Only" for Proxy
- Bug Fixes

#### 0.1.2
- Bug fixes
- Code cleanup
- Get available hashes at once

#### 0.1.3

- Searching for infohashes in the xml description/summary/comments
- Added local cache support
- Added max cache size
- Rewrite blackhole.go
- Bug fixes
- Fixed indexer getting disabled
- Fixed blackhole not working

#### 0.1.4

- Rewrote Report log
- Fix YTS, 1337x not grabbing infohash
- Fix Torrent symlink bug


#### 0.2.0-beta

- Switch to QbitTorrent API instead of Blackhole
- Rewrote the whole codebase


#### 0.2.0
- Implement 0.2.0-beta changes
- Removed Blackhole
- Added QbitTorrent API
- Cleaned up the code

#### 0.2.1

- Fix Uncached torrents not being downloaded/downloaded
- Minor bug fixed
- Fix Race condition in the cache and file system

#### 0.2.2
- Fix name mismatch in the cache
- Fix directory mapping with mounts
- Add Support for refreshing the *arrs

#### 0.2.3

- Delete uncached items from RD
- Fail if the torrent is not cached(optional)
- Fix cache not being updated

#### 0.2.4

- Add file download support(Sequential Download)
- Fix http handler error
- Fix *arrs map failing concurrently
- Fix cache not being updated

#### 0.2.5
- Fix ContentPath not being set prior
- Rewrote Readme
- Cleaned up the code
Dockerfile (72 lines changed)
@@ -1,27 +1,65 @@
FROM --platform=$BUILDPLATFORM golang:1.22 as builder
# Stage 1: Build binaries
FROM --platform=$BUILDPLATFORM golang:1.24-alpine as builder

ARG TARGETPLATFORM
ARG BUILDPLATFORM
ARG TARGETOS
ARG TARGETARCH
ARG VERSION=0.0.0
ARG CHANNEL=dev

# Set destination for COPY
WORKDIR /app

# Download Go modules
COPY go.mod go.sum ./
RUN go mod download
RUN --mount=type=cache,target=/go/pkg/mod \
    go mod download -x

# Copy the source code. Note the slash at the end, as explained in
# https://docs.docker.com/reference/dockerfile/#copy
ADD . .
COPY . .

# Build
RUN CGO_ENABLED=0 GOOS=$(echo $TARGETPLATFORM | cut -d '/' -f1) GOARCH=$(echo $TARGETPLATFORM | cut -d '/' -f2) go build -o /blackhole
# Build main binary
RUN --mount=type=cache,target=/go/pkg/mod \
    --mount=type=cache,target=/root/.cache/go-build \
    CGO_ENABLED=0 GOOS=$TARGETOS GOARCH=$TARGETARCH \
    go build -trimpath \
    -ldflags="-w -s -X github.com/sirrobot01/decypharr/pkg/version.Version=${VERSION} -X github.com/sirrobot01/decypharr/pkg/version.Channel=${CHANNEL}" \
    -o /decypharr

FROM scratch
COPY --from=builder /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
COPY --from=builder /blackhole /blackhole
# Build healthcheck (optimized)
RUN --mount=type=cache,target=/go/pkg/mod \
    --mount=type=cache,target=/root/.cache/go-build \
    CGO_ENABLED=0 GOOS=$TARGETOS GOARCH=$TARGETARCH \
    go build -trimpath -ldflags="-w -s" \
    -o /healthcheck cmd/healthcheck/main.go

EXPOSE 8181
# Stage 2: Final image
FROM alpine:latest

# Run
CMD ["/blackhole", "--config", "/app/config.json"]
ARG VERSION=0.0.0
ARG CHANNEL=dev

LABEL version = "${VERSION}-${CHANNEL}"
LABEL org.opencontainers.image.source = "https://github.com/sirrobot01/decypharr"
LABEL org.opencontainers.image.title = "decypharr"
LABEL org.opencontainers.image.authors = "sirrobot01"
LABEL org.opencontainers.image.documentation = "https://github.com/sirrobot01/decypharr/blob/main/README.md"

# Install dependencies including rclone
RUN apk add --no-cache fuse3 ca-certificates su-exec shadow rclone && \
    echo "user_allow_other" >> /etc/fuse.conf

# Copy binaries and entrypoint
COPY --from=builder /decypharr /usr/bin/decypharr
COPY --from=builder /healthcheck /usr/bin/healthcheck
COPY scripts/entrypoint.sh /entrypoint.sh
RUN chmod +x /entrypoint.sh

# Set environment variables
ENV PUID=1000
ENV PGID=1000
ENV LOG_PATH=/app/logs

EXPOSE 8282
VOLUME ["/app"]

HEALTHCHECK --interval=10s --retries=10 CMD ["/usr/bin/healthcheck", "--config", "/app", "--basic"]

ENTRYPOINT ["/entrypoint.sh"]
CMD ["/usr/bin/decypharr", "--config", "/app"]
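The Dockerfile build step above, the `.air.toml` cmd, and the GoReleaser ldflags all inject the build version through `-X` linker flags. The targeted package is not part of this diff; only the variable names and the `version.GetInfo()` call in cmd/decypharr/main.go are visible, so the following sketch of pkg/version is an assumption:

```go
// Hypothetical reconstruction of pkg/version, inferred from the -X flags used above.
package version

import "fmt"

var (
	// Overridden at build time via -ldflags "-X github.com/sirrobot01/decypharr/pkg/version.Version=..."
	Version = "0.0.0"
	// Overridden via -X github.com/sirrobot01/decypharr/pkg/version.Channel=...
	Channel = "dev"
)

// GetInfo is referenced by cmd/decypharr/main.go; the exact output format is assumed here.
func GetInfo() string {
	return fmt.Sprintf("%s-%s", Version, Channel)
}
```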
LICENSE (new file, 21 lines)
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2025 Mukhtar Akere

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
README.md (174 lines changed)
@@ -1,146 +1,72 @@
### GoBlackHole(with Debrid Proxy Support)
# Decypharr

This is a Golang implementation go Torrent QbitTorrent with a **Real Debrid Proxy Support**.


#### Uses
- Mock Qbittorent API that supports the Arrs(Sonarr, Radarr, etc)
- Proxy support for the Arrs
**Decypharr** is an implementation of QbitTorrent with **Multiple Debrid service support**, written in Go.

The proxy is useful in filtering out un-cached Real Debrid torrents
## What is Decypharr?

### Changelog
Decypharr combines the power of QBittorrent with popular Debrid services to enhance your media management. It provides a familiar interface for Sonarr, Radarr, and other \*Arr applications.

- View the [CHANGELOG.md](CHANGELOG.md) for the latest changes
## Features

- Mock Qbittorent API that supports the Arrs (Sonarr, Radarr, Lidarr etc)
- Full-fledged UI for managing torrents
- Multiple Debrid providers support
- WebDAV server support for each debrid provider
- Optional mounting of WebDAV to your system(using [Rclone](https://rclone.org/))
- Repair Worker for missing files

## Supported Debrid Providers

- [Real Debrid](https://real-debrid.com)
- [Torbox](https://torbox.app)
- [Debrid Link](https://debrid-link.com)
- [All Debrid](https://alldebrid.com)

## Quick Start

### Docker (Recommended)

#### Installation
##### Docker Compose
```yaml
version: '3.7'
services:
  blackhole:
    image: cy01/blackhole:latest # or cy01/blackhole:beta
    container_name: blackhole
  decypharr:
    image: cy01/blackhole:latest
    container_name: decypharr
    ports:
      - "8282:8282" # qBittorrent
      - "8181:8181" # Proxy
    user: "1000:1000"
      - "8282:8282"
    volumes:
      - ./logs:/app/logs
      - ~/plex/media:/media
      - ~/plex/media/symlinks/:/media/symlinks/
      - ~/plex/configs/blackhole/config.json:/app/config.json # Config file, see below
    environment:
      - PUID=1000
      - PGID=1000
      - UMASK=002
      - QBIT_PORT=8282 # qBittorrent Port. This is optional. You can set this in the config file
      - PORT=8181 # Proxy Port. This is optional. You can set this in the config file
      - /mnt/:/mnt:rshared
      - ./configs/:/app # config.json must be in this directory
    restart: unless-stopped

    devices:
      - /dev/fuse:/dev/fuse:rwm
    cap_add:
      - SYS_ADMIN
    security_opt:
      - apparmor:unconfined
```

##### Binary
Download the binary from the releases page and run it with the config file.
## Documentation

```bash
./blackhole --config /path/to/config.json
```
For complete documentation, please visit our [Documentation](https://sirrobot01.github.io/decypharr/).

#### Config
```json
{
  "debrid": {
    "name": "realdebrid",
    "host": "https://api.real-debrid.com/rest/1.0",
    "api_key": "realdebrid_api_key",
    "folder": "data/realdebrid/torrents/",
    "rate_limit": "250/minute"
  },
  "proxy": {
    "enabled": true,
    "port": "8181",
    "debug": false,
    "username": "username",
    "password": "password",
    "cached_only": true
  },
  "max_cache_size": 1000,
  "qbittorrent": {
    "port": "8282",
    "download_folder": "/media/symlinks/",
    "categories": ["sonarr", "radarr"],
    "refresh_interval": 5
  }
}
```
The documentation includes:

#### Config Notes
##### Max Cache Size
- The `max_cache_size` key is used to set the maximum number of infohashes that can be stored in the availability cache. This is used to prevent round trip to the debrid provider when using the proxy/Qbittorrent
- The default value is `1000`
- The cache is stored in memory and is not persisted on restart
- Detailed installation instructions
- Configuration guide
- Usage with Sonarr/Radarr
- WebDAV setup
- Repair Worker information
- ...and more!

##### Debrid Config
- This config key is important as it's used for both Blackhole and Proxy
## Basic Configuration

##### Proxy Config
- The `enabled` key is used to enable the proxy
- The `port` key is the port the proxy will listen on
- The `debug` key is used to enable debug logs
- The `username` and `password` keys are used for basic authentication
- The `cached_only` means only cached torrents will be returned
You can configure Decypharr through the Web UI or by editing the `config.json` file directly.

## Contributing

##### Qbittorrent Config
- The `port` key is the port the qBittorrent will listen on
- The `download_folder` is the folder where the torrents will be downloaded. e.g `/media/symlinks/`
- The `categories` key is used to filter out torrents based on the category. e.g `sonarr`, `radarr`
- The `refresh_interval` key is used to set the interval in minutes to refresh the Arrs Monitored Downloads(it's in seconds). The default value is `5` seconds
Contributions are welcome! Please feel free to submit a Pull Request.

### Proxy

The proxy is useful in filtering out un-cached Real Debrid torrents.
The proxy is a simple HTTP proxy that requires basic authentication. The proxy can be enabled by setting the `proxy.enabled` to `true` in the config file.
The proxy listens on the port `8181` by default. The username and password can be set in the config file.

Setting Up Proxy in Arr

- Sonarr/Radarr
  - Settings -> General -> Use Proxy
  - Hostname: `localhost` # or the IP of the server
  - Port: `8181` # or the port set in the config file
  - Username: `username` # or the username set in the config file
  - Password: `password` # or the password set in the config file
  - Bypass Proxy for Local Addresses -> `No`

### Qbittorrent

The qBittorrent is a mock qBittorrent API that supports the Arrs(Sonarr, Radarr, etc).

Setting Up Qbittorrent in Arr

- Sonarr/Radarr
  - Settings -> Download Client -> Add Client -> qBittorrent
  - Host: `localhost` # or the IP of the server
  - Port: `8282` # or the port set in the config file/ docker-compose env
  - Username: `http://sonarr:8989` # Your arr host with http/https
  - Password: `sonarr_token` # Your arr token
  - Category: e.g `sonarr`, `radarr`
  - Use SSL -> `No`
  - Sequential Download -> `No`|`Yes` (If you want to download the torrents locally instead of symlink)
  - Test
  - Save

### TODO
- [ ] A proper name!!!!
- [ ] Debrid
  - [ ] Add more Debrid Providers

- [ ] Proxy
  - [ ] Add more Proxy features

- [ ] Qbittorrent
  - [ ] Add more Qbittorrent features
  - [ ] Persist torrents on restart/server crash
- [ ] Add tests
## License
This project is licensed under the MIT License. See the [LICENSE](LICENSE) file for details.
cmd/decypharr/main.go (new file, 206 lines)
@@ -0,0 +1,206 @@
package decypharr

import (
    "context"
    "fmt"
    "github.com/sirrobot01/decypharr/internal/config"
    "github.com/sirrobot01/decypharr/internal/logger"
    "github.com/sirrobot01/decypharr/pkg/qbit"
    "github.com/sirrobot01/decypharr/pkg/server"
    "github.com/sirrobot01/decypharr/pkg/store"
    "github.com/sirrobot01/decypharr/pkg/version"
    "github.com/sirrobot01/decypharr/pkg/web"
    "github.com/sirrobot01/decypharr/pkg/webdav"
    "net/http"
    "os"
    "runtime"
    "runtime/debug"
    "strconv"
    "sync"
)

func Start(ctx context.Context) error {

    if umaskStr := os.Getenv("UMASK"); umaskStr != "" {
        umask, err := strconv.ParseInt(umaskStr, 8, 32)
        if err != nil {
            return fmt.Errorf("invalid UMASK value: %s", umaskStr)
        }
        SetUmask(int(umask))
    }

    restartCh := make(chan struct{}, 1)
    web.SetRestartFunc(func() {
        select {
        case restartCh <- struct{}{}:
        default:
        }
    })

    svcCtx, cancelSvc := context.WithCancel(ctx)
    defer cancelSvc()

    for {
        cfg := config.Get()
        _log := logger.Default()

        // ascii banner
        fmt.Printf(`
+-------------------------------------------------------+
|                                                       |
|   ╔╦╗╔═╗╔═╗╦ ╦╔═╗╦ ╦╔═╗╦═╗╦═╗                          |
|    ║║║╣ ║  └┬┘╠═╝╠═╣╠═╣╠╦╝╠╦╝   (%s)                   |
|   ═╩╝╚═╝╚═╝ ┴ ╩  ╩ ╩╩ ╩╩╚═╩╚═                          |
|                                                       |
+-------------------------------------------------------+
|   Log Level: %s                                       |
+-------------------------------------------------------+
`, version.GetInfo(), cfg.LogLevel)

        // Initialize services
        qb := qbit.New()
        wd := webdav.New()

        ui := web.New().Routes()
        webdavRoutes := wd.Routes()
        qbitRoutes := qb.Routes()

        // Register routes
        handlers := map[string]http.Handler{
            "/":       ui,
            "/api/v2": qbitRoutes,
            "/webdav": webdavRoutes,
        }
        srv := server.New(handlers)

        reset := func() {
            // Reset the store and services
            qb.Reset()
            store.Reset()
            // refresh GC
            runtime.GC()
        }

        done := make(chan struct{})
        go func(ctx context.Context) {
            if err := startServices(ctx, cancelSvc, wd, srv); err != nil {
                _log.Error().Err(err).Msg("Error starting services")
                cancelSvc()
            }
            close(done)
        }(svcCtx)

        select {
        case <-ctx.Done():
            // graceful shutdown
            cancelSvc() // propagate to services
            <-done      // wait for them to finish
            _log.Info().Msg("Decypharr has been stopped gracefully.")
            reset() // reset store and services
            return nil

        case <-restartCh:
            cancelSvc() // tell existing services to shut down
            _log.Info().Msg("Restarting Decypharr...")
            <-done // wait for them to finish
            _log.Info().Msg("Decypharr has been restarted.")
            reset() // reset store and services
            // rebuild svcCtx off the original parent
            svcCtx, cancelSvc = context.WithCancel(ctx)
        }
    }
}

func startServices(ctx context.Context, cancelSvc context.CancelFunc, wd *webdav.WebDav, srv *server.Server) error {
    var wg sync.WaitGroup
    errChan := make(chan error)

    _log := logger.Default()

    safeGo := func(f func() error) {
        wg.Add(1)
        go func() {
            defer wg.Done()
            defer func() {
                if r := recover(); r != nil {
                    stack := debug.Stack()
                    _log.Error().
                        Interface("panic", r).
                        Str("stack", string(stack)).
                        Msg("Recovered from panic in goroutine")

                    // Send error to channel so the main goroutine is aware
                    errChan <- fmt.Errorf("panic: %v", r)
                }
            }()

            if err := f(); err != nil {
                errChan <- err
            }
        }()
    }

    safeGo(func() error {
        return wd.Start(ctx)
    })

    safeGo(func() error {
        return srv.Start(ctx)
    })

    // Start rclone RC server if enabled
    safeGo(func() error {
        rcManager := store.Get().RcloneManager()
        if rcManager == nil {
            return nil
        }
        return rcManager.Start(ctx)
    })

    safeGo(func() error {
        arr := store.Get().Arr()
        if arr == nil {
            return nil
        }
        return arr.StartSchedule(ctx)
    })

    if cfg := config.Get(); cfg.Repair.Enabled {
        safeGo(func() error {
            repair := store.Get().Repair()
            if repair != nil {
                if err := repair.Start(ctx); err != nil {
                    _log.Error().Err(err).Msg("repair failed")
                }
            }
            return nil
        })
    }

    safeGo(func() error {
        return store.Get().StartQueueSchedule(ctx)
    })

    go func() {
        wg.Wait()
        close(errChan)
    }()

    go func() {
        for err := range errChan {
            if err != nil {
                _log.Error().Err(err).Msg("Service error detected")
                // If the error is critical, return it to stop the main loop
                if ctx.Err() == nil {
                    _log.Error().Msg("Stopping services due to error")
                    cancelSvc() // Cancel the service context to stop all services
                }
            }
        }
    }()

    // Wait for context cancellation
    <-ctx.Done()
    _log.Debug().Msg("Services context cancelled")
    return nil
}
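The file above exposes `Start(ctx)` rather than a `main` function. A hypothetical root main.go (not part of this diff) would hand it a signal-aware context so that Ctrl+C takes the graceful-shutdown branch of the select loop; this wiring is an assumption for illustration:

```go
package main

import (
	"context"
	"log"
	"os/signal"
	"syscall"

	"github.com/sirrobot01/decypharr/cmd/decypharr" // import path assumed from the package location
)

func main() {
	// Cancel the context on SIGINT/SIGTERM; Start() then shuts the services down and returns.
	ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
	defer stop()

	if err := decypharr.Start(ctx); err != nil {
		log.Fatal(err)
	}
}
```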
cmd/decypharr/umask_unix.go (new file, 9 lines)
@@ -0,0 +1,9 @@
//go:build !windows

package decypharr

import "syscall"

func SetUmask(umask int) {
    syscall.Umask(umask)
}

cmd/decypharr/umask_win.go (new file, 8 lines)
@@ -0,0 +1,8 @@
//go:build windows
// +build windows

package decypharr

func SetUmask(umask int) {
    // No-op on Windows
}
cmd/healthcheck/main.go (new file, 175 lines)
@@ -0,0 +1,175 @@
package main

import (
    "cmp"
    "context"
    "encoding/json"
    "flag"
    "fmt"
    "github.com/sirrobot01/decypharr/internal/config"
    "net/http"
    "os"
    "strings"
    "time"
)

// HealthStatus represents the status of various components
type HealthStatus struct {
    QbitAPI       bool `json:"qbit_api"`
    WebUI         bool `json:"web_ui"`
    WebDAVService bool `json:"webdav_service"`
    OverallStatus bool `json:"overall_status"`
}

func main() {
    var (
        configPath   string
        isBasicCheck bool
        debug        bool
    )
    flag.StringVar(&configPath, "config", "/data", "path to the data folder")
    flag.BoolVar(&isBasicCheck, "basic", false, "perform basic health check without WebDAV")
    flag.BoolVar(&debug, "debug", false, "enable debug mode for detailed output")
    flag.Parse()
    config.SetConfigPath(configPath)
    cfg := config.Get()
    // Get port from environment variable or use default
    port := getEnvOrDefault("QBIT_PORT", cfg.Port)
    webdavPath := ""
    for _, debrid := range cfg.Debrids {
        if debrid.UseWebDav {
            webdavPath = debrid.Name
        }
    }

    // Initialize status
    status := HealthStatus{
        QbitAPI:       false,
        WebUI:         false,
        WebDAVService: false,
        OverallStatus: false,
    }

    // Create a context with timeout for all HTTP requests
    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
    defer cancel()

    baseUrl := cmp.Or(cfg.URLBase, "/")
    if !strings.HasPrefix(baseUrl, "/") {
        baseUrl = "/" + baseUrl
    }

    // Check qBittorrent API
    if checkQbitAPI(ctx, baseUrl, port) {
        status.QbitAPI = true
    }

    // Check Web UI
    if checkWebUI(ctx, baseUrl, port) {
        status.WebUI = true
    }

    if isBasicCheck {
        status.WebDAVService = checkBaseWebdav(ctx, baseUrl, port)
    } else {
        // If not a basic check, check WebDAV with debrid path
        if webdavPath != "" {
            status.WebDAVService = checkDebridWebDAV(ctx, baseUrl, port, webdavPath)
        } else {
            // If no WebDAV path is set, consider it healthy
            status.WebDAVService = true
        }
    }
    // Determine overall status
    // Consider the application healthy if core services are running
    status.OverallStatus = status.QbitAPI && status.WebUI
    if webdavPath != "" {
        status.OverallStatus = status.OverallStatus && status.WebDAVService
    }

    // Optional: output health status as JSON for logging
    if debug {
        statusJSON, _ := json.MarshalIndent(status, "", " ")
        fmt.Println(string(statusJSON))
    }

    // Exit with appropriate code
    if status.OverallStatus {
        os.Exit(0)
    } else {
        os.Exit(1)
    }
}

func getEnvOrDefault(key, defaultValue string) string {
    if value, exists := os.LookupEnv(key); exists {
        return value
    }
    return defaultValue
}

func checkQbitAPI(ctx context.Context, baseUrl, port string) bool {
    url := fmt.Sprintf("http://localhost:%s%sapi/v2/app/version", port, baseUrl)
    req, err := http.NewRequestWithContext(ctx, "GET", url, nil)
    if err != nil {
        return false
    }

    resp, err := http.DefaultClient.Do(req)
    if err != nil {
        return false
    }
    defer resp.Body.Close()

    return resp.StatusCode == http.StatusOK
}

func checkWebUI(ctx context.Context, baseUrl, port string) bool {
    req, err := http.NewRequestWithContext(ctx, "GET", fmt.Sprintf("http://localhost:%s%s", port, baseUrl), nil)
    if err != nil {
        return false
    }

    resp, err := http.DefaultClient.Do(req)
    if err != nil {
        return false
    }
    defer resp.Body.Close()

    return resp.StatusCode == http.StatusOK
}

func checkBaseWebdav(ctx context.Context, baseUrl, port string) bool {
    url := fmt.Sprintf("http://localhost:%s%swebdav/", port, baseUrl)
    req, err := http.NewRequestWithContext(ctx, "PROPFIND", url, nil)
    if err != nil {
        return false
    }

    resp, err := http.DefaultClient.Do(req)
    if err != nil {
        return false
    }
    defer resp.Body.Close()

    return resp.StatusCode == http.StatusMultiStatus ||
        resp.StatusCode == http.StatusOK
}

func checkDebridWebDAV(ctx context.Context, baseUrl, port, path string) bool {
    url := fmt.Sprintf("http://localhost:%s%swebdav/%s", port, baseUrl, path)
    req, err := http.NewRequestWithContext(ctx, "PROPFIND", url, nil)
    if err != nil {
        return false
    }

    resp, err := http.DefaultClient.Do(req)
    if err != nil {
        return false
    }
    defer resp.Body.Close()

    return resp.StatusCode == http.StatusMultiStatus ||
        resp.StatusCode == http.StatusOK

}
cmd/main.go (deleted, 40 lines)
@@ -1,40 +0,0 @@
package cmd

import (
    "cmp"
    "goBlack/common"
    "goBlack/pkg/debrid"
    "goBlack/pkg/proxy"
    "goBlack/pkg/qbit"
    "sync"
)

func Start(config *common.Config) {
    maxCacheSize := cmp.Or(config.MaxCacheSize, 1000)
    cache := common.NewCache(maxCacheSize)

    deb := debrid.NewDebrid(config.Debrid, cache)

    var wg sync.WaitGroup

    if config.Proxy.Enabled {
        p := proxy.NewProxy(*config, deb, cache)
        wg.Add(1)
        go func() {
            defer wg.Done()
            p.Start()
        }()
    }
    if config.QBitTorrent.Port != "" {
        qb := qbit.NewQBit(config, deb, cache)
        wg.Add(1)
        go func() {
            defer wg.Done()
            qb.Start()
        }()
    }

    // Wait indefinitely
    wg.Wait()

}
@@ -1,88 +0,0 @@
package common

import (
    "sync"
)

type Cache struct {
    data     map[string]struct{}
    order    []string
    maxItems int
    mu       sync.RWMutex
}

func NewCache(maxItems int) *Cache {
    if maxItems <= 0 {
        maxItems = 1000
    }
    return &Cache{
        data:     make(map[string]struct{}, maxItems),
        order:    make([]string, 0, maxItems),
        maxItems: maxItems,
    }
}

func (c *Cache) Add(value string) {
    c.mu.Lock()
    defer c.mu.Unlock()

    if _, exists := c.data[value]; !exists {
        if len(c.order) >= c.maxItems {
            delete(c.data, c.order[0])
            c.order = c.order[1:]
        }
        c.data[value] = struct{}{}
        c.order = append(c.order, value)
    }
}

func (c *Cache) AddMultiple(values map[string]bool) {
    c.mu.Lock()
    defer c.mu.Unlock()

    for value := range values {
        if _, exists := c.data[value]; !exists {
            if len(c.order) >= c.maxItems {
                delete(c.data, c.order[0])
                c.order = c.order[1:]
            }
            c.data[value] = struct{}{}
            c.order = append(c.order, value)
        }
    }
}

func (c *Cache) Get(index int) (string, bool) {
    c.mu.RLock()
    defer c.mu.RUnlock()
    if index < 0 || index >= len(c.order) {
        return "", false
    }
    return c.order[index], true
}

func (c *Cache) GetMultiple(values []string) map[string]bool {
    c.mu.RLock()
    defer c.mu.RUnlock()

    result := make(map[string]bool, len(values))
    for _, value := range values {
        if _, exists := c.data[value]; exists {
            result[value] = true
        }
    }
    return result
}

func (c *Cache) Exists(value string) bool {
    c.mu.RLock()
    defer c.mu.RUnlock()
    _, exists := c.data[value]
    return exists
}

func (c *Cache) Len() int {
    c.mu.RLock()
    defer c.mu.RUnlock()
    return len(c.order)
}
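A short usage sketch of the removed FIFO availability cache above, using only the functions shown; the infohash values are made up for illustration:

```go
package main

import (
	"fmt"

	"goBlack/common" // module path taken from the removed cmd/main.go above
)

func main() {
	cache := common.NewCache(1000) // evicts the oldest entry once 1000 items are stored

	cache.Add("d8a963f77f00000000000000000000000000dead") // made-up infohash
	cache.AddMultiple(map[string]bool{
		"feedfacefeedfacefeedfacefeedfacefeedface": true,
	})

	fmt.Println(cache.Exists("d8a963f77f00000000000000000000000000dead")) // true
	fmt.Println(cache.Len())                                              // 2
	fmt.Println(cache.GetMultiple([]string{"unknown-hash"}))              // map[]
}
```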
@@ -1,69 +0,0 @@
package common

import (
    "encoding/json"
    "log"
    "os"
)

type DebridConfig struct {
    Name             string `json:"name"`
    Host             string `json:"host"`
    APIKey           string `json:"api_key"`
    Folder           string `json:"folder"`
    DownloadUncached bool   `json:"download_uncached"`
    RateLimit        string `json:"rate_limit"` // 200/minute or 10/second
}

type ProxyConfig struct {
    Port       string `json:"port"`
    Enabled    bool   `json:"enabled"`
    Debug      bool   `json:"debug"`
    Username   string `json:"username"`
    Password   string `json:"password"`
    CachedOnly *bool  `json:"cached_only"`
}

type QBitTorrentConfig struct {
    Username        string   `json:"username"`
    Password        string   `json:"password"`
    Port            string   `json:"port"`
    Debug           bool     `json:"debug"`
    DownloadFolder  string   `json:"download_folder"`
    Categories      []string `json:"categories"`
    RefreshInterval int      `json:"refresh_interval"`
}

type Config struct {
    Debrid       DebridConfig      `json:"debrid"`
    Proxy        ProxyConfig       `json:"proxy"`
    MaxCacheSize int               `json:"max_cache_size"`
    QBitTorrent  QBitTorrentConfig `json:"qbittorrent"`
}

func LoadConfig(path string) (*Config, error) {
    // Load the config file
    file, err := os.Open(path)
    if err != nil {
        return nil, err
    }
    defer func(file *os.File) {
        err := file.Close()
        if err != nil {
            log.Fatal(err)
        }
    }(file)

    decoder := json.NewDecoder(file)
    config := &Config{}
    err = decoder.Decode(config)
    if err != nil {
        return nil, err
    }
    if config.Proxy.CachedOnly == nil {
        config.Proxy.CachedOnly = new(bool)
        *config.Proxy.CachedOnly = true
    }

    return config, nil
}
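A minimal sketch of how this (now removed) loader was consumed, using only the function and fields shown above; the config path is illustrative:

```go
package main

import (
	"fmt"
	"log"

	"goBlack/common" // module path from the removed code above
)

func main() {
	cfg, err := common.LoadConfig("/app/config.json") // path is illustrative
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(cfg.Debrid.Name, cfg.Debrid.RateLimit) // e.g. "realdebrid 250/minute" per the README config
	fmt.Println(*cfg.Proxy.CachedOnly)                 // defaulted to true when omitted from the file
}
```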
@@ -1,59 +0,0 @@
package common

import (
    "path/filepath"
    "regexp"
    "strings"
)

var (
    VIDEOMATCH  = "(?i)(\\.)(YUV|WMV|WEBM|VOB|VIV|SVI|ROQ|RMVB|RM|OGV|OGG|NSV|MXF|MPG|MPEG|M2V|MP2|MPE|MPV|MP4|M4P|M4V|MOV|QT|MNG|MKV|FLV|DRC|AVI|ASF|AMV|MKA|F4V|3GP|3G2|DIVX|X264|X265)$"
    MUSICMATCH  = "(?i)(\\.)(?:MP3|WAV|FLAC|AAC|OGG|WMA|AIFF|ALAC|M4A|APE|AC3|DTS|M4P|MID|MIDI|MKA|MP2|MPA|RA|VOC|WV|AMR)$"
    SUBMATCH    = "(?i)(\\.)(SRT|SUB|SBV|ASS|VTT|TTML|DFXP|STL|SCC|CAP|SMI|TTXT|TDS|USF|JSS|SSA|PSB|RT|LRC|SSB)$"
    SAMPLEMATCH = `(?i)(^|[\\/]|[._-])(sample|trailer|thumb)s?([._-]|$)`
)

func RegexMatch(regex string, value string) bool {
    re := regexp.MustCompile(regex)
    return re.MatchString(value)
}

func RemoveInvalidChars(value string) string {
    return strings.Map(func(r rune) rune {
        if r == filepath.Separator || r == ':' {
            return r
        }
        if filepath.IsAbs(string(r)) {
            return r
        }
        if strings.ContainsRune(filepath.VolumeName("C:"+string(r)), r) {
            return r
        }
        if r < 32 || strings.ContainsRune(`<>:"/\|?*`, r) {
            return -1
        }
        return r
    }, value)
}

func RemoveExtension(value string) string {
    re := regexp.MustCompile(VIDEOMATCH + "|" + SUBMATCH + "|" + SAMPLEMATCH + "|" + MUSICMATCH)

    // Find the last index of the matched extension
    loc := re.FindStringIndex(value)
    if loc != nil {
        return value[:loc[0]]
    } else {
        return value
    }
}

func RegexFind(regex string, value string) string {
    re := regexp.MustCompile(regex)
    match := re.FindStringSubmatch(value)
    if len(match) > 0 {
        return match[0]
    } else {
        return ""
    }
}
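A small usage sketch of the removed helpers above; the file names are made up for illustration:

```go
package main

import (
	"fmt"

	"goBlack/common" // module path taken from the removed cmd/main.go above
)

func main() {
	// VIDEOMATCH matches the trailing video extension, so ".mkv" is stripped.
	fmt.Println(common.RemoveExtension("Some.Movie.2023.1080p.mkv")) // Some.Movie.2023.1080p

	// Characters that are invalid in file names ('|' and '?') are dropped.
	fmt.Println(common.RemoveInvalidChars(`Some|Movie?.mkv`)) // SomeMovie.mkv

	// RegexMatch compiles and tests an arbitrary pattern, here the sample detector.
	fmt.Println(common.RegexMatch(common.SAMPLEMATCH, "movie-sample.mkv")) // true
}
```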
@@ -1,130 +0,0 @@
package common

import (
    "crypto/tls"
    "fmt"
    "golang.org/x/time/rate"
    "io"
    "log"
    "net/http"
    "regexp"
    "strconv"
    "time"
)

type RLHTTPClient struct {
    client      *http.Client
    Ratelimiter *rate.Limiter
    Headers     map[string]string
}

func (c *RLHTTPClient) Doer(req *http.Request) (*http.Response, error) {
    if c.Ratelimiter != nil {
        err := c.Ratelimiter.Wait(req.Context())
        if err != nil {
            return nil, err
        }
    }
    resp, err := c.client.Do(req)
    if err != nil {
        return nil, err
    }
    return resp, nil
}

func (c *RLHTTPClient) Do(req *http.Request) (*http.Response, error) {
    var resp *http.Response
    var err error
    backoff := time.Millisecond * 500

    for i := 0; i < 3; i++ {
        resp, err = c.Doer(req)
        if err != nil {
            return nil, err
        }

        if resp.StatusCode != http.StatusTooManyRequests {
            return resp, nil
        }

        // Close the response body to prevent resource leakage
        resp.Body.Close()

        // Wait for the backoff duration before retrying
        time.Sleep(backoff)

        // Exponential backoff
        backoff *= 2
    }

    return resp, fmt.Errorf("max retries exceeded")
}

func (c *RLHTTPClient) MakeRequest(method string, url string, body io.Reader) ([]byte, error) {
    req, err := http.NewRequest(method, url, body)
    if err != nil {
        return nil, err
    }
    if c.Headers != nil {
        for key, value := range c.Headers {
            req.Header.Set(key, value)
        }
    }

    res, err := c.Do(req)
    if err != nil {
        return nil, err
    }
    statusOk := strconv.Itoa(res.StatusCode)[0] == '2'
    if !statusOk {
        return nil, fmt.Errorf("unexpected status code: %d", res.StatusCode)
    }
    defer func(Body io.ReadCloser) {
        err := Body.Close()
        if err != nil {
            log.Println(err)
        }
    }(res.Body)
    return io.ReadAll(res.Body)
}

func NewRLHTTPClient(rl *rate.Limiter, headers map[string]string) *RLHTTPClient {
    tr := &http.Transport{
        TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
    }
    c := &RLHTTPClient{
        client: &http.Client{
            Transport: tr,
        },
        Ratelimiter: rl,
        Headers:     headers,
    }
    return c
}

func ParseRateLimit(rateStr string) *rate.Limiter {
    if rateStr == "" {
        return nil
    }
    re := regexp.MustCompile(`(\d+)/(minute|second)`)
    matches := re.FindStringSubmatch(rateStr)
    if len(matches) != 3 {
        return nil
    }

    count, err := strconv.Atoi(matches[1])
    if err != nil {
        return nil
    }

    unit := matches[2]
    switch unit {
    case "minute":
        reqsPerSecond := float64(count) / 60.0
        return rate.NewLimiter(rate.Limit(reqsPerSecond), 5)
    case "second":
        return rate.NewLimiter(rate.Limit(float64(count)), 5)
    default:
        return nil
    }
}
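A usage sketch for the removed rate-limited client above, wiring ParseRateLimit to NewRLHTTPClient the way the old Debrid config's `"rate_limit": "250/minute"` value was meant to be consumed; the API key placeholder and endpoint path are illustrative:

```go
package main

import (
	"fmt"
	"log"

	"goBlack/common" // module path from the removed code above
)

func main() {
	// "250/minute" becomes roughly 4.16 requests/second with a burst of 5.
	limiter := common.ParseRateLimit("250/minute")
	client := common.NewRLHTTPClient(limiter, map[string]string{
		"Authorization": "Bearer <api_key>", // placeholder
	})

	// Do() (used by MakeRequest) retries up to 3 times on HTTP 429 with
	// exponential backoff: 500ms, 1s, 2s.
	body, err := client.MakeRequest("GET", "https://api.real-debrid.com/rest/1.0/user", nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(body))
}
```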
docs/docs/api-spec.yaml (new file, 418 lines; listing truncated)
@@ -0,0 +1,418 @@
openapi: 3.0.3
info:
  title: Decypharr API
  description: QbitTorrent with Debrid Support API
  version: 1.0.0
  contact:
    name: Decypharr
    url: https://github.com/sirrobot01/decypharr

servers:
  - url: /api
    description: API endpoints

security:
  - cookieAuth: []
  - bearerAuth: []

paths:
  /arrs:
    get:
      summary: Get all configured Arrs
      description: Retrieve a list of all configured Arr applications (Sonarr, Radarr, etc.)
      tags:
        - Arrs
      responses:
        '200':
          description: Successfully retrieved Arrs
          content:
            application/json:
              schema:
                type: array
                items:
                  $ref: '#/components/schemas/Arr'

  /add:
    post:
      summary: Add content for processing
      description: Add torrent files or magnet links for processing through debrid services
      tags:
        - Content
      requestBody:
        content:
          multipart/form-data:
            schema:
              type: object
              properties:
                arr:
                  type: string
                  description: Name of the Arr application
                action:
                  type: string
                  description: Action to perform
                debrid:
                  type: string
                  description: Debrid service to use
                callbackUrl:
                  type: string
                  description: Optional callback URL
                downloadFolder:
                  type: string
                  description: Download folder path
                downloadUncached:
                  type: boolean
                  description: Whether to download uncached content
                urls:
                  type: string
                  description: Newline-separated URLs or magnet links
                files:
                  type: array
                  items:
                    type: string
                    format: binary
                  description: Torrent files to upload
      responses:
        '200':
          description: Content added successfully
          content:
            application/json:
              schema:
                type: object
                properties:
                  results:
                    type: array
                    items:
                      $ref: '#/components/schemas/ImportRequest'
                  errors:
                    type: array
                    items:
                      type: string
        '400':
          description: Bad request

  /repair:
    post:
      summary: Repair media
      description: Start a repair process for specified media items
      tags:
        - Repair
      requestBody:
        required: true
        content:
          application/json:
            schema:
              $ref: '#/components/schemas/RepairRequest'
      responses:
        '200':
          description: Repair started or completed
          content:
            application/json:
              schema:
                type: string
        '400':
          description: Bad request
        '404':
          description: Arr not found
        '500':
          description: Internal server error

  /repair/jobs:
    get:
      summary: Get repair jobs
      description: Retrieve all repair jobs
      tags:
        - Repair
      responses:
        '200':
          description: Successfully retrieved repair jobs
          content:
            application/json:
              schema:
                type: array
                items:
                  $ref: '#/components/schemas/RepairJob'
    delete:
      summary: Delete repair jobs
      description: Delete multiple repair jobs by IDs
      tags:
        - Repair
      requestBody:
        required: true
        content:
          application/json:
            schema:
              type: object
              properties:
                ids:
                  type: array
                  items:
                    type: string
              required:
                - ids
      responses:
        '200':
          description: Jobs deleted successfully
        '400':
          description: Bad request

  /repair/jobs/{id}/process:
    post:
      summary: Process repair job
      description: Process a specific repair job by ID
      tags:
        - Repair
      parameters:
        - name: id
          in: path
          required: true
          schema:
            type: string
          description: Job ID
      responses:
        '200':
          description: Job processed successfully
        '400':
          description: Bad request

  /repair/jobs/{id}/stop:
    post:
      summary: Stop repair job
      description: Stop a running repair job by ID
      tags:
        - Repair
      parameters:
        - name: id
          in: path
          required: true
          schema:
            type: string
          description: Job ID
      responses:
        '200':
          description: Job stopped successfully
        '400':
          description: Bad request
        '500':
          description: Internal server error

  /torrents:
    get:
      summary: Get all torrents
      description: Retrieve all torrents sorted by added date
|
||||
tags:
|
||||
- Torrents
|
||||
responses:
|
||||
'200':
|
||||
description: Successfully retrieved torrents
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
type: array
|
||||
items:
|
||||
$ref: '#/components/schemas/Torrent'
|
||||
delete:
|
||||
summary: Delete multiple torrents
|
||||
description: Delete multiple torrents by hash list
|
||||
tags:
|
||||
- Torrents
|
||||
parameters:
|
||||
- name: hashes
|
||||
in: query
|
||||
required: true
|
||||
schema:
|
||||
type: string
|
||||
description: Comma-separated list of torrent hashes
|
||||
- name: removeFromDebrid
|
||||
in: query
|
||||
schema:
|
||||
type: boolean
|
||||
default: false
|
||||
description: Whether to remove from debrid service
|
||||
responses:
|
||||
'200':
|
||||
description: Torrents deleted successfully
|
||||
'400':
|
||||
description: Bad request
|
||||
|
||||
/torrents/{category}/{hash}:
|
||||
delete:
|
||||
summary: Delete single torrent
|
||||
description: Delete a specific torrent by category and hash
|
||||
tags:
|
||||
- Torrents
|
||||
parameters:
|
||||
- name: category
|
||||
in: path
|
||||
required: true
|
||||
schema:
|
||||
type: string
|
||||
description: Torrent category
|
||||
- name: hash
|
||||
in: path
|
||||
required: true
|
||||
schema:
|
||||
type: string
|
||||
description: Torrent hash
|
||||
- name: removeFromDebrid
|
||||
in: query
|
||||
schema:
|
||||
type: boolean
|
||||
default: false
|
||||
description: Whether to remove from debrid service
|
||||
responses:
|
||||
'200':
|
||||
description: Torrent deleted successfully
|
||||
'400':
|
||||
description: Bad request
|
||||
|
||||
components:
|
||||
securitySchemes:
|
||||
cookieAuth:
|
||||
type: apiKey
|
||||
in: cookie
|
||||
name: auth-session
|
||||
bearerAuth:
|
||||
type: http
|
||||
scheme: bearer
|
||||
bearerFormat: token
|
||||
description: API token for authentication
|
||||
|
||||
schemas:
|
||||
Arr:
|
||||
type: object
|
||||
properties:
|
||||
name:
|
||||
type: string
|
||||
description: Name of the Arr application
|
||||
host:
|
||||
type: string
|
||||
description: Host URL of the Arr application
|
||||
token:
|
||||
type: string
|
||||
description: API token for the Arr application
|
||||
cleanup:
|
||||
type: boolean
|
||||
description: Whether to cleanup after processing
|
||||
skipRepair:
|
||||
type: boolean
|
||||
description: Whether to skip repair operations
|
||||
downloadUncached:
|
||||
type: boolean
|
||||
description: Whether to download uncached content
|
||||
selectedDebrid:
|
||||
type: string
|
||||
description: Selected debrid service
|
||||
source:
|
||||
type: string
|
||||
description: Source of the Arr configuration
|
||||
|
||||
ImportRequest:
|
||||
type: object
|
||||
properties:
|
||||
debridName:
|
||||
type: string
|
||||
description: Name of the debrid service
|
||||
downloadFolder:
|
||||
type: string
|
||||
description: Download folder path
|
||||
magnet:
|
||||
type: string
|
||||
description: Magnet link
|
||||
arr:
|
||||
$ref: '#/components/schemas/Arr'
|
||||
action:
|
||||
type: string
|
||||
description: Action to perform
|
||||
downloadUncached:
|
||||
type: boolean
|
||||
description: Whether to download uncached content
|
||||
callbackUrl:
|
||||
type: string
|
||||
description: Callback URL
|
||||
importType:
|
||||
type: string
|
||||
description: Type of import (API, etc.)
|
||||
|
||||
RepairRequest:
|
||||
type: object
|
||||
properties:
|
||||
arrName:
|
||||
type: string
|
||||
description: Name of the Arr application
|
||||
mediaIds:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
description: List of media IDs to repair
|
||||
autoProcess:
|
||||
type: boolean
|
||||
description: Whether to auto-process the repair
|
||||
async:
|
||||
type: boolean
|
||||
description: Whether to run repair asynchronously
|
||||
required:
|
||||
- arrName
|
||||
|
||||
RepairJob:
|
||||
type: object
|
||||
properties:
|
||||
id:
|
||||
type: string
|
||||
description: Job ID
|
||||
status:
|
||||
type: string
|
||||
description: Job status
|
||||
arrName:
|
||||
type: string
|
||||
description: Associated Arr application
|
||||
mediaIds:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
description: Media IDs being repaired
|
||||
createdAt:
|
||||
type: string
|
||||
format: date-time
|
||||
description: Job creation timestamp
|
||||
|
||||
Torrent:
|
||||
type: object
|
||||
properties:
|
||||
hash:
|
||||
type: string
|
||||
description: Torrent hash
|
||||
name:
|
||||
type: string
|
||||
description: Torrent name
|
||||
category:
|
||||
type: string
|
||||
description: Torrent category
|
||||
addedOn:
|
||||
type: string
|
||||
format: date-time
|
||||
description: Date when torrent was added
|
||||
size:
|
||||
type: integer
|
||||
description: Torrent size in bytes
|
||||
progress:
|
||||
type: number
|
||||
format: float
|
||||
description: Download progress (0-1)
|
||||
status:
|
||||
type: string
|
||||
description: Torrent status
|
||||
|
||||
|
||||
tags:
|
||||
- name: Arrs
|
||||
description: Arr application management
|
||||
- name: Content
|
||||
description: Content addition and processing
|
||||
- name: Repair
|
||||
description: Media repair operations
|
||||
- name: Torrents
|
||||
description: Torrent management
|
||||
- name: Configuration
|
||||
description: Application configuration
|
||||
- name: Authentication
|
||||
description: API token management
|
||||
90
docs/docs/api.md
Normal file
@@ -0,0 +1,90 @@
|
||||
# API Documentation
|
||||
|
||||
Decypharr provides a RESTful API for managing torrents, debrid services, and Arr integrations. The API requires authentication and all endpoints are prefixed with `/api`.
|
||||
|
||||
## Authentication
|
||||
|
||||
The API supports two authentication methods:
|
||||
|
||||
### 1. Session-based Authentication (Cookies)
|
||||
Log in through the web interface (`/login`) to establish an authenticated session. The session cookie (`auth-session`) will be automatically included in subsequent API requests from the same browser session.
|
||||
|
||||
### 2. API Token Authentication (Bearer Token)
|
||||
Use API tokens for programmatic access. Include the token in the `Authorization` header for each request:
|
||||
|
||||
- `Authorization: Bearer <your-token>`
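
A minimal authenticated request (a sketch; `$API_TOKEN` is a placeholder for a token generated in Decypharr, and the host/port should match your deployment) looks like:

```bash
# List all configured Arr applications using an API token
curl -H "Authorization: Bearer $API_TOKEN" http://localhost:8080/api/arrs
```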
|
||||
|
||||
## Interactive API Documentation
|
||||
|
||||
<swagger-ui src="api-spec.yaml"/>
|
||||
|
||||
## API Endpoints Overview
|
||||
|
||||
### Arrs Management
|
||||
- `GET /api/arrs` - Get all configured Arr applications (Sonarr, Radarr, etc.)
|
||||
|
||||
### Content Management
|
||||
- `POST /api/add` - Add torrent files or magnet links for processing through debrid services
|
||||
|
||||
### Repair Operations
|
||||
- `POST /api/repair` - Start repair process for media items
|
||||
- `GET /api/repair/jobs` - Get all repair jobs
|
||||
- `POST /api/repair/jobs/{id}/process` - Process a specific repair job
|
||||
- `POST /api/repair/jobs/{id}/stop` - Stop a running repair job
|
||||
- `DELETE /api/repair/jobs` - Delete multiple repair jobs
|
||||
|
||||
### Torrent Management
|
||||
- `GET /api/torrents` - Get all torrents
|
||||
- `DELETE /api/torrents/{category}/{hash}` - Delete a specific torrent
|
||||
- `DELETE /api/torrents` - Delete multiple torrents (via the `hashes` query parameter)
|
||||
|
||||
## Usage Examples
|
||||
|
||||
### Adding Content via API
|
||||
|
||||
#### Using API Token:
|
||||
```bash
|
||||
curl -H "Authorization: Bearer $API_TOKEN" -X POST http://localhost:8080/api/add \
|
||||
-F "arr=sonarr" \
|
||||
-F "debrid=realdebrid" \
|
||||
-F "urls=magnet:?xt=urn:btih:..." \
|
||||
-F "downloadUncached=true"
|
||||
-F "file=@/path/to/torrent/file.torrent"
|
||||
-F "callbackUrl=http://your.callback.url/endpoint"
|
||||
```
|
||||
|
||||
#### Using Session Cookies:
|
||||
```bash
|
||||
# Login first (this sets the session cookie)
|
||||
curl -c cookies.txt -X POST http://localhost:8080/login \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{"username": "your_username", "password": "your_password"}'
|
||||
|
||||
# Then use the session cookie for API calls
|
||||
curl -b cookies.txt -X POST http://localhost:8080/api/add \
|
||||
-F "arr=sonarr" \
|
||||
-F "debrid=realdebrid" \
|
||||
-F "urls=magnet:?xt=urn:btih:..." \
|
||||
-F "downloadUncached=true"
|
||||
```
|
||||
|
||||
### Getting Torrents
|
||||
|
||||
```bash
|
||||
# With API token
|
||||
curl -H "Authorization: Bearer $API_TOKEN" -X GET http://localhost:8080/api/torrents
|
||||
```
|
||||
|
||||
### Starting a Repair Job
|
||||
|
||||
```bash
|
||||
# With API token
|
||||
curl -H "Authorization: Bearer $API_TOKEN" -X POST http://localhost:8080/api/repair \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"arrName": "sonarr",
|
||||
"mediaIds": ["123", "456"],
|
||||
"autoProcess": true,
|
||||
"async": true
|
||||
}'
|
||||
```
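
### Deleting Torrents

The deletion endpoints from the spec above work the same way; the hashes below are placeholders for real torrent hashes.

```bash
# Delete several torrents by comma-separated hashes, also removing them from the debrid service
curl -H "Authorization: Bearer $API_TOKEN" -X DELETE \
  "http://localhost:8080/api/torrents?hashes=<hash1>,<hash2>&removeFromDebrid=true"

# Delete a single torrent by category and hash
curl -H "Authorization: Bearer $API_TOKEN" -X DELETE \
  "http://localhost:8080/api/torrents/sonarr/<hash>"
```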
|
||||
44
docs/docs/features/index.md
Normal file
@@ -0,0 +1,44 @@
|
||||
# Features Overview
|
||||
|
||||
Decypharr extends the functionality of qBittorrent by integrating with Debrid services, providing several powerful features that enhance your media management experience.
|
||||
|
||||
## Core Features
|
||||
|
||||
### Mock qBittorrent API
|
||||
|
||||
Decypharr implements a complete qBittorrent-compatible API that can be used with Sonarr, Radarr, Lidarr, and other Arr applications. This allows you to:
|
||||
|
||||
- Seamlessly integrate with your existing Arr setup
|
||||
- Use familiar interfaces to manage your downloads
|
||||
- Benefit from Debrid services without changing your workflow
|
||||
|
||||
### Comprehensive UI
|
||||
|
||||
The Decypharr user interface provides:
|
||||
|
||||
- Torrent management capabilities
|
||||
- Status monitoring
|
||||
- Configuration options
|
||||
- Multiple Debrid provider integration
|
||||
|
||||
## Advanced Features
|
||||
|
||||
Decypharr includes several advanced features that extend its capabilities:
|
||||
|
||||
- [Repair Support](repair-worker.md): Identifies and fixes issues with your media files
|
||||
- WebDAV Server: Provides direct access to your Debrid files
|
||||
- Mounting Support: Allows you to mount Debrid services using [rclone](https://rclone.org), making it easy to access your files directly from your system
|
||||
- Multiple Debrid Providers: Supports Real Debrid, Torbox, Debrid Link, and All Debrid, allowing you to choose the best service for your needs
|
||||
|
||||
## Supported Debrid Providers
|
||||
|
||||
Decypharr supports multiple Debrid providers:
|
||||
|
||||
- Real Debrid
|
||||
- Torbox
|
||||
- Debrid Link
|
||||
- All Debrid
|
||||
- Premiumize (Coming Soon)
|
||||
- Usenet (Coming Soon)
|
||||
|
||||
Each provider can be configured separately, allowing you to use one or multiple services simultaneously.
|
||||
18
docs/docs/features/repair-worker.md
Normal file
@@ -0,0 +1,18 @@
|
||||
# Repair Worker
|
||||
|
||||

|
||||
|
||||
The Repair Worker is a powerful feature that helps maintain the health of your media library by scanning for and fixing issues with files.
|
||||
|
||||
## What It Does
|
||||
|
||||
The Repair Worker performs the following tasks:
|
||||
|
||||
- Searches for broken symlinks or file references
|
||||
- Identifies missing files in your library
|
||||
- Locates deleted or unreadable files
|
||||
- Automatically repairs issues when possible
|
||||
|
||||
## Configuration
|
||||
|
||||
You can enable and configure the Repair Worker in the Decypharr settings. It can be set to run at regular intervals, such as every 12 hours or daily.
|
||||
26
docs/docs/guides/downloading.md
Normal file
@@ -0,0 +1,26 @@
|
||||
# Downloading with Decypharr
|
||||
|
||||
While Decypharr provides a qBittorrent-compatible API for integration with media management applications, it also allows you to download torrents manually through its interface. This guide walks you through downloading torrents with Decypharr.
|
||||
|
||||
- You can either use the Decypharr UI to add torrents manually or use its [API](../api.md) to automate the process.
|
||||
|
||||
## Manual Downloading
|
||||
|
||||

|
||||
To manually download a torrent using Decypharr, follow these steps:
|
||||
1. Navigate to the "Download" section in the Decypharr UI.
|
||||
2. Upload torrent file(s) or paste magnet links directly into the input fields.
3. Select the action (defaults to Symlink).
|
||||
|
||||
4. Add any additional options, such as:
|
||||
- **Download Folder**: Specify the folder where the downloaded files will be saved.
- **Arr Category**: Choose the category for the download, which helps in organizing files in your media management applications.
|
||||
- **Post Download Action**: Select what to do after the download completes:
|
||||
- **Create Symlink**: Create a symlink to the downloaded files in the mount folder (default).
|
||||
- **Download**: Download the file directly.
|
||||
- **No Action**: Do nothing after the download completes.
|
||||
- **Debrid Provider**: Choose which Debrid service to use for the download (if you have multiple).
|
||||
- **Download Uncached**: If enabled, Decypharr will attempt to download uncached files from the Debrid service.
|
||||
|
||||
Note:
|
||||
- If you use an Arr category, your download will go into **{download_folder}/{arr}**.
|
||||
4
docs/docs/guides/index.md
Normal file
@@ -0,0 +1,4 @@
|
||||
# Guides for setting up Decypharr
|
||||
|
||||
- [Manual Downloading with Decypharr](downloading.md)
|
||||
- [Internal Mounting](internal-mounting.md)
|
||||
81
docs/docs/guides/internal-mounting.md
Normal file
@@ -0,0 +1,81 @@
|
||||
# Internal Mounting
|
||||
|
||||
This guide explains how to use Decypharr's internal mounting feature to eliminate the need for external rclone setup.
|
||||
|
||||
## Overview
|
||||
|
||||

|
||||
|
||||
Instead of requiring users to install and configure rclone separately, Decypharr can now mount your WebDAV endpoints internally using rclone as a library dependency. This provides a seamless experience where files appear as regular filesystem paths without any external dependencies.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- **Docker users**: FUSE support may need to be enabled in the container depending on your Docker setup
|
||||
- **macOS users**: May need [macFUSE](https://osxfuse.github.io/) installed for mounting functionality
|
||||
- **Linux users**: FUSE should be available by default on most distributions
|
||||
- **Windows users**: Mounting functionality may be limited
|
||||
|
||||
### Configuration Options
|
||||
|
||||
You can set the options in the Web UI or directly in the configuration file:
|
||||
|
||||
#### Note:
|
||||
Check the Rclone documentation for more details on the available options: [Rclone Mount Options](https://rclone.org/commands/rclone_mount/).
|
||||
|
||||
## How It Works
|
||||
|
||||
1. **WebDAV Server**: Decypharr starts its internal WebDAV server for enabled providers
|
||||
2. **Internal Mount**: Rclone is used internally to mount the WebDAV endpoint to a local filesystem path
|
||||
3. **File Access**: Your applications can access files using regular filesystem paths like `/mnt/decypharr/realdebrid/__all__/MyMovie/`
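
A quick way to confirm the internal mount is active (paths and provider names below are illustrative; substitute your configured mount path):

```bash
# Check that the FUSE mount is present
mount | grep decypharr

# List the mounted debrid content
ls /mnt/decypharr/realdebrid/__all__/
```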
|
||||
|
||||
## Benefits
|
||||
|
||||
- **Automatic Setup**: Mounting is handled automatically by Decypharr using internal rclone rcd
|
||||
- **Filesystem Access**: Files appear as regular directories and files
|
||||
- **Seamless Integration**: Works with existing media servers without changes
|
||||
|
||||
## Docker Compose
|
||||
|
||||
```yaml
|
||||
version: '3.8'
|
||||
services:
|
||||
decypharr:
|
||||
image: sirrobot01/decypharr:latest
|
||||
container_name: decypharr
|
||||
ports:
|
||||
- "8282:8282"
|
||||
volumes:
|
||||
- ./config:/config
|
||||
- /mnt:/mnt:rshared # Important: use 'rshared' for mount propagation
|
||||
devices:
|
||||
- /dev/fuse:/dev/fuse:rwm
|
||||
cap_add:
|
||||
- SYS_ADMIN
|
||||
environment:
|
||||
- UMASK=002
|
||||
```
|
||||
|
||||
**Important Docker Notes:**
|
||||
- Mount volumes with `:rshared` to allow mount propagation
|
||||
- Include `/dev/fuse` device for FUSE mounting
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Mount Failures
|
||||
|
||||
If mounting fails, check:
|
||||
|
||||
1. **FUSE Installation**:
|
||||
- **macOS**: Install macFUSE from https://osxfuse.github.io/
|
||||
- **Linux**: Install fuse package (`apt install fuse` or `yum install fuse`)
|
||||
- **Docker**: FUSE is already included in the container, but ensure the host supports it
|
||||
2. **Permissions**: Ensure the application has sufficient privileges
|
||||
|
||||
### No Mount Methods Available
|
||||
|
||||
If you see "no mount method available" errors:
|
||||
|
||||
1. **Check Platform Support**: Some platforms have limited FUSE support
|
||||
2. **Install Dependencies**: Ensure FUSE libraries are installed
|
||||
3. **Use WebDAV Directly**: Access files via `http://localhost:8282/webdav/provider/`
|
||||
4. **External Mounting**: Use OS-native WebDAV mounting as fallback
|
||||
BIN
docs/docs/images/download.png
Normal file
|
After Width: | Height: | Size: 293 KiB |
BIN
docs/docs/images/logo.png
Normal file
|
After Width: | Height: | Size: 1.2 MiB |
BIN
docs/docs/images/main-light.png
Normal file
|
After Width: | Height: | Size: 431 KiB |
BIN
docs/docs/images/main.png
Normal file
|
After Width: | Height: | Size: 417 KiB |
BIN
docs/docs/images/repair.png
Normal file
|
After Width: | Height: | Size: 286 KiB |
BIN
docs/docs/images/settings/arr.png
Normal file
|
After Width: | Height: | Size: 264 KiB |
BIN
docs/docs/images/settings/debrid.png
Normal file
|
After Width: | Height: | Size: 264 KiB |
BIN
docs/docs/images/settings/qbittorent.png
Normal file
|
After Width: | Height: | Size: 169 KiB |
BIN
docs/docs/images/settings/rclone.png
Normal file
|
After Width: | Height: | Size: 364 KiB |
BIN
docs/docs/images/settings/repair.png
Normal file
|
After Width: | Height: | Size: 216 KiB |
BIN
docs/docs/images/webdav.png
Normal file
|
After Width: | Height: | Size: 62 KiB |
28
docs/docs/index.md
Normal file
@@ -0,0 +1,28 @@
|
||||
# Decypharr
|
||||
{: .light-mode-image}
|
||||
{: .dark-mode-image}
|
||||
|
||||
**Decypharr** is an implementation of QbitTorrent with **Multiple Debrid service support**, written in Go.
|
||||
|
||||
## What is Decypharr?
|
||||
|
||||
**TLDR**; Decypharr is a self-hosted, open-source download client that integrates with multiple Debrid services. It provides a user-friendly interface for managing files and supports popular media management applications like Sonarr and Radarr.
|
||||
|
||||
|
||||
## Key Features
|
||||
|
||||
- Mock qBittorrent API that supports Sonarr, Radarr, Lidarr, and other Arr applications
|
||||
- Multiple Debrid providers support
|
||||
- WebDAV server support for each Debrid provider with an optional mounting feature (using [rclone](https://rclone.org))
|
||||
- Repair Worker for missing files, broken symlinks, etc.
|
||||
|
||||
## Supported Debrid Providers
|
||||
|
||||
- [Real Debrid](https://real-debrid.com)
|
||||
- [Torbox](https://torbox.app)
|
||||
- [Debrid Link](https://debrid-link.com)
|
||||
- [All Debrid](https://alldebrid.com)
|
||||
|
||||
## Getting Started
|
||||
|
||||
Check out our [Installation Guide](installation.md) to get started with Decypharr.
|
||||
107
docs/docs/installation.md
Normal file
@@ -0,0 +1,107 @@
|
||||
# Installation
|
||||
|
||||
There are multiple ways to install and run Decypharr. Choose the method that works best for your setup.
|
||||
|
||||
## Docker Installation (Recommended)
|
||||
|
||||
Docker is the easiest way to get started with Decypharr.
|
||||
|
||||
### Available Docker Registries
|
||||
|
||||
You can use either Docker Hub or GitHub Container Registry to pull the image:
|
||||
|
||||
- Docker Hub: `cy01/blackhole:latest`
|
||||
- GitHub Container Registry: `ghcr.io/sirrobot01/decypharr:latest`
|
||||
|
||||
### Docker Tags
|
||||
|
||||
- `latest`: The latest stable release
|
||||
- `beta`: The latest beta release
|
||||
- `vX.Y.Z`: A specific version (e.g., `v0.1.0`)
|
||||
- `experimental`: The latest experimental build (highly unstable)
|
||||
|
||||
### Docker CLI Setup
|
||||
|
||||
Pull the Docker image:
|
||||
```bash
|
||||
docker pull cy01/blackhole:latest
|
||||
```
|
||||
Run the Docker container:
|
||||
```bash
|
||||
docker run -d \
|
||||
--name decypharr \
|
||||
--restart unless-stopped \
|
||||
-p 8282:8282 \
|
||||
-v /mnt/:/mnt:rshared \
|
||||
-v ./config/:/app \
|
||||
--device /dev/fuse:/dev/fuse:rwm \
|
||||
--cap-add SYS_ADMIN \
|
||||
--security-opt apparmor:unconfined \
|
||||
cy01/blackhole:latest
|
||||
```
|
||||
|
||||
### Docker Compose Setup
|
||||
|
||||
Create a `docker-compose.yml` file with the following content:
|
||||
|
||||
```yaml
|
||||
services:
|
||||
decypharr:
|
||||
image: cy01/blackhole:latest
|
||||
container_name: decypharr
|
||||
ports:
|
||||
- "8282:8282"
|
||||
volumes:
|
||||
- /mnt/:/mnt:rshared
|
||||
- ./config/:/app
|
||||
restart: unless-stopped
|
||||
devices:
|
||||
- /dev/fuse:/dev/fuse:rwm
|
||||
cap_add:
|
||||
- SYS_ADMIN
|
||||
security_opt:
|
||||
- apparmor:unconfined
|
||||
```
|
||||
|
||||
Run the Docker Compose setup:
|
||||
```bash
|
||||
docker-compose up -d
|
||||
```
|
||||
|
||||
|
||||
## Binary Installation
|
||||
If you prefer not to use Docker, you can download and run the binary directly.
|
||||
|
||||
1. Download your OS-specific release from the [release page](https://github.com/sirrobot01/decypharr/releases).
2. Create a configuration file (see Configuration).
3. Run the binary:
|
||||
|
||||
```bash
|
||||
chmod +x decypharr
|
||||
./decypharr --config /path/to/config/folder
|
||||
```
|
||||
|
||||
### Notes for Docker Users
|
||||
|
||||
- Ensure that the `/mnt/` directory is mounted correctly to access your media files.
|
||||
- You can adjust the `PUID` and `PGID` environment variables to match your user and group IDs for proper file permissions.
|
||||
- The `UMASK` environment variable can be set to control file permissions created by Decypharr.
|
||||
|
||||
##### Health Checks
|
||||
- Health checks are disabled by default. You can enable them by adding a `healthcheck` section in your `docker-compose.yml` file.
|
||||
- The health check verifies the availability of several parts of the application:
    - The main web interface
    - The qBittorrent API
    - The WebDAV server (if enabled). You should disable health checks during the initial indexing, as it can take a long time to complete.
|
||||
|
||||
```yaml
|
||||
services:
|
||||
decypharr:
|
||||
...
|
||||
...
|
||||
healthcheck:
|
||||
test: ["CMD", "/usr/bin/healthcheck", "--config", "/app/"]
|
||||
interval: 10s
|
||||
timeout: 10s
|
||||
retries: 3
|
||||
```
|
||||
24
docs/docs/styles/styles.css
Normal file
@@ -0,0 +1,24 @@
|
||||
/* Light mode image - visible by default */
|
||||
.light-mode-image {
|
||||
display: block;
|
||||
}
|
||||
|
||||
/* Dark mode image - hidden by default */
|
||||
.dark-mode-image {
|
||||
display: none;
|
||||
}
|
||||
|
||||
/* When dark theme (slate) is active */
|
||||
[data-md-color-scheme="slate"] .light-mode-image {
|
||||
display: none;
|
||||
}
|
||||
|
||||
[data-md-color-scheme="slate"] .dark-mode-image {
|
||||
display: block;
|
||||
}
|
||||
|
||||
/* Optional: smooth transition */
|
||||
.light-mode-image,
|
||||
.dark-mode-image {
|
||||
transition: opacity 0.2s ease-in-out;
|
||||
}
|
||||
75
docs/docs/usage.md
Normal file
@@ -0,0 +1,75 @@
|
||||
# Usage Guide
|
||||
|
||||
This guide will help you get started with Decypharr after installation.
|
||||
|
||||
After installing Decypharr, you can access the web interface at `http://localhost:8282` or your configured host/port.
|
||||
|
||||
### Initial Configuration
|
||||
If it's the first time you're accessing the UI, you will be prompted to set up your credentials. You can skip this step if you don't want to enable authentication. If you choose to set up credentials, enter a username, a password, and the password confirmation, then click **Save**. You will be redirected to the settings page.
|
||||
|
||||
### Debrid Configuration
|
||||

|
||||
- Click on **Debrid** in the tab
|
||||
- Add your desired Debrid services (Real Debrid, Torbox, Debrid Link, All Debrid) by entering the required API keys or tokens.
|
||||
- Set the **Mount/Rclone Folder**. This is where Decypharr will look for added torrents so it can symlink them into your media library.
|
||||
- If you're using the internal WebDAV, do not forget the `/__all__` suffix
|
||||
- Enable WebDAV
|
||||
- You can leave the remaining settings as default for now.
|
||||
|
||||
### qBittorrent Configuration
|
||||

|
||||
|
||||
- Click on **Qbittorrent** in the tab
|
||||
- Set the **Download Folder** to where you want Decypharr to save downloaded files. These files will be symlinked to the mount folder you configured earlier.
|
||||
You can leave the remaining settings as default for now.
|
||||
|
||||
### Arrs Configuration
|
||||
|
||||
You can skip Arr configuration for now. Decypharr will auto-add them when you connect to Sonarr or Radarr later.
|
||||
|
||||
|
||||
#### Connecting to Sonarr/Radarr
|
||||
|
||||

|
||||
To connect Decypharr to your Sonarr or Radarr instance:
|
||||
|
||||
1. In Sonarr/Radarr, go to **Settings → Download Client → Add Client → qBittorrent**
|
||||
2. Configure the following settings:
|
||||
- **Host**: `localhost` (or the IP of your Decypharr server)
|
||||
- **Port**: `8282` (or your configured qBittorrent port)
|
||||
- **Username**: `http://sonarr:8989` (your Arr host with http/https)
|
||||
- **Password**: `sonarr_token` (your Arr API token, you can get this from Sonarr/Radarr settings)
|
||||
- **Category**: e.g., `sonarr`, `radarr` (match what you configured in Decypharr)
|
||||
- **Use SSL**: `No`
|
||||
- **Sequential Download**: `No` or `Yes` (set to `Yes` if you want to download torrents locally instead of symlinking)
|
||||
3. Click **Test** to verify the connection
|
||||
4. Click **Save** to add the download client
|
||||
|
||||
|
||||
### Rclone Configuration
|
||||
|
||||

|
||||
|
||||
If you want Decypharr to automatically mount WebDAV folders using Rclone, you need to set up Rclone first:
|
||||
|
||||
If you're using Docker, the rclone binary is already included in the container. If you're running Decypharr directly, make sure Rclone is installed on your system.
|
||||
|
||||
Enable **Mount**
|
||||
- **Global Mount Path**: Set the path where you want to mount the WebDAV folders (e.g., `/mnt/remote`). Decypharr will create subfolders for each Debrid service. For example, if you set `/mnt/remote`, it will create `/mnt/remote/realdebrid`, `/mnt/remote/torbox`, etc. This should be the grandparent of your mount folder set in the Debrid configuration.
|
||||
- **User ID**: Set the user ID for Rclone mounts (defaults to the `PUID` environment variable).
- **Group ID**: Set the group ID for Rclone mounts (defaults to the `PGID` environment variable).
|
||||
- **Buffer Size**: Set the buffer size for Rclone mounts.
|
||||
|
||||
Set the other options based on your use case; if you're unsure, leave them at their defaults. Check out the [Rclone documentation](https://rclone.org/commands/rclone_mount/) for more details.
|
||||
|
||||
### Repair Configuration
|
||||
|
||||

|
||||
|
||||
Repair is an optional feature that allows you to fix missing files, symlinks, and other issues in your media library.
|
||||
- Click on **Repair** in the tab
|
||||
- Enable **Scheduled Repair** if you want Decypharr to automatically check for missing files at your specified interval.
|
||||
- Set the **Repair Interval** to how often you want Decypharr to check for missing files (e.g. 1h, 6h, 12h, or 24h; you can also use cron syntax like `0 0 * * *` for daily checks).
|
||||
- Enable **WebDAV** (you should enable this if you enabled WebDAV in the Debrid configuration).
|
||||
- **Auto Process**: Enable this if you want Decypharr to automatically process repair jobs when they finish. This can delete the original files or symlinks, so use it with care.
|
||||
- **Worker Threads**: Set the number of worker threads for processing repair jobs. More threads can speed up the process but may consume more resources.
|
||||
78
docs/mkdocs.yml
Normal file
@@ -0,0 +1,78 @@
|
||||
site_name: Decypharr
|
||||
site_url: https://sirrobot01.github.io/decypharr
|
||||
site_description: QbitTorrent with Debrid Support
|
||||
repo_url: https://github.com/sirrobot01/decypharr
|
||||
repo_name: sirrobot01/decypharr
|
||||
edit_uri: blob/main/docs
|
||||
|
||||
|
||||
extra_css:
|
||||
- styles/styles.css
|
||||
|
||||
theme:
|
||||
name: material
|
||||
logo: images/logo.png
|
||||
font:
|
||||
text: Roboto
|
||||
code: Roboto Mono
|
||||
palette:
|
||||
- media: "(prefers-color-scheme: light)"
|
||||
scheme: default
|
||||
primary: indigo
|
||||
accent: indigo
|
||||
toggle:
|
||||
icon: material/weather-night
|
||||
name: Switch to dark mode
|
||||
- media: "(prefers-color-scheme: dark)"
|
||||
scheme: slate
|
||||
primary: indigo
|
||||
accent: indigo
|
||||
toggle:
|
||||
icon: material/weather-sunny
|
||||
name: Switch to light mode
|
||||
features:
|
||||
- navigation.search.highlight
- navigation.search.suggest
- navigation.search.share
|
||||
icon:
|
||||
repo: fontawesome/brands/github
|
||||
|
||||
markdown_extensions:
|
||||
- admonition
|
||||
- pymdownx.details
|
||||
- pymdownx.superfences
|
||||
- pymdownx.highlight
|
||||
- pymdownx.inlinehilite
|
||||
- pymdownx.tabbed
|
||||
- pymdownx.emoji:
|
||||
emoji_index: !!python/name:material.extensions.emoji.twemoji
|
||||
emoji_generator: !!python/name:materialx.emoji.to_svg
|
||||
- attr_list
|
||||
- md_in_html
|
||||
- def_list
|
||||
- toc:
|
||||
permalink: true
|
||||
|
||||
nav:
|
||||
- Home: index.md
|
||||
- Installation: installation.md
|
||||
- Usage: usage.md
|
||||
- API Documentation: api.md
|
||||
- Features:
|
||||
- Overview: features/index.md
|
||||
- Repair Worker: features/repair-worker.md
|
||||
- Guides:
|
||||
- Overview: guides/index.md
|
||||
- Manual Downloading: guides/downloading.md
|
||||
- Internal Mounting: guides/internal-mounting.md
|
||||
|
||||
|
||||
plugins:
|
||||
- search
|
||||
- tags
|
||||
- swagger-ui-tag
|
||||
3
docs/requirements.txt
Normal file
@@ -0,0 +1,3 @@
|
||||
mkdocs==1.6.1
|
||||
mkdocs-material==9.6.16
|
||||
mkdocs-swagger-ui-tag==0.6.10
|
||||
39
go.mod
@@ -1,28 +1,39 @@
|
||||
module goBlack
|
||||
module github.com/sirrobot01/decypharr
|
||||
|
||||
go 1.22
|
||||
go 1.24.0
|
||||
|
||||
toolchain go1.24.3
|
||||
|
||||
require (
|
||||
github.com/anacrolix/torrent v1.55.0
|
||||
github.com/cavaliergopher/grab/v3 v3.0.1
|
||||
github.com/elazarl/goproxy v0.0.0-20240726154733-8b0c20506380
|
||||
github.com/elazarl/goproxy/ext v0.0.0-20190711103511-473e67f1d7d2
|
||||
github.com/go-chi/chi/v5 v5.1.0
|
||||
github.com/go-chi/chi/v5 v5.2.2
|
||||
github.com/go-co-op/gocron/v2 v2.16.1
|
||||
github.com/google/uuid v1.6.0
|
||||
github.com/valyala/fasthttp v1.55.0
|
||||
github.com/valyala/fastjson v1.6.4
|
||||
golang.org/x/time v0.6.0
|
||||
github.com/gorilla/sessions v1.4.0
|
||||
github.com/robfig/cron/v3 v3.0.1
|
||||
github.com/rs/zerolog v1.33.0
|
||||
github.com/stanNthe5/stringbuf v0.0.3
|
||||
go.uber.org/ratelimit v0.3.1
|
||||
golang.org/x/crypto v0.39.0
|
||||
golang.org/x/net v0.41.0
|
||||
golang.org/x/sync v0.15.0
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.2.1
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/anacrolix/missinggo v1.3.0 // indirect
|
||||
github.com/anacrolix/missinggo/v2 v2.7.3 // indirect
|
||||
github.com/andybalholm/brotli v1.1.0 // indirect
|
||||
github.com/benbjohnson/clock v1.3.0 // indirect
|
||||
github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8 // indirect
|
||||
github.com/google/go-cmp v0.6.0 // indirect
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||
github.com/google/go-cmp v0.7.0 // indirect
|
||||
github.com/gorilla/securecookie v1.1.2 // indirect
|
||||
github.com/huandu/xstrings v1.3.2 // indirect
|
||||
github.com/klauspost/compress v1.17.9 // indirect
|
||||
github.com/valyala/bytebufferpool v1.0.0 // indirect
|
||||
golang.org/x/net v0.27.0 // indirect
|
||||
golang.org/x/text v0.16.0 // indirect
|
||||
github.com/jonboulle/clockwork v0.5.0 // indirect
|
||||
github.com/mattn/go-colorable v0.1.14 // indirect
|
||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
||||
github.com/rogpeppe/go-internal v1.14.1 // indirect
|
||||
golang.org/x/sys v0.33.0 // indirect
|
||||
)
|
||||
|
||||
90
go.sum
@@ -35,9 +35,9 @@ github.com/anacrolix/tagflag v1.0.0/go.mod h1:1m2U/K6ZT+JZG0+bdMK6qauP49QT4wE5pm
|
||||
github.com/anacrolix/tagflag v1.1.0/go.mod h1:Scxs9CV10NQatSmbyjqmqmeQNwGzlNe0CMUMIxqHIG8=
|
||||
github.com/anacrolix/torrent v1.55.0 h1:s9yh/YGdPmbN9dTa+0Inh2dLdrLQRvEAj1jdFW/Hdd8=
|
||||
github.com/anacrolix/torrent v1.55.0/go.mod h1:sBdZHBSZNj4de0m+EbYg7vvs/G/STubxu/GzzNbojsE=
|
||||
github.com/andybalholm/brotli v1.1.0 h1:eLKJA0d02Lf0mVpIDgYnqXcUn0GqVmEFny3VuID1U3M=
|
||||
github.com/andybalholm/brotli v1.1.0/go.mod h1:sms7XGricyQI9K10gOSf56VKKWS4oLer58Q+mhRPtnY=
|
||||
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
|
||||
github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A=
|
||||
github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
|
||||
github.com/benbjohnson/immutable v0.2.0/go.mod h1:uc6OHo6PN2++n98KHLxW8ef4W42ylHiQSENghE1ezxI=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||||
@@ -50,19 +50,17 @@ github.com/cavaliergopher/grab/v3 v3.0.1 h1:4z7TkBfmPjmLAAmkkAZNX/6QJ1nNFdv3SdIH
|
||||
github.com/cavaliergopher/grab/v3 v3.0.1/go.mod h1:1U/KNnD+Ft6JJiYoYBAimKH2XrYptb8Kl3DFGmsjpq4=
|
||||
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
|
||||
github.com/dustin/go-humanize v0.0.0-20180421182945-02af3965c54e/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||
github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
|
||||
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
|
||||
github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
|
||||
github.com/elazarl/goproxy v0.0.0-20240726154733-8b0c20506380 h1:1NyRx2f4W4WBRyg0Kys0ZbaNmDDzZ2R/C7DTi+bbsJ0=
|
||||
github.com/elazarl/goproxy v0.0.0-20240726154733-8b0c20506380/go.mod h1:thX175TtLTzLj3p7N/Q9IiKZ7NF+p72cvL91emV0hzo=
|
||||
github.com/elazarl/goproxy/ext v0.0.0-20190711103511-473e67f1d7d2 h1:dWB6v3RcOy03t/bUadywsbyrQwCqZeNIEX6M1OtSZOM=
|
||||
github.com/elazarl/goproxy/ext v0.0.0-20190711103511-473e67f1d7d2/go.mod h1:gNh8nYJoAm43RfaxurUnxr+N1PwuFV3ZMl/efxlIlY8=
|
||||
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
|
||||
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
@@ -72,13 +70,16 @@ github.com/glycerine/go-unsnap-stream v0.0.0-20190901134440-81cf024a9e0a/go.mod
|
||||
github.com/glycerine/goconvey v0.0.0-20180728074245-46e3a41ad493/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24=
|
||||
github.com/glycerine/goconvey v0.0.0-20190315024820-982ee783a72e/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24=
|
||||
github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24=
|
||||
github.com/go-chi/chi/v5 v5.1.0 h1:acVI1TYaD+hhedDJ3r54HyA6sExp3HfXq7QWEEY/xMw=
|
||||
github.com/go-chi/chi/v5 v5.1.0/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8=
|
||||
github.com/go-chi/chi/v5 v5.2.2 h1:CMwsvRVTbXVytCk1Wd72Zy1LAsAh9GxMmSNWLHCG618=
|
||||
github.com/go-chi/chi/v5 v5.2.2/go.mod h1:L2yAIGWB3H+phAw1NxKwWM+7eUH/lU8pOMm5hHcoops=
|
||||
github.com/go-co-op/gocron/v2 v2.16.1 h1:ux/5zxVRveCaCuTtNI3DiOk581KC1KpJbpJFYUEVYwo=
|
||||
github.com/go-co-op/gocron/v2 v2.16.1/go.mod h1:opexeOFy5BplhsKdA7bzY9zeYih8I8/WNJ4arTIFPVc=
|
||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
@@ -101,9 +102,11 @@ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5a
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
|
||||
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
||||
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
|
||||
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
@@ -112,6 +115,10 @@ github.com/gopherjs/gopherjs v0.0.0-20190309154008-847fc94819f9/go.mod h1:wJfORR
|
||||
github.com/gopherjs/gopherjs v0.0.0-20190910122728-9d188e94fb99/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
|
||||
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
|
||||
github.com/gorilla/securecookie v1.1.2 h1:YCIWL56dvtr73r6715mJs5ZvhtnY73hBvEF8kXD8ePA=
|
||||
github.com/gorilla/securecookie v1.1.2/go.mod h1:NfCASbcHqRSY+3a8tlWJwsQap2VX5pwzwo4h3eOamfo=
|
||||
github.com/gorilla/sessions v1.4.0 h1:kpIYOp/oi6MG/p5PgxApU8srsSw9tuFbt46Lt7auzqQ=
|
||||
github.com/gorilla/sessions v1.4.0/go.mod h1:FLWm50oby91+hl7p/wRxDth9bWSuk0qVL2emc7lT5ik=
|
||||
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/huandu/xstrings v1.0.0/go.mod h1:4qWG/gcEcfX4z/mBDHJ++3ReCw9ibxbsNJbcucJdbSo=
|
||||
@@ -119,14 +126,14 @@ github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63
|
||||
github.com/huandu/xstrings v1.3.1/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
|
||||
github.com/huandu/xstrings v1.3.2 h1:L18LIDzqlW6xN2rEkpdV8+oL/IXWJ1APd+vsdYy4Wdw=
|
||||
github.com/huandu/xstrings v1.3.2/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
|
||||
github.com/jonboulle/clockwork v0.5.0 h1:Hyh9A8u51kptdkR+cqRpT1EebBwTn1oK9YfGYbdFz6I=
|
||||
github.com/jonboulle/clockwork v0.5.0/go.mod h1:3mZlmanh0g2NDKO5TWZVJAfofYk64M7XN3SzBPjZF60=
|
||||
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
|
||||
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
|
||||
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
|
||||
github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
@@ -136,6 +143,13 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
|
||||
github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
|
||||
github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
|
||||
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
|
||||
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
@@ -153,8 +167,9 @@ github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
|
||||
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
|
||||
@@ -172,9 +187,13 @@ github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsT
|
||||
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
|
||||
github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
|
||||
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
|
||||
github.com/rogpeppe/go-charset v0.0.0-20180617210344-2471d30d28b4/go.mod h1:qgYeAmZ5ZIpBWTGllZSQnw97Dj+woV0toclVaRGI8pc=
|
||||
github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
|
||||
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
|
||||
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
|
||||
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
|
||||
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
|
||||
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
|
||||
github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
|
||||
github.com/rs/zerolog v1.33.0 h1:1cU2KZkvPxNyfgEmhHAz/1A9Bz+llsdYzklWFzgp0r8=
|
||||
github.com/rs/zerolog v1.33.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss=
|
||||
github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46/go.mod h1:uAQ5PCi+MFsC7HjREoAz1BU+Mq60+05gifQSsHSDG/8=
|
||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||
@@ -182,30 +201,34 @@ github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1
|
||||
github.com/smartystreets/assertions v0.0.0-20190215210624-980c5ac6f3ac/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
||||
github.com/smartystreets/goconvey v0.0.0-20181108003508-044398e4856c/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s=
|
||||
github.com/smartystreets/goconvey v0.0.0-20190306220146-200a235640ff/go.mod h1:KSQcGKpxUMHk3nbYzs/tIBAM2iDooCn0BmttHOJEbLs=
|
||||
github.com/stanNthe5/stringbuf v0.0.3 h1:3ChRipDckEY6FykaQ1Dowy3B+ZQa72EDBCasvT5+D1w=
|
||||
github.com/stanNthe5/stringbuf v0.0.3/go.mod h1:hii5Vr+mucoWkNJlIYQVp8YvuPtq45fFnJEAhcPf2cQ=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.2.1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
|
||||
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
|
||||
github.com/tinylib/msgp v1.1.0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
|
||||
github.com/tinylib/msgp v1.1.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
|
||||
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
|
||||
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
|
||||
github.com/valyala/fasthttp v1.55.0 h1:Zkefzgt6a7+bVKHnu/YaYSOPfNYNisSVBo/unVCf8k8=
|
||||
github.com/valyala/fasthttp v1.55.0/go.mod h1:NkY9JtkrpPKmgwV3HTaS2HWaJss9RSIsRVfcxxoHiOM=
|
||||
github.com/valyala/fastjson v1.6.4 h1:uAUNq9Z6ymTgGhcm0UynUAB6tlbakBrz6CQFax3BXVQ=
|
||||
github.com/valyala/fastjson v1.6.4/go.mod h1:CLCAqky6SMuOcxStkYQvblddUtoRxhYMGLrsQns1aXY=
|
||||
github.com/willf/bitset v1.1.9/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
|
||||
github.com/willf/bitset v1.1.10/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
|
||||
go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
|
||||
go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
|
||||
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
|
||||
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
|
||||
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
||||
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
|
||||
go.uber.org/ratelimit v0.3.1 h1:K4qVE+byfv/B3tC+4nYWP7v/6SimcO7HzHekoMNBma0=
|
||||
go.uber.org/ratelimit v0.3.1/go.mod h1:6euWsTB6U/Nb3X++xEUXA8ciPJvr19Q/0h1+oDcJhRk=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM=
|
||||
golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
@@ -221,8 +244,8 @@ golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73r
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys=
|
||||
golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE=
|
||||
golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw=
|
||||
golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
@@ -230,6 +253,8 @@ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJ
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8=
|
||||
golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
@@ -241,12 +266,13 @@ golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7w
|
||||
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200413165638-669c56c373c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw=
|
||||
golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
|
||||
golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
|
||||
golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U=
|
||||
golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
||||
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
@@ -273,6 +299,8 @@ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
|
||||
494
internal/config/config.go
Normal file
@@ -0,0 +1,494 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"cmp"
|
||||
"crypto/rand"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
type RepairStrategy string
|
||||
|
||||
const (
|
||||
RepairStrategyPerFile RepairStrategy = "per_file"
|
||||
RepairStrategyPerTorrent RepairStrategy = "per_torrent"
|
||||
)
|
||||
|
||||
var (
|
||||
instance *Config
|
||||
once sync.Once
|
||||
configPath string
|
||||
)
|
||||
|
||||
type Debrid struct {
|
||||
Name string `json:"name,omitempty"`
|
||||
APIKey string `json:"api_key,omitempty"`
|
||||
DownloadAPIKeys []string `json:"download_api_keys,omitempty"`
|
||||
Folder string `json:"folder,omitempty"`
|
||||
DownloadUncached bool `json:"download_uncached,omitempty"`
|
||||
CheckCached bool `json:"check_cached,omitempty"`
|
||||
RateLimit string `json:"rate_limit,omitempty"` // 200/minute or 10/second
|
||||
RepairRateLimit string `json:"repair_rate_limit,omitempty"`
|
||||
DownloadRateLimit string `json:"download_rate_limit,omitempty"`
|
||||
Proxy string `json:"proxy,omitempty"`
|
||||
UnpackRar bool `json:"unpack_rar,omitempty"`
|
||||
AddSamples bool `json:"add_samples,omitempty"`
|
||||
MinimumFreeSlot int `json:"minimum_free_slot,omitempty"` // Minimum number of free download slots required before this debrid is used
|
||||
Limit int `json:"limit,omitempty"` // Maximum number of total torrents
|
||||
|
||||
UseWebDav bool `json:"use_webdav,omitempty"`
|
||||
WebDav
|
||||
}
|
||||
|
||||
type QBitTorrent struct {
|
||||
Username string `json:"username,omitempty"`
|
||||
Password string `json:"password,omitempty"`
|
||||
Port string `json:"port,omitempty"` // deprecated
|
||||
DownloadFolder string `json:"download_folder,omitempty"`
|
||||
Categories []string `json:"categories,omitempty"`
|
||||
RefreshInterval int `json:"refresh_interval,omitempty"`
|
||||
SkipPreCache bool `json:"skip_pre_cache,omitempty"`
|
||||
MaxDownloads int `json:"max_downloads,omitempty"`
|
||||
}
|
||||
|
||||
type Arr struct {
|
||||
Name string `json:"name,omitempty"`
|
||||
Host string `json:"host,omitempty"`
|
||||
Token string `json:"token,omitempty"`
|
||||
Cleanup bool `json:"cleanup,omitempty"`
|
||||
SkipRepair bool `json:"skip_repair,omitempty"`
|
||||
DownloadUncached *bool `json:"download_uncached,omitempty"`
|
||||
SelectedDebrid string `json:"selected_debrid,omitempty"`
|
||||
Source string `json:"source,omitempty"` // Where this arr entry came from: "auto", "config", or "". "auto" means it was detected automatically from the Arr instance
|
||||
}
|
||||
|
||||
type Repair struct {
|
||||
Enabled bool `json:"enabled,omitempty"`
|
||||
Interval string `json:"interval,omitempty"`
|
||||
ZurgURL string `json:"zurg_url,omitempty"`
|
||||
AutoProcess bool `json:"auto_process,omitempty"`
|
||||
UseWebDav bool `json:"use_webdav,omitempty"`
|
||||
Workers int `json:"workers,omitempty"`
|
||||
ReInsert bool `json:"reinsert,omitempty"`
|
||||
Strategy RepairStrategy `json:"strategy,omitempty"`
|
||||
}
|
||||
|
||||
type Auth struct {
|
||||
Username string `json:"username,omitempty"`
|
||||
Password string `json:"password,omitempty"`
|
||||
APIToken string `json:"api_token,omitempty"`
|
||||
}
|
||||
|
||||
type Rclone struct {
|
||||
// Global mount folder where all providers will be mounted as subfolders
|
||||
Enabled bool `json:"enabled,omitempty"`
|
||||
MountPath string `json:"mount_path,omitempty"`
|
||||
|
||||
// Cache settings
|
||||
CacheDir string `json:"cache_dir,omitempty"`
|
||||
|
||||
// VFS settings
|
||||
VfsCacheMode string `json:"vfs_cache_mode,omitempty"` // off, minimal, writes, full
|
||||
VfsCacheMaxAge string `json:"vfs_cache_max_age,omitempty"` // Maximum age of objects in the cache (default 1h)
|
||||
VfsCacheMaxSize string `json:"vfs_cache_max_size,omitempty"` // Maximum size of the cache (default off)
|
||||
VfsCachePollInterval string `json:"vfs_cache_poll_interval,omitempty"` // How often to poll for changes (default 1m)
|
||||
VfsReadChunkSize string `json:"vfs_read_chunk_size,omitempty"` // Read chunk size (default 128M)
|
||||
VfsReadChunkSizeLimit string `json:"vfs_read_chunk_size_limit,omitempty"` // Max chunk size (default off)
|
||||
VfsReadAhead string `json:"vfs_read_ahead,omitempty"` // read ahead size
|
||||
VfsPollInterval string `json:"vfs_poll_interval,omitempty"` // How often rclone cleans the VFS cache (default 1m)
|
||||
BufferSize string `json:"buffer_size,omitempty"` // Buffer size for reading files (default 16M)
|
||||
|
||||
// File system settings
|
||||
UID uint32 `json:"uid,omitempty"` // User ID for mounted files
|
||||
GID uint32 `json:"gid,omitempty"` // Group ID for mounted files
|
||||
Umask string `json:"umask,omitempty"`
|
||||
|
||||
// Timeout settings
|
||||
AttrTimeout string `json:"attr_timeout,omitempty"` // Attribute cache timeout (default 1s)
|
||||
DirCacheTime string `json:"dir_cache_time,omitempty"` // Directory cache time (default 5m)
|
||||
|
||||
// Performance settings
|
||||
NoModTime bool `json:"no_modtime,omitempty"` // Don't read/write modification time
|
||||
NoChecksum bool `json:"no_checksum,omitempty"` // Don't checksum files on upload
|
||||
}
|
||||
|
||||
type Config struct {
|
||||
// server
|
||||
BindAddress string `json:"bind_address,omitempty"`
|
||||
URLBase string `json:"url_base,omitempty"`
|
||||
Port string `json:"port,omitempty"`
|
||||
|
||||
LogLevel string `json:"log_level,omitempty"`
|
||||
Debrids []Debrid `json:"debrids,omitempty"`
|
||||
QBitTorrent QBitTorrent `json:"qbittorrent,omitempty"`
|
||||
Arrs []Arr `json:"arrs,omitempty"`
|
||||
Repair Repair `json:"repair,omitempty"`
|
||||
WebDav WebDav `json:"webdav,omitempty"`
|
||||
Rclone Rclone `json:"rclone,omitempty"`
|
||||
AllowedExt []string `json:"allowed_file_types,omitempty"`
|
||||
MinFileSize string `json:"min_file_size,omitempty"` // Minimum file size to download, 10MB, 1GB, etc
|
||||
MaxFileSize string `json:"max_file_size,omitempty"` // Maximum file size to download (0 means no limit)
|
||||
Path string `json:"-"` // Path to save the config file
|
||||
UseAuth bool `json:"use_auth,omitempty"`
|
||||
Auth *Auth `json:"-"`
|
||||
DiscordWebhook string `json:"discord_webhook_url,omitempty"`
|
||||
RemoveStalledAfter string `json:"remove_stalled_after,omitzero"`
|
||||
}
|
||||
|
||||
func (c *Config) JsonFile() string {
|
||||
return filepath.Join(c.Path, "config.json")
|
||||
}
|
||||
func (c *Config) AuthFile() string {
|
||||
return filepath.Join(c.Path, "auth.json")
|
||||
}
|
||||
|
||||
func (c *Config) TorrentsFile() string {
|
||||
return filepath.Join(c.Path, "torrents.json")
|
||||
}
|
||||
|
||||
func (c *Config) loadConfig() error {
|
||||
// Load the config file
|
||||
if configPath == "" {
|
||||
return fmt.Errorf("config path not set")
|
||||
}
|
||||
c.Path = configPath
|
||||
file, err := os.ReadFile(c.JsonFile())
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
fmt.Printf("Config file not found, creating a new one at %s\n", c.JsonFile())
|
||||
// Create a default config file if it doesn't exist
|
||||
if err := c.createConfig(c.Path); err != nil {
|
||||
return fmt.Errorf("failed to create config file: %w", err)
|
||||
}
|
||||
return c.Save()
|
||||
}
|
||||
return fmt.Errorf("error reading config file: %w", err)
|
||||
}
|
||||
|
||||
if err := json.Unmarshal(file, &c); err != nil {
|
||||
return fmt.Errorf("error unmarshaling config: %w", err)
|
||||
}
|
||||
c.setDefaults()
|
||||
return nil
|
||||
}
|
||||
|
||||
func validateDebrids(debrids []Debrid) error {
|
||||
if len(debrids) == 0 {
|
||||
return errors.New("no debrids configured")
|
||||
}
|
||||
|
||||
for _, debrid := range debrids {
|
||||
// Basic field validation
|
||||
if debrid.APIKey == "" {
|
||||
return errors.New("debrid api key is required")
|
||||
}
|
||||
if debrid.Folder == "" {
|
||||
return errors.New("debrid folder is required")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func validateQbitTorrent(config *QBitTorrent) error {
|
||||
if config.DownloadFolder == "" {
|
||||
return errors.New("qbittorent download folder is required")
|
||||
}
|
||||
if _, err := os.Stat(config.DownloadFolder); os.IsNotExist(err) {
|
||||
return fmt.Errorf("qbittorent download folder(%s) does not exist", config.DownloadFolder)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func validateRepair(config *Repair) error {
|
||||
if !config.Enabled {
|
||||
return nil
|
||||
}
|
||||
if config.Interval == "" {
|
||||
return errors.New("repair interval is required")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func ValidateConfig(config *Config) error {
|
||||
// Run validations in sequence, returning the first error encountered
|
||||
|
||||
if err := validateDebrids(config.Debrids); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := validateQbitTorrent(&config.QBitTorrent); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := validateRepair(&config.Repair); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// generateAPIToken creates a new random API token
|
||||
func generateAPIToken() (string, error) {
|
||||
bytes := make([]byte, 32) // 256-bit token
|
||||
if _, err := rand.Read(bytes); err != nil {
|
||||
return "", err
|
||||
}
|
||||
return hex.EncodeToString(bytes), nil
|
||||
}
|
||||
|
||||
func SetConfigPath(path string) {
|
||||
configPath = path
|
||||
}
|
||||
|
||||
func Get() *Config {
|
||||
once.Do(func() {
|
||||
instance = &Config{} // Initialize instance first
|
||||
if err := instance.loadConfig(); err != nil {
|
||||
_, _ = fmt.Fprintf(os.Stderr, "configuration Error: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
})
|
||||
return instance
|
||||
}
|
||||
|
||||
func (c *Config) GetMinFileSize() int64 {
|
||||
// 0 means no limit
|
||||
if c.MinFileSize == "" {
|
||||
return 0
|
||||
}
|
||||
s, err := ParseSize(c.MinFileSize)
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func (c *Config) GetMaxFileSize() int64 {
|
||||
// 0 means no limit
|
||||
if c.MaxFileSize == "" {
|
||||
return 0
|
||||
}
|
||||
s, err := ParseSize(c.MaxFileSize)
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func (c *Config) IsSizeAllowed(size int64) bool {
|
||||
if size == 0 {
|
||||
return true // Maybe the debrid hasn't reported the size yet
|
||||
}
|
||||
if c.GetMinFileSize() > 0 && size < c.GetMinFileSize() {
|
||||
return false
|
||||
}
|
||||
if c.GetMaxFileSize() > 0 && size > c.GetMaxFileSize() {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (c *Config) GetAuth() *Auth {
|
||||
if !c.UseAuth {
|
||||
return nil
|
||||
}
|
||||
if c.Auth == nil {
|
||||
c.Auth = &Auth{}
|
||||
if _, err := os.Stat(c.AuthFile()); err == nil {
|
||||
file, err := os.ReadFile(c.AuthFile())
|
||||
if err == nil {
|
||||
_ = json.Unmarshal(file, c.Auth)
|
||||
}
|
||||
}
|
||||
}
|
||||
return c.Auth
|
||||
}
|
||||
|
||||
func (c *Config) SaveAuth(auth *Auth) error {
|
||||
c.Auth = auth
|
||||
data, err := json.Marshal(auth)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return os.WriteFile(c.AuthFile(), data, 0644)
|
||||
}
|
||||
|
||||
func (c *Config) NeedsSetup() error {
|
||||
return ValidateConfig(c)
|
||||
}
|
||||
|
||||
func (c *Config) NeedsAuth() bool {
|
||||
if c.UseAuth {
|
||||
return c.GetAuth().Username == ""
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (c *Config) updateDebrid(d Debrid) Debrid {
|
||||
workers := runtime.NumCPU() * 50
|
||||
perDebrid := workers / len(c.Debrids)
|
||||
|
||||
var downloadKeys []string
|
||||
|
||||
if len(d.DownloadAPIKeys) > 0 {
|
||||
downloadKeys = d.DownloadAPIKeys
|
||||
} else {
|
||||
// If no download API keys are specified, use the main API key
|
||||
downloadKeys = []string{d.APIKey}
|
||||
}
|
||||
d.DownloadAPIKeys = downloadKeys
|
||||
|
||||
if !d.UseWebDav {
|
||||
return d
|
||||
}
|
||||
|
||||
if d.TorrentsRefreshInterval == "" {
|
||||
d.TorrentsRefreshInterval = cmp.Or(c.WebDav.TorrentsRefreshInterval, "45s") // 45 seconds
|
||||
}
|
||||
if d.WebDav.DownloadLinksRefreshInterval == "" {
|
||||
d.DownloadLinksRefreshInterval = cmp.Or(c.WebDav.DownloadLinksRefreshInterval, "40m") // 40 minutes
|
||||
}
|
||||
if d.Workers == 0 {
|
||||
d.Workers = perDebrid
|
||||
}
|
||||
if d.FolderNaming == "" {
|
||||
d.FolderNaming = cmp.Or(c.WebDav.FolderNaming, "original_no_ext")
|
||||
}
|
||||
if d.AutoExpireLinksAfter == "" {
|
||||
d.AutoExpireLinksAfter = cmp.Or(c.WebDav.AutoExpireLinksAfter, "3d") // 3 days
|
||||
}
|
||||
|
||||
// Merge debrid specified directories with global directories
|
||||
|
||||
directories := c.WebDav.Directories
|
||||
if directories == nil {
|
||||
directories = make(map[string]WebdavDirectories)
|
||||
}
|
||||
|
||||
for name, dir := range d.Directories {
|
||||
directories[name] = dir
|
||||
}
|
||||
d.Directories = directories
|
||||
|
||||
d.RcUrl = cmp.Or(d.RcUrl, c.WebDav.RcUrl)
|
||||
d.RcUser = cmp.Or(d.RcUser, c.WebDav.RcUser)
|
||||
d.RcPass = cmp.Or(d.RcPass, c.WebDav.RcPass)
|
||||
|
||||
return d
|
||||
}
|
||||
|
||||
func (c *Config) setDefaults() {
|
||||
for i, debrid := range c.Debrids {
|
||||
c.Debrids[i] = c.updateDebrid(debrid)
|
||||
}
|
||||
|
||||
if len(c.AllowedExt) == 0 {
|
||||
c.AllowedExt = getDefaultExtensions()
|
||||
}
|
||||
|
||||
c.Port = cmp.Or(c.Port, c.QBitTorrent.Port)
|
||||
|
||||
if c.URLBase == "" {
|
||||
c.URLBase = "/"
|
||||
}
|
||||
// validate url base starts with /
|
||||
if !strings.HasPrefix(c.URLBase, "/") {
|
||||
c.URLBase = "/" + c.URLBase
|
||||
}
|
||||
if !strings.HasSuffix(c.URLBase, "/") {
|
||||
c.URLBase += "/"
|
||||
}
|
||||
|
||||
// Set repair defaults
|
||||
if c.Repair.Strategy == "" {
|
||||
c.Repair.Strategy = RepairStrategyPerTorrent
|
||||
}
|
||||
|
||||
// Rclone defaults
|
||||
if c.Rclone.Enabled {
|
||||
c.Rclone.VfsCacheMode = cmp.Or(c.Rclone.VfsCacheMode, "off")
|
||||
if c.Rclone.UID == 0 {
|
||||
c.Rclone.UID = uint32(os.Getuid())
|
||||
}
|
||||
if c.Rclone.GID == 0 {
|
||||
if runtime.GOOS == "windows" {
|
||||
// Windows has no numeric GID; fall back to the UID value
|
||||
c.Rclone.GID = uint32(os.Getuid()) // Windows does not have GID, using UID instead
|
||||
} else {
|
||||
c.Rclone.GID = uint32(os.Getgid())
|
||||
}
|
||||
}
|
||||
if c.Rclone.VfsCacheMode != "off" {
|
||||
c.Rclone.VfsCachePollInterval = cmp.Or(c.Rclone.VfsCachePollInterval, "1m") // Clean cache every minute
|
||||
}
|
||||
c.Rclone.DirCacheTime = cmp.Or(c.Rclone.DirCacheTime, "5m")
|
||||
}
|
||||
// Load the auth file
|
||||
c.Auth = c.GetAuth()
|
||||
|
||||
// Generate API token if auth is enabled and no token exists
|
||||
if c.UseAuth {
|
||||
if c.Auth == nil {
|
||||
c.Auth = &Auth{}
|
||||
}
|
||||
if c.Auth.APIToken == "" {
|
||||
if token, err := generateAPIToken(); err == nil {
|
||||
c.Auth.APIToken = token
|
||||
// Save the updated auth config
|
||||
_ = c.SaveAuth(c.Auth)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Config) Save() error {
|
||||
|
||||
c.setDefaults()
|
||||
|
||||
data, err := json.MarshalIndent(c, "", " ")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := os.WriteFile(c.JsonFile(), data, 0644); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Config) createConfig(path string) error {
|
||||
// Create the directory if it doesn't exist
|
||||
if err := os.MkdirAll(path, 0755); err != nil {
|
||||
return fmt.Errorf("failed to create config directory: %w", err)
|
||||
}
|
||||
|
||||
c.Path = path
|
||||
c.URLBase = "/"
|
||||
c.Port = "8282"
|
||||
c.LogLevel = "info"
|
||||
c.UseAuth = true
|
||||
c.QBitTorrent = QBitTorrent{
|
||||
DownloadFolder: filepath.Join(path, "downloads"),
|
||||
Categories: []string{"sonarr", "radarr"},
|
||||
RefreshInterval: 15,
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Reload forces a reload of the configuration from disk
|
||||
func Reload() {
|
||||
instance = nil
|
||||
once = sync.Once{}
|
||||
}
|
||||
|
||||
func DefaultFreeSlot() int {
|
||||
return 10
|
||||
}
|
||||
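The loader above is a process-wide singleton: SetConfigPath must be called before the first Get(), which either reads config.json from that folder or creates a default one. A minimal, hedged sketch of how a caller might drive it; the /tmp/decypharr-data path is an illustrative assumption, not something the repository prescribes.

package main

import (
	"fmt"

	"github.com/sirrobot01/decypharr/internal/config"
)

func main() {
	// Assumption: this directory is writable; Get() creates a default
	// config.json there on first run (see loadConfig/createConfig above).
	config.SetConfigPath("/tmp/decypharr-data")
	cfg := config.Get()

	fmt.Println("config file:", cfg.JsonFile()) // /tmp/decypharr-data/config.json
	fmt.Println("port:", cfg.Port)              // "8282" from the createConfig defaults
	fmt.Println("10 GB allowed:", cfg.IsSizeAllowed(10<<30))
}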
75
internal/config/misc.go
Normal file
@@ -0,0 +1,75 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func (c *Config) IsAllowedFile(filename string) bool {
|
||||
ext := strings.ToLower(filepath.Ext(filename))
|
||||
if ext == "" {
|
||||
return false
|
||||
}
|
||||
// Remove the leading dot
|
||||
ext = ext[1:]
|
||||
|
||||
for _, allowed := range c.AllowedExt {
|
||||
if ext == allowed {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func getDefaultExtensions() []string {
|
||||
videoExts := strings.Split("webm,m4v,3gp,nsv,ty,strm,rm,rmvb,m3u,ifo,mov,qt,divx,xvid,bivx,nrg,pva,wmv,asf,asx,ogm,ogv,m2v,avi,bin,dat,dvr-ms,mpg,mpeg,mp4,avc,vp3,svq3,nuv,viv,dv,fli,flv,wpl,vob,mkv,mk3d,ts,wtv,m2ts", ",")
|
||||
musicExts := strings.Split("MP3,WAV,FLAC,OGG,WMA,AIFF,ALAC,M4A,APE,AC3,DTS,M4P,MID,MIDI,MKA,MP2,MPA,RA,VOC,WV,AMR", ",")
|
||||
|
||||
// Combine both slices
|
||||
allExts := append(videoExts, musicExts...)
|
||||
|
||||
// Convert to lowercase
|
||||
for i, ext := range allExts {
|
||||
allExts[i] = strings.ToLower(ext)
|
||||
}
|
||||
|
||||
// Remove duplicates
|
||||
seen := make(map[string]struct{})
|
||||
var unique []string
|
||||
|
||||
for _, ext := range allExts {
|
||||
if _, ok := seen[ext]; !ok {
|
||||
seen[ext] = struct{}{}
|
||||
unique = append(unique, ext)
|
||||
}
|
||||
}
|
||||
|
||||
sort.Strings(unique)
|
||||
return unique
|
||||
}
|
||||
|
||||
func ParseSize(sizeStr string) (int64, error) {
|
||||
sizeStr = strings.ToUpper(strings.TrimSpace(sizeStr))
|
||||
|
||||
// Strip an optional KB/MB/GB suffix and work out the byte multiplier
|
||||
multiplier := 1.0
|
||||
if strings.HasSuffix(sizeStr, "GB") {
|
||||
multiplier = 1024 * 1024 * 1024
|
||||
sizeStr = strings.TrimSuffix(sizeStr, "GB")
|
||||
} else if strings.HasSuffix(sizeStr, "MB") {
|
||||
multiplier = 1024 * 1024
|
||||
sizeStr = strings.TrimSuffix(sizeStr, "MB")
|
||||
} else if strings.HasSuffix(sizeStr, "KB") {
|
||||
multiplier = 1024
|
||||
sizeStr = strings.TrimSuffix(sizeStr, "KB")
|
||||
}
|
||||
|
||||
size, err := strconv.ParseFloat(sizeStr, 64)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return int64(size * multiplier), nil
|
||||
}
|
||||
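A short, hedged sketch of how these helpers behave, assuming a loaded config with the default extension list; the expected values in the comments follow from the definitions above.

package main

import (
	"fmt"

	"github.com/sirrobot01/decypharr/internal/config"
)

func main() {
	// ParseSize understands optional KB/MB/GB suffixes (base-1024).
	n, err := config.ParseSize("1.5GB")
	fmt.Println(n, err) // 1610612736 <nil>

	config.SetConfigPath("/tmp/decypharr-data") // assumed writable data dir
	cfg := config.Get()
	fmt.Println(cfg.IsAllowedFile("Show.S01E01.mkv")) // true: "mkv" is in the default list
	fmt.Println(cfg.IsAllowedFile("notes.txt"))       // false
}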
26
internal/config/webdav.go
Normal file
@@ -0,0 +1,26 @@
package config

type WebdavDirectories struct {
	Filters map[string]string `json:"filters,omitempty"`
	//SaveStrms bool `json:"save_streams,omitempty"`
}

type WebDav struct {
	TorrentsRefreshInterval      string `json:"torrents_refresh_interval,omitempty"`
	DownloadLinksRefreshInterval string `json:"download_links_refresh_interval,omitempty"`
	Workers                      int    `json:"workers,omitempty"`
	AutoExpireLinksAfter         string `json:"auto_expire_links_after,omitempty"`
	ServeFromRclone              bool   `json:"serve_from_rclone,omitempty"`

	// Folder
	FolderNaming string `json:"folder_naming,omitempty"`

	// Rclone
	RcUrl         string `json:"rc_url,omitempty"`
	RcUser        string `json:"rc_user,omitempty"`
	RcPass        string `json:"rc_pass,omitempty"`
	RcRefreshDirs string `json:"rc_refresh_dirs,omitempty"` // comma separated list of directories to refresh

	// Directories
	Directories map[string]WebdavDirectories `json:"directories,omitempty"`
}
114
internal/logger/logger.go
Normal file
@@ -0,0 +1,114 @@
|
||||
package logger
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/sirrobot01/decypharr/internal/config"
|
||||
"gopkg.in/natefinch/lumberjack.v2"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
var (
|
||||
once sync.Once
|
||||
logger zerolog.Logger
|
||||
)
|
||||
|
||||
func GetLogPath() string {
|
||||
cfg := config.Get()
|
||||
logsDir := filepath.Join(cfg.Path, "logs")
|
||||
|
||||
if _, err := os.Stat(logsDir); os.IsNotExist(err) {
|
||||
if err := os.MkdirAll(logsDir, 0755); err != nil {
|
||||
panic(fmt.Sprintf("Failed to create logs directory: %v", err))
|
||||
}
|
||||
}
|
||||
|
||||
return filepath.Join(logsDir, "decypharr.log")
|
||||
}
|
||||
|
||||
func New(prefix string) zerolog.Logger {
|
||||
|
||||
level := config.Get().LogLevel
|
||||
|
||||
rotatingLogFile := &lumberjack.Logger{
|
||||
Filename: GetLogPath(),
|
||||
MaxSize: 10,
|
||||
MaxAge: 15,
|
||||
Compress: true,
|
||||
}
|
||||
|
||||
consoleWriter := zerolog.ConsoleWriter{
|
||||
Out: os.Stdout,
|
||||
TimeFormat: "2006-01-02 15:04:05",
|
||||
NoColor: false, // Set to true if you don't want colors
|
||||
FormatLevel: func(i interface{}) string {
|
||||
var colorCode string
|
||||
switch strings.ToLower(fmt.Sprintf("%s", i)) {
|
||||
case "debug":
|
||||
colorCode = "\033[36m"
|
||||
case "info":
|
||||
colorCode = "\033[32m"
|
||||
case "warn":
|
||||
colorCode = "\033[33m"
|
||||
case "error":
|
||||
colorCode = "\033[31m"
|
||||
case "fatal":
|
||||
colorCode = "\033[35m"
|
||||
case "panic":
|
||||
colorCode = "\033[41m"
|
||||
default:
|
||||
colorCode = "\033[37m" // White
|
||||
}
|
||||
return fmt.Sprintf("%s| %-6s|\033[0m", colorCode, strings.ToUpper(fmt.Sprintf("%s", i)))
|
||||
},
|
||||
FormatMessage: func(i interface{}) string {
|
||||
return fmt.Sprintf("[%s] %v", prefix, i)
|
||||
},
|
||||
}
|
||||
|
||||
fileWriter := zerolog.ConsoleWriter{
|
||||
Out: rotatingLogFile,
|
||||
TimeFormat: "2006-01-02 15:04:05",
|
||||
NoColor: true, // No colors in file output
|
||||
FormatLevel: func(i interface{}) string {
|
||||
return strings.ToUpper(fmt.Sprintf("| %-6s|", i))
|
||||
},
|
||||
FormatMessage: func(i interface{}) string {
|
||||
return fmt.Sprintf("[%s] %v", prefix, i)
|
||||
},
|
||||
}
|
||||
|
||||
multi := zerolog.MultiLevelWriter(consoleWriter, fileWriter)
|
||||
|
||||
logger := zerolog.New(multi).
|
||||
With().
|
||||
Timestamp().
|
||||
Logger().
|
||||
Level(zerolog.InfoLevel)
|
||||
|
||||
// Set the log level
|
||||
level = strings.ToLower(level)
|
||||
switch level {
|
||||
case "debug":
|
||||
logger = logger.Level(zerolog.DebugLevel)
|
||||
case "info":
|
||||
logger = logger.Level(zerolog.InfoLevel)
|
||||
case "warn":
|
||||
logger = logger.Level(zerolog.WarnLevel)
|
||||
case "error":
|
||||
logger = logger.Level(zerolog.ErrorLevel)
|
||||
case "trace":
|
||||
logger = logger.Level(zerolog.TraceLevel)
|
||||
}
|
||||
return logger
|
||||
}
|
||||
|
||||
func Default() zerolog.Logger {
|
||||
once.Do(func() {
|
||||
logger = New("decypharr")
|
||||
})
|
||||
return logger
|
||||
}
|
||||
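Since New pulls the log level and log directory from config.Get(), the config path has to be set before constructing a logger. A hedged sketch; the "repair" prefix is just an example value.

package main

import (
	"github.com/sirrobot01/decypharr/internal/config"
	"github.com/sirrobot01/decypharr/internal/logger"
)

func main() {
	config.SetConfigPath("/tmp/decypharr-data") // assumed writable data dir

	log := logger.New("repair") // prefix shows up as [repair] in each message
	log.Info().Str("torrent", "example").Msg("starting repair run")
	log.Debug().Msg("only emitted when log_level is debug or trace")
}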
100
internal/request/discord.go
Normal file
@@ -0,0 +1,100 @@
|
||||
package request
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"github.com/sirrobot01/decypharr/internal/config"
|
||||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type DiscordEmbed struct {
|
||||
Title string `json:"title"`
|
||||
Description string `json:"description"`
|
||||
Color int `json:"color"`
|
||||
}
|
||||
|
||||
type DiscordWebhook struct {
|
||||
Embeds []DiscordEmbed `json:"embeds"`
|
||||
}
|
||||
|
||||
func getDiscordColor(status string) int {
|
||||
switch status {
|
||||
case "success":
|
||||
return 3066993
|
||||
case "error":
|
||||
return 15158332
|
||||
case "warning":
|
||||
return 15844367
|
||||
case "pending":
|
||||
return 3447003
|
||||
default:
|
||||
return 0
|
||||
}
|
||||
}
|
||||
|
||||
func getDiscordHeader(event string) string {
|
||||
switch event {
|
||||
case "download_complete":
|
||||
return "[Decypharr] Download Completed"
|
||||
case "download_failed":
|
||||
return "[Decypharr] Download Failed"
|
||||
case "repair_pending":
|
||||
return "[Decypharr] Repair Completed, Awaiting action"
|
||||
case "repair_complete":
|
||||
return "[Decypharr] Repair Complete"
|
||||
default:
|
||||
// split the event string and capitalize the first letter of each word
evs := strings.Split(event, "_")
for i, ev := range evs {
	if ev == "" {
		continue
	}
	evs[i] = strings.ToUpper(ev[:1]) + ev[1:]
}
return fmt.Sprintf("[Decypharr] %s", strings.Join(evs, " "))
|
||||
}
|
||||
}
|
||||
|
||||
func SendDiscordMessage(event string, status string, message string) error {
|
||||
cfg := config.Get()
|
||||
webhookURL := cfg.DiscordWebhook
|
||||
if webhookURL == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Create the proper Discord webhook structure
|
||||
|
||||
webhook := DiscordWebhook{
|
||||
Embeds: []DiscordEmbed{
|
||||
{
|
||||
Title: getDiscordHeader(event),
|
||||
Description: message,
|
||||
Color: getDiscordColor(status),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
payload, err := json.Marshal(webhook)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to marshal discord payload: %v", err)
|
||||
}
|
||||
|
||||
req, err := http.NewRequest(http.MethodPost, webhookURL, bytes.NewReader(payload))
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create discord request: %v", err)
|
||||
}
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to send discord message: %v", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode < 200 || resp.StatusCode >= 300 {
|
||||
bodyBytes, _ := io.ReadAll(resp.Body)
|
||||
return fmt.Errorf("discord returned error status code: %s, body: %s", resp.Status, string(bodyBytes))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
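SendDiscordMessage is a no-op when discord_webhook_url is empty, so it is safe to call unconditionally. A hedged usage sketch; the event, status and message text are illustrative.

package main

import (
	"log"

	"github.com/sirrobot01/decypharr/internal/config"
	"github.com/sirrobot01/decypharr/internal/request"
)

func main() {
	config.SetConfigPath("/tmp/decypharr-data") // assumed data dir whose config sets discord_webhook_url

	// "download_complete" maps to the "[Decypharr] Download Completed" header,
	// and "success" selects the green embed color (see the switch statements above).
	if err := request.SendDiscordMessage("download_complete", "success", "Ubuntu ISO finished"); err != nil {
		log.Println("discord notification failed:", err)
	}
}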
419
internal/request/request.go
Normal file
@@ -0,0 +1,419 @@
|
||||
package request
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/sirrobot01/decypharr/internal/logger"
|
||||
"go.uber.org/ratelimit"
|
||||
"golang.org/x/net/proxy"
|
||||
"io"
|
||||
"math/rand"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
func JoinURL(base string, paths ...string) (string, error) {
|
||||
// Split the last path component to separate query parameters
|
||||
lastPath := paths[len(paths)-1]
|
||||
parts := strings.Split(lastPath, "?")
|
||||
paths[len(paths)-1] = parts[0]
|
||||
|
||||
joined, err := url.JoinPath(base, paths...)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// Add back query parameters if they exist
|
||||
if len(parts) > 1 {
|
||||
return joined + "?" + parts[1], nil
|
||||
}
|
||||
|
||||
return joined, nil
|
||||
}
|
||||
|
||||
var (
|
||||
once sync.Once
|
||||
instance *Client
|
||||
)
|
||||
|
||||
type ClientOption func(*Client)
|
||||
|
||||
// Client represents an HTTP client with additional capabilities
|
||||
type Client struct {
|
||||
client *http.Client
|
||||
rateLimiter ratelimit.Limiter
|
||||
headers map[string]string
|
||||
headersMu sync.RWMutex
|
||||
maxRetries int
|
||||
timeout time.Duration
|
||||
skipTLSVerify bool
|
||||
retryableStatus map[int]struct{}
|
||||
logger zerolog.Logger
|
||||
proxy string
|
||||
}
|
||||
|
||||
// WithMaxRetries sets the maximum number of retry attempts
|
||||
func WithMaxRetries(maxRetries int) ClientOption {
|
||||
return func(c *Client) {
|
||||
c.maxRetries = maxRetries
|
||||
}
|
||||
}
|
||||
|
||||
// WithTimeout sets the request timeout
|
||||
func WithTimeout(timeout time.Duration) ClientOption {
|
||||
return func(c *Client) {
|
||||
c.timeout = timeout
|
||||
}
|
||||
}
|
||||
|
||||
func WithRedirectPolicy(policy func(req *http.Request, via []*http.Request) error) ClientOption {
|
||||
return func(c *Client) {
|
||||
c.client.CheckRedirect = policy
|
||||
}
|
||||
}
|
||||
|
||||
// WithRateLimiter sets a rate limiter
|
||||
func WithRateLimiter(rl ratelimit.Limiter) ClientOption {
|
||||
return func(c *Client) {
|
||||
c.rateLimiter = rl
|
||||
}
|
||||
}
|
||||
|
||||
// WithHeaders sets default headers
|
||||
func WithHeaders(headers map[string]string) ClientOption {
|
||||
return func(c *Client) {
|
||||
c.headersMu.Lock()
|
||||
c.headers = headers
|
||||
c.headersMu.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Client) SetHeader(key, value string) {
|
||||
c.headersMu.Lock()
|
||||
c.headers[key] = value
|
||||
c.headersMu.Unlock()
|
||||
}
|
||||
|
||||
func WithLogger(logger zerolog.Logger) ClientOption {
|
||||
return func(c *Client) {
|
||||
c.logger = logger
|
||||
}
|
||||
}
|
||||
|
||||
func WithTransport(transport *http.Transport) ClientOption {
|
||||
return func(c *Client) {
|
||||
c.client.Transport = transport
|
||||
}
|
||||
}
|
||||
|
||||
// WithRetryableStatus adds status codes that should trigger a retry
|
||||
func WithRetryableStatus(statusCodes ...int) ClientOption {
|
||||
return func(c *Client) {
|
||||
c.retryableStatus = make(map[int]struct{}) // reset the map
|
||||
for _, code := range statusCodes {
|
||||
c.retryableStatus[code] = struct{}{}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func WithProxy(proxyURL string) ClientOption {
|
||||
return func(c *Client) {
|
||||
c.proxy = proxyURL
|
||||
}
|
||||
}
|
||||
|
||||
// doRequest performs a single HTTP request with rate limiting
|
||||
func (c *Client) doRequest(req *http.Request) (*http.Response, error) {
|
||||
if c.rateLimiter != nil {
|
||||
select {
|
||||
case <-req.Context().Done():
|
||||
return nil, req.Context().Err()
|
||||
default:
|
||||
c.rateLimiter.Take()
|
||||
}
|
||||
}
|
||||
|
||||
return c.client.Do(req)
|
||||
}
|
||||
|
||||
// Do performs an HTTP request with retries for certain status codes
|
||||
func (c *Client) Do(req *http.Request) (*http.Response, error) {
|
||||
// Save the request body for reuse in retries
|
||||
var bodyBytes []byte
|
||||
var err error
|
||||
|
||||
if req.Body != nil {
|
||||
bodyBytes, err = io.ReadAll(req.Body)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("reading request body: %w", err)
|
||||
}
|
||||
req.Body.Close()
|
||||
}
|
||||
|
||||
backoff := time.Millisecond * 500
|
||||
var resp *http.Response
|
||||
|
||||
for attempt := 0; attempt <= c.maxRetries; attempt++ {
|
||||
// Reset the request body if it exists
|
||||
if bodyBytes != nil {
|
||||
req.Body = io.NopCloser(bytes.NewReader(bodyBytes))
|
||||
}
|
||||
|
||||
// Apply headers
|
||||
c.headersMu.RLock()
|
||||
if c.headers != nil {
|
||||
for key, value := range c.headers {
|
||||
req.Header.Set(key, value)
|
||||
}
|
||||
}
|
||||
c.headersMu.RUnlock()
|
||||
|
||||
resp, err = c.doRequest(req)
|
||||
if err != nil {
|
||||
// Check if this is a network error that might be worth retrying
|
||||
if isRetryableError(err) && attempt < c.maxRetries {
|
||||
// Apply backoff with jitter
|
||||
jitter := time.Duration(rand.Int63n(int64(backoff / 4)))
|
||||
sleepTime := backoff + jitter
|
||||
|
||||
select {
|
||||
case <-req.Context().Done():
|
||||
return nil, req.Context().Err()
|
||||
case <-time.After(sleepTime):
|
||||
// Continue to next retry attempt
|
||||
}
|
||||
|
||||
// Exponential backoff
|
||||
backoff *= 2
|
||||
continue
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Check if the status code is retryable
|
||||
if _, ok := c.retryableStatus[resp.StatusCode]; !ok || attempt == c.maxRetries {
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// Close the response body before retrying
|
||||
resp.Body.Close()
|
||||
|
||||
// Apply backoff with jitter
|
||||
jitter := time.Duration(rand.Int63n(int64(backoff / 4)))
|
||||
sleepTime := backoff + jitter
|
||||
|
||||
select {
|
||||
case <-req.Context().Done():
|
||||
return nil, req.Context().Err()
|
||||
case <-time.After(sleepTime):
|
||||
// Continue to next retry attempt
|
||||
}
|
||||
|
||||
// Exponential backoff
|
||||
backoff *= 2
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("max retries exceeded")
|
||||
}
|
||||
|
||||
// MakeRequest performs an HTTP request and returns the response body as bytes
|
||||
func (c *Client) MakeRequest(req *http.Request) ([]byte, error) {
|
||||
res, err := c.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if err := res.Body.Close(); err != nil {
|
||||
c.logger.Printf("Failed to close response body: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
bodyBytes, err := io.ReadAll(res.Body)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("reading response body: %w", err)
|
||||
}
|
||||
|
||||
if res.StatusCode < 200 || res.StatusCode >= 300 {
|
||||
return nil, fmt.Errorf("HTTP error %d: %s", res.StatusCode, string(bodyBytes))
|
||||
}
|
||||
|
||||
return bodyBytes, nil
|
||||
}
|
||||
|
||||
func (c *Client) Get(url string) (*http.Response, error) {
|
||||
req, err := http.NewRequest(http.MethodGet, url, nil)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("creating GET request: %w", err)
|
||||
}
|
||||
|
||||
return c.Do(req)
|
||||
}
|
||||
|
||||
// New creates a new HTTP client with the specified options
|
||||
func New(options ...ClientOption) *Client {
|
||||
client := &Client{
|
||||
maxRetries: 3,
|
||||
skipTLSVerify: true,
|
||||
retryableStatus: map[int]struct{}{
|
||||
http.StatusTooManyRequests: struct{}{},
|
||||
http.StatusInternalServerError: struct{}{},
|
||||
http.StatusBadGateway: struct{}{},
|
||||
http.StatusServiceUnavailable: struct{}{},
|
||||
http.StatusGatewayTimeout: struct{}{},
|
||||
},
|
||||
logger: logger.New("request"),
|
||||
timeout: 60 * time.Second,
|
||||
proxy: "",
|
||||
headers: make(map[string]string),
|
||||
}
|
||||
|
||||
// default http client
|
||||
client.client = &http.Client{
|
||||
Timeout: client.timeout,
|
||||
}
|
||||
|
||||
// Apply options before configuring transport
|
||||
for _, option := range options {
|
||||
option(client)
|
||||
}
|
||||
|
||||
// Check if transport was set by WithTransport option
|
||||
if client.client.Transport == nil {
|
||||
transport := &http.Transport{
|
||||
TLSClientConfig: &tls.Config{
|
||||
InsecureSkipVerify: client.skipTLSVerify,
|
||||
},
|
||||
DisableKeepAlives: false,
|
||||
}
|
||||
|
||||
// Configure proxy if needed
|
||||
if client.proxy != "" {
|
||||
if strings.HasPrefix(client.proxy, "socks5://") {
|
||||
// Handle SOCKS5 proxy
|
||||
socksURL, err := url.Parse(client.proxy)
|
||||
if err != nil {
|
||||
client.logger.Error().Msgf("Failed to parse SOCKS5 proxy URL: %v", err)
|
||||
} else {
|
||||
auth := &proxy.Auth{}
|
||||
if socksURL.User != nil {
|
||||
auth.User = socksURL.User.Username()
|
||||
password, _ := socksURL.User.Password()
|
||||
auth.Password = password
|
||||
}
|
||||
|
||||
dialer, err := proxy.SOCKS5("tcp", socksURL.Host, auth, proxy.Direct)
|
||||
if err != nil {
|
||||
client.logger.Error().Msgf("Failed to create SOCKS5 dialer: %v", err)
|
||||
} else {
|
||||
transport.DialContext = func(ctx context.Context, network, addr string) (net.Conn, error) {
|
||||
return dialer.Dial(network, addr)
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
proxyURL, err := url.Parse(client.proxy)
|
||||
if err != nil {
|
||||
client.logger.Error().Msgf("Failed to parse proxy URL: %v", err)
|
||||
} else {
|
||||
transport.Proxy = http.ProxyURL(proxyURL)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
transport.Proxy = http.ProxyFromEnvironment
|
||||
}
|
||||
|
||||
// Set the transport to the client
|
||||
client.client.Transport = transport
|
||||
}
|
||||
|
||||
return client
|
||||
}
|
||||
|
||||
func ParseRateLimit(rateStr string) ratelimit.Limiter {
|
||||
if rateStr == "" {
|
||||
return nil
|
||||
}
|
||||
parts := strings.SplitN(rateStr, "/", 2)
|
||||
if len(parts) != 2 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// parse count
|
||||
count, err := strconv.Atoi(strings.TrimSpace(parts[0]))
|
||||
if err != nil || count <= 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Set slack size to 10%
|
||||
slackSize := count / 10
|
||||
|
||||
// normalize unit
|
||||
unit := strings.ToLower(strings.TrimSpace(parts[1]))
|
||||
unit = strings.TrimSuffix(unit, "s")
|
||||
switch unit {
|
||||
case "minute", "min":
|
||||
return ratelimit.New(count, ratelimit.Per(time.Minute), ratelimit.WithSlack(slackSize))
|
||||
case "second", "sec":
|
||||
return ratelimit.New(count, ratelimit.Per(time.Second), ratelimit.WithSlack(slackSize))
|
||||
case "hour", "hr":
|
||||
return ratelimit.New(count, ratelimit.Per(time.Hour), ratelimit.WithSlack(slackSize))
|
||||
case "day", "d":
|
||||
return ratelimit.New(count, ratelimit.Per(24*time.Hour), ratelimit.WithSlack(slackSize))
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func JSONResponse(w http.ResponseWriter, data interface{}, code int) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.WriteHeader(code)
|
||||
err := json.NewEncoder(w).Encode(data)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func Default() *Client {
|
||||
once.Do(func() {
|
||||
instance = New()
|
||||
})
|
||||
return instance
|
||||
}
|
||||
|
||||
func isRetryableError(err error) bool {
|
||||
errString := err.Error()
|
||||
|
||||
// Connection reset and other network errors
|
||||
if strings.Contains(errString, "connection reset by peer") ||
|
||||
strings.Contains(errString, "read: connection reset") ||
|
||||
strings.Contains(errString, "connection refused") ||
|
||||
strings.Contains(errString, "network is unreachable") ||
|
||||
strings.Contains(errString, "connection timed out") ||
|
||||
strings.Contains(errString, "no such host") ||
|
||||
strings.Contains(errString, "i/o timeout") ||
|
||||
strings.Contains(errString, "unexpected EOF") ||
|
||||
strings.Contains(errString, "TLS handshake timeout") {
|
||||
return true
|
||||
}
|
||||
|
||||
// Check for net.Error type which can provide more information
|
||||
var netErr net.Error
|
||||
if errors.As(err, &netErr) {
|
||||
// Retry on timeout errors and temporary errors
|
||||
return netErr.Timeout()
|
||||
}
|
||||
|
||||
// Not a retryable error
|
||||
return false
|
||||
}
|
||||
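Putting the options together: a hedged sketch of building a client with retries, a timeout and a parsed rate limit, then issuing a GET. The URL, limits and data directory are illustrative assumptions.

package main

import (
	"fmt"
	"io"
	"time"

	"github.com/sirrobot01/decypharr/internal/config"
	"github.com/sirrobot01/decypharr/internal/request"
)

func main() {
	config.SetConfigPath("/tmp/decypharr-data") // assumed; request.New builds its logger via config.Get()

	client := request.New(
		request.WithMaxRetries(5),
		request.WithTimeout(30*time.Second),
		request.WithRateLimiter(request.ParseRateLimit("60/minute")), // also accepts e.g. "10/second", "500/hour"
		request.WithHeaders(map[string]string{"User-Agent": "decypharr-example"}),
	)

	resp, err := client.Get("https://example.com/")
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.StatusCode, len(body))
}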
43
internal/utils/debouncer.go
Normal file
@@ -0,0 +1,43 @@
|
||||
package utils
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
type Debouncer[T any] struct {
|
||||
mu sync.Mutex
|
||||
timer *time.Timer
|
||||
interval time.Duration
|
||||
caller func(arg T)
|
||||
}
|
||||
|
||||
func NewDebouncer[T any](interval time.Duration, caller func(arg T)) *Debouncer[T] {
|
||||
return &Debouncer[T]{
|
||||
interval: interval,
|
||||
caller: caller,
|
||||
}
|
||||
}
|
||||
|
||||
func (d *Debouncer[T]) Call(arg T) {
|
||||
d.mu.Lock()
|
||||
defer d.mu.Unlock()
|
||||
|
||||
if d.timer != nil {
|
||||
d.timer.Stop()
|
||||
}
|
||||
|
||||
d.timer = time.AfterFunc(d.interval, func() {
|
||||
d.caller(arg)
|
||||
})
|
||||
}
|
||||
|
||||
func (d *Debouncer[T]) Stop() {
|
||||
d.mu.Lock()
|
||||
defer d.mu.Unlock()
|
||||
|
||||
if d.timer != nil {
|
||||
d.timer.Stop()
|
||||
d.timer = nil
|
||||
}
|
||||
}
|
||||
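The debouncer collapses a burst of calls into a single invocation: each Call resets the timer, so the callback fires once, with the most recent argument, after the interval has passed with no further calls. A minimal sketch with illustrative paths.

package main

import (
	"fmt"
	"time"

	"github.com/sirrobot01/decypharr/internal/utils"
)

func main() {
	// Only the last argument within a 2s burst is passed to the callback.
	d := utils.NewDebouncer[string](2*time.Second, func(dir string) {
		fmt.Println("refreshing", dir)
	})

	d.Call("/mnt/remote/movies")
	d.Call("/mnt/remote/shows") // resets the timer; this value wins

	time.Sleep(3 * time.Second) // prints: refreshing /mnt/remote/shows
	d.Stop()
}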
47
internal/utils/error.go
Normal file
@@ -0,0 +1,47 @@
|
||||
package utils
|
||||
|
||||
import "errors"
|
||||
|
||||
type HTTPError struct {
|
||||
StatusCode int
|
||||
Message string
|
||||
Code string
|
||||
}
|
||||
|
||||
func (e *HTTPError) Error() string {
|
||||
return e.Message
|
||||
}
|
||||
|
||||
var HosterUnavailableError = &HTTPError{
|
||||
StatusCode: 503,
|
||||
Message: "Hoster is unavailable",
|
||||
Code: "hoster_unavailable",
|
||||
}
|
||||
|
||||
var TrafficExceededError = &HTTPError{
|
||||
StatusCode: 503,
|
||||
Message: "Traffic exceeded",
|
||||
Code: "traffic_exceeded",
|
||||
}
|
||||
|
||||
var ErrLinkBroken = &HTTPError{
|
||||
StatusCode: 404,
|
||||
Message: "File is unavailable",
|
||||
Code: "file_unavailable",
|
||||
}
|
||||
|
||||
var TorrentNotFoundError = &HTTPError{
|
||||
StatusCode: 404,
|
||||
Message: "Torrent not found",
|
||||
Code: "torrent_not_found",
|
||||
}
|
||||
|
||||
var TooManyActiveDownloadsError = &HTTPError{
|
||||
StatusCode: 509,
|
||||
Message: "Too many active downloads",
|
||||
Code: "too_many_active_downloads",
|
||||
}
|
||||
|
||||
func IsTooManyActiveDownloadsError(err error) bool {
	// errors.Is compares against the sentinel value; errors.As with
	// &TooManyActiveDownloadsError would match any *HTTPError in the chain
	// and overwrite the sentinel.
	return errors.Is(err, TooManyActiveDownloadsError)
}
|
||||
86
internal/utils/file.go
Normal file
@@ -0,0 +1,86 @@
|
||||
package utils
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net/url"
|
||||
"os"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func PathUnescape(path string) string {
|
||||
|
||||
// try to use url.PathUnescape
|
||||
if unescaped, err := url.PathUnescape(path); err == nil {
|
||||
return unescaped
|
||||
}
|
||||
|
||||
// unescape %
|
||||
unescapedPath := strings.ReplaceAll(path, "%25", "%")
|
||||
|
||||
// add others
|
||||
|
||||
return unescapedPath
|
||||
}
|
||||
|
||||
func PreCacheFile(filePaths []string) error {
|
||||
if len(filePaths) == 0 {
|
||||
return fmt.Errorf("no file paths provided")
|
||||
}
|
||||
|
||||
for _, filePath := range filePaths {
|
||||
err := func(f string) error {
|
||||
|
||||
file, err := os.Open(f)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
// File has probably been moved by arr, return silently
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("failed to open file: %s: %v", f, err)
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
// Pre-cache the file header (first 256KB) using 16KB chunks.
|
||||
if err := readSmallChunks(file, 0, 256*1024, 16*1024); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := readSmallChunks(file, 1024*1024, 64*1024, 16*1024); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}(filePath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func readSmallChunks(file *os.File, startPos int64, totalToRead int, chunkSize int) error {
|
||||
_, err := file.Seek(startPos, 0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
buf := make([]byte, chunkSize)
|
||||
bytesRemaining := totalToRead
|
||||
|
||||
for bytesRemaining > 0 {
|
||||
toRead := chunkSize
|
||||
if bytesRemaining < chunkSize {
|
||||
toRead = bytesRemaining
|
||||
}
|
||||
|
||||
n, err := file.Read(buf[:toRead])
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
bytesRemaining -= n
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -1,56 +1,65 @@
|
||||
package common
|
||||
package utils
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/base32"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"github.com/anacrolix/torrent/metainfo"
|
||||
"github.com/sirrobot01/decypharr/internal/request"
|
||||
"io"
|
||||
"log"
|
||||
"math/rand"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
|
||||
hexRegex = regexp.MustCompile("^[0-9a-fA-F]{40}$")
|
||||
)
|
||||
|
||||
type Magnet struct {
|
||||
Name string
|
||||
InfoHash string
|
||||
Size int64
|
||||
Link string
|
||||
Name string `json:"name"`
|
||||
InfoHash string `json:"infoHash"`
|
||||
Size int64 `json:"size"`
|
||||
Link string `json:"link"`
|
||||
File []byte `json:"-"`
|
||||
}
|
||||
|
||||
func (m *Magnet) IsTorrent() bool {
|
||||
return m.File != nil
|
||||
}
|
||||
|
||||
func GetMagnetFromFile(file io.Reader, filePath string) (*Magnet, error) {
|
||||
var (
|
||||
m *Magnet
|
||||
err error
|
||||
)
|
||||
if filepath.Ext(filePath) == ".torrent" {
|
||||
mi, err := metainfo.Load(file)
|
||||
torrentData, err := io.ReadAll(file)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hash := mi.HashInfoBytes()
|
||||
infoHash := hash.HexString()
|
||||
info, err := mi.UnmarshalInfo()
|
||||
m, err = GetMagnetFromBytes(torrentData)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
magnet := &Magnet{
|
||||
InfoHash: infoHash,
|
||||
Name: info.Name,
|
||||
Size: info.Length,
|
||||
Link: mi.Magnet(&hash, &info).String(),
|
||||
}
|
||||
return magnet, nil
|
||||
} else {
|
||||
// .magnet file
|
||||
magnetLink := ReadMagnetFile(file)
|
||||
return GetMagnetInfo(magnetLink)
|
||||
m, err = GetMagnetInfo(magnetLink)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
m.Name = strings.TrimSuffix(filePath, filepath.Ext(filePath))
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func GetMagnetFromUrl(url string) (*Magnet, error) {
|
||||
@@ -62,6 +71,28 @@ func GetMagnetFromUrl(url string) (*Magnet, error) {
|
||||
return nil, fmt.Errorf("invalid url")
|
||||
}
|
||||
|
||||
func GetMagnetFromBytes(torrentData []byte) (*Magnet, error) {
|
||||
// Parse the torrent metainfo from the raw bytes
|
||||
mi, err := metainfo.Load(bytes.NewReader(torrentData))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hash := mi.HashInfoBytes()
|
||||
infoHash := hash.HexString()
|
||||
info, err := mi.UnmarshalInfo()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
magnet := &Magnet{
|
||||
InfoHash: infoHash,
|
||||
Name: info.Name,
|
||||
Size: info.Length,
|
||||
Link: mi.Magnet(&hash, &info).String(),
|
||||
File: torrentData,
|
||||
}
|
||||
return magnet, nil
|
||||
}
|
||||
|
||||
func OpenMagnetFile(filePath string) string {
|
||||
file, err := os.Open(filePath)
|
||||
if err != nil {
|
||||
@@ -104,27 +135,11 @@ func OpenMagnetHttpURL(magnetLink string) (*Magnet, error) {
|
||||
return
|
||||
}
|
||||
}(resp) // Ensure the response is closed after the function ends
|
||||
|
||||
// Create a scanner to read the file line by line
|
||||
|
||||
mi, err := metainfo.Load(resp.Body)
|
||||
torrentData, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, fmt.Errorf("error reading response body: %v", err)
|
||||
}
|
||||
hash := mi.HashInfoBytes()
|
||||
infoHash := hash.HexString()
|
||||
info, err := mi.UnmarshalInfo()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
log.Println("InfoHash: ", infoHash)
|
||||
magnet := &Magnet{
|
||||
InfoHash: infoHash,
|
||||
Name: info.Name,
|
||||
Size: info.Length,
|
||||
Link: mi.Magnet(&hash, &info).String(),
|
||||
}
|
||||
return magnet, nil
|
||||
return GetMagnetFromBytes(torrentData)
|
||||
}
|
||||
|
||||
func GetMagnetInfo(magnetLink string) (*Magnet, error) {
|
||||
@@ -156,15 +171,6 @@ func GetMagnetInfo(magnetLink string) (*Magnet, error) {
|
||||
return magnet, nil
|
||||
}
|
||||
|
||||
func RandomString(length int) string {
|
||||
const charset = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
|
||||
b := make([]byte, length)
|
||||
for i := range b {
|
||||
b[i] = charset[rand.Intn(len(charset))]
|
||||
}
|
||||
return string(b)
|
||||
}
|
||||
|
||||
func ExtractInfoHash(magnetDesc string) string {
|
||||
const prefix = "xt=urn:btih:"
|
||||
start := strings.Index(magnetDesc, prefix)
|
||||
@@ -185,7 +191,6 @@ func ExtractInfoHash(magnetDesc string) string {
|
||||
|
||||
func processInfoHash(input string) (string, error) {
|
||||
// Regular expression for a valid 40-character hex infohash
|
||||
hexRegex := regexp.MustCompile("^[0-9a-fA-F]{40}$")
|
||||
|
||||
// If it's already a valid hex infohash, return it as is
|
||||
if hexRegex.MatchString(input) {
|
||||
@@ -209,30 +214,26 @@ func processInfoHash(input string) (string, error) {
|
||||
return "", fmt.Errorf("invalid infohash: %s", input)
|
||||
}
|
||||
|
||||
func NewLogger(prefix string, output *os.File) *log.Logger {
|
||||
f := fmt.Sprintf("[%s] ", prefix)
|
||||
return log.New(output, f, log.LstdFlags)
|
||||
}
|
||||
|
||||
func GetInfohashFromURL(url string) (string, error) {
|
||||
// Download the torrent file
|
||||
var magnetLink string
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
client := &http.Client{
|
||||
Timeout: 30 * time.Second,
|
||||
CheckRedirect: func(req *http.Request, via []*http.Request) error {
|
||||
if len(via) >= 3 {
|
||||
return fmt.Errorf("stopped after 3 redirects")
|
||||
}
|
||||
if strings.HasPrefix(req.URL.String(), "magnet:") {
|
||||
// Stop the redirect chain
|
||||
magnetLink = req.URL.String()
|
||||
return http.ErrUseLastResponse
|
||||
}
|
||||
return nil
|
||||
},
|
||||
redirectFunc := func(req *http.Request, via []*http.Request) error {
|
||||
if len(via) >= 3 {
|
||||
return fmt.Errorf("stopped after 3 redirects")
|
||||
}
|
||||
if strings.HasPrefix(req.URL.String(), "magnet:") {
|
||||
// Stop the redirect chain
|
||||
magnetLink = req.URL.String()
|
||||
return http.ErrUseLastResponse
|
||||
}
|
||||
return nil
|
||||
}
|
||||
client := request.New(
|
||||
request.WithTimeout(30*time.Second),
|
||||
request.WithRedirectPolicy(redirectFunc),
|
||||
)
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
|
||||
if err != nil {
|
||||
return "", err
|
||||
@@ -255,21 +256,14 @@ func GetInfohashFromURL(url string) (string, error) {
|
||||
return infoHash, nil
|
||||
}
|
||||
|
||||
func JoinURL(base string, paths ...string) (string, error) {
|
||||
// Parse the base URL
|
||||
u, err := url.Parse(base)
|
||||
if err != nil {
|
||||
return "", err
|
||||
func ConstructMagnet(infoHash, name string) *Magnet {
|
||||
// Create a magnet link from the infohash and name
|
||||
name = url.QueryEscape(strings.TrimSpace(name))
|
||||
magnetUri := fmt.Sprintf("magnet:?xt=urn:btih:%s&dn=%s", infoHash, name)
|
||||
return &Magnet{
|
||||
InfoHash: infoHash,
|
||||
Name: name,
|
||||
Size: 0,
|
||||
Link: magnetUri,
|
||||
}
|
||||
|
||||
// Join the path components
|
||||
u.Path = path.Join(u.Path, path.Join(paths...))
|
||||
|
||||
// Return the resulting URL as a string
|
||||
return u.String(), nil
|
||||
}
|
||||
|
||||
func FileReady(path string) bool {
|
||||
_, err := os.Stat(path)
|
||||
return !os.IsNotExist(err) // Returns true if the file exists
|
||||
}
|
||||
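For reference, a hedged sketch of the two directions these helpers cover: building a magnet link from a known infohash and parsing one back. The infohash below is an arbitrary illustrative value.

package main

import (
	"fmt"

	"github.com/sirrobot01/decypharr/internal/utils"
)

func main() {
	hash := "0123456789abcdef0123456789abcdef01234567" // illustrative 40-char hex infohash

	m := utils.ConstructMagnet(hash, "Example Name")
	fmt.Println(m.Link) // magnet:?xt=urn:btih:0123...&dn=Example+Name (dn is query-escaped)

	parsed, err := utils.GetMagnetInfo(m.Link)
	if err != nil {
		fmt.Println("parse failed:", err)
		return
	}
	fmt.Println(parsed.InfoHash) // round-trips the infohash
}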
24
internal/utils/misc.go
Normal file
@@ -0,0 +1,24 @@
|
||||
package utils
|
||||
|
||||
func RemoveItem[S ~[]E, E comparable](s S, values ...E) S {
|
||||
result := make(S, 0, len(s))
|
||||
outer:
|
||||
for _, item := range s {
|
||||
for _, v := range values {
|
||||
if item == v {
|
||||
continue outer
|
||||
}
|
||||
}
|
||||
result = append(result, item)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func Contains(slice []string, value string) bool {
|
||||
for _, item := range slice {
|
||||
if item == value {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
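A short sketch of the two slice helpers above; the category names are illustrative.

package main

import (
	"fmt"

	"github.com/sirrobot01/decypharr/internal/utils"
)

func main() {
	cats := []string{"sonarr", "radarr", "lidarr"}

	cats = utils.RemoveItem(cats, "lidarr") // works for any comparable element type
	fmt.Println(cats)                       // [sonarr radarr]

	fmt.Println(utils.Contains(cats, "radarr")) // true
	fmt.Println(utils.Contains(cats, "lidarr")) // false
}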
59
internal/utils/regex.go
Normal file
@@ -0,0 +1,59 @@
|
||||
package utils
|
||||
|
||||
import (
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var (
|
||||
videoMatch = "(?i)(\\.)(webm|m4v|3gp|nsv|ty|strm|rm|rmvb|m3u|ifo|mov|qt|divx|xvid|bivx|nrg|pva|wmv|asf|asx|ogm|ogv|m2v|avi|bin|dat|dvr-ms|mpg|mpeg|mp4|avc|vp3|svq3|nuv|viv|dv|fli|flv|wpl|vob|mkv|mk3d|ts|wtv|m2ts)$"
|
||||
musicMatch = "(?i)(\\.)(mp2|mp3|m4a|m4b|m4p|ogg|oga|opus|wma|wav|wv|flac|ape|aif|aiff|aifc)$"
|
||||
sampleMatch = `(?i)(^|[\s/\\])(sample|trailer|thumb|special|extras?)s?[-/]|(\((sample|trailer|thumb|special|extras?)s?\))|(-\s*(sample|trailer|thumb|special|extras?)s?)`
|
||||
)
|
||||
|
||||
var (
|
||||
mediaRegex = regexp.MustCompile(videoMatch + "|" + musicMatch)
|
||||
sampleRegex = regexp.MustCompile(sampleMatch)
|
||||
)
|
||||
|
||||
func RegexMatch(re *regexp.Regexp, value string) bool {
|
||||
return re.MatchString(value)
|
||||
}
|
||||
|
||||
func RemoveInvalidChars(value string) string {
|
||||
return strings.Map(func(r rune) rune {
|
||||
if r == filepath.Separator || r == ':' {
|
||||
return r
|
||||
}
|
||||
if filepath.IsAbs(string(r)) {
|
||||
return r
|
||||
}
|
||||
if strings.ContainsRune(filepath.VolumeName("C:"+string(r)), r) {
|
||||
return r
|
||||
}
|
||||
if r < 32 || strings.ContainsRune(`<>:"/\|?*`, r) {
|
||||
return -1
|
||||
}
|
||||
return r
|
||||
}, value)
|
||||
}
|
||||
|
||||
func RemoveExtension(value string) string {
|
||||
if loc := mediaRegex.FindStringIndex(value); loc != nil {
|
||||
return value[:loc[0]]
|
||||
}
|
||||
return value
|
||||
}
|
||||
|
||||
func IsMediaFile(path string) bool {
|
||||
return RegexMatch(mediaRegex, path)
|
||||
}
|
||||
|
||||
func IsSampleFile(path string) bool {
|
||||
filename := filepath.Base(path)
|
||||
if strings.HasSuffix(strings.ToLower(filename), "sample.mkv") {
|
||||
return true
|
||||
}
|
||||
return RegexMatch(sampleRegex, path)
|
||||
}
|
||||
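A hedged sketch of what these classifiers do on a few illustrative paths; the expected results in the comments follow from the patterns above.

package main

import (
	"fmt"

	"github.com/sirrobot01/decypharr/internal/utils"
)

func main() {
	fmt.Println(utils.IsMediaFile("Show.S01E01.mkv"))             // true (video extension)
	fmt.Println(utils.IsMediaFile("cover.jpg"))                   // false
	fmt.Println(utils.IsSampleFile("Movie (2020)/sample.mkv"))    // true
	fmt.Println(utils.RemoveExtension("Movie.2020.mp4"))          // "Movie.2020"
	fmt.Println(utils.RemoveInvalidChars(`Movie: The "Sequel"?`)) // "Movie: The Sequel"
}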
76
internal/utils/scheduler.go
Normal file
@@ -0,0 +1,76 @@
package utils

import (
	"context"
	"fmt"
	"github.com/go-co-op/gocron/v2"
	"github.com/robfig/cron/v3"
	"strconv"
	"strings"
	"time"
)

func ScheduleJob(ctx context.Context, interval string, loc *time.Location, jobFunc func()) (gocron.Scheduler, error) {
	if loc == nil {
		loc = time.Local
	}
	s, err := gocron.NewScheduler(gocron.WithLocation(loc))
	if err != nil {
		return s, fmt.Errorf("failed to create scheduler: %w", err)
	}
	jd, err := ConvertToJobDef(interval)
	if err != nil {
		return s, fmt.Errorf("failed to convert interval to job definition: %w", err)
	}
	// Schedule the job
	if _, err = s.NewJob(jd, gocron.NewTask(jobFunc), gocron.WithContext(ctx)); err != nil {
		return s, fmt.Errorf("failed to create job: %w", err)
	}
	return s, nil
}

// ConvertToJobDef converts a string interval to a gocron.JobDefinition.
func ConvertToJobDef(interval string) (gocron.JobDefinition, error) {
	// Parse the interval string
	// Interval could be in the format "1h", "30m", "15s" or "1h30m" or "04:05"
	var jd gocron.JobDefinition

	if t, ok := parseClockTime(interval); ok {
		return gocron.DailyJob(1, gocron.NewAtTimes(
			gocron.NewAtTime(uint(t.Hour()), uint(t.Minute()), uint(t.Second())),
		)), nil
	}

	if _, err := cron.ParseStandard(interval); err == nil {
		return gocron.CronJob(interval, false), nil
	}

	if dur, err := time.ParseDuration(interval); err == nil {
		return gocron.DurationJob(dur), nil
	}

	return jd, fmt.Errorf("invalid interval format: %s", interval)
}

func parseClockTime(s string) (time.Time, bool) {
	parts := strings.Split(s, ":")
	if len(parts) != 2 {
		return time.Time{}, false
	}
	h, err := strconv.Atoi(parts[0])
	if err != nil || h < 0 || h > 23 {
		return time.Time{}, false
	}
	m, err := strconv.Atoi(parts[1])
	if err != nil || m < 0 || m > 59 {
		return time.Time{}, false
	}
	now := time.Now()
	// build a time.Time for today at h:m:00 in the local zone
	t := time.Date(
		now.Year(), now.Month(), now.Day(),
		h, m, 0, 0,
		time.Local,
	)
	return t, true
}
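Not part of the diff: a rough usage sketch for the scheduler helper above. The "30m" interval and the job body are placeholders; the caller is assumed to keep the gocron scheduler running and shut it down when its context is cancelled.

package example

import (
	"context"
	"time"

	"github.com/sirrobot01/decypharr/internal/utils"
)

// runPeriodic is a hypothetical caller of ScheduleJob.
func runPeriodic(ctx context.Context) error {
	s, err := utils.ScheduleJob(ctx, "30m", time.UTC, func() {
		// periodic work goes here
	})
	if err != nil {
		return err
	}
	s.Start()
	<-ctx.Done() // block until the caller cancels the context
	return s.Shutdown()
}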
29
main.go
@@ -1,22 +1,35 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"flag"
|
||||
"goBlack/cmd"
|
||||
"goBlack/common"
|
||||
"github.com/sirrobot01/decypharr/cmd/decypharr"
|
||||
"github.com/sirrobot01/decypharr/internal/config"
|
||||
"log"
|
||||
"os"
|
||||
"os/signal"
|
||||
"runtime/debug"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
func main() {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
log.Printf("FATAL: Recovered from panic in main: %v\n", r)
|
||||
debug.PrintStack()
|
||||
}
|
||||
}()
|
||||
var configPath string
|
||||
flag.StringVar(&configPath, "config", "config.json", "path to the config file")
|
||||
flag.StringVar(&configPath, "config", "/data", "path to the data folder")
|
||||
flag.Parse()
|
||||
config.SetConfigPath(configPath)
|
||||
config.Get()
|
||||
|
||||
// Load the config file
|
||||
conf, err := common.LoadConfig(configPath)
|
||||
if err != nil {
|
||||
// Create a context canceled on SIGINT/SIGTERM
|
||||
ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
|
||||
defer stop()
|
||||
|
||||
if err := decypharr.Start(ctx); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
cmd.Start(conf)
|
||||
|
||||
}
|
||||
|
||||
1624
package-lock.json
generated
Normal file
19
package.json
Normal file
@@ -0,0 +1,19 @@
{
  "name": "decypharr",
  "version": "1.0.0",
  "description": "Media management tool",
  "scripts": {
    "build-css": "tailwindcss -i ./pkg/web/assets/styles.css -o ./pkg/web/assets/build/css/styles.css --minify",
    "minify-js": "node scripts/minify-js.js",
    "download-assets": "node scripts/download-assets.js",
    "build": "npm run build-css && npm run minify-js",
    "build-all": "npm run download-assets && npm run build",
    "dev": "npm run build && air"
  },
  "devDependencies": {
    "tailwindcss": "^3.4.0",
    "daisyui": "^4.12.10",
    "terser": "^5.24.0",
    "clean-css": "^5.3.3"
  }
}
232
pkg/arr/arr.go
Normal file
@@ -0,0 +1,232 @@
|
||||
package arr
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/sirrobot01/decypharr/internal/config"
|
||||
"github.com/sirrobot01/decypharr/internal/logger"
|
||||
"github.com/sirrobot01/decypharr/internal/request"
|
||||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Type is a type of arr
|
||||
type Type string
|
||||
|
||||
var sharedClient = &http.Client{
|
||||
Transport: &http.Transport{
|
||||
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
|
||||
},
|
||||
Timeout: 60 * time.Second,
|
||||
}
|
||||
|
||||
const (
|
||||
Sonarr Type = "sonarr"
|
||||
Radarr Type = "radarr"
|
||||
Lidarr Type = "lidarr"
|
||||
Readarr Type = "readarr"
|
||||
)
|
||||
|
||||
type Arr struct {
|
||||
Name string `json:"name"`
|
||||
Host string `json:"host"`
|
||||
Token string `json:"token"`
|
||||
Type Type `json:"type"`
|
||||
Cleanup bool `json:"cleanup"`
|
||||
SkipRepair bool `json:"skip_repair"`
|
||||
DownloadUncached *bool `json:"download_uncached"`
|
||||
SelectedDebrid string `json:"selected_debrid,omitempty"` // The debrid service selected for this arr
|
||||
Source string `json:"source,omitempty"` // The source of the arr, e.g. "auto", "manual". Auto means it was automatically detected from the arr
|
||||
}
|
||||
|
||||
func New(name, host, token string, cleanup, skipRepair bool, downloadUncached *bool, selectedDebrid, source string) *Arr {
|
||||
return &Arr{
|
||||
Name: name,
|
||||
Host: host,
|
||||
Token: strings.TrimSpace(token),
|
||||
Type: InferType(host, name),
|
||||
Cleanup: cleanup,
|
||||
SkipRepair: skipRepair,
|
||||
DownloadUncached: downloadUncached,
|
||||
SelectedDebrid: selectedDebrid,
|
||||
Source: source,
|
||||
}
|
||||
}
|
||||
|
||||
func (a *Arr) Request(method, endpoint string, payload interface{}) (*http.Response, error) {
|
||||
if a.Token == "" || a.Host == "" {
|
||||
return nil, fmt.Errorf("arr not configured")
|
||||
}
|
||||
url, err := request.JoinURL(a.Host, endpoint)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var body io.Reader
|
||||
if payload != nil {
|
||||
b, err := json.Marshal(payload)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
body = bytes.NewReader(b)
|
||||
}
|
||||
req, err := http.NewRequest(method, url, body)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
req.Header.Set("X-Api-Key", a.Token)
|
||||
|
||||
var resp *http.Response
|
||||
|
||||
for attempts := 0; attempts < 5; attempts++ {
|
||||
resp, err = sharedClient.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// If we got a 401, wait briefly and retry
|
||||
if resp.StatusCode == http.StatusUnauthorized {
|
||||
resp.Body.Close() // Don't leak response bodies
|
||||
if attempts < 4 { // Don't sleep on the last attempt
|
||||
time.Sleep(time.Duration(attempts+1) * 100 * time.Millisecond)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (a *Arr) Validate() error {
|
||||
if a.Token == "" || a.Host == "" {
|
||||
return fmt.Errorf("arr not configured: %s", a.Name)
|
||||
}
|
||||
resp, err := a.Request("GET", "/api/v3/health", nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
// If the response is not 200 or 404 (404 is the case for Lidarr, etc.), return an error
|
||||
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNotFound {
|
||||
return fmt.Errorf("failed to validate arr %s: %s", a.Name, resp.Status)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type Storage struct {
|
||||
Arrs map[string]*Arr // name -> arr
|
||||
mu sync.Mutex
|
||||
logger zerolog.Logger
|
||||
}
|
||||
|
||||
func (s *Storage) Cleanup() {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
s.Arrs = make(map[string]*Arr)
|
||||
}
|
||||
|
||||
func InferType(host, name string) Type {
|
||||
switch {
|
||||
case strings.Contains(host, "sonarr") || strings.Contains(name, "sonarr"):
|
||||
return Sonarr
|
||||
case strings.Contains(host, "radarr") || strings.Contains(name, "radarr"):
|
||||
return Radarr
|
||||
case strings.Contains(host, "lidarr") || strings.Contains(name, "lidarr"):
|
||||
return Lidarr
|
||||
case strings.Contains(host, "readarr") || strings.Contains(name, "readarr"):
|
||||
return Readarr
|
||||
default:
|
||||
return ""
|
||||
}
|
||||
}
|
||||
|
||||
func NewStorage() *Storage {
|
||||
arrs := make(map[string]*Arr)
|
||||
for _, a := range config.Get().Arrs {
|
||||
if a.Host == "" || a.Token == "" || a.Name == "" {
|
||||
continue // Skip if host or token is not set
|
||||
}
|
||||
name := a.Name
|
||||
arrs[name] = New(name, a.Host, a.Token, a.Cleanup, a.SkipRepair, a.DownloadUncached, a.SelectedDebrid, a.Source)
|
||||
}
|
||||
return &Storage{
|
||||
Arrs: arrs,
|
||||
logger: logger.New("arr"),
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Storage) AddOrUpdate(arr *Arr) {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
if arr.Host == "" || arr.Token == "" || arr.Name == "" {
|
||||
return
|
||||
}
|
||||
s.Arrs[arr.Name] = arr
|
||||
}
|
||||
|
||||
func (s *Storage) Get(name string) *Arr {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
return s.Arrs[name]
|
||||
}
|
||||
|
||||
func (s *Storage) GetAll() []*Arr {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
arrs := make([]*Arr, 0, len(s.Arrs))
|
||||
for _, arr := range s.Arrs {
|
||||
arrs = append(arrs, arr)
|
||||
}
|
||||
return arrs
|
||||
}
|
||||
|
||||
func (s *Storage) StartSchedule(ctx context.Context) error {
|
||||
|
||||
ticker := time.NewTicker(10 * time.Second)
|
||||
|
||||
select {
|
||||
case <-ticker.C:
|
||||
s.cleanupArrsQueue()
|
||||
case <-ctx.Done():
|
||||
ticker.Stop()
|
||||
return nil
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Storage) cleanupArrsQueue() {
|
||||
arrs := make([]*Arr, 0)
|
||||
for _, arr := range s.Arrs {
|
||||
if !arr.Cleanup {
|
||||
continue
|
||||
}
|
||||
arrs = append(arrs, arr)
|
||||
}
|
||||
if len(arrs) > 0 {
|
||||
for _, arr := range arrs {
|
||||
if err := arr.CleanupQueue(); err != nil {
|
||||
s.logger.Error().Err(err).Msgf("Failed to cleanup arr %s", arr.Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (a *Arr) Refresh() {
|
||||
payload := struct {
|
||||
Name string `json:"name"`
|
||||
}{
|
||||
Name: "RefreshMonitoredDownloads",
|
||||
}
|
||||
|
||||
_, _ = a.Request(http.MethodPost, "api/v3/command", payload)
|
||||
}
|
||||
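Not part of the diff: a hedged sketch of wiring up an Arr instance and validating it, using only the constructor and methods defined above. The host, token, and name are placeholders.

package example

import (
	"log"

	"github.com/sirrobot01/decypharr/pkg/arr"
)

func validateSonarr() {
	// Placeholder host/token; InferType picks Sonarr from the name.
	a := arr.New("sonarr-4k", "http://localhost:8989", "api-token", false, false, nil, "", "manual")
	if err := a.Validate(); err != nil {
		log.Printf("arr %s failed validation: %v", a.Name, err)
		return
	}
	// Ask the arr to re-scan its monitored downloads.
	a.Refresh()
}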
284
pkg/arr/content.go
Normal file
@@ -0,0 +1,284 @@
|
||||
package arr
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"golang.org/x/sync/errgroup"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type episode struct {
|
||||
Id int `json:"id"`
|
||||
EpisodeFileID int `json:"episodeFileId"`
|
||||
}
|
||||
|
||||
type sonarrSearch struct {
|
||||
Name string `json:"name"`
|
||||
SeasonNumber int `json:"seasonNumber"`
|
||||
SeriesId int `json:"seriesId"`
|
||||
}
|
||||
|
||||
type radarrSearch struct {
|
||||
Name string `json:"name"`
|
||||
MovieIds []int `json:"movieIds"`
|
||||
}
|
||||
|
||||
func (a *Arr) GetMedia(mediaId string) ([]Content, error) {
|
||||
// Get series
|
||||
if a.Type == Radarr {
|
||||
return GetMovies(a, mediaId)
|
||||
}
|
||||
// This is likely Sonarr
|
||||
resp, err := a.Request(http.MethodGet, fmt.Sprintf("api/v3/series?tvdbId=%s", mediaId), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
if resp.StatusCode == http.StatusNotFound {
|
||||
// This is likely Radarr
|
||||
return GetMovies(a, mediaId)
|
||||
}
|
||||
a.Type = Sonarr
|
||||
|
||||
type series struct {
|
||||
Title string `json:"title"`
|
||||
Id int `json:"id"`
|
||||
}
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return nil, fmt.Errorf("failed to get series: %s", resp.Status)
|
||||
}
|
||||
var data []series
|
||||
if err = json.NewDecoder(resp.Body).Decode(&data); err != nil {
|
||||
return nil, fmt.Errorf("failed to decode series: %v", err)
|
||||
}
|
||||
// Get series files
|
||||
contents := make([]Content, 0)
|
||||
for _, d := range data {
|
||||
resp, err = a.Request(http.MethodGet, fmt.Sprintf("api/v3/episodefile?seriesId=%d", d.Id), nil)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
var ct Content
|
||||
var seriesFiles []seriesFile
|
||||
episodeFileIDMap := make(map[int]int)
|
||||
func() {
|
||||
defer resp.Body.Close()
|
||||
if err = json.NewDecoder(resp.Body).Decode(&seriesFiles); err != nil {
|
||||
return
|
||||
}
|
||||
ct = Content{
|
||||
Title: d.Title,
|
||||
Id: d.Id,
|
||||
}
|
||||
}()
|
||||
resp, err = a.Request(http.MethodGet, fmt.Sprintf("api/v3/episode?seriesId=%d", d.Id), nil)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
func() {
|
||||
defer resp.Body.Close()
|
||||
var episodes []episode
|
||||
if err = json.NewDecoder(resp.Body).Decode(&episodes); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
for _, e := range episodes {
|
||||
episodeFileIDMap[e.EpisodeFileID] = e.Id
|
||||
}
|
||||
}()
|
||||
files := make([]ContentFile, 0)
|
||||
for _, file := range seriesFiles {
|
||||
eId, ok := episodeFileIDMap[file.Id]
|
||||
if !ok {
|
||||
eId = 0
|
||||
}
|
||||
if file.Id == 0 || file.Path == "" {
|
||||
// Skip files without path
|
||||
continue
|
||||
}
|
||||
files = append(files, ContentFile{
|
||||
FileId: file.Id,
|
||||
Path: file.Path,
|
||||
Id: d.Id,
|
||||
EpisodeId: eId,
|
||||
SeasonNumber: file.SeasonNumber,
|
||||
Size: file.Size,
|
||||
})
|
||||
}
|
||||
if len(files) == 0 {
|
||||
// Skip series without files
|
||||
continue
|
||||
}
|
||||
ct.Files = files
|
||||
contents = append(contents, ct)
|
||||
}
|
||||
return contents, nil
|
||||
}
|
||||
|
||||
func GetMovies(a *Arr, tvId string) ([]Content, error) {
|
||||
resp, err := a.Request(http.MethodGet, fmt.Sprintf("api/v3/movie?tmdbId=%s", tvId), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if resp.StatusCode == http.StatusNotFound {
|
||||
// This is likely Lidarr or Readarr
|
||||
return nil, fmt.Errorf("failed to get movies: %s", resp.Status)
|
||||
}
|
||||
a.Type = Radarr
|
||||
defer resp.Body.Close()
|
||||
var movies []Movie
|
||||
if err = json.NewDecoder(resp.Body).Decode(&movies); err != nil {
|
||||
return nil, fmt.Errorf("failed to decode movies: %v", err)
|
||||
}
|
||||
contents := make([]Content, 0)
|
||||
for _, movie := range movies {
|
||||
if movie.MovieFile.Id == 0 || movie.MovieFile.Path == "" {
|
||||
// Skip movies without files
|
||||
continue
|
||||
}
|
||||
ct := Content{
|
||||
Title: movie.Title,
|
||||
Id: movie.Id,
|
||||
}
|
||||
files := make([]ContentFile, 0)
|
||||
|
||||
files = append(files, ContentFile{
|
||||
FileId: movie.MovieFile.Id,
|
||||
Id: movie.Id,
|
||||
Path: movie.MovieFile.Path,
|
||||
Size: movie.MovieFile.Size,
|
||||
})
|
||||
ct.Files = files
|
||||
contents = append(contents, ct)
|
||||
}
|
||||
return contents, nil
|
||||
}
|
||||
|
||||
// searchSonarr searches for missing files in the arr
|
||||
// the map keys are "seriesId-seasonNumber" pairs
|
||||
func (a *Arr) searchSonarr(files []ContentFile) error {
|
||||
ids := make(map[string]any)
|
||||
for _, f := range files {
|
||||
// Join series id and season number
|
||||
id := fmt.Sprintf("%d-%d", f.Id, f.SeasonNumber)
|
||||
ids[id] = nil
|
||||
}
|
||||
|
||||
g, ctx := errgroup.WithContext(context.Background())
|
||||
|
||||
// Limit concurrent goroutines
|
||||
g.SetLimit(10)
|
||||
for id := range ids {
|
||||
id := id
|
||||
g.Go(func() error {
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
default:
|
||||
}
|
||||
|
||||
parts := strings.Split(id, "-")
|
||||
if len(parts) != 2 {
|
||||
return fmt.Errorf("invalid id: %s", id)
|
||||
}
|
||||
seriesId, err := strconv.Atoi(parts[0])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
seasonNumber, err := strconv.Atoi(parts[1])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
payload := sonarrSearch{
|
||||
Name: "SeasonSearch",
|
||||
SeasonNumber: seasonNumber,
|
||||
SeriesId: seriesId,
|
||||
}
|
||||
resp, err := a.Request(http.MethodPost, "api/v3/command", payload)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to automatic search: %v", err)
|
||||
}
|
||||
if resp.StatusCode >= 300 || resp.StatusCode < 200 {
|
||||
return fmt.Errorf("failed to automatic search. Status Code: %s", resp.Status)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
if err := g.Wait(); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *Arr) searchRadarr(files []ContentFile) error {
|
||||
ids := make([]int, 0)
|
||||
for _, f := range files {
|
||||
ids = append(ids, f.Id)
|
||||
}
|
||||
payload := radarrSearch{
|
||||
Name: "MoviesSearch",
|
||||
MovieIds: ids,
|
||||
}
|
||||
resp, err := a.Request(http.MethodPost, "api/v3/command", payload)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to automatic search: %v", err)
|
||||
}
|
||||
if statusOk := strconv.Itoa(resp.StatusCode)[0] == '2'; !statusOk {
|
||||
return fmt.Errorf("failed to automatic search. Status Code: %s", resp.Status)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *Arr) SearchMissing(files []ContentFile) error {
|
||||
switch a.Type {
|
||||
case Sonarr:
|
||||
return a.searchSonarr(files)
|
||||
case Radarr:
|
||||
return a.searchRadarr(files)
|
||||
default:
|
||||
return fmt.Errorf("unknown arr type: %s", a.Type)
|
||||
}
|
||||
}
|
||||
|
||||
func (a *Arr) DeleteFiles(files []ContentFile) error {
|
||||
ids := make([]int, 0)
|
||||
for _, f := range files {
|
||||
ids = append(ids, f.FileId)
|
||||
}
|
||||
defer func() {
|
||||
// Delete files, or at least try
|
||||
for _, f := range files {
|
||||
f.Delete()
|
||||
}
|
||||
}()
|
||||
var payload interface{}
|
||||
switch a.Type {
|
||||
case Sonarr:
|
||||
payload = struct {
|
||||
EpisodeFileIds []int `json:"episodeFileIds"`
|
||||
}{
|
||||
EpisodeFileIds: ids,
|
||||
}
|
||||
_, err := a.Request(http.MethodDelete, "api/v3/episodefile/bulk", payload)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
case Radarr:
|
||||
payload = struct {
|
||||
MovieFileIds []int `json:"movieFileIds"`
|
||||
}{
|
||||
MovieFileIds: ids,
|
||||
}
|
||||
_, err := a.Request(http.MethodDelete, "api/v3/moviefile/bulk", payload)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
default:
|
||||
return fmt.Errorf("unknown arr type: %s", a.Type)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
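Not part of the diff: a sketch of how GetMedia and SearchMissing from this file might be chained for a repair pass. The media id is a placeholder.

package example

import (
	"github.com/sirrobot01/decypharr/pkg/arr"
)

func repairShow(a *arr.Arr) error {
	contents, err := a.GetMedia("123456") // placeholder tvdbId/tmdbId
	if err != nil {
		return err
	}
	for _, c := range contents {
		// Triggers a SeasonSearch (Sonarr) or MoviesSearch (Radarr) for the known files.
		if err := a.SearchMissing(c.Files); err != nil {
			return err
		}
	}
	return nil
}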
191
pkg/arr/history.go
Normal file
@@ -0,0 +1,191 @@
|
||||
package arr
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"io"
|
||||
"net/http"
|
||||
gourl "net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type HistorySchema struct {
|
||||
Page int `json:"page"`
|
||||
PageSize int `json:"pageSize"`
|
||||
SortKey string `json:"sortKey"`
|
||||
SortDirection string `json:"sortDirection"`
|
||||
TotalRecords int `json:"totalRecords"`
|
||||
Records []struct {
|
||||
ID int `json:"id"`
|
||||
DownloadID string `json:"downloadId"`
|
||||
} `json:"records"`
|
||||
}
|
||||
|
||||
type QueueResponseScheme struct {
|
||||
Page int `json:"page"`
|
||||
PageSize int `json:"pageSize"`
|
||||
SortKey string `json:"sortKey"`
|
||||
SortDirection string `json:"sortDirection"`
|
||||
TotalRecords int `json:"totalRecords"`
|
||||
Records []QueueSchema `json:"records"`
|
||||
}
|
||||
|
||||
type QueueSchema struct {
|
||||
SeriesId int `json:"seriesId"`
|
||||
EpisodeId int `json:"episodeId"`
|
||||
SeasonNumber int `json:"seasonNumber"`
|
||||
Title string `json:"title"`
|
||||
Status string `json:"status"`
|
||||
TrackedDownloadStatus string `json:"trackedDownloadStatus"`
|
||||
TrackedDownloadState string `json:"trackedDownloadState"`
|
||||
StatusMessages []struct {
|
||||
Title string `json:"title"`
|
||||
Messages []string `json:"messages"`
|
||||
} `json:"statusMessages"`
|
||||
DownloadId string `json:"downloadId"`
|
||||
Protocol string `json:"protocol"`
|
||||
DownloadClient string `json:"downloadClient"`
|
||||
DownloadClientHasPostImportCategory bool `json:"downloadClientHasPostImportCategory"`
|
||||
Indexer string `json:"indexer"`
|
||||
OutputPath string `json:"outputPath"`
|
||||
EpisodeHasFile bool `json:"episodeHasFile"`
|
||||
Id int `json:"id"`
|
||||
}
|
||||
|
||||
func (a *Arr) GetHistory(downloadId, eventType string) *HistorySchema {
|
||||
query := gourl.Values{}
|
||||
if downloadId != "" {
|
||||
query.Add("downloadId", downloadId)
|
||||
}
|
||||
query.Add("eventType", eventType)
|
||||
query.Add("pageSize", "100")
|
||||
url := "api/v3/history" + "?" + query.Encode()
|
||||
resp, err := a.Request(http.MethodGet, url, nil)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
var data *HistorySchema
|
||||
|
||||
if err = json.NewDecoder(resp.Body).Decode(&data); err != nil {
|
||||
return nil
|
||||
}
|
||||
return data
|
||||
|
||||
}
|
||||
|
||||
func (a *Arr) GetQueue() []QueueSchema {
|
||||
query := gourl.Values{}
|
||||
query.Add("page", "1")
|
||||
query.Add("pageSize", "200")
|
||||
results := make([]QueueSchema, 0)
|
||||
|
||||
for {
|
||||
url := "api/v3/queue" + "?" + query.Encode()
|
||||
resp, err := a.Request(http.MethodGet, url, nil)
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
|
||||
func() {
|
||||
defer func(Body io.ReadCloser) {
|
||||
err := Body.Close()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}(resp.Body)
|
||||
|
||||
var data QueueResponseScheme
|
||||
if err = json.NewDecoder(resp.Body).Decode(&data); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
results = append(results, data.Records...)
|
||||
|
||||
if len(results) >= data.TotalRecords {
|
||||
// We've fetched all records
|
||||
err = io.EOF // Signal to exit the loop
|
||||
return
|
||||
}
|
||||
|
||||
query.Set("page", strconv.Itoa(data.Page+1))
|
||||
}()
|
||||
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return results
|
||||
}
|
||||
|
||||
func (a *Arr) CleanupQueue() error {
|
||||
queue := a.GetQueue()
|
||||
type messedUp struct {
|
||||
id int
|
||||
episodeId int
|
||||
seasonNum int
|
||||
}
|
||||
cleanups := make(map[int][]messedUp)
|
||||
for _, q := range queue {
|
||||
isMessedUp := false
|
||||
if q.Protocol == "torrent" && q.Status == "completed" && q.TrackedDownloadStatus == "warning" && q.TrackedDownloadState == "importPending" {
|
||||
messages := q.StatusMessages
|
||||
if len(messages) > 0 {
|
||||
for _, m := range messages {
|
||||
if strings.Contains(strings.Join(m.Messages, " "), "No files found are eligible for import in") {
|
||||
isMessedUp = true
|
||||
break
|
||||
}
|
||||
if strings.Contains(m.Title, "One or more episodes expected in this release were not imported or missing from the release") {
|
||||
isMessedUp = true
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if isMessedUp {
|
||||
cleanups[q.SeriesId] = append(cleanups[q.SeriesId], messedUp{
|
||||
id: q.Id,
|
||||
episodeId: q.EpisodeId,
|
||||
seasonNum: q.SeasonNumber,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
if len(cleanups) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
queueIds := make([]int, 0)
|
||||
|
||||
for _, c := range cleanups {
|
||||
// Delete the messed up episodes from queue
|
||||
for _, m := range c {
|
||||
queueIds = append(queueIds, m.id)
|
||||
}
|
||||
}
|
||||
|
||||
// Remove the affected items from the queue in bulk
|
||||
|
||||
payload := struct {
|
||||
Ids []int `json:"ids"`
|
||||
}{
|
||||
Ids: queueIds,
|
||||
}
|
||||
|
||||
// Blocklist the hash (the release is typically incomplete), then re-search the episode
|
||||
|
||||
query := gourl.Values{}
|
||||
query.Add("removeFromClient", "true")
|
||||
query.Add("blocklist", "true")
|
||||
query.Add("skipRedownload", "false")
|
||||
query.Add("changeCategory", "false")
|
||||
url := "api/v3/queue/bulk" + "?" + query.Encode()
|
||||
|
||||
_, err := a.Request(http.MethodDelete, url, payload)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
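Not part of the diff: a sketch of driving the queue-cleanup helper above on a timer; the hourly interval is an assumption.

package example

import (
	"context"
	"time"

	"github.com/sirrobot01/decypharr/pkg/arr"
)

// cleanupLoop periodically clears stuck import-pending items from the arr queue.
func cleanupLoop(ctx context.Context, a *arr.Arr) {
	ticker := time.NewTicker(time.Hour)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			_ = a.CleanupQueue() // errors are non-fatal here; the next tick retries
		}
	}
}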
208
pkg/arr/import.go
Normal file
@@ -0,0 +1,208 @@
|
||||
package arr
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
gourl "net/url"
|
||||
"strconv"
|
||||
"time"
|
||||
)
|
||||
|
||||
type ImportResponseSchema struct {
|
||||
Path string `json:"path"`
|
||||
RelativePath string `json:"relativePath"`
|
||||
FolderName string `json:"folderName"`
|
||||
Name string `json:"name"`
|
||||
Size int `json:"size"`
|
||||
Series struct {
|
||||
Title string `json:"title"`
|
||||
SortTitle string `json:"sortTitle"`
|
||||
Status string `json:"status"`
|
||||
Ended bool `json:"ended"`
|
||||
Overview string `json:"overview"`
|
||||
Network string `json:"network"`
|
||||
AirTime string `json:"airTime"`
|
||||
Images []struct {
|
||||
CoverType string `json:"coverType"`
|
||||
RemoteUrl string `json:"remoteUrl"`
|
||||
} `json:"images"`
|
||||
OriginalLanguage struct {
|
||||
Id int `json:"id"`
|
||||
Name string `json:"name"`
|
||||
} `json:"originalLanguage"`
|
||||
Seasons []struct {
|
||||
SeasonNumber int `json:"seasonNumber"`
|
||||
Monitored bool `json:"monitored"`
|
||||
} `json:"seasons"`
|
||||
Year int `json:"year"`
|
||||
Path string `json:"path"`
|
||||
QualityProfileId int `json:"qualityProfileId"`
|
||||
SeasonFolder bool `json:"seasonFolder"`
|
||||
Monitored bool `json:"monitored"`
|
||||
MonitorNewItems string `json:"monitorNewItems"`
|
||||
UseSceneNumbering bool `json:"useSceneNumbering"`
|
||||
Runtime int `json:"runtime"`
|
||||
TvdbId int `json:"tvdbId"`
|
||||
TvRageId int `json:"tvRageId"`
|
||||
TvMazeId int `json:"tvMazeId"`
|
||||
TmdbId int `json:"tmdbId"`
|
||||
FirstAired time.Time `json:"firstAired"`
|
||||
LastAired time.Time `json:"lastAired"`
|
||||
SeriesType string `json:"seriesType"`
|
||||
CleanTitle string `json:"cleanTitle"`
|
||||
ImdbId string `json:"imdbId"`
|
||||
TitleSlug string `json:"titleSlug"`
|
||||
Certification string `json:"certification"`
|
||||
Genres []string `json:"genres"`
|
||||
Tags []interface{} `json:"tags"`
|
||||
Added time.Time `json:"added"`
|
||||
Ratings struct {
|
||||
Votes int `json:"votes"`
|
||||
Value float64 `json:"value"`
|
||||
} `json:"ratings"`
|
||||
LanguageProfileId int `json:"languageProfileId"`
|
||||
Id int `json:"id"`
|
||||
} `json:"series"`
|
||||
SeasonNumber int `json:"seasonNumber"`
|
||||
Episodes []struct {
|
||||
SeriesId int `json:"seriesId"`
|
||||
TvdbId int `json:"tvdbId"`
|
||||
EpisodeFileId int `json:"episodeFileId"`
|
||||
SeasonNumber int `json:"seasonNumber"`
|
||||
EpisodeNumber int `json:"episodeNumber"`
|
||||
Title string `json:"title"`
|
||||
AirDate string `json:"airDate"`
|
||||
AirDateUtc time.Time `json:"airDateUtc"`
|
||||
Runtime int `json:"runtime"`
|
||||
Overview string `json:"overview"`
|
||||
HasFile bool `json:"hasFile"`
|
||||
Monitored bool `json:"monitored"`
|
||||
AbsoluteEpisodeNumber int `json:"absoluteEpisodeNumber"`
|
||||
UnverifiedSceneNumbering bool `json:"unverifiedSceneNumbering"`
|
||||
Id int `json:"id"`
|
||||
FinaleType string `json:"finaleType,omitempty"`
|
||||
} `json:"episodes"`
|
||||
ReleaseGroup string `json:"releaseGroup"`
|
||||
Quality struct {
|
||||
Quality struct {
|
||||
Id int `json:"id"`
|
||||
Name string `json:"name"`
|
||||
Source string `json:"source"`
|
||||
Resolution int `json:"resolution"`
|
||||
} `json:"quality"`
|
||||
Revision struct {
|
||||
Version int `json:"version"`
|
||||
Real int `json:"real"`
|
||||
IsRepack bool `json:"isRepack"`
|
||||
} `json:"revision"`
|
||||
} `json:"quality"`
|
||||
Languages []struct {
|
||||
Id int `json:"id"`
|
||||
Name string `json:"name"`
|
||||
} `json:"languages"`
|
||||
QualityWeight int `json:"qualityWeight"`
|
||||
CustomFormats []interface{} `json:"customFormats"`
|
||||
CustomFormatScore int `json:"customFormatScore"`
|
||||
IndexerFlags int `json:"indexerFlags"`
|
||||
ReleaseType string `json:"releaseType"`
|
||||
Rejections []struct {
|
||||
Reason string `json:"reason"`
|
||||
Type string `json:"type"`
|
||||
} `json:"rejections"`
|
||||
Id int `json:"id"`
|
||||
}
|
||||
|
||||
type ManualImportRequestFile struct {
|
||||
Path string `json:"path"`
|
||||
SeriesId int `json:"seriesId"`
|
||||
SeasonNumber int `json:"seasonNumber"`
|
||||
EpisodeIds []int `json:"episodeIds"`
|
||||
Quality struct {
|
||||
Quality struct {
|
||||
Id int `json:"id"`
|
||||
Name string `json:"name"`
|
||||
Source string `json:"source"`
|
||||
Resolution int `json:"resolution"`
|
||||
} `json:"quality"`
|
||||
Revision struct {
|
||||
Version int `json:"version"`
|
||||
Real int `json:"real"`
|
||||
IsRepack bool `json:"isRepack"`
|
||||
} `json:"revision"`
|
||||
} `json:"quality"`
|
||||
Languages []struct {
|
||||
Id int `json:"id"`
|
||||
Name string `json:"name"`
|
||||
} `json:"languages"`
|
||||
ReleaseGroup string `json:"releaseGroup"`
|
||||
CustomFormats []interface{} `json:"customFormats"`
|
||||
CustomFormatScore int `json:"customFormatScore"`
|
||||
IndexerFlags int `json:"indexerFlags"`
|
||||
ReleaseType string `json:"releaseType"`
|
||||
Rejections []struct {
|
||||
Reason string `json:"reason"`
|
||||
Type string `json:"type"`
|
||||
} `json:"rejections"`
|
||||
}
|
||||
|
||||
type ManualImportRequestSchema struct {
|
||||
Name string `json:"name"`
|
||||
Files []ManualImportRequestFile `json:"files"`
|
||||
ImportMode string `json:"importMode"`
|
||||
}
|
||||
|
||||
func (a *Arr) Import(path string, seriesId int, seasons []int) (io.ReadCloser, error) {
|
||||
query := gourl.Values{}
|
||||
query.Add("folder", path)
|
||||
if seriesId != 0 {
|
||||
query.Add("seriesId", strconv.Itoa(seriesId))
|
||||
}
|
||||
url := "api/v3/manualimport" + "?" + query.Encode()
|
||||
resp, err := a.Request(http.MethodGet, url, nil)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to import, invalid file: %w", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
var data []ImportResponseSchema
|
||||
if err = json.NewDecoder(resp.Body).Decode(&data); err != nil {
|
||||
return nil, fmt.Errorf("failed to decode response: %w", err)
|
||||
}
|
||||
|
||||
var files []ManualImportRequestFile
|
||||
for _, d := range data {
|
||||
episodesIds := []int{}
|
||||
for _, e := range d.Episodes {
|
||||
episodesIds = append(episodesIds, e.Id)
|
||||
}
|
||||
file := ManualImportRequestFile{
|
||||
Path: d.Path,
|
||||
SeriesId: d.Series.Id,
|
||||
SeasonNumber: d.SeasonNumber,
|
||||
EpisodeIds: episodesIds,
|
||||
Quality: d.Quality,
|
||||
Languages: d.Languages,
|
||||
ReleaseGroup: d.ReleaseGroup,
|
||||
CustomFormats: d.CustomFormats,
|
||||
CustomFormatScore: d.CustomFormatScore,
|
||||
IndexerFlags: d.IndexerFlags,
|
||||
ReleaseType: d.ReleaseType,
|
||||
Rejections: d.Rejections,
|
||||
}
|
||||
files = append(files, file)
|
||||
}
|
||||
request := ManualImportRequestSchema{
|
||||
Name: "ManualImport",
|
||||
Files: files,
|
||||
ImportMode: "copy",
|
||||
}
|
||||
|
||||
url = "api/v3/command"
|
||||
resp, err = a.Request(http.MethodPost, url, request)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to import: %w", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
return resp.Body, nil
|
||||
}
|
||||
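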
51
pkg/arr/types.go
Normal file
@@ -0,0 +1,51 @@
package arr

import "os"

type Movie struct {
	Title         string `json:"title"`
	OriginalTitle string `json:"originalTitle"`
	Path          string `json:"path"`
	MovieFile     struct {
		MovieId      int    `json:"movieId"`
		RelativePath string `json:"relativePath"`
		Path         string `json:"path"`
		Id           int    `json:"id"`
		Size         int64  `json:"size"`
	} `json:"movieFile"`
	Id int `json:"id"`
}

type ContentFile struct {
	Name         string `json:"name"`
	Path         string `json:"path"`
	Id           int    `json:"id"`
	EpisodeId    int    `json:"showId"`
	FileId       int    `json:"fileId"`
	TargetPath   string `json:"targetPath"`
	IsSymlink    bool   `json:"isSymlink"`
	IsBroken     bool   `json:"isBroken"`
	SeasonNumber int    `json:"seasonNumber"`
	Processed    bool   `json:"processed"`
	Size         int64  `json:"size"`
}

func (file *ContentFile) Delete() {
	// This is useful when the Sonarr bulk delete fails (which happens often)
	// and we need to delete the file manually.
	_ = os.Remove(file.Path) // nolint:errcheck
}

type Content struct {
	Title string        `json:"title"`
	Id    int           `json:"id"`
	Files []ContentFile `json:"files"`
}

type seriesFile struct {
	SeriesId     int    `json:"seriesId"`
	SeasonNumber int    `json:"seasonNumber"`
	Path         string `json:"path"`
	Id           int    `json:"id"`
	Size         int64  `json:"size"`
}
@@ -1,148 +1,269 @@
|
||||
package debrid
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/anacrolix/torrent/metainfo"
|
||||
"goBlack/common"
|
||||
"log"
|
||||
"path/filepath"
|
||||
"github.com/sirrobot01/decypharr/internal/config"
|
||||
"github.com/sirrobot01/decypharr/internal/logger"
|
||||
"github.com/sirrobot01/decypharr/internal/utils"
|
||||
"github.com/sirrobot01/decypharr/pkg/arr"
|
||||
"github.com/sirrobot01/decypharr/pkg/debrid/providers/alldebrid"
|
||||
"github.com/sirrobot01/decypharr/pkg/debrid/providers/debrid_link"
|
||||
"github.com/sirrobot01/decypharr/pkg/debrid/providers/realdebrid"
|
||||
"github.com/sirrobot01/decypharr/pkg/debrid/providers/torbox"
|
||||
debridStore "github.com/sirrobot01/decypharr/pkg/debrid/store"
|
||||
"github.com/sirrobot01/decypharr/pkg/debrid/types"
|
||||
"github.com/sirrobot01/decypharr/pkg/rclone"
|
||||
"sync"
|
||||
)
|
||||
|
||||
type Service interface {
|
||||
SubmitMagnet(torrent *Torrent) (*Torrent, error)
|
||||
CheckStatus(torrent *Torrent, isSymlink bool) (*Torrent, error)
|
||||
GetDownloadLinks(torrent *Torrent) error
|
||||
DeleteTorrent(torrent *Torrent)
|
||||
IsAvailable(infohashes []string) map[string]bool
|
||||
GetMountPath() string
|
||||
GetDownloadUncached() bool
|
||||
GetTorrent(id string) (*Torrent, error)
|
||||
GetName() string
|
||||
GetLogger() *log.Logger
|
||||
}
|
||||
|
||||
type Debrid struct {
|
||||
Host string `json:"host"`
|
||||
APIKey string
|
||||
DownloadUncached bool
|
||||
client *common.RLHTTPClient
|
||||
cache *common.Cache
|
||||
MountPath string
|
||||
logger *log.Logger
|
||||
cache *debridStore.Cache // Could be nil if not using WebDAV
|
||||
client types.Client // HTTP client for making requests to the debrid service
|
||||
}
|
||||
|
||||
func NewDebrid(dc common.DebridConfig, cache *common.Cache) Service {
|
||||
func (de *Debrid) Client() types.Client {
|
||||
return de.client
|
||||
}
|
||||
|
||||
func (de *Debrid) Cache() *debridStore.Cache {
|
||||
return de.cache
|
||||
}
|
||||
|
||||
func (de *Debrid) Reset() {
|
||||
if de.cache != nil {
|
||||
de.cache.Reset()
|
||||
}
|
||||
}
|
||||
|
||||
type Storage struct {
|
||||
debrids map[string]*Debrid
|
||||
mu sync.RWMutex
|
||||
lastUsed string
|
||||
}
|
||||
|
||||
func NewStorage(rcManager *rclone.Manager) *Storage {
|
||||
cfg := config.Get()
|
||||
|
||||
_logger := logger.Default()
|
||||
|
||||
debrids := make(map[string]*Debrid)
|
||||
|
||||
bindAddress := cfg.BindAddress
|
||||
if bindAddress == "" {
|
||||
bindAddress = "localhost"
|
||||
}
|
||||
webdavUrl := fmt.Sprintf("http://%s:%s%s/webdav", bindAddress, cfg.Port, cfg.URLBase)
|
||||
|
||||
for _, dc := range cfg.Debrids {
|
||||
client, err := createDebridClient(dc)
|
||||
if err != nil {
|
||||
_logger.Error().Err(err).Str("Debrid", dc.Name).Msg("failed to connect to debrid client")
|
||||
continue
|
||||
}
|
||||
var (
|
||||
cache *debridStore.Cache
|
||||
mounter *rclone.Mount
|
||||
)
|
||||
_log := client.Logger()
|
||||
if dc.UseWebDav {
|
||||
if cfg.Rclone.Enabled && rcManager != nil {
|
||||
mounter = rclone.NewMount(dc.Name, webdavUrl, rcManager)
|
||||
}
|
||||
cache = debridStore.NewDebridCache(dc, client, mounter)
|
||||
_log.Info().Msg("Debrid Service started with WebDAV")
|
||||
} else {
|
||||
_log.Info().Msg("Debrid Service started")
|
||||
}
|
||||
debrids[dc.Name] = &Debrid{
|
||||
cache: cache,
|
||||
client: client,
|
||||
}
|
||||
}
|
||||
|
||||
d := &Storage{
|
||||
debrids: debrids,
|
||||
lastUsed: "",
|
||||
}
|
||||
return d
|
||||
}
|
||||
|
||||
func (d *Storage) Debrid(name string) *Debrid {
|
||||
d.mu.RLock()
|
||||
defer d.mu.RUnlock()
|
||||
if debrid, exists := d.debrids[name]; exists {
|
||||
return debrid
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Storage) Debrids() map[string]*Debrid {
|
||||
d.mu.RLock()
|
||||
defer d.mu.RUnlock()
|
||||
debridsCopy := make(map[string]*Debrid)
|
||||
for name, debrid := range d.debrids {
|
||||
if debrid != nil {
|
||||
debridsCopy[name] = debrid
|
||||
}
|
||||
}
|
||||
return debridsCopy
|
||||
}
|
||||
|
||||
func (d *Storage) Client(name string) types.Client {
|
||||
d.mu.RLock()
|
||||
defer d.mu.RUnlock()
|
||||
if client, exists := d.debrids[name]; exists {
|
||||
return client.client
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Storage) Reset() {
|
||||
d.mu.Lock()
|
||||
defer d.mu.Unlock()
|
||||
|
||||
// Reset all debrid clients and caches
|
||||
for _, debrid := range d.debrids {
|
||||
if debrid != nil {
|
||||
debrid.Reset()
|
||||
}
|
||||
}
|
||||
|
||||
// Reinitialize the debrids map
|
||||
d.debrids = make(map[string]*Debrid)
|
||||
d.lastUsed = ""
|
||||
}
|
||||
|
||||
func (d *Storage) Clients() map[string]types.Client {
|
||||
d.mu.RLock()
|
||||
defer d.mu.RUnlock()
|
||||
clientsCopy := make(map[string]types.Client)
|
||||
for name, debrid := range d.debrids {
|
||||
if debrid != nil && debrid.client != nil {
|
||||
clientsCopy[name] = debrid.client
|
||||
}
|
||||
}
|
||||
return clientsCopy
|
||||
}
|
||||
|
||||
func (d *Storage) Caches() map[string]*debridStore.Cache {
|
||||
d.mu.RLock()
|
||||
defer d.mu.RUnlock()
|
||||
cachesCopy := make(map[string]*debridStore.Cache)
|
||||
for name, debrid := range d.debrids {
|
||||
if debrid != nil && debrid.cache != nil {
|
||||
cachesCopy[name] = debrid.cache
|
||||
}
|
||||
}
|
||||
return cachesCopy
|
||||
}
|
||||
|
||||
func (d *Storage) FilterClients(filter func(types.Client) bool) map[string]types.Client {
|
||||
d.mu.Lock()
|
||||
defer d.mu.Unlock()
|
||||
filteredClients := make(map[string]types.Client)
|
||||
for name, client := range d.debrids {
|
||||
if client != nil && filter(client.client) {
|
||||
filteredClients[name] = client.client
|
||||
}
|
||||
}
|
||||
return filteredClients
|
||||
}
|
||||
|
||||
func createDebridClient(dc config.Debrid) (types.Client, error) {
|
||||
switch dc.Name {
|
||||
case "realdebrid":
|
||||
return NewRealDebrid(dc, cache)
|
||||
return realdebrid.New(dc)
|
||||
case "torbox":
|
||||
return torbox.New(dc)
|
||||
case "debridlink":
|
||||
return debrid_link.New(dc)
|
||||
case "alldebrid":
|
||||
return alldebrid.New(dc)
|
||||
default:
|
||||
return NewRealDebrid(dc, cache)
|
||||
return realdebrid.New(dc)
|
||||
}
|
||||
}
|
||||
|
||||
func GetTorrentInfo(filePath string) (*Torrent, error) {
|
||||
// Open and read the .torrent file
|
||||
if filepath.Ext(filePath) == ".torrent" {
|
||||
return getTorrentInfo(filePath)
|
||||
func Process(ctx context.Context, store *Storage, selectedDebrid string, magnet *utils.Magnet, a *arr.Arr, action string, overrideDownloadUncached bool) (*types.Torrent, error) {
|
||||
|
||||
debridTorrent := &types.Torrent{
|
||||
InfoHash: magnet.InfoHash,
|
||||
Magnet: magnet,
|
||||
Name: magnet.Name,
|
||||
Arr: a,
|
||||
Size: magnet.Size,
|
||||
Files: make(map[string]types.File),
|
||||
}
|
||||
|
||||
clients := store.FilterClients(func(c types.Client) bool {
|
||||
if selectedDebrid != "" && c.Name() != selectedDebrid {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
})
|
||||
|
||||
if len(clients) == 0 {
|
||||
return nil, fmt.Errorf("no debrid clients available")
|
||||
}
|
||||
|
||||
errs := make([]error, 0, len(clients))
|
||||
|
||||
// Override first, arr second, debrid third
|
||||
|
||||
if overrideDownloadUncached {
|
||||
debridTorrent.DownloadUncached = true
|
||||
} else if a.DownloadUncached != nil {
|
||||
// Arr cached is set
|
||||
debridTorrent.DownloadUncached = *a.DownloadUncached
|
||||
} else {
|
||||
return torrentFromMagnetFile(filePath)
|
||||
debridTorrent.DownloadUncached = false
|
||||
}
|
||||
|
||||
}
|
||||
for _, db := range clients {
|
||||
_logger := db.Logger()
|
||||
_logger.Info().
|
||||
Str("Debrid", db.Name()).
|
||||
Str("Arr", a.Name).
|
||||
Str("Hash", debridTorrent.InfoHash).
|
||||
Str("Name", debridTorrent.Name).
|
||||
Str("Action", action).
|
||||
Msg("Processing torrent")
|
||||
|
||||
func torrentFromMagnetFile(filePath string) (*Torrent, error) {
|
||||
magnetLink := common.OpenMagnetFile(filePath)
|
||||
magnet, err := common.GetMagnetInfo(magnetLink)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
torrent := &Torrent{
|
||||
InfoHash: magnet.InfoHash,
|
||||
Name: magnet.Name,
|
||||
Size: magnet.Size,
|
||||
Magnet: magnet,
|
||||
Filename: filePath,
|
||||
}
|
||||
return torrent, nil
|
||||
}
|
||||
|
||||
func getTorrentInfo(filePath string) (*Torrent, error) {
|
||||
mi, err := metainfo.LoadFromFile(filePath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hash := mi.HashInfoBytes()
|
||||
infoHash := hash.HexString()
|
||||
info, err := mi.UnmarshalInfo()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
magnet := &common.Magnet{
|
||||
InfoHash: infoHash,
|
||||
Name: info.Name,
|
||||
Size: info.Length,
|
||||
Link: mi.Magnet(&hash, &info).String(),
|
||||
}
|
||||
torrent := &Torrent{
|
||||
InfoHash: infoHash,
|
||||
Name: info.Name,
|
||||
Size: info.Length,
|
||||
Magnet: magnet,
|
||||
Filename: filePath,
|
||||
}
|
||||
return torrent, nil
|
||||
}
|
||||
|
||||
func GetLocalCache(infohashes []string, cache *common.Cache) ([]string, map[string]bool) {
|
||||
result := make(map[string]bool)
|
||||
hashes := make([]string, len(infohashes))
|
||||
|
||||
if len(infohashes) == 0 {
|
||||
return hashes, result
|
||||
}
|
||||
if len(infohashes) == 1 {
|
||||
if cache.Exists(infohashes[0]) {
|
||||
return hashes, map[string]bool{infohashes[0]: true}
|
||||
if !overrideDownloadUncached && a.DownloadUncached == nil {
|
||||
debridTorrent.DownloadUncached = db.GetDownloadUncached()
|
||||
}
|
||||
return infohashes, result
|
||||
}
|
||||
|
||||
cachedHashes := cache.GetMultiple(infohashes)
|
||||
for _, h := range infohashes {
|
||||
_, exists := cachedHashes[h]
|
||||
if !exists {
|
||||
hashes = append(hashes, h)
|
||||
} else {
|
||||
result[h] = true
|
||||
dbt, err := db.SubmitMagnet(debridTorrent)
|
||||
if err != nil || dbt == nil || dbt.Id == "" {
|
||||
errs = append(errs, err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
dbt.Arr = a
|
||||
_logger.Info().Str("id", dbt.Id).Msgf("Torrent: %s submitted to %s", dbt.Name, db.Name())
|
||||
store.lastUsed = db.Name()
|
||||
|
||||
return hashes, result
|
||||
}
|
||||
|
||||
func ProcessQBitTorrent(d Service, magnet *common.Magnet, arr *Arr, isSymlink bool) (*Torrent, error) {
|
||||
debridTorrent := &Torrent{
|
||||
InfoHash: magnet.InfoHash,
|
||||
Magnet: magnet,
|
||||
Name: magnet.Name,
|
||||
Arr: arr,
|
||||
Size: magnet.Size,
|
||||
}
|
||||
logger := d.GetLogger()
|
||||
logger.Printf("Torrent Hash: %s", debridTorrent.InfoHash)
|
||||
if !d.GetDownloadUncached() {
|
||||
hash, exists := d.IsAvailable([]string{debridTorrent.InfoHash})[debridTorrent.InfoHash]
|
||||
if !exists || !hash {
|
||||
return debridTorrent, fmt.Errorf("torrent: %s is not cached", debridTorrent.Name)
|
||||
} else {
|
||||
logger.Printf("Torrent: %s is cached(or downloading)", debridTorrent.Name)
|
||||
torrent, err := db.CheckStatus(dbt)
|
||||
if err != nil && torrent != nil && torrent.Id != "" {
|
||||
// Delete the torrent if it was not downloaded
|
||||
go func(id string) {
|
||||
_ = db.DeleteTorrent(id)
|
||||
}(torrent.Id)
|
||||
}
|
||||
if err != nil {
|
||||
errs = append(errs, err)
|
||||
continue
|
||||
}
|
||||
if torrent == nil {
|
||||
errs = append(errs, fmt.Errorf("torrent %s returned nil after checking status", dbt.Name))
|
||||
continue
|
||||
}
|
||||
return torrent, nil
|
||||
}
|
||||
|
||||
debridTorrent, err := d.SubmitMagnet(debridTorrent)
|
||||
if err != nil || debridTorrent.Id == "" {
|
||||
logger.Printf("Error submitting magnet: %s", err)
|
||||
return nil, err
|
||||
if len(errs) == 0 {
|
||||
return nil, fmt.Errorf("failed to process torrent: no clients available")
|
||||
}
|
||||
return d.CheckStatus(debridTorrent, isSymlink)
|
||||
joinedErrors := errors.Join(errs...)
|
||||
return nil, fmt.Errorf("failed to process torrent: %w", joinedErrors)
|
||||
}
|
||||
|
||||
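Not part of the diff: a rough sketch of calling the new Process entry point. The pkg/debrid import path, the "symlink" action string, and the empty selectedDebrid value (meaning "try every configured client") are assumptions; the Storage and Magnet are expected to come from the existing config and magnet-parsing code.

package example

import (
	"context"

	"github.com/sirrobot01/decypharr/internal/utils"
	"github.com/sirrobot01/decypharr/pkg/arr"
	"github.com/sirrobot01/decypharr/pkg/debrid"
)

func addTorrent(ctx context.Context, store *debrid.Storage, a *arr.Arr, magnet *utils.Magnet) error {
	torrent, err := debrid.Process(ctx, store, "", magnet, a, "symlink", false)
	if err != nil {
		return err
	}
	_ = torrent // hand off to the importer / symlink pipeline here
	return nil
}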
454
pkg/debrid/providers/alldebrid/alldebrid.go
Normal file
@@ -0,0 +1,454 @@
|
||||
package alldebrid
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/sirrobot01/decypharr/internal/config"
|
||||
"github.com/sirrobot01/decypharr/internal/logger"
|
||||
"github.com/sirrobot01/decypharr/internal/request"
|
||||
"github.com/sirrobot01/decypharr/internal/utils"
|
||||
"github.com/sirrobot01/decypharr/pkg/debrid/types"
|
||||
"net/http"
|
||||
gourl "net/url"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
type AllDebrid struct {
|
||||
name string
|
||||
Host string `json:"host"`
|
||||
APIKey string
|
||||
accounts *types.Accounts
|
||||
autoExpiresLinksAfter time.Duration
|
||||
DownloadUncached bool
|
||||
client *request.Client
|
||||
|
||||
MountPath string
|
||||
logger zerolog.Logger
|
||||
checkCached bool
|
||||
addSamples bool
|
||||
minimumFreeSlot int
|
||||
}
|
||||
|
||||
func (ad *AllDebrid) GetProfile() (*types.Profile, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func New(dc config.Debrid) (*AllDebrid, error) {
|
||||
rl := request.ParseRateLimit(dc.RateLimit)
|
||||
|
||||
headers := map[string]string{
|
||||
"Authorization": fmt.Sprintf("Bearer %s", dc.APIKey),
|
||||
}
|
||||
_log := logger.New(dc.Name)
|
||||
client := request.New(
|
||||
request.WithHeaders(headers),
|
||||
request.WithLogger(_log),
|
||||
request.WithRateLimiter(rl),
|
||||
request.WithProxy(dc.Proxy),
|
||||
)
|
||||
|
||||
autoExpiresLinksAfter, err := time.ParseDuration(dc.AutoExpireLinksAfter)
|
||||
if autoExpiresLinksAfter == 0 || err != nil {
|
||||
autoExpiresLinksAfter = 48 * time.Hour
|
||||
}
|
||||
return &AllDebrid{
|
||||
name: "alldebrid",
|
||||
Host: "http://api.alldebrid.com/v4.1",
|
||||
APIKey: dc.APIKey,
|
||||
accounts: types.NewAccounts(dc),
|
||||
DownloadUncached: dc.DownloadUncached,
|
||||
autoExpiresLinksAfter: autoExpiresLinksAfter,
|
||||
client: client,
|
||||
MountPath: dc.Folder,
|
||||
logger: logger.New(dc.Name),
|
||||
checkCached: dc.CheckCached,
|
||||
addSamples: dc.AddSamples,
|
||||
minimumFreeSlot: dc.MinimumFreeSlot,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (ad *AllDebrid) Name() string {
|
||||
return ad.name
|
||||
}
|
||||
|
||||
func (ad *AllDebrid) Logger() zerolog.Logger {
|
||||
return ad.logger
|
||||
}
|
||||
|
||||
func (ad *AllDebrid) IsAvailable(hashes []string) map[string]bool {
|
||||
// Check if the infohashes are available in the local cache
|
||||
result := make(map[string]bool)
|
||||
|
||||
// AllDebrid does not support checking cached infohashes, so this always returns an empty map
|
||||
return result
|
||||
}
|
||||
|
||||
func (ad *AllDebrid) SubmitMagnet(torrent *types.Torrent) (*types.Torrent, error) {
|
||||
url := fmt.Sprintf("%s/magnet/upload", ad.Host)
|
||||
query := gourl.Values{}
|
||||
query.Add("magnets[]", torrent.Magnet.Link)
|
||||
url += "?" + query.Encode()
|
||||
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
||||
resp, err := ad.client.MakeRequest(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var data UploadMagnetResponse
|
||||
err = json.Unmarshal(resp, &data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
magnets := data.Data.Magnets
|
||||
if len(magnets) == 0 {
|
||||
return nil, fmt.Errorf("error adding torrent")
|
||||
}
|
||||
magnet := magnets[0]
|
||||
torrentId := strconv.Itoa(magnet.ID)
|
||||
torrent.Id = torrentId
|
||||
|
||||
return torrent, nil
|
||||
}
|
||||
|
||||
func getAlldebridStatus(statusCode int) string {
|
||||
switch {
|
||||
case statusCode == 4:
|
||||
return "downloaded"
|
||||
case statusCode >= 0 && statusCode <= 3:
|
||||
return "downloading"
|
||||
default:
|
||||
return "error"
|
||||
}
|
||||
}
|
||||
|
||||
func (ad *AllDebrid) flattenFiles(torrentId string, files []MagnetFile, parentPath string, index *int) map[string]types.File {
|
||||
result := make(map[string]types.File)
|
||||
|
||||
cfg := config.Get()
|
||||
|
||||
for _, f := range files {
|
||||
currentPath := f.Name
|
||||
if parentPath != "" {
|
||||
currentPath = filepath.Join(parentPath, f.Name)
|
||||
}
|
||||
|
||||
if f.Elements != nil {
|
||||
// This is a folder, recurse into it
|
||||
subFiles := ad.flattenFiles(torrentId, f.Elements, currentPath, index)
|
||||
for k, v := range subFiles {
|
||||
if _, ok := result[k]; ok {
|
||||
// File already exists, use path as key
|
||||
result[v.Path] = v
|
||||
} else {
|
||||
result[k] = v
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// This is a file
|
||||
fileName := filepath.Base(f.Name)
|
||||
|
||||
// Skip sample files
|
||||
if !ad.addSamples && utils.IsSampleFile(f.Name) {
|
||||
continue
|
||||
}
|
||||
if !cfg.IsAllowedFile(fileName) {
|
||||
continue
|
||||
}
|
||||
|
||||
if !cfg.IsSizeAllowed(f.Size) {
|
||||
continue
|
||||
}
|
||||
|
||||
*index++
|
||||
file := types.File{
|
||||
TorrentId: torrentId,
|
||||
Id: strconv.Itoa(*index),
|
||||
Name: fileName,
|
||||
Size: f.Size,
|
||||
Path: currentPath,
|
||||
Link: f.Link,
|
||||
}
|
||||
result[file.Name] = file
|
||||
}
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
func (ad *AllDebrid) GetTorrent(torrentId string) (*types.Torrent, error) {
|
||||
url := fmt.Sprintf("%s/magnet/status?id=%s", ad.Host, torrentId)
|
||||
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
||||
resp, err := ad.client.MakeRequest(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var res TorrentInfoResponse
|
||||
err = json.Unmarshal(resp, &res)
|
||||
if err != nil {
|
||||
ad.logger.Error().Err(err).Msgf("Error unmarshalling torrent info")
|
||||
return nil, err
|
||||
}
|
||||
data := res.Data.Magnets
|
||||
status := getAlldebridStatus(data.StatusCode)
|
||||
name := data.Filename
|
||||
t := &types.Torrent{
|
||||
Id: strconv.Itoa(data.Id),
|
||||
Name: name,
|
||||
Status: status,
|
||||
Filename: name,
|
||||
OriginalFilename: name,
|
||||
Files: make(map[string]types.File),
|
||||
InfoHash: data.Hash,
|
||||
Debrid: ad.name,
|
||||
MountPath: ad.MountPath,
|
||||
Added: time.Unix(data.CompletionDate, 0).Format(time.RFC3339),
|
||||
}
|
||||
t.Bytes = data.Size
|
||||
t.Seeders = data.Seeders
|
||||
if status == "downloaded" {
|
||||
t.Progress = 100
|
||||
index := -1
|
||||
files := ad.flattenFiles(t.Id, data.Files, "", &index)
|
||||
t.Files = files
|
||||
} else {
|
||||
t.Progress = float64(data.Downloaded) / float64(data.Size) * 100
|
||||
t.Speed = data.DownloadSpeed
|
||||
}
|
||||
return t, nil
|
||||
}
|
||||
|
||||
func (ad *AllDebrid) UpdateTorrent(t *types.Torrent) error {
|
||||
url := fmt.Sprintf("%s/magnet/status?id=%s", ad.Host, t.Id)
|
||||
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
||||
resp, err := ad.client.MakeRequest(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var res TorrentInfoResponse
|
||||
err = json.Unmarshal(resp, &res)
|
||||
if err != nil {
|
||||
ad.logger.Error().Err(err).Msgf("Error unmarshalling torrent info")
|
||||
return err
|
||||
}
|
||||
data := res.Data.Magnets
|
||||
status := getAlldebridStatus(data.StatusCode)
|
||||
name := data.Filename
|
||||
t.Name = name
|
||||
t.Status = status
|
||||
t.Filename = name
|
||||
t.OriginalFilename = name
|
||||
t.Folder = name
|
||||
t.MountPath = ad.MountPath
|
||||
t.Debrid = ad.name
|
||||
t.Bytes = data.Size
|
||||
t.Seeders = data.Seeders
|
||||
t.Added = time.Unix(data.CompletionDate, 0).Format(time.RFC3339)
|
||||
if status == "downloaded" {
|
||||
t.Progress = 100
|
||||
index := -1
|
||||
files := ad.flattenFiles(t.Id, data.Files, "", &index)
|
||||
t.Files = files
|
||||
} else {
|
||||
t.Progress = float64(data.Downloaded) / float64(data.Size) * 100
|
||||
t.Speed = data.DownloadSpeed
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ad *AllDebrid) CheckStatus(torrent *types.Torrent) (*types.Torrent, error) {
|
||||
for {
|
||||
err := ad.UpdateTorrent(torrent)
|
||||
|
||||
if err != nil || torrent == nil {
|
||||
return torrent, err
|
||||
}
|
||||
status := torrent.Status
|
||||
if status == "downloaded" {
|
||||
ad.logger.Info().Msgf("Torrent: %s downloaded", torrent.Name)
|
||||
return torrent, nil
|
||||
} else if utils.Contains(ad.GetDownloadingStatus(), status) {
|
||||
if !torrent.DownloadUncached {
|
||||
return torrent, fmt.Errorf("torrent: %s not cached", torrent.Name)
|
||||
}
|
||||
// Break out of the loop if the torrent is downloading.
|
||||
// This is necessary to prevent an infinite loop since we moved to synchronous downloading and asynchronous processing
|
||||
return torrent, nil
|
||||
} else {
|
||||
return torrent, fmt.Errorf("torrent: %s has error", torrent.Name)
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
func (ad *AllDebrid) DeleteTorrent(torrentId string) error {
|
||||
url := fmt.Sprintf("%s/magnet/delete?id=%s", ad.Host, torrentId)
|
||||
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
||||
if _, err := ad.client.MakeRequest(req); err != nil {
|
||||
return err
|
||||
}
|
||||
ad.logger.Info().Msgf("Torrent %s deleted from AD", torrentId)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ad *AllDebrid) GetFileDownloadLinks(t *types.Torrent) error {
|
||||
filesCh := make(chan types.File, len(t.Files))
|
||||
linksCh := make(chan *types.DownloadLink, len(t.Files))
|
||||
errCh := make(chan error, len(t.Files))
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(len(t.Files))
|
||||
for _, file := range t.Files {
|
||||
go func(file types.File) {
|
||||
defer wg.Done()
|
||||
link, err := ad.GetDownloadLink(t, &file)
|
||||
if err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
if link == nil {
|
||||
errCh <- fmt.Errorf("download link is empty")
|
||||
return
|
||||
}
|
||||
linksCh <- link
|
||||
file.DownloadLink = link
|
||||
filesCh <- file
|
||||
}(file)
|
||||
}
|
||||
go func() {
|
||||
wg.Wait()
|
||||
close(filesCh)
|
||||
close(linksCh)
|
||||
close(errCh)
|
||||
}()
|
||||
files := make(map[string]types.File, len(t.Files))
|
||||
for file := range filesCh {
|
||||
files[file.Name] = file
|
||||
}
|
||||
|
||||
// Collect download links
|
||||
links := make(map[string]*types.DownloadLink, len(t.Files))
|
||||
|
||||
for link := range linksCh {
|
||||
if link == nil {
|
||||
continue
|
||||
}
|
||||
links[link.Link] = link
|
||||
}
|
||||
// Update the files with download links
|
||||
ad.accounts.SetDownloadLinks(links)
|
||||
|
||||
// Check for errors
|
||||
for err := range errCh {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
t.Files = files
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ad *AllDebrid) GetDownloadLink(t *types.Torrent, file *types.File) (*types.DownloadLink, error) {
|
||||
url := fmt.Sprintf("%s/link/unlock", ad.Host)
|
||||
query := gourl.Values{}
|
||||
query.Add("link", file.Link)
|
||||
url += "?" + query.Encode()
|
||||
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
||||
resp, err := ad.client.MakeRequest(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var data DownloadLink
|
||||
if err = json.Unmarshal(resp, &data); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if data.Error != nil {
|
||||
return nil, fmt.Errorf("error getting download link: %s", data.Error.Message)
|
||||
}
|
||||
link := data.Data.Link
|
||||
if link == "" {
|
||||
return nil, fmt.Errorf("download link is empty")
|
||||
}
|
||||
now := time.Now()
|
||||
return &types.DownloadLink{
|
||||
Link: file.Link,
|
||||
DownloadLink: link,
|
||||
Id: data.Data.Id,
|
||||
Size: file.Size,
|
||||
Filename: file.Name,
|
||||
Generated: now,
|
||||
ExpiresAt: now.Add(ad.autoExpiresLinksAfter),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (ad *AllDebrid) GetTorrents() ([]*types.Torrent, error) {
|
||||
url := fmt.Sprintf("%s/magnet/status?status=ready", ad.Host)
|
||||
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
||||
resp, err := ad.client.MakeRequest(req)
|
||||
torrents := make([]*types.Torrent, 0)
|
||||
if err != nil {
|
||||
return torrents, err
|
||||
}
|
||||
var res TorrentsListResponse
|
||||
err = json.Unmarshal(resp, &res)
|
||||
if err != nil {
|
||||
ad.logger.Error().Err(err).Msgf("Error unmarshalling torrent info")
|
||||
return torrents, err
|
||||
}
|
||||
for _, magnet := range res.Data.Magnets {
|
||||
torrents = append(torrents, &types.Torrent{
|
||||
Id: strconv.Itoa(magnet.Id),
|
||||
Name: magnet.Filename,
|
||||
Bytes: magnet.Size,
|
||||
Status: getAlldebridStatus(magnet.StatusCode),
|
||||
Filename: magnet.Filename,
|
||||
OriginalFilename: magnet.Filename,
|
||||
Files: make(map[string]types.File),
|
||||
InfoHash: magnet.Hash,
|
||||
Debrid: ad.name,
|
||||
MountPath: ad.MountPath,
|
||||
Added: time.Unix(magnet.CompletionDate, 0).Format(time.RFC3339),
|
||||
})
|
||||
}
|
||||
|
||||
return torrents, nil
|
||||
}
|
||||
|
||||
func (ad *AllDebrid) GetDownloadLinks() (map[string]*types.DownloadLink, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (ad *AllDebrid) GetDownloadingStatus() []string {
|
||||
return []string{"downloading"}
|
||||
}
|
||||
|
||||
func (ad *AllDebrid) GetDownloadUncached() bool {
|
||||
return ad.DownloadUncached
|
||||
}
|
||||
|
||||
func (ad *AllDebrid) CheckLink(link string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ad *AllDebrid) GetMountPath() string {
|
||||
return ad.MountPath
|
||||
}
|
||||
|
||||
func (ad *AllDebrid) DeleteDownloadLink(linkId string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ad *AllDebrid) GetAvailableSlots() (int, error) {
|
||||
// This function is a placeholder for AllDebrid
|
||||
//TODO: Implement the logic to check available slots for AllDebrid
|
||||
return 0, fmt.Errorf("GetAvailableSlots not implemented for AllDebrid")
|
||||
}
|
||||
|
||||
func (ad *AllDebrid) Accounts() *types.Accounts {
|
||||
return ad.accounts
|
||||
}
|
||||
114
pkg/debrid/providers/alldebrid/types.go
Normal file
@@ -0,0 +1,114 @@
|
||||
package alldebrid
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
type errorResponse struct {
|
||||
Code string `json:"code"`
|
||||
Message string `json:"message"`
|
||||
}
|
||||
|
||||
type MagnetFile struct {
|
||||
Name string `json:"n"`
|
||||
Size int64 `json:"s"`
|
||||
Link string `json:"l"`
|
||||
Elements []MagnetFile `json:"e"`
|
||||
}
|
||||
type magnetInfo struct {
|
||||
Id int `json:"id"`
|
||||
Filename string `json:"filename"`
|
||||
Size int64 `json:"size"`
|
||||
Hash string `json:"hash"`
|
||||
Status string `json:"status"`
|
||||
StatusCode int `json:"statusCode"`
|
||||
UploadDate int64 `json:"uploadDate"`
|
||||
Downloaded int64 `json:"downloaded"`
|
||||
Uploaded int64 `json:"uploaded"`
|
||||
DownloadSpeed int64 `json:"downloadSpeed"`
|
||||
UploadSpeed int64 `json:"uploadSpeed"`
|
||||
Seeders int `json:"seeders"`
|
||||
CompletionDate int64 `json:"completionDate"`
|
||||
Type string `json:"type"`
|
||||
Notified bool `json:"notified"`
|
||||
Version int `json:"version"`
|
||||
NbLinks int `json:"nbLinks"`
|
||||
Files []MagnetFile `json:"files"`
|
||||
}
|
||||
|
||||
type Magnets []magnetInfo
|
||||
|
||||
type TorrentInfoResponse struct {
|
||||
Status string `json:"status"`
|
||||
Data struct {
|
||||
Magnets magnetInfo `json:"magnets"`
|
||||
} `json:"data"`
|
||||
Error *errorResponse `json:"error"`
|
||||
}
|
||||
|
||||
type TorrentsListResponse struct {
|
||||
Status string `json:"status"`
|
||||
Data struct {
|
||||
Magnets Magnets `json:"magnets"`
|
||||
} `json:"data"`
|
||||
Error *errorResponse `json:"error"`
|
||||
}
|
||||
|
||||
type UploadMagnetResponse struct {
|
||||
Status string `json:"status"`
|
||||
Data struct {
|
||||
Magnets []struct {
|
||||
Magnet string `json:"magnet"`
|
||||
Hash string `json:"hash"`
|
||||
Name string `json:"name"`
|
||||
FilenameOriginal string `json:"filename_original"`
|
||||
Size int64 `json:"size"`
|
||||
Ready bool `json:"ready"`
|
||||
ID int `json:"id"`
|
||||
} `json:"magnets"`
|
||||
}
|
||||
Error *errorResponse `json:"error"`
|
||||
}
|
||||
|
||||
type DownloadLink struct {
|
||||
Status string `json:"status"`
|
||||
Data struct {
|
||||
Link string `json:"link"`
|
||||
Host string `json:"host"`
|
||||
Filename string `json:"filename"`
|
||||
Streaming []interface{} `json:"streaming"`
|
||||
Paws bool `json:"paws"`
|
||||
Filesize int `json:"filesize"`
|
||||
Id string `json:"id"`
|
||||
Path []struct {
|
||||
Name string `json:"n"`
|
||||
Size int `json:"s"`
|
||||
} `json:"path"`
|
||||
} `json:"data"`
|
||||
Error *errorResponse `json:"error"`
|
||||
}
|
||||
|
||||
// UnmarshalJSON implements custom unmarshaling for Magnets type
|
||||
// It can handle both an array of magnetInfo objects or a map with string keys.
|
||||
// If the input is an array, it will be unmarshaled directly into the Magnets slice.
|
||||
// If the input is a map, it will extract the values and append them to the Magnets slice.
|
||||
// If the input is neither, it will return an error.
|
||||
func (m *Magnets) UnmarshalJSON(data []byte) error {
|
||||
// Try to unmarshal as array
|
||||
var arr []magnetInfo
|
||||
if err := json.Unmarshal(data, &arr); err == nil {
|
||||
*m = arr
|
||||
return nil
|
||||
}
|
||||
|
||||
// Try to unmarshal as map
|
||||
var obj map[string]magnetInfo
|
||||
if err := json.Unmarshal(data, &obj); err == nil {
|
||||
for _, v := range obj {
|
||||
*m = append(*m, v)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("magnets: unsupported JSON format")
|
||||
}
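The custom decoder above tolerates both response shapes the AllDebrid API may return for magnets. A small, self-contained sketch (types trimmed to two fields, not the real structs) showing how both an array and a keyed object decode to the same slice:

package main

import (
	"encoding/json"
	"fmt"
)

type magnet struct {
	Id       int    `json:"id"`
	Filename string `json:"filename"`
}

type magnets []magnet

// UnmarshalJSON accepts either a JSON array or an object keyed by arbitrary strings.
func (m *magnets) UnmarshalJSON(data []byte) error {
	var arr []magnet
	if err := json.Unmarshal(data, &arr); err == nil {
		*m = arr
		return nil
	}
	var obj map[string]magnet
	if err := json.Unmarshal(data, &obj); err == nil {
		for _, v := range obj {
			*m = append(*m, v)
		}
		return nil
	}
	return fmt.Errorf("magnets: unsupported JSON format")
}

func main() {
	var a, b magnets
	_ = json.Unmarshal([]byte(`[{"id":1,"filename":"x"}]`), &a)
	_ = json.Unmarshal([]byte(`{"7":{"id":7,"filename":"y"}}`), &b)
	fmt.Println(a, b) // both decode to a slice of magnets
}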
|
||||
481
pkg/debrid/providers/debrid_link/debrid_link.go
Normal file
@@ -0,0 +1,481 @@
|
||||
package debrid_link
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/sirrobot01/decypharr/internal/config"
|
||||
"github.com/sirrobot01/decypharr/internal/logger"
|
||||
"github.com/sirrobot01/decypharr/internal/request"
|
||||
"github.com/sirrobot01/decypharr/internal/utils"
|
||||
"github.com/sirrobot01/decypharr/pkg/debrid/types"
|
||||
"time"
|
||||
|
||||
"net/http"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type DebridLink struct {
|
||||
name string
|
||||
Host string `json:"host"`
|
||||
APIKey string
|
||||
accounts *types.Accounts
|
||||
DownloadUncached bool
|
||||
client *request.Client
|
||||
|
||||
autoExpiresLinksAfter time.Duration
|
||||
|
||||
MountPath string
|
||||
logger zerolog.Logger
|
||||
checkCached bool
|
||||
addSamples bool
|
||||
}
|
||||
|
||||
func New(dc config.Debrid) (*DebridLink, error) {
|
||||
rl := request.ParseRateLimit(dc.RateLimit)
|
||||
|
||||
headers := map[string]string{
|
||||
"Authorization": fmt.Sprintf("Bearer %s", dc.APIKey),
|
||||
"Content-Type": "application/json",
|
||||
}
|
||||
_log := logger.New(dc.Name)
|
||||
client := request.New(
|
||||
request.WithHeaders(headers),
|
||||
request.WithLogger(_log),
|
||||
request.WithRateLimiter(rl),
|
||||
request.WithProxy(dc.Proxy),
|
||||
)
|
||||
|
||||
autoExpiresLinksAfter, err := time.ParseDuration(dc.AutoExpireLinksAfter)
|
||||
if autoExpiresLinksAfter == 0 || err != nil {
|
||||
autoExpiresLinksAfter = 48 * time.Hour
|
||||
}
|
||||
return &DebridLink{
|
||||
name: "debridlink",
|
||||
Host: "https://debrid-link.com/api/v2",
|
||||
APIKey: dc.APIKey,
|
||||
accounts: types.NewAccounts(dc),
|
||||
DownloadUncached: dc.DownloadUncached,
|
||||
autoExpiresLinksAfter: autoExpiresLinksAfter,
|
||||
client: client,
|
||||
MountPath: dc.Folder,
|
||||
logger: logger.New(dc.Name),
|
||||
checkCached: dc.CheckCached,
|
||||
addSamples: dc.AddSamples,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (dl *DebridLink) GetProfile() (*types.Profile, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (dl *DebridLink) Name() string {
|
||||
return dl.name
|
||||
}
|
||||
|
||||
func (dl *DebridLink) Logger() zerolog.Logger {
|
||||
return dl.logger
|
||||
}
|
||||
|
||||
func (dl *DebridLink) IsAvailable(hashes []string) map[string]bool {
|
||||
// Check which of the given infohashes are cached on the provider
|
||||
result := make(map[string]bool)
|
||||
|
||||
// Divide hashes into groups of 100
|
||||
for i := 0; i < len(hashes); i += 100 {
|
||||
end := i + 100
|
||||
if end > len(hashes) {
|
||||
end = len(hashes)
|
||||
}
|
||||
|
||||
// Filter out empty strings
|
||||
validHashes := make([]string, 0, end-i)
|
||||
for _, hash := range hashes[i:end] {
|
||||
if hash != "" {
|
||||
validHashes = append(validHashes, hash)
|
||||
}
|
||||
}
|
||||
|
||||
// If no valid hashes in this batch, continue to the next batch
|
||||
if len(validHashes) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
hashStr := strings.Join(validHashes, ",")
|
||||
url := fmt.Sprintf("%s/seedbox/cached/%s", dl.Host, hashStr)
|
||||
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
||||
resp, err := dl.client.MakeRequest(req)
|
||||
if err != nil {
|
||||
dl.logger.Error().Err(err).Msgf("Error checking availability")
|
||||
return result
|
||||
}
|
||||
var data AvailableResponse
|
||||
err = json.Unmarshal(resp, &data)
|
||||
if err != nil {
|
||||
dl.logger.Error().Err(err).Msgf("Error marshalling availability")
|
||||
return result
|
||||
}
|
||||
if data.Value == nil {
|
||||
return result
|
||||
}
|
||||
value := *data.Value
|
||||
for _, h := range hashes[i:end] {
|
||||
_, exists := value[h]
|
||||
if exists {
|
||||
result[h] = true
|
||||
}
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func (dl *DebridLink) GetTorrent(torrentId string) (*types.Torrent, error) {
|
||||
url := fmt.Sprintf("%s/seedbox/%s", dl.Host, torrentId)
|
||||
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
||||
resp, err := dl.client.MakeRequest(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var res torrentInfo
|
||||
err = json.Unmarshal(resp, &res)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !res.Success || res.Value == nil {
|
||||
return nil, fmt.Errorf("error getting torrent")
|
||||
}
|
||||
data := *res.Value
|
||||
|
||||
if len(data) == 0 {
|
||||
return nil, fmt.Errorf("torrent not found")
|
||||
}
|
||||
t := data[0]
|
||||
name := utils.RemoveInvalidChars(t.Name)
|
||||
torrent := &types.Torrent{
|
||||
Id: t.ID,
|
||||
Name: name,
|
||||
Bytes: t.TotalSize,
|
||||
Status: "downloaded",
|
||||
Filename: name,
|
||||
OriginalFilename: name,
|
||||
MountPath: dl.MountPath,
|
||||
Debrid: dl.name,
|
||||
Added: time.Unix(t.Created, 0).Format(time.RFC3339),
Files: make(map[string]types.File),
|
||||
}
|
||||
cfg := config.Get()
|
||||
for _, f := range t.Files {
|
||||
if !cfg.IsSizeAllowed(f.Size) {
|
||||
continue
|
||||
}
|
||||
file := types.File{
|
||||
TorrentId: t.ID,
|
||||
Id: f.ID,
|
||||
Name: f.Name,
|
||||
Size: f.Size,
|
||||
Path: f.Name,
|
||||
Link: f.DownloadURL,
|
||||
}
|
||||
torrent.Files[file.Name] = file
|
||||
}
|
||||
|
||||
return torrent, nil
|
||||
}
|
||||
|
||||
func (dl *DebridLink) UpdateTorrent(t *types.Torrent) error {
|
||||
url := fmt.Sprintf("%s/seedbox/list?ids=%s", dl.Host, t.Id)
|
||||
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
||||
resp, err := dl.client.MakeRequest(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var res torrentInfo
|
||||
err = json.Unmarshal(resp, &res)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !res.Success {
|
||||
return fmt.Errorf("error getting torrent")
|
||||
}
|
||||
if res.Value == nil {
|
||||
return fmt.Errorf("torrent not found")
|
||||
}
|
||||
dt := *res.Value
|
||||
|
||||
if len(dt) == 0 {
|
||||
return fmt.Errorf("torrent not found")
|
||||
}
|
||||
data := dt[0]
|
||||
status := "downloading"
|
||||
if data.Status == 100 {
|
||||
status = "downloaded"
|
||||
}
|
||||
name := utils.RemoveInvalidChars(data.Name)
|
||||
t.Id = data.ID
|
||||
t.Name = name
|
||||
t.Bytes = data.TotalSize
|
||||
t.Folder = name
|
||||
t.Progress = data.DownloadPercent
|
||||
t.Status = status
|
||||
t.Speed = data.DownloadSpeed
|
||||
t.Seeders = data.PeersConnected
|
||||
t.Filename = name
|
||||
t.OriginalFilename = name
|
||||
t.Added = time.Unix(data.Created, 0).Format(time.RFC3339)
|
||||
cfg := config.Get()
|
||||
links := make(map[string]*types.DownloadLink)
|
||||
now := time.Now()
|
||||
for _, f := range data.Files {
|
||||
if !cfg.IsSizeAllowed(f.Size) {
|
||||
continue
|
||||
}
|
||||
file := types.File{
|
||||
TorrentId: t.Id,
|
||||
Id: f.ID,
|
||||
Name: f.Name,
|
||||
Size: f.Size,
|
||||
Path: f.Name,
|
||||
Link: f.DownloadURL,
|
||||
}
|
||||
link := &types.DownloadLink{
|
||||
Filename: f.Name,
|
||||
Link: f.DownloadURL,
|
||||
DownloadLink: f.DownloadURL,
|
||||
Generated: now,
|
||||
ExpiresAt: now.Add(dl.autoExpiresLinksAfter),
|
||||
}
|
||||
links[file.Link] = link
|
||||
file.DownloadLink = link
|
||||
t.Files[f.Name] = file
|
||||
}
|
||||
|
||||
dl.accounts.SetDownloadLinks(links)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (dl *DebridLink) SubmitMagnet(t *types.Torrent) (*types.Torrent, error) {
|
||||
url := fmt.Sprintf("%s/seedbox/add", dl.Host)
|
||||
payload := map[string]string{"url": t.Magnet.Link}
|
||||
jsonPayload, _ := json.Marshal(payload)
|
||||
req, _ := http.NewRequest(http.MethodPost, url, bytes.NewBuffer(jsonPayload))
|
||||
resp, err := dl.client.MakeRequest(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var res SubmitTorrentInfo
|
||||
err = json.Unmarshal(resp, &res)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !res.Success || res.Value == nil {
|
||||
return nil, fmt.Errorf("error adding torrent")
|
||||
}
|
||||
data := *res.Value
|
||||
status := "downloading"
|
||||
name := utils.RemoveInvalidChars(data.Name)
|
||||
t.Id = data.ID
|
||||
t.Name = name
|
||||
t.Bytes = data.TotalSize
|
||||
t.Folder = name
|
||||
t.Progress = data.DownloadPercent
|
||||
t.Status = status
|
||||
t.Speed = data.DownloadSpeed
|
||||
t.Seeders = data.PeersConnected
|
||||
t.Filename = name
|
||||
t.OriginalFilename = name
|
||||
t.MountPath = dl.MountPath
|
||||
t.Debrid = dl.name
|
||||
t.Added = time.Unix(data.Created, 0).Format(time.RFC3339)
|
||||
|
||||
links := make(map[string]*types.DownloadLink)
|
||||
now := time.Now()
|
||||
for _, f := range data.Files {
|
||||
file := types.File{
|
||||
TorrentId: t.Id,
|
||||
Id: f.ID,
|
||||
Name: f.Name,
|
||||
Size: f.Size,
|
||||
Path: f.Name,
|
||||
Link: f.DownloadURL,
|
||||
Generated: now,
|
||||
}
|
||||
link := &types.DownloadLink{
|
||||
Filename: f.Name,
|
||||
Link: f.DownloadURL,
|
||||
DownloadLink: f.DownloadURL,
|
||||
Generated: now,
|
||||
ExpiresAt: now.Add(dl.autoExpiresLinksAfter),
|
||||
}
|
||||
links[file.Link] = link
|
||||
file.DownloadLink = link
|
||||
t.Files[f.Name] = file
|
||||
}
|
||||
|
||||
dl.accounts.SetDownloadLinks(links)
|
||||
|
||||
return t, nil
|
||||
}
|
||||
|
||||
func (dl *DebridLink) CheckStatus(torrent *types.Torrent) (*types.Torrent, error) {
|
||||
for {
|
||||
err := dl.UpdateTorrent(torrent)
|
||||
if err != nil || torrent == nil {
|
||||
return torrent, err
|
||||
}
|
||||
status := torrent.Status
|
||||
if status == "downloaded" {
|
||||
dl.logger.Info().Msgf("Torrent: %s downloaded", torrent.Name)
|
||||
return torrent, nil
|
||||
} else if utils.Contains(dl.GetDownloadingStatus(), status) {
|
||||
if !torrent.DownloadUncached {
|
||||
return torrent, fmt.Errorf("torrent: %s not cached", torrent.Name)
|
||||
}
|
||||
// Break out of the loop if the torrent is downloading.
|
||||
// This is necessary to prevent an infinite loop since downloading is synchronous and processing is asynchronous
|
||||
return torrent, nil
|
||||
} else {
|
||||
return torrent, fmt.Errorf("torrent: %s has error", torrent.Name)
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
func (dl *DebridLink) DeleteTorrent(torrentId string) error {
|
||||
url := fmt.Sprintf("%s/seedbox/%s/remove", dl.Host, torrentId)
|
||||
req, _ := http.NewRequest(http.MethodDelete, url, nil)
|
||||
if _, err := dl.client.MakeRequest(req); err != nil {
|
||||
return err
|
||||
}
|
||||
dl.logger.Info().Msgf("Torrent: %s deleted from DebridLink", torrentId)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (dl *DebridLink) GetFileDownloadLinks(t *types.Torrent) error {
|
||||
// Download links are already generated
|
||||
return nil
|
||||
}
|
||||
|
||||
func (dl *DebridLink) GetDownloadLinks() (map[string]*types.DownloadLink, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (dl *DebridLink) GetDownloadLink(t *types.Torrent, file *types.File) (*types.DownloadLink, error) {
|
||||
return dl.accounts.GetDownloadLink(file.Link)
|
||||
}
|
||||
|
||||
func (dl *DebridLink) GetDownloadingStatus() []string {
|
||||
return []string{"downloading"}
|
||||
}
|
||||
|
||||
func (dl *DebridLink) GetDownloadUncached() bool {
|
||||
return dl.DownloadUncached
|
||||
}
|
||||
|
||||
func (dl *DebridLink) GetTorrents() ([]*types.Torrent, error) {
|
||||
page := 0
|
||||
perPage := 100
|
||||
torrents := make([]*types.Torrent, 0)
|
||||
for {
|
||||
t, err := dl.getTorrents(page, perPage)
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
if len(t) == 0 {
|
||||
break
|
||||
}
|
||||
torrents = append(torrents, t...)
|
||||
page++
|
||||
}
|
||||
return torrents, nil
|
||||
}
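GetTorrents pages through /seedbox/list until a page comes back empty, accumulating as it goes. A small sketch of that pagination loop with a stand-in page fetcher in place of dl.getTorrents:

package main

import "fmt"

// fetchPage is a stand-in for dl.getTorrents(page, perPage).
func fetchPage(page, perPage int) ([]string, error) {
	all := []string{"t1", "t2", "t3", "t4", "t5"}
	start := page * perPage
	if start >= len(all) {
		return nil, nil
	}
	end := start + perPage
	if end > len(all) {
		end = len(all)
	}
	return all[start:end], nil
}

func main() {
	const perPage = 2
	var torrents []string
	for page := 0; ; page++ {
		batch, err := fetchPage(page, perPage)
		if err != nil || len(batch) == 0 {
			break // stop on error or an empty page, like GetTorrents above
		}
		torrents = append(torrents, batch...)
	}
	fmt.Println(torrents) // [t1 t2 t3 t4 t5]
}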
|
||||
|
||||
func (dl *DebridLink) getTorrents(page, perPage int) ([]*types.Torrent, error) {
|
||||
url := fmt.Sprintf("%s/seedbox/list?page=%d&perPage=%d", dl.Host, page, perPage)
|
||||
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
||||
resp, err := dl.client.MakeRequest(req)
|
||||
torrents := make([]*types.Torrent, 0)
|
||||
if err != nil {
|
||||
return torrents, err
|
||||
}
|
||||
var res torrentInfo
|
||||
err = json.Unmarshal(resp, &res)
|
||||
if err != nil {
|
||||
dl.logger.Error().Err(err).Msgf("Error unmarshalling torrent info")
|
||||
return torrents, err
|
||||
}
|
||||
|
||||
if res.Value == nil {
return torrents, nil
}
data := *res.Value
|
||||
links := make(map[string]*types.DownloadLink)
|
||||
|
||||
if len(data) == 0 {
|
||||
return torrents, nil
|
||||
}
|
||||
for _, t := range data {
|
||||
if t.Status != 100 {
|
||||
continue
|
||||
}
|
||||
torrent := &types.Torrent{
|
||||
Id: t.ID,
|
||||
Name: t.Name,
|
||||
Bytes: t.TotalSize,
|
||||
Status: "downloaded",
|
||||
Filename: t.Name,
|
||||
OriginalFilename: t.Name,
|
||||
InfoHash: t.HashString,
|
||||
Files: make(map[string]types.File),
|
||||
Debrid: dl.name,
|
||||
MountPath: dl.MountPath,
|
||||
Added: time.Unix(t.Created, 0).Format(time.RFC3339),
|
||||
}
|
||||
cfg := config.Get()
|
||||
now := time.Now()
|
||||
for _, f := range t.Files {
|
||||
if !cfg.IsSizeAllowed(f.Size) {
|
||||
continue
|
||||
}
|
||||
file := types.File{
|
||||
TorrentId: torrent.Id,
|
||||
Id: f.ID,
|
||||
Name: f.Name,
|
||||
Size: f.Size,
|
||||
Path: f.Name,
|
||||
Link: f.DownloadURL,
|
||||
}
|
||||
link := &types.DownloadLink{
|
||||
Filename: f.Name,
|
||||
Link: f.DownloadURL,
|
||||
DownloadLink: f.DownloadURL,
|
||||
Generated: now,
|
||||
ExpiresAt: now.Add(dl.autoExpiresLinksAfter),
|
||||
}
|
||||
links[file.Link] = link
|
||||
file.DownloadLink = link
|
||||
torrent.Files[f.Name] = file
|
||||
}
|
||||
torrents = append(torrents, torrent)
|
||||
}
|
||||
dl.accounts.SetDownloadLinks(links)
|
||||
|
||||
return torrents, nil
|
||||
}
|
||||
|
||||
func (dl *DebridLink) CheckLink(link string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (dl *DebridLink) GetMountPath() string {
|
||||
return dl.MountPath
|
||||
}
|
||||
|
||||
func (dl *DebridLink) DeleteDownloadLink(linkId string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (dl *DebridLink) GetAvailableSlots() (int, error) {
|
||||
//TODO: Implement the logic to check available slots for DebridLink
|
||||
return 0, fmt.Errorf("GetAvailableSlots not implemented for DebridLink")
|
||||
}
|
||||
|
||||
func (dl *DebridLink) Accounts() *types.Accounts {
|
||||
return dl.accounts
|
||||
}
|
||||
45
pkg/debrid/providers/debrid_link/types.go
Normal file
@@ -0,0 +1,45 @@
|
||||
package debrid_link
|
||||
|
||||
type APIResponse[T any] struct {
|
||||
Success bool `json:"success"`
|
||||
Value *T `json:"value"` // Use pointer to allow nil
|
||||
}
|
||||
|
||||
type AvailableResponse APIResponse[map[string]map[string]struct {
|
||||
Name string `json:"name"`
|
||||
HashString string `json:"hashString"`
|
||||
Files []struct {
|
||||
Name string `json:"name"`
|
||||
Size int `json:"size"`
|
||||
} `json:"files"`
|
||||
}]
|
||||
|
||||
type _torrentInfo struct {
|
||||
ID string `json:"id"`
|
||||
Name string `json:"name"`
|
||||
HashString string `json:"hashString"`
|
||||
UploadRatio float64 `json:"uploadRatio"`
|
||||
ServerID string `json:"serverId"`
|
||||
Wait bool `json:"wait"`
|
||||
PeersConnected int `json:"peersConnected"`
|
||||
Status int `json:"status"`
|
||||
TotalSize int64 `json:"totalSize"`
|
||||
Files []struct {
|
||||
ID string `json:"id"`
|
||||
Name string `json:"name"`
|
||||
DownloadURL string `json:"downloadUrl"`
|
||||
Size int64 `json:"size"`
|
||||
DownloadPercent int `json:"downloadPercent"`
|
||||
} `json:"files"`
|
||||
Trackers []struct {
|
||||
Announce string `json:"announce"`
|
||||
} `json:"trackers"`
|
||||
Created int64 `json:"created"`
|
||||
DownloadPercent float64 `json:"downloadPercent"`
|
||||
DownloadSpeed int64 `json:"downloadSpeed"`
|
||||
UploadSpeed int64 `json:"uploadSpeed"`
|
||||
}
|
||||
|
||||
type torrentInfo APIResponse[[]_torrentInfo]
|
||||
|
||||
type SubmitTorrentInfo APIResponse[_torrentInfo]
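APIResponse[T] keeps Value as a pointer so a missing or null value stays distinguishable from the zero value of T. A hedged standalone sketch of the same generic-envelope idea (field names match the pattern above, the payload type is illustrative):

package main

import (
	"encoding/json"
	"fmt"
)

type apiResponse[T any] struct {
	Success bool `json:"success"`
	Value   *T   `json:"value"` // pointer: nil when the API omits the value
}

type item struct {
	ID   string `json:"id"`
	Name string `json:"name"`
}

func main() {
	var ok apiResponse[[]item]
	_ = json.Unmarshal([]byte(`{"success":true,"value":[{"id":"1","name":"a"}]}`), &ok)

	var missing apiResponse[[]item]
	_ = json.Unmarshal([]byte(`{"success":false}`), &missing)

	fmt.Println(ok.Success, ok.Value != nil)    // true true
	fmt.Println(missing.Success, missing.Value) // false <nil>
}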
|
||||
1
pkg/debrid/providers/realdebrid/misc.go
Normal file
@@ -0,0 +1 @@
|
||||
package realdebrid
|
||||
964
pkg/debrid/providers/realdebrid/realdebrid.go
Normal file
@@ -0,0 +1,964 @@
|
||||
package realdebrid
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"cmp"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/sirrobot01/decypharr/pkg/debrid/types"
|
||||
"io"
|
||||
"net/http"
|
||||
gourl "net/url"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/sirrobot01/decypharr/internal/config"
|
||||
"github.com/sirrobot01/decypharr/internal/logger"
|
||||
"github.com/sirrobot01/decypharr/internal/request"
|
||||
"github.com/sirrobot01/decypharr/internal/utils"
|
||||
"github.com/sirrobot01/decypharr/pkg/rar"
|
||||
)
|
||||
|
||||
type RealDebrid struct {
|
||||
name string
|
||||
Host string `json:"host"`
|
||||
|
||||
APIKey string
|
||||
accounts *types.Accounts
|
||||
|
||||
DownloadUncached bool
|
||||
client *request.Client
|
||||
downloadClient *request.Client
|
||||
repairClient *request.Client
|
||||
autoExpiresLinksAfter time.Duration
|
||||
|
||||
MountPath string
|
||||
logger zerolog.Logger
|
||||
UnpackRar bool
|
||||
|
||||
rarSemaphore chan struct{}
|
||||
checkCached bool
|
||||
addSamples bool
|
||||
Profile *types.Profile
|
||||
minimumFreeSlot int // Minimum number of free slots to maintain (used for cached content, etc.)
|
||||
limit int
|
||||
}
|
||||
|
||||
func New(dc config.Debrid) (*RealDebrid, error) {
|
||||
rl := request.ParseRateLimit(dc.RateLimit)
|
||||
repairRl := request.ParseRateLimit(cmp.Or(dc.RepairRateLimit, dc.RateLimit))
|
||||
downloadRl := request.ParseRateLimit(cmp.Or(dc.DownloadRateLimit, dc.RateLimit))
|
||||
|
||||
headers := map[string]string{
|
||||
"Authorization": fmt.Sprintf("Bearer %s", dc.APIKey),
|
||||
}
|
||||
_log := logger.New(dc.Name)
|
||||
|
||||
autoExpiresLinksAfter, err := time.ParseDuration(dc.AutoExpireLinksAfter)
|
||||
if autoExpiresLinksAfter == 0 || err != nil {
|
||||
autoExpiresLinksAfter = 48 * time.Hour
|
||||
}
|
||||
|
||||
r := &RealDebrid{
|
||||
name: "realdebrid",
|
||||
Host: "https://api.real-debrid.com/rest/1.0",
|
||||
APIKey: dc.APIKey,
|
||||
accounts: types.NewAccounts(dc),
|
||||
DownloadUncached: dc.DownloadUncached,
|
||||
autoExpiresLinksAfter: autoExpiresLinksAfter,
|
||||
UnpackRar: dc.UnpackRar,
|
||||
client: request.New(
|
||||
request.WithHeaders(headers),
|
||||
request.WithRateLimiter(rl),
|
||||
request.WithLogger(_log),
|
||||
request.WithMaxRetries(10),
|
||||
request.WithRetryableStatus(429, 502),
|
||||
request.WithProxy(dc.Proxy),
|
||||
),
|
||||
downloadClient: request.New(
|
||||
request.WithRateLimiter(downloadRl),
|
||||
request.WithLogger(_log),
|
||||
request.WithMaxRetries(10),
|
||||
request.WithRetryableStatus(429, 447, 502),
|
||||
request.WithProxy(dc.Proxy),
|
||||
),
|
||||
repairClient: request.New(
|
||||
request.WithRateLimiter(repairRl),
|
||||
request.WithHeaders(headers),
|
||||
request.WithLogger(_log),
|
||||
request.WithMaxRetries(4),
|
||||
request.WithRetryableStatus(429, 502),
|
||||
request.WithProxy(dc.Proxy),
|
||||
),
|
||||
MountPath: dc.Folder,
|
||||
logger: logger.New(dc.Name),
|
||||
rarSemaphore: make(chan struct{}, 2),
|
||||
checkCached: dc.CheckCached,
|
||||
addSamples: dc.AddSamples,
|
||||
minimumFreeSlot: dc.MinimumFreeSlot,
|
||||
limit: dc.Limit,
|
||||
}
|
||||
|
||||
if _, err := r.GetProfile(); err != nil {
return nil, err
}
return r, nil
|
||||
}
|
||||
|
||||
func (r *RealDebrid) Name() string {
|
||||
return r.name
|
||||
}
|
||||
|
||||
func (r *RealDebrid) Logger() zerolog.Logger {
|
||||
return r.logger
|
||||
}
|
||||
|
||||
func (r *RealDebrid) getSelectedFiles(t *types.Torrent, data torrentInfo) (map[string]types.File, error) {
|
||||
files := make(map[string]types.File)
|
||||
selectedFiles := make([]types.File, 0)
|
||||
|
||||
for _, f := range data.Files {
|
||||
if f.Selected == 1 {
|
||||
selectedFiles = append(selectedFiles, types.File{
|
||||
TorrentId: t.Id,
|
||||
Name: filepath.Base(f.Path),
|
||||
Path: filepath.Base(f.Path),
|
||||
Size: f.Bytes,
|
||||
Id: strconv.Itoa(f.ID),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
if len(selectedFiles) == 0 {
|
||||
return files, nil
|
||||
}
|
||||
|
||||
// Handle RARed torrents (single link, multiple files)
|
||||
if len(data.Links) == 1 && len(selectedFiles) > 1 {
|
||||
return r.handleRarArchive(t, data, selectedFiles)
|
||||
}
|
||||
|
||||
// Standard case - map files to links
|
||||
if len(selectedFiles) > len(data.Links) {
|
||||
r.logger.Warn().Msgf("More files than links available: %d files, %d links for %s", len(selectedFiles), len(data.Links), t.Name)
|
||||
}
|
||||
|
||||
for i, f := range selectedFiles {
|
||||
if i < len(data.Links) {
|
||||
f.Link = data.Links[i]
|
||||
files[f.Name] = f
|
||||
} else {
|
||||
r.logger.Warn().Str("file", f.Name).Msg("No link available for file")
|
||||
}
|
||||
}
|
||||
|
||||
return files, nil
|
||||
}
|
||||
|
||||
// handleRarArchive processes RAR archives with multiple files
|
||||
func (r *RealDebrid) handleRarArchive(t *types.Torrent, data torrentInfo, selectedFiles []types.File) (map[string]types.File, error) {
|
||||
// This will block if 2 RAR operations are already in progress
|
||||
r.rarSemaphore <- struct{}{}
|
||||
defer func() {
|
||||
<-r.rarSemaphore
|
||||
}()
|
||||
|
||||
files := make(map[string]types.File)
|
||||
|
||||
if !r.UnpackRar {
|
||||
r.logger.Debug().Msgf("RAR file detected, but unpacking is disabled: %s", t.Name)
|
||||
// Create a single file representing the RAR archive
|
||||
file := types.File{
|
||||
TorrentId: t.Id,
|
||||
Id: "0",
|
||||
Name: t.Name + ".rar",
|
||||
Size: 0,
|
||||
IsRar: true,
|
||||
ByteRange: nil,
|
||||
Path: t.Name + ".rar",
|
||||
Link: data.Links[0],
|
||||
Generated: time.Now(),
|
||||
}
|
||||
files[file.Name] = file
|
||||
return files, nil
|
||||
}
|
||||
|
||||
r.logger.Info().Msgf("RAR file detected, unpacking: %s", t.Name)
|
||||
linkFile := &types.File{TorrentId: t.Id, Link: data.Links[0]}
|
||||
downloadLinkObj, err := r.GetDownloadLink(t, linkFile)
|
||||
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get download link for RAR file: %w", err)
|
||||
}
|
||||
|
||||
dlLink := downloadLinkObj.DownloadLink
|
||||
reader, err := rar.NewReader(dlLink)
|
||||
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create RAR reader: %w", err)
|
||||
}
|
||||
|
||||
rarFiles, err := reader.GetFiles()
|
||||
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read RAR files: %w", err)
|
||||
}
|
||||
|
||||
// Create lookup map for faster matching
|
||||
fileMap := make(map[string]*types.File)
|
||||
for i := range selectedFiles {
|
||||
// RD converts special chars to '_' for RAR file paths
|
||||
// @TODO: there might be more special chars to replace
|
||||
safeName := strings.NewReplacer("|", "_", "\"", "_", "\\", "_", "?", "_", "*", "_", ":", "_", "<", "_", ">", "_").Replace(selectedFiles[i].Name)
|
||||
fileMap[safeName] = &selectedFiles[i]
|
||||
}
|
||||
|
||||
now := time.Now()
|
||||
|
||||
for _, rarFile := range rarFiles {
|
||||
if file, exists := fileMap[rarFile.Name()]; exists {
|
||||
file.IsRar = true
|
||||
file.ByteRange = rarFile.ByteRange()
|
||||
file.Link = data.Links[0]
|
||||
file.Generated = now
|
||||
files[file.Name] = *file
|
||||
} else if !rarFile.IsDirectory {
|
||||
r.logger.Warn().Msgf("RAR file %s not found in torrent files", rarFile.Name())
|
||||
}
|
||||
}
|
||||
|
||||
return files, nil
|
||||
}
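handleRarArchive above throttles itself with rarSemaphore, a buffered channel of capacity 2: sending acquires a slot and blocks once two unpacks are in flight, and the deferred receive releases it. A minimal sketch of that buffered-channel semaphore in isolation (the worker body is a placeholder):

package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	sem := make(chan struct{}, 2) // at most 2 concurrent workers
	var wg sync.WaitGroup

	for i := 0; i < 5; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			sem <- struct{}{}        // acquire (blocks while 2 are in flight)
			defer func() { <-sem }() // release
			fmt.Println("unpacking", i)
			time.Sleep(100 * time.Millisecond)
		}(i)
	}
	wg.Wait()
}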
|
||||
|
||||
// getTorrentFiles returns the torrent files from the torrent info,
// filtering out samples and files that fail the configured name and size rules.
|
||||
func (r *RealDebrid) getTorrentFiles(t *types.Torrent, data torrentInfo) map[string]types.File {
|
||||
files := make(map[string]types.File)
|
||||
cfg := config.Get()
|
||||
idx := 0
|
||||
|
||||
for _, f := range data.Files {
|
||||
name := filepath.Base(f.Path)
|
||||
if !r.addSamples && utils.IsSampleFile(f.Path) {
|
||||
// Skip sample files
|
||||
continue
|
||||
}
|
||||
|
||||
if !cfg.IsAllowedFile(name) {
|
||||
continue
|
||||
}
|
||||
if !cfg.IsSizeAllowed(f.Bytes) {
|
||||
continue
|
||||
}
|
||||
|
||||
file := types.File{
|
||||
TorrentId: t.Id,
|
||||
Name: name,
|
||||
Path: name,
|
||||
Size: f.Bytes,
|
||||
Id: strconv.Itoa(f.ID),
|
||||
}
|
||||
files[name] = file
|
||||
idx++
|
||||
}
|
||||
return files
|
||||
}
|
||||
|
||||
func (r *RealDebrid) IsAvailable(hashes []string) map[string]bool {
|
||||
// Check which of the given infohashes are cached on the provider
|
||||
result := make(map[string]bool)
|
||||
|
||||
// Divide hashes into groups of 200
|
||||
for i := 0; i < len(hashes); i += 200 {
|
||||
end := i + 200
|
||||
if end > len(hashes) {
|
||||
end = len(hashes)
|
||||
}
|
||||
|
||||
// Filter out empty strings
|
||||
validHashes := make([]string, 0, end-i)
|
||||
for _, hash := range hashes[i:end] {
|
||||
if hash != "" {
|
||||
validHashes = append(validHashes, hash)
|
||||
}
|
||||
}
|
||||
|
||||
// If no valid hashes in this batch, continue to the next batch
|
||||
if len(validHashes) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
hashStr := strings.Join(validHashes, "/")
|
||||
url := fmt.Sprintf("%s/torrents/instantAvailability/%s", r.Host, hashStr)
|
||||
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
||||
resp, err := r.client.MakeRequest(req)
|
||||
if err != nil {
|
||||
r.logger.Error().Err(err).Msgf("Error checking availability")
|
||||
return result
|
||||
}
|
||||
var data AvailabilityResponse
|
||||
err = json.Unmarshal(resp, &data)
|
||||
if err != nil {
|
||||
r.logger.Error().Err(err).Msgf("Error marshalling availability")
|
||||
return result
|
||||
}
|
||||
for _, h := range hashes[i:end] {
|
||||
hosters, exists := data[strings.ToLower(h)]
|
||||
if exists && len(hosters.Rd) > 0 {
|
||||
result[h] = true
|
||||
}
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
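IsAvailable queries the instantAvailability endpoint in windows of 200 hashes and drops empty strings first; the same windowing loop recurs in every provider. A hedged sketch of that batching step pulled out into a helper (the helper name is illustrative, not part of the codebase):

package main

import "fmt"

// batches splits items into consecutive windows of at most size elements,
// dropping empty strings, mirroring the loop used by the providers above.
func batches(items []string, size int) [][]string {
	var out [][]string
	for i := 0; i < len(items); i += size {
		end := i + size
		if end > len(items) {
			end = len(items)
		}
		valid := make([]string, 0, end-i)
		for _, s := range items[i:end] {
			if s != "" {
				valid = append(valid, s)
			}
		}
		if len(valid) > 0 {
			out = append(out, valid)
		}
	}
	return out
}

func main() {
	fmt.Println(batches([]string{"a", "", "b", "c"}, 2)) // [[a] [b c]]
}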
|
||||
|
||||
func (r *RealDebrid) SubmitMagnet(t *types.Torrent) (*types.Torrent, error) {
|
||||
if t.Magnet.IsTorrent() {
|
||||
return r.addTorrent(t)
|
||||
}
|
||||
return r.addMagnet(t)
|
||||
}
|
||||
|
||||
func (r *RealDebrid) addTorrent(t *types.Torrent) (*types.Torrent, error) {
|
||||
url := fmt.Sprintf("%s/torrents/addTorrent", r.Host)
|
||||
var data AddMagnetSchema
|
||||
req, err := http.NewRequest(http.MethodPut, url, bytes.NewReader(t.Magnet.File))
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req.Header.Add("Content-Type", "application/x-bittorrent")
|
||||
resp, err := r.client.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusCreated {
|
||||
// Handle multiple_downloads
|
||||
|
||||
if resp.StatusCode == 509 {
|
||||
return nil, utils.TooManyActiveDownloadsError
|
||||
}
|
||||
|
||||
bodyBytes, _ := io.ReadAll(resp.Body)
|
||||
return nil, fmt.Errorf("realdebrid API error: Status: %d || Body: %s", resp.StatusCode, string(bodyBytes))
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
bodyBytes, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("reading response body: %w", err)
|
||||
}
|
||||
if err = json.Unmarshal(bodyBytes, &data); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
t.Id = data.Id
|
||||
t.Debrid = r.name
|
||||
t.MountPath = r.MountPath
|
||||
return t, nil
|
||||
}
|
||||
|
||||
func (r *RealDebrid) addMagnet(t *types.Torrent) (*types.Torrent, error) {
|
||||
url := fmt.Sprintf("%s/torrents/addMagnet", r.Host)
|
||||
payload := gourl.Values{
|
||||
"magnet": {t.Magnet.Link},
|
||||
}
|
||||
var data AddMagnetSchema
|
||||
req, _ := http.NewRequest(http.MethodPost, url, strings.NewReader(payload.Encode()))
|
||||
resp, err := r.client.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusCreated {
|
||||
// Handle multiple_downloads
|
||||
|
||||
if resp.StatusCode == 509 {
|
||||
return nil, utils.TooManyActiveDownloadsError
|
||||
}
|
||||
|
||||
bodyBytes, _ := io.ReadAll(resp.Body)
|
||||
return nil, fmt.Errorf("realdebrid API error: Status: %d || Body: %s", resp.StatusCode, string(bodyBytes))
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
bodyBytes, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("reading response body: %w", err)
|
||||
}
|
||||
if err = json.Unmarshal(bodyBytes, &data); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
t.Id = data.Id
|
||||
t.Debrid = r.name
|
||||
t.MountPath = r.MountPath
|
||||
return t, nil
|
||||
}
|
||||
|
||||
func (r *RealDebrid) GetTorrent(torrentId string) (*types.Torrent, error) {
|
||||
url := fmt.Sprintf("%s/torrents/info/%s", r.Host, torrentId)
|
||||
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
||||
resp, err := r.client.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
bodyBytes, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("reading response body: %w", err)
|
||||
}
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
if resp.StatusCode == http.StatusNotFound {
|
||||
return nil, utils.TorrentNotFoundError
|
||||
}
|
||||
return nil, fmt.Errorf("realdebrid API error: Status: %d || Body: %s", resp.StatusCode, string(bodyBytes))
|
||||
}
|
||||
var data torrentInfo
|
||||
err = json.Unmarshal(bodyBytes, &data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
t := &types.Torrent{
|
||||
Id: data.ID,
|
||||
Name: data.Filename,
|
||||
Bytes: data.Bytes,
|
||||
Folder: data.OriginalFilename,
|
||||
Progress: data.Progress,
|
||||
Speed: data.Speed,
|
||||
Seeders: data.Seeders,
|
||||
Added: data.Added,
|
||||
Status: data.Status,
|
||||
Filename: data.Filename,
|
||||
OriginalFilename: data.OriginalFilename,
|
||||
Links: data.Links,
|
||||
Debrid: r.name,
|
||||
MountPath: r.MountPath,
|
||||
}
|
||||
t.Files = r.getTorrentFiles(t, data) // Get selected files
|
||||
return t, nil
|
||||
}
|
||||
|
||||
func (r *RealDebrid) UpdateTorrent(t *types.Torrent) error {
|
||||
url := fmt.Sprintf("%s/torrents/info/%s", r.Host, t.Id)
|
||||
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
||||
resp, err := r.client.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
bodyBytes, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return fmt.Errorf("reading response body: %w", err)
|
||||
}
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
if resp.StatusCode == http.StatusNotFound {
|
||||
return utils.TorrentNotFoundError
|
||||
}
|
||||
return fmt.Errorf("realdebrid API error: Status: %d || Body: %s", resp.StatusCode, string(bodyBytes))
|
||||
}
|
||||
var data torrentInfo
|
||||
err = json.Unmarshal(bodyBytes, &data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
t.Name = data.Filename
|
||||
t.Bytes = data.Bytes
|
||||
t.Folder = data.OriginalFilename
|
||||
t.Progress = data.Progress
|
||||
t.Status = data.Status
|
||||
t.Speed = data.Speed
|
||||
t.Seeders = data.Seeders
|
||||
t.Filename = data.Filename
|
||||
t.OriginalFilename = data.OriginalFilename
|
||||
t.Links = data.Links
|
||||
t.MountPath = r.MountPath
|
||||
t.Debrid = r.name
|
||||
t.Added = data.Added
|
||||
t.Files, _ = r.getSelectedFiles(t, data) // Get selected files
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *RealDebrid) CheckStatus(t *types.Torrent) (*types.Torrent, error) {
|
||||
url := fmt.Sprintf("%s/torrents/info/%s", r.Host, t.Id)
|
||||
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
||||
for {
|
||||
resp, err := r.client.MakeRequest(req)
|
||||
if err != nil {
|
||||
r.logger.Info().Msgf("ERROR Checking file: %v", err)
|
||||
return t, err
|
||||
}
|
||||
var data torrentInfo
|
||||
if err = json.Unmarshal(resp, &data); err != nil {
|
||||
return t, err
|
||||
}
|
||||
status := data.Status
|
||||
t.Name = data.Filename // Important because some magnets change the name
|
||||
t.Folder = data.OriginalFilename
|
||||
t.Filename = data.Filename
|
||||
t.OriginalFilename = data.OriginalFilename
|
||||
t.Bytes = data.Bytes
|
||||
t.Progress = data.Progress
|
||||
t.Speed = data.Speed
|
||||
t.Seeders = data.Seeders
|
||||
t.Links = data.Links
|
||||
t.Status = status
|
||||
t.Debrid = r.name
|
||||
t.MountPath = r.MountPath
|
||||
if status == "waiting_files_selection" {
|
||||
t.Files = r.getTorrentFiles(t, data)
|
||||
if len(t.Files) == 0 {
|
||||
return t, fmt.Errorf("no valid files found")
|
||||
}
|
||||
filesId := make([]string, 0)
|
||||
for _, f := range t.Files {
|
||||
filesId = append(filesId, f.Id)
|
||||
}
|
||||
p := gourl.Values{
|
||||
"files": {strings.Join(filesId, ",")},
|
||||
}
|
||||
payload := strings.NewReader(p.Encode())
|
||||
req, _ := http.NewRequest(http.MethodPost, fmt.Sprintf("%s/torrents/selectFiles/%s", r.Host, t.Id), payload)
|
||||
res, err := r.client.Do(req)
|
||||
if err != nil {
|
||||
return t, err
|
||||
}
|
||||
if res.StatusCode != http.StatusNoContent {
|
||||
if res.StatusCode == 509 {
|
||||
return nil, utils.TooManyActiveDownloadsError
|
||||
}
|
||||
return t, fmt.Errorf("realdebrid API error: Status: %d", res.StatusCode)
|
||||
}
|
||||
} else if status == "downloaded" {
|
||||
t.Files, err = r.getSelectedFiles(t, data) // Get selected files
|
||||
if err != nil {
|
||||
return t, err
|
||||
}
|
||||
|
||||
r.logger.Info().Msgf("Torrent: %s downloaded to RD", t.Name)
|
||||
return t, nil
|
||||
} else if utils.Contains(r.GetDownloadingStatus(), status) {
|
||||
if !t.DownloadUncached {
|
||||
return t, fmt.Errorf("torrent: %s not cached", t.Name)
|
||||
}
|
||||
return t, nil
|
||||
} else {
|
||||
return t, fmt.Errorf("torrent: %s has error: %s", t.Name, status)
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
func (r *RealDebrid) DeleteTorrent(torrentId string) error {
|
||||
url := fmt.Sprintf("%s/torrents/delete/%s", r.Host, torrentId)
|
||||
req, _ := http.NewRequest(http.MethodDelete, url, nil)
|
||||
if _, err := r.client.MakeRequest(req); err != nil {
|
||||
return err
|
||||
}
|
||||
r.logger.Info().Msgf("Torrent: %s deleted from RD", torrentId)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *RealDebrid) GetFileDownloadLinks(t *types.Torrent) error {
|
||||
var wg sync.WaitGroup
|
||||
var mu sync.Mutex
|
||||
var firstErr error
|
||||
|
||||
files := make(map[string]types.File)
|
||||
links := make(map[string]*types.DownloadLink)
|
||||
|
||||
_files := t.GetFiles()
|
||||
wg.Add(len(_files))
|
||||
|
||||
for _, f := range _files {
|
||||
go func(file types.File) {
|
||||
defer wg.Done()
|
||||
|
||||
link, err := r.GetDownloadLink(t, &file)
|
||||
if err != nil {
|
||||
mu.Lock()
|
||||
if firstErr == nil {
|
||||
firstErr = err
|
||||
}
|
||||
mu.Unlock()
|
||||
return
|
||||
}
|
||||
if link == nil {
|
||||
mu.Lock()
|
||||
if firstErr == nil {
|
||||
firstErr = fmt.Errorf("realdebrid API error: download link not found for file %s", file.Name)
|
||||
}
|
||||
mu.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
file.DownloadLink = link
|
||||
|
||||
mu.Lock()
|
||||
files[file.Name] = file
|
||||
links[link.Link] = link
|
||||
mu.Unlock()
|
||||
}(f)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
|
||||
if firstErr != nil {
|
||||
return firstErr
|
||||
}
|
||||
|
||||
// Add links to cache
|
||||
r.accounts.SetDownloadLinks(links)
|
||||
t.Files = files
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *RealDebrid) CheckLink(link string) error {
|
||||
url := fmt.Sprintf("%s/unrestrict/check", r.Host)
|
||||
payload := gourl.Values{
|
||||
"link": {link},
|
||||
}
|
||||
req, _ := http.NewRequest(http.MethodPost, url, strings.NewReader(payload.Encode()))
|
||||
resp, err := r.repairClient.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if resp.StatusCode == http.StatusNotFound {
|
||||
return utils.HosterUnavailableError // File has been removed
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *RealDebrid) _getDownloadLink(file *types.File) (*types.DownloadLink, error) {
|
||||
url := fmt.Sprintf("%s/unrestrict/link/", r.Host)
|
||||
_link := file.Link
|
||||
if strings.HasPrefix(file.Link, "https://real-debrid.com/d/") && len(file.Link) > 39 {
|
||||
_link = file.Link[0:39]
|
||||
}
|
||||
payload := gourl.Values{
|
||||
"link": {_link},
|
||||
}
|
||||
req, _ := http.NewRequest(http.MethodPost, url, strings.NewReader(payload.Encode()))
|
||||
resp, err := r.downloadClient.Do(req)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
// Read the response body to get the error message
|
||||
b, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var data ErrorResponse
|
||||
if err = json.Unmarshal(b, &data); err != nil {
|
||||
return nil, fmt.Errorf("error unmarshalling %d || %s \n %s", resp.StatusCode, err, string(b))
|
||||
}
|
||||
switch data.ErrorCode {
|
||||
case 19:
|
||||
return nil, utils.HosterUnavailableError // File has been removed
|
||||
case 23:
|
||||
return nil, utils.TrafficExceededError
|
||||
case 24:
|
||||
return nil, utils.HosterUnavailableError // Link has been nerfed
|
||||
case 34:
|
||||
return nil, utils.TrafficExceededError // traffic exceeded
|
||||
case 35:
|
||||
return nil, utils.HosterUnavailableError
|
||||
case 36:
|
||||
return nil, utils.TrafficExceededError // traffic exceeded
|
||||
default:
|
||||
return nil, fmt.Errorf("realdebrid API error: Status: %d || Code: %d", resp.StatusCode, data.ErrorCode)
|
||||
}
|
||||
}
|
||||
b, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var data UnrestrictResponse
|
||||
if err = json.Unmarshal(b, &data); err != nil {
|
||||
return nil, fmt.Errorf("realdebrid API error: Error unmarshalling response: %w", err)
|
||||
}
|
||||
if data.Download == "" {
|
||||
return nil, fmt.Errorf("realdebrid API error: download link not found")
|
||||
}
|
||||
now := time.Now()
|
||||
return &types.DownloadLink{
|
||||
Filename: data.Filename,
|
||||
Size: data.Filesize,
|
||||
Link: data.Link,
|
||||
DownloadLink: data.Download,
|
||||
Generated: now,
|
||||
ExpiresAt: now.Add(r.autoExpiresLinksAfter),
|
||||
}, nil
|
||||
|
||||
}
|
||||
|
||||
func (r *RealDebrid) GetDownloadLink(t *types.Torrent, file *types.File) (*types.DownloadLink, error) {
|
||||
|
||||
accounts := r.accounts.All()
|
||||
|
||||
for _, account := range accounts {
|
||||
r.downloadClient.SetHeader("Authorization", fmt.Sprintf("Bearer %s", account.Token))
|
||||
downloadLink, err := r._getDownloadLink(file)
|
||||
|
||||
if err == nil {
|
||||
return downloadLink, nil
|
||||
}
|
||||
|
||||
retries := 0
|
||||
if errors.Is(err, utils.TrafficExceededError) {
|
||||
// Retries generating
|
||||
retries = 5
|
||||
} else {
|
||||
// If the error is not traffic exceeded, return the error
|
||||
return nil, err
|
||||
}
|
||||
backOff := 1 * time.Second
|
||||
for retries > 0 {
|
||||
downloadLink, err = r._getDownloadLink(file)
|
||||
if err == nil {
|
||||
return downloadLink, nil
|
||||
}
|
||||
if !errors.Is(err, utils.TrafficExceededError) {
|
||||
return nil, err
|
||||
}
|
||||
// Add a delay before retrying
|
||||
time.Sleep(backOff)
|
||||
backOff *= 2 // Exponential backoff
|
||||
retries--
|
||||
}
|
||||
}
|
||||
return nil, fmt.Errorf("realdebrid API error: download link not found")
|
||||
}
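GetDownloadLink retries only traffic-exceeded errors, doubling the delay between attempts and bailing out immediately on anything else. A standalone sketch of that backoff loop (the sentinel error and operation are stand-ins, not the utils package):

package main

import (
	"errors"
	"fmt"
	"time"
)

var errTrafficExceeded = errors.New("traffic exceeded")

// withBackoff retries op up to retries extra times, doubling the delay after
// each retryable failure; any other error is returned immediately.
func withBackoff(op func() error, retries int, initial time.Duration) error {
	delay := initial
	var err error
	for i := 0; i <= retries; i++ {
		if err = op(); err == nil {
			return nil
		}
		if !errors.Is(err, errTrafficExceeded) {
			return err
		}
		time.Sleep(delay)
		delay *= 2 // exponential backoff
	}
	return err
}

func main() {
	attempts := 0
	err := withBackoff(func() error {
		attempts++
		if attempts < 3 {
			return errTrafficExceeded
		}
		return nil
	}, 5, 10*time.Millisecond)
	fmt.Println(attempts, err) // 3 <nil>
}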
|
||||
|
||||
func (r *RealDebrid) getTorrents(offset int, limit int) (int, []*types.Torrent, error) {
|
||||
url := fmt.Sprintf("%s/torrents?limit=%d", r.Host, limit)
|
||||
torrents := make([]*types.Torrent, 0)
|
||||
if offset > 0 {
|
||||
url = fmt.Sprintf("%s&offset=%d", url, offset)
|
||||
}
|
||||
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
||||
resp, err := r.client.Do(req)
|
||||
|
||||
if err != nil {
|
||||
return 0, torrents, err
|
||||
}
|
||||
|
||||
if resp.StatusCode == http.StatusNoContent {
|
||||
return 0, torrents, nil
|
||||
}
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
resp.Body.Close()
|
||||
return 0, torrents, fmt.Errorf("realdebrid API error: %d", resp.StatusCode)
|
||||
}
|
||||
|
||||
defer resp.Body.Close()
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return 0, torrents, err
|
||||
}
|
||||
totalItems, _ := strconv.Atoi(resp.Header.Get("X-Total-Count"))
|
||||
var data []TorrentsResponse
|
||||
if err = json.Unmarshal(body, &data); err != nil {
|
||||
return 0, torrents, err
|
||||
}
|
||||
filenames := map[string]struct{}{}
|
||||
for _, t := range data {
|
||||
if t.Status != "downloaded" {
|
||||
continue
|
||||
}
|
||||
torrents = append(torrents, &types.Torrent{
|
||||
Id: t.Id,
|
||||
Name: t.Filename,
|
||||
Bytes: t.Bytes,
|
||||
Progress: t.Progress,
|
||||
Status: t.Status,
|
||||
Filename: t.Filename,
|
||||
OriginalFilename: t.Filename,
|
||||
Links: t.Links,
|
||||
Files: make(map[string]types.File),
|
||||
InfoHash: t.Hash,
|
||||
Debrid: r.name,
|
||||
MountPath: r.MountPath,
|
||||
Added: t.Added.Format(time.RFC3339),
|
||||
})
|
||||
filenames[t.Filename] = struct{}{}
|
||||
}
|
||||
return totalItems, torrents, nil
|
||||
}
|
||||
|
||||
func (r *RealDebrid) GetTorrents() ([]*types.Torrent, error) {
|
||||
limit := 5000
|
||||
if r.limit != 0 {
|
||||
limit = r.limit
|
||||
}
|
||||
hardLimit := r.limit
|
||||
|
||||
// Fetch torrents in batches until an empty page or the hard limit is reached
|
||||
allTorrents := make([]*types.Torrent, 0)
|
||||
var fetchError error
|
||||
offset := 0
|
||||
for {
|
||||
// Fetch next batch of torrents
|
||||
_, torrents, err := r.getTorrents(offset, limit)
|
||||
if err != nil {
|
||||
fetchError = err
|
||||
break
|
||||
}
|
||||
totalTorrents := len(torrents)
|
||||
if totalTorrents == 0 {
|
||||
break
|
||||
}
|
||||
allTorrents = append(allTorrents, torrents...)
|
||||
offset += totalTorrents
|
||||
if hardLimit != 0 && len(allTorrents) >= hardLimit {
|
||||
// If hard limit is set, stop fetching more torrents
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if fetchError != nil {
|
||||
return nil, fetchError
|
||||
}
|
||||
|
||||
return allTorrents, nil
|
||||
}
|
||||
|
||||
func (r *RealDebrid) GetDownloadLinks() (map[string]*types.DownloadLink, error) {
|
||||
links := make(map[string]*types.DownloadLink)
|
||||
offset := 0
|
||||
limit := 1000
|
||||
|
||||
accounts := r.accounts.All()
|
||||
|
||||
if len(accounts) < 1 {
|
||||
// No active download keys; the keys have likely reached their bandwidth limit
|
||||
return links, fmt.Errorf("no active download keys")
|
||||
}
|
||||
activeAccount := accounts[0]
|
||||
r.downloadClient.SetHeader("Authorization", fmt.Sprintf("Bearer %s", activeAccount.Token))
|
||||
for {
|
||||
dl, err := r._getDownloads(offset, limit)
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
if len(dl) == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
for _, d := range dl {
|
||||
if _, exists := links[d.Link]; exists {
|
||||
// Downloads are ordered by date, so keep the first (newest) occurrence of each link
|
||||
continue
|
||||
}
|
||||
links[d.Link] = &d
|
||||
}
|
||||
|
||||
offset += len(dl)
|
||||
}
|
||||
|
||||
return links, nil
|
||||
}
|
||||
|
||||
func (r *RealDebrid) _getDownloads(offset int, limit int) ([]types.DownloadLink, error) {
|
||||
url := fmt.Sprintf("%s/downloads?limit=%d", r.Host, limit)
|
||||
if offset > 0 {
|
||||
url = fmt.Sprintf("%s&offset=%d", url, offset)
|
||||
}
|
||||
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
||||
resp, err := r.downloadClient.MakeRequest(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var data []DownloadsResponse
|
||||
if err = json.Unmarshal(resp, &data); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
links := make([]types.DownloadLink, 0)
|
||||
for _, d := range data {
|
||||
links = append(links, types.DownloadLink{
|
||||
Filename: d.Filename,
|
||||
Size: d.Filesize,
|
||||
Link: d.Link,
|
||||
DownloadLink: d.Download,
|
||||
Generated: d.Generated,
|
||||
ExpiresAt: d.Generated.Add(r.autoExpiresLinksAfter),
|
||||
Id: d.Id,
|
||||
})
|
||||
|
||||
}
|
||||
return links, nil
|
||||
}
|
||||
|
||||
func (r *RealDebrid) GetDownloadingStatus() []string {
|
||||
return []string{"downloading", "magnet_conversion", "queued", "compressing", "uploading"}
|
||||
}
|
||||
|
||||
func (r *RealDebrid) GetDownloadUncached() bool {
|
||||
return r.DownloadUncached
|
||||
}
|
||||
|
||||
func (r *RealDebrid) GetMountPath() string {
|
||||
return r.MountPath
|
||||
}
|
||||
|
||||
func (r *RealDebrid) DeleteDownloadLink(linkId string) error {
|
||||
url := fmt.Sprintf("%s/downloads/delete/%s", r.Host, linkId)
|
||||
req, _ := http.NewRequest(http.MethodDelete, url, nil)
|
||||
if _, err := r.downloadClient.MakeRequest(req); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *RealDebrid) GetProfile() (*types.Profile, error) {
|
||||
if r.Profile != nil {
|
||||
return r.Profile, nil
|
||||
}
|
||||
url := fmt.Sprintf("%s/user", r.Host)
|
||||
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
||||
|
||||
resp, err := r.client.MakeRequest(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var data profileResponse
|
||||
if json.Unmarshal(resp, &data) != nil {
|
||||
return nil, err
|
||||
}
|
||||
profile := &types.Profile{
|
||||
Id: data.Id,
|
||||
Username: data.Username,
|
||||
Email: data.Email,
|
||||
Points: data.Points,
|
||||
Premium: data.Premium,
|
||||
Expiration: data.Expiration,
|
||||
Type: data.Type,
|
||||
}
|
||||
r.Profile = profile
|
||||
return profile, nil
|
||||
}
|
||||
|
||||
func (r *RealDebrid) GetAvailableSlots() (int, error) {
|
||||
url := fmt.Sprintf("%s/torrents/activeCount", r.Host)
|
||||
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
||||
resp, err := r.client.MakeRequest(req)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
var data AvailableSlotsResponse
|
||||
if json.Unmarshal(resp, &data) != nil {
|
||||
return 0, fmt.Errorf("error unmarshalling available slots response: %w", err)
|
||||
}
|
||||
return data.TotalSlots - data.ActiveSlots - r.minimumFreeSlot, nil // Keep the configured minimum number of free slots
|
||||
}
|
||||
|
||||
func (r *RealDebrid) Accounts() *types.Accounts {
|
||||
return r.accounts
|
||||
}
|
||||
@@ -1,13 +1,14 @@
|
||||
package structs
|
||||
package realdebrid
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"time"
|
||||
)
|
||||
|
||||
type RealDebridAvailabilityResponse map[string]Hoster
|
||||
type AvailabilityResponse map[string]Hoster
|
||||
|
||||
func (r *RealDebridAvailabilityResponse) UnmarshalJSON(data []byte) error {
|
||||
func (r *AvailabilityResponse) UnmarshalJSON(data []byte) error {
|
||||
// First, try to unmarshal as an object
|
||||
var objectData map[string]Hoster
|
||||
err := json.Unmarshal(data, &objectData)
|
||||
@@ -64,18 +65,18 @@ type FileVariant struct {
|
||||
Filesize int `json:"filesize"`
|
||||
}
|
||||
|
||||
type RealDebridAddMagnetSchema struct {
|
||||
type AddMagnetSchema struct {
|
||||
Id string `json:"id"`
|
||||
Uri string `json:"uri"`
|
||||
}
|
||||
|
||||
type RealDebridTorrentInfo struct {
|
||||
type torrentInfo struct {
|
||||
ID string `json:"id"`
|
||||
Filename string `json:"filename"`
|
||||
OriginalFilename string `json:"original_filename"`
|
||||
Hash string `json:"hash"`
|
||||
Bytes int64 `json:"bytes"`
|
||||
OriginalBytes int `json:"original_bytes"`
|
||||
OriginalBytes int64 `json:"original_bytes"`
|
||||
Host string `json:"host"`
|
||||
Split int `json:"split"`
|
||||
Progress float64 `json:"progress"`
|
||||
@@ -84,7 +85,7 @@ type RealDebridTorrentInfo struct {
|
||||
Files []struct {
|
||||
ID int `json:"id"`
|
||||
Path string `json:"path"`
|
||||
Bytes int `json:"bytes"`
|
||||
Bytes int64 `json:"bytes"`
|
||||
Selected int `json:"selected"`
|
||||
} `json:"files"`
|
||||
Links []string `json:"links"`
|
||||
@@ -93,15 +94,65 @@ type RealDebridTorrentInfo struct {
|
||||
Seeders int `json:"seeders,omitempty"`
|
||||
}
|
||||
|
||||
type RealDebridUnrestrictResponse struct {
|
||||
type UnrestrictResponse struct {
|
||||
Id string `json:"id"`
|
||||
Filename string `json:"filename"`
|
||||
MimeType string `json:"mimeType"`
|
||||
Filesize int64 `json:"filesize"`
|
||||
Link string `json:"link"`
|
||||
Host string `json:"host"`
|
||||
Chunks int64 `json:"chunks"`
|
||||
Crc int64 `json:"crc"`
|
||||
Chunks int `json:"chunks"`
|
||||
Crc int `json:"crc"`
|
||||
Download string `json:"download"`
|
||||
Streamable int `json:"streamable"`
|
||||
}
|
||||
|
||||
type TorrentsResponse struct {
|
||||
Id string `json:"id"`
|
||||
Filename string `json:"filename"`
|
||||
Hash string `json:"hash"`
|
||||
Bytes int64 `json:"bytes"`
|
||||
Host string `json:"host"`
|
||||
Split int64 `json:"split"`
|
||||
Progress float64 `json:"progress"`
|
||||
Status string `json:"status"`
|
||||
Added time.Time `json:"added"`
|
||||
Links []string `json:"links"`
|
||||
Ended time.Time `json:"ended"`
|
||||
}
|
||||
|
||||
type DownloadsResponse struct {
|
||||
Id string `json:"id"`
|
||||
Filename string `json:"filename"`
|
||||
MimeType string `json:"mimeType"`
|
||||
Filesize int64 `json:"filesize"`
|
||||
Link string `json:"link"`
|
||||
Host string `json:"host"`
|
||||
HostIcon string `json:"host_icon"`
|
||||
Chunks int64 `json:"chunks"`
|
||||
Download string `json:"download"`
|
||||
Streamable int `json:"streamable"`
|
||||
Generated time.Time `json:"generated"`
|
||||
}
|
||||
|
||||
type ErrorResponse struct {
|
||||
Error string `json:"error"`
|
||||
ErrorCode int `json:"error_code"`
|
||||
}
|
||||
|
||||
type profileResponse struct {
|
||||
Id int64 `json:"id"`
|
||||
Username string `json:"username"`
|
||||
Email string `json:"email"`
|
||||
Points int64 `json:"points"`
|
||||
Locale string `json:"locale"`
|
||||
Avatar string `json:"avatar"`
|
||||
Type string `json:"type"`
|
||||
Premium int `json:"premium"`
|
||||
Expiration time.Time `json:"expiration"`
|
||||
}
|
||||
|
||||
type AvailableSlotsResponse struct {
|
||||
ActiveSlots int `json:"nb"`
|
||||
TotalSlots int `json:"limit"`
|
||||
}
|
||||
637
pkg/debrid/providers/torbox/torbox.go
Normal file
@@ -0,0 +1,637 @@
|
||||
package torbox
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"mime/multipart"
|
||||
"net/http"
|
||||
gourl "net/url"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/sirrobot01/decypharr/internal/config"
|
||||
"github.com/sirrobot01/decypharr/internal/logger"
|
||||
"github.com/sirrobot01/decypharr/internal/request"
|
||||
"github.com/sirrobot01/decypharr/internal/utils"
|
||||
"github.com/sirrobot01/decypharr/pkg/debrid/types"
|
||||
"github.com/sirrobot01/decypharr/pkg/version"
|
||||
)
|
||||
|
||||
type Torbox struct {
|
||||
name string
|
||||
Host string `json:"host"`
|
||||
APIKey string
|
||||
accounts *types.Accounts
|
||||
autoExpiresLinksAfter time.Duration
|
||||
|
||||
DownloadUncached bool
|
||||
client *request.Client
|
||||
|
||||
MountPath string
|
||||
logger zerolog.Logger
|
||||
checkCached bool
|
||||
addSamples bool
|
||||
}
|
||||
|
||||
func (tb *Torbox) GetProfile() (*types.Profile, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func New(dc config.Debrid) (*Torbox, error) {
|
||||
rl := request.ParseRateLimit(dc.RateLimit)
|
||||
|
||||
headers := map[string]string{
|
||||
"Authorization": fmt.Sprintf("Bearer %s", dc.APIKey),
|
||||
"User-Agent": fmt.Sprintf("Decypharr/%s (%s; %s)", version.GetInfo(), runtime.GOOS, runtime.GOARCH),
|
||||
}
|
||||
_log := logger.New(dc.Name)
|
||||
client := request.New(
|
||||
request.WithHeaders(headers),
|
||||
request.WithRateLimiter(rl),
|
||||
request.WithLogger(_log),
|
||||
request.WithProxy(dc.Proxy),
|
||||
)
|
||||
autoExpiresLinksAfter, err := time.ParseDuration(dc.AutoExpireLinksAfter)
|
||||
if autoExpiresLinksAfter == 0 || err != nil {
|
||||
autoExpiresLinksAfter = 48 * time.Hour
|
||||
}
|
||||
|
||||
return &Torbox{
|
||||
name: "torbox",
|
||||
Host: "https://api.torbox.app/v1",
|
||||
APIKey: dc.APIKey,
|
||||
accounts: types.NewAccounts(dc),
|
||||
DownloadUncached: dc.DownloadUncached,
|
||||
autoExpiresLinksAfter: autoExpiresLinksAfter,
|
||||
client: client,
|
||||
MountPath: dc.Folder,
|
||||
logger: _log,
|
||||
checkCached: dc.CheckCached,
|
||||
addSamples: dc.AddSamples,
|
||||
}, nil
|
||||
}
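For reference, a minimal sketch of how this constructor might be wired up. The configuration values below are illustrative assumptions (API key, folder path, and expiry are invented); only fields that `New` actually reads are set.

```go
package main

import (
	"fmt"

	"github.com/sirrobot01/decypharr/internal/config"
	"github.com/sirrobot01/decypharr/pkg/debrid/providers/torbox"
)

func main() {
	// Hypothetical configuration; AutoExpireLinksAfter falls back to 48h when empty or invalid.
	dc := config.Debrid{
		Name:                 "torbox",
		APIKey:               "YOUR_API_KEY",
		Folder:               "/mnt/decypharr/torbox",
		AutoExpireLinksAfter: "24h",
		DownloadUncached:     false,
	}

	tb, err := torbox.New(dc)
	if err != nil {
		panic(err)
	}
	fmt.Println(tb.Name(), tb.GetMountPath())
}
```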
|
||||
|
||||
func (tb *Torbox) Name() string {
|
||||
return tb.name
|
||||
}
|
||||
|
||||
func (tb *Torbox) Logger() zerolog.Logger {
|
||||
return tb.logger
|
||||
}
|
||||
|
||||
func (tb *Torbox) IsAvailable(hashes []string) map[string]bool {
|
||||
// Check which of the given infohashes are reported as cached by the Torbox API
|
||||
result := make(map[string]bool)
|
||||
|
||||
// Divide hashes into groups of 100
|
||||
for i := 0; i < len(hashes); i += 100 {
|
||||
end := i + 100
|
||||
if end > len(hashes) {
|
||||
end = len(hashes)
|
||||
}
|
||||
|
||||
// Filter out empty strings
|
||||
validHashes := make([]string, 0, end-i)
|
||||
for _, hash := range hashes[i:end] {
|
||||
if hash != "" {
|
||||
validHashes = append(validHashes, hash)
|
||||
}
|
||||
}
|
||||
|
||||
// If no valid hashes in this batch, continue to the next batch
|
||||
if len(validHashes) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
hashStr := strings.Join(validHashes, ",")
|
||||
url := fmt.Sprintf("%s/api/torrents/checkcached?hash=%s", tb.Host, hashStr)
|
||||
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
||||
resp, err := tb.client.MakeRequest(req)
|
||||
if err != nil {
|
||||
tb.logger.Error().Err(err).Msgf("Error checking availability")
|
||||
return result
|
||||
}
|
||||
var res AvailableResponse
|
||||
err = json.Unmarshal(resp, &res)
|
||||
if err != nil {
|
||||
tb.logger.Error().Err(err).Msgf("Error marshalling availability")
|
||||
return result
|
||||
}
|
||||
if res.Data == nil {
|
||||
return result
|
||||
}
|
||||
|
||||
for h, c := range *res.Data {
|
||||
if c.Size > 0 {
|
||||
result[strings.ToUpper(h)] = true
|
||||
}
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
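The loop above splits the hashes into batches of at most 100 and drops empty entries before building the query string. The same chunking pattern in isolation, as a standalone sketch with invented sample hashes:

```go
package main

import (
	"fmt"
	"strings"
)

// chunkHashes mirrors the batching used in IsAvailable: split the input into
// groups of at most batchSize, dropping empty strings along the way.
func chunkHashes(hashes []string, batchSize int) [][]string {
	var batches [][]string
	for i := 0; i < len(hashes); i += batchSize {
		end := i + batchSize
		if end > len(hashes) {
			end = len(hashes)
		}
		valid := make([]string, 0, end-i)
		for _, h := range hashes[i:end] {
			if h != "" {
				valid = append(valid, h)
			}
		}
		if len(valid) > 0 {
			batches = append(batches, valid)
		}
	}
	return batches
}

func main() {
	hashes := []string{"aaa", "", "bbb", "ccc"}
	for _, batch := range chunkHashes(hashes, 2) {
		// Each batch would be joined into a single query parameter, e.g. "aaa,bbb".
		fmt.Println(strings.Join(batch, ","))
	}
}
```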
|
||||
|
||||
func (tb *Torbox) SubmitMagnet(torrent *types.Torrent) (*types.Torrent, error) {
|
||||
url := fmt.Sprintf("%s/api/torrents/createtorrent", tb.Host)
|
||||
payload := &bytes.Buffer{}
|
||||
writer := multipart.NewWriter(payload)
|
||||
_ = writer.WriteField("magnet", torrent.Magnet.Link)
|
||||
err := writer.Close()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req, _ := http.NewRequest(http.MethodPost, url, payload)
|
||||
req.Header.Set("Content-Type", writer.FormDataContentType())
|
||||
resp, err := tb.client.MakeRequest(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var data AddMagnetResponse
|
||||
err = json.Unmarshal(resp, &data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if data.Data == nil {
|
||||
return nil, fmt.Errorf("error adding torrent")
|
||||
}
|
||||
dt := *data.Data
|
||||
torrentId := strconv.Itoa(dt.Id)
|
||||
torrent.Id = torrentId
|
||||
torrent.MountPath = tb.MountPath
|
||||
torrent.Debrid = tb.name
|
||||
|
||||
return torrent, nil
|
||||
}
|
||||
|
||||
func (tb *Torbox) getTorboxStatus(status string, finished bool) string {
|
||||
if finished {
|
||||
return "downloaded"
|
||||
}
|
||||
downloading := []string{"completed", "cached", "paused", "downloading", "uploading",
|
||||
"checkingResumeData", "metaDL", "pausedUP", "queuedUP", "checkingUP",
|
||||
"forcedUP", "allocating", "downloading", "metaDL", "pausedDL",
|
||||
"queuedDL", "checkingDL", "forcedDL", "checkingResumeData", "moving"}
|
||||
|
||||
var determinedStatus string
|
||||
switch {
|
||||
case utils.Contains(downloading, status):
|
||||
determinedStatus = "downloading"
|
||||
default:
|
||||
determinedStatus = "error"
|
||||
}
|
||||
|
||||
return determinedStatus
|
||||
}
|
||||
|
||||
func (tb *Torbox) GetTorrent(torrentId string) (*types.Torrent, error) {
|
||||
url := fmt.Sprintf("%s/api/torrents/mylist/?id=%s", tb.Host, torrentId)
|
||||
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
||||
resp, err := tb.client.MakeRequest(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var res InfoResponse
|
||||
err = json.Unmarshal(resp, &res)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
data := res.Data
|
||||
if data == nil {
|
||||
return nil, fmt.Errorf("error getting torrent")
|
||||
}
|
||||
t := &types.Torrent{
|
||||
Id: strconv.Itoa(data.Id),
|
||||
Name: data.Name,
|
||||
Bytes: data.Size,
|
||||
Folder: data.Name,
|
||||
Progress: data.Progress * 100,
|
||||
Status: tb.getTorboxStatus(data.DownloadState, data.DownloadFinished),
|
||||
Speed: data.DownloadSpeed,
|
||||
Seeders: data.Seeds,
|
||||
Filename: data.Name,
|
||||
OriginalFilename: data.Name,
|
||||
MountPath: tb.MountPath,
|
||||
Debrid: tb.name,
|
||||
Files: make(map[string]types.File),
|
||||
Added: data.CreatedAt.Format(time.RFC3339),
|
||||
}
|
||||
cfg := config.Get()
|
||||
|
||||
totalFiles := 0
|
||||
skippedSamples := 0
|
||||
skippedFileType := 0
|
||||
skippedSize := 0
|
||||
validFiles := 0
|
||||
filesWithLinks := 0
|
||||
|
||||
for _, f := range data.Files {
|
||||
totalFiles++
|
||||
fileName := filepath.Base(f.Name)
|
||||
|
||||
if !tb.addSamples && utils.IsSampleFile(f.AbsolutePath) {
|
||||
skippedSamples++
|
||||
continue
|
||||
}
|
||||
if !cfg.IsAllowedFile(fileName) {
|
||||
skippedFileType++
|
||||
continue
|
||||
}
|
||||
|
||||
if !cfg.IsSizeAllowed(f.Size) {
|
||||
skippedSize++
|
||||
continue
|
||||
}
|
||||
|
||||
validFiles++
|
||||
file := types.File{
|
||||
TorrentId: t.Id,
|
||||
Id: strconv.Itoa(f.Id),
|
||||
Name: fileName,
|
||||
Size: f.Size,
|
||||
Path: f.Name,
|
||||
}
|
||||
|
||||
// For downloaded torrents, set a placeholder link to indicate file is available
|
||||
if data.DownloadFinished {
|
||||
file.Link = fmt.Sprintf("torbox://%s/%d", t.Id, f.Id)
|
||||
filesWithLinks++
|
||||
}
|
||||
|
||||
t.Files[fileName] = file
|
||||
}
|
||||
|
||||
// Log a file-processing summary at debug level
|
||||
tb.logger.Debug().
|
||||
Str("torrent_id", t.Id).
|
||||
Str("torrent_name", t.Name).
|
||||
Bool("download_finished", data.DownloadFinished).
|
||||
Str("status", t.Status).
|
||||
Int("total_files", totalFiles).
|
||||
Int("valid_files", validFiles).
|
||||
Int("final_file_count", len(t.Files)).
|
||||
Msg("Torrent file processing completed")
|
||||
var cleanPath string
|
||||
if len(t.Files) > 0 {
|
||||
cleanPath = path.Clean(data.Files[0].Name)
|
||||
} else {
|
||||
cleanPath = path.Clean(data.Name)
|
||||
}
|
||||
|
||||
t.OriginalFilename = strings.Split(cleanPath, "/")[0]
|
||||
t.Debrid = tb.name
|
||||
|
||||
return t, nil
|
||||
}
|
||||
|
||||
func (tb *Torbox) UpdateTorrent(t *types.Torrent) error {
|
||||
url := fmt.Sprintf("%s/api/torrents/mylist/?id=%s", tb.Host, t.Id)
|
||||
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
||||
resp, err := tb.client.MakeRequest(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var res InfoResponse
|
||||
err = json.Unmarshal(resp, &res)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
data := res.Data
if data == nil {
return fmt.Errorf("error getting torrent")
}
name := data.Name
|
||||
|
||||
t.Name = name
|
||||
t.Bytes = data.Size
|
||||
t.Folder = name
|
||||
t.Progress = data.Progress * 100
|
||||
t.Status = tb.getTorboxStatus(data.DownloadState, data.DownloadFinished)
|
||||
t.Speed = data.DownloadSpeed
|
||||
t.Seeders = data.Seeds
|
||||
t.Filename = name
|
||||
t.OriginalFilename = name
|
||||
t.MountPath = tb.MountPath
|
||||
t.Debrid = tb.name
|
||||
|
||||
// Clear existing files map to rebuild it
|
||||
t.Files = make(map[string]types.File)
|
||||
|
||||
cfg := config.Get()
|
||||
validFiles := 0
|
||||
filesWithLinks := 0
|
||||
|
||||
for _, f := range data.Files {
|
||||
fileName := filepath.Base(f.Name)
|
||||
|
||||
if !tb.addSamples && utils.IsSampleFile(f.AbsolutePath) {
|
||||
continue
|
||||
}
|
||||
|
||||
if !cfg.IsAllowedFile(fileName) {
|
||||
continue
|
||||
}
|
||||
|
||||
if !cfg.IsSizeAllowed(f.Size) {
|
||||
continue
|
||||
}
|
||||
|
||||
validFiles++
|
||||
file := types.File{
|
||||
TorrentId: t.Id,
|
||||
Id: strconv.Itoa(f.Id),
|
||||
Name: fileName,
|
||||
Size: f.Size,
|
||||
Path: fileName,
|
||||
}
|
||||
|
||||
// For downloaded torrents, set a placeholder link to indicate file is available
|
||||
if data.DownloadFinished {
|
||||
file.Link = fmt.Sprintf("torbox://%s/%s", t.Id, strconv.Itoa(f.Id))
|
||||
filesWithLinks++
|
||||
}
|
||||
|
||||
t.Files[fileName] = file
|
||||
}
|
||||
|
||||
var cleanPath string
|
||||
if len(t.Files) > 0 {
|
||||
cleanPath = path.Clean(data.Files[0].Name)
|
||||
} else {
|
||||
cleanPath = path.Clean(data.Name)
|
||||
}
|
||||
|
||||
t.OriginalFilename = strings.Split(cleanPath, "/")[0]
|
||||
t.Debrid = tb.name
|
||||
return nil
|
||||
}
|
||||
|
||||
func (tb *Torbox) CheckStatus(torrent *types.Torrent) (*types.Torrent, error) {
|
||||
for {
|
||||
err := tb.UpdateTorrent(torrent)
|
||||
|
||||
if err != nil || torrent == nil {
|
||||
return torrent, err
|
||||
}
|
||||
status := torrent.Status
|
||||
if status == "downloaded" {
|
||||
tb.logger.Info().Msgf("Torrent: %s downloaded", torrent.Name)
|
||||
return torrent, nil
|
||||
} else if utils.Contains(tb.GetDownloadingStatus(), status) {
|
||||
if !torrent.DownloadUncached {
|
||||
return torrent, fmt.Errorf("torrent: %s not cached", torrent.Name)
|
||||
}
|
||||
// Break out of the loop if the torrent is downloading.
|
||||
// This is necessary to prevent infinite loop since we moved to sync downloading and async processing
|
||||
return torrent, nil
|
||||
} else {
|
||||
return torrent, fmt.Errorf("torrent: %s has error", torrent.Name)
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
func (tb *Torbox) DeleteTorrent(torrentId string) error {
|
||||
url := fmt.Sprintf("%s/api/torrents/controltorrent/%s", tb.Host, torrentId)
|
||||
payload := map[string]string{"torrent_id": torrentId, "action": "Delete"}
|
||||
jsonPayload, _ := json.Marshal(payload)
|
||||
req, _ := http.NewRequest(http.MethodDelete, url, bytes.NewBuffer(jsonPayload))
|
||||
if _, err := tb.client.MakeRequest(req); err != nil {
|
||||
return err
|
||||
}
|
||||
tb.logger.Info().Msgf("Torrent %s deleted from Torbox", torrentId)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (tb *Torbox) GetFileDownloadLinks(t *types.Torrent) error {
|
||||
filesCh := make(chan types.File, len(t.Files))
|
||||
linkCh := make(chan *types.DownloadLink, len(t.Files)) // buffered so workers never block before the collection loops below run
|
||||
errCh := make(chan error, len(t.Files))
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(len(t.Files))
|
||||
for _, file := range t.Files {
file := file // copy the loop variable so each goroutine works on its own value (needed before Go 1.22)
go func() {
|
||||
defer wg.Done()
|
||||
link, err := tb.GetDownloadLink(t, &file)
|
||||
if err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
if link != nil {
|
||||
linkCh <- link
|
||||
file.DownloadLink = link
|
||||
}
|
||||
filesCh <- file
|
||||
}()
|
||||
}
|
||||
go func() {
|
||||
wg.Wait()
|
||||
close(filesCh)
|
||||
close(linkCh)
|
||||
close(errCh)
|
||||
}()
|
||||
|
||||
// Collect results
|
||||
files := make(map[string]types.File, len(t.Files))
|
||||
for file := range filesCh {
|
||||
files[file.Name] = file
|
||||
}
|
||||
|
||||
// Collect download links
|
||||
for link := range linkCh {
|
||||
if link != nil {
|
||||
tb.accounts.SetDownloadLink(link.Link, link)
|
||||
}
|
||||
}
|
||||
|
||||
// Check for errors
|
||||
for err := range errCh {
|
||||
if err != nil {
|
||||
return err // Return the first error encountered
|
||||
}
|
||||
}
|
||||
|
||||
t.Files = files
|
||||
return nil
|
||||
}
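A stripped-down sketch of the fan-out/fan-in shape used above, with every channel buffered so the workers can finish before the collection loops run; the item names and the stand-in for the per-file API call are illustrative, not taken from the repository.

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	items := []string{"a.mkv", "b.mkv", "c.mkv"}

	results := make(chan string, len(items)) // buffered: senders never block
	errs := make(chan error, len(items))

	var wg sync.WaitGroup
	wg.Add(len(items))
	for _, item := range items {
		item := item // per-iteration copy (needed before Go 1.22)
		go func() {
			defer wg.Done()
			// Stand-in for a per-file call such as GetDownloadLink.
			results <- "link-for-" + item
		}()
	}

	// Close the channels once every worker has finished.
	go func() {
		wg.Wait()
		close(results)
		close(errs)
	}()

	for r := range results {
		fmt.Println(r)
	}
	for err := range errs {
		fmt.Println("error:", err)
	}
}
```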
|
||||
|
||||
func (tb *Torbox) GetDownloadLink(t *types.Torrent, file *types.File) (*types.DownloadLink, error) {
|
||||
url := fmt.Sprintf("%s/api/torrents/requestdl/", tb.Host)
|
||||
query := gourl.Values{}
|
||||
query.Add("torrent_id", t.Id)
|
||||
query.Add("token", tb.APIKey)
|
||||
query.Add("file_id", file.Id)
|
||||
url += "?" + query.Encode()
|
||||
|
||||
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
||||
resp, err := tb.client.MakeRequest(req)
|
||||
if err != nil {
|
||||
tb.logger.Error().
|
||||
Err(err).
|
||||
Str("torrent_id", t.Id).
|
||||
Str("file_id", file.Id).
|
||||
Msg("Failed to make request to Torbox API")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var data DownloadLinksResponse
|
||||
if err = json.Unmarshal(resp, &data); err != nil {
|
||||
tb.logger.Error().
|
||||
Err(err).
|
||||
Str("torrent_id", t.Id).
|
||||
Str("file_id", file.Id).
|
||||
Msg("Failed to unmarshal Torbox API response")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if data.Data == nil {
|
||||
tb.logger.Error().
|
||||
Str("torrent_id", t.Id).
|
||||
Str("file_id", file.Id).
|
||||
Bool("success", data.Success).
|
||||
Interface("error", data.Error).
|
||||
Str("detail", data.Detail).
|
||||
Msg("Torbox API returned no data")
|
||||
return nil, fmt.Errorf("error getting download links")
|
||||
}
|
||||
|
||||
link := *data.Data
|
||||
if link == "" {
|
||||
tb.logger.Error().
|
||||
Str("torrent_id", t.Id).
|
||||
Str("file_id", file.Id).
|
||||
Msg("Torbox API returned empty download link")
|
||||
return nil, fmt.Errorf("error getting download links")
|
||||
}
|
||||
|
||||
now := time.Now()
|
||||
downloadLink := &types.DownloadLink{
|
||||
Link: file.Link,
|
||||
DownloadLink: link,
|
||||
Id: file.Id,
|
||||
Generated: now,
|
||||
ExpiresAt: now.Add(tb.autoExpiresLinksAfter),
|
||||
}
|
||||
|
||||
return downloadLink, nil
|
||||
}
|
||||
|
||||
func (tb *Torbox) GetDownloadingStatus() []string {
|
||||
return []string{"downloading"}
|
||||
}
|
||||
|
||||
func (tb *Torbox) GetTorrents() ([]*types.Torrent, error) {
|
||||
url := fmt.Sprintf("%s/api/torrents/mylist", tb.Host)
|
||||
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
||||
resp, err := tb.client.MakeRequest(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var res TorrentsListResponse
|
||||
err = json.Unmarshal(resp, &res)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if !res.Success || res.Data == nil {
|
||||
return nil, fmt.Errorf("torbox API error: %v", res.Error)
|
||||
}
|
||||
|
||||
torrents := make([]*types.Torrent, 0, len(*res.Data))
|
||||
cfg := config.Get()
|
||||
|
||||
for _, data := range *res.Data {
|
||||
t := &types.Torrent{
|
||||
Id: strconv.Itoa(data.Id),
|
||||
Name: data.Name,
|
||||
Bytes: data.Size,
|
||||
Folder: data.Name,
|
||||
Progress: data.Progress * 100,
|
||||
Status: tb.getTorboxStatus(data.DownloadState, data.DownloadFinished),
|
||||
Speed: data.DownloadSpeed,
|
||||
Seeders: data.Seeds,
|
||||
Filename: data.Name,
|
||||
OriginalFilename: data.Name,
|
||||
MountPath: tb.MountPath,
|
||||
Debrid: tb.name,
|
||||
Files: make(map[string]types.File),
|
||||
Added: data.CreatedAt.Format(time.RFC3339),
|
||||
InfoHash: data.Hash,
|
||||
}
|
||||
|
||||
// Process files
|
||||
for _, f := range data.Files {
|
||||
fileName := filepath.Base(f.Name)
|
||||
if !tb.addSamples && utils.IsSampleFile(f.AbsolutePath) {
|
||||
// Skip sample files
|
||||
continue
|
||||
}
|
||||
if !cfg.IsAllowedFile(fileName) {
|
||||
continue
|
||||
}
|
||||
if !cfg.IsSizeAllowed(f.Size) {
|
||||
continue
|
||||
}
|
||||
file := types.File{
|
||||
TorrentId: t.Id,
|
||||
Id: strconv.Itoa(f.Id),
|
||||
Name: fileName,
|
||||
Size: f.Size,
|
||||
Path: f.Name,
|
||||
}
|
||||
|
||||
// For downloaded torrents, set a placeholder link to indicate file is available
|
||||
if data.DownloadFinished {
|
||||
file.Link = fmt.Sprintf("torbox://%s/%d", t.Id, f.Id)
|
||||
}
|
||||
|
||||
t.Files[fileName] = file
|
||||
}
|
||||
|
||||
// Set original filename based on first file or torrent name
|
||||
var cleanPath string
|
||||
if len(t.Files) > 0 {
|
||||
cleanPath = path.Clean(data.Files[0].Name)
|
||||
} else {
|
||||
cleanPath = path.Clean(data.Name)
|
||||
}
|
||||
t.OriginalFilename = strings.Split(cleanPath, "/")[0]
|
||||
|
||||
torrents = append(torrents, t)
|
||||
}
|
||||
|
||||
return torrents, nil
|
||||
}
|
||||
|
||||
func (tb *Torbox) GetDownloadUncached() bool {
|
||||
return tb.DownloadUncached
|
||||
}
|
||||
|
||||
func (tb *Torbox) GetDownloadLinks() (map[string]*types.DownloadLink, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (tb *Torbox) CheckLink(link string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (tb *Torbox) GetMountPath() string {
|
||||
return tb.MountPath
|
||||
}
|
||||
|
||||
func (tb *Torbox) DeleteDownloadLink(linkId string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (tb *Torbox) GetAvailableSlots() (int, error) {
|
||||
//TODO: Implement the logic to check available slots for Torbox
|
||||
return 0, fmt.Errorf("not implemented")
|
||||
}
|
||||
|
||||
func (tb *Torbox) Accounts() *types.Accounts {
|
||||
return tb.accounts
|
||||
}
|
||||
77
pkg/debrid/providers/torbox/types.go
Normal file
@@ -0,0 +1,77 @@
|
||||
package torbox
|
||||
|
||||
import "time"
|
||||
|
||||
type APIResponse[T any] struct {
|
||||
Success bool `json:"success"`
|
||||
Error any `json:"error"`
|
||||
Detail string `json:"detail"`
|
||||
Data *T `json:"data"` // Use pointer to allow nil
|
||||
}
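To show how this generic envelope behaves when decoding, a small self-contained sketch; the JSON payload is invented for illustration and is not taken from the Torbox API documentation.

```go
package main

import (
	"encoding/json"
	"fmt"
)

type APIResponse[T any] struct {
	Success bool   `json:"success"`
	Error   any    `json:"error"`
	Detail  string `json:"detail"`
	Data    *T     `json:"data"` // pointer so a missing "data" field stays nil
}

func main() {
	// Hypothetical payload shaped like the createtorrent response handled above.
	raw := []byte(`{"success": true, "detail": "ok", "data": {"torrent_id": 42, "hash": "abc"}}`)

	var res APIResponse[struct {
		Id   int    `json:"torrent_id"`
		Hash string `json:"hash"`
	}]
	if err := json.Unmarshal(raw, &res); err != nil {
		panic(err)
	}
	if res.Data != nil {
		fmt.Println(res.Data.Id, res.Data.Hash) // 42 abc
	}
}
```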
|
||||
|
||||
type AvailableResponse APIResponse[map[string]struct {
|
||||
Name string `json:"name"`
|
||||
Size int `json:"size"`
|
||||
Hash string `json:"hash"`
|
||||
}]
|
||||
|
||||
type AddMagnetResponse APIResponse[struct {
|
||||
Id int `json:"torrent_id"`
|
||||
Hash string `json:"hash"`
|
||||
}]
|
||||
|
||||
type torboxInfo struct {
|
||||
Id int `json:"id"`
|
||||
AuthId string `json:"auth_id"`
|
||||
Server int `json:"server"`
|
||||
Hash string `json:"hash"`
|
||||
Name string `json:"name"`
|
||||
Magnet interface{} `json:"magnet"`
|
||||
Size int64 `json:"size"`
|
||||
Active bool `json:"active"`
|
||||
CreatedAt time.Time `json:"created_at"`
|
||||
UpdatedAt time.Time `json:"updated_at"`
|
||||
DownloadState string `json:"download_state"`
|
||||
Seeds int `json:"seeds"`
|
||||
Peers int `json:"peers"`
|
||||
Ratio float64 `json:"ratio"`
|
||||
Progress float64 `json:"progress"`
|
||||
DownloadSpeed int64 `json:"download_speed"`
|
||||
UploadSpeed int `json:"upload_speed"`
|
||||
Eta int `json:"eta"`
|
||||
TorrentFile bool `json:"torrent_file"`
|
||||
ExpiresAt interface{} `json:"expires_at"`
|
||||
DownloadPresent bool `json:"download_present"`
|
||||
Files []struct {
|
||||
Id int `json:"id"`
|
||||
Md5 interface{} `json:"md5"`
|
||||
Hash string `json:"hash"`
|
||||
Name string `json:"name"`
|
||||
Size int64 `json:"size"`
|
||||
Zipped bool `json:"zipped"`
|
||||
S3Path string `json:"s3_path"`
|
||||
Infected bool `json:"infected"`
|
||||
Mimetype string `json:"mimetype"`
|
||||
ShortName string `json:"short_name"`
|
||||
AbsolutePath string `json:"absolute_path"`
|
||||
} `json:"files"`
|
||||
DownloadPath string `json:"download_path"`
|
||||
InactiveCheck int `json:"inactive_check"`
|
||||
Availability float64 `json:"availability"`
|
||||
DownloadFinished bool `json:"download_finished"`
|
||||
Tracker interface{} `json:"tracker"`
|
||||
TotalUploaded int `json:"total_uploaded"`
|
||||
TotalDownloaded int `json:"total_downloaded"`
|
||||
Cached bool `json:"cached"`
|
||||
Owner string `json:"owner"`
|
||||
SeedTorrent bool `json:"seed_torrent"`
|
||||
AllowZipped bool `json:"allow_zipped"`
|
||||
LongTermSeeding bool `json:"long_term_seeding"`
|
||||
TrackerMessage interface{} `json:"tracker_message"`
|
||||
}
|
||||
|
||||
type InfoResponse APIResponse[torboxInfo]
|
||||
|
||||
type DownloadLinksResponse APIResponse[string]
|
||||
|
||||
type TorrentsListResponse APIResponse[[]torboxInfo]
|
||||
@@ -1,287 +0,0 @@
|
||||
package debrid
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"goBlack/common"
|
||||
"goBlack/pkg/debrid/structs"
|
||||
"log"
|
||||
"net/http"
|
||||
gourl "net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type RealDebrid struct {
|
||||
Host string `json:"host"`
|
||||
APIKey string
|
||||
DownloadUncached bool
|
||||
client *common.RLHTTPClient
|
||||
cache *common.Cache
|
||||
MountPath string
|
||||
logger *log.Logger
|
||||
}
|
||||
|
||||
func (r *RealDebrid) GetMountPath() string {
|
||||
return r.MountPath
|
||||
}
|
||||
|
||||
func (r *RealDebrid) GetName() string {
|
||||
return "realdebrid"
|
||||
}
|
||||
|
||||
func (r *RealDebrid) GetLogger() *log.Logger {
|
||||
return r.logger
|
||||
}
|
||||
|
||||
func GetTorrentFiles(data structs.RealDebridTorrentInfo) []TorrentFile {
|
||||
files := make([]TorrentFile, 0)
|
||||
for _, f := range data.Files {
|
||||
name := filepath.Base(f.Path)
|
||||
if (!common.RegexMatch(common.VIDEOMATCH, name) &&
|
||||
!common.RegexMatch(common.SUBMATCH, name) &&
|
||||
!common.RegexMatch(common.MUSICMATCH, name)) || common.RegexMatch(common.SAMPLEMATCH, name) {
|
||||
continue
|
||||
}
|
||||
fileId := f.ID
|
||||
file := &TorrentFile{
|
||||
Name: name,
|
||||
Path: name,
|
||||
Size: int64(f.Bytes),
|
||||
Id: strconv.Itoa(fileId),
|
||||
}
|
||||
files = append(files, *file)
|
||||
}
|
||||
return files
|
||||
}
|
||||
|
||||
func (r *RealDebrid) IsAvailable(infohashes []string) map[string]bool {
|
||||
// Check if the infohashes are available in the local cache
|
||||
hashes, result := GetLocalCache(infohashes, r.cache)
|
||||
|
||||
if len(hashes) == 0 {
|
||||
// Either all the infohashes are locally cached or none are
|
||||
r.cache.AddMultiple(result)
|
||||
return result
|
||||
}
|
||||
|
||||
// Divide hashes into groups of 200
|
||||
for i := 0; i < len(hashes); i += 200 {
|
||||
end := i + 200
|
||||
if end > len(hashes) {
|
||||
end = len(hashes)
|
||||
}
|
||||
|
||||
// Filter out empty strings
|
||||
validHashes := make([]string, 0, end-i)
|
||||
for _, hash := range hashes[i:end] {
|
||||
if hash != "" {
|
||||
validHashes = append(validHashes, hash)
|
||||
}
|
||||
}
|
||||
|
||||
// If no valid hashes in this batch, continue to the next batch
|
||||
if len(validHashes) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
hashStr := strings.Join(validHashes, "/")
|
||||
url := fmt.Sprintf("%s/torrents/instantAvailability/%s", r.Host, hashStr)
|
||||
resp, err := r.client.MakeRequest(http.MethodGet, url, nil)
|
||||
if err != nil {
|
||||
log.Println("Error checking availability:", err)
|
||||
return result
|
||||
}
|
||||
var data structs.RealDebridAvailabilityResponse
|
||||
err = json.Unmarshal(resp, &data)
|
||||
if err != nil {
|
||||
log.Println("Error marshalling availability:", err)
|
||||
return result
|
||||
}
|
||||
for _, h := range hashes[i:end] {
|
||||
hosters, exists := data[strings.ToLower(h)]
|
||||
if exists && len(hosters.Rd) > 0 {
|
||||
result[h] = true
|
||||
}
|
||||
}
|
||||
}
|
||||
r.cache.AddMultiple(result) // Add the results to the cache
|
||||
return result
|
||||
}
|
||||
|
||||
func (r *RealDebrid) SubmitMagnet(torrent *Torrent) (*Torrent, error) {
|
||||
url := fmt.Sprintf("%s/torrents/addMagnet", r.Host)
|
||||
payload := gourl.Values{
|
||||
"magnet": {torrent.Magnet.Link},
|
||||
}
|
||||
var data structs.RealDebridAddMagnetSchema
|
||||
resp, err := r.client.MakeRequest(http.MethodPost, url, strings.NewReader(payload.Encode()))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = json.Unmarshal(resp, &data)
|
||||
log.Printf("Torrent: %s added with id: %s\n", torrent.Name, data.Id)
|
||||
torrent.Id = data.Id
|
||||
|
||||
return torrent, nil
|
||||
}
|
||||
|
||||
func (r *RealDebrid) GetTorrent(id string) (*Torrent, error) {
|
||||
torrent := &Torrent{}
|
||||
url := fmt.Sprintf("%s/torrents/info/%s", r.Host, id)
|
||||
resp, err := r.client.MakeRequest(http.MethodGet, url, nil)
|
||||
if err != nil {
|
||||
return torrent, err
|
||||
}
|
||||
var data structs.RealDebridTorrentInfo
|
||||
err = json.Unmarshal(resp, &data)
|
||||
if err != nil {
|
||||
return torrent, err
|
||||
}
|
||||
name := common.RemoveInvalidChars(data.OriginalFilename)
|
||||
torrent.Id = id
|
||||
torrent.Name = name
|
||||
torrent.Bytes = data.Bytes
|
||||
torrent.Folder = name
|
||||
torrent.Progress = data.Progress
|
||||
torrent.Status = data.Status
|
||||
torrent.Speed = data.Speed
|
||||
torrent.Seeders = data.Seeders
|
||||
torrent.Filename = data.Filename
|
||||
torrent.OriginalFilename = data.OriginalFilename
|
||||
torrent.Links = data.Links
|
||||
files := GetTorrentFiles(data)
|
||||
torrent.Files = files
|
||||
return torrent, nil
|
||||
}
|
||||
|
||||
func (r *RealDebrid) CheckStatus(torrent *Torrent, isSymlink bool) (*Torrent, error) {
|
||||
url := fmt.Sprintf("%s/torrents/info/%s", r.Host, torrent.Id)
|
||||
for {
|
||||
resp, err := r.client.MakeRequest(http.MethodGet, url, nil)
|
||||
if err != nil {
|
||||
log.Println("ERROR Checking file: ", err)
|
||||
return torrent, err
|
||||
}
|
||||
var data structs.RealDebridTorrentInfo
|
||||
err = json.Unmarshal(resp, &data)
|
||||
status := data.Status
|
||||
name := common.RemoveInvalidChars(data.OriginalFilename)
|
||||
torrent.Name = name // Important because some magnet changes the name
|
||||
torrent.Folder = name
|
||||
torrent.Filename = data.Filename
|
||||
torrent.OriginalFilename = data.OriginalFilename
|
||||
torrent.Bytes = data.Bytes
|
||||
torrent.Progress = data.Progress
|
||||
torrent.Speed = data.Speed
|
||||
torrent.Seeders = data.Seeders
|
||||
torrent.Links = data.Links
|
||||
torrent.Status = status
|
||||
if status == "error" || status == "dead" || status == "magnet_error" {
|
||||
return torrent, fmt.Errorf("torrent: %s has error", torrent.Name)
|
||||
} else if status == "waiting_files_selection" {
|
||||
files := GetTorrentFiles(data)
|
||||
torrent.Files = files
|
||||
if len(files) == 0 {
|
||||
return torrent, fmt.Errorf("no video files found")
|
||||
}
|
||||
filesId := make([]string, 0)
|
||||
for _, f := range files {
|
||||
filesId = append(filesId, f.Id)
|
||||
}
|
||||
p := gourl.Values{
|
||||
"files": {strings.Join(filesId, ",")},
|
||||
}
|
||||
payload := strings.NewReader(p.Encode())
|
||||
_, err = r.client.MakeRequest(http.MethodPost, fmt.Sprintf("%s/torrents/selectFiles/%s", r.Host, torrent.Id), payload)
|
||||
if err != nil {
|
||||
return torrent, err
|
||||
}
|
||||
} else if status == "downloaded" {
|
||||
files := GetTorrentFiles(data)
|
||||
torrent.Files = files
|
||||
log.Printf("Torrent: %s downloaded to RD\n", torrent.Name)
|
||||
if !isSymlink {
|
||||
err = r.GetDownloadLinks(torrent)
|
||||
if err != nil {
|
||||
return torrent, err
|
||||
}
|
||||
}
|
||||
|
||||
break
|
||||
} else if status == "downloading" {
|
||||
if !r.DownloadUncached {
|
||||
go r.DeleteTorrent(torrent)
|
||||
return torrent, fmt.Errorf("torrent: %s not cached", torrent.Name)
|
||||
}
|
||||
// Break out of the loop if the torrent is downloading.
|
||||
// This is necessary to prevent infinite loop since we moved to sync downloading and async processing
|
||||
break
|
||||
}
|
||||
|
||||
}
|
||||
return torrent, nil
|
||||
}
|
||||
|
||||
func (r *RealDebrid) DeleteTorrent(torrent *Torrent) {
|
||||
url := fmt.Sprintf("%s/torrents/delete/%s", r.Host, torrent.Id)
|
||||
_, err := r.client.MakeRequest(http.MethodDelete, url, nil)
|
||||
if err == nil {
|
||||
r.logger.Printf("Torrent: %s deleted\n", torrent.Name)
|
||||
} else {
|
||||
r.logger.Printf("Error deleting torrent: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func (r *RealDebrid) GetDownloadLinks(torrent *Torrent) error {
|
||||
url := fmt.Sprintf("%s/unrestrict/link/", r.Host)
|
||||
downloadLinks := make([]TorrentDownloadLinks, 0)
|
||||
for _, link := range torrent.Links {
|
||||
if link == "" {
|
||||
continue
|
||||
}
|
||||
payload := gourl.Values{
|
||||
"link": {link},
|
||||
}
|
||||
resp, err := r.client.MakeRequest(http.MethodPost, url, strings.NewReader(payload.Encode()))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var data structs.RealDebridUnrestrictResponse
|
||||
if err = json.Unmarshal(resp, &data); err != nil {
|
||||
return err
|
||||
}
|
||||
download := TorrentDownloadLinks{
|
||||
Link: data.Link,
|
||||
Filename: data.Filename,
|
||||
DownloadLink: data.Download,
|
||||
}
|
||||
downloadLinks = append(downloadLinks, download)
|
||||
}
|
||||
torrent.DownloadLinks = downloadLinks
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *RealDebrid) GetDownloadUncached() bool {
|
||||
return r.DownloadUncached
|
||||
}
|
||||
|
||||
func NewRealDebrid(dc common.DebridConfig, cache *common.Cache) *RealDebrid {
|
||||
rl := common.ParseRateLimit(dc.RateLimit)
|
||||
headers := map[string]string{
|
||||
"Authorization": fmt.Sprintf("Bearer %s", dc.APIKey),
|
||||
}
|
||||
client := common.NewRLHTTPClient(rl, headers)
|
||||
logger := common.NewLogger(dc.Name, os.Stdout)
|
||||
return &RealDebrid{
|
||||
Host: dc.Host,
|
||||
APIKey: dc.APIKey,
|
||||
DownloadUncached: dc.DownloadUncached,
|
||||
client: client,
|
||||
cache: cache,
|
||||
MountPath: dc.Folder,
|
||||
logger: logger,
|
||||
}
|
||||
}
|
||||
907
pkg/debrid/store/cache.go
Normal file
@@ -0,0 +1,907 @@
|
||||
package store
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"cmp"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/sirrobot01/decypharr/pkg/rclone"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/sirrobot01/decypharr/pkg/debrid/types"
|
||||
|
||||
"encoding/json"
|
||||
_ "time/tzdata"
|
||||
|
||||
"github.com/go-co-op/gocron/v2"
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/sirrobot01/decypharr/internal/config"
|
||||
"github.com/sirrobot01/decypharr/internal/logger"
|
||||
"github.com/sirrobot01/decypharr/internal/utils"
|
||||
)
|
||||
|
||||
type WebDavFolderNaming string
|
||||
|
||||
const (
|
||||
WebDavUseFileName WebDavFolderNaming = "filename"
|
||||
WebDavUseOriginalName WebDavFolderNaming = "original"
|
||||
WebDavUseFileNameNoExt WebDavFolderNaming = "filename_no_ext"
|
||||
WebDavUseOriginalNameNoExt WebDavFolderNaming = "original_no_ext"
|
||||
WebDavUseID WebDavFolderNaming = "id"
|
||||
WebdavUseHash WebDavFolderNaming = "infohash"
|
||||
)
|
||||
|
||||
type CachedTorrent struct {
|
||||
*types.Torrent
|
||||
AddedOn time.Time `json:"added_on"`
|
||||
IsComplete bool `json:"is_complete"`
|
||||
Bad bool `json:"bad"`
|
||||
}
|
||||
|
||||
func (c CachedTorrent) copy() CachedTorrent {
|
||||
return CachedTorrent{
|
||||
Torrent: c.Torrent,
|
||||
AddedOn: c.AddedOn,
|
||||
IsComplete: c.IsComplete,
|
||||
Bad: c.Bad,
|
||||
}
|
||||
}
|
||||
|
||||
type RepairType string
|
||||
|
||||
const (
|
||||
RepairTypeReinsert RepairType = "reinsert"
|
||||
RepairTypeDelete RepairType = "delete"
|
||||
)
|
||||
|
||||
type RepairRequest struct {
|
||||
Type RepairType
|
||||
TorrentID string
|
||||
Priority int
|
||||
FileName string
|
||||
}
|
||||
|
||||
type Cache struct {
|
||||
dir string
|
||||
client types.Client
|
||||
logger zerolog.Logger
|
||||
|
||||
torrents *torrentCache
|
||||
invalidDownloadLinks sync.Map
|
||||
folderNaming WebDavFolderNaming
|
||||
|
||||
listingDebouncer *utils.Debouncer[bool]
|
||||
// monitors
|
||||
repairRequest sync.Map
|
||||
failedToReinsert sync.Map
|
||||
downloadLinkRequests sync.Map
|
||||
|
||||
// repair
|
||||
repairChan chan RepairRequest
|
||||
|
||||
// readiness
|
||||
ready chan struct{}
|
||||
|
||||
// config
|
||||
workers int
|
||||
torrentRefreshInterval string
|
||||
downloadLinksRefreshInterval string
|
||||
|
||||
// refresh mutex
|
||||
downloadLinksRefreshMu sync.RWMutex // for refreshing download links
|
||||
torrentsRefreshMu sync.RWMutex // for refreshing torrents
|
||||
|
||||
scheduler gocron.Scheduler
|
||||
cetScheduler gocron.Scheduler
|
||||
|
||||
saveSemaphore chan struct{}
|
||||
|
||||
config config.Debrid
|
||||
customFolders []string
|
||||
mounter *rclone.Mount
|
||||
}
|
||||
|
||||
func NewDebridCache(dc config.Debrid, client types.Client, mounter *rclone.Mount) *Cache {
|
||||
cfg := config.Get()
|
||||
cet, err := time.LoadLocation("CET")
|
||||
if err != nil {
|
||||
cet, err = time.LoadLocation("Europe/Berlin") // Fallback to Berlin if CET fails
|
||||
if err != nil {
|
||||
cet = time.FixedZone("CET", 1*60*60) // Fallback to a fixed CET zone
|
||||
}
|
||||
}
|
||||
cetSc, err := gocron.NewScheduler(gocron.WithLocation(cet))
|
||||
if err != nil {
|
||||
// If we can't create a CET scheduler, fallback to local time
|
||||
cetSc, _ = gocron.NewScheduler(gocron.WithLocation(time.Local))
|
||||
}
|
||||
scheduler, err := gocron.NewScheduler(gocron.WithLocation(time.Local))
|
||||
if err != nil {
|
||||
// If we can't create a local scheduler, fallback to CET
|
||||
scheduler = cetSc
|
||||
}
|
||||
|
||||
var customFolders []string
|
||||
dirFilters := map[string][]directoryFilter{}
|
||||
for name, value := range dc.Directories {
|
||||
for filterType, v := range value.Filters {
|
||||
df := directoryFilter{filterType: filterType, value: v}
|
||||
switch filterType {
|
||||
case filterByRegex, filterByNotRegex:
|
||||
df.regex = regexp.MustCompile(v)
|
||||
case filterBySizeGT, filterBySizeLT:
|
||||
df.sizeThreshold, _ = config.ParseSize(v)
|
||||
case filterBLastAdded:
|
||||
df.ageThreshold, _ = time.ParseDuration(v)
|
||||
}
|
||||
dirFilters[name] = append(dirFilters[name], df)
|
||||
}
|
||||
customFolders = append(customFolders, name)
|
||||
|
||||
}
|
||||
_log := logger.New(fmt.Sprintf("%s-webdav", client.Name()))
|
||||
c := &Cache{
|
||||
dir: filepath.Join(cfg.Path, "cache", dc.Name), // path to save cache files
|
||||
|
||||
torrents: newTorrentCache(dirFilters),
|
||||
client: client,
|
||||
logger: _log,
|
||||
workers: dc.Workers,
|
||||
torrentRefreshInterval: dc.TorrentsRefreshInterval,
|
||||
downloadLinksRefreshInterval: dc.DownloadLinksRefreshInterval,
|
||||
folderNaming: WebDavFolderNaming(dc.FolderNaming),
|
||||
saveSemaphore: make(chan struct{}, 50),
|
||||
cetScheduler: cetSc,
|
||||
scheduler: scheduler,
|
||||
|
||||
config: dc,
|
||||
customFolders: customFolders,
|
||||
mounter: mounter,
|
||||
|
||||
ready: make(chan struct{}),
|
||||
}
|
||||
|
||||
c.listingDebouncer = utils.NewDebouncer[bool](100*time.Millisecond, func(refreshRclone bool) {
|
||||
c.RefreshListings(refreshRclone)
|
||||
})
|
||||
return c
|
||||
}
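The listing debouncer set up above coalesces bursts of refresh requests into a single `RefreshListings` call. The `utils.Debouncer` implementation is not part of this diff, so the following is only a minimal sketch of the idea (an assumption about how it behaves, not the actual code):

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// debouncer: many Call()s within the wait window collapse into one invocation
// of fn with the most recent value.
type debouncer[T any] struct {
	mu    sync.Mutex
	timer *time.Timer
	wait  time.Duration
	fn    func(T)
}

func newDebouncer[T any](wait time.Duration, fn func(T)) *debouncer[T] {
	return &debouncer[T]{wait: wait, fn: fn}
}

func (d *debouncer[T]) Call(v T) {
	d.mu.Lock()
	defer d.mu.Unlock()
	if d.timer != nil {
		d.timer.Stop() // restart the window on every call
	}
	d.timer = time.AfterFunc(d.wait, func() { d.fn(v) })
}

func main() {
	d := newDebouncer[bool](100*time.Millisecond, func(refresh bool) {
		fmt.Println("refresh listings, rclone:", refresh)
	})
	for i := 0; i < 10; i++ {
		d.Call(false) // ten rapid calls...
	}
	time.Sleep(200 * time.Millisecond) // ...produce a single refresh
}
```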
|
||||
|
||||
func (c *Cache) IsReady() chan struct{} {
|
||||
return c.ready
|
||||
}
|
||||
|
||||
func (c *Cache) StreamWithRclone() bool {
|
||||
return c.config.ServeFromRclone
|
||||
}
|
||||
|
||||
// Reset clears all internal state so the Cache can be reused without leaks.
|
||||
// Call this after stopping the old Cache (so no goroutines are holding references),
|
||||
// and before you discard the instance on a restart.
|
||||
func (c *Cache) Reset() {
|
||||
|
||||
// Unmount first
|
||||
if c.mounter != nil && c.mounter.IsMounted() {
|
||||
if err := c.mounter.Unmount(); err != nil {
|
||||
c.logger.Error().Err(err).Msgf("Failed to unmount %s", c.config.Name)
|
||||
} else {
|
||||
c.logger.Info().Msgf("Unmounted %s", c.config.Name)
|
||||
}
|
||||
}
|
||||
|
||||
if err := c.scheduler.StopJobs(); err != nil {
|
||||
c.logger.Error().Err(err).Msg("Failed to stop scheduler jobs")
|
||||
}
|
||||
|
||||
if err := c.scheduler.Shutdown(); err != nil {
|
||||
c.logger.Error().Err(err).Msg("Failed to stop scheduler")
|
||||
}
|
||||
|
||||
// Stop the listing debouncer
|
||||
c.listingDebouncer.Stop()
|
||||
|
||||
// Close the repair channel
|
||||
if c.repairChan != nil {
|
||||
close(c.repairChan)
|
||||
}
|
||||
|
||||
// 1. Reset torrent storage
|
||||
c.torrents.reset()
|
||||
|
||||
// 2. Clear any sync.Maps
|
||||
c.invalidDownloadLinks = sync.Map{}
|
||||
c.repairRequest = sync.Map{}
|
||||
c.failedToReinsert = sync.Map{}
|
||||
c.downloadLinkRequests = sync.Map{}
|
||||
|
||||
// 3. Rebuild the listing debouncer
|
||||
c.listingDebouncer = utils.NewDebouncer[bool](
|
||||
100*time.Millisecond,
|
||||
func(refreshRclone bool) {
|
||||
c.RefreshListings(refreshRclone)
|
||||
},
|
||||
)
|
||||
|
||||
// 4. Reset repair channel so the next Start() can spin it up
|
||||
c.repairChan = make(chan RepairRequest, 100)
|
||||
|
||||
// Reset the ready channel
|
||||
c.ready = make(chan struct{})
|
||||
}
|
||||
|
||||
func (c *Cache) Start(ctx context.Context) error {
|
||||
if err := os.MkdirAll(c.dir, 0755); err != nil {
|
||||
return fmt.Errorf("failed to create cache directory: %w", err)
|
||||
}
|
||||
|
||||
c.logger.Info().Msgf("Started indexing...")
|
||||
|
||||
if err := c.Sync(ctx); err != nil {
|
||||
return fmt.Errorf("failed to sync cache: %w", err)
|
||||
}
|
||||
// Fire the ready channel
|
||||
close(c.ready)
|
||||
c.logger.Info().Msgf("Indexing complete, %d torrents loaded", len(c.torrents.getAll()))
|
||||
|
||||
// initial download links
|
||||
go c.refreshDownloadLinks(ctx)
|
||||
|
||||
if err := c.StartSchedule(ctx); err != nil {
|
||||
c.logger.Error().Err(err).Msg("Failed to start cache worker")
|
||||
}
|
||||
|
||||
c.repairChan = make(chan RepairRequest, 100) // Initialize the repair channel, max 100 requests buffered
|
||||
go c.repairWorker(ctx)
|
||||
|
||||
cfg := config.Get()
|
||||
name := c.client.Name()
|
||||
addr := cfg.BindAddress + ":" + cfg.Port + cfg.URLBase + "webdav/" + name + "/"
|
||||
c.logger.Info().Msgf("%s WebDav server running at %s", name, addr)
|
||||
|
||||
if c.mounter != nil {
|
||||
if err := c.mounter.Mount(ctx); err != nil {
|
||||
c.logger.Error().Err(err).Msgf("Failed to mount %s", c.config.Name)
|
||||
}
|
||||
} else {
|
||||
c.logger.Warn().Msgf("Mounting is disabled for %s", c.config.Name)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Cache) load(ctx context.Context) (map[string]CachedTorrent, error) {
|
||||
mu := sync.Mutex{}
|
||||
|
||||
if err := os.MkdirAll(c.dir, 0755); err != nil {
|
||||
return nil, fmt.Errorf("failed to create cache directory: %w", err)
|
||||
}
|
||||
|
||||
files, err := os.ReadDir(c.dir)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read cache directory: %w", err)
|
||||
}
|
||||
|
||||
// Get only json files
|
||||
var jsonFiles []os.DirEntry
|
||||
for _, file := range files {
|
||||
if !file.IsDir() && filepath.Ext(file.Name()) == ".json" {
|
||||
jsonFiles = append(jsonFiles, file)
|
||||
}
|
||||
}
|
||||
|
||||
if len(jsonFiles) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Create channels with appropriate buffering
|
||||
workChan := make(chan os.DirEntry, min(c.workers, len(jsonFiles)))
|
||||
|
||||
// Create a wait group for workers
|
||||
var wg sync.WaitGroup
|
||||
|
||||
torrents := make(map[string]CachedTorrent, len(jsonFiles))
|
||||
|
||||
// Start workers
|
||||
for i := 0; i < c.workers; i++ {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
|
||||
for {
|
||||
file, ok := <-workChan
|
||||
if !ok {
|
||||
return // Channel closed, exit goroutine
|
||||
}
|
||||
|
||||
fileName := file.Name()
|
||||
filePath := filepath.Join(c.dir, fileName)
|
||||
data, err := os.ReadFile(filePath)
|
||||
if err != nil {
|
||||
c.logger.Error().Err(err).Msgf("Failed to read file: %s", filePath)
|
||||
continue
|
||||
}
|
||||
|
||||
var ct CachedTorrent
|
||||
if err := json.Unmarshal(data, &ct); err != nil {
|
||||
c.logger.Error().Err(err).Msgf("Failed to unmarshal file: %s", filePath)
|
||||
continue
|
||||
}
|
||||
|
||||
isComplete := true
|
||||
if len(ct.GetFiles()) != 0 {
|
||||
// Check that every file has a link; torrents with missing links are treated as incomplete and skipped.
|
||||
fs := make(map[string]types.File, len(ct.GetFiles()))
|
||||
for _, f := range ct.GetFiles() {
|
||||
if f.Link == "" {
|
||||
isComplete = false
|
||||
break
|
||||
}
|
||||
f.TorrentId = ct.Id
|
||||
fs[f.Name] = f
|
||||
}
|
||||
|
||||
if isComplete {
|
||||
|
||||
if addedOn, err := time.Parse(time.RFC3339, ct.Added); err == nil {
|
||||
ct.AddedOn = addedOn
|
||||
}
|
||||
ct.IsComplete = true
|
||||
ct.Files = fs
|
||||
ct.Name = path.Clean(ct.Name)
|
||||
mu.Lock()
|
||||
torrents[ct.Id] = ct
|
||||
mu.Unlock()
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// Feed work to workers
|
||||
feedLoop:
for _, file := range jsonFiles {
select {
case <-ctx.Done():
break feedLoop // context cancelled; a bare break would only exit the select
default:
workChan <- file
}
}
|
||||
|
||||
// Signal workers that no more work is coming
|
||||
close(workChan)
|
||||
|
||||
// Wait for all workers to complete
|
||||
wg.Wait()
|
||||
|
||||
return torrents, nil
|
||||
}
|
||||
|
||||
func (c *Cache) Sync(ctx context.Context) error {
|
||||
cachedTorrents, err := c.load(ctx)
|
||||
if err != nil {
|
||||
c.logger.Error().Err(err).Msg("Failed to load cache")
|
||||
}
|
||||
|
||||
torrents, err := c.client.GetTorrents()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to sync torrents: %v", err)
|
||||
}
|
||||
|
||||
totalTorrents := len(torrents)
|
||||
|
||||
c.logger.Info().Msgf("%d torrents found from %s", totalTorrents, c.client.Name())
|
||||
|
||||
newTorrents := make([]*types.Torrent, 0)
|
||||
idStore := make(map[string]struct{}, totalTorrents)
|
||||
for _, t := range torrents {
|
||||
idStore[t.Id] = struct{}{}
|
||||
if _, ok := cachedTorrents[t.Id]; !ok {
|
||||
newTorrents = append(newTorrents, t)
|
||||
}
|
||||
}
|
||||
|
||||
// Check for deleted torrents
|
||||
deletedTorrents := make([]string, 0)
|
||||
for _, t := range cachedTorrents {
|
||||
if _, ok := idStore[t.Id]; !ok {
|
||||
deletedTorrents = append(deletedTorrents, t.Id)
|
||||
}
|
||||
}
|
||||
|
||||
if len(deletedTorrents) > 0 {
|
||||
c.logger.Info().Msgf("Found %d deleted torrents", len(deletedTorrents))
|
||||
for _, id := range deletedTorrents {
|
||||
// Remove from cache and debrid service
|
||||
delete(cachedTorrents, id)
|
||||
// Remove the json file from disk
|
||||
c.removeFile(id, false)
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
// Write these torrents to the cache
|
||||
c.setTorrents(cachedTorrents, func() {
|
||||
c.listingDebouncer.Call(false)
|
||||
}) // Initial calls
|
||||
c.logger.Info().Msgf("Loaded %d torrents from cache", len(cachedTorrents))
|
||||
|
||||
if len(newTorrents) > 0 {
|
||||
c.logger.Info().Msgf("Found %d new torrents", len(newTorrents))
|
||||
if err := c.sync(ctx, newTorrents); err != nil {
|
||||
return fmt.Errorf("failed to sync torrents: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Cache) sync(ctx context.Context, torrents []*types.Torrent) error {
|
||||
|
||||
// Create channels with appropriate buffering
|
||||
workChan := make(chan *types.Torrent, min(c.workers, len(torrents)))
|
||||
|
||||
// Use an atomic counter for progress tracking
|
||||
var processed int64
|
||||
var errorCount int64
|
||||
|
||||
// Create a wait group for workers
|
||||
var wg sync.WaitGroup
|
||||
|
||||
// Start workers
|
||||
for i := 0; i < c.workers; i++ {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
for {
|
||||
select {
|
||||
case t, ok := <-workChan:
|
||||
if !ok {
|
||||
return // Channel closed, exit goroutine
|
||||
}
|
||||
|
||||
if err := c.ProcessTorrent(t); err != nil {
|
||||
c.logger.Error().Err(err).Str("torrent", t.Name).Msg("sync error")
|
||||
atomic.AddInt64(&errorCount, 1)
|
||||
}
|
||||
|
||||
count := atomic.AddInt64(&processed, 1)
|
||||
if count%1000 == 0 {
|
||||
c.logger.Info().Msgf("Progress: %d/%d torrents processed", count, len(torrents))
|
||||
}
|
||||
|
||||
case <-ctx.Done():
|
||||
return // Context cancelled, exit goroutine
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// Feed work to workers
|
||||
feedLoop:
for _, t := range torrents {
select {
case workChan <- t:
// Work sent successfully
case <-ctx.Done():
break feedLoop // context cancelled; a bare break would only exit the select
}
}
|
||||
|
||||
// Signal workers that no more work is coming
|
||||
close(workChan)
|
||||
|
||||
// Wait for all workers to complete
|
||||
wg.Wait()
|
||||
|
||||
c.listingDebouncer.Call(false) // final refresh
|
||||
c.logger.Info().Msgf("Sync complete: %d torrents processed, %d errors", len(torrents), errorCount)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Cache) GetTorrentFolder(torrent *types.Torrent) string {
|
||||
switch c.folderNaming {
|
||||
case WebDavUseFileName:
|
||||
return path.Clean(torrent.Filename)
|
||||
case WebDavUseOriginalName:
|
||||
return path.Clean(torrent.OriginalFilename)
|
||||
case WebDavUseFileNameNoExt:
|
||||
return path.Clean(utils.RemoveExtension(torrent.Filename))
|
||||
case WebDavUseOriginalNameNoExt:
|
||||
return path.Clean(utils.RemoveExtension(torrent.OriginalFilename))
|
||||
case WebDavUseID:
|
||||
return torrent.Id
|
||||
case WebdavUseHash:
|
||||
return strings.ToLower(torrent.InfoHash)
|
||||
default:
|
||||
return path.Clean(torrent.Filename)
|
||||
}
|
||||
}
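As a quick illustration of what each folder-naming mode resolves to, a sketch with invented sample values. `utils.RemoveExtension` is not shown in this diff, so a local stand-in is used here; its real behaviour may differ.

```go
package main

import (
	"fmt"
	"path"
	"strings"
)

func main() {
	// Invented sample torrent fields.
	filename := "Some.Show.S01E01.1080p.mkv"
	original := "Some Show S01E01 1080p"
	id := "1234567"
	infoHash := "ABCDEF0123456789ABCDEF0123456789ABCDEF01"

	// Stand-in for utils.RemoveExtension.
	removeExt := func(s string) string { return strings.TrimSuffix(s, path.Ext(s)) }

	fmt.Println("filename:        ", path.Clean(filename))
	fmt.Println("original:        ", path.Clean(original))
	fmt.Println("filename_no_ext: ", path.Clean(removeExt(filename)))
	fmt.Println("original_no_ext: ", path.Clean(removeExt(original)))
	fmt.Println("id:              ", id)
	fmt.Println("infohash:        ", strings.ToLower(infoHash))
}
```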
|
||||
|
||||
func (c *Cache) setTorrent(t CachedTorrent, callback func(torrent CachedTorrent)) {
|
||||
torrentName := c.GetTorrentFolder(t.Torrent)
|
||||
updatedTorrent := t.copy()
|
||||
if o, ok := c.torrents.getByName(torrentName); ok && o.Id != t.Id {
|
||||
// If another torrent with the same name exists, merge the files, if the same file exists,
|
||||
// keep the one with the most recent added date
|
||||
|
||||
// Save the most recent torrent
|
||||
mergedFiles := mergeFiles(o, updatedTorrent) // Useful for merging files across multiple torrents, while keeping the most recent
|
||||
updatedTorrent.Files = mergedFiles
|
||||
}
|
||||
c.torrents.set(torrentName, t, updatedTorrent)
|
||||
go c.SaveTorrent(t)
|
||||
if callback != nil {
|
||||
go callback(updatedTorrent)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Cache) setTorrents(torrents map[string]CachedTorrent, callback func()) {
|
||||
for _, t := range torrents {
|
||||
torrentName := c.GetTorrentFolder(t.Torrent)
|
||||
updatedTorrent := t.copy()
|
||||
if o, ok := c.torrents.getByName(torrentName); ok && o.Id != t.Id {
|
||||
// Save the most recent torrent
|
||||
mergedFiles := mergeFiles(o, updatedTorrent)
|
||||
updatedTorrent.Files = mergedFiles
|
||||
}
|
||||
c.torrents.set(torrentName, t, updatedTorrent)
|
||||
}
|
||||
c.SaveTorrents()
|
||||
if callback != nil {
|
||||
callback()
|
||||
}
|
||||
}
|
||||
|
||||
// GetListing returns a sorted list of torrents(READ-ONLY)
|
||||
func (c *Cache) GetListing(folder string) []os.FileInfo {
|
||||
switch folder {
|
||||
case "__all__", "torrents":
|
||||
return c.torrents.getListing()
|
||||
default:
|
||||
return c.torrents.getFolderListing(folder)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Cache) GetCustomFolders() []string {
|
||||
return c.customFolders
|
||||
}
|
||||
|
||||
func (c *Cache) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Cache) GetTorrents() map[string]CachedTorrent {
|
||||
return c.torrents.getAll()
|
||||
}
|
||||
|
||||
func (c *Cache) TotalTorrents() int {
|
||||
return c.torrents.getAllCount()
|
||||
}
|
||||
|
||||
func (c *Cache) GetTorrentByName(name string) *CachedTorrent {
|
||||
if torrent, ok := c.torrents.getByName(name); ok {
|
||||
return &torrent
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Cache) GetTorrentsName() map[string]CachedTorrent {
|
||||
return c.torrents.getAllByName()
|
||||
}
|
||||
|
||||
func (c *Cache) GetTorrent(torrentId string) *CachedTorrent {
|
||||
if torrent, ok := c.torrents.getByID(torrentId); ok {
|
||||
return &torrent
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Cache) SaveTorrents() {
|
||||
torrents := c.torrents.getAll()
|
||||
for _, torrent := range torrents {
|
||||
c.SaveTorrent(torrent)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Cache) SaveTorrent(ct CachedTorrent) {
|
||||
marshaled, err := json.MarshalIndent(ct, "", " ")
|
||||
if err != nil {
|
||||
c.logger.Error().Err(err).Msgf("Failed to marshal torrent: %s", ct.Id)
|
||||
return
|
||||
}
|
||||
|
||||
// Store just the essential info needed for the file operation
|
||||
saveInfo := struct {
|
||||
id string
|
||||
jsonData []byte
|
||||
}{
|
||||
id: ct.Torrent.Id,
|
||||
jsonData: marshaled,
|
||||
}
|
||||
|
||||
// Try to acquire semaphore without blocking
|
||||
select {
|
||||
case c.saveSemaphore <- struct{}{}:
|
||||
go func() {
|
||||
defer func() { <-c.saveSemaphore }()
|
||||
c.saveTorrent(saveInfo.id, saveInfo.jsonData)
|
||||
}()
|
||||
default:
|
||||
c.saveTorrent(saveInfo.id, saveInfo.jsonData)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Cache) saveTorrent(id string, data []byte) {
|
||||
|
||||
fileName := id + ".json"
|
||||
filePath := filepath.Join(c.dir, fileName)
|
||||
|
||||
// Use a unique temporary filename for concurrent safety
|
||||
tmpFile := filePath + ".tmp." + strconv.FormatInt(time.Now().UnixNano(), 10)
|
||||
|
||||
f, err := os.Create(tmpFile)
|
||||
if err != nil {
|
||||
c.logger.Error().Err(err).Msgf("Failed to create file: %s", tmpFile)
|
||||
return
|
||||
}
|
||||
|
||||
// Track if we've closed the file
|
||||
fileClosed := false
|
||||
defer func() {
|
||||
// Only close if not already closed
|
||||
if !fileClosed {
|
||||
_ = f.Close()
|
||||
}
|
||||
// Clean up the temp file if it still exists and rename failed
|
||||
_ = os.Remove(tmpFile)
|
||||
}()
|
||||
|
||||
w := bufio.NewWriter(f)
|
||||
if _, err := w.Write(data); err != nil {
|
||||
c.logger.Error().Err(err).Msgf("Failed to write data: %s", tmpFile)
|
||||
return
|
||||
}
|
||||
|
||||
if err := w.Flush(); err != nil {
|
||||
c.logger.Error().Err(err).Msgf("Failed to flush data: %s", tmpFile)
|
||||
return
|
||||
}
|
||||
|
||||
// Close the file before renaming
|
||||
_ = f.Close()
|
||||
fileClosed = true
|
||||
|
||||
if err := os.Rename(tmpFile, filePath); err != nil {
|
||||
c.logger.Error().Err(err).Msgf("Failed to rename file: %s", tmpFile)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Cache) ProcessTorrent(t *types.Torrent) error {
|
||||
|
||||
isComplete := func(files map[string]types.File) bool {
|
||||
_complete := len(files) > 0
|
||||
for _, file := range files {
|
||||
if file.Link == "" {
|
||||
_complete = false
|
||||
break
|
||||
}
|
||||
}
|
||||
return _complete
|
||||
}
|
||||
|
||||
if !isComplete(t.Files) {
|
||||
if err := c.client.UpdateTorrent(t); err != nil {
|
||||
return fmt.Errorf("failed to update torrent: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if !isComplete(t.Files) {
|
||||
c.logger.Debug().
|
||||
Str("torrent_id", t.Id).
|
||||
Str("torrent_name", t.Name).
|
||||
Int("total_files", len(t.Files)).
|
||||
Msg("Torrent still not complete after refresh")
|
||||
} else {
|
||||
|
||||
addedOn, err := time.Parse(time.RFC3339, t.Added)
|
||||
if err != nil {
|
||||
addedOn = time.Now()
|
||||
}
|
||||
ct := CachedTorrent{
|
||||
Torrent: t,
|
||||
IsComplete: len(t.Files) > 0,
|
||||
AddedOn: addedOn,
|
||||
}
|
||||
c.setTorrent(ct, func(tor CachedTorrent) {
|
||||
c.listingDebouncer.Call(false)
|
||||
})
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Cache) Add(t *types.Torrent) error {
|
||||
if len(t.Files) == 0 {
|
||||
c.logger.Warn().Msgf("Torrent %s has no files to add. Refreshing", t.Id)
|
||||
if err := c.client.UpdateTorrent(t); err != nil {
|
||||
return fmt.Errorf("failed to update torrent: %w", err)
|
||||
}
|
||||
}
|
||||
addedOn, err := time.Parse(time.RFC3339, t.Added)
|
||||
if err != nil {
|
||||
addedOn = time.Now()
|
||||
}
|
||||
ct := CachedTorrent{
|
||||
Torrent: t,
|
||||
IsComplete: len(t.Files) > 0,
|
||||
AddedOn: addedOn,
|
||||
}
|
||||
c.setTorrent(ct, func(tor CachedTorrent) {
|
||||
c.RefreshListings(true)
|
||||
})
|
||||
go c.GetFileDownloadLinks(ct)
|
||||
return nil
|
||||
|
||||
}
|
||||
|
||||
func (c *Cache) Client() types.Client {
|
||||
return c.client
|
||||
}
|
||||
|
||||
func (c *Cache) DeleteTorrent(id string) error {
|
||||
c.torrentsRefreshMu.Lock()
|
||||
defer c.torrentsRefreshMu.Unlock()
|
||||
|
||||
if c.deleteTorrent(id, true) {
|
||||
go c.RefreshListings(true)
|
||||
return nil
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Cache) validateAndDeleteTorrents(torrents []string) {
|
||||
wg := sync.WaitGroup{}
|
||||
for _, torrent := range torrents {
|
||||
wg.Add(1)
|
||||
go func(t string) {
|
||||
defer wg.Done()
|
||||
// Check if torrent is truly deleted
|
||||
if _, err := c.client.GetTorrent(t); err != nil {
|
||||
c.deleteTorrent(t, false) // Since it's removed from debrid already
|
||||
}
|
||||
}(torrent)
|
||||
}
|
||||
wg.Wait()
|
||||
c.listingDebouncer.Call(true)
|
||||
}
|
||||
|
||||
// deleteTorrent deletes the torrent from the cache and debrid service
|
||||
// It also handles torrents with the same name but different IDs
|
||||
func (c *Cache) deleteTorrent(id string, removeFromDebrid bool) bool {
|
||||
|
||||
if torrent, ok := c.torrents.getByID(id); ok {
|
||||
c.torrents.removeId(id) // Delete id from cache
|
||||
defer func() {
|
||||
c.removeFile(id, false)
|
||||
if removeFromDebrid {
|
||||
_ = c.client.DeleteTorrent(id) // Skip error handling, we don't care if it fails
|
||||
}
|
||||
}() // defer delete from debrid
|
||||
|
||||
torrentName := c.GetTorrentFolder(torrent.Torrent)
|
||||
|
||||
if t, ok := c.torrents.getByName(torrentName); ok {
|
||||
|
||||
newFiles := map[string]types.File{}
|
||||
newId := ""
|
||||
for _, file := range t.GetFiles() {
|
||||
if file.TorrentId != "" && file.TorrentId != id {
|
||||
if newId == "" && file.TorrentId != "" {
|
||||
newId = file.TorrentId
|
||||
}
|
||||
newFiles[file.Name] = file
|
||||
}
|
||||
}
|
||||
if len(newFiles) == 0 {
|
||||
// Delete the torrent since no files are left
|
||||
c.torrents.remove(torrentName)
|
||||
} else {
|
||||
t.Files = newFiles
|
||||
newId = cmp.Or(newId, t.Id)
|
||||
t.Id = newId
|
||||
c.setTorrent(t, nil) // This gets called after calling deleteTorrent
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (c *Cache) DeleteTorrents(ids []string) {
|
||||
c.logger.Info().Msgf("Deleting %d torrents", len(ids))
|
||||
for _, id := range ids {
|
||||
_ = c.deleteTorrent(id, true)
|
||||
}
|
||||
c.listingDebouncer.Call(true)
|
||||
}
|
||||
|
||||
func (c *Cache) removeFile(torrentId string, moveToTrash bool) {
|
||||
// Moves the torrent file to the trash
|
||||
filePath := filepath.Join(c.dir, torrentId+".json")
|
||||
|
||||
// Check if the file exists
|
||||
if _, err := os.Stat(filePath); errors.Is(err, os.ErrNotExist) {
|
||||
return
|
||||
}
|
||||
|
||||
if !moveToTrash {
|
||||
// If not moving to trash, delete the file directly
|
||||
if err := os.Remove(filePath); err != nil {
|
||||
c.logger.Error().Err(err).Msgf("Failed to remove file: %s", filePath)
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
// Move the file to the trash
|
||||
trashPath := filepath.Join(c.dir, "trash", torrentId+".json")
|
||||
if err := os.MkdirAll(filepath.Dir(trashPath), 0755); err != nil {
|
||||
return
|
||||
}
|
||||
if err := os.Rename(filePath, trashPath); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Cache) OnRemove(torrentId string) {
|
||||
c.logger.Debug().Msgf("OnRemove triggered for %s", torrentId)
|
||||
err := c.DeleteTorrent(torrentId)
|
||||
if err != nil {
|
||||
c.logger.Error().Err(err).Msgf("Failed to delete torrent: %s", torrentId)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// RemoveFile removes a file from the torrent cache
|
||||
// TODO sends a re-insert that removes the file from debrid
|
||||
func (c *Cache) RemoveFile(torrentId string, filename string) error {
|
||||
c.logger.Debug().Str("torrent_id", torrentId).Msgf("Removing file %s", filename)
|
||||
torrent, ok := c.torrents.getByID(torrentId)
|
||||
if !ok {
|
||||
return fmt.Errorf("torrent %s not found", torrentId)
|
||||
}
|
||||
file, ok := torrent.GetFile(filename)
|
||||
if !ok {
|
||||
return fmt.Errorf("file %s not found in torrent %s", filename, torrentId)
|
||||
}
|
||||
file.Deleted = true
|
||||
torrent.Files[filename] = file
|
||||
|
||||
// If the torrent has no files left, delete it
|
||||
if len(torrent.GetFiles()) == 0 {
|
||||
c.logger.Debug().Msgf("Torrent %s has no files left, deleting it", torrentId)
|
||||
if err := c.DeleteTorrent(torrentId); err != nil {
|
||||
return fmt.Errorf("failed to delete torrent %s: %w", torrentId, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
c.setTorrent(torrent, func(torrent CachedTorrent) {
|
||||
c.listingDebouncer.Call(true)
|
||||
}) // Update the torrent in the cache
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Cache) Logger() zerolog.Logger {
|
||||
return c.logger
|
||||
}
|
||||
|
||||
func (c *Cache) GetConfig() config.Debrid {
|
||||
return c.config
|
||||
}
|
||||
198
pkg/debrid/store/download_link.go
Normal file
@@ -0,0 +1,198 @@
|
||||
package store
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/sirrobot01/decypharr/internal/utils"
|
||||
"github.com/sirrobot01/decypharr/pkg/debrid/types"
|
||||
)
|
||||
|
||||
type downloadLinkRequest struct {
|
||||
result string
|
||||
err error
|
||||
done chan struct{}
|
||||
}
|
||||
|
||||
func newDownloadLinkRequest() *downloadLinkRequest {
|
||||
return &downloadLinkRequest{
|
||||
done: make(chan struct{}),
|
||||
}
|
||||
}
|
||||
|
||||
func (r *downloadLinkRequest) Complete(result string, err error) {
|
||||
r.result = result
|
||||
r.err = err
|
||||
close(r.done)
|
||||
}
|
||||
|
||||
func (r *downloadLinkRequest) Wait() (string, error) {
|
||||
<-r.done
|
||||
return r.result, r.err
|
||||
}
|
||||
|
||||
func (c *Cache) GetDownloadLink(torrentName, filename, fileLink string) (string, error) {
|
||||
// Check link cache
|
||||
if dl, err := c.checkDownloadLink(fileLink); dl != "" && err == nil {
|
||||
return dl, nil
|
||||
}
|
||||
|
||||
if req, inFlight := c.downloadLinkRequests.Load(fileLink); inFlight {
|
||||
// Wait for the other request to complete and use its result
|
||||
result := req.(*downloadLinkRequest)
|
||||
return result.Wait()
|
||||
}
|
||||
|
||||
// Create a new request object
|
||||
req := newDownloadLinkRequest()
|
||||
c.downloadLinkRequests.Store(fileLink, req)
|
||||
|
||||
dl, err := c.fetchDownloadLink(torrentName, filename, fileLink)
|
||||
if err != nil {
|
||||
req.Complete("", err)
|
||||
c.downloadLinkRequests.Delete(fileLink)
|
||||
return "", err
|
||||
}
|
||||
|
||||
if dl == nil || dl.DownloadLink == "" {
|
||||
err = fmt.Errorf("download link is empty for %s in torrent %s", filename, torrentName)
|
||||
req.Complete("", err)
|
||||
c.downloadLinkRequests.Delete(fileLink)
|
||||
return "", err
|
||||
}
|
||||
req.Complete(dl.DownloadLink, err)
|
||||
c.downloadLinkRequests.Delete(fileLink)
|
||||
return dl.DownloadLink, err
|
||||
}
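// Sketch (illustrative, not part of this change): GetDownloadLink deduplicates
// concurrent requests for the same file link. If two goroutines ask for the
// same link, only one hits the debrid API; the other blocks on Wait() and
// reuses the result. Torrent name, filename and URL below are example values:
//
//	var wg sync.WaitGroup
//	for i := 0; i < 2; i++ {
//		wg.Add(1)
//		go func() {
//			defer wg.Done()
//			dl, err := cache.GetDownloadLink("Some.Show.S01", "episode1.mkv", "https://host/file/xyz")
//			_, _ = dl, err
//		}()
//	}
//	wg.Wait()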
|
||||
|
||||
func (c *Cache) fetchDownloadLink(torrentName, filename, fileLink string) (*types.DownloadLink, error) {
|
||||
ct := c.GetTorrentByName(torrentName)
|
||||
if ct == nil {
|
||||
return nil, fmt.Errorf("torrent not found")
|
||||
}
|
||||
file, ok := ct.GetFile(filename)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("file %s not found in torrent %s", filename, torrentName)
|
||||
}
|
||||
|
||||
if file.Link == "" {
|
||||
// file link is empty, refresh the torrent to get restricted links
|
||||
ct = c.refreshTorrent(file.TorrentId) // Refresh the torrent from the debrid
|
||||
if ct == nil {
|
||||
return nil, fmt.Errorf("failed to refresh torrent")
|
||||
} else {
|
||||
file, ok = ct.GetFile(filename)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("file %s not found in refreshed torrent %s", filename, torrentName)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// If file.Link is still empty, try re-inserting the torrent
if file.Link == "" {
|
||||
newCt, err := c.reInsertTorrent(ct)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to reinsert torrent. %w", err)
|
||||
}
|
||||
ct = newCt
|
||||
file, ok = ct.GetFile(filename)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("file %s not found in reinserted torrent %s", filename, torrentName)
|
||||
}
|
||||
}
|
||||
|
||||
c.logger.Trace().Msgf("Getting download link for %s(%s)", filename, file.Link)
|
||||
downloadLink, err := c.client.GetDownloadLink(ct.Torrent, &file)
|
||||
if err != nil {
|
||||
if errors.Is(err, utils.HosterUnavailableError) {
|
||||
c.logger.Trace().
|
||||
Str("filename", filename).
|
||||
Str("torrent_id", ct.Id).
|
||||
Msg("Hoster unavailable, attempting to reinsert torrent")
|
||||
|
||||
newCt, err := c.reInsertTorrent(ct)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to reinsert torrent: %w", err)
|
||||
}
|
||||
ct = newCt
|
||||
file, ok = ct.GetFile(filename)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("file %s not found in reinserted torrent %s", filename, torrentName)
|
||||
}
|
||||
// Retry getting the download link
|
||||
downloadLink, err = c.client.GetDownloadLink(ct.Torrent, &file)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("retry failed to get download link: %w", err)
|
||||
}
|
||||
if downloadLink == nil {
|
||||
return nil, fmt.Errorf("download link is empty after retry")
|
||||
}
|
||||
// Cache and return the link obtained on retry
go c.client.Accounts().SetDownloadLink(fileLink, downloadLink)
return downloadLink, nil
|
||||
} else if errors.Is(err, utils.TrafficExceededError) {
|
||||
// This is likely a fair usage limit error
|
||||
return nil, err
|
||||
} else {
|
||||
return nil, fmt.Errorf("failed to get download link: %w", err)
|
||||
}
|
||||
}
|
||||
if downloadLink == nil {
|
||||
return nil, fmt.Errorf("download link is empty")
|
||||
}
|
||||
|
||||
// Set link to cache
|
||||
go c.client.Accounts().SetDownloadLink(fileLink, downloadLink)
|
||||
return downloadLink, nil
|
||||
}
|
||||
|
||||
func (c *Cache) GetFileDownloadLinks(t CachedTorrent) {
|
||||
if err := c.client.GetFileDownloadLinks(t.Torrent); err != nil {
|
||||
c.logger.Error().Err(err).Str("torrent", t.Name).Msg("Failed to generate download links")
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Cache) checkDownloadLink(link string) (string, error) {
|
||||
|
||||
dl, err := c.client.Accounts().GetDownloadLink(link)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if !c.downloadLinkIsInvalid(dl.DownloadLink) {
|
||||
return dl.DownloadLink, nil
|
||||
}
|
||||
return "", fmt.Errorf("download link not found for %s", link)
|
||||
}
|
||||
|
||||
func (c *Cache) MarkDownloadLinkAsInvalid(link, downloadLink, reason string) {
|
||||
c.invalidDownloadLinks.Store(downloadLink, reason)
|
||||
// Remove the download api key from active
|
||||
if reason == "bandwidth_exceeded" {
|
||||
// Disable the account
|
||||
_, account, err := c.client.Accounts().GetDownloadLinkWithAccount(link)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
c.client.Accounts().Disable(account)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Cache) downloadLinkIsInvalid(downloadLink string) bool {
|
||||
if reason, ok := c.invalidDownloadLinks.Load(downloadLink); ok {
|
||||
c.logger.Debug().Msgf("Download link %s is invalid: %s", downloadLink, reason)
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (c *Cache) GetDownloadByteRange(torrentName, filename string) (*[2]int64, error) {
|
||||
ct := c.GetTorrentByName(torrentName)
|
||||
if ct == nil {
|
||||
return nil, fmt.Errorf("torrent not found")
|
||||
}
|
||||
file, ok := ct.Files[filename]
if !ok {
return nil, fmt.Errorf("file %s not found in torrent %s", filename, torrentName)
}
return file.ByteRange, nil
|
||||
}
|
||||
|
||||
func (c *Cache) GetTotalActiveDownloadLinks() int {
|
||||
return c.client.Accounts().GetLinksCount()
|
||||
}
|
||||
42
pkg/debrid/store/misc.go
Normal file
@@ -0,0 +1,42 @@
|
||||
package store
|
||||
|
||||
import (
|
||||
"github.com/sirrobot01/decypharr/pkg/debrid/types"
|
||||
"sort"
|
||||
)
|
||||
|
||||
// MergeFiles merges the files from multiple torrents into a single map.
|
||||
// It uses the file name as the key and the file object as the value.
|
||||
// This is useful for deduplicating files across multiple torrents.
|
||||
// The order of the torrents is determined by the AddedOn time, with the earliest added torrent first.
|
||||
// If a file with the same name exists in multiple torrents, the last one will be used.
|
||||
func mergeFiles(torrents ...CachedTorrent) map[string]types.File {
|
||||
merged := make(map[string]types.File)
|
||||
|
||||
// order torrents by added time
|
||||
sort.Slice(torrents, func(i, j int) bool {
|
||||
return torrents[i].AddedOn.Before(torrents[j].AddedOn)
|
||||
})
|
||||
|
||||
for _, torrent := range torrents {
|
||||
for _, file := range torrent.GetFiles() {
|
||||
merged[file.Name] = file
|
||||
}
|
||||
}
|
||||
return merged
|
||||
}
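// Illustrative usage (variable names assumed, not from this change): given two
// cached copies of the same release, the torrent added later wins for duplicate
// file names because it is written to the map last:
//
//	merged := mergeFiles(olderTorrent, newerTorrent)
//	f := merged["episode1.mkv"] // comes from newerTorrent if both contain it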
|
||||
|
||||
func (c *Cache) GetIngests() ([]types.IngestData, error) {
|
||||
torrents := c.GetTorrents()
|
||||
debridName := c.client.Name()
|
||||
var ingests []types.IngestData
|
||||
for _, torrent := range torrents {
|
||||
ingests = append(ingests, types.IngestData{
|
||||
Debrid: debridName,
|
||||
Name: torrent.Filename,
|
||||
Hash: torrent.InfoHash,
|
||||
Size: torrent.Bytes,
|
||||
})
|
||||
}
|
||||
return ingests, nil
|
||||
}
|
||||
256
pkg/debrid/store/refresh.go
Normal file
@@ -0,0 +1,256 @@
|
||||
package store
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"github.com/sirrobot01/decypharr/pkg/debrid/types"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
type fileInfo struct {
|
||||
id string
|
||||
name string
|
||||
size int64
|
||||
mode os.FileMode
|
||||
modTime time.Time
|
||||
isDir bool
|
||||
}
|
||||
|
||||
func (fi *fileInfo) Name() string { return fi.name }
|
||||
func (fi *fileInfo) Size() int64 { return fi.size }
|
||||
func (fi *fileInfo) Mode() os.FileMode { return fi.mode }
|
||||
func (fi *fileInfo) ModTime() time.Time { return fi.modTime }
|
||||
func (fi *fileInfo) IsDir() bool { return fi.isDir }
|
||||
func (fi *fileInfo) ID() string { return fi.id }
|
||||
func (fi *fileInfo) Sys() interface{} { return nil }
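// fileInfo implements os.FileInfo, which lets torrent listings be exposed as
// ordinary directory entries. A compile-time assertion (an optional addition,
// not part of this change) makes that contract explicit:
//
//	var _ os.FileInfo = (*fileInfo)(nil)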
|
||||
|
||||
func (c *Cache) RefreshListings(refreshRclone bool) {
|
||||
// Rebuild the cached torrent listings
|
||||
c.torrents.refreshListing() // refresh torrent listings
|
||||
|
||||
if refreshRclone {
|
||||
if err := c.refreshRclone(); err != nil {
|
||||
c.logger.Error().Err(err).Msg("Failed to refresh rclone") // silent error
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Cache) refreshTorrents(ctx context.Context) {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
default:
|
||||
}
|
||||
|
||||
if !c.torrentsRefreshMu.TryLock() {
|
||||
return
|
||||
}
|
||||
defer c.torrentsRefreshMu.Unlock()
|
||||
|
||||
// Get all torrents from the debrid service
|
||||
debTorrents, err := c.client.GetTorrents()
|
||||
if err != nil {
|
||||
c.logger.Error().Err(err).Msg("Failed to get torrents")
|
||||
return
|
||||
}
|
||||
|
||||
if len(debTorrents) == 0 {
|
||||
// Empty result; likely an upstream error, keep the existing cache
|
||||
return
|
||||
}
|
||||
|
||||
currentTorrentIds := make(map[string]struct{}, len(debTorrents))
|
||||
for _, t := range debTorrents {
|
||||
currentTorrentIds[t.Id] = struct{}{}
|
||||
}
|
||||
|
||||
// Delete cached torrents that no longer exist on the debrid service
|
||||
deletedTorrents := make([]string, 0)
|
||||
cachedTorrents := c.torrents.getIdMaps()
|
||||
for id := range cachedTorrents {
|
||||
if _, exists := currentTorrentIds[id]; !exists {
|
||||
deletedTorrents = append(deletedTorrents, id)
|
||||
}
|
||||
}
|
||||
|
||||
if len(deletedTorrents) > 0 {
|
||||
go c.validateAndDeleteTorrents(deletedTorrents)
|
||||
}
|
||||
|
||||
newTorrents := make([]*types.Torrent, 0)
|
||||
for _, t := range debTorrents {
|
||||
if _, exists := cachedTorrents[t.Id]; !exists {
|
||||
newTorrents = append(newTorrents, t)
|
||||
}
|
||||
}
|
||||
|
||||
if len(newTorrents) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
c.logger.Trace().Msgf("Found %d new torrents", len(newTorrents))
|
||||
|
||||
workChan := make(chan *types.Torrent, min(100, len(newTorrents)))
|
||||
errChan := make(chan error, len(newTorrents))
|
||||
var wg sync.WaitGroup
|
||||
var counter atomic.Int64 // incremented by multiple workers, so use an atomic counter
|
||||
|
||||
for i := 0; i < c.workers; i++ {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
for t := range workChan {
|
||||
if err := c.ProcessTorrent(t); err != nil {
|
||||
c.logger.Error().Err(err).Msgf("Failed to process new torrent %s", t.Id)
|
||||
errChan <- err
|
||||
}
|
||||
counter.Add(1)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
for _, t := range newTorrents {
|
||||
workChan <- t
|
||||
}
|
||||
close(workChan)
|
||||
wg.Wait()
|
||||
|
||||
c.listingDebouncer.Call(false)
|
||||
|
||||
c.logger.Debug().Msgf("Processed %d new torrents", counter)
|
||||
}
|
||||
|
||||
func (c *Cache) refreshRclone() error {
|
||||
cfg := c.config
|
||||
dirs := strings.FieldsFunc(cfg.RcRefreshDirs, func(r rune) bool {
|
||||
return r == ',' || r == '&'
|
||||
})
|
||||
if len(dirs) == 0 {
|
||||
dirs = []string{"__all__"}
|
||||
}
|
||||
if c.mounter != nil {
|
||||
return c.mounter.RefreshDir(dirs)
|
||||
} else {
|
||||
return c.refreshRcloneWithRC(dirs)
|
||||
}
|
||||
}
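// Worked example: with RcRefreshDirs set to "movies,shows&anime" the
// FieldsFunc call above yields []string{"movies", "shows", "anime"}; note that
// surrounding whitespace is not trimmed, so "movies, shows" produces "movies"
// and " shows". An empty setting falls back to []string{"__all__"}.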
|
||||
|
||||
func (c *Cache) refreshRcloneWithRC(dirs []string) error {
|
||||
cfg := c.config
|
||||
|
||||
if cfg.RcUrl == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
client := http.DefaultClient
|
||||
// Create form data
|
||||
data := c.buildRcloneRequestData(dirs)
|
||||
|
||||
if err := c.sendRcloneRequest(client, "vfs/forget", data); err != nil {
|
||||
c.logger.Error().Err(err).Msg("Failed to send rclone vfs/forget request")
|
||||
}
|
||||
|
||||
if err := c.sendRcloneRequest(client, "vfs/refresh", data); err != nil {
|
||||
c.logger.Error().Err(err).Msg("Failed to send rclone vfs/refresh request")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Cache) buildRcloneRequestData(dirs []string) string {
|
||||
var data strings.Builder
|
||||
for index, dir := range dirs {
|
||||
if dir != "" {
|
||||
if index == 0 {
|
||||
data.WriteString("dir=" + dir)
|
||||
} else {
|
||||
data.WriteString("&dir" + fmt.Sprint(index+1) + "=" + dir)
|
||||
}
|
||||
}
|
||||
}
|
||||
return data.String()
|
||||
}
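// Worked example: buildRcloneRequestData([]string{"movies", "shows"}) returns
// "dir=movies&dir2=shows" (the second directory is numbered dir2 because the
// index is offset by one), matching the dir/dir2/... form parameters accepted
// by rclone's vfs/forget and vfs/refresh remote-control endpoints.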
|
||||
|
||||
func (c *Cache) sendRcloneRequest(client *http.Client, endpoint, data string) error {
|
||||
req, err := http.NewRequest("POST", fmt.Sprintf("%s/%s", c.config.RcUrl, endpoint), strings.NewReader(data))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
|
||||
|
||||
if c.config.RcUser != "" && c.config.RcPass != "" {
|
||||
req.SetBasicAuth(c.config.RcUser, c.config.RcPass)
|
||||
}
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != 200 {
|
||||
body, _ := io.ReadAll(io.LimitReader(resp.Body, 1024))
|
||||
return fmt.Errorf("failed to perform %s: %s - %s", endpoint, resp.Status, string(body))
|
||||
}
|
||||
|
||||
_, _ = io.Copy(io.Discard, resp.Body)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Cache) refreshTorrent(torrentId string) *CachedTorrent {
|
||||
|
||||
if torrentId == "" {
|
||||
c.logger.Error().Msg("Torrent ID is empty")
|
||||
return nil
|
||||
}
|
||||
|
||||
torrent, err := c.client.GetTorrent(torrentId)
|
||||
if err != nil {
|
||||
c.logger.Error().Err(err).Msgf("Failed to get torrent %s", torrentId)
|
||||
return nil
|
||||
}
|
||||
addedOn, err := time.Parse(time.RFC3339, torrent.Added)
|
||||
if err != nil {
|
||||
addedOn = time.Now()
|
||||
}
|
||||
ct := CachedTorrent{
|
||||
Torrent: torrent,
|
||||
AddedOn: addedOn,
|
||||
IsComplete: len(torrent.Files) > 0,
|
||||
}
|
||||
c.setTorrent(ct, func(torrent CachedTorrent) {
|
||||
go c.listingDebouncer.Call(true)
|
||||
})
|
||||
|
||||
return &ct
|
||||
}
|
||||
|
||||
func (c *Cache) refreshDownloadLinks(ctx context.Context) {
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
default:
|
||||
}
|
||||
|
||||
if !c.downloadLinksRefreshMu.TryLock() {
|
||||
return
|
||||
}
|
||||
defer c.downloadLinksRefreshMu.Unlock()
|
||||
|
||||
links, err := c.client.GetDownloadLinks()
|
||||
|
||||
if err != nil {
|
||||
c.logger.Error().Err(err).Msg("Failed to get download links")
|
||||
return
|
||||
}
|
||||
|
||||
c.client.Accounts().SetDownloadLinks(links)
|
||||
|
||||
c.logger.Debug().Msgf("Refreshed download %d links", c.client.Accounts().GetLinksCount())
|
||||
}
|
||||
311
pkg/debrid/store/repair.go
Normal file
@@ -0,0 +1,311 @@
|
||||
package store
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/sirrobot01/decypharr/internal/config"
|
||||
"github.com/sirrobot01/decypharr/internal/utils"
|
||||
"github.com/sirrobot01/decypharr/pkg/debrid/types"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
type reInsertRequest struct {
|
||||
result *CachedTorrent
|
||||
err error
|
||||
done chan struct{}
|
||||
}
|
||||
|
||||
func newReInsertRequest() *reInsertRequest {
|
||||
return &reInsertRequest{
|
||||
done: make(chan struct{}),
|
||||
}
|
||||
}
|
||||
|
||||
func (r *reInsertRequest) Complete(result *CachedTorrent, err error) {
|
||||
r.result = result
|
||||
r.err = err
|
||||
close(r.done)
|
||||
}
|
||||
|
||||
func (r *reInsertRequest) Wait() (*CachedTorrent, error) {
|
||||
<-r.done
|
||||
return r.result, r.err
|
||||
}
|
||||
|
||||
func (c *Cache) markAsFailedToReinsert(torrentId string) {
|
||||
c.failedToReinsert.Store(torrentId, struct{}{})
|
||||
|
||||
// Remove the torrent from the directory if it has failed to reinsert, max retries are hardcoded to 5
|
||||
if torrent, ok := c.torrents.getByID(torrentId); ok {
|
||||
torrent.Bad = true
|
||||
c.setTorrent(torrent, func(t CachedTorrent) {
|
||||
c.RefreshListings(false)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Cache) markAsSuccessfullyReinserted(torrentId string) {
|
||||
if _, ok := c.failedToReinsert.Load(torrentId); !ok {
|
||||
return
|
||||
}
|
||||
c.failedToReinsert.Delete(torrentId)
|
||||
if torrent, ok := c.torrents.getByID(torrentId); ok {
|
||||
torrent.Bad = false
|
||||
c.setTorrent(torrent, func(torrent CachedTorrent) {
|
||||
c.RefreshListings(false)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Cache) GetBrokenFiles(t *CachedTorrent, filenames []string) []string {
|
||||
files := make(map[string]types.File)
|
||||
repairStrategy := config.Get().Repair.Strategy
|
||||
brokenFiles := make([]string, 0)
|
||||
if len(filenames) > 0 {
|
||||
for name, f := range t.Files {
|
||||
if utils.Contains(filenames, name) {
|
||||
files[name] = f
|
||||
}
|
||||
}
|
||||
} else {
|
||||
files = t.Files
|
||||
}
|
||||
for _, f := range files {
|
||||
// Check if file is missing
|
||||
if f.Link == "" {
|
||||
// refresh the torrent to repopulate missing links
|
||||
if newT := c.refreshTorrent(f.TorrentId); newT != nil {
|
||||
t = newT
|
||||
} else {
|
||||
c.logger.Error().Str("torrentId", t.Torrent.Id).Msg("Failed to refresh torrent")
|
||||
return filenames // Return the original filenames if the refresh fails (the torrent is in a bad state)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if t.Torrent == nil {
c.logger.Error().Msg("Torrent data is nil after refresh")
return filenames // Return the original filenames if the refresh fails (the torrent is in a bad state)
|
||||
}
|
||||
|
||||
files = t.Files
|
||||
var wg sync.WaitGroup
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
// Use a mutex to protect brokenFiles slice and torrent-wide failure flag
|
||||
var mu sync.Mutex
|
||||
torrentWideFailed := false
|
||||
|
||||
wg.Add(len(files))
|
||||
|
||||
for _, f := range files {
|
||||
go func(f types.File) {
|
||||
defer wg.Done()
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
default:
|
||||
}
|
||||
|
||||
if f.Link == "" {
|
||||
mu.Lock()
|
||||
if repairStrategy == config.RepairStrategyPerTorrent {
|
||||
torrentWideFailed = true
|
||||
mu.Unlock()
|
||||
cancel() // Signal all other goroutines to stop
|
||||
return
|
||||
} else {
|
||||
// per_file strategy - only mark this file as broken
|
||||
brokenFiles = append(brokenFiles, f.Name)
|
||||
}
|
||||
mu.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
if err := c.client.CheckLink(f.Link); err != nil {
|
||||
if errors.Is(err, utils.HosterUnavailableError) {
|
||||
mu.Lock()
|
||||
if repairStrategy == config.RepairStrategyPerTorrent {
|
||||
torrentWideFailed = true
|
||||
mu.Unlock()
|
||||
cancel() // Signal all other goroutines to stop
|
||||
return
|
||||
} else {
|
||||
// per_file strategy - only mark this file as broken
|
||||
brokenFiles = append(brokenFiles, f.Name)
|
||||
}
|
||||
mu.Unlock()
|
||||
}
|
||||
}
|
||||
}(f)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
|
||||
// Handle the result based on strategy
|
||||
if repairStrategy == config.RepairStrategyPerTorrent && torrentWideFailed {
|
||||
// Mark all files as broken for per_torrent strategy
|
||||
for _, f := range files {
|
||||
brokenFiles = append(brokenFiles, f.Name)
|
||||
}
|
||||
}
|
||||
// For per_file strategy, brokenFiles already contains only the broken ones
|
||||
|
||||
// Try to reinsert the torrent if it's broken
|
||||
if len(brokenFiles) > 0 && t.Torrent != nil {
|
||||
// Re-insert the torrent; concurrent requests for the same torrent are deduplicated inside reInsertTorrent
|
||||
if _, err := c.reInsertTorrent(t); err != nil {
|
||||
c.logger.Error().Err(err).Str("torrentId", t.Torrent.Id).Msg("Failed to reinsert torrent")
|
||||
return brokenFiles // Return broken files if reinsert fails
|
||||
}
|
||||
return nil // Return nil if the torrent was successfully reinserted
|
||||
}
|
||||
|
||||
return brokenFiles
|
||||
}
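// Worked example of the two repair strategies: for a torrent with files A, B
// and C where only B fails its link check, the per_file strategy returns
// []string{"B"}, while per_torrent marks the whole torrent as failed and
// returns all of A, B and C (before the re-insert attempt above).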
|
||||
|
||||
func (c *Cache) repairWorker(ctx context.Context) {
|
||||
// This watches a channel for torrents to repair and can be cancelled via context
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
|
||||
case req, ok := <-c.repairChan:
|
||||
// Channel was closed
|
||||
if !ok {
|
||||
c.logger.Debug().Msg("Repair channel closed, shutting down worker")
|
||||
return
|
||||
}
|
||||
|
||||
torrentId := req.TorrentID
|
||||
c.logger.Debug().Str("torrentId", req.TorrentID).Msg("Received repair request")
|
||||
|
||||
// Get the torrent from the cache
|
||||
cachedTorrent := c.GetTorrent(torrentId)
|
||||
if cachedTorrent == nil {
|
||||
c.logger.Warn().Str("torrentId", torrentId).Msg("Torrent not found in cache")
|
||||
continue
|
||||
}
|
||||
|
||||
switch req.Type {
|
||||
case RepairTypeReinsert:
|
||||
c.logger.Debug().Str("torrentId", torrentId).Msg("Reinserting torrent")
|
||||
if _, err := c.reInsertTorrent(cachedTorrent); err != nil {
|
||||
c.logger.Error().Err(err).Str("torrentId", cachedTorrent.Id).Msg("Failed to reinsert torrent")
|
||||
continue
|
||||
}
|
||||
case RepairTypeDelete:
|
||||
c.logger.Debug().Str("torrentId", torrentId).Msg("Deleting torrent")
|
||||
if err := c.DeleteTorrent(torrentId); err != nil {
|
||||
c.logger.Error().Err(err).Str("torrentId", torrentId).Msg("Failed to delete torrent")
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Cache) reInsertTorrent(ct *CachedTorrent) (*CachedTorrent, error) {
|
||||
// Reconstruct the magnet from the info hash and re-submit the torrent to the debrid service
|
||||
torrent := ct.Torrent
|
||||
oldID := torrent.Id // Store the old ID
|
||||
if _, ok := c.failedToReinsert.Load(oldID); ok {
|
||||
return ct, fmt.Errorf("can't retry re-insert for %s", torrent.Id)
|
||||
}
|
||||
if reqI, inFlight := c.repairRequest.Load(oldID); inFlight {
|
||||
req := reqI.(*reInsertRequest)
|
||||
c.logger.Debug().Msgf("Waiting for existing reinsert request to complete for torrent %s", oldID)
|
||||
return req.Wait()
|
||||
}
|
||||
req := newReInsertRequest()
|
||||
c.repairRequest.Store(oldID, req)
|
||||
|
||||
// Make sure we clean up even if there's a panic
|
||||
defer func() {
|
||||
c.repairRequest.Delete(oldID)
|
||||
}()
|
||||
|
||||
// Submit the magnet to the debrid service
|
||||
newTorrent := &types.Torrent{
|
||||
Name: torrent.Name,
|
||||
Magnet: utils.ConstructMagnet(torrent.InfoHash, torrent.Name),
|
||||
InfoHash: torrent.InfoHash,
|
||||
Size: torrent.Size,
|
||||
Files: make(map[string]types.File),
|
||||
Arr: torrent.Arr,
|
||||
}
|
||||
var err error
|
||||
newTorrent, err = c.client.SubmitMagnet(newTorrent)
|
||||
if err != nil {
|
||||
c.markAsFailedToReinsert(oldID)
|
||||
// Remove the old torrent from the cache and debrid service
|
||||
return ct, fmt.Errorf("failed to submit magnet: %w", err)
|
||||
}
|
||||
|
||||
// Check if the torrent was submitted
|
||||
if newTorrent == nil || newTorrent.Id == "" {
|
||||
c.markAsFailedToReinsert(oldID)
|
||||
return ct, fmt.Errorf("failed to submit magnet: empty torrent")
|
||||
}
|
||||
newTorrent.DownloadUncached = false // Set to false, avoid re-downloading
|
||||
newTorrent, err = c.client.CheckStatus(newTorrent)
|
||||
if err != nil {
|
||||
if newTorrent != nil && newTorrent.Id != "" {
|
||||
// Delete the torrent if it was not downloaded
|
||||
_ = c.client.DeleteTorrent(newTorrent.Id)
|
||||
}
|
||||
c.markAsFailedToReinsert(oldID)
|
||||
return ct, err
|
||||
}
|
||||
|
||||
// Update the torrent in the cache
|
||||
addedOn, err := time.Parse(time.RFC3339, newTorrent.Added)
|
||||
if err != nil {
|
||||
addedOn = time.Now()
|
||||
}
|
||||
for _, f := range newTorrent.GetFiles() {
|
||||
if f.Link == "" {
|
||||
c.markAsFailedToReinsert(oldID)
|
||||
return ct, fmt.Errorf("failed to reinsert torrent: empty link")
|
||||
}
|
||||
}
|
||||
// Set torrent to newTorrent
|
||||
newCt := CachedTorrent{
|
||||
Torrent: newTorrent,
|
||||
AddedOn: addedOn,
|
||||
IsComplete: len(newTorrent.Files) > 0,
|
||||
}
|
||||
c.setTorrent(newCt, func(torrent CachedTorrent) {
|
||||
c.RefreshListings(true)
|
||||
})
|
||||
|
||||
ct = &newCt // Update ct to point to the new torrent
|
||||
|
||||
// We can safely delete the old torrent here
|
||||
if oldID != "" {
|
||||
if err := c.DeleteTorrent(oldID); err != nil {
|
||||
return ct, fmt.Errorf("failed to delete old torrent: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
req.Complete(ct, nil) // reinsert succeeded; don't propagate a stale parse error to waiters
|
||||
c.markAsSuccessfullyReinserted(oldID)
|
||||
|
||||
c.logger.Debug().Str("torrentId", torrent.Id).Msg("Torrent successfully reinserted")
|
||||
|
||||
return ct, nil
|
||||
}
|
||||
|
||||
func (c *Cache) resetInvalidLinks(ctx context.Context) {
|
||||
c.logger.Debug().Msgf("Resetting accounts")
|
||||
c.invalidDownloadLinks = sync.Map{}
|
||||
c.client.Accounts().Reset() // Reset the active download keys
|
||||
|
||||
// Refresh the download links
|
||||
c.refreshDownloadLinks(ctx)
|
||||
}
|
||||
328
pkg/debrid/store/torrent.go
Normal file
@@ -0,0 +1,328 @@
|
||||
package store
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
filterByInclude string = "include"
|
||||
filterByExclude string = "exclude"
|
||||
|
||||
filterByStartsWith string = "starts_with"
|
||||
filterByEndsWith string = "ends_with"
|
||||
filterByNotStartsWith string = "not_starts_with"
|
||||
filterByNotEndsWith string = "not_ends_with"
|
||||
|
||||
filterByRegex string = "regex"
|
||||
filterByNotRegex string = "not_regex"
|
||||
|
||||
filterByExactMatch string = "exact_match"
|
||||
filterByNotExactMatch string = "not_exact_match"
|
||||
|
||||
filterBySizeGT string = "size_gt"
|
||||
filterBySizeLT string = "size_lt"
|
||||
|
||||
filterBLastAdded string = "last_added"
|
||||
)
|
||||
|
||||
type directoryFilter struct {
|
||||
filterType string
|
||||
value string
|
||||
regex *regexp.Regexp // only for regex/not_regex
|
||||
sizeThreshold int64 // only for size_gt/size_lt
|
||||
ageThreshold time.Duration // only for last_added
|
||||
}
|
||||
|
||||
type torrents struct {
|
||||
sync.RWMutex
|
||||
byID map[string]CachedTorrent
|
||||
byName map[string]CachedTorrent
|
||||
}
|
||||
|
||||
type folders struct {
|
||||
sync.RWMutex
|
||||
listing map[string][]os.FileInfo // folder name to file listing
|
||||
}
|
||||
|
||||
type torrentCache struct {
|
||||
torrents torrents
|
||||
|
||||
listing atomic.Value
|
||||
folders folders
|
||||
directoriesFilters map[string][]directoryFilter
|
||||
sortNeeded atomic.Bool
|
||||
}
|
||||
|
||||
type sortableFile struct {
|
||||
id string
|
||||
name string
|
||||
modTime time.Time
|
||||
size int64
|
||||
bad bool
|
||||
}
|
||||
|
||||
func newTorrentCache(dirFilters map[string][]directoryFilter) *torrentCache {
|
||||
|
||||
tc := &torrentCache{
|
||||
torrents: torrents{
|
||||
byID: make(map[string]CachedTorrent),
|
||||
byName: make(map[string]CachedTorrent),
|
||||
},
|
||||
folders: folders{
|
||||
listing: make(map[string][]os.FileInfo),
|
||||
},
|
||||
directoriesFilters: dirFilters,
|
||||
}
|
||||
|
||||
tc.sortNeeded.Store(false)
|
||||
tc.listing.Store(make([]os.FileInfo, 0))
|
||||
return tc
|
||||
}
|
||||
|
||||
func (tc *torrentCache) reset() {
|
||||
tc.torrents.Lock()
|
||||
tc.torrents.byID = make(map[string]CachedTorrent)
|
||||
tc.torrents.byName = make(map[string]CachedTorrent)
|
||||
tc.torrents.Unlock()
|
||||
|
||||
// reset the sorted listing
|
||||
tc.sortNeeded.Store(false)
|
||||
tc.listing.Store(make([]os.FileInfo, 0))
|
||||
|
||||
// reset any per-folder views
|
||||
tc.folders.Lock()
|
||||
tc.folders.listing = make(map[string][]os.FileInfo)
|
||||
tc.folders.Unlock()
|
||||
}
|
||||
|
||||
func (tc *torrentCache) getByID(id string) (CachedTorrent, bool) {
|
||||
tc.torrents.RLock()
|
||||
defer tc.torrents.RUnlock()
|
||||
torrent, exists := tc.torrents.byID[id]
|
||||
return torrent, exists
|
||||
}
|
||||
|
||||
func (tc *torrentCache) getByName(name string) (CachedTorrent, bool) {
|
||||
tc.torrents.RLock()
|
||||
defer tc.torrents.RUnlock()
|
||||
torrent, exists := tc.torrents.byName[name]
|
||||
return torrent, exists
|
||||
}
|
||||
|
||||
func (tc *torrentCache) set(name string, torrent, newTorrent CachedTorrent) {
|
||||
tc.torrents.Lock()
|
||||
// Set the id first
|
||||
|
||||
tc.torrents.byName[name] = torrent
|
||||
tc.torrents.byID[torrent.Id] = torrent // This is the unadulterated torrent
|
||||
tc.torrents.Unlock()
|
||||
tc.sortNeeded.Store(true)
|
||||
}
|
||||
|
||||
func (tc *torrentCache) getListing() []os.FileInfo {
|
||||
// Fast path: if we have a sorted list and no changes since last sort
|
||||
if !tc.sortNeeded.Load() {
|
||||
return tc.listing.Load().([]os.FileInfo)
|
||||
}
|
||||
|
||||
// Slow path: need to sort
|
||||
tc.refreshListing()
|
||||
return tc.listing.Load().([]os.FileInfo)
|
||||
}
|
||||
|
||||
func (tc *torrentCache) getFolderListing(folderName string) []os.FileInfo {
|
||||
tc.folders.RLock()
|
||||
defer tc.folders.RUnlock()
|
||||
if folderName == "" {
|
||||
return tc.getListing()
|
||||
}
|
||||
if folder, ok := tc.folders.listing[folderName]; ok {
|
||||
return folder
|
||||
}
|
||||
// If folder not found, return empty slice
|
||||
return []os.FileInfo{}
|
||||
}
|
||||
|
||||
func (tc *torrentCache) refreshListing() {
|
||||
|
||||
tc.torrents.RLock()
|
||||
all := make([]sortableFile, 0, len(tc.torrents.byName))
|
||||
for name, t := range tc.torrents.byName {
|
||||
all = append(all, sortableFile{t.Id, name, t.AddedOn, t.Bytes, t.Bad})
|
||||
}
|
||||
tc.sortNeeded.Store(false)
|
||||
tc.torrents.RUnlock()
|
||||
|
||||
sort.Slice(all, func(i, j int) bool {
|
||||
if all[i].name != all[j].name {
|
||||
return all[i].name < all[j].name
|
||||
}
|
||||
return all[i].modTime.Before(all[j].modTime)
|
||||
})
|
||||
|
||||
wg := sync.WaitGroup{}
|
||||
|
||||
wg.Add(1) // for all listing
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
listing := make([]os.FileInfo, len(all))
|
||||
for i, sf := range all {
|
||||
listing[i] = &fileInfo{sf.id, sf.name, sf.size, 0755 | os.ModeDir, sf.modTime, true}
|
||||
}
|
||||
tc.listing.Store(listing)
|
||||
}()
|
||||
|
||||
wg.Add(1)
|
||||
// For __bad__
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
listing := make([]os.FileInfo, 0)
|
||||
for _, sf := range all {
|
||||
if sf.bad {
|
||||
listing = append(listing, &fileInfo{
|
||||
id: sf.id,
|
||||
name: fmt.Sprintf("%s || %s", sf.name, sf.id),
|
||||
size: sf.size,
|
||||
mode: 0755 | os.ModeDir,
|
||||
modTime: sf.modTime,
|
||||
isDir: true,
|
||||
})
|
||||
}
|
||||
}
|
||||
tc.folders.Lock()
|
||||
if len(listing) > 0 {
|
||||
tc.folders.listing["__bad__"] = listing
|
||||
} else {
|
||||
delete(tc.folders.listing, "__bad__")
|
||||
}
|
||||
tc.folders.Unlock()
|
||||
}()
|
||||
|
||||
now := time.Now()
|
||||
wg.Add(len(tc.directoriesFilters)) // for each directory filter
|
||||
for dir, filters := range tc.directoriesFilters {
|
||||
go func(dir string, filters []directoryFilter) {
|
||||
defer wg.Done()
|
||||
var matched []os.FileInfo
|
||||
for _, sf := range all {
|
||||
if tc.torrentMatchDirectory(filters, sf, now) {
|
||||
matched = append(matched, &fileInfo{
|
||||
id: sf.id,
|
||||
name: sf.name, size: sf.size,
|
||||
mode: 0755 | os.ModeDir, modTime: sf.modTime, isDir: true,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
tc.folders.Lock()
|
||||
if len(matched) > 0 {
|
||||
tc.folders.listing[dir] = matched
|
||||
} else {
|
||||
delete(tc.folders.listing, dir)
|
||||
}
|
||||
tc.folders.Unlock()
|
||||
}(dir, filters)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
func (tc *torrentCache) torrentMatchDirectory(filters []directoryFilter, file sortableFile, now time.Time) bool {
|
||||
|
||||
torrentName := strings.ToLower(file.name)
|
||||
for _, filter := range filters {
|
||||
matched := false
|
||||
|
||||
switch filter.filterType {
|
||||
case filterByInclude:
|
||||
matched = strings.Contains(torrentName, filter.value)
|
||||
case filterByStartsWith:
|
||||
matched = strings.HasPrefix(torrentName, filter.value)
|
||||
case filterByEndsWith:
|
||||
matched = strings.HasSuffix(torrentName, filter.value)
|
||||
case filterByExactMatch:
|
||||
matched = torrentName == filter.value
|
||||
case filterByExclude:
|
||||
matched = !strings.Contains(torrentName, filter.value)
|
||||
case filterByNotStartsWith:
|
||||
matched = !strings.HasPrefix(torrentName, filter.value)
|
||||
case filterByNotEndsWith:
|
||||
matched = !strings.HasSuffix(torrentName, filter.value)
|
||||
case filterByRegex:
|
||||
matched = filter.regex.MatchString(torrentName)
|
||||
case filterByNotRegex:
|
||||
matched = !filter.regex.MatchString(torrentName)
|
||||
case filterByNotExactMatch:
|
||||
matched = torrentName != filter.value
|
||||
case filterBySizeGT:
|
||||
matched = file.size > filter.sizeThreshold
|
||||
case filterBySizeLT:
|
||||
matched = file.size < filter.sizeThreshold
|
||||
case filterBLastAdded:
|
||||
matched = file.modTime.After(now.Add(-filter.ageThreshold))
|
||||
}
|
||||
if !matched {
|
||||
return false // All filters must match
|
||||
}
|
||||
}
|
||||
|
||||
// If we get here, all filters matched
|
||||
return true
|
||||
}
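// Worked example (filter values assumed): with the filters {include: "1080p"}
// and {not_ends_with: ".sample"}, a torrent named "some.movie.2023.1080p.mkv"
// matches because every filter must pass, while "some.movie.2023.720p.mkv"
// fails on the first filter. Torrent names are lowercased before matching.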
|
||||
|
||||
func (tc *torrentCache) getAll() map[string]CachedTorrent {
|
||||
tc.torrents.RLock()
|
||||
defer tc.torrents.RUnlock()
|
||||
result := make(map[string]CachedTorrent, len(tc.torrents.byID))
|
||||
for id, torrent := range tc.torrents.byID {
result[id] = torrent
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func (tc *torrentCache) getAllCount() int {
|
||||
tc.torrents.RLock()
|
||||
defer tc.torrents.RUnlock()
|
||||
return len(tc.torrents.byID)
|
||||
}
|
||||
|
||||
func (tc *torrentCache) getAllByName() map[string]CachedTorrent {
|
||||
tc.torrents.RLock()
|
||||
defer tc.torrents.RUnlock()
|
||||
results := make(map[string]CachedTorrent, len(tc.torrents.byName))
|
||||
for name, torrent := range tc.torrents.byName {
|
||||
results[name] = torrent
|
||||
}
|
||||
return results
|
||||
}
|
||||
|
||||
func (tc *torrentCache) getIdMaps() map[string]struct{} {
|
||||
tc.torrents.RLock()
|
||||
defer tc.torrents.RUnlock()
|
||||
res := make(map[string]struct{}, len(tc.torrents.byID))
|
||||
for id := range tc.torrents.byID {
|
||||
res[id] = struct{}{}
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func (tc *torrentCache) removeId(id string) {
|
||||
tc.torrents.Lock()
|
||||
defer tc.torrents.Unlock()
|
||||
delete(tc.torrents.byID, id)
|
||||
tc.sortNeeded.Store(true)
|
||||
}
|
||||
|
||||
func (tc *torrentCache) remove(name string) {
|
||||
tc.torrents.Lock()
|
||||
defer tc.torrents.Unlock()
|
||||
delete(tc.torrents.byName, name)
|
||||
tc.sortNeeded.Store(true)
|
||||
}
|
||||
63
pkg/debrid/store/worker.go
Normal file
@@ -0,0 +1,63 @@
|
||||
package store
|
||||
|
||||
import (
|
||||
"context"
|
||||
"github.com/go-co-op/gocron/v2"
|
||||
"github.com/sirrobot01/decypharr/internal/utils"
|
||||
)
|
||||
|
||||
func (c *Cache) StartSchedule(ctx context.Context) error {
|
||||
// For now, we just want to refresh the listing and download links
|
||||
|
||||
// Stop any existing jobs before starting new ones
|
||||
c.scheduler.RemoveByTags("decypharr")
|
||||
|
||||
// Schedule download link refresh job
|
||||
if jd, err := utils.ConvertToJobDef(c.downloadLinksRefreshInterval); err != nil {
|
||||
c.logger.Error().Err(err).Msg("Failed to convert download link refresh interval to job definition")
|
||||
} else {
|
||||
// Schedule the job
|
||||
if _, err := c.scheduler.NewJob(jd, gocron.NewTask(func() {
|
||||
c.refreshDownloadLinks(ctx)
|
||||
}), gocron.WithContext(ctx), gocron.WithTags("decypharr")); err != nil {
|
||||
c.logger.Error().Err(err).Msg("Failed to create download link refresh job")
|
||||
} else {
|
||||
c.logger.Debug().Msgf("Download link refresh job scheduled for every %s", c.downloadLinksRefreshInterval)
|
||||
}
|
||||
}
|
||||
|
||||
// Schedule torrent refresh job
|
||||
if jd, err := utils.ConvertToJobDef(c.torrentRefreshInterval); err != nil {
|
||||
c.logger.Error().Err(err).Msg("Failed to convert torrent refresh interval to job definition")
|
||||
} else {
|
||||
// Schedule the job
|
||||
if _, err := c.scheduler.NewJob(jd, gocron.NewTask(func() {
|
||||
c.refreshTorrents(ctx)
|
||||
}), gocron.WithContext(ctx), gocron.WithTags("decypharr")); err != nil {
|
||||
c.logger.Error().Err(err).Msg("Failed to create torrent refresh job")
|
||||
} else {
|
||||
c.logger.Debug().Msgf("Torrent refresh job scheduled for every %s", c.torrentRefreshInterval)
|
||||
}
|
||||
}
|
||||
|
||||
// Schedule the reset invalid links job
|
||||
// This job runs daily at 00:00 CET
|
||||
// and reset the invalid links in the cache
|
||||
if jd, err := utils.ConvertToJobDef("00:00"); err != nil {
|
||||
c.logger.Error().Err(err).Msg("Failed to convert link reset interval to job definition")
|
||||
} else {
|
||||
// Schedule the job
|
||||
if _, err := c.cetScheduler.NewJob(jd, gocron.NewTask(func() {
|
||||
c.resetInvalidLinks(ctx)
|
||||
}), gocron.WithContext(ctx), gocron.WithTags("decypharr")); err != nil {
|
||||
c.logger.Error().Err(err).Msg("Failed to create link reset job")
|
||||
} else {
|
||||
c.logger.Debug().Msgf("Link reset job scheduled for every midnight, CET")
|
||||
}
|
||||
}
|
||||
|
||||
// Start the scheduler
|
||||
c.scheduler.Start()
|
||||
c.cetScheduler.Start()
|
||||
return nil
|
||||
}
|
||||
1
pkg/debrid/store/xml.go
Normal file
@@ -0,0 +1 @@
|
||||
package store
|
||||
@@ -1,104 +0,0 @@
|
||||
package debrid
|
||||
|
||||
import (
|
||||
"goBlack/common"
|
||||
"os"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
type Arr struct {
|
||||
Name string `json:"name"`
|
||||
Token string `json:"token"`
|
||||
Host string `json:"host"`
|
||||
}
|
||||
|
||||
type ArrHistorySchema struct {
|
||||
Page int `json:"page"`
|
||||
PageSize int `json:"pageSize"`
|
||||
SortKey string `json:"sortKey"`
|
||||
SortDirection string `json:"sortDirection"`
|
||||
TotalRecords int `json:"totalRecords"`
|
||||
Records []struct {
|
||||
ID int `json:"id"`
|
||||
DownloadID string `json:"downloadId"`
|
||||
} `json:"records"`
|
||||
}
|
||||
|
||||
type Torrent struct {
|
||||
Id string `json:"id"`
|
||||
InfoHash string `json:"info_hash"`
|
||||
Name string `json:"name"`
|
||||
Folder string `json:"folder"`
|
||||
Filename string `json:"filename"`
|
||||
OriginalFilename string `json:"original_filename"`
|
||||
Size int64 `json:"size"`
|
||||
Bytes int64 `json:"bytes"` // Size of only the files that are downloaded
|
||||
Magnet *common.Magnet `json:"magnet"`
|
||||
Files []TorrentFile `json:"files"`
|
||||
Status string `json:"status"`
|
||||
Added string `json:"added"`
|
||||
Progress float64 `json:"progress"`
|
||||
Speed int64 `json:"speed"`
|
||||
Seeders int `json:"seeders"`
|
||||
Links []string `json:"links"`
|
||||
DownloadLinks []TorrentDownloadLinks `json:"download_links"`
|
||||
|
||||
Debrid *Debrid
|
||||
Arr *Arr
|
||||
}
|
||||
|
||||
type TorrentDownloadLinks struct {
|
||||
Filename string `json:"filename"`
|
||||
Link string `json:"link"`
|
||||
DownloadLink string `json:"download_link"`
|
||||
}
|
||||
|
||||
func (t *Torrent) GetSymlinkFolder(parent string) string {
|
||||
return filepath.Join(parent, t.Arr.Name, t.Folder)
|
||||
}
|
||||
|
||||
func (t *Torrent) GetMountFolder(rClonePath string) string {
|
||||
pathWithNoExt := common.RemoveExtension(t.OriginalFilename)
|
||||
if common.FileReady(filepath.Join(rClonePath, t.OriginalFilename)) {
|
||||
return t.OriginalFilename
|
||||
} else if common.FileReady(filepath.Join(rClonePath, t.Filename)) {
|
||||
return t.Filename
|
||||
} else if common.FileReady(filepath.Join(rClonePath, pathWithNoExt)) {
|
||||
return pathWithNoExt
|
||||
} else {
|
||||
return ""
|
||||
}
|
||||
}
|
||||
|
||||
type TorrentFile struct {
|
||||
Id string `json:"id"`
|
||||
Name string `json:"name"`
|
||||
Size int64 `json:"size"`
|
||||
Path string `json:"path"`
|
||||
}
|
||||
|
||||
func getEventId(eventType string) int {
|
||||
switch eventType {
|
||||
case "grabbed":
|
||||
return 1
|
||||
case "seriesFolderDownloaded":
|
||||
return 2
|
||||
case "DownloadFolderImported":
|
||||
return 3
|
||||
case "DownloadFailed":
|
||||
return 4
|
||||
case "DownloadIgnored":
|
||||
return 7
|
||||
default:
|
||||
return 0
|
||||
}
|
||||
}
|
||||
|
||||
func (t *Torrent) Cleanup(remove bool) {
|
||||
if remove {
|
||||
err := os.Remove(t.Filename)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
243
pkg/debrid/types/account.go
Normal file
@@ -0,0 +1,243 @@
|
||||
package types
|
||||
|
||||
import (
|
||||
"github.com/sirrobot01/decypharr/internal/config"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
type Accounts struct {
|
||||
current *Account
|
||||
accounts []*Account
|
||||
mu sync.RWMutex
|
||||
}
|
||||
|
||||
func NewAccounts(debridConf config.Debrid) *Accounts {
|
||||
accounts := make([]*Account, 0)
|
||||
for idx, token := range debridConf.DownloadAPIKeys {
|
||||
if token == "" {
|
||||
continue
|
||||
}
|
||||
account := newAccount(debridConf.Name, token, idx)
|
||||
accounts = append(accounts, account)
|
||||
}
|
||||
|
||||
var current *Account
|
||||
if len(accounts) > 0 {
|
||||
current = accounts[0]
|
||||
}
|
||||
return &Accounts{
|
||||
accounts: accounts,
|
||||
current: current,
|
||||
}
|
||||
}
|
||||
|
||||
type Account struct {
|
||||
Debrid string // e.g., "realdebrid", "torbox", etc.
|
||||
Order int
|
||||
Disabled bool
|
||||
Token string
|
||||
links map[string]*DownloadLink
|
||||
mu sync.RWMutex
|
||||
}
|
||||
|
||||
func (a *Accounts) All() []*Account {
|
||||
a.mu.RLock()
|
||||
defer a.mu.RUnlock()
|
||||
activeAccounts := make([]*Account, 0)
|
||||
for _, acc := range a.accounts {
|
||||
if !acc.Disabled {
|
||||
activeAccounts = append(activeAccounts, acc)
|
||||
}
|
||||
}
|
||||
return activeAccounts
|
||||
}
|
||||
|
||||
func (a *Accounts) Current() *Account {
|
||||
a.mu.RLock()
|
||||
if a.current != nil {
|
||||
current := a.current
|
||||
a.mu.RUnlock()
|
||||
return current
|
||||
}
|
||||
a.mu.RUnlock()
|
||||
|
||||
a.mu.Lock()
|
||||
defer a.mu.Unlock()
|
||||
|
||||
// Double-check after acquiring write lock
|
||||
if a.current != nil {
|
||||
return a.current
|
||||
}
|
||||
|
||||
activeAccounts := make([]*Account, 0)
|
||||
for _, acc := range a.accounts {
|
||||
if !acc.Disabled {
|
||||
activeAccounts = append(activeAccounts, acc)
|
||||
}
|
||||
}
|
||||
|
||||
if len(activeAccounts) > 0 {
|
||||
a.current = activeAccounts[0]
|
||||
}
|
||||
return a.current
|
||||
}
|
||||
|
||||
func (a *Accounts) Disable(account *Account) {
|
||||
a.mu.Lock()
|
||||
defer a.mu.Unlock()
|
||||
account.disable()
|
||||
|
||||
if a.current == account {
|
||||
var newCurrent *Account
|
||||
for _, acc := range a.accounts {
|
||||
if !acc.Disabled {
|
||||
newCurrent = acc
|
||||
break
|
||||
}
|
||||
}
|
||||
a.current = newCurrent
|
||||
}
|
||||
}
|
||||
|
||||
func (a *Accounts) Reset() {
|
||||
a.mu.Lock()
|
||||
defer a.mu.Unlock()
|
||||
for _, acc := range a.accounts {
|
||||
acc.resetDownloadLinks()
|
||||
acc.Disabled = false
|
||||
}
|
||||
if len(a.accounts) > 0 {
|
||||
a.current = a.accounts[0]
|
||||
} else {
|
||||
a.current = nil
|
||||
}
|
||||
}
|
||||
|
||||
func (a *Accounts) GetDownloadLink(fileLink string) (*DownloadLink, error) {
|
||||
if a.Current() == nil {
|
||||
return nil, NoActiveAccountsError
|
||||
}
|
||||
dl, ok := a.Current().getLink(fileLink)
|
||||
if !ok {
|
||||
return nil, NoDownloadLinkError
|
||||
}
|
||||
if dl.ExpiresAt.IsZero() || dl.ExpiresAt.Before(time.Now()) {
|
||||
return nil, DownloadLinkExpiredError
|
||||
}
|
||||
if dl.DownloadLink == "" {
|
||||
return nil, EmptyDownloadLinkError
|
||||
}
|
||||
return dl, nil
|
||||
}
|
||||
|
||||
func (a *Accounts) GetDownloadLinkWithAccount(fileLink string) (*DownloadLink, *Account, error) {
|
||||
currentAccount := a.Current()
|
||||
if currentAccount == nil {
|
||||
return nil, nil, NoActiveAccountsError
|
||||
}
|
||||
dl, ok := currentAccount.getLink(fileLink)
|
||||
if !ok {
|
||||
return nil, nil, NoDownloadLinkError
|
||||
}
|
||||
if dl.ExpiresAt.IsZero() || dl.ExpiresAt.Before(time.Now()) {
|
||||
return nil, currentAccount, DownloadLinkExpiredError
|
||||
}
|
||||
if dl.DownloadLink == "" {
|
||||
return nil, currentAccount, EmptyDownloadLinkError
|
||||
}
|
||||
return dl, currentAccount, nil
|
||||
}
|
||||
|
||||
func (a *Accounts) SetDownloadLink(fileLink string, dl *DownloadLink) {
|
||||
if a.Current() == nil {
|
||||
return
|
||||
}
|
||||
a.Current().setLink(fileLink, dl)
|
||||
}
|
||||
|
||||
func (a *Accounts) DeleteDownloadLink(fileLink string) {
|
||||
if a.Current() == nil {
|
||||
return
|
||||
}
|
||||
a.Current().deleteLink(fileLink)
|
||||
}
|
||||
|
||||
func (a *Accounts) GetLinksCount() int {
|
||||
if a.Current() == nil {
|
||||
return 0
|
||||
}
|
||||
return a.Current().LinksCount()
|
||||
}
|
||||
|
||||
func (a *Accounts) SetDownloadLinks(links map[string]*DownloadLink) {
|
||||
if a.Current() == nil {
|
||||
return
|
||||
}
|
||||
a.Current().setLinks(links)
|
||||
}
|
||||
|
||||
func newAccount(debridName, token string, index int) *Account {
|
||||
return &Account{
|
||||
Debrid: debridName,
|
||||
Token: token,
|
||||
Order: index,
|
||||
links: make(map[string]*DownloadLink),
|
||||
}
|
||||
}
|
||||
|
||||
func (a *Account) getLink(fileLink string) (*DownloadLink, bool) {
|
||||
a.mu.RLock()
|
||||
defer a.mu.RUnlock()
|
||||
dl, ok := a.links[a.sliceFileLink(fileLink)]
|
||||
return dl, ok
|
||||
}
|
||||
func (a *Account) setLink(fileLink string, dl *DownloadLink) {
|
||||
a.mu.Lock()
|
||||
defer a.mu.Unlock()
|
||||
a.links[a.sliceFileLink(fileLink)] = dl
|
||||
}
|
||||
func (a *Account) deleteLink(fileLink string) {
|
||||
a.mu.Lock()
|
||||
defer a.mu.Unlock()
|
||||
|
||||
delete(a.links, a.sliceFileLink(fileLink))
|
||||
}
|
||||
func (a *Account) resetDownloadLinks() {
|
||||
a.mu.Lock()
|
||||
defer a.mu.Unlock()
|
||||
a.links = make(map[string]*DownloadLink)
|
||||
}
|
||||
func (a *Account) LinksCount() int {
|
||||
a.mu.RLock()
|
||||
defer a.mu.RUnlock()
|
||||
return len(a.links)
|
||||
}
|
||||
|
||||
func (a *Account) disable() {
|
||||
a.Disabled = true
|
||||
}
|
||||
|
||||
func (a *Account) setLinks(links map[string]*DownloadLink) {
|
||||
a.mu.Lock()
|
||||
defer a.mu.Unlock()
|
||||
now := time.Now()
|
||||
for _, dl := range links {
|
||||
if !dl.ExpiresAt.IsZero() && dl.ExpiresAt.Before(now) {
|
||||
// Expired, continue
|
||||
continue
|
||||
}
|
||||
a.links[a.sliceFileLink(dl.Link)] = dl
|
||||
}
|
||||
}
|
||||
|
||||
// sliceFileLink normalizes the file link used as a cache key; Real-Debrid links are truncated to their first 39 characters
|
||||
func (a *Account) sliceFileLink(fileLink string) string {
|
||||
if a.Debrid != "realdebrid" {
|
||||
return fileLink
|
||||
}
|
||||
if len(fileLink) < 39 {
|
||||
return fileLink
|
||||
}
|
||||
return fileLink[0:39]
|
||||
}
|
||||
28
pkg/debrid/types/client.go
Normal file
@@ -0,0 +1,28 @@
|
||||
package types
|
||||
|
||||
import (
|
||||
"github.com/rs/zerolog"
|
||||
)
|
||||
|
||||
type Client interface {
|
||||
SubmitMagnet(tr *Torrent) (*Torrent, error)
|
||||
CheckStatus(tr *Torrent) (*Torrent, error)
|
||||
GetFileDownloadLinks(tr *Torrent) error
|
||||
GetDownloadLink(tr *Torrent, file *File) (*DownloadLink, error)
|
||||
DeleteTorrent(torrentId string) error
|
||||
IsAvailable(infohashes []string) map[string]bool
|
||||
GetDownloadUncached() bool
|
||||
UpdateTorrent(torrent *Torrent) error
|
||||
GetTorrent(torrentId string) (*Torrent, error)
|
||||
GetTorrents() ([]*Torrent, error)
|
||||
Name() string
|
||||
Logger() zerolog.Logger
|
||||
GetDownloadingStatus() []string
|
||||
GetDownloadLinks() (map[string]*DownloadLink, error)
|
||||
CheckLink(link string) error
|
||||
GetMountPath() string
|
||||
Accounts() *Accounts // Returns the active download account/token
|
||||
DeleteDownloadLink(linkId string) error
|
||||
GetProfile() (*Profile, error)
|
||||
GetAvailableSlots() (int, error)
|
||||
}
|
||||
30
pkg/debrid/types/error.go
Normal file
@@ -0,0 +1,30 @@
|
||||
package types
|
||||
|
||||
type Error struct {
|
||||
Message string `json:"message"`
|
||||
Code string `json:"code"`
|
||||
}
|
||||
|
||||
func (e *Error) Error() string {
|
||||
return e.Message
|
||||
}
|
||||
|
||||
var NoActiveAccountsError = &Error{
|
||||
Message: "No active accounts",
|
||||
Code: "no_active_accounts",
|
||||
}
|
||||
|
||||
var NoDownloadLinkError = &Error{
|
||||
Message: "No download link found",
|
||||
Code: "no_download_link",
|
||||
}
|
||||
|
||||
var DownloadLinkExpiredError = &Error{
|
||||
Message: "Download link expired",
|
||||
Code: "download_link_expired",
|
||||
}
|
||||
|
||||
var EmptyDownloadLinkError = &Error{
|
||||
Message: "Download link is empty",
|
||||
Code: "empty_download_link",
|
||||
}
|
||||
144
pkg/debrid/types/torrent.go
Normal file
@@ -0,0 +1,144 @@
|
||||
package types
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/sirrobot01/decypharr/internal/logger"
|
||||
"github.com/sirrobot01/decypharr/internal/utils"
|
||||
"github.com/sirrobot01/decypharr/pkg/arr"
|
||||
)
|
||||
|
||||
type Torrent struct {
|
||||
Id string `json:"id"`
|
||||
InfoHash string `json:"info_hash"`
|
||||
Name string `json:"name"`
|
||||
Folder string `json:"folder"`
|
||||
Filename string `json:"filename"`
|
||||
OriginalFilename string `json:"original_filename"`
|
||||
Size int64 `json:"size"`
|
||||
Bytes int64 `json:"bytes"` // Size of only the files that are downloaded
|
||||
Magnet *utils.Magnet `json:"magnet"`
|
||||
Files map[string]File `json:"files"`
|
||||
Status string `json:"status"`
|
||||
Added string `json:"added"`
|
||||
Progress float64 `json:"progress"`
|
||||
Speed int64 `json:"speed"`
|
||||
Seeders int `json:"seeders"`
|
||||
Links []string `json:"links"`
|
||||
MountPath string `json:"mount_path"`
|
||||
DeletedFiles []string `json:"deleted_files"`
|
||||
|
||||
Debrid string `json:"debrid"`
|
||||
|
||||
Arr *arr.Arr `json:"arr"`
|
||||
|
||||
SizeDownloaded int64 `json:"-"` // This is used for local download
|
||||
DownloadUncached bool `json:"-"`
|
||||
|
||||
sync.Mutex
|
||||
}
|
||||
|
||||
func (t *Torrent) GetSymlinkFolder(parent string) string {
|
||||
return filepath.Join(parent, t.Arr.Name, t.Folder)
|
||||
}
|
||||
|
||||
func (t *Torrent) GetMountFolder(rClonePath string) (string, error) {
|
||||
_log := logger.Default()
|
||||
possiblePaths := []string{
|
||||
t.OriginalFilename,
|
||||
t.Filename,
|
||||
utils.RemoveExtension(t.OriginalFilename),
|
||||
}
|
||||
|
||||
for _, path := range possiblePaths {
|
||||
_p := filepath.Join(rClonePath, path)
|
||||
_log.Trace().Msgf("Checking path: %s", _p)
|
||||
_, err := os.Stat(_p)
|
||||
if !os.IsNotExist(err) {
|
||||
return path, nil
|
||||
}
|
||||
}
|
||||
return "", fmt.Errorf("no path found")
|
||||
}
|
||||
|
||||
func (t *Torrent) GetFile(filename string) (File, bool) {
|
||||
f, ok := t.Files[filename]
|
||||
if !ok {
|
||||
return File{}, false
|
||||
}
|
||||
return f, !f.Deleted
|
||||
}
|
||||
|
||||
func (t *Torrent) GetFiles() []File {
|
||||
files := make([]File, 0, len(t.Files))
|
||||
for _, f := range t.Files {
|
||||
if !f.Deleted {
|
||||
files = append(files, f)
|
||||
}
|
||||
}
|
||||
return files
|
||||
}
|
||||
|
||||
type File struct {
|
||||
TorrentId string `json:"torrent_id"`
|
||||
Id string `json:"id"`
|
||||
Name string `json:"name"`
|
||||
Size int64 `json:"size"`
|
||||
IsRar bool `json:"is_rar"`
|
||||
ByteRange *[2]int64 `json:"byte_range,omitempty"`
|
||||
Path string `json:"path"`
|
||||
Link string `json:"link"`
|
||||
AccountId string `json:"account_id"`
|
||||
Generated time.Time `json:"generated"`
|
||||
Deleted bool `json:"deleted"`
|
||||
DownloadLink *DownloadLink `json:"-"`
|
||||
}
|
||||
|
||||
func (t *Torrent) Cleanup(remove bool) {
|
||||
if remove {
|
||||
err := os.Remove(t.Filename)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type IngestData struct {
|
||||
Debrid string `json:"debrid"`
|
||||
Name string `json:"name"`
|
||||
Hash string `json:"hash"`
|
||||
Size int64 `json:"size"`
|
||||
}
|
||||
|
||||
type Profile struct {
|
||||
Name string `json:"name"`
|
||||
Id int64 `json:"id"`
|
||||
Username string `json:"username"`
|
||||
Email string `json:"email"`
|
||||
Points int64 `json:"points"`
|
||||
Type string `json:"type"`
|
||||
Premium int `json:"premium"`
|
||||
Expiration time.Time `json:"expiration"`
|
||||
|
||||
LibrarySize int `json:"library_size"`
|
||||
BadTorrents int `json:"bad_torrents"`
|
||||
ActiveLinks int `json:"active_links"`
|
||||
}
|
||||
|
||||
type DownloadLink struct {
|
||||
Filename string `json:"filename"`
|
||||
Link string `json:"link"`
|
||||
DownloadLink string `json:"download_link"`
|
||||
Generated time.Time `json:"generated"`
|
||||
Size int64 `json:"size"`
|
||||
Id string `json:"id"`
|
||||
ExpiresAt time.Time
|
||||
}
|
||||
|
||||
func (d *DownloadLink) String() string {
|
||||
return d.DownloadLink
|
||||
}
|
||||
@@ -1,333 +0,0 @@
package proxy

import (
    "bytes"
    "cmp"
    "encoding/xml"
    "fmt"
    "github.com/elazarl/goproxy"
    "github.com/elazarl/goproxy/ext/auth"
    "github.com/valyala/fastjson"
    "goBlack/common"
    "goBlack/pkg/debrid"
    "io"
    "log"
    "net/http"
    "os"
    "regexp"
    "strings"
    "sync"
)

type RSS struct {
    XMLName xml.Name `xml:"rss"`
    Text    string   `xml:",chardata"`
    Version string   `xml:"version,attr"`
    Atom    string   `xml:"atom,attr"`
    Torznab string   `xml:"torznab,attr"`
    Channel struct {
        Text string `xml:",chardata"`
        Link struct {
            Text string `xml:",chardata"`
            Rel  string `xml:"rel,attr"`
            Type string `xml:"type,attr"`
        } `xml:"link"`
        Title string `xml:"title"`
        Items []Item `xml:"item"`
    } `xml:"channel"`
}

type Item struct {
    Text            string `xml:",chardata"`
    Title           string `xml:"title"`
    Description     string `xml:"description"`
    GUID            string `xml:"guid"`
    ProwlarrIndexer struct {
        Text string `xml:",chardata"`
        ID   string `xml:"id,attr"`
        Type string `xml:"type,attr"`
    } `xml:"prowlarrindexer"`
    Comments  string   `xml:"comments"`
    PubDate   string   `xml:"pubDate"`
    Size      string   `xml:"size"`
    Link      string   `xml:"link"`
    Category  []string `xml:"category"`
    Enclosure struct {
        Text   string `xml:",chardata"`
        URL    string `xml:"url,attr"`
        Length string `xml:"length,attr"`
        Type   string `xml:"type,attr"`
    } `xml:"enclosure"`
    TorznabAttrs []struct {
        Text  string `xml:",chardata"`
        Name  string `xml:"name,attr"`
        Value string `xml:"value,attr"`
    } `xml:"attr"`
}

type Proxy struct {
    port       string
    enabled    bool
    debug      bool
    username   string
    password   string
    cachedOnly bool
    debrid     debrid.Service
    cache      *common.Cache
    logger     *log.Logger
}

func NewProxy(config common.Config, deb debrid.Service, cache *common.Cache) *Proxy {
    cfg := config.Proxy
    port := cmp.Or(os.Getenv("PORT"), cfg.Port, "8181")
    return &Proxy{
        port:       port,
        enabled:    cfg.Enabled,
        debug:      cfg.Debug,
        username:   cfg.Username,
        password:   cfg.Password,
        cachedOnly: *cfg.CachedOnly,
        debrid:     deb,
        cache:      cache,
        logger:     common.NewLogger("Proxy", os.Stdout),
    }
}

func (p *Proxy) ProcessJSONResponse(resp *http.Response) *http.Response {
    if resp == nil || resp.Body == nil {
        return resp
    }

    body, err := io.ReadAll(resp.Body)
    if err != nil {
        return resp
    }
    err = resp.Body.Close()
    if err != nil {
        return nil
    }

    var par fastjson.Parser
    v, err := par.ParseBytes(body)
    if err != nil {
        // If it's not JSON, return the original response
        resp.Body = io.NopCloser(bytes.NewReader(body))
        return resp
    }

    // Modify the JSON

    // Serialize the modified JSON back to bytes
    modifiedBody := v.MarshalTo(nil)

    // Set the modified body back to the response
    resp.Body = io.NopCloser(bytes.NewReader(modifiedBody))
    resp.ContentLength = int64(len(modifiedBody))
    resp.Header.Set("Content-Length", fmt.Sprintf("%d", len(modifiedBody)))
    return resp
}

func (p *Proxy) ProcessResponse(resp *http.Response) *http.Response {
    if resp == nil || resp.Body == nil {
        return resp
    }
    contentType := resp.Header.Get("Content-Type")
    switch contentType {
    case "application/json":
        return resp // p.ProcessJSONResponse(resp)
    case "application/xml":
        return p.ProcessXMLResponse(resp)
    case "application/rss+xml":
        return p.ProcessXMLResponse(resp)
    default:
        return resp
    }
}

func getItemsHash(items []Item) map[string]string {

    var wg sync.WaitGroup
    idHashMap := sync.Map{} // Use sync.Map for concurrent access

    for _, item := range items {
        wg.Add(1)
        go func(item Item) {
            defer wg.Done()
            hash := strings.ToLower(item.getHash())
            if hash != "" {
                idHashMap.Store(item.GUID, hash) // Store directly into sync.Map
            }
        }(item)
    }
    wg.Wait()

    // Convert sync.Map to regular map
    finalMap := make(map[string]string)
    idHashMap.Range(func(key, value interface{}) bool {
        finalMap[key.(string)] = value.(string)
        return true
    })

    return finalMap
}

func (item Item) getHash() string {
    infohash := ""

    for _, attr := range item.TorznabAttrs {
        if attr.Name == "infohash" {
            return attr.Value
        }
    }

    if strings.Contains(item.GUID, "magnet:?") {
        magnet, err := common.GetMagnetInfo(item.GUID)
        if err == nil && magnet != nil && magnet.InfoHash != "" {
            return magnet.InfoHash
        }
    }

    magnetLink := item.Link

    if magnetLink == "" {
        // We can't check the availability of the torrent without a magnet link or infohash
        return ""
    }

    if strings.Contains(magnetLink, "magnet:?") {
        magnet, err := common.GetMagnetInfo(magnetLink)
        if err == nil && magnet != nil && magnet.InfoHash != "" {
            return magnet.InfoHash
        }
    }
    // Check the description for an infohash
    hash := common.ExtractInfoHash(item.Description)
    if hash == "" {
        // Fall back to the comments field
        hash = common.ExtractInfoHash(item.Comments)
    }
    infohash = hash
    if infohash == "" {
        if strings.Contains(magnetLink, "http") {
            h, _ := common.GetInfohashFromURL(magnetLink)
            if h != "" {
                infohash = h
            }
        }
    }
    return infohash
}

func (p *Proxy) ProcessXMLResponse(resp *http.Response) *http.Response {
    if resp == nil || resp.Body == nil {
        return resp
    }

    body, err := io.ReadAll(resp.Body)
    if err != nil {
        p.logger.Println("Error reading response body:", err)
        resp.Body = io.NopCloser(bytes.NewReader(body))
        return resp
    }
    err = resp.Body.Close()
    if err != nil {
        return nil
    }

    var rss RSS
    err = xml.Unmarshal(body, &rss)
    if err != nil {
        p.logger.Printf("Error unmarshalling XML: %v", err)
        resp.Body = io.NopCloser(bytes.NewReader(body))
        return resp
    }
    indexer := ""
    if len(rss.Channel.Items) > 0 {
        indexer = rss.Channel.Items[0].ProwlarrIndexer.Text
    } else {
        resp.Body = io.NopCloser(bytes.NewReader(body))
        return resp
    }

    // Step 4: Extract infohash or magnet URI, manipulate data
    IdsHashMap := getItemsHash(rss.Channel.Items)
    hashes := make([]string, 0)
    for _, hash := range IdsHashMap {
        if hash != "" {
            hashes = append(hashes, hash)
        }
    }
    availableHashesMap := p.debrid.IsAvailable(hashes)
    newItems := make([]Item, 0, len(rss.Channel.Items))

    if len(hashes) > 0 {
        for _, item := range rss.Channel.Items {
            hash := IdsHashMap[item.GUID]
            if hash == "" {
                continue
            }
            isCached, exists := availableHashesMap[hash]
            if !exists || !isCached {
                continue
            }
            newItems = append(newItems, item)
        }
    }

    if len(newItems) > 0 {
        p.logger.Printf("[%s Report]: %d/%d items are cached || Found %d infohash", indexer, len(newItems), len(rss.Channel.Items), len(hashes))
    } else {
        // This will prevent the indexer from being disabled by the arr
        p.logger.Printf("[%s Report]: No Items are cached; Return only first item with [UnCached]", indexer)
        item := rss.Channel.Items[0]
        item.Title = fmt.Sprintf("%s [UnCached]", item.Title)
        newItems = append(newItems, item)
    }

    rss.Channel.Items = newItems
    modifiedBody, err := xml.MarshalIndent(rss, "", " ")
    if err != nil {
        p.logger.Printf("Error marshalling XML: %v", err)
        resp.Body = io.NopCloser(bytes.NewReader(body))
        return resp
    }
    modifiedBody = append([]byte(xml.Header), modifiedBody...)

    // Set the modified body back to the response
    resp.Body = io.NopCloser(bytes.NewReader(modifiedBody))
    return resp
}

func UrlMatches(re *regexp.Regexp) goproxy.ReqConditionFunc {
    return func(req *http.Request, ctx *goproxy.ProxyCtx) bool {
        return re.MatchString(req.URL.String())
    }
}

func (p *Proxy) Start() {
    username, password := p.username, p.password
    proxy := goproxy.NewProxyHttpServer()
    if username != "" || password != "" {
        // Set up basic auth for proxy
        auth.ProxyBasic(proxy, "my_realm", func(user, pwd string) bool {
            return user == username && password == pwd
        })
    }

    proxy.OnRequest(goproxy.ReqHostMatches(regexp.MustCompile("^.443$"))).HandleConnect(goproxy.AlwaysMitm)
    proxy.OnResponse(
        UrlMatches(regexp.MustCompile("^.*/api\\?t=(search|tvsearch|movie)(&.*)?$")),
        goproxy.StatusCodeIs(http.StatusOK, http.StatusAccepted)).DoFunc(
        func(resp *http.Response, ctx *goproxy.ProxyCtx) *http.Response {
            return p.ProcessResponse(resp)
        })

    proxy.Verbose = p.debug
    portFmt := fmt.Sprintf(":%s", p.port)
    p.logger.Printf("[*] Starting proxy server on %s\n", portFmt)
    p.logger.Fatal(http.ListenAndServe(portFmt, proxy))
}
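For context, a minimal sketch of the kind of Torznab query the response hook above intercepts; the regexp is copied from Start(), while the Prowlarr host, indexer id, and API key in the sample URL are made-up placeholders.

package main

import (
    "fmt"
    "regexp"
)

func main() {
    // Same Torznab pattern registered in Start(): only search, tvsearch and movie queries are rewritten.
    re := regexp.MustCompile("^.*/api\\?t=(search|tvsearch|movie)(&.*)?$")

    // Placeholder Prowlarr-style search URL (host, indexer id and apikey are illustrative).
    url := "http://prowlarr:9696/1/api?t=tvsearch&q=example&apikey=xxxx"
    fmt.Println(re.MatchString(url)) // true
}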
103 pkg/qbit/arr.go
@@ -1,103 +0,0 @@
package qbit

import (
    "bytes"
    "cmp"
    "encoding/json"
    "goBlack/common"
    "goBlack/pkg/debrid"
    "net/http"
    gourl "net/url"
    "strconv"
    "strings"
)

func (q *QBit) RefreshArr(arr *debrid.Arr) {
    if arr.Token == "" || arr.Host == "" {
        return
    }
    url, err := common.JoinURL(arr.Host, "api/v3/command")

    if err != nil {
        return
    }
    payload := map[string]string{"name": "RefreshMonitoredDownloads"}
    jsonPayload, err := json.Marshal(payload)
    if err != nil {
        return
    }

    client := &http.Client{}
    req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonPayload))
    if err != nil {
        return
    }

    req.Header.Set("Content-Type", "application/json")
    req.Header.Set("X-Api-Key", arr.Token)
    resp, reqErr := client.Do(req)
    if reqErr != nil {
        return
    }
    defer resp.Body.Close()
    statusOk := strconv.Itoa(resp.StatusCode)[0] == '2'
    if statusOk && q.debug {
        q.logger.Printf("Refreshed monitored downloads for %s", cmp.Or(arr.Name, arr.Host))
    }
}

func (q *QBit) GetArrHistory(arr *debrid.Arr, downloadId, eventType string) *debrid.ArrHistorySchema {
    query := gourl.Values{}
    if downloadId != "" {
        query.Add("downloadId", downloadId)
    }
    query.Add("eventType", eventType)
    query.Add("pageSize", "100")
    url, _ := common.JoinURL(arr.Host, "history")
    url += "?" + query.Encode()
    resp, err := http.Get(url)
    if err != nil {
        return nil
    }
    var data *debrid.ArrHistorySchema

    if err = json.NewDecoder(resp.Body).Decode(&data); err != nil {
        return nil
    }
    return data
}

func (q *QBit) MarkArrAsFailed(torrent *Torrent, arr *debrid.Arr) error {
    downloadId := strings.ToUpper(torrent.Hash)
    history := q.GetArrHistory(arr, downloadId, "grabbed")
    if history == nil {
        return nil
    }
    torrentId := 0
    for _, record := range history.Records {
        if strings.EqualFold(record.DownloadID, downloadId) {
            torrentId = record.ID
            break
        }
    }
    if torrentId != 0 {
        url, err := common.JoinURL(arr.Host, "history/failed/", strconv.Itoa(torrentId))
        if err != nil {
            return err
        }
        req, err := http.NewRequest(http.MethodPost, url, nil)
        if err != nil {
            return err
        }
        client := &http.Client{}
        _, err = client.Do(req)
        if err == nil {
            q.logger.Printf("Marked torrent: %s as failed", torrent.Name)
        }
    }
    return nil
}
207 pkg/qbit/context.go Normal file
@@ -0,0 +1,207 @@
package qbit

import (
    "context"
    "encoding/base64"
    "fmt"
    "github.com/go-chi/chi/v5"
    "github.com/sirrobot01/decypharr/internal/config"
    "github.com/sirrobot01/decypharr/pkg/arr"
    "github.com/sirrobot01/decypharr/pkg/store"
    "golang.org/x/crypto/bcrypt"
    "net/http"
    "net/url"
    "strings"
)

type contextKey string

const (
    categoryKey contextKey = "category"
    hashesKey   contextKey = "hashes"
    arrKey      contextKey = "arr"
)

func validateServiceURL(urlStr string) error {
    if urlStr == "" {
        return fmt.Errorf("URL cannot be empty")
    }

    // Try parsing as full URL first
    u, err := url.Parse(urlStr)
    if err == nil && u.Scheme != "" && u.Host != "" {
        // It's a full URL, validate scheme
        if u.Scheme != "http" && u.Scheme != "https" {
            return fmt.Errorf("URL scheme must be http or https")
        }
        return nil
    }

    // Check if it's a host:port format (no scheme)
    if strings.Contains(urlStr, ":") && !strings.Contains(urlStr, "://") {
        // Try parsing with http:// prefix
        testURL := "http://" + urlStr
        u, err := url.Parse(testURL)
        if err != nil {
            return fmt.Errorf("invalid host:port format: %w", err)
        }

        if u.Host == "" {
            return fmt.Errorf("host is required in host:port format")
        }

        // Validate port number
        if u.Port() == "" {
            return fmt.Errorf("port is required in host:port format")
        }

        return nil
    }

    return fmt.Errorf("invalid URL format: %s", urlStr)
}

func getCategory(ctx context.Context) string {
    if category, ok := ctx.Value(categoryKey).(string); ok {
        return category
    }
    return ""
}

func getHashes(ctx context.Context) []string {
    if hashes, ok := ctx.Value(hashesKey).([]string); ok {
        return hashes
    }
    return nil
}

func getArrFromContext(ctx context.Context) *arr.Arr {
    if a, ok := ctx.Value(arrKey).(*arr.Arr); ok {
        return a
    }
    return nil
}
func decodeAuthHeader(header string) (string, string, error) {
    encodedTokens := strings.Split(header, " ")
    if len(encodedTokens) != 2 {
        return "", "", nil
    }
    encodedToken := encodedTokens[1]

    bytes, err := base64.StdEncoding.DecodeString(encodedToken)
    if err != nil {
        return "", "", err
    }

    bearer := string(bytes)

    colonIndex := strings.LastIndex(bearer, ":")
    if colonIndex == -1 {
        return "", "", fmt.Errorf("invalid authorization header: expected host:token")
    }
    host := bearer[:colonIndex]
    token := bearer[colonIndex+1:]

    return host, token, nil
}
func (q *QBit) categoryContext(next http.Handler) http.Handler {
    return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        category := strings.TrimSpace(r.URL.Query().Get("category"))
        if category == "" {
            // Get from form
            _ = r.ParseForm()
            category = r.Form.Get("category")
            if category == "" {
                // Get from multipart form
                _ = r.ParseMultipartForm(32 << 20)
                category = r.FormValue("category")
            }
        }
        ctx := context.WithValue(r.Context(), categoryKey, strings.TrimSpace(category))
        next.ServeHTTP(w, r.WithContext(ctx))
    })
}

// authContext creates a middleware that extracts the Arr host and token from the Authorization header
// and adds them to the request context.
// This is used to identify the Arr instance for the request.
// Only a valid host and token are added to the context/config; everything else must be configured manually.
func (q *QBit) authContext(next http.Handler) http.Handler {
    return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        cfg := config.Get()
        host, token, err := decodeAuthHeader(r.Header.Get("Authorization"))
        category := getCategory(r.Context())
        arrs := store.Get().Arr()
        // Check if arr exists
        a := arrs.Get(category)
        if a == nil {
            // Arr is not configured, create a new one
            downloadUncached := false
            a = arr.New(category, "", "", false, false, &downloadUncached, "", "auto")
        }
        if err == nil {
            host = strings.TrimSpace(host)
            if host != "" {
                a.Host = host
            }
            token = strings.TrimSpace(token)
            if token != "" {
                a.Token = token
            }
        }
        if cfg.NeedsAuth() {
            if a.Host == "" || a.Token == "" {
                http.Error(w, "Unauthorized: Host and token are required for authentication", http.StatusUnauthorized)
                return
            }
            // try to use either Arr validate, or user auth validation
            if err := a.Validate(); err != nil {
                // If this failed, try to use user auth validation
                if !verifyAuth(host, token) {
                    http.Error(w, "Unauthorized: Invalid host or token", http.StatusUnauthorized)
                    return
                }
            }
        }

        a.Source = "auto"
        arrs.AddOrUpdate(a)
        ctx := context.WithValue(r.Context(), arrKey, a)
        next.ServeHTTP(w, r.WithContext(ctx))
    })
}
func hashesContext(next http.Handler) http.Handler {
    return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        _hashes := chi.URLParam(r, "hashes")
        var hashes []string
        if _hashes != "" {
            hashes = strings.Split(_hashes, "|")
        }
        if hashes == nil {
            // Get hashes from form
            _ = r.ParseForm()
            hashes = r.Form["hashes"]
        }
        for i, hash := range hashes {
            hashes[i] = strings.TrimSpace(hash)
        }
        ctx := context.WithValue(r.Context(), hashesKey, hashes)
        next.ServeHTTP(w, r.WithContext(ctx))
    })
}

func verifyAuth(username, password string) bool {
    // If you're storing hashed password, use bcrypt to compare
    if username == "" {
        return false
    }
    auth := config.Get().GetAuth()
    if auth == nil {
        return false
    }
    if username != auth.Username {
        return false
    }
    err := bcrypt.CompareHashAndPassword([]byte(auth.Password), []byte(password))
    return err == nil
}
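As a rough illustration of the credential shape decodeAuthHeader expects (a Basic value whose decoded form is host:token), here is how a client might build the header; the Arr host, API key, and request URL below are placeholders, not values taken from the repository.

package main

import (
    "encoding/base64"
    "fmt"
    "net/http"
)

func main() {
    // decodeAuthHeader splits the decoded credential on the last ":" into host and token.
    host := "http://sonarr:8989" // placeholder Arr host
    token := "sonarr-api-key"    // placeholder Arr API key
    cred := base64.StdEncoding.EncodeToString([]byte(host + ":" + token))

    req, _ := http.NewRequest(http.MethodGet, "http://localhost:8282/api/v2/torrents/info", nil) // placeholder URL
    req.Header.Set("Authorization", "Basic "+cred)
    fmt.Println(req.Header.Get("Authorization"))
}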
@@ -1,134 +0,0 @@
package qbit

import (
    "goBlack/common"
    "goBlack/pkg/debrid"
    "goBlack/pkg/qbit/downloaders"
    "os"
    "path/filepath"
    "sync"
    "time"
)

func (q *QBit) processManualFiles(torrent *Torrent, debridTorrent *debrid.Torrent, arr *debrid.Arr) {
    q.logger.Printf("Downloading %d files...", len(debridTorrent.DownloadLinks))
    torrentPath := common.RemoveExtension(debridTorrent.OriginalFilename)
    parent := common.RemoveInvalidChars(filepath.Join(q.DownloadFolder, debridTorrent.Arr.Name, torrentPath))
    err := os.MkdirAll(parent, os.ModePerm)
    if err != nil {
        q.logger.Printf("Failed to create directory: %s\n", parent)
        q.MarkAsFailed(torrent)
        return
    }
    torrent.TorrentPath = torrentPath
    q.downloadFiles(debridTorrent, parent)
    q.UpdateTorrent(torrent, debridTorrent)
    q.RefreshArr(arr)
}

func (q *QBit) downloadFiles(debridTorrent *debrid.Torrent, parent string) {
    var wg sync.WaitGroup
    semaphore := make(chan struct{}, 5)
    client := downloaders.GetHTTPClient()
    for _, link := range debridTorrent.DownloadLinks {
        if link.DownloadLink == "" {
            q.logger.Printf("No download link found for %s\n", link.Filename)
            continue
        }
        wg.Add(1)
        semaphore <- struct{}{}
        go func(link debrid.TorrentDownloadLinks) {
            defer wg.Done()
            defer func() { <-semaphore }()
            err := downloaders.NormalHTTP(client, link.DownloadLink, filepath.Join(parent, link.Filename))
            if err != nil {
                q.logger.Printf("Error downloading %s: %v\n", link.DownloadLink, err)
            } else {
                q.logger.Printf("Downloaded %s successfully\n", link.DownloadLink)
            }
        }(link)
    }
    wg.Wait()
    q.logger.Printf("Downloaded all files for %s\n", debridTorrent.Name)
}

func (q *QBit) processSymlink(torrent *Torrent, debridTorrent *debrid.Torrent, arr *debrid.Arr) {
    var wg sync.WaitGroup
    files := debridTorrent.Files
    ready := make(chan debrid.TorrentFile, len(files))

    q.logger.Printf("Checking %d files...", len(files))
    rCloneBase := q.debrid.GetMountPath()
    torrentPath, err := q.getTorrentPath(rCloneBase, debridTorrent) // /MyTVShow/
    if err != nil {
        q.MarkAsFailed(torrent)
        q.logger.Printf("Error: %v", err)
        return
    }

    torrentSymlinkPath := filepath.Join(q.DownloadFolder, debridTorrent.Arr.Name, torrentPath) // /mnt/symlinks/{category}/MyTVShow/
    err = os.MkdirAll(torrentSymlinkPath, os.ModePerm)
    if err != nil {
        q.logger.Printf("Failed to create directory: %s\n", torrentSymlinkPath)
        q.MarkAsFailed(torrent)
        return
    }
    torrentRclonePath := filepath.Join(rCloneBase, torrentPath)
    for _, file := range files {
        wg.Add(1)
        go checkFileLoop(&wg, torrentRclonePath, file, ready)
    }

    go func() {
        wg.Wait()
        close(ready)
    }()

    for f := range ready {
        q.logger.Println("File is ready:", f.Path)
        q.createSymLink(torrentSymlinkPath, torrentRclonePath, f)
    }
    // Update the torrent when all files are ready
    torrent.TorrentPath = filepath.Base(torrentPath) // Quite important
    q.UpdateTorrent(torrent, debridTorrent)
    q.RefreshArr(arr)
}

func (q *QBit) getTorrentPath(rclonePath string, debridTorrent *debrid.Torrent) (string, error) {
    pathChan := make(chan string)
    errChan := make(chan error)

    go func() {
        for {
            torrentPath := debridTorrent.GetMountFolder(rclonePath)
            if torrentPath != "" {
                pathChan <- torrentPath
                return
            }
            time.Sleep(time.Second)
        }
    }()

    select {
    case path := <-pathChan:
        return path, nil
    case err := <-errChan:
        return "", err
    }
}

func (q *QBit) createSymLink(path string, torrentMountPath string, file debrid.TorrentFile) {

    // Combine the directory and filename to form a full path
    fullPath := filepath.Join(path, file.Name) // /mnt/symlinks/{category}/MyTVShow/MyTVShow.S01E01.720p.mkv
    // Create a symbolic link if file doesn't exist
    torrentFilePath := filepath.Join(torrentMountPath, file.Name) // debridFolder/MyTVShow/MyTVShow.S01E01.720p.mkv
    err := os.Symlink(torrentFilePath, fullPath)
    if err != nil {
        q.logger.Printf("Failed to create symlink: %s: %v\n", fullPath, err)
    }
    // Check if the file exists
    if !common.FileReady(fullPath) {
        q.logger.Printf("Symlink not ready: %s\n", fullPath)
    }
}
@@ -1,59 +0,0 @@
package downloaders

import (
    "crypto/tls"
    "fmt"
    "github.com/valyala/fasthttp"
    "io"
    "os"
)

func GetFastHTTPClient() *fasthttp.Client {
    return &fasthttp.Client{
        TLSConfig:          &tls.Config{InsecureSkipVerify: true},
        StreamResponseBody: true,
    }
}

func NormalFastHTTP(client *fasthttp.Client, url, filename string) error {
    req := fasthttp.AcquireRequest()
    resp := fasthttp.AcquireResponse()
    defer fasthttp.ReleaseRequest(req)
    defer fasthttp.ReleaseResponse(resp)

    req.SetRequestURI(url)
    req.Header.SetMethod(fasthttp.MethodGet)

    if err := client.Do(req, resp); err != nil {
        return err
    }

    // Check the response status code
    if resp.StatusCode() != fasthttp.StatusOK {
        return fmt.Errorf("unexpected status code: %d", resp.StatusCode())
    }
    file, err := os.Create(filename)
    if err != nil {
        return err
    }
    defer func(file *os.File) {
        err := file.Close()
        if err != nil {
            fmt.Println("Error closing file:", err)
            return
        }
    }(file)
    bodyStream := resp.BodyStream()
    if bodyStream == nil {
        // Write to memory and then to file
        _, err := file.Write(resp.Body())
        if err != nil {
            return err
        }
    } else {
        if _, err := io.Copy(file, bodyStream); err != nil {
            return err
        }
    }
    return nil
}
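A small usage sketch of the two helpers above; the download URL and destination path are placeholders, and the import path follows the old goBlack module layout shown in this diff.

package main

import (
    "log"

    "goBlack/pkg/qbit/downloaders" // module path as it appears in the deleted tree
)

func main() {
    client := downloaders.GetFastHTTPClient()
    // NormalFastHTTP streams the response body straight to disk when fasthttp exposes it as a stream.
    if err := downloaders.NormalFastHTTP(client, "https://example.com/file.bin", "/tmp/file.bin"); err != nil {
        log.Fatalf("download failed: %v", err)
    }
}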