Compare commits

105 Commits

SHA1: 1c06407900, b1a3d8b762, 0e25de0e3c, e741a0e32b, 84bd93805f, fce2ce28c7, 302a461efd, 7eb021aac1, 7a989ccf2b, f04d7ac86e, 65fb2d1e7c, 46beac7227, e0e71b0f7e, 3b463adf09, 7af0de76cc, 108da305b3, 9a7bff04ef, 325e6c912c, 6a24c372f5, 32ecc08a72, 4b2f601df2, bfd2596367, 6f4f72d781, 14341d30bc, 878f78468f, c386495d3d, 1614e29f8f, 186a24cc4a, 16c825d5ba, 8ca3cb32f3, c5b975a721, 2fa6737f31, f40cd9ba56, bca96dd858, 1b9b7e203e, 99b4a3152d, 297715bf6e, 64995d0bf3, 92504cc8e0, 12f89b3047, 5d7ddbd208, 84b70464da, 092a028ad9, 530de20276, e2eb11056d, 1a2504ff6c, 07d632309a, d9b06fb518, d58b327957, bba90cb89a, fc5c6e2869, 66f4965ec8, dc16f0d8a1, 0741ddf999, 2ae4bd571e, 0b1c1af8b8, 74a55149fc, cfb0051b04, a986c4b5d0, 3841b7751e, ea73572557, 7cb41a0e8b, 451c17cdf7, c39eebea0d, 03c9657945, 28e5342c66, eeb3a31b05, e9d3e120f3, 104df3c33c, 810c9d705e, 4ff00859a3, b77dbcc4f4, 58c0aafab1, 357da54083, 88a7196eaf, abc86a0460, dd0b7efdff, 7359f280b0, 4eb3539347, 9fb1118475, 07491b43fe, 8f7c9a19c5, a51364d150, df2aa4e361, b51cb954f8, 8bdb2e3547, 2c9a076cd2, d2a77620bc, 4b8f1ccfb6, f118c5b794, f6c6144601, ff74e279d9, ba147ac56c, 01981114cb, 2ec0354881, 329e4c60f5, d5e07dc961, f622cbfe63, 9511f3e99e, 60c6cb32d3, d405e0d8e0, 74791d6e62, 511df1a296, ad1ba7b505, 7f0f71ba64
52  .air.toml  Normal file
@@ -0,0 +1,52 @@
root = "."
testdata_dir = "testdata"
tmp_dir = "tmp"

[build]
args_bin = ["--config", "data/"]
bin = "./tmp/main"
cmd = "bash -c 'go build -ldflags \"-X github.com/sirrobot01/debrid-blackhole/pkg/version.Version=0.0.0 -X github.com/sirrobot01/debrid-blackhole/pkg/version.Channel=beta\" -o ./tmp/main .'"
delay = 1000
exclude_dir = ["assets", "tmp", "vendor", "testdata", "data"]
exclude_file = []
exclude_regex = ["_test.go"]
exclude_unchanged = false
follow_symlink = false
full_bin = ""
include_dir = []
include_ext = ["go", "tpl", "tmpl", "html", ".json"]
include_file = []
kill_delay = "0s"
log = "build-errors.log"
poll = false
poll_interval = 0
post_cmd = []
pre_cmd = []
rerun = false
rerun_delay = 500
send_interrupt = false
stop_on_error = false

[color]
app = ""
build = "yellow"
main = "magenta"
runner = "green"
watcher = "cyan"

[log]
main_only = false
silent = false
time = false

[misc]
clean_on_exit = false

[proxy]
app_port = 0
enabled = false
proxy_port = 0

[screen]
clear_on_rebuild = false
keep_scroll = true

@@ -5,4 +5,7 @@ docker-compose.yml
.DS_Store
**/.idea/
*.magnet
**.torrent
**.torrent
torrents.json
**/dist/
*.json
2  .github/FUNDING.yml  vendored  Normal file
@@ -0,0 +1,2 @@
github: sirrobot01
buy_me_a_coffee: sirrobot01
85  .github/workflows/beta-docker.yml  vendored  Normal file
@@ -0,0 +1,85 @@
name: Beta Docker Build

on:
  push:
    branches:
      - beta

permissions:
  contents: read
  packages: write

jobs:
  docker:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Calculate beta version
        id: calculate_version
        run: |
          LATEST_TAG=$(git tag | grep -v 'beta' | sort -V | tail -n1)
          echo "Found latest tag: ${LATEST_TAG}"

          IFS='.' read -r -a VERSION_PARTS <<< "$LATEST_TAG"
          MAJOR="${VERSION_PARTS[0]}"
          MINOR="${VERSION_PARTS[1]}"
          PATCH="${VERSION_PARTS[2]}"

          NEW_PATCH=$((PATCH + 1))
          BETA_VERSION="${MAJOR}.${MINOR}.${NEW_PATCH}"

          echo "Calculated beta version: ${BETA_VERSION}"
          echo "beta_version=${BETA_VERSION}" >> $GITHUB_ENV

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Cache Docker layers
        uses: actions/cache@v3
        with:
          path: /tmp/.buildx-cache
          key: ${{ runner.os }}-buildx-${{ github.sha }}
          restore-keys: |
            ${{ runner.os }}-buildx-

      # Login to Docker Hub
      - name: Login to Docker Hub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      # Login to GitHub Container Registry
      - name: Login to GitHub Container Registry
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          username: ${{ github.repository_owner }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Build and push beta Docker image
        uses: docker/build-push-action@v5
        with:
          context: .
          platforms: linux/amd64,linux/arm64,linux/arm/v7
          push: true
          tags: |
            cy01/blackhole:beta
            ghcr.io/${{ github.repository_owner }}/decypharr:beta
          cache-from: type=local,src=/tmp/.buildx-cache
          cache-to: type=local,dest=/tmp/.buildx-cache-new,mode=max
          build-args: |
            VERSION=${{ env.beta_version }}
            CHANNEL=beta

      - name: Move cache
        run: |
          rm -rf /tmp/.buildx-cache
          mv /tmp/.buildx-cache-new /tmp/.buildx-cache
0  .github/workflows/docker-publish.yml  vendored
33  .github/workflows/goreleaser.yml  vendored  Normal file
@@ -0,0 +1,33 @@
name: GoReleaser

on:
  push:
    tags:
      - '*'

permissions:
  contents: write

jobs:
  goreleaser:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Set up Go
        uses: actions/setup-go@v4
        with:
          go-version: '1.22'

      - name: Run GoReleaser
        uses: goreleaser/goreleaser-action@v5
        with:
          distribution: goreleaser
          version: latest
          args: release --clean
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          RELEASE_CHANNEL: stable
77  .github/workflows/release-docker.yml  vendored  Normal file
@@ -0,0 +1,77 @@
name: Release Docker Build

on:
  push:
    tags:
      - '*'

permissions:
  contents: read
  packages: write


jobs:
  docker:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 1

      - name: Get tag name
        id: get_tag
        run: |
          TAG_NAME=${GITHUB_REF#refs/tags/}
          echo "tag_name=${TAG_NAME}" >> $GITHUB_ENV

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Cache Docker layers
        uses: actions/cache@v3
        with:
          path: /tmp/.buildx-cache
          key: ${{ runner.os }}-buildx-${{ github.sha }}
          restore-keys: |
            ${{ runner.os }}-buildx-

      # Login to Docker Hub
      - name: Login to Docker Hub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      # Login to GitHub Container Registry
      - name: Login to GitHub Container Registry
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          username: ${{ github.repository_owner }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Build and push release Docker image
        uses: docker/build-push-action@v5
        with:
          context: .
          platforms: linux/amd64,linux/arm64,linux/arm/v7
          push: true
          tags: |
            cy01/blackhole:latest
            cy01/blackhole:${{ env.tag_name }}
            ghcr.io/${{ github.repository_owner }}/decypharr:latest
            ghcr.io/${{ github.repository_owner }}/decypharr:${{ env.tag_name }}
          cache-from: type=local,src=/tmp/.buildx-cache
          cache-to: type=local,dest=/tmp/.buildx-cache-new,mode=max
          build-args: |
            VERSION=${{ env.tag_name }}
            CHANNEL=stable

      - name: Move cache
        run: |
          rm -rf /tmp/.buildx-cache
          mv /tmp/.buildx-cache-new /tmp/.buildx-cache
4  .gitignore  vendored
@@ -9,3 +9,7 @@ docker-compose.yml
*.log
*.log.*
dist/
tmp/**
torrents.json
logs/**
auth.json
@@ -1,16 +1,7 @@
# This is an example .goreleaser.yml file with some sensible defaults.
# Make sure to check the documentation at https://goreleaser.com

# The lines below are called `modelines`. See `:help modeline`
# Feel free to remove those if you don't want/need to use them.
# yaml-language-server: $schema=https://goreleaser.com/static/schema.json
# vim: set ts=2 sw=2 tw=0 fo=cnqoj

version: 2
version: 1

before:
  hooks:
    # You may remove this if you don't use go modules.
    - go mod tidy

builds:
@@ -20,6 +11,15 @@ builds:
      - linux
      - windows
      - darwin
    goarch:
      - amd64
      - arm
      - arm64
    ldflags:
      - -s -w
      - -X github.com/sirrobot01/debrid-blackhole/pkg/version.Version={{.Version}}
      - -X github.com/sirrobot01/debrid-blackhole/pkg/version.Channel={{.Env.RELEASE_CHANNEL}}


archives:
  - format: tar.gz
@@ -42,3 +42,7 @@ changelog:
    exclude:
      - "^docs:"
      - "^test:"

# Environment setup
env:
  - CGO_ENABLED=0
147  CHANGELOG.md  Normal file
@@ -0,0 +1,147 @@
#### 0.1.0
- Initial Release
- Added Real Debrid Support
- Added Arrs Support
- Added Proxy Support
- Added Basic Authentication for Proxy
- Added Rate Limiting for Debrid Providers

#### 0.1.1
- Added support for "No Blackhole" for Arrs
- Added support for "Cached Only" for Proxy
- Bug fixes

#### 0.1.2
- Bug fixes
- Code cleanup
- Get available hashes at once

#### 0.1.3

- Searching for infohashes in the XML description/summary/comments
- Added local cache support
- Added max cache size
- Rewrite blackhole.go
- Bug fixes
  - Fixed indexer getting disabled
  - Fixed blackhole not working

#### 0.1.4

- Rewrote Report log
- Fix YTS, 1337x not grabbing infohash
- Fix torrent symlink bug


#### 0.2.0-beta

- Switch to the qBittorrent API instead of Blackhole
- Rewrote the whole codebase


#### 0.2.0
- Implement 0.2.0-beta changes
- Removed Blackhole
- Added qBittorrent API
- Cleaned up the code

#### 0.2.1

- Fix uncached torrents not being downloaded
- Minor bug fixes
- Fix race condition in the cache and file system

#### 0.2.2
- Fix name mismatch in the cache
- Fix directory mapping with mounts
- Add support for refreshing the *arrs

#### 0.2.3

- Delete uncached items from RD
- Fail if the torrent is not cached (optional)
- Fix cache not being updated

#### 0.2.4

- Add file download support (Sequential Download)
- Fix http handler error
- Fix *arrs map failing concurrently
- Fix cache not being updated

#### 0.2.5
- Fix ContentPath not being set prior
- Rewrote Readme
- Cleaned up the code

#### 0.2.6
- Delete torrent for empty matched files
- Update Readme

#### 0.2.7

- Add support for multiple debrid providers
- Add Torbox support
- Add support for configurable debrid cache checks
- Add support for configurable downloading of uncached torrents

#### 0.3.0

- Add UI for adding torrents
- Refactoring of the code
- Fix Torbox bug
- Update CI/CD
- Update Readme

#### 0.3.1

- Add DebridLink Support
- Refactor error handling

#### 0.3.2

- Fix DebridLink not downloading
- Fix Torbox with uncached torrents
- Add new /internal/cached endpoint to check if a hash is cached
- Implement per-debrid local cache
- Fix file check for Torbox
- Other minor bug fixes

#### 0.3.3

- Add AllDebrid Support
- Fix Torbox not downloading uncached torrents
- Fix Rar files being downloaded

#### 0.4.0

- Add support for multiple debrid providers
- A full-fledged UI for adding torrents, repairing files, viewing config and managing torrents
- Fix issues with AllDebrid
- Fix file traversal bug
- Fix files with no parent directory
- Logging
  - Add a more robust logging system
  - Add logging to a file
  - Add logging to the UI
- Qbittorrent
  - Add support for tags (creating, deleting, listing)
  - Add support for categories (creating, deleting, listing)
  - Fix issues with arrs sending torrents using a different content type

#### 0.4.1

- Adds optional UI authentication
- Downloaded torrents persist on restart
- Fixes
  - Fix AllDebrid struggling to find the correct file
  - Minor bug fixes and speed gains
  - A new cleanup worker to clean up Arr queues


#### 0.4.2

- Hotfixes
  - Fix saving torrents error
  - Fix bugs with the UI
  - Speed improvements
71  Dockerfile
@@ -1,24 +1,65 @@
FROM --platform=$BUILDPLATFORM golang:1.22 as builder
# Stage 1: Build binaries
FROM --platform=$BUILDPLATFORM golang:1.23-alpine as builder

ARG TARGETPLATFORM
ARG BUILDPLATFORM
ARG TARGETOS
ARG TARGETARCH
ARG VERSION=0.0.0
ARG CHANNEL=dev

# Set destination for COPY
WORKDIR /app

# Download Go modules
COPY go.mod go.sum ./
RUN go mod download
RUN --mount=type=cache,target=/go/pkg/mod \
    go mod download -x

# Copy the source code. Note the slash at the end, as explained in
# https://docs.docker.com/reference/dockerfile/#copy
ADD . .
COPY . .

# Build
RUN CGO_ENABLED=0 GOOS=$(echo $TARGETPLATFORM | cut -d '/' -f1) GOARCH=$(echo $TARGETPLATFORM | cut -d '/' -f2) go build -o /blackhole
# Build main binary
RUN --mount=type=cache,target=/go/pkg/mod \
    --mount=type=cache,target=/root/.cache/go-build \
    CGO_ENABLED=0 GOOS=$TARGETOS GOARCH=$TARGETARCH \
    go build -trimpath \
    -ldflags="-w -s -X github.com/sirrobot01/debrid-blackhole/pkg/version.Version=${VERSION} -X github.com/sirrobot01/debrid-blackhole/pkg/version.Channel=${CHANNEL}" \
    -o /blackhole

FROM scratch
COPY --from=builder /blackhole /blackhole
# Build healthcheck (optimized)
RUN --mount=type=cache,target=/go/pkg/mod \
    --mount=type=cache,target=/root/.cache/go-build \
    CGO_ENABLED=0 GOOS=$TARGETOS GOARCH=$TARGETARCH \
    go build -trimpath -ldflags="-w -s" \
    -o /healthcheck cmd/healthcheck/main.go

# Run
CMD ["/blackhole", "--config", "/app/config.json"]
# Stage 2: Create directory structure
FROM alpine:3.19 as dirsetup
RUN mkdir -p /app/logs && \
    chmod 777 /app/logs && \
    touch /app/logs/decypharr.log && \
    chmod 666 /app/logs/decypharr.log

# Stage 3: Final image
FROM gcr.io/distroless/static-debian12:nonroot

LABEL version = "${VERSION}-${CHANNEL}"

LABEL org.opencontainers.image.source = "https://github.com/sirrobot01/debrid-blackhole"
LABEL org.opencontainers.image.title = "debrid-blackhole"
LABEL org.opencontainers.image.authors = "sirrobot01"
LABEL org.opencontainers.image.documentation = "https://github.com/sirrobot01/debrid-blackhole/blob/main/README.md"

# Copy binaries
COPY --from=builder --chown=nonroot:nonroot /blackhole /usr/bin/blackhole
COPY --from=builder --chown=nonroot:nonroot /healthcheck /usr/bin/healthcheck

# Copy pre-made directory structure
COPY --from=dirsetup --chown=nonroot:nonroot /app /app


# Metadata
ENV LOG_PATH=/app/logs
EXPOSE 8181 8282
VOLUME ["/app"]
USER nonroot:nonroot

HEALTHCHECK CMD ["/usr/bin/healthcheck"]

CMD ["/usr/bin/blackhole", "--config", "/app"]
233  README.md
@@ -1,33 +1,76 @@
### GoBlackHole (with Debrid Proxy Support)
### DecyphArr (qBittorrent, but with Debrid Support)

This is a Golang implementation of a Torrent Blackhole with **Real Debrid Proxy Support**.


#### Uses
- Torrent Blackhole that supports the Arrs.
This is an implementation of qBittorrent with **multiple Debrid service support**. Written in Go.

### Table of Contents

- [Features](#features)
- [Supported Debrid Providers](#supported-debrid-providers)
- [Installation](#installation)
  - [Docker Compose](#docker-compose)
  - [Binary](#binary)
- [Usage](#usage)
  - [Connecting to Sonarr/Radarr](#connecting-to-sonarrradarr)
- [Sample Config](#sample-config)
- [Config Notes](#config-notes)
  - [Log Level](#log-level)
  - [Max Cache Size](#max-cache-size)
  - [Debrid Config](#debrid-config)
  - [Proxy Config](#proxy-config)
  - [Qbittorrent Config](#qbittorrent-config)
  - [Arrs Config](#arrs-config)
- [Proxy](#proxy)
- [Repair Worker](#repair-worker)
- [Changelog](#changelog)
- [TODO](#todo)

### Features

- Mock qBittorrent API that supports the Arrs (Sonarr, Radarr, etc.)
- A full-fledged UI for managing torrents
- Proxy support for the Arrs
- Real Debrid Support
- Torbox Support
- Debrid Link Support
- Multi-Debrid Providers support
- Repair Worker for missing files (**BETA**)

The proxy is useful in filtering out un-cached Real Debrid torrents
The proxy is useful for filtering out un-cached Debrid torrents

### Supported Debrid Providers
- [Real Debrid](https://real-debrid.com)
- [Torbox](https://torbox.app)
- [Debrid Link](https://debrid-link.com)
- [All Debrid](https://alldebrid.com)


#### Installation
### Installation

##### Docker Compose
```yaml
version: '3.7'
services:
  blackhole:
    image: cy01/blackhole:latest
    image: cy01/blackhole:latest # or cy01/blackhole:beta
    container_name: blackhole
    ports:
      - "8282:8282" # qBittorrent
      - "8181:8181" # Proxy
    user: "1000:1000"
    volumes:
      - ./logs:/app/logs
      - ~/plex/media:/media
      - ~/plex/media/symlinks/:/media/symlinks/
      - ~/plex/configs/blackhole/config.json:/app/config.json # Config file, see below
      - /mnt/:/mnt
      - ~/plex/configs/blackhole/:/app # config.json must be in this directory
    environment:
      - PUID=1000
      - PGID=1000
      - UMASK=002
      - QBIT_PORT=8282 # qBittorrent Port. This is optional. You can set this in the config file
      - PORT=8181 # Proxy Port. This is optional. You can set this in the config file
    restart: unless-stopped
    depends_on:
      - rclone # If you are using rclone with docker

```

@@ -35,66 +78,150 @@ services:
Download the binary from the releases page and run it with the config file.

```bash
./blackhole --config /path/to/config.json
./blackhole --config /app
```

#### Config
### Usage
- The UI is available at `http://localhost:8282`
- Set up the config.json file. Scroll down for the sample config file
- Set up Docker Compose or the binary with the config file
- Start the service
- Connect to Sonarr/Radarr/Lidarr

#### Connecting to Sonarr/Radarr

- Sonarr/Radarr
  - Settings -> Download Client -> Add Client -> qBittorrent
  - Host: `localhost` # or the IP of the server
  - Port: `8282` # or the port set in the config file/docker-compose env
  - Username: `http://sonarr:8989` # Your arr host with http/https
  - Password: `sonarr_token` # Your arr token
  - Category: e.g. `sonarr`, `radarr`
  - Use SSL -> `No`
  - Sequential Download -> `No`|`Yes` (if you want to download the torrents locally instead of symlinking)
  - Click Test
  - Click Save

#### Basic Sample Config

This is the default config file. You can create a `config.json` file in the root directory of the project or mount it to /app in the docker-compose file.
```json
{
  "debrid": {
    "name": "realdebrid",
    "host": "https://api.real-debrid.com/rest/1.0",
    "api_key": "realdebrid_api_key",
    "folder": "data/realdebrid/torrents/",
    "rate_limit": "250/minute"
  },
  "arrs": [
  "debrids": [
    {
      "watch_folder": "data/sonarr/",
      "completed_folder": "data/sonarr/completed/",
      "token": "sonarr_api_key",
      "url": "http://localhost:8787"
    },
    {
      "watch_folder": "data/radarr/",
      "completed_folder": "data/radarr/completed/",
      "token": "radarr_api_key",
      "url": "http://localhost:7878"
    },
    {
      "watch_folder": "data/radarr4k/",
      "completed_folder": "data/radarr4k/completed/",
      "token": "radarr4k_api_key",
      "url": "http://localhost:7878"
      "name": "realdebrid",
      "host": "https://api.real-debrid.com/rest/1.0",
      "api_key": "realdebrid_key",
      "folder": "/mnt/remote/realdebrid/__all__/"
    }
  ],
  "proxy": {
    "enabled": true,
    "port": "8181",
    "debug": false,
    "port": "8100",
    "username": "username",
    "password": "password"
  }
  },
  "qbittorrent": {
    "port": "8282",
    "download_folder": "/mnt/symlinks/",
    "categories": ["sonarr", "radarr"]
  },
  "repair": {
    "enabled": false,
    "interval": "12h",
    "run_on_start": false
  },
  "use_auth": false
}
```

#### Proxy
The full config is [here](doc/config.full.json)

The proxy is useful in filtering out un-cached Real Debrid torrents.
<details>

<summary>
Click Here for the full config notes
</summary>

- The `log_level` key is used to set the log level of the application. The default value is `info`. The log level can be set to `debug`, `info`, `warn`, `error`. A sketch of these top-level keys is shown below.
- The `max_cache_size` key is used to set the maximum number of infohashes that can be stored in the availability cache. This is used to prevent round trips to the debrid provider when using the proxy/qBittorrent. The default value is `1000`
- The `allowed_file_types` key is an array of allowed file types that can be downloaded. By default, all movie, TV show and music file types are allowed
- The `use_auth` key is used to enable basic authentication for the UI. The default value is `false`
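For orientation, here is a minimal sketch of how these top-level keys could sit in `config.json`. The values are illustrative only, and the exact format expected for `allowed_file_types` entries (with or without a leading dot) is an assumption here, not documented above.

```json
{
  "log_level": "info",
  "max_cache_size": 1000,
  "allowed_file_types": ["mkv", "mp4", "srt"],
  "use_auth": false
}
```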
##### Debrid Config
- The `debrids` key is an array of debrid providers
- The `name` key is the name of the debrid provider
- The `host` key is the API endpoint of the debrid provider
- The `api_key` key is the API key of the debrid provider
- The `folder` key is the folder where your debrid folder is mounted (WebDAV, rclone, zurg, etc.), e.g. `data/realdebrid/torrents/`, `/media/remote/alldebrid/magnets/`
- The `rate_limit` key is the rate limit of the debrid provider (null by default)
- The `download_uncached` bool key is used to download uncached torrents (disabled by default)
- The `check_cached` bool key is used to check if the torrent is cached (disabled by default)

##### Repair Config (**BETA**)
The `repair` key is used to enable the repair worker; a sample block is shown below.
- The `enabled` key is used to enable the repair worker
- The `interval` key is the interval in minutes, seconds, hours, or days. Use any of these formats, e.g. 12:00, 5:00, 1h, 1d, 1m, 1s.
- The `run_on_start` key is used to run the repair worker on start
- The `zurg_url` key is the URL of the zurg server, typically `http://localhost:9999` or `http://zurg:9999`
- The `skip_deletion` key, when true, prevents the repair worker from deleting the files
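Putting the keys above together, a `repair` block might look like the following sketch; the interval and zurg URL are illustrative values, not project defaults.

```json
{
  "repair": {
    "enabled": true,
    "interval": "12h",
    "run_on_start": false,
    "zurg_url": "http://zurg:9999",
    "skip_deletion": false
  }
}
```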
##### Proxy Config
- The `enabled` key is used to enable the proxy
- The `port` key is the port the proxy will listen on
- The `log_level` key is used to set the log level of the proxy. The default value is `info`
- The `username` and `password` keys are used for basic authentication
- The `cached_only` key means only cached torrents will be returned


##### Qbittorrent Config
- The `port` key is the port qBittorrent will listen on
- The `download_folder` key is the folder where the torrents will be downloaded, e.g. `/media/symlinks/`
- The `categories` key is used to filter out torrents based on the category, e.g. `sonarr`, `radarr`
- The `refresh_interval` key sets the interval (in seconds) at which the Arrs' monitored downloads are refreshed. The default value is `5` seconds. A sample block is shown below.
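As a sketch of the qBittorrent section described above (the download folder is an example path, and the numeric type of `refresh_interval` is assumed):

```json
{
  "qbittorrent": {
    "port": "8282",
    "download_folder": "/media/symlinks/",
    "categories": ["sonarr", "radarr"],
    "refresh_interval": 5
  }
}
```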
##### Arrs Config
This is an array of Arrs (Sonarr, Radarr, etc.) that will be used to download the torrents. It is not required if you have already set up qBittorrent in the Arrs with the host and token.
This is particularly useful if you want to use the Repair tool without using qBittorrent; a sample entry is shown below.
- The `name` key is the name of the Arr/category
- The `host` key is the host of the Arr
- The `token` key is the API token of the Arr
- The `cleanup` key is used to clean up your Arr queues. This is usually for removing dangling queue items (downloads whose files have all been imported and, sometimes, incomplete season packs)
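A single entry in that array might look like the sketch below. The host and token are placeholders, and the array key name `arrs` is assumed from the naming used elsewhere in this README rather than taken from the full config reference.

```json
{
  "arrs": [
    {
      "name": "sonarr",
      "host": "http://sonarr:8989",
      "token": "sonarr_api_token",
      "cleanup": false
    }
  ]
}
```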
</details>


### Proxy

**Note**: The proxy has stopped working for Real Debrid, Debrid Link, and All Debrid. It still works for Torbox. This is due to changes in the APIs of the Debrid providers.

The proxy is useful in filtering out un-cached Debrid torrents.
The proxy is a simple HTTP proxy that requires basic authentication. It can be enabled by setting `proxy.enabled` to `true` in the config file.
The proxy listens on port `8181` by default. The username and password can be set in the config file; a sample block is shown below.
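A minimal sketch of the `proxy` block using the keys listed in the config notes above; the credentials are placeholders.

```json
{
  "proxy": {
    "enabled": true,
    "port": "8181",
    "log_level": "info",
    "username": "username",
    "password": "password",
    "cached_only": true
  }
}
```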
Setting Up Proxy in Arr
### Repair Worker

The repair worker is a simple worker that checks for missing files in the Arrs (Sonarr, Radarr, etc.). It's particularly useful for files either deleted by the Debrid provider or files with bad symlinks.

**Note**: If you're using zurg, set the `zurg_url` under the repair config. This speeds up the repair process significantly.

- Search for broken symlinks/files
- Search for missing files
- Search for deleted/unreadable files


### Changelog

- View the [CHANGELOG.md](CHANGELOG.md) for the latest changes

- Sonarr/Radarr
  - Settings -> General -> Use Proxy
  - Hostname: `localhost` # or the IP of the server
  - Port: `8181` # or the port set in the config file
  - Username: `username` # or the username set in the config file
  - Password: `password` # or the password set in the config file
  - Bypass Proxy for Local Addresses -> `No`

### TODO
- [ ] Add more Debrid Providers
- [ ] Add more Proxy features
- [ ] Add more tests
- [x] A proper name!!!!
- [x] Debrid
  - [x] Add more Debrid Providers

- [x] Qbittorrent
  - [x] Add more Qbittorrent features
  - [x] Persist torrents on restart/server crash
- [ ] Add tests
169  cmd/blackhole.go
@@ -1,169 +0,0 @@
package cmd

import (
    "fmt"
    "github.com/fsnotify/fsnotify"
    "goBlack/common"
    "goBlack/debrid"
    "log"
    "os"
    "path/filepath"
    "sync"
    "time"
)

func fileReady(path string) bool {
    _, err := os.Stat(path)
    return !os.IsNotExist(err) // Returns true if the file exists
}

func checkFileLoop(wg *sync.WaitGroup, dir string, file debrid.TorrentFile, ready chan<- debrid.TorrentFile) {
    defer wg.Done()
    ticker := time.NewTicker(1 * time.Second) // Check every second
    defer ticker.Stop()
    path := filepath.Join(dir, file.Path)
    for {
        select {
        case <-ticker.C:
            if fileReady(path) {
                ready <- file
                return
            }
        }
    }
}

func ProcessFiles(arr *debrid.Arr, torrent *debrid.Torrent) {
    var wg sync.WaitGroup
    files := torrent.Files
    ready := make(chan debrid.TorrentFile, len(files))

    log.Println("Checking files...")

    for _, file := range files {
        wg.Add(1)
        go checkFileLoop(&wg, arr.Debrid.Folder, file, ready)
    }

    go func() {
        wg.Wait()
        close(ready)
    }()

    for r := range ready {
        log.Println("File is ready:", r.Name)
        CreateSymLink(arr, torrent)
    }
    go torrent.Cleanup(true)
    fmt.Printf("%s downloaded", torrent.Name)
}

func CreateSymLink(config *debrid.Arr, torrent *debrid.Torrent) {
    path := filepath.Join(config.CompletedFolder, torrent.Folder)
    err := os.MkdirAll(path, os.ModePerm)
    if err != nil {
        log.Printf("Failed to create directory: %s\n", path)
    }
    for _, file := range torrent.Files {
        // Combine the directory and filename to form a full path
        fullPath := filepath.Join(config.CompletedFolder, file.Path)

        // Create a symbolic link if file doesn't exist
        _ = os.Symlink(filepath.Join(config.Debrid.Folder, file.Path), fullPath)
    }
}

func watchFiles(watcher *fsnotify.Watcher, events map[string]time.Time) {
    for {
        select {
        case event, ok := <-watcher.Events:
            if !ok {
                return
            }
            if event.Op&fsnotify.Write == fsnotify.Write {
                if filepath.Ext(event.Name) == ".torrent" || filepath.Ext(event.Name) == ".magnet" {
                    events[event.Name] = time.Now()
                }
            }
        case err, ok := <-watcher.Errors:
            if !ok {
                return
            }
            log.Println("ERROR:", err)
        }
    }
}

func processFilesDebounced(arr *debrid.Arr, db debrid.Service, events map[string]time.Time, debouncePeriod time.Duration) {
    ticker := time.NewTicker(1 * time.Second) // Check every second
    defer ticker.Stop()

    for range ticker.C {
        for file, lastEventTime := range events {
            if time.Since(lastEventTime) >= debouncePeriod {
                log.Printf("Torrent file detected: %s", file)
                // Process the torrent file
                torrent, err := db.Process(arr, file)
                if err != nil && torrent != nil {
                    // remove torrent file
                    torrent.Cleanup(true)
                    _ = torrent.MarkAsFailed()
                    log.Printf("Error processing torrent file: %s", err)
                }
                if err == nil && torrent != nil && len(torrent.Files) > 0 {
                    go ProcessFiles(arr, torrent)
                }
                delete(events, file) // remove file from channel
            }
        }
    }
}

func StartArr(conf *debrid.Arr, db debrid.Service) {
    log.Printf("Watching: %s", conf.WatchFolder)
    w, err := fsnotify.NewWatcher()
    if err != nil {
        log.Fatal(err)
    }
    defer func(w *fsnotify.Watcher) {
        err := w.Close()
        if err != nil {
            log.Fatal(err)
        }
    }(w)
    events := make(map[string]time.Time)

    go watchFiles(w, events)
    if err = w.Add(conf.WatchFolder); err != nil {
        log.Println("Error Watching folder:", err)
        return
    }

    processFilesDebounced(conf, db, events, 1*time.Second)
}

func StartBlackhole(config *common.Config, deb debrid.Service) {
    var wg sync.WaitGroup
    for _, conf := range config.Arrs {
        wg.Add(1)
        defer wg.Done()
        headers := map[string]string{
            "X-Api-Key": conf.Token,
        }
        client := common.NewRLHTTPClient(nil, headers)

        arr := &debrid.Arr{
            Debrid:          config.Debrid,
            WatchFolder:     conf.WatchFolder,
            CompletedFolder: conf.CompletedFolder,
            Token:           conf.Token,
            URL:             conf.URL,
            Client:          client,
        }
        go StartArr(arr, deb)
    }
    wg.Wait()
}
88  cmd/decypharr/main.go  Normal file
@@ -0,0 +1,88 @@
package decypharr

import (
    "context"
    "github.com/sirrobot01/debrid-blackhole/internal/config"
    "github.com/sirrobot01/debrid-blackhole/internal/logger"
    "github.com/sirrobot01/debrid-blackhole/pkg/proxy"
    "github.com/sirrobot01/debrid-blackhole/pkg/qbit"
    "github.com/sirrobot01/debrid-blackhole/pkg/server"
    "github.com/sirrobot01/debrid-blackhole/pkg/service"
    "github.com/sirrobot01/debrid-blackhole/pkg/version"
    "github.com/sirrobot01/debrid-blackhole/pkg/web"
    "github.com/sirrobot01/debrid-blackhole/pkg/worker"
    "log"
    "sync"
)

func Start(ctx context.Context) error {
    cfg := config.GetConfig()
    var wg sync.WaitGroup
    errChan := make(chan error)

    _log := logger.GetLogger(cfg.LogLevel)

    _log.Info().Msgf("Version: %s", version.GetInfo().String())
    _log.Debug().Msgf("Config Loaded: %s", cfg.JsonFile())
    _log.Info().Msgf("Default Log Level: %s", cfg.LogLevel)

    svc := service.New()
    _qbit := qbit.New()
    srv := server.New()
    webRoutes := web.New(_qbit).Routes()
    qbitRoutes := _qbit.Routes()

    // Register routes
    srv.Mount("/", webRoutes)
    srv.Mount("/api/v2", qbitRoutes)

    if cfg.Proxy.Enabled {
        wg.Add(1)
        go func() {
            defer wg.Done()
            if err := proxy.NewProxy().Start(ctx); err != nil {
                errChan <- err
            }
        }()
    }

    wg.Add(1)
    go func() {
        defer wg.Done()
        if err := srv.Start(ctx); err != nil {
            errChan <- err
        }
    }()

    wg.Add(1)
    go func() {
        defer wg.Done()
        if err := worker.Start(ctx); err != nil {
            errChan <- err
        }
    }()

    if cfg.Repair.Enabled {
        wg.Add(1)
        go func() {
            defer wg.Done()
            if err := svc.Repair.Start(ctx); err != nil {
                log.Printf("Error during repair: %v", err)
            }
        }()
    }

    go func() {
        wg.Wait()
        close(errChan)
    }()

    // Wait for context cancellation or completion or error
    select {
    case err := <-errChan:
        return err
    case <-ctx.Done():
        return ctx.Err()
    }
}
22  cmd/healthcheck/main.go  Normal file
@@ -0,0 +1,22 @@
package main

import (
    "cmp"
    "net/http"
    "os"
)

func main() {
    port := cmp.Or(os.Getenv("QBIT_PORT"), "8282")
    resp, err := http.Get("http://localhost:" + port + "/api/v2/app/version")
    if err != nil {
        os.Exit(1)
    }
    defer resp.Body.Close()

    if resp.StatusCode != http.StatusOK {
        os.Exit(1)
    }

    os.Exit(0)
}
18  cmd/main.go
@@ -1,18 +0,0 @@
package cmd

import (
    "goBlack/common"
    "goBlack/debrid"
    "log"
)

func Start(config *common.Config) {

    log.Print("[*] BlackHole running")
    deb := debrid.NewDebrid(config.Debrid)
    if config.Proxy.Enabled {
        go StartProxy(config, deb)
    }
    StartBlackhole(config, deb)

}
274
cmd/proxy.go
274
cmd/proxy.go
@@ -1,274 +0,0 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"cmp"
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"github.com/elazarl/goproxy"
|
||||
"github.com/elazarl/goproxy/ext/auth"
|
||||
"github.com/valyala/fastjson"
|
||||
"goBlack/common"
|
||||
"goBlack/debrid"
|
||||
"io"
|
||||
"log"
|
||||
"net/http"
|
||||
"regexp"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
type RSS struct {
|
||||
XMLName xml.Name `xml:"rss"`
|
||||
Version string `xml:"version,attr"`
|
||||
Channel Channel `xml:"channel"`
|
||||
}
|
||||
|
||||
type Channel struct {
|
||||
XMLName xml.Name `xml:"channel"`
|
||||
Title string `xml:"title"`
|
||||
AtomLink AtomLink `xml:"link"`
|
||||
Items []Item `xml:"item"`
|
||||
}
|
||||
|
||||
type AtomLink struct {
|
||||
XMLName xml.Name `xml:"link"`
|
||||
Rel string `xml:"rel,attr"`
|
||||
Type string `xml:"type,attr"`
|
||||
}
|
||||
|
||||
type Item struct {
|
||||
XMLName xml.Name `xml:"item"`
|
||||
Title string `xml:"title"`
|
||||
Description string `xml:"description"`
|
||||
GUID string `xml:"guid"`
|
||||
ProwlarrIndexer ProwlarrIndexer `xml:"prowlarrindexer"`
|
||||
Comments string `xml:"comments"`
|
||||
PubDate string `xml:"pubDate"`
|
||||
Size int64 `xml:"size"`
|
||||
Link string `xml:"link"`
|
||||
Categories []string `xml:"category"`
|
||||
Enclosure Enclosure `xml:"enclosure"`
|
||||
TorznabAttrs []TorznabAttr `xml:"torznab:attr"`
|
||||
}
|
||||
|
||||
type ProwlarrIndexer struct {
|
||||
ID string `xml:"id,attr"`
|
||||
Type string `xml:"type,attr"`
|
||||
Value string `xml:",chardata"`
|
||||
}
|
||||
|
||||
type Enclosure struct {
|
||||
URL string `xml:"url,attr"`
|
||||
Length int64 `xml:"length,attr"`
|
||||
Type string `xml:"type,attr"`
|
||||
}
|
||||
|
||||
type TorznabAttr struct {
|
||||
Name string `xml:"name,attr"`
|
||||
Value string `xml:"value,attr"`
|
||||
}
|
||||
|
||||
type SafeItems struct {
|
||||
mu sync.Mutex
|
||||
Items []Item
|
||||
}
|
||||
|
||||
func (s *SafeItems) Add(item Item) {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
s.Items = append(s.Items, item)
|
||||
}
|
||||
|
||||
func (s *SafeItems) Get() []Item {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
return s.Items
|
||||
}
|
||||
|
||||
func ProcessJSONResponse(resp *http.Response, deb debrid.Service) *http.Response {
|
||||
if resp == nil || resp.Body == nil {
|
||||
return resp
|
||||
}
|
||||
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
log.Println("Error reading response body:", err)
|
||||
return resp
|
||||
}
|
||||
err = resp.Body.Close()
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
var p fastjson.Parser
|
||||
v, err := p.ParseBytes(body)
|
||||
if err != nil {
|
||||
// If it's not JSON, return the original response
|
||||
resp.Body = io.NopCloser(bytes.NewReader(body))
|
||||
return resp
|
||||
}
|
||||
|
||||
// Modify the JSON
|
||||
|
||||
// Serialize the modified JSON back to bytes
|
||||
modifiedBody := v.MarshalTo(nil)
|
||||
|
||||
// Set the modified body back to the response
|
||||
resp.Body = io.NopCloser(bytes.NewReader(modifiedBody))
|
||||
resp.ContentLength = int64(len(modifiedBody))
|
||||
resp.Header.Set("Content-Length", string(rune(len(modifiedBody))))
|
||||
|
||||
return resp
|
||||
|
||||
}
|
||||
|
||||
func ProcessResponse(resp *http.Response, deb debrid.Service) *http.Response {
|
||||
if resp == nil || resp.Body == nil {
|
||||
return resp
|
||||
}
|
||||
contentType := resp.Header.Get("Content-Type")
|
||||
switch contentType {
|
||||
case "application/json":
|
||||
return ProcessJSONResponse(resp, deb)
|
||||
case "application/xml":
|
||||
return ProcessXMLResponse(resp, deb)
|
||||
case "application/rss+xml":
|
||||
return ProcessXMLResponse(resp, deb)
|
||||
default:
|
||||
return resp
|
||||
}
|
||||
}
|
||||
|
||||
func XMLItemIsCached(item Item, deb debrid.Service) bool {
|
||||
magnetLink := ""
|
||||
infohash := ""
|
||||
|
||||
// Extract magnet link from the link or comments
|
||||
if strings.Contains(item.Link, "magnet:?") {
|
||||
magnetLink = item.Link
|
||||
} else if strings.Contains(item.GUID, "magnet:?") {
|
||||
magnetLink = item.GUID
|
||||
}
|
||||
|
||||
// Extract infohash from <torznab:attr> elements
|
||||
for _, attr := range item.TorznabAttrs {
|
||||
if attr.Name == "infohash" {
|
||||
infohash = attr.Value
|
||||
}
|
||||
}
|
||||
if magnetLink == "" && infohash == "" {
|
||||
// We can't check the availability of the torrent without a magnet link or infohash
|
||||
return false
|
||||
}
|
||||
var magnet *common.Magnet
|
||||
var err error
|
||||
|
||||
if infohash == "" {
|
||||
magnet, err = common.GetMagnetInfo(magnetLink)
|
||||
if err != nil {
|
||||
log.Println("Error getting magnet info:", err)
|
||||
return false
|
||||
}
|
||||
} else {
|
||||
magnet = &common.Magnet{
|
||||
InfoHash: infohash,
|
||||
Name: item.Title,
|
||||
Link: magnetLink,
|
||||
}
|
||||
}
|
||||
if magnet == nil {
|
||||
log.Println("Error getting magnet info")
|
||||
return false
|
||||
}
|
||||
return deb.IsAvailable(magnet)
|
||||
|
||||
}
|
||||
|
||||
func ProcessXMLResponse(resp *http.Response, deb debrid.Service) *http.Response {
|
||||
if resp == nil || resp.Body == nil {
|
||||
return resp
|
||||
}
|
||||
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
log.Println("Error reading response body:", err)
|
||||
return resp
|
||||
}
|
||||
err = resp.Body.Close()
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
var rss RSS
|
||||
err = xml.Unmarshal(body, &rss)
|
||||
if err != nil {
|
||||
log.Fatalf("Error unmarshalling XML: %v", err)
|
||||
return resp
|
||||
}
|
||||
newItems := &SafeItems{}
|
||||
var wg sync.WaitGroup
|
||||
|
||||
// Step 4: Extract infohash or magnet URI, manipulate data
|
||||
for _, item := range rss.Channel.Items {
|
||||
wg.Add(1)
|
||||
go func(item Item) {
|
||||
defer wg.Done()
|
||||
if XMLItemIsCached(item, deb) {
|
||||
newItems.Add(item)
|
||||
}
|
||||
}(item)
|
||||
}
|
||||
wg.Wait()
|
||||
rss.Channel.Items = newItems.Get()
|
||||
|
||||
// rss.Channel.Items = newItems
|
||||
modifiedBody, err := xml.MarshalIndent(rss, "", " ")
|
||||
if err != nil {
|
||||
log.Printf("Error marshalling XML: %v", err)
|
||||
return resp
|
||||
}
|
||||
modifiedBody = append([]byte(xml.Header), modifiedBody...)
|
||||
|
||||
if err != nil {
|
||||
log.Fatalf("Error marshalling XML: %v", err)
|
||||
return resp
|
||||
}
|
||||
|
||||
// Set the modified body back to the response
|
||||
resp.Body = io.NopCloser(bytes.NewReader(modifiedBody))
|
||||
resp.ContentLength = int64(len(modifiedBody))
|
||||
resp.Header.Set("Content-Length", string(rune(len(modifiedBody))))
|
||||
|
||||
return resp
|
||||
}
|
||||
|
||||
func UrlMatches(re *regexp.Regexp) goproxy.ReqConditionFunc {
|
||||
return func(req *http.Request, ctx *goproxy.ProxyCtx) bool {
|
||||
return re.MatchString(req.URL.String())
|
||||
}
|
||||
}
|
||||
|
||||
func StartProxy(config *common.Config, deb debrid.Service) {
|
||||
username, password := config.Proxy.Username, config.Proxy.Password
|
||||
cfg := config.Proxy
|
||||
proxy := goproxy.NewProxyHttpServer()
|
||||
if username != "" || password != "" {
|
||||
// Set up basic auth for proxy
|
||||
auth.ProxyBasic(proxy, "my_realm", func(user, pwd string) bool {
|
||||
return user == username && password == pwd
|
||||
})
|
||||
}
|
||||
|
||||
proxy.OnRequest(goproxy.ReqHostMatches(regexp.MustCompile("^.443$"))).HandleConnect(goproxy.AlwaysMitm)
|
||||
proxy.OnResponse(UrlMatches(regexp.MustCompile("^.*/api\\?t=(search|tvsearch|movie)(&.*)?$"))).DoFunc(
|
||||
func(resp *http.Response, ctx *goproxy.ProxyCtx) *http.Response {
|
||||
return ProcessResponse(resp, deb)
|
||||
})
|
||||
|
||||
port := cmp.Or(cfg.Port, "8181")
|
||||
proxy.Verbose = cfg.Debug
|
||||
port = fmt.Sprintf(":%s", port)
|
||||
log.Printf("Starting proxy server on %s\n", port)
|
||||
log.Fatal(http.ListenAndServe(fmt.Sprintf("%s", port), proxy))
|
||||
}
|
||||
@@ -1,57 +0,0 @@
|
||||
package common
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"log"
|
||||
"os"
|
||||
)
|
||||
|
||||
type DebridConfig struct {
|
||||
Name string `json:"name"`
|
||||
Host string `json:"host"`
|
||||
APIKey string `json:"api_key"`
|
||||
Folder string `json:"folder"`
|
||||
DownloadUncached bool `json:"download_uncached"`
|
||||
RateLimit string `json:"rate_limit"` // 200/minute or 10/second
|
||||
}
|
||||
|
||||
type Config struct {
|
||||
DbDSN string `json:"db_dsn"`
|
||||
Debrid DebridConfig `json:"debrid"`
|
||||
Arrs []struct {
|
||||
WatchFolder string `json:"watch_folder"`
|
||||
CompletedFolder string `json:"completed_folder"`
|
||||
Token string `json:"token"`
|
||||
URL string `json:"url"`
|
||||
} `json:"arrs"`
|
||||
Proxy struct {
|
||||
Port string `json:"port"`
|
||||
Enabled bool `json:"enabled"`
|
||||
Debug bool `json:"debug"`
|
||||
Username string `json:"username"`
|
||||
Password string `json:"password"`
|
||||
}
|
||||
}
|
||||
|
||||
func LoadConfig(path string) (*Config, error) {
|
||||
// Load the config file
|
||||
file, err := os.Open(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func(file *os.File) {
|
||||
err := file.Close()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}(file)
|
||||
|
||||
decoder := json.NewDecoder(file)
|
||||
config := &Config{}
|
||||
err = decoder.Decode(config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return config, nil
|
||||
}
|
||||
@@ -1,27 +0,0 @@
|
||||
package common
|
||||
|
||||
import (
|
||||
"regexp"
|
||||
)
|
||||
|
||||
var (
|
||||
VIDEOMATCH = "(?i)(\\.)(YUV|WMV|WEBM|VOB|VIV|SVI|ROQ|RMVB|RM|OGV|OGG|NSV|MXF|MTS|M2TS|TS|MPG|MPEG|M2V|MP2|MPE|MPV|MP4|M4P|M4V|MOV|QT|MNG|MKV|FLV|DRC|AVI|ASF|AMV)$"
|
||||
SUBMATCH = "(?i)(\\.)(SRT|SUB|SBV|ASS|VTT|TTML|DFXP|STL|SCC|CAP|SMI|TTXT|TDS|USF|JSS|SSA|PSB|RT|LRC|SSB)$"
|
||||
)
|
||||
|
||||
func RegexMatch(regex string, value string) bool {
|
||||
re := regexp.MustCompile(regex)
|
||||
return re.MatchString(value)
|
||||
}
|
||||
|
||||
func RemoveExtension(value string) string {
|
||||
re := regexp.MustCompile(VIDEOMATCH)
|
||||
|
||||
// Find the last index of the matched extension
|
||||
loc := re.FindStringIndex(value)
|
||||
if loc != nil {
|
||||
return value[:loc[0]]
|
||||
} else {
|
||||
return value
|
||||
}
|
||||
}
|
||||
@@ -1,83 +0,0 @@
|
||||
package common
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"log"
|
||||
"math/rand"
|
||||
"net/url"
|
||||
"os"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type Magnet struct {
|
||||
Name string
|
||||
InfoHash string
|
||||
Size int64
|
||||
Link string
|
||||
}
|
||||
|
||||
func OpenMagnetFile(filePath string) string {
|
||||
file, err := os.Open(filePath)
|
||||
if err != nil {
|
||||
log.Println("Error opening file:", err)
|
||||
return ""
|
||||
}
|
||||
defer func(file *os.File) {
|
||||
err := file.Close()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}(file) // Ensure the file is closed after the function ends
|
||||
|
||||
// Create a scanner to read the file line by line
|
||||
scanner := bufio.NewScanner(file)
|
||||
for scanner.Scan() {
|
||||
magnetLink := scanner.Text()
|
||||
if magnetLink != "" {
|
||||
return magnetLink
|
||||
}
|
||||
}
|
||||
|
||||
// Check for any errors during scanning
|
||||
if err := scanner.Err(); err != nil {
|
||||
log.Println("Error reading file:", err)
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func GetMagnetInfo(magnetLink string) (*Magnet, error) {
|
||||
if magnetLink == "" {
|
||||
return nil, fmt.Errorf("error getting magnet from file")
|
||||
}
|
||||
magnetURI, err := url.Parse(magnetLink)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error parsing magnet link")
|
||||
}
|
||||
query := magnetURI.Query()
|
||||
xt := query.Get("xt")
|
||||
dn := query.Get("dn")
|
||||
|
||||
// Extract BTIH
|
||||
parts := strings.Split(xt, ":")
|
||||
btih := ""
|
||||
if len(parts) > 2 {
|
||||
btih = parts[2]
|
||||
}
|
||||
magnet := &Magnet{
|
||||
InfoHash: btih,
|
||||
Name: dn,
|
||||
Size: 0,
|
||||
Link: magnetLink,
|
||||
}
|
||||
return magnet, nil
|
||||
}
|
||||
|
||||
func RandomString(length int) string {
|
||||
const charset = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
|
||||
b := make([]byte, length)
|
||||
for i := range b {
|
||||
b[i] = charset[rand.Intn(len(charset))]
|
||||
}
|
||||
return string(b)
|
||||
}
|
||||
@@ -1,83 +0,0 @@
|
||||
package debrid
|
||||
|
||||
import (
|
||||
"github.com/anacrolix/torrent/metainfo"
|
||||
"goBlack/common"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
type Service interface {
|
||||
SubmitMagnet(torrent *Torrent) (*Torrent, error)
|
||||
CheckStatus(torrent *Torrent) (*Torrent, error)
|
||||
DownloadLink(torrent *Torrent) error
|
||||
Process(arr *Arr, magnet string) (*Torrent, error)
|
||||
IsAvailable(magnet *common.Magnet) bool
|
||||
}
|
||||
|
||||
type Debrid struct {
|
||||
Host string `json:"host"`
|
||||
APIKey string
|
||||
DownloadUncached bool
|
||||
}
|
||||
|
||||
func NewDebrid(dc common.DebridConfig) Service {
|
||||
switch dc.Name {
|
||||
case "realdebrid":
|
||||
return NewRealDebrid(dc)
|
||||
default:
|
||||
return NewRealDebrid(dc)
|
||||
}
|
||||
}
|
||||
|
||||
func GetTorrentInfo(filePath string) (*Torrent, error) {
|
||||
// Open and read the .torrent file
|
||||
if filepath.Ext(filePath) == ".torrent" {
|
||||
return getTorrentInfo(filePath)
|
||||
} else {
|
||||
return torrentFromMagnetFile(filePath)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func torrentFromMagnetFile(filePath string) (*Torrent, error) {
|
||||
magnetLink := common.OpenMagnetFile(filePath)
|
||||
magnet, err := common.GetMagnetInfo(magnetLink)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
torrent := &Torrent{
|
||||
InfoHash: magnet.InfoHash,
|
||||
Name: magnet.Name,
|
||||
Size: magnet.Size,
|
||||
Magnet: magnet,
|
||||
Filename: filePath,
|
||||
}
|
||||
return torrent, nil
|
||||
}
|
||||
|
||||
func getTorrentInfo(filePath string) (*Torrent, error) {
|
||||
mi, err := metainfo.LoadFromFile(filePath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hash := mi.HashInfoBytes()
|
||||
infoHash := hash.HexString()
|
||||
info, err := mi.UnmarshalInfo()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
magnet := &common.Magnet{
|
||||
InfoHash: infoHash,
|
||||
Name: info.Name,
|
||||
Size: info.Length,
|
||||
Link: mi.Magnet(&hash, &info).String(),
|
||||
}
|
||||
torrent := &Torrent{
|
||||
InfoHash: infoHash,
|
||||
Name: info.Name,
|
||||
Size: info.Length,
|
||||
Magnet: magnet,
|
||||
Filename: filePath,
|
||||
}
|
||||
return torrent, nil
|
||||
}
|
||||
@@ -1,152 +0,0 @@
|
||||
package debrid
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"goBlack/common"
|
||||
"goBlack/debrid/structs"
|
||||
"log"
|
||||
"net/http"
|
||||
gourl "net/url"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type RealDebrid struct {
|
||||
Host string `json:"host"`
|
||||
APIKey string
|
||||
DownloadUncached bool
|
||||
client *common.RLHTTPClient
|
||||
}
|
||||
|
||||
func (r *RealDebrid) Process(arr *Arr, magnet string) (*Torrent, error) {
|
||||
torrent, err := GetTorrentInfo(magnet)
|
||||
torrent.Arr = arr
|
||||
if err != nil {
|
||||
return torrent, err
|
||||
}
|
||||
log.Printf("Torrent Name: %s", torrent.Name)
|
||||
if !r.DownloadUncached {
|
||||
if !r.IsAvailable(torrent.Magnet) {
|
||||
return torrent, fmt.Errorf("torrent is not cached")
|
||||
}
|
||||
}
|
||||
|
||||
return r.CheckStatus(torrent)
|
||||
}
|
||||
|
||||
func (r *RealDebrid) IsAvailable(magnet *common.Magnet) bool {
|
||||
url := fmt.Sprintf("%s/torrents/instantAvailability/%s", r.Host, magnet.InfoHash)
|
||||
resp, err := r.client.MakeRequest(http.MethodGet, url, nil)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
var data structs.RealDebridAvailabilityResponse
|
||||
err = json.Unmarshal(resp, &data)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
hosters, exists := data[strings.ToLower(magnet.InfoHash)]
|
||||
if !exists || len(hosters) < 1 {
|
||||
log.Printf("Torrent: %s not cached", magnet.Name)
|
||||
return false
|
||||
}
|
||||
log.Printf("Torrent: %s is cached", magnet.Name)
|
||||
return true
|
||||
}
|
||||
|
||||
func (r *RealDebrid) SubmitMagnet(torrent *Torrent) (*Torrent, error) {
|
||||
url := fmt.Sprintf("%s/torrents/addMagnet", r.Host)
|
||||
payload := gourl.Values{
|
||||
"magnet": {torrent.Magnet.Link},
|
||||
}
|
||||
var data structs.RealDebridAddMagnetSchema
|
||||
resp, err := r.client.MakeRequest(http.MethodPost, url, strings.NewReader(payload.Encode()))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = json.Unmarshal(resp, &data)
|
||||
log.Printf("Torrent: %s added with id: %s\n", torrent.Name, data.Id)
|
||||
torrent.Id = data.Id
|
||||
|
||||
return torrent, nil
|
||||
}
|
||||
|
||||
func (r *RealDebrid) CheckStatus(torrent *Torrent) (*Torrent, error) {
|
||||
url := fmt.Sprintf("%s/torrents/info/%s", r.Host, torrent.Id)
|
||||
for {
|
||||
resp, err := r.client.MakeRequest(http.MethodGet, url, nil)
|
||||
if err != nil {
|
||||
return torrent, err
|
||||
}
|
||||
var data structs.RealDebridTorrentInfo
|
||||
err = json.Unmarshal(resp, &data)
|
||||
status := data.Status
|
||||
torrent.Folder = common.RemoveExtension(data.OriginalFilename)
|
||||
if status == "error" || status == "dead" || status == "magnet_error" {
|
||||
return torrent, fmt.Errorf("torrent: %s has error", torrent.Name)
|
||||
} else if status == "waiting_files_selection" {
|
||||
files := make([]TorrentFile, 0)
|
||||
for _, f := range data.Files {
|
||||
name := f.Path
|
||||
if !common.RegexMatch(common.VIDEOMATCH, name) && !common.RegexMatch(common.SUBMATCH, name) {
|
||||
continue
|
||||
}
|
||||
fileId := f.ID
|
||||
file := &TorrentFile{
|
||||
Name: name,
|
||||
Path: filepath.Join(torrent.Folder, name),
|
||||
Size: int64(f.Bytes),
|
||||
Id: strconv.Itoa(fileId),
|
||||
}
|
||||
files = append(files, *file)
|
||||
}
|
||||
torrent.Files = files
|
||||
if len(files) == 0 {
|
||||
return torrent, fmt.Errorf("no video files found")
|
||||
}
|
||||
filesId := make([]string, 0)
|
||||
for _, f := range files {
|
||||
filesId = append(filesId, f.Id)
|
||||
}
|
||||
p := gourl.Values{
|
||||
"files": {strings.Join(filesId, ",")},
|
||||
}
|
||||
payload := strings.NewReader(p.Encode())
|
||||
_, err = r.client.MakeRequest(http.MethodPost, fmt.Sprintf("%s/torrents/selectFiles/%s", r.Host, torrent.Id), payload)
|
||||
if err != nil {
|
||||
return torrent, err
|
||||
}
|
||||
} else if status == "downloaded" {
|
||||
log.Printf("Torrent: %s downloaded\n", torrent.Name)
|
||||
err = r.DownloadLink(torrent)
|
||||
if err != nil {
|
||||
return torrent, err
|
||||
}
|
||||
break
|
||||
} else if status == "downloading" {
|
||||
return torrent, fmt.Errorf("torrent is uncached")
|
||||
}
|
||||
|
||||
}
|
||||
return torrent, nil
|
||||
}
|
||||
|
||||
func (r *RealDebrid) DownloadLink(torrent *Torrent) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func NewRealDebrid(dc common.DebridConfig) *RealDebrid {
|
||||
rl := common.ParseRateLimit(dc.RateLimit)
|
||||
headers := map[string]string{
|
||||
"Authorization": fmt.Sprintf("Bearer %s", dc.APIKey),
|
||||
}
|
||||
client := common.NewRLHTTPClient(rl, headers)
|
||||
return &RealDebrid{
|
||||
Host: dc.Host,
|
||||
APIKey: dc.APIKey,
|
||||
DownloadUncached: dc.DownloadUncached,
|
||||
client: client,
|
||||
}
|
||||
}
|
||||
@@ -1,41 +0,0 @@
package structs

type RealDebridAvailabilityResponse map[string]Hosters

type Hosters map[string][]FileIDs

type FileIDs map[string]FileVariant

type FileVariant struct {
	Filename string `json:"filename"`
	Filesize int    `json:"filesize"`
}

type RealDebridAddMagnetSchema struct {
	Id  string `json:"id"`
	Uri string `json:"uri"`
}

type RealDebridTorrentInfo struct {
	ID               string `json:"id"`
	Filename         string `json:"filename"`
	OriginalFilename string `json:"original_filename"`
	Hash             string `json:"hash"`
	Bytes            int    `json:"bytes"`
	OriginalBytes    int    `json:"original_bytes"`
	Host             string `json:"host"`
	Split            int    `json:"split"`
	Progress         int    `json:"progress"`
	Status           string `json:"status"`
	Added            string `json:"added"`
	Files            []struct {
		ID       int    `json:"id"`
		Path     string `json:"path"`
		Bytes    int    `json:"bytes"`
		Selected int    `json:"selected"`
	} `json:"files"`
	Links   []string `json:"links"`
	Ended   string   `json:"ended,omitempty"`
	Speed   int      `json:"speed,omitempty"`
	Seeders int      `json:"seeders,omitempty"`
}
@@ -1,142 +0,0 @@
|
||||
package debrid
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"goBlack/common"
|
||||
"log"
|
||||
"net/http"
|
||||
gourl "net/url"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type Arr struct {
|
||||
WatchFolder string `json:"watch_folder"`
|
||||
CompletedFolder string `json:"completed_folder"`
|
||||
Debrid common.DebridConfig `json:"debrid"`
|
||||
Token string `json:"token"`
|
||||
URL string `json:"url"`
|
||||
Client *common.RLHTTPClient
|
||||
}
|
||||
|
||||
type ArrHistorySchema struct {
|
||||
Page int `json:"page"`
|
||||
PageSize int `json:"pageSize"`
|
||||
SortKey string `json:"sortKey"`
|
||||
SortDirection string `json:"sortDirection"`
|
||||
TotalRecords int `json:"totalRecords"`
|
||||
Records []struct {
|
||||
ID int `json:"id"`
|
||||
DownloadID string `json:"downloadId"`
|
||||
} `json:"records"`
|
||||
}
|
||||
|
||||
type Torrent struct {
|
||||
Id string `json:"id"`
|
||||
InfoHash string `json:"info_hash"`
|
||||
Name string `json:"name"`
|
||||
Folder string `json:"folder"`
|
||||
Filename string `json:"filename"`
|
||||
Size int64 `json:"size"`
|
||||
Magnet *common.Magnet `json:"magnet"`
|
||||
Files []TorrentFile `json:"files"`
|
||||
Status string `json:"status"`
|
||||
|
||||
Arr *Arr
|
||||
}
|
||||
|
||||
type TorrentFile struct {
|
||||
Id string `json:"id"`
|
||||
Name string `json:"name"`
|
||||
Size int64 `json:"size"`
|
||||
Path string `json:"path"`
|
||||
}
|
||||
|
||||
func (arr *Arr) GetHeaders() map[string]string {
|
||||
return map[string]string{
|
||||
"X-Api-Key": arr.Token,
|
||||
}
|
||||
}
|
||||
|
||||
func (arr *Arr) GetURL() string {
|
||||
url, _ := gourl.JoinPath(arr.URL, "api/v3/")
|
||||
return url
|
||||
}
|
||||
|
||||
func getEventId(eventType string) int {
|
||||
switch eventType {
|
||||
case "grabbed":
|
||||
return 1
|
||||
case "seriesFolderDownloaded":
|
||||
return 2
|
||||
case "DownloadFolderImported":
|
||||
return 3
|
||||
case "DownloadFailed":
|
||||
return 4
|
||||
case "DownloadIgnored":
|
||||
return 7
|
||||
default:
|
||||
return 0
|
||||
}
|
||||
}
|
||||
|
||||
func (arr *Arr) GetHistory(downloadId, eventType string) *ArrHistorySchema {
|
||||
eventId := getEventId(eventType)
|
||||
query := gourl.Values{}
|
||||
if downloadId != "" {
|
||||
query.Add("downloadId", downloadId)
|
||||
}
|
||||
if eventId != 0 {
|
||||
query.Add("eventId", strconv.Itoa(eventId))
|
||||
|
||||
}
|
||||
query.Add("pageSize", "100")
|
||||
url := arr.GetURL() + "history/" + "?" + query.Encode()
|
||||
resp, err := arr.Client.MakeRequest(http.MethodGet, url, nil)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
var data *ArrHistorySchema
|
||||
err = json.Unmarshal(resp, &data)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
return data
|
||||
|
||||
}
|
||||
|
||||
func (t *Torrent) Cleanup(remove bool) {
|
||||
if remove {
|
||||
err := os.Remove(t.Filename)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (t *Torrent) MarkAsFailed() error {
|
||||
downloadId := strings.ToUpper(t.Magnet.InfoHash)
|
||||
history := t.Arr.GetHistory(downloadId, "grabbed")
|
||||
if history == nil {
|
||||
return nil
|
||||
}
|
||||
torrentId := 0
|
||||
for _, record := range history.Records {
|
||||
if strings.EqualFold(record.DownloadID, downloadId) {
|
||||
torrentId = record.ID
|
||||
break
|
||||
}
|
||||
}
|
||||
if torrentId != 0 {
|
||||
url, err := gourl.JoinPath(t.Arr.GetURL(), "history/failed/", strconv.Itoa(torrentId))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = t.Arr.Client.MakeRequest(http.MethodPost, url, nil)
|
||||
if err == nil {
|
||||
log.Printf("Marked torrent: %s as failed", t.Name)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
82 doc/config.full.json Normal file
@@ -0,0 +1,82 @@
|
||||
{
|
||||
"debrids": [
|
||||
{
|
||||
"name": "torbox",
|
||||
"host": "https://api.torbox.app/v1",
|
||||
"api_key": "torbox_api_key",
|
||||
"folder": "/mnt/remote/torbox/torrents/",
|
||||
"rate_limit": "250/minute",
|
||||
"download_uncached": false,
|
||||
"check_cached": true
|
||||
},
|
||||
{
|
||||
"name": "realdebrid",
|
||||
"host": "https://api.real-debrid.com/rest/1.0",
|
||||
"api_key": "realdebrid_key",
|
||||
"folder": "/mnt/remote/realdebrid/__all__/",
|
||||
"rate_limit": "250/minute",
|
||||
"download_uncached": false,
|
||||
"check_cached": false
|
||||
},
|
||||
{
|
||||
"name": "debridlink",
|
||||
"host": "https://debrid-link.com/api/v2",
|
||||
"api_key": "debridlink_key",
|
||||
"folder": "/mnt/remote/debridlink/torrents/",
|
||||
"rate_limit": "250/minute",
|
||||
"download_uncached": false,
|
||||
"check_cached": false
|
||||
},
|
||||
{
|
||||
"name": "alldebrid",
|
||||
"host": "http://api.alldebrid.com/v4.1",
|
||||
"api_key": "alldebrid_key",
|
||||
"folder": "/mnt/remote/alldebrid/magnet/",
|
||||
"rate_limit": "600/minute",
|
||||
"download_uncached": false,
|
||||
"check_cached": false
|
||||
}
|
||||
],
|
||||
"proxy": {
|
||||
"enabled": true,
|
||||
"port": "8100",
|
||||
"log_level": "info",
|
||||
"username": "username",
|
||||
"password": "password",
|
||||
"cached_only": true
|
||||
},
|
||||
"max_cache_size": 1000,
|
||||
"qbittorrent": {
|
||||
"port": "8282",
|
||||
"download_folder": "/mnt/symlinks/",
|
||||
"categories": ["sonarr", "radarr"],
|
||||
"refresh_interval": 5,
|
||||
"log_level": "info"
|
||||
},
|
||||
"arrs": [
|
||||
{
|
||||
"name": "sonarr",
|
||||
"host": "http://host:8989",
|
||||
"token": "arr_key",
|
||||
"cleanup": false
|
||||
},
|
||||
{
|
||||
"name": "radarr",
|
||||
"host": "http://host:7878",
|
||||
"token": "arr_key",
|
||||
"cleanup": false
|
||||
}
|
||||
],
|
||||
"repair": {
|
||||
"enabled": false,
|
||||
"interval": "12h",
|
||||
"run_on_start": false,
|
||||
"zurg_url": "http://zurg:9999",
|
||||
"skip_deletion": false
|
||||
},
|
||||
"log_level": "info",
|
||||
"min_file_size": "",
|
||||
"max_file_size": "",
|
||||
"allowed_file_types": [],
|
||||
"use_auth": false
|
||||
}
|
||||
BIN doc/download.png Normal file (binary file not shown; 185 KiB)
BIN doc/main.png Normal file (binary file not shown; 156 KiB)
29 go.mod
@@ -1,23 +1,38 @@
|
||||
module goBlack
|
||||
module github.com/sirrobot01/debrid-blackhole
|
||||
|
||||
go 1.22
|
||||
go 1.23
|
||||
|
||||
toolchain go1.23.2
|
||||
|
||||
require (
|
||||
github.com/anacrolix/torrent v1.55.0
|
||||
github.com/cavaliergopher/grab/v3 v3.0.1
|
||||
github.com/elazarl/goproxy v0.0.0-20240726154733-8b0c20506380
|
||||
github.com/elazarl/goproxy/ext v0.0.0-20190711103511-473e67f1d7d2
|
||||
github.com/fsnotify/fsnotify v1.7.0
|
||||
github.com/go-chi/chi/v5 v5.1.0
|
||||
github.com/google/uuid v1.6.0
|
||||
github.com/gorilla/sessions v1.4.0
|
||||
github.com/rs/zerolog v1.33.0
|
||||
github.com/valyala/fastjson v1.6.4
|
||||
golang.org/x/time v0.6.0
|
||||
golang.org/x/crypto v0.33.0
|
||||
golang.org/x/net v0.33.0
|
||||
golang.org/x/time v0.8.0
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.2.1
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/anacrolix/missinggo v1.3.0 // indirect
|
||||
github.com/anacrolix/missinggo/v2 v2.7.3 // indirect
|
||||
github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8 // indirect
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||
github.com/google/go-cmp v0.6.0 // indirect
|
||||
github.com/gorilla/securecookie v1.1.2 // indirect
|
||||
github.com/huandu/xstrings v1.3.2 // indirect
|
||||
golang.org/x/net v0.27.0 // indirect
|
||||
golang.org/x/sys v0.22.0 // indirect
|
||||
golang.org/x/text v0.16.0 // indirect
|
||||
github.com/mattn/go-colorable v0.1.13 // indirect
|
||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
||||
github.com/rogpeppe/go-internal v1.12.0 // indirect
|
||||
github.com/stretchr/testify v1.10.0 // indirect
|
||||
golang.org/x/sys v0.30.0 // indirect
|
||||
golang.org/x/text v0.22.0 // indirect
|
||||
)
|
||||
|
||||
62 go.sum
@@ -44,11 +44,15 @@ github.com/bradfitz/iter v0.0.0-20140124041915-454541ec3da2/go.mod h1:PyRFw1Lt2w
|
||||
github.com/bradfitz/iter v0.0.0-20190303215204-33e6a9893b0c/go.mod h1:PyRFw1Lt2wKX4ZVSQ2mk+PeDa1rxyObEDlApuIsUKuo=
|
||||
github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8 h1:GKTyiRCL6zVf5wWaqKnf+7Qs6GbEPfd4iMOitWzXJx8=
|
||||
github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8/go.mod h1:spo1JLcs67NmW1aVLEgtA8Yy1elc+X8y5SRW1sFW4Og=
|
||||
github.com/cavaliergopher/grab/v3 v3.0.1 h1:4z7TkBfmPjmLAAmkkAZNX/6QJ1nNFdv3SdIHXju0Fr4=
|
||||
github.com/cavaliergopher/grab/v3 v3.0.1/go.mod h1:1U/KNnD+Ft6JJiYoYBAimKH2XrYptb8Kl3DFGmsjpq4=
|
||||
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
|
||||
github.com/dustin/go-humanize v0.0.0-20180421182945-02af3965c54e/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||
@@ -62,19 +66,20 @@ github.com/elazarl/goproxy/ext v0.0.0-20190711103511-473e67f1d7d2/go.mod h1:gNh8
|
||||
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
|
||||
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
|
||||
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
|
||||
github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE=
|
||||
github.com/glycerine/go-unsnap-stream v0.0.0-20181221182339-f9677308dec2/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE=
|
||||
github.com/glycerine/go-unsnap-stream v0.0.0-20190901134440-81cf024a9e0a/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE=
|
||||
github.com/glycerine/goconvey v0.0.0-20180728074245-46e3a41ad493/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24=
|
||||
github.com/glycerine/goconvey v0.0.0-20190315024820-982ee783a72e/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24=
|
||||
github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24=
|
||||
github.com/go-chi/chi/v5 v5.1.0 h1:acVI1TYaD+hhedDJ3r54HyA6sExp3HfXq7QWEEY/xMw=
|
||||
github.com/go-chi/chi/v5 v5.1.0/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8=
|
||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
@@ -100,12 +105,20 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
|
||||
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
|
||||
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
|
||||
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20190309154008-847fc94819f9/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20190910122728-9d188e94fb99/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
|
||||
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
|
||||
github.com/gorilla/securecookie v1.1.2 h1:YCIWL56dvtr73r6715mJs5ZvhtnY73hBvEF8kXD8ePA=
|
||||
github.com/gorilla/securecookie v1.1.2/go.mod h1:NfCASbcHqRSY+3a8tlWJwsQap2VX5pwzwo4h3eOamfo=
|
||||
github.com/gorilla/sessions v1.4.0 h1:kpIYOp/oi6MG/p5PgxApU8srsSw9tuFbt46Lt7auzqQ=
|
||||
github.com/gorilla/sessions v1.4.0/go.mod h1:FLWm50oby91+hl7p/wRxDth9bWSuk0qVL2emc7lT5ik=
|
||||
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/huandu/xstrings v1.0.0/go.mod h1:4qWG/gcEcfX4z/mBDHJ++3ReCw9ibxbsNJbcucJdbSo=
|
||||
@@ -128,6 +141,12 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
|
||||
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
|
||||
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
|
||||
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
@@ -145,8 +164,9 @@ github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
|
||||
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
|
||||
@@ -165,8 +185,11 @@ github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+Gx
|
||||
github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
|
||||
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
|
||||
github.com/rogpeppe/go-charset v0.0.0-20180617210344-2471d30d28b4/go.mod h1:qgYeAmZ5ZIpBWTGllZSQnw97Dj+woV0toclVaRGI8pc=
|
||||
github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
|
||||
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
|
||||
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
|
||||
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
|
||||
github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
|
||||
github.com/rs/zerolog v1.33.0 h1:1cU2KZkvPxNyfgEmhHAz/1A9Bz+llsdYzklWFzgp0r8=
|
||||
github.com/rs/zerolog v1.33.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss=
|
||||
github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46/go.mod h1:uAQ5PCi+MFsC7HjREoAz1BU+Mq60+05gifQSsHSDG/8=
|
||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||
@@ -180,8 +203,8 @@ github.com/stretchr/testify v1.2.1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
|
||||
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
|
||||
github.com/tinylib/msgp v1.1.0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
|
||||
github.com/tinylib/msgp v1.1.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
|
||||
@@ -194,6 +217,8 @@ go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
|
||||
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.33.0 h1:IOBPskki6Lysi0lo9qQvbxiQ+FvsCC/YWOecCHAixus=
|
||||
golang.org/x/crypto v0.33.0/go.mod h1:bVdXmD7IV/4GdElGPozy6U7lWdRXA4qyRVGJV57uQ5M=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
@@ -209,8 +234,8 @@ golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73r
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys=
|
||||
golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE=
|
||||
golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I=
|
||||
golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
@@ -229,14 +254,17 @@ golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7w
|
||||
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200413165638-669c56c373c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI=
|
||||
golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
|
||||
golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
|
||||
golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
|
||||
golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U=
|
||||
golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
||||
golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM=
|
||||
golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY=
|
||||
golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg=
|
||||
golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
||||
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
@@ -263,6 +291,8 @@ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
|
||||
90 internal/cache/cache.go vendored Normal file
@@ -0,0 +1,90 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"sync"
|
||||
)
|
||||
|
||||
type Cache struct {
|
||||
data map[string]struct{}
|
||||
order []string
|
||||
maxItems int
|
||||
mu sync.RWMutex
|
||||
}
|
||||
|
||||
func New(maxItems int) *Cache {
|
||||
if maxItems <= 0 {
|
||||
maxItems = 1000
|
||||
}
|
||||
return &Cache{
|
||||
data: make(map[string]struct{}, maxItems),
|
||||
order: make([]string, 0, maxItems),
|
||||
maxItems: maxItems,
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Cache) Add(value string) {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
||||
if _, exists := c.data[value]; !exists {
|
||||
if len(c.order) >= c.maxItems {
|
||||
delete(c.data, c.order[0])
|
||||
c.order = c.order[1:]
|
||||
}
|
||||
c.data[value] = struct{}{}
|
||||
c.order = append(c.order, value)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Cache) AddMultiple(values map[string]bool) {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
||||
for value, exists := range values {
|
||||
if !exists {
|
||||
if _, exists := c.data[value]; !exists {
|
||||
if len(c.order) >= c.maxItems {
|
||||
delete(c.data, c.order[0])
|
||||
c.order = c.order[1:]
|
||||
}
|
||||
c.data[value] = struct{}{}
|
||||
c.order = append(c.order, value)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Cache) Get(index int) (string, bool) {
|
||||
c.mu.RLock()
|
||||
defer c.mu.RUnlock()
|
||||
if index < 0 || index >= len(c.order) {
|
||||
return "", false
|
||||
}
|
||||
return c.order[index], true
|
||||
}
|
||||
|
||||
func (c *Cache) GetMultiple(values []string) map[string]bool {
|
||||
c.mu.RLock()
|
||||
defer c.mu.RUnlock()
|
||||
|
||||
result := make(map[string]bool, len(values))
|
||||
for _, value := range values {
|
||||
if _, exists := c.data[value]; exists {
|
||||
result[value] = true
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func (c *Cache) Exists(value string) bool {
|
||||
c.mu.RLock()
|
||||
defer c.mu.RUnlock()
|
||||
_, exists := c.data[value]
|
||||
return exists
|
||||
}
|
||||
|
||||
func (c *Cache) Len() int {
|
||||
c.mu.RLock()
|
||||
defer c.mu.RUnlock()
|
||||
return len(c.order)
|
||||
}
|
||||
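The new internal/cache package above is a bounded, thread-safe FIFO set: once maxItems entries are stored, the oldest key is evicted on the next Add. A minimal usage sketch, assuming it is called from inside this module (packages under internal/ cannot be imported from other modules):

```go
package main

import (
	"fmt"

	"github.com/sirrobot01/debrid-blackhole/internal/cache"
)

func main() {
	c := cache.New(3) // keep at most 3 hashes; the oldest is evicted first
	for _, h := range []string{"aaa", "bbb", "ccc", "ddd"} {
		c.Add(h)
	}
	fmt.Println(c.Len())         // 3 ("aaa" was evicted to make room for "ddd")
	fmt.Println(c.Exists("aaa")) // false
	fmt.Println(c.Exists("ddd")) // true
	fmt.Println(c.GetMultiple([]string{"bbb", "zzz"})) // map[bbb:true]
}
```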
287 internal/config/config.go Normal file
@@ -0,0 +1,287 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
)
|
||||
|
||||
var (
|
||||
instance *Config
|
||||
once sync.Once
|
||||
configPath string
|
||||
)
|
||||
|
||||
type Debrid struct {
|
||||
Name string `json:"name"`
|
||||
Host string `json:"host"`
|
||||
APIKey string `json:"api_key"`
|
||||
Folder string `json:"folder"`
|
||||
DownloadUncached bool `json:"download_uncached"`
|
||||
CheckCached bool `json:"check_cached"`
|
||||
RateLimit string `json:"rate_limit"` // 200/minute or 10/second
|
||||
}
|
||||
|
||||
type Proxy struct {
|
||||
Port string `json:"port"`
|
||||
Enabled bool `json:"enabled"`
|
||||
LogLevel string `json:"log_level"`
|
||||
Username string `json:"username"`
|
||||
Password string `json:"password"`
|
||||
CachedOnly bool `json:"cached_only"`
|
||||
}
|
||||
|
||||
type QBitTorrent struct {
|
||||
Username string `json:"username"`
|
||||
Password string `json:"password"`
|
||||
Port string `json:"port"`
|
||||
LogLevel string `json:"log_level"`
|
||||
DownloadFolder string `json:"download_folder"`
|
||||
Categories []string `json:"categories"`
|
||||
RefreshInterval int `json:"refresh_interval"`
|
||||
}
|
||||
|
||||
type Arr struct {
|
||||
Name string `json:"name"`
|
||||
Host string `json:"host"`
|
||||
Token string `json:"token"`
|
||||
Cleanup bool `json:"cleanup"`
|
||||
}
|
||||
|
||||
type Repair struct {
|
||||
Enabled bool `json:"enabled"`
|
||||
Interval string `json:"interval"`
|
||||
RunOnStart bool `json:"run_on_start"`
|
||||
ZurgURL string `json:"zurg_url"`
|
||||
SkipDeletion bool `json:"skip_deletion"`
|
||||
}
|
||||
|
||||
type Auth struct {
|
||||
Username string `json:"username"`
|
||||
Password string `json:"password"`
|
||||
}
|
||||
|
||||
type Config struct {
|
||||
LogLevel string `json:"log_level"`
|
||||
Debrid Debrid `json:"debrid"`
|
||||
Debrids []Debrid `json:"debrids"`
|
||||
Proxy Proxy `json:"proxy"`
|
||||
MaxCacheSize int `json:"max_cache_size"`
|
||||
QBitTorrent QBitTorrent `json:"qbittorrent"`
|
||||
Arrs []Arr `json:"arrs"`
|
||||
Repair Repair `json:"repair"`
|
||||
AllowedExt []string `json:"allowed_file_types"`
|
||||
MinFileSize string `json:"min_file_size"` // Minimum file size to download, 10MB, 1GB, etc
|
||||
MaxFileSize string `json:"max_file_size"` // Maximum file size to download (0 means no limit)
|
||||
Path string `json:"-"` // Path to save the config file
|
||||
UseAuth bool `json:"use_auth"`
|
||||
Auth *Auth `json:"-"`
|
||||
}
|
||||
|
||||
func (c *Config) JsonFile() string {
|
||||
return filepath.Join(c.Path, "config.json")
|
||||
}
|
||||
func (c *Config) AuthFile() string {
|
||||
return filepath.Join(c.Path, "auth.json")
|
||||
}
|
||||
|
||||
func (c *Config) loadConfig() error {
|
||||
// Load the config file
|
||||
if configPath == "" {
|
||||
return fmt.Errorf("config path not set")
|
||||
}
|
||||
c.Path = configPath
|
||||
file, err := os.ReadFile(c.JsonFile())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := json.Unmarshal(file, &c); err != nil {
|
||||
return fmt.Errorf("error unmarshaling config: %w", err)
|
||||
}
|
||||
|
||||
if c.Debrid.Name != "" {
|
||||
c.Debrids = append(c.Debrids, c.Debrid)
|
||||
}
|
||||
|
||||
if len(c.AllowedExt) == 0 {
|
||||
c.AllowedExt = getDefaultExtensions()
|
||||
}
|
||||
|
||||
// Load the auth file
|
||||
c.Auth = c.GetAuth()
|
||||
|
||||
//Validate the config
|
||||
if err := validateConfig(c); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func validateDebrids(debrids []Debrid) error {
|
||||
if len(debrids) == 0 {
|
||||
return errors.New("no debrids configured")
|
||||
}
|
||||
|
||||
errChan := make(chan error, len(debrids))
|
||||
var wg sync.WaitGroup
|
||||
|
||||
for _, debrid := range debrids {
|
||||
// Basic field validation
|
||||
if debrid.Host == "" {
|
||||
return errors.New("debrid host is required")
|
||||
}
|
||||
if debrid.APIKey == "" {
|
||||
return errors.New("debrid api key is required")
|
||||
}
|
||||
if debrid.Folder == "" {
|
||||
return errors.New("debrid folder is required")
|
||||
}
|
||||
|
||||
// Check folder existence concurrently
|
||||
//wg.Add(1)
|
||||
//go func(folder string) {
|
||||
// defer wg.Done()
|
||||
// if _, err := os.Stat(folder); os.IsNotExist(err) {
|
||||
// errChan <- fmt.Errorf("debrid folder does not exist: %s", folder)
|
||||
// }
|
||||
//}(debrid.Folder)
|
||||
}
|
||||
|
||||
// Wait for all checks to complete
|
||||
go func() {
|
||||
wg.Wait()
|
||||
close(errChan)
|
||||
}()
|
||||
|
||||
// Return first error if any
|
||||
if err := <-errChan; err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func validateQbitTorrent(config *QBitTorrent) error {
|
||||
if config.DownloadFolder == "" {
|
||||
return errors.New("qbittorent download folder is required")
|
||||
}
|
||||
if _, err := os.Stat(config.DownloadFolder); os.IsNotExist(err) {
|
||||
return errors.New("qbittorent download folder does not exist")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func validateConfig(config *Config) error {
|
||||
// Run validations concurrently
|
||||
errChan := make(chan error, 2)
|
||||
|
||||
go func() {
|
||||
errChan <- validateDebrids(config.Debrids)
|
||||
}()
|
||||
|
||||
go func() {
|
||||
errChan <- validateQbitTorrent(&config.QBitTorrent)
|
||||
}()
|
||||
|
||||
// Check for errors
|
||||
for i := 0; i < 2; i++ {
|
||||
if err := <-errChan; err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func SetConfigPath(path string) error {
|
||||
configPath = path
|
||||
return nil
|
||||
}
|
||||
|
||||
func GetConfig() *Config {
|
||||
once.Do(func() {
|
||||
instance = &Config{} // Initialize instance first
|
||||
if err := instance.loadConfig(); err != nil {
|
||||
_, err := fmt.Fprintf(os.Stderr, "configuration Error: %v\n", err)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
os.Exit(1)
|
||||
}
|
||||
})
|
||||
return instance
|
||||
}
|
||||
|
||||
func (c *Config) GetMinFileSize() int64 {
|
||||
// 0 means no limit
|
||||
if c.MinFileSize == "" {
|
||||
return 0
|
||||
}
|
||||
s, err := parseSize(c.MinFileSize)
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func (c *Config) GetMaxFileSize() int64 {
|
||||
// 0 means no limit
|
||||
if c.MaxFileSize == "" {
|
||||
return 0
|
||||
}
|
||||
s, err := parseSize(c.MaxFileSize)
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func (c *Config) IsSizeAllowed(size int64) bool {
|
||||
if size == 0 {
|
||||
return true // Maybe the debrid hasn't reported the size yet
|
||||
}
|
||||
if c.GetMinFileSize() > 0 && size < c.GetMinFileSize() {
|
||||
return false
|
||||
}
|
||||
if c.GetMaxFileSize() > 0 && size > c.GetMaxFileSize() {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (c *Config) GetAuth() *Auth {
|
||||
if !c.UseAuth {
|
||||
return nil
|
||||
}
|
||||
if c.Auth == nil {
|
||||
c.Auth = &Auth{}
|
||||
if _, err := os.Stat(c.AuthFile()); err == nil {
|
||||
file, err := os.ReadFile(c.AuthFile())
|
||||
if err == nil {
|
||||
_ = json.Unmarshal(file, c.Auth)
|
||||
}
|
||||
}
|
||||
}
|
||||
return c.Auth
|
||||
}
|
||||
|
||||
func (c *Config) SaveAuth(auth *Auth) error {
|
||||
c.Auth = auth
|
||||
data, err := json.Marshal(auth)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return os.WriteFile(c.AuthFile(), data, 0644)
|
||||
}
|
||||
|
||||
func (c *Config) NeedsSetup() bool {
|
||||
if c.UseAuth {
|
||||
return c.GetAuth().Username == ""
|
||||
}
|
||||
return false
|
||||
}
|
||||
75 internal/config/misc.go Normal file
@@ -0,0 +1,75 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func (c *Config) IsAllowedFile(filename string) bool {
|
||||
ext := strings.ToLower(filepath.Ext(filename))
|
||||
if ext == "" {
|
||||
return false
|
||||
}
|
||||
// Remove the leading dot
|
||||
ext = ext[1:]
|
||||
|
||||
for _, allowed := range c.AllowedExt {
|
||||
if ext == allowed {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func getDefaultExtensions() []string {
|
||||
videoExts := strings.Split("YUV,WMV,WEBM,VOB,VIV,SVI,ROQ,RMVB,RM,OGV,OGG,NSV,MXF,MPG,MPEG,M2V,MP2,MPE,MPV,MP4,M4P,M4V,MOV,QT,MNG,MKV,FLV,DRC,AVI,ASF,AMV,MKA,F4V,3GP,3G2,DIVX,X264,X265", ",")
|
||||
musicExts := strings.Split("MP3,WAV,FLAC,AAC,OGG,WMA,AIFF,ALAC,M4A,APE,AC3,DTS,M4P,MID,MIDI,MKA,MP2,MPA,RA,VOC,WV,AMR", ",")
|
||||
|
||||
// Combine both slices
|
||||
allExts := append(videoExts, musicExts...)
|
||||
|
||||
// Convert to lowercase
|
||||
for i, ext := range allExts {
|
||||
allExts[i] = strings.ToLower(ext)
|
||||
}
|
||||
|
||||
// Remove duplicates
|
||||
seen := make(map[string]bool)
|
||||
var unique []string
|
||||
|
||||
for _, ext := range allExts {
|
||||
if !seen[ext] {
|
||||
seen[ext] = true
|
||||
unique = append(unique, ext)
|
||||
}
|
||||
}
|
||||
|
||||
sort.Strings(unique)
|
||||
return unique
|
||||
}
|
||||
|
||||
func parseSize(sizeStr string) (int64, error) {
|
||||
sizeStr = strings.ToUpper(strings.TrimSpace(sizeStr))
|
||||
|
||||
// Absolute size-based cache
|
||||
multiplier := 1.0
|
||||
if strings.HasSuffix(sizeStr, "GB") {
|
||||
multiplier = 1024 * 1024 * 1024
|
||||
sizeStr = strings.TrimSuffix(sizeStr, "GB")
|
||||
} else if strings.HasSuffix(sizeStr, "MB") {
|
||||
multiplier = 1024 * 1024
|
||||
sizeStr = strings.TrimSuffix(sizeStr, "MB")
|
||||
} else if strings.HasSuffix(sizeStr, "KB") {
|
||||
multiplier = 1024
|
||||
sizeStr = strings.TrimSuffix(sizeStr, "KB")
|
||||
}
|
||||
|
||||
size, err := strconv.ParseFloat(sizeStr, 64)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return int64(size * multiplier), nil
|
||||
}
|
||||
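min_file_size and max_file_size accept human-readable strings ("10MB", "2GB"); parseSize converts them to bytes and Config.IsSizeAllowed applies the bounds, treating an unreported size of 0 as allowed. A sketch under the assumption that the caller sits inside this module and builds the Config directly instead of going through GetConfig(), which reads config.json from disk:

```go
package main

import (
	"fmt"

	"github.com/sirrobot01/debrid-blackhole/internal/config"
)

func main() {
	c := &config.Config{
		MinFileSize: "10MB",
		MaxFileSize: "2GB",
	}
	fmt.Println(c.IsSizeAllowed(5 * 1024 * 1024))   // false: below the 10MB minimum
	fmt.Println(c.IsSizeAllowed(700 * 1024 * 1024)) // true: within both bounds
	fmt.Println(c.IsSizeAllowed(0))                 // true: size not reported yet
}
```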
93 internal/logger/logger.go Normal file
@@ -0,0 +1,93 @@
|
||||
package logger
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/sirrobot01/debrid-blackhole/internal/config"
|
||||
"gopkg.in/natefinch/lumberjack.v2"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
var (
|
||||
once sync.Once
|
||||
logger zerolog.Logger
|
||||
)
|
||||
|
||||
func GetLogPath() string {
|
||||
cfg := config.GetConfig()
|
||||
logsDir := filepath.Join(cfg.Path, "logs")
|
||||
|
||||
if _, err := os.Stat(logsDir); os.IsNotExist(err) {
|
||||
if err := os.MkdirAll(logsDir, 0755); err != nil {
|
||||
panic(fmt.Sprintf("Failed to create logs directory: %v", err))
|
||||
}
|
||||
}
|
||||
|
||||
return filepath.Join(logsDir, "decypharr.log")
|
||||
}
|
||||
|
||||
func NewLogger(prefix string, level string, output *os.File) zerolog.Logger {
|
||||
|
||||
rotatingLogFile := &lumberjack.Logger{
|
||||
Filename: GetLogPath(),
|
||||
MaxSize: 2,
|
||||
MaxBackups: 2,
|
||||
MaxAge: 28,
|
||||
Compress: true,
|
||||
}
|
||||
|
||||
consoleWriter := zerolog.ConsoleWriter{
|
||||
Out: output,
|
||||
TimeFormat: "2006-01-02 15:04:05",
|
||||
NoColor: false, // Set to true if you don't want colors
|
||||
FormatLevel: func(i interface{}) string {
|
||||
return strings.ToUpper(fmt.Sprintf("| %-6s|", i))
|
||||
},
|
||||
FormatMessage: func(i interface{}) string {
|
||||
return fmt.Sprintf("[%s] %v", prefix, i)
|
||||
},
|
||||
}
|
||||
|
||||
fileWriter := zerolog.ConsoleWriter{
|
||||
Out: rotatingLogFile,
|
||||
TimeFormat: "2006-01-02 15:04:05",
|
||||
NoColor: true, // No colors in file output
|
||||
FormatLevel: func(i interface{}) string {
|
||||
return strings.ToUpper(fmt.Sprintf("| %-6s|", i))
|
||||
},
|
||||
FormatMessage: func(i interface{}) string {
|
||||
return fmt.Sprintf("[%s] %v", prefix, i)
|
||||
},
|
||||
}
|
||||
|
||||
multi := zerolog.MultiLevelWriter(consoleWriter, fileWriter)
|
||||
|
||||
logger := zerolog.New(multi).
|
||||
With().
|
||||
Timestamp().
|
||||
Logger().
|
||||
Level(zerolog.InfoLevel)
|
||||
|
||||
// Set the log level
|
||||
switch level {
|
||||
case "debug":
|
||||
logger = logger.Level(zerolog.DebugLevel)
|
||||
case "info":
|
||||
logger = logger.Level(zerolog.InfoLevel)
|
||||
case "warn":
|
||||
logger = logger.Level(zerolog.WarnLevel)
|
||||
case "error":
|
||||
logger = logger.Level(zerolog.ErrorLevel)
|
||||
}
|
||||
return logger
|
||||
}
|
||||
|
||||
func GetLogger(level string) zerolog.Logger {
|
||||
once.Do(func() {
|
||||
logger = NewLogger("decypharr", level, os.Stdout)
|
||||
})
|
||||
return logger
|
||||
}
|
||||
@@ -1,17 +1,40 @@
|
||||
package common
|
||||
package request
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"golang.org/x/time/rate"
|
||||
"io"
|
||||
"log"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
func JoinURL(base string, paths ...string) (string, error) {
|
||||
// Split the last path component to separate query parameters
|
||||
lastPath := paths[len(paths)-1]
|
||||
parts := strings.Split(lastPath, "?")
|
||||
paths[len(paths)-1] = parts[0]
|
||||
|
||||
joined, err := url.JoinPath(base, paths...)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// Add back query parameters if they exist
|
||||
if len(parts) > 1 {
|
||||
return joined + "?" + parts[1], nil
|
||||
}
|
||||
|
||||
return joined, nil
|
||||
}
|
||||
|
||||
type RLHTTPClient struct {
|
||||
client *http.Client
|
||||
Ratelimiter *rate.Limiter
|
||||
@@ -60,11 +83,7 @@ func (c *RLHTTPClient) Do(req *http.Request) (*http.Response, error) {
|
||||
return resp, fmt.Errorf("max retries exceeded")
|
||||
}
|
||||
|
||||
func (c *RLHTTPClient) MakeRequest(method string, url string, body io.Reader) ([]byte, error) {
|
||||
req, err := http.NewRequest(method, url, body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
func (c *RLHTTPClient) MakeRequest(req *http.Request) ([]byte, error) {
|
||||
if c.Headers != nil {
|
||||
for key, value := range c.Headers {
|
||||
req.Header.Set(key, value)
|
||||
@@ -75,22 +94,32 @@ func (c *RLHTTPClient) MakeRequest(method string, url string, body io.Reader) ([
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
statusOk := strconv.Itoa(res.StatusCode)[0] == '2'
|
||||
if !statusOk {
|
||||
return nil, fmt.Errorf("unexpected status code: %d", res.StatusCode)
|
||||
}
|
||||
|
||||
defer func(Body io.ReadCloser) {
|
||||
err := Body.Close()
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
}
|
||||
}(res.Body)
|
||||
return io.ReadAll(res.Body)
|
||||
|
||||
b, err := io.ReadAll(res.Body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
statusOk := res.StatusCode >= 200 && res.StatusCode < 300
|
||||
if !statusOk {
|
||||
// Add status code error to the body
|
||||
b = append(b, []byte(fmt.Sprintf("\nstatus code: %d", res.StatusCode))...)
|
||||
return nil, errors.New(string(b))
|
||||
}
|
||||
|
||||
return b, nil
|
||||
}
|
||||
|
||||
func NewRLHTTPClient(rl *rate.Limiter, headers map[string]string) *RLHTTPClient {
|
||||
tr := &http.Transport{
|
||||
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
|
||||
Proxy: http.ProxyFromEnvironment,
|
||||
}
|
||||
c := &RLHTTPClient{
|
||||
client: &http.Client{
|
||||
@@ -128,3 +157,12 @@ func ParseRateLimit(rateStr string) *rate.Limiter {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func JSONResponse(w http.ResponseWriter, data interface{}, code int) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.WriteHeader(code)
|
||||
err := json.NewEncoder(w).Encode(data)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
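With the move from the common package to request, MakeRequest no longer builds the request itself: it takes a prepared *http.Request, injects the client's default headers, goes through the rate-limited Do, and returns the body, with non-2xx responses reported as errors that include the body. A hedged sketch of the new call pattern; the URL and API key are placeholders, and the package is only importable from inside this module:

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/sirrobot01/debrid-blackhole/internal/request"
)

func main() {
	rl := request.ParseRateLimit("250/minute")
	client := request.NewRLHTTPClient(rl, map[string]string{
		"Authorization": "Bearer <api-key>", // placeholder
	})

	// The caller now constructs the *http.Request itself.
	req, err := http.NewRequest(http.MethodGet, "https://example.com/api/user", nil)
	if err != nil {
		panic(err)
	}
	body, err := client.MakeRequest(req)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body))
}
```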
2 internal/utils/file.go Normal file
@@ -0,0 +1,2 @@
package utils
235 internal/utils/magnet.go Normal file
@@ -0,0 +1,235 @@
|
||||
package utils
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/base32"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"github.com/anacrolix/torrent/metainfo"
|
||||
"io"
|
||||
"log"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
type Magnet struct {
|
||||
Name string
|
||||
InfoHash string
|
||||
Size int64
|
||||
Link string
|
||||
}
|
||||
|
||||
func GetMagnetFromFile(file io.Reader, filePath string) (*Magnet, error) {
|
||||
if filepath.Ext(filePath) == ".torrent" {
|
||||
torrentData, err := io.ReadAll(file)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return GetMagnetFromBytes(torrentData)
|
||||
} else {
|
||||
// .magnet file
|
||||
magnetLink := ReadMagnetFile(file)
|
||||
return GetMagnetInfo(magnetLink)
|
||||
}
|
||||
}
|
||||
|
||||
func GetMagnetFromUrl(url string) (*Magnet, error) {
|
||||
if strings.HasPrefix(url, "magnet:") {
|
||||
return GetMagnetInfo(url)
|
||||
} else if strings.HasPrefix(url, "http") {
|
||||
return OpenMagnetHttpURL(url)
|
||||
}
|
||||
return nil, fmt.Errorf("invalid url")
|
||||
}
|
||||
|
||||
func GetMagnetFromBytes(torrentData []byte) (*Magnet, error) {
|
||||
// Create a scanner to read the file line by line
|
||||
mi, err := metainfo.Load(bytes.NewReader(torrentData))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hash := mi.HashInfoBytes()
|
||||
infoHash := hash.HexString()
|
||||
info, err := mi.UnmarshalInfo()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
log.Println("InfoHash: ", infoHash)
|
||||
magnet := &Magnet{
|
||||
InfoHash: infoHash,
|
||||
Name: info.Name,
|
||||
Size: info.Length,
|
||||
Link: mi.Magnet(&hash, &info).String(),
|
||||
}
|
||||
return magnet, nil
|
||||
}
|
||||
|
||||
func OpenMagnetFile(filePath string) string {
|
||||
file, err := os.Open(filePath)
|
||||
if err != nil {
|
||||
log.Println("Error opening file:", err)
|
||||
return ""
|
||||
}
|
||||
defer func(file *os.File) {
|
||||
err := file.Close()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}(file) // Ensure the file is closed after the function ends
|
||||
return ReadMagnetFile(file)
|
||||
}
|
||||
|
||||
func ReadMagnetFile(file io.Reader) string {
|
||||
scanner := bufio.NewScanner(file)
|
||||
for scanner.Scan() {
|
||||
content := scanner.Text()
|
||||
if content != "" {
|
||||
return content
|
||||
}
|
||||
}
|
||||
|
||||
// Check for any errors during scanning
|
||||
if err := scanner.Err(); err != nil {
|
||||
log.Println("Error reading file:", err)
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func OpenMagnetHttpURL(magnetLink string) (*Magnet, error) {
|
||||
resp, err := http.Get(magnetLink)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error making GET request: %v", err)
|
||||
}
|
||||
defer func(resp *http.Response) {
|
||||
err := resp.Body.Close()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}(resp) // Ensure the response is closed after the function ends
|
||||
torrentData, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error reading response body: %v", err)
|
||||
}
|
||||
return GetMagnetFromBytes(torrentData)
|
||||
}
|
||||
|
||||
func GetMagnetInfo(magnetLink string) (*Magnet, error) {
|
||||
if magnetLink == "" {
|
||||
return nil, fmt.Errorf("error getting magnet from file")
|
||||
}
|
||||
|
||||
magnetURI, err := url.Parse(magnetLink)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error parsing magnet link")
|
||||
}
|
||||
|
||||
query := magnetURI.Query()
|
||||
xt := query.Get("xt")
|
||||
dn := query.Get("dn")
|
||||
|
||||
// Extract BTIH
|
||||
parts := strings.Split(xt, ":")
|
||||
btih := ""
|
||||
if len(parts) > 2 {
|
||||
btih = parts[2]
|
||||
}
|
||||
magnet := &Magnet{
|
||||
InfoHash: btih,
|
||||
Name: dn,
|
||||
Size: 0,
|
||||
Link: magnetLink,
|
||||
}
|
||||
return magnet, nil
|
||||
}
|
||||
|
||||
func ExtractInfoHash(magnetDesc string) string {
|
||||
const prefix = "xt=urn:btih:"
|
||||
start := strings.Index(magnetDesc, prefix)
|
||||
if start == -1 {
|
||||
return ""
|
||||
}
|
||||
hash := ""
|
||||
start += len(prefix)
|
||||
end := strings.IndexAny(magnetDesc[start:], "&#")
|
||||
if end == -1 {
|
||||
hash = magnetDesc[start:]
|
||||
} else {
|
||||
hash = magnetDesc[start : start+end]
|
||||
}
|
||||
hash, _ = processInfoHash(hash) // Convert to hex if needed
|
||||
return hash
|
||||
}
|
||||
|
||||
func processInfoHash(input string) (string, error) {
|
||||
// Regular expression for a valid 40-character hex infohash
|
||||
hexRegex := regexp.MustCompile("^[0-9a-fA-F]{40}$")
|
||||
|
||||
// If it's already a valid hex infohash, return it as is
|
||||
if hexRegex.MatchString(input) {
|
||||
return strings.ToLower(input), nil
|
||||
}
|
||||
|
||||
// If it's 32 characters long, it might be Base32 encoded
|
||||
if len(input) == 32 {
|
||||
// Ensure the input is uppercase and remove any padding
|
||||
input = strings.ToUpper(strings.TrimRight(input, "="))
|
||||
|
||||
// Try to decode from Base32
|
||||
decoded, err := base32.StdEncoding.DecodeString(input)
|
||||
if err == nil && len(decoded) == 20 {
|
||||
// If successful and the result is 20 bytes, encode to hex
|
||||
return hex.EncodeToString(decoded), nil
|
||||
}
|
||||
}
|
||||
|
||||
// If we get here, it's not a valid infohash and we couldn't convert it
|
||||
return "", fmt.Errorf("invalid infohash: %s", input)
|
||||
}
|
||||
|
||||
func GetInfohashFromURL(url string) (string, error) {
|
||||
// Download the torrent file
|
||||
var magnetLink string
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
client := &http.Client{
|
||||
Timeout: 30 * time.Second,
|
||||
CheckRedirect: func(req *http.Request, via []*http.Request) error {
|
||||
if len(via) >= 3 {
|
||||
return fmt.Errorf("stopped after 3 redirects")
|
||||
}
|
||||
if strings.HasPrefix(req.URL.String(), "magnet:") {
|
||||
// Stop the redirect chain
|
||||
magnetLink = req.URL.String()
|
||||
return http.ErrUseLastResponse
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
if magnetLink != "" {
|
||||
return ExtractInfoHash(magnetLink), nil
|
||||
}
|
||||
|
||||
mi, err := metainfo.Load(resp.Body)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
hash := mi.HashInfoBytes()
|
||||
infoHash := hash.HexString()
|
||||
return infoHash, nil
|
||||
}
|
||||
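ExtractInfoHash pulls the btih value out of a magnet URI and normalizes it through processInfoHash: a 40-character hex hash is simply lower-cased, while a 32-character Base32 hash is decoded and re-encoded as hex. A small illustration with a made-up hash:

```go
package main

import (
	"fmt"

	"github.com/sirrobot01/debrid-blackhole/internal/utils"
)

func main() {
	// A 40-char hex btih is returned lower-cased as-is; a 32-char
	// Base32 btih would be decoded and re-encoded as hex.
	link := "magnet:?xt=urn:btih:C12FE1C06BBA254A9DC9F519B335AA7C1367A88A&dn=example"
	fmt.Println(utils.ExtractInfoHash(link))
	// c12fe1c06bba254a9dc9f519b335aa7c1367a88a
}
```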
15 internal/utils/misc.go Normal file
@@ -0,0 +1,15 @@
package utils

func RemoveItem[S ~[]E, E comparable](s S, values ...E) S {
	result := make(S, 0, len(s))
outer:
	for _, item := range s {
		for _, v := range values {
			if item == v {
				continue outer
			}
		}
		result = append(result, item)
	}
	return result
}
58 internal/utils/regex.go Normal file
@@ -0,0 +1,58 @@
|
||||
package utils
|
||||
|
||||
import (
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var (
|
||||
VIDEOMATCH = "(?i)(\\.)(YUV|WMV|WEBM|VOB|VIV|SVI|ROQ|RMVB|RM|OGV|OGG|NSV|MXF|MPG|MPEG|M2V|MP2|MPE|MPV|MP4|M4P|M4V|MOV|QT|MNG|MKV|FLV|DRC|AVI|ASF|AMV|MKA|F4V|3GP|3G2|DIVX|X264|X265)$"
|
||||
MUSICMATCH = "(?i)(\\.)(?:MP3|WAV|FLAC|AAC|OGG|WMA|AIFF|ALAC|M4A|APE|AC3|DTS|M4P|MID|MIDI|MKA|MP2|MPA|RA|VOC|WV|AMR)$"
|
||||
)
|
||||
|
||||
var SAMPLEMATCH = `(?i)(^|[\\/]|[._-])(sample|trailer|thumb)s?([._-]|$)`
|
||||
|
||||
func RegexMatch(regex string, value string) bool {
|
||||
re := regexp.MustCompile(regex)
|
||||
return re.MatchString(value)
|
||||
}
|
||||
|
||||
func RemoveInvalidChars(value string) string {
|
||||
return strings.Map(func(r rune) rune {
|
||||
if r == filepath.Separator || r == ':' {
|
||||
return r
|
||||
}
|
||||
if filepath.IsAbs(string(r)) {
|
||||
return r
|
||||
}
|
||||
if strings.ContainsRune(filepath.VolumeName("C:"+string(r)), r) {
|
||||
return r
|
||||
}
|
||||
if r < 32 || strings.ContainsRune(`<>:"/\|?*`, r) {
|
||||
return -1
|
||||
}
|
||||
return r
|
||||
}, value)
|
||||
}
|
||||
|
||||
func RemoveExtension(value string) string {
|
||||
re := regexp.MustCompile(VIDEOMATCH + "|" + SAMPLEMATCH + "|" + MUSICMATCH)
|
||||
|
||||
// Find the last index of the matched extension
|
||||
loc := re.FindStringIndex(value)
|
||||
if loc != nil {
|
||||
return value[:loc[0]]
|
||||
} else {
|
||||
return value
|
||||
}
|
||||
}
|
||||
|
||||
func IsMediaFile(path string) bool {
|
||||
mediaPattern := VIDEOMATCH + "|" + MUSICMATCH
|
||||
return RegexMatch(mediaPattern, path)
|
||||
}
|
||||
|
||||
func IsSampleFile(path string) bool {
|
||||
return RegexMatch(SAMPLEMATCH, path)
|
||||
}
|
||||
17 main.go
@@ -1,22 +1,25 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"flag"
|
||||
"goBlack/cmd"
|
||||
"goBlack/common"
|
||||
"github.com/sirrobot01/debrid-blackhole/cmd/decypharr"
|
||||
"github.com/sirrobot01/debrid-blackhole/internal/config"
|
||||
"log"
|
||||
)
|
||||
|
||||
func main() {
|
||||
var configPath string
|
||||
flag.StringVar(&configPath, "config", "config.json", "path to the config file")
|
||||
flag.StringVar(&configPath, "config", "/data", "path to the data folder")
|
||||
flag.Parse()
|
||||
|
||||
// Load the config file
|
||||
conf, err := common.LoadConfig(configPath)
|
||||
if err != nil {
|
||||
if err := config.SetConfigPath(configPath); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
config.GetConfig()
|
||||
ctx := context.Background()
|
||||
if err := decypharr.Start(ctx); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
cmd.Start(conf)
|
||||
|
||||
}
|
||||
|
||||
141 pkg/arr/arr.go Normal file
@@ -0,0 +1,141 @@
|
||||
package arr
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"github.com/sirrobot01/debrid-blackhole/internal/config"
|
||||
"github.com/sirrobot01/debrid-blackhole/internal/request"
|
||||
"net/http"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// Type is a type of arr
|
||||
type Type string
|
||||
|
||||
const (
|
||||
Sonarr Type = "sonarr"
|
||||
Radarr Type = "radarr"
|
||||
Lidarr Type = "lidarr"
|
||||
Readarr Type = "readarr"
|
||||
)
|
||||
|
||||
var (
|
||||
client *request.RLHTTPClient = request.NewRLHTTPClient(nil, nil)
|
||||
)
|
||||
|
||||
type Arr struct {
|
||||
Name string `json:"name"`
|
||||
Host string `json:"host"`
|
||||
Token string `json:"token"`
|
||||
Type Type `json:"type"`
|
||||
Cleanup bool `json:"cleanup"`
|
||||
}
|
||||
|
||||
func New(name, host, token string, cleanup bool) *Arr {
|
||||
return &Arr{
|
||||
Name: name,
|
||||
Host: host,
|
||||
Token: token,
|
||||
Type: InferType(host, name),
|
||||
Cleanup: cleanup,
|
||||
}
|
||||
}
|
||||
|
||||
func (a *Arr) Request(method, endpoint string, payload interface{}) (*http.Response, error) {
|
||||
if a.Token == "" || a.Host == "" {
|
||||
return nil, nil
|
||||
}
|
||||
url, err := request.JoinURL(a.Host, endpoint)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var jsonPayload []byte
|
||||
|
||||
if payload != nil {
|
||||
jsonPayload, err = json.Marshal(payload)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
req, err := http.NewRequest(method, url, bytes.NewBuffer(jsonPayload))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
req.Header.Set("X-Api-Key", a.Token)
|
||||
return client.Do(req)
|
||||
}
|
||||
|
||||
func (a *Arr) Validate() error {
|
||||
if a.Token == "" || a.Host == "" {
|
||||
return nil
|
||||
}
|
||||
resp, err := a.Request("GET", "/api/v3/health", nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return fmt.Errorf("arr test failed: %s", resp.Status)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type Storage struct {
|
||||
Arrs map[string]*Arr // name -> arr
|
||||
mu sync.RWMutex
|
||||
}
|
||||
|
||||
func InferType(host, name string) Type {
|
||||
switch {
|
||||
case strings.Contains(host, "sonarr") || strings.Contains(name, "sonarr"):
|
||||
return Sonarr
|
||||
case strings.Contains(host, "radarr") || strings.Contains(name, "radarr"):
|
||||
return Radarr
|
||||
case strings.Contains(host, "lidarr") || strings.Contains(name, "lidarr"):
|
||||
return Lidarr
|
||||
case strings.Contains(host, "readarr") || strings.Contains(name, "readarr"):
|
||||
return Readarr
|
||||
default:
|
||||
return ""
|
||||
}
|
||||
}
|
||||
|
||||
func NewStorage() *Storage {
|
||||
arrs := make(map[string]*Arr)
|
||||
for _, a := range config.GetConfig().Arrs {
|
||||
name := a.Name
|
||||
arrs[name] = New(name, a.Host, a.Token, a.Cleanup)
|
||||
}
|
||||
return &Storage{
|
||||
Arrs: arrs,
|
||||
}
|
||||
}
|
||||
|
||||
func (as *Storage) AddOrUpdate(arr *Arr) {
|
||||
as.mu.Lock()
|
||||
defer as.mu.Unlock()
|
||||
if arr.Name == "" {
|
||||
return
|
||||
}
|
||||
as.Arrs[arr.Name] = arr
|
||||
}
|
||||
|
||||
func (as *Storage) Get(name string) *Arr {
|
||||
as.mu.RLock()
|
||||
defer as.mu.RUnlock()
|
||||
return as.Arrs[name]
|
||||
}
|
||||
|
||||
func (as *Storage) GetAll() []*Arr {
|
||||
as.mu.RLock()
|
||||
defer as.mu.RUnlock()
|
||||
arrs := make([]*Arr, 0, len(as.Arrs))
|
||||
for _, arr := range as.Arrs {
|
||||
if arr.Host != "" && arr.Token != "" {
|
||||
arrs = append(arrs, arr)
|
||||
}
|
||||
}
|
||||
return arrs
|
||||
}
|
||||
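For reference, a minimal sketch of wiring up an Arr by hand; the host and API key are placeholders.

```go
package main

import (
	"fmt"
	"log"

	"github.com/sirrobot01/debrid-blackhole/pkg/arr"
)

func main() {
	// Type is inferred from the name/host via InferType.
	a := arr.New("sonarr-4k", "http://localhost:8989", "example-api-key", false)
	fmt.Println(a.Type) // sonarr

	// Validate issues GET /api/v3/health with the X-Api-Key header set.
	if err := a.Validate(); err != nil {
		log.Fatalf("arr health check failed: %v", err)
	}
}
```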
192 pkg/arr/content.go Normal file @@ -0,0 +1,192 @@
|
||||
package arr
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
func (a *Arr) GetMedia(tvId string) ([]Content, error) {
|
||||
// Get series
|
||||
resp, err := a.Request(http.MethodGet, fmt.Sprintf("api/v3/series?tvdbId=%s", tvId), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if resp.StatusCode == http.StatusNotFound {
|
||||
// This is likely Radarr
|
||||
return GetMovies(a, tvId)
|
||||
}
|
||||
a.Type = Sonarr
|
||||
defer resp.Body.Close()
|
||||
type series struct {
|
||||
Title string `json:"title"`
|
||||
Id int `json:"id"`
|
||||
}
|
||||
var data []series
|
||||
if err = json.NewDecoder(resp.Body).Decode(&data); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Get series files
|
||||
contents := make([]Content, 0)
|
||||
for _, d := range data {
|
||||
resp, err = a.Request(http.MethodGet, fmt.Sprintf("api/v3/episodefile?seriesId=%d", d.Id), nil)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
var seriesFiles []seriesFile
|
||||
if err = json.NewDecoder(resp.Body).Decode(&seriesFiles); err != nil {
|
||||
continue
|
||||
}
|
||||
ct := Content{
|
||||
Title: d.Title,
|
||||
Id: d.Id,
|
||||
}
|
||||
|
||||
type episode struct {
|
||||
Id int `json:"id"`
|
||||
EpisodeFileID int `json:"episodeFileId"`
|
||||
}
|
||||
resp, err = a.Request(http.MethodGet, fmt.Sprintf("api/v3/episode?seriesId=%d", d.Id), nil)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
var episodes []episode
|
||||
if err = json.NewDecoder(resp.Body).Decode(&episodes); err != nil {
|
||||
continue
|
||||
}
|
||||
episodeFileIDMap := make(map[int]int)
|
||||
for _, e := range episodes {
|
||||
episodeFileIDMap[e.EpisodeFileID] = e.Id
|
||||
}
|
||||
files := make([]ContentFile, 0)
|
||||
for _, file := range seriesFiles {
|
||||
eId, ok := episodeFileIDMap[file.Id]
|
||||
if !ok {
|
||||
eId = 0
|
||||
}
|
||||
files = append(files, ContentFile{
|
||||
FileId: file.Id,
|
||||
Path: file.Path,
|
||||
Id: eId,
|
||||
})
|
||||
}
|
||||
ct.Files = files
|
||||
contents = append(contents, ct)
|
||||
}
|
||||
return contents, nil
|
||||
}
|
||||
|
||||
func GetMovies(a *Arr, tvId string) ([]Content, error) {
|
||||
resp, err := a.Request(http.MethodGet, fmt.Sprintf("api/v3/movie?tmdbId=%s", tvId), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if resp.StatusCode == http.StatusNotFound {
|
||||
// This is likely Lidarr or Readarr
|
||||
return nil, fmt.Errorf("failed to get movies: %s", resp.Status)
|
||||
}
|
||||
a.Type = Radarr
|
||||
defer resp.Body.Close()
|
||||
var movies []Movie
|
||||
if err = json.NewDecoder(resp.Body).Decode(&movies); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
contents := make([]Content, 0)
|
||||
for _, movie := range movies {
|
||||
ct := Content{
|
||||
Title: movie.Title,
|
||||
Id: movie.Id,
|
||||
}
|
||||
files := make([]ContentFile, 0)
|
||||
files = append(files, ContentFile{
|
||||
FileId: movie.MovieFile.Id,
|
||||
Id: movie.Id,
|
||||
Path: movie.MovieFile.Path,
|
||||
})
|
||||
ct.Files = files
|
||||
contents = append(contents, ct)
|
||||
}
|
||||
return contents, nil
|
||||
}
|
||||
|
||||
func (a *Arr) search(ids []int) error {
|
||||
var payload interface{}
|
||||
switch a.Type {
|
||||
case Sonarr:
|
||||
payload = struct {
|
||||
Name string `json:"name"`
|
||||
EpisodeIds []int `json:"episodeIds"`
|
||||
}{
|
||||
Name: "EpisodeSearch",
|
||||
EpisodeIds: ids,
|
||||
}
|
||||
case Radarr:
|
||||
payload = struct {
|
||||
Name string `json:"name"`
|
||||
MovieIds []int `json:"movieIds"`
|
||||
}{
|
||||
Name: "MoviesSearch",
|
||||
MovieIds: ids,
|
||||
}
|
||||
default:
|
||||
return fmt.Errorf("unknown arr type: %s", a.Type)
|
||||
}
|
||||
|
||||
resp, err := a.Request(http.MethodPost, "api/v3/command", payload)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to automatic search: %v", err)
|
||||
}
|
||||
if statusOk := strconv.Itoa(resp.StatusCode)[0] == '2'; !statusOk {
|
||||
return fmt.Errorf("failed to automatic search. Status Code: %s", resp.Status)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *Arr) SearchMissing(files []ContentFile) error {
|
||||
|
||||
ids := make([]int, 0)
|
||||
for _, f := range files {
|
||||
ids = append(ids, f.Id)
|
||||
}
|
||||
|
||||
if len(ids) == 0 {
|
||||
return nil
|
||||
}
|
||||
return a.search(ids)
|
||||
}
|
||||
|
||||
func (a *Arr) DeleteFiles(files []ContentFile) error {
|
||||
ids := make([]int, 0)
|
||||
for _, f := range files {
|
||||
ids = append(ids, f.FileId)
|
||||
}
|
||||
var payload interface{}
|
||||
switch a.Type {
|
||||
case Sonarr:
|
||||
payload = struct {
|
||||
EpisodeFileIds []int `json:"episodeFileIds"`
|
||||
}{
|
||||
EpisodeFileIds: ids,
|
||||
}
|
||||
_, err := a.Request(http.MethodDelete, "api/v3/episodefile/bulk", payload)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
case Radarr:
|
||||
payload = struct {
|
||||
MovieFileIds []int `json:"movieFileIds"`
|
||||
}{
|
||||
MovieFileIds: ids,
|
||||
}
|
||||
_, err := a.Request(http.MethodDelete, "api/v3/moviefile/bulk", payload)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
default:
|
||||
return fmt.Errorf("unknown arr type: %s", a.Type)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
187 pkg/arr/history.go Normal file @@ -0,0 +1,187 @@
|
||||
package arr
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"io"
|
||||
"net/http"
|
||||
gourl "net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type HistorySchema struct {
|
||||
Page int `json:"page"`
|
||||
PageSize int `json:"pageSize"`
|
||||
SortKey string `json:"sortKey"`
|
||||
SortDirection string `json:"sortDirection"`
|
||||
TotalRecords int `json:"totalRecords"`
|
||||
Records []struct {
|
||||
ID int `json:"id"`
|
||||
DownloadID string `json:"downloadId"`
|
||||
} `json:"records"`
|
||||
}
|
||||
|
||||
type QueueResponseScheme struct {
|
||||
Page int `json:"page"`
|
||||
PageSize int `json:"pageSize"`
|
||||
SortKey string `json:"sortKey"`
|
||||
SortDirection string `json:"sortDirection"`
|
||||
TotalRecords int `json:"totalRecords"`
|
||||
Records []QueueSchema `json:"records"`
|
||||
}
|
||||
|
||||
type QueueSchema struct {
|
||||
SeriesId int `json:"seriesId"`
|
||||
EpisodeId int `json:"episodeId"`
|
||||
SeasonNumber int `json:"seasonNumber"`
|
||||
Title string `json:"title"`
|
||||
Status string `json:"status"`
|
||||
TrackedDownloadStatus string `json:"trackedDownloadStatus"`
|
||||
TrackedDownloadState string `json:"trackedDownloadState"`
|
||||
StatusMessages []struct {
|
||||
Title string `json:"title"`
|
||||
Messages []string `json:"messages"`
|
||||
} `json:"statusMessages"`
|
||||
DownloadId string `json:"downloadId"`
|
||||
Protocol string `json:"protocol"`
|
||||
DownloadClient string `json:"downloadClient"`
|
||||
DownloadClientHasPostImportCategory bool `json:"downloadClientHasPostImportCategory"`
|
||||
Indexer string `json:"indexer"`
|
||||
OutputPath string `json:"outputPath"`
|
||||
EpisodeHasFile bool `json:"episodeHasFile"`
|
||||
Id int `json:"id"`
|
||||
}
|
||||
|
||||
func (a *Arr) GetHistory(downloadId, eventType string) *HistorySchema {
|
||||
query := gourl.Values{}
|
||||
if downloadId != "" {
|
||||
query.Add("downloadId", downloadId)
|
||||
}
|
||||
query.Add("eventType", eventType)
|
||||
query.Add("pageSize", "100")
|
||||
url := "api/v3/history" + "?" + query.Encode()
|
||||
resp, err := a.Request(http.MethodGet, url, nil)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
var data *HistorySchema
|
||||
|
||||
if err = json.NewDecoder(resp.Body).Decode(&data); err != nil {
|
||||
return nil
|
||||
}
|
||||
return data
|
||||
|
||||
}
|
||||
|
||||
func (a *Arr) GetQueue() []QueueSchema {
|
||||
query := gourl.Values{}
|
||||
query.Add("page", "1")
|
||||
query.Add("pageSize", "200")
|
||||
results := make([]QueueSchema, 0)
|
||||
|
||||
for {
|
||||
url := "api/v3/queue" + "?" + query.Encode()
|
||||
resp, err := a.Request(http.MethodGet, url, nil)
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
|
||||
func() {
|
||||
defer func(Body io.ReadCloser) {
|
||||
err := Body.Close()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}(resp.Body)
|
||||
|
||||
var data QueueResponseScheme
|
||||
if err = json.NewDecoder(resp.Body).Decode(&data); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
results = append(results, data.Records...)
|
||||
|
||||
if len(results) >= data.TotalRecords {
|
||||
// We've fetched all records
|
||||
err = io.EOF // Signal to exit the loop
|
||||
return
|
||||
}
|
||||
|
||||
query.Set("page", strconv.Itoa(data.Page+1))
|
||||
}()
|
||||
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return results
|
||||
}
|
||||
|
||||
func (a *Arr) CleanupQueue() error {
|
||||
queue := a.GetQueue()
|
||||
type messedUp struct {
|
||||
id int
|
||||
episodeId int
|
||||
seasonNum int
|
||||
}
|
||||
cleanups := make(map[int][]messedUp)
|
||||
for _, q := range queue {
|
||||
isMessedUp := false
|
||||
if q.Protocol == "torrent" && q.Status == "completed" && q.TrackedDownloadStatus == "warning" && q.TrackedDownloadState == "importPending" {
|
||||
messages := q.StatusMessages
|
||||
if len(messages) > 0 {
|
||||
for _, m := range messages {
|
||||
if strings.Contains(strings.Join(m.Messages, " "), "No files found are eligible for import in") {
|
||||
isMessedUp = true
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if isMessedUp {
|
||||
cleanups[q.SeriesId] = append(cleanups[q.SeriesId], messedUp{
|
||||
id: q.Id,
|
||||
episodeId: q.EpisodeId,
|
||||
seasonNum: q.SeasonNumber,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
if len(cleanups) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
queueIds := make([]int, 0)
|
||||
|
||||
for _, c := range cleanups {
|
||||
// Collect the queue ids of the messed up episodes
|
||||
for _, m := range c {
|
||||
queueIds = append(queueIds, m.id)
|
||||
}
|
||||
}
|
||||
|
||||
// Delete the messed up episodes from queue
|
||||
|
||||
payload := struct {
|
||||
Ids []int `json:"ids"`
|
||||
}{
|
||||
Ids: queueIds,
|
||||
}
|
||||
|
||||
// Blocklist the hash (the download is typically incomplete), then re-search the episode
|
||||
|
||||
query := gourl.Values{}
|
||||
query.Add("removeFromClient", "true")
|
||||
query.Add("blocklist", "true")
|
||||
query.Add("skipRedownload", "false")
|
||||
query.Add("changeCategory", "false")
|
||||
url := "api/v3/queue/bulk" + "?" + query.Encode()
|
||||
|
||||
_, err := a.Request(http.MethodDelete, url, payload)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
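For reference, a sketch of running the queue cleanup against a single arr; the credentials are placeholders.

```go
package main

import (
	"log"

	"github.com/sirrobot01/debrid-blackhole/pkg/arr"
)

func main() {
	a := arr.New("sonarr", "http://localhost:8989", "example-api-key", true)

	// CleanupQueue removes completed-but-unimportable torrents from the queue
	// and blocklists them (removeFromClient=true, blocklist=true,
	// skipRedownload=false), leaving the arr free to grab a different release.
	if err := a.CleanupQueue(); err != nil {
		log.Printf("queue cleanup failed: %v", err)
	}
}
```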
209 pkg/arr/import.go Normal file @@ -0,0 +1,209 @@
|
||||
package arr
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
gourl "net/url"
|
||||
"strconv"
|
||||
"time"
|
||||
)
|
||||
|
||||
type ImportResponseSchema struct {
|
||||
Path string `json:"path"`
|
||||
RelativePath string `json:"relativePath"`
|
||||
FolderName string `json:"folderName"`
|
||||
Name string `json:"name"`
|
||||
Size int `json:"size"`
|
||||
Series struct {
|
||||
Title string `json:"title"`
|
||||
SortTitle string `json:"sortTitle"`
|
||||
Status string `json:"status"`
|
||||
Ended bool `json:"ended"`
|
||||
Overview string `json:"overview"`
|
||||
Network string `json:"network"`
|
||||
AirTime string `json:"airTime"`
|
||||
Images []struct {
|
||||
CoverType string `json:"coverType"`
|
||||
RemoteUrl string `json:"remoteUrl"`
|
||||
} `json:"images"`
|
||||
OriginalLanguage struct {
|
||||
Id int `json:"id"`
|
||||
Name string `json:"name"`
|
||||
} `json:"originalLanguage"`
|
||||
Seasons []struct {
|
||||
SeasonNumber int `json:"seasonNumber"`
|
||||
Monitored bool `json:"monitored"`
|
||||
} `json:"seasons"`
|
||||
Year int `json:"year"`
|
||||
Path string `json:"path"`
|
||||
QualityProfileId int `json:"qualityProfileId"`
|
||||
SeasonFolder bool `json:"seasonFolder"`
|
||||
Monitored bool `json:"monitored"`
|
||||
MonitorNewItems string `json:"monitorNewItems"`
|
||||
UseSceneNumbering bool `json:"useSceneNumbering"`
|
||||
Runtime int `json:"runtime"`
|
||||
TvdbId int `json:"tvdbId"`
|
||||
TvRageId int `json:"tvRageId"`
|
||||
TvMazeId int `json:"tvMazeId"`
|
||||
TmdbId int `json:"tmdbId"`
|
||||
FirstAired time.Time `json:"firstAired"`
|
||||
LastAired time.Time `json:"lastAired"`
|
||||
SeriesType string `json:"seriesType"`
|
||||
CleanTitle string `json:"cleanTitle"`
|
||||
ImdbId string `json:"imdbId"`
|
||||
TitleSlug string `json:"titleSlug"`
|
||||
Certification string `json:"certification"`
|
||||
Genres []string `json:"genres"`
|
||||
Tags []interface{} `json:"tags"`
|
||||
Added time.Time `json:"added"`
|
||||
Ratings struct {
|
||||
Votes int `json:"votes"`
|
||||
Value float64 `json:"value"`
|
||||
} `json:"ratings"`
|
||||
LanguageProfileId int `json:"languageProfileId"`
|
||||
Id int `json:"id"`
|
||||
} `json:"series"`
|
||||
SeasonNumber int `json:"seasonNumber"`
|
||||
Episodes []struct {
|
||||
SeriesId int `json:"seriesId"`
|
||||
TvdbId int `json:"tvdbId"`
|
||||
EpisodeFileId int `json:"episodeFileId"`
|
||||
SeasonNumber int `json:"seasonNumber"`
|
||||
EpisodeNumber int `json:"episodeNumber"`
|
||||
Title string `json:"title"`
|
||||
AirDate string `json:"airDate"`
|
||||
AirDateUtc time.Time `json:"airDateUtc"`
|
||||
Runtime int `json:"runtime"`
|
||||
Overview string `json:"overview"`
|
||||
HasFile bool `json:"hasFile"`
|
||||
Monitored bool `json:"monitored"`
|
||||
AbsoluteEpisodeNumber int `json:"absoluteEpisodeNumber"`
|
||||
UnverifiedSceneNumbering bool `json:"unverifiedSceneNumbering"`
|
||||
Id int `json:"id"`
|
||||
FinaleType string `json:"finaleType,omitempty"`
|
||||
} `json:"episodes"`
|
||||
ReleaseGroup string `json:"releaseGroup"`
|
||||
Quality struct {
|
||||
Quality struct {
|
||||
Id int `json:"id"`
|
||||
Name string `json:"name"`
|
||||
Source string `json:"source"`
|
||||
Resolution int `json:"resolution"`
|
||||
} `json:"quality"`
|
||||
Revision struct {
|
||||
Version int `json:"version"`
|
||||
Real int `json:"real"`
|
||||
IsRepack bool `json:"isRepack"`
|
||||
} `json:"revision"`
|
||||
} `json:"quality"`
|
||||
Languages []struct {
|
||||
Id int `json:"id"`
|
||||
Name string `json:"name"`
|
||||
} `json:"languages"`
|
||||
QualityWeight int `json:"qualityWeight"`
|
||||
CustomFormats []interface{} `json:"customFormats"`
|
||||
CustomFormatScore int `json:"customFormatScore"`
|
||||
IndexerFlags int `json:"indexerFlags"`
|
||||
ReleaseType string `json:"releaseType"`
|
||||
Rejections []struct {
|
||||
Reason string `json:"reason"`
|
||||
Type string `json:"type"`
|
||||
} `json:"rejections"`
|
||||
Id int `json:"id"`
|
||||
}
|
||||
|
||||
type ManualImportRequestFile struct {
|
||||
Path string `json:"path"`
|
||||
SeriesId int `json:"seriesId"`
|
||||
SeasonNumber int `json:"seasonNumber"`
|
||||
EpisodeIds []int `json:"episodeIds"`
|
||||
Quality struct {
|
||||
Quality struct {
|
||||
Id int `json:"id"`
|
||||
Name string `json:"name"`
|
||||
Source string `json:"source"`
|
||||
Resolution int `json:"resolution"`
|
||||
} `json:"quality"`
|
||||
Revision struct {
|
||||
Version int `json:"version"`
|
||||
Real int `json:"real"`
|
||||
IsRepack bool `json:"isRepack"`
|
||||
} `json:"revision"`
|
||||
} `json:"quality"`
|
||||
Languages []struct {
|
||||
Id int `json:"id"`
|
||||
Name string `json:"name"`
|
||||
} `json:"languages"`
|
||||
ReleaseGroup string `json:"releaseGroup"`
|
||||
CustomFormats []interface{} `json:"customFormats"`
|
||||
CustomFormatScore int `json:"customFormatScore"`
|
||||
IndexerFlags int `json:"indexerFlags"`
|
||||
ReleaseType string `json:"releaseType"`
|
||||
Rejections []struct {
|
||||
Reason string `json:"reason"`
|
||||
Type string `json:"type"`
|
||||
} `json:"rejections"`
|
||||
}
|
||||
|
||||
type ManualImportRequestSchema struct {
|
||||
Name string `json:"name"`
|
||||
Files []ManualImportRequestFile `json:"files"`
|
||||
ImportMode string `json:"importMode"`
|
||||
}
|
||||
|
||||
func (a *Arr) Import(path string, seriesId int, seasons []int) (io.ReadCloser, error) {
|
||||
query := gourl.Values{}
|
||||
query.Add("folder", path)
|
||||
if seriesId != 0 {
|
||||
query.Add("seriesId", strconv.Itoa(seriesId))
|
||||
}
|
||||
url := "api/v3/manualimport" + "?" + query.Encode()
|
||||
resp, err := a.Request(http.MethodGet, url, nil)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to import, invalid file: %w", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
var data []ImportResponseSchema
|
||||
if err = json.NewDecoder(resp.Body).Decode(&data); err != nil {
|
||||
return nil, fmt.Errorf("failed to decode response: %w", err)
|
||||
}
|
||||
|
||||
var files []ManualImportRequestFile
|
||||
for _, d := range data {
|
||||
episodesIds := []int{}
|
||||
for _, e := range d.Episodes {
|
||||
episodesIds = append(episodesIds, e.Id)
|
||||
}
|
||||
file := ManualImportRequestFile{
|
||||
Path: d.Path,
|
||||
SeriesId: d.Series.Id,
|
||||
SeasonNumber: d.SeasonNumber,
|
||||
EpisodeIds: episodesIds,
|
||||
Quality: d.Quality,
|
||||
Languages: d.Languages,
|
||||
ReleaseGroup: d.ReleaseGroup,
|
||||
CustomFormats: d.CustomFormats,
|
||||
CustomFormatScore: d.CustomFormatScore,
|
||||
IndexerFlags: d.IndexerFlags,
|
||||
ReleaseType: d.ReleaseType,
|
||||
Rejections: d.Rejections,
|
||||
}
|
||||
files = append(files, file)
|
||||
}
|
||||
request := ManualImportRequestSchema{
|
||||
Name: "ManualImport",
|
||||
Files: files,
|
||||
ImportMode: "copy",
|
||||
}
|
||||
|
||||
url = "api/v3/command"
|
||||
resp, err = a.Request(http.MethodPost, url, request)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to import: %w", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
return resp.Body, nil
|
||||
|
||||
}
|
||||
59 pkg/arr/refresh.go Normal file @@ -0,0 +1,59 @@
|
||||
package arr
|
||||
|
||||
import (
|
||||
"cmp"
|
||||
"fmt"
|
||||
"github.com/sirrobot01/debrid-blackhole/internal/request"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func (a *Arr) Refresh() error {
|
||||
payload := struct {
|
||||
Name string `json:"name"`
|
||||
}{
|
||||
Name: "RefreshMonitoredDownloads",
|
||||
}
|
||||
|
||||
resp, err := a.Request(http.MethodPost, "api/v3/command", payload)
|
||||
if err == nil && resp != nil {
|
||||
statusOk := strconv.Itoa(resp.StatusCode)[0] == '2'
|
||||
if statusOk {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
return fmt.Errorf("failed to refresh: %v", err)
|
||||
}
|
||||
|
||||
func (a *Arr) Blacklist(infoHash string) error {
|
||||
downloadId := strings.ToUpper(infoHash)
|
||||
history := a.GetHistory(downloadId, "grabbed")
|
||||
if history == nil {
|
||||
return nil
|
||||
}
|
||||
torrentId := 0
|
||||
for _, record := range history.Records {
|
||||
if strings.EqualFold(record.DownloadID, downloadId) {
|
||||
torrentId = record.ID
|
||||
break
|
||||
}
|
||||
}
|
||||
if torrentId != 0 {
|
||||
url, err := request.JoinURL(a.Host, "history/failed/", strconv.Itoa(torrentId))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
req, err := http.NewRequest(http.MethodPost, url, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
client := &http.Client{}
|
||||
_, err = client.Do(req)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to mark %s as failed: %v", cmp.Or(a.Name, a.Host), err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
31 pkg/arr/tmdb.go Normal file @@ -0,0 +1,31 @@
|
||||
package arr
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
url2 "net/url"
|
||||
)
|
||||
|
||||
type TMDBResponse struct {
|
||||
Page int `json:"page"`
|
||||
Results []struct {
|
||||
ID int `json:"id"`
|
||||
Name string `json:"name"`
|
||||
MediaType string `json:"media_type"`
|
||||
PosterPath string `json:"poster_path"`
|
||||
} `json:"results"`
|
||||
}
|
||||
|
||||
func SearchTMDB(term string) (*TMDBResponse, error) {
|
||||
resp, err := http.Get("https://api.themoviedb.org/3/search/multi?api_key=key&query=" + url2.QueryEscape(term))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
defer resp.Body.Close()
|
||||
|
||||
var data *TMDBResponse
|
||||
if err = json.NewDecoder(resp.Body).Decode(&data); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return data, nil
|
||||
}
|
||||
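For reference, a sketch of calling the TMDB search; note the request above hard-codes api_key=key, so a real key would have to be substituted for this to return results.

```go
package main

import (
	"fmt"
	"log"

	"github.com/sirrobot01/debrid-blackhole/pkg/arr"
)

func main() {
	res, err := arr.SearchTMDB("Breaking Bad")
	if err != nil {
		log.Fatal(err)
	}
	for _, r := range res.Results {
		fmt.Printf("%d %s (%s)\n", r.ID, r.Name, r.MediaType)
	}
}
```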
38 pkg/arr/types.go Normal file @@ -0,0 +1,38 @@
|
||||
package arr
|
||||
|
||||
type Movie struct {
|
||||
Title string `json:"title"`
|
||||
OriginalTitle string `json:"originalTitle"`
|
||||
Path string `json:"path"`
|
||||
MovieFile struct {
|
||||
MovieId int `json:"movieId"`
|
||||
RelativePath string `json:"relativePath"`
|
||||
Path string `json:"path"`
|
||||
Size int `json:"size"`
|
||||
Id int `json:"id"`
|
||||
} `json:"movieFile"`
|
||||
Id int `json:"id"`
|
||||
}
|
||||
|
||||
type ContentFile struct {
|
||||
Name string `json:"name"`
|
||||
Path string `json:"path"`
|
||||
Id int `json:"id"`
|
||||
FileId int `json:"fileId"`
|
||||
TargetPath string `json:"targetPath"`
|
||||
IsSymlink bool `json:"isSymlink"`
|
||||
IsBroken bool `json:"isBroken"`
|
||||
}
|
||||
|
||||
type Content struct {
|
||||
Title string `json:"title"`
|
||||
Id int `json:"id"`
|
||||
Files []ContentFile `json:"files"`
|
||||
}
|
||||
|
||||
type seriesFile struct {
|
||||
SeriesId int `json:"seriesId"`
|
||||
SeasonNumber int `json:"seasonNumber"`
|
||||
Path string `json:"path"`
|
||||
Id int `json:"id"`
|
||||
}
|
||||
5 pkg/arr/utils.go Normal file @@ -0,0 +1,5 @@
|
||||
package arr
|
||||
|
||||
func Readfile(path string) error {
|
||||
return nil
|
||||
}
|
||||
302 pkg/debrid/alldebrid/alldebrid.go Normal file @@ -0,0 +1,302 @@
|
||||
package alldebrid
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/sirrobot01/debrid-blackhole/internal/cache"
|
||||
"github.com/sirrobot01/debrid-blackhole/internal/config"
|
||||
"github.com/sirrobot01/debrid-blackhole/internal/logger"
|
||||
"github.com/sirrobot01/debrid-blackhole/internal/request"
|
||||
"github.com/sirrobot01/debrid-blackhole/internal/utils"
|
||||
"github.com/sirrobot01/debrid-blackhole/pkg/debrid/torrent"
|
||||
"slices"
|
||||
|
||||
"net/http"
|
||||
gourl "net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
type AllDebrid struct {
|
||||
Name string
|
||||
Host string `json:"host"`
|
||||
APIKey string
|
||||
DownloadUncached bool
|
||||
client *request.RLHTTPClient
|
||||
cache *cache.Cache
|
||||
MountPath string
|
||||
logger zerolog.Logger
|
||||
CheckCached bool
|
||||
}
|
||||
|
||||
func (ad *AllDebrid) GetName() string {
|
||||
return ad.Name
|
||||
}
|
||||
|
||||
func (ad *AllDebrid) GetLogger() zerolog.Logger {
|
||||
return ad.logger
|
||||
}
|
||||
|
||||
func (ad *AllDebrid) IsAvailable(infohashes []string) map[string]bool {
|
||||
// Check if the infohashes are available in the local cache
|
||||
hashes, result := torrent.GetLocalCache(infohashes, ad.cache)
|
||||
|
||||
if len(hashes) == 0 {
|
||||
// Either all the infohashes are locally cached or none are
|
||||
ad.cache.AddMultiple(result)
|
||||
return result
|
||||
}
|
||||
|
||||
// Divide hashes into groups of 100
|
||||
// AllDebrid does not support checking cached infohashes
|
||||
return result
|
||||
}
|
||||
|
||||
func (ad *AllDebrid) SubmitMagnet(torrent *torrent.Torrent) (*torrent.Torrent, error) {
|
||||
url := fmt.Sprintf("%s/magnet/upload", ad.Host)
|
||||
query := gourl.Values{}
|
||||
query.Add("magnets[]", torrent.Magnet.Link)
|
||||
url += "?" + query.Encode()
|
||||
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
||||
resp, err := ad.client.MakeRequest(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var data UploadMagnetResponse
|
||||
err = json.Unmarshal(resp, &data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
magnets := data.Data.Magnets
|
||||
if len(magnets) == 0 {
|
||||
return nil, fmt.Errorf("error adding torrent")
|
||||
}
|
||||
magnet := magnets[0]
|
||||
torrentId := strconv.Itoa(magnet.ID)
|
||||
torrent.Id = torrentId
|
||||
|
||||
return torrent, nil
|
||||
}
|
||||
|
||||
func getAlldebridStatus(statusCode int) string {
|
||||
switch {
|
||||
case statusCode == 4:
|
||||
return "downloaded"
|
||||
case statusCode >= 0 && statusCode <= 3:
|
||||
return "downloading"
|
||||
default:
|
||||
return "error"
|
||||
}
|
||||
}
|
||||
|
||||
func flattenFiles(files []MagnetFile, parentPath string, index *int) []torrent.File {
|
||||
result := make([]torrent.File, 0)
|
||||
|
||||
cfg := config.GetConfig()
|
||||
|
||||
for _, f := range files {
|
||||
currentPath := f.Name
|
||||
if parentPath != "" {
|
||||
currentPath = filepath.Join(parentPath, f.Name)
|
||||
}
|
||||
|
||||
if f.Elements != nil {
|
||||
// This is a folder, recurse into it
|
||||
result = append(result, flattenFiles(f.Elements, currentPath, index)...)
|
||||
} else {
|
||||
// This is a file
|
||||
fileName := filepath.Base(f.Name)
|
||||
|
||||
// Skip sample files
|
||||
if utils.IsSampleFile(fileName) {
|
||||
continue
|
||||
}
|
||||
if !cfg.IsAllowedFile(fileName) {
|
||||
continue
|
||||
}
|
||||
|
||||
if !cfg.IsSizeAllowed(f.Size) {
|
||||
continue
|
||||
}
|
||||
|
||||
*index++
|
||||
file := torrent.File{
|
||||
Id: strconv.Itoa(*index),
|
||||
Name: fileName,
|
||||
Size: f.Size,
|
||||
Path: currentPath,
|
||||
}
|
||||
result = append(result, file)
|
||||
}
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
func (ad *AllDebrid) GetTorrent(id string) (*torrent.Torrent, error) {
|
||||
t := &torrent.Torrent{}
|
||||
url := fmt.Sprintf("%s/magnet/status?id=%s", ad.Host, id)
|
||||
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
||||
resp, err := ad.client.MakeRequest(req)
|
||||
if err != nil {
|
||||
return t, err
|
||||
}
|
||||
var res TorrentInfoResponse
|
||||
err = json.Unmarshal(resp, &res)
|
||||
if err != nil {
|
||||
ad.logger.Info().Msgf("Error unmarshalling torrent info: %s", err)
|
||||
return t, err
|
||||
}
|
||||
data := res.Data.Magnets
|
||||
status := getAlldebridStatus(data.StatusCode)
|
||||
name := data.Filename
|
||||
t.Id = id
|
||||
t.Name = name
|
||||
t.Status = status
|
||||
t.Filename = name
|
||||
t.OriginalFilename = name
|
||||
t.Folder = name
|
||||
t.MountPath = ad.MountPath
|
||||
t.Debrid = ad.Name
|
||||
t.DownloadLinks = make(map[string]torrent.DownloadLinks)
|
||||
if status == "downloaded" {
|
||||
t.Bytes = data.Size
|
||||
|
||||
t.Progress = float64(data.Downloaded) / float64(data.Size) * 100
|
||||
t.Speed = data.DownloadSpeed
|
||||
t.Seeders = data.Seeders
|
||||
index := -1
|
||||
files := flattenFiles(data.Files, "", &index)
|
||||
t.Files = files
|
||||
}
|
||||
return t, nil
|
||||
}
|
||||
|
||||
func (ad *AllDebrid) CheckStatus(torrent *torrent.Torrent, isSymlink bool) (*torrent.Torrent, error) {
|
||||
for {
|
||||
tb, err := ad.GetTorrent(torrent.Id)
|
||||
|
||||
torrent = tb
|
||||
|
||||
if err != nil || tb == nil {
|
||||
return tb, err
|
||||
}
|
||||
status := torrent.Status
|
||||
if status == "downloaded" {
|
||||
ad.logger.Info().Msgf("Torrent: %s downloaded", torrent.Name)
|
||||
if !isSymlink {
|
||||
err = ad.GetDownloadLinks(torrent)
|
||||
if err != nil {
|
||||
return torrent, err
|
||||
}
|
||||
}
|
||||
break
|
||||
} else if slices.Contains(ad.GetDownloadingStatus(), status) {
|
||||
if !ad.DownloadUncached {
|
||||
return torrent, fmt.Errorf("torrent: %s not cached", torrent.Name)
|
||||
}
|
||||
// Break out of the loop if the torrent is downloading.
|
||||
// This is necessary to prevent infinite loop since we moved to sync downloading and async processing
|
||||
break
|
||||
} else {
|
||||
return torrent, fmt.Errorf("torrent: %s has error", torrent.Name)
|
||||
}
|
||||
|
||||
}
|
||||
return torrent, nil
|
||||
}
|
||||
|
||||
func (ad *AllDebrid) DeleteTorrent(torrent *torrent.Torrent) {
|
||||
url := fmt.Sprintf("%s/magnet/delete?id=%s", ad.Host, torrent.Id)
|
||||
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
||||
_, err := ad.client.MakeRequest(req)
|
||||
if err == nil {
|
||||
ad.logger.Info().Msgf("Torrent: %s deleted", torrent.Name)
|
||||
} else {
|
||||
ad.logger.Info().Msgf("Error deleting torrent: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func (ad *AllDebrid) GetDownloadLinks(t *torrent.Torrent) error {
|
||||
downloadLinks := make(map[string]torrent.DownloadLinks)
|
||||
for _, file := range t.Files {
|
||||
url := fmt.Sprintf("%s/link/unlock", ad.Host)
|
||||
query := gourl.Values{}
|
||||
query.Add("link", file.Link)
|
||||
url += "?" + query.Encode()
|
||||
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
||||
resp, err := ad.client.MakeRequest(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var data DownloadLink
|
||||
if err = json.Unmarshal(resp, &data); err != nil {
|
||||
return err
|
||||
}
|
||||
link := data.Data.Link
|
||||
|
||||
dl := torrent.DownloadLinks{
|
||||
Link: file.Link,
|
||||
Filename: data.Data.Filename,
|
||||
DownloadLink: link,
|
||||
}
|
||||
downloadLinks[file.Id] = dl
|
||||
}
|
||||
t.DownloadLinks = downloadLinks
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ad *AllDebrid) GetDownloadLink(t *torrent.Torrent, file *torrent.File) *torrent.DownloadLinks {
|
||||
url := fmt.Sprintf("%s/link/unlock", ad.Host)
|
||||
query := gourl.Values{}
|
||||
query.Add("link", file.Link)
|
||||
url += "?" + query.Encode()
|
||||
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
||||
resp, err := ad.client.MakeRequest(req)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
var data DownloadLink
|
||||
if err = json.Unmarshal(resp, &data); err != nil {
|
||||
return nil
|
||||
}
|
||||
link := data.Data.Link
|
||||
return &torrent.DownloadLinks{
|
||||
DownloadLink: link,
|
||||
Link: file.Link,
|
||||
Filename: data.Data.Filename,
|
||||
}
|
||||
}
|
||||
|
||||
func (ad *AllDebrid) GetCheckCached() bool {
|
||||
return ad.CheckCached
|
||||
}
|
||||
|
||||
func (ad *AllDebrid) GetTorrents() ([]*torrent.Torrent, error) {
|
||||
return nil, fmt.Errorf("not implemented")
|
||||
}
|
||||
|
||||
func (ad *AllDebrid) GetDownloadingStatus() []string {
|
||||
return []string{"downloading"}
|
||||
}
|
||||
|
||||
func New(dc config.Debrid, cache *cache.Cache) *AllDebrid {
|
||||
rl := request.ParseRateLimit(dc.RateLimit)
|
||||
headers := map[string]string{
|
||||
"Authorization": fmt.Sprintf("Bearer %s", dc.APIKey),
|
||||
}
|
||||
client := request.NewRLHTTPClient(rl, headers)
|
||||
return &AllDebrid{
|
||||
Name: "alldebrid",
|
||||
Host: dc.Host,
|
||||
APIKey: dc.APIKey,
|
||||
DownloadUncached: dc.DownloadUncached,
|
||||
client: client,
|
||||
cache: cache,
|
||||
MountPath: dc.Folder,
|
||||
logger: logger.NewLogger(dc.Name, config.GetConfig().LogLevel, os.Stdout),
|
||||
CheckCached: dc.CheckCached,
|
||||
}
|
||||
}
|
||||
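For reference, a sketch of constructing the AllDebrid client directly; the config values are placeholders, and the global config is assumed to be loaded already (alldebrid.New reads the log level from it).

```go
package main

import (
	"fmt"

	"github.com/sirrobot01/debrid-blackhole/internal/cache"
	"github.com/sirrobot01/debrid-blackhole/internal/config"
	"github.com/sirrobot01/debrid-blackhole/pkg/debrid/alldebrid"
)

func main() {
	// Placeholder values; in the app this struct comes from the loaded config.
	dc := config.Debrid{
		Name:   "alldebrid",
		Host:   "https://api.alldebrid.com/v4",
		APIKey: "example-api-key",
		Folder: "/mnt/alldebrid/",
	}
	ad := alldebrid.New(dc, cache.New(1000))
	fmt.Println(ad.GetName()) // alldebrid
}
```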
75 pkg/debrid/alldebrid/types.go Normal file @@ -0,0 +1,75 @@
|
||||
package alldebrid
|
||||
|
||||
type errorResponse struct {
|
||||
Code string `json:"code"`
|
||||
Message string `json:"message"`
|
||||
}
|
||||
|
||||
type MagnetFile struct {
|
||||
Name string `json:"n"`
|
||||
Size int64 `json:"s"`
|
||||
Link string `json:"l"`
|
||||
Elements []MagnetFile `json:"e"`
|
||||
}
|
||||
type magnetInfo struct {
|
||||
Id int `json:"id"`
|
||||
Filename string `json:"filename"`
|
||||
Size int64 `json:"size"`
|
||||
Hash string `json:"hash"`
|
||||
Status string `json:"status"`
|
||||
StatusCode int `json:"statusCode"`
|
||||
UploadDate int `json:"uploadDate"`
|
||||
Downloaded int64 `json:"downloaded"`
|
||||
Uploaded int64 `json:"uploaded"`
|
||||
DownloadSpeed int64 `json:"downloadSpeed"`
|
||||
UploadSpeed int64 `json:"uploadSpeed"`
|
||||
Seeders int `json:"seeders"`
|
||||
CompletionDate int `json:"completionDate"`
|
||||
Type string `json:"type"`
|
||||
Notified bool `json:"notified"`
|
||||
Version int `json:"version"`
|
||||
NbLinks int `json:"nbLinks"`
|
||||
Files []MagnetFile `json:"files"`
|
||||
}
|
||||
|
||||
type TorrentInfoResponse struct {
|
||||
Status string `json:"status"`
|
||||
Data struct {
|
||||
Magnets magnetInfo `json:"magnets"`
|
||||
} `json:"data"`
|
||||
Error *errorResponse `json:"error"`
|
||||
}
|
||||
|
||||
type UploadMagnetResponse struct {
|
||||
Status string `json:"status"`
|
||||
Data struct {
|
||||
Magnets []struct {
|
||||
Magnet string `json:"magnet"`
|
||||
Hash string `json:"hash"`
|
||||
Name string `json:"name"`
|
||||
FilenameOriginal string `json:"filename_original"`
|
||||
Size int64 `json:"size"`
|
||||
Ready bool `json:"ready"`
|
||||
ID int `json:"id"`
|
||||
} `json:"magnets"`
|
||||
}
|
||||
Error *errorResponse `json:"error"`
|
||||
}
|
||||
|
||||
type DownloadLink struct {
|
||||
Status string `json:"status"`
|
||||
Data struct {
|
||||
Link string `json:"link"`
|
||||
Host string `json:"host"`
|
||||
Filename string `json:"filename"`
|
||||
Streaming []interface{} `json:"streaming"`
|
||||
Paws bool `json:"paws"`
|
||||
Filesize int `json:"filesize"`
|
||||
Id string `json:"id"`
|
||||
Path []struct {
|
||||
Name string `json:"n"`
|
||||
Size int `json:"s"`
|
||||
} `json:"path"`
|
||||
} `json:"data"`
|
||||
Error *errorResponse `json:"error"`
|
||||
}
|
||||
360 pkg/debrid/cache/cache.go vendored Normal file @@ -0,0 +1,360 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/sirrobot01/debrid-blackhole/internal/logger"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/sirrobot01/debrid-blackhole/internal/config"
|
||||
"github.com/sirrobot01/debrid-blackhole/pkg/debrid/engine"
|
||||
"github.com/sirrobot01/debrid-blackhole/pkg/debrid/torrent"
|
||||
)
|
||||
|
||||
type DownloadLinkCache struct {
|
||||
Link string `json:"download_link"`
|
||||
}
|
||||
|
||||
type CachedTorrent struct {
|
||||
*torrent.Torrent
|
||||
LastRead time.Time `json:"last_read"`
|
||||
IsComplete bool `json:"is_complete"`
|
||||
DownloadLinks map[string]DownloadLinkCache `json:"download_links"`
|
||||
}
|
||||
|
||||
var (
|
||||
_logInstance zerolog.Logger
|
||||
once sync.Once
|
||||
)
|
||||
|
||||
func getLogger() zerolog.Logger {
|
||||
once.Do(func() {
|
||||
_logInstance = logger.NewLogger("cache", "info", os.Stdout)
|
||||
})
|
||||
return _logInstance
|
||||
}
|
||||
|
||||
type Cache struct {
|
||||
dir string
|
||||
client engine.Service
|
||||
torrents *sync.Map // key: torrent.Id, value: *CachedTorrent
|
||||
torrentsNames *sync.Map // key: torrent.Name, value: torrent.Id
|
||||
LastUpdated time.Time `json:"last_updated"`
|
||||
}
|
||||
|
||||
type Manager struct {
|
||||
caches map[string]*Cache
|
||||
}
|
||||
|
||||
func NewManager(debridService *engine.Engine) *Manager {
|
||||
cfg := config.GetConfig()
|
||||
cm := &Manager{
|
||||
caches: make(map[string]*Cache),
|
||||
}
|
||||
for _, debrid := range debridService.GetDebrids() {
|
||||
c := New(debrid, cfg.Path)
|
||||
cm.caches[debrid.GetName()] = c
|
||||
}
|
||||
return cm
|
||||
}
|
||||
|
||||
func (m *Manager) GetCaches() map[string]*Cache {
|
||||
return m.caches
|
||||
}
|
||||
|
||||
func (m *Manager) GetCache(debridName string) *Cache {
|
||||
return m.caches[debridName]
|
||||
}
|
||||
|
||||
func New(debridService engine.Service, basePath string) *Cache {
|
||||
return &Cache{
|
||||
dir: filepath.Join(basePath, "cache", debridService.GetName(), "torrents"),
|
||||
torrents: &sync.Map{},
|
||||
torrentsNames: &sync.Map{},
|
||||
client: debridService,
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Cache) Start() error {
|
||||
_logger := getLogger()
|
||||
_logger.Info().Msg("Starting cache for: " + c.client.GetName())
|
||||
if err := c.Load(); err != nil {
|
||||
return fmt.Errorf("failed to load cache: %v", err)
|
||||
}
|
||||
if err := c.Sync(); err != nil {
|
||||
return fmt.Errorf("failed to sync cache: %v", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Cache) Load() error {
|
||||
_logger := getLogger()
|
||||
|
||||
if err := os.MkdirAll(c.dir, 0755); err != nil {
|
||||
return fmt.Errorf("failed to create cache directory: %w", err)
|
||||
}
|
||||
|
||||
files, err := os.ReadDir(c.dir)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to read cache directory: %w", err)
|
||||
}
|
||||
|
||||
for _, file := range files {
|
||||
if file.IsDir() || filepath.Ext(file.Name()) != ".json" {
|
||||
continue
|
||||
}
|
||||
|
||||
filePath := filepath.Join(c.dir, file.Name())
|
||||
data, err := os.ReadFile(filePath)
|
||||
if err != nil {
|
||||
_logger.Debug().Err(err).Msgf("Failed to read file: %s", filePath)
|
||||
continue
|
||||
}
|
||||
|
||||
var ct CachedTorrent
|
||||
if err := json.Unmarshal(data, &ct); err != nil {
|
||||
_logger.Debug().Err(err).Msgf("Failed to unmarshal file: %s", filePath)
|
||||
continue
|
||||
}
|
||||
if len(ct.Files) > 0 {
|
||||
c.torrents.Store(ct.Torrent.Id, &ct)
|
||||
c.torrentsNames.Store(ct.Torrent.Name, ct.Torrent.Id)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Cache) GetTorrent(id string) *CachedTorrent {
|
||||
if value, ok := c.torrents.Load(id); ok {
|
||||
return value.(*CachedTorrent)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Cache) GetTorrentByName(name string) *CachedTorrent {
|
||||
if id, ok := c.torrentsNames.Load(name); ok {
|
||||
return c.GetTorrent(id.(string))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Cache) SaveTorrent(ct *CachedTorrent) error {
|
||||
data, err := json.MarshalIndent(ct, "", " ")
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to marshal torrent: %w", err)
|
||||
}
|
||||
|
||||
fileName := ct.Torrent.Id + ".json"
|
||||
filePath := filepath.Join(c.dir, fileName)
|
||||
tmpFile := filePath + ".tmp"
|
||||
|
||||
f, err := os.Create(tmpFile)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create temp file: %w", err)
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
w := bufio.NewWriter(f)
|
||||
if _, err := w.Write(data); err != nil {
|
||||
return fmt.Errorf("failed to write data: %w", err)
|
||||
}
|
||||
|
||||
if err := w.Flush(); err != nil {
|
||||
return fmt.Errorf("failed to flush data: %w", err)
|
||||
}
|
||||
|
||||
return os.Rename(tmpFile, filePath)
|
||||
}
|
||||
|
||||
func (c *Cache) SaveAll() error {
|
||||
const batchSize = 100
|
||||
var wg sync.WaitGroup
|
||||
_logger := getLogger()
|
||||
|
||||
tasks := make(chan *CachedTorrent, batchSize)
|
||||
|
||||
for i := 0; i < runtime.NumCPU(); i++ {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
for ct := range tasks {
|
||||
if err := c.SaveTorrent(ct); err != nil {
|
||||
_logger.Error().Err(err).Msg("failed to save torrent")
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
c.torrents.Range(func(_, value interface{}) bool {
|
||||
tasks <- value.(*CachedTorrent)
|
||||
return true
|
||||
})
|
||||
|
||||
close(tasks)
|
||||
wg.Wait()
|
||||
c.LastUpdated = time.Now()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Cache) Sync() error {
|
||||
_logger := getLogger()
|
||||
torrents, err := c.client.GetTorrents()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to sync torrents: %v", err)
|
||||
}
|
||||
|
||||
workers := runtime.NumCPU() * 200
|
||||
workChan := make(chan *torrent.Torrent, len(torrents))
|
||||
errChan := make(chan error, len(torrents))
|
||||
|
||||
var wg sync.WaitGroup
|
||||
|
||||
for i := 0; i < workers; i++ {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
for t := range workChan {
|
||||
if err := c.processTorrent(t); err != nil {
|
||||
errChan <- err
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
for _, t := range torrents {
|
||||
workChan <- t
|
||||
}
|
||||
close(workChan)
|
||||
|
||||
wg.Wait()
|
||||
close(errChan)
|
||||
|
||||
for err := range errChan {
|
||||
_logger.Error().Err(err).Msg("sync error")
|
||||
}
|
||||
|
||||
_logger.Info().Msgf("Synced %d torrents", len(torrents))
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Cache) processTorrent(t *torrent.Torrent) error {
|
||||
if existing, ok := c.torrents.Load(t.Id); ok {
|
||||
ct := existing.(*CachedTorrent)
|
||||
if ct.IsComplete {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
c.AddTorrent(t)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Cache) AddTorrent(t *torrent.Torrent) {
|
||||
_logger := getLogger()
|
||||
|
||||
if len(t.Files) == 0 {
|
||||
tNew, err := c.client.GetTorrent(t.Id)
|
||||
_logger.Debug().Msgf("Getting torrent files for %s", t.Id)
|
||||
if err != nil {
|
||||
_logger.Debug().Msgf("Failed to get torrent files for %s: %v", t.Id, err)
|
||||
return
|
||||
}
|
||||
t = tNew
|
||||
}
|
||||
|
||||
if len(t.Files) == 0 {
|
||||
_logger.Debug().Msgf("No files found for %s", t.Id)
|
||||
return
|
||||
}
|
||||
|
||||
ct := &CachedTorrent{
|
||||
Torrent: t,
|
||||
LastRead: time.Now(),
|
||||
IsComplete: len(t.Files) > 0,
|
||||
DownloadLinks: make(map[string]DownloadLinkCache),
|
||||
}
|
||||
|
||||
c.torrents.Store(t.Id, ct)
|
||||
c.torrentsNames.Store(t.Name, t.Id)
|
||||
|
||||
go func() {
|
||||
if err := c.SaveTorrent(ct); err != nil {
|
||||
_logger.Debug().Err(err).Msgf("Failed to save torrent %s", t.Id)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
func (c *Cache) RefreshTorrent(torrentId string) *CachedTorrent {
|
||||
_logger := getLogger()
|
||||
|
||||
t, err := c.client.GetTorrent(torrentId)
|
||||
if err != nil {
|
||||
_logger.Debug().Msgf("Failed to get torrent files for %s: %v", torrentId, err)
|
||||
return nil
|
||||
}
|
||||
if len(t.Files) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
ct := &CachedTorrent{
|
||||
Torrent: t,
|
||||
LastRead: time.Now(),
|
||||
IsComplete: len(t.Files) > 0,
|
||||
DownloadLinks: make(map[string]DownloadLinkCache),
|
||||
}
|
||||
|
||||
c.torrents.Store(t.Id, ct)
|
||||
c.torrentsNames.Store(t.Name, t.Id)
|
||||
|
||||
go func() {
|
||||
if err := c.SaveTorrent(ct); err != nil {
|
||||
_logger.Debug().Err(err).Msgf("Failed to save torrent %s", t.Id)
|
||||
}
|
||||
}()
|
||||
|
||||
return ct
|
||||
}
|
||||
|
||||
func (c *Cache) GetFileDownloadLink(t *CachedTorrent, file *torrent.File) (string, error) {
|
||||
_logger := getLogger()
|
||||
|
||||
if linkCache, ok := t.DownloadLinks[file.Id]; ok {
|
||||
return linkCache.Link, nil
|
||||
}
|
||||
|
||||
if file.Link == "" {
|
||||
t = c.RefreshTorrent(t.Id)
|
||||
if t == nil {
|
||||
return "", fmt.Errorf("torrent not found")
|
||||
}
|
||||
file = t.Torrent.GetFile(file.Id)
|
||||
}
|
||||
|
||||
_logger.Debug().Msgf("Getting download link for %s", t.Name)
|
||||
link := c.client.GetDownloadLink(t.Torrent, file)
|
||||
if link == nil {
|
||||
return "", fmt.Errorf("download link not found")
|
||||
}
|
||||
|
||||
t.DownloadLinks[file.Id] = DownloadLinkCache{
|
||||
Link: link.DownloadLink,
|
||||
}
|
||||
|
||||
go func() {
|
||||
if err := c.SaveTorrent(t); err != nil {
|
||||
_logger.Debug().Err(err).Msgf("Failed to save torrent %s", t.Id)
|
||||
}
|
||||
}()
|
||||
|
||||
return link.DownloadLink, nil
|
||||
}
|
||||
|
||||
func (c *Cache) GetTorrents() *sync.Map {
|
||||
return c.torrents
|
||||
}
|
||||
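For reference, a sketch of bootstrapping the per-debrid caches; it assumes the global config has already been loaded, and the release name is a placeholder.

```go
package main

import (
	"fmt"
	"log"

	"github.com/sirrobot01/debrid-blackhole/pkg/debrid"
	"github.com/sirrobot01/debrid-blackhole/pkg/debrid/cache"
)

func main() {
	eng := debrid.New()
	manager := cache.NewManager(eng)

	for name, c := range manager.GetCaches() {
		// Start loads the on-disk JSON snapshots, then syncs against the provider.
		if err := c.Start(); err != nil {
			log.Printf("cache %s failed to start: %v", name, err)
			continue
		}
		if t := c.GetTorrentByName("Some.Release.2160p.WEB-DL"); t != nil {
			fmt.Println(t.Id, t.IsComplete)
		}
	}
}
```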
93 pkg/debrid/debrid.go Normal file @@ -0,0 +1,93 @@
|
||||
package debrid
|
||||
|
||||
import (
|
||||
"cmp"
|
||||
"fmt"
|
||||
"github.com/sirrobot01/debrid-blackhole/internal/cache"
|
||||
"github.com/sirrobot01/debrid-blackhole/internal/config"
|
||||
"github.com/sirrobot01/debrid-blackhole/internal/utils"
|
||||
"github.com/sirrobot01/debrid-blackhole/pkg/arr"
|
||||
"github.com/sirrobot01/debrid-blackhole/pkg/debrid/alldebrid"
|
||||
"github.com/sirrobot01/debrid-blackhole/pkg/debrid/debrid_link"
|
||||
"github.com/sirrobot01/debrid-blackhole/pkg/debrid/engine"
|
||||
"github.com/sirrobot01/debrid-blackhole/pkg/debrid/realdebrid"
|
||||
"github.com/sirrobot01/debrid-blackhole/pkg/debrid/torbox"
|
||||
"github.com/sirrobot01/debrid-blackhole/pkg/debrid/torrent"
|
||||
)
|
||||
|
||||
func New() *engine.Engine {
|
||||
cfg := config.GetConfig()
|
||||
maxCachedSize := cmp.Or(cfg.MaxCacheSize, 1000)
|
||||
debrids := make([]engine.Service, 0)
|
||||
// Divide the cache size by the number of debrids
|
||||
maxCacheSize := maxCachedSize / len(cfg.Debrids)
|
||||
|
||||
for _, dc := range cfg.Debrids {
|
||||
d := createDebrid(dc, cache.New(maxCacheSize))
|
||||
logger := d.GetLogger()
|
||||
logger.Info().Msg("Debrid Service started")
|
||||
debrids = append(debrids, d)
|
||||
}
|
||||
d := &engine.Engine{Debrids: debrids, LastUsed: 0}
|
||||
return d
|
||||
}
|
||||
|
||||
func createDebrid(dc config.Debrid, cache *cache.Cache) engine.Service {
|
||||
switch dc.Name {
|
||||
case "realdebrid":
|
||||
return realdebrid.New(dc, cache)
|
||||
case "torbox":
|
||||
return torbox.New(dc, cache)
|
||||
case "debridlink":
|
||||
return debrid_link.New(dc, cache)
|
||||
case "alldebrid":
|
||||
return alldebrid.New(dc, cache)
|
||||
default:
|
||||
return realdebrid.New(dc, cache)
|
||||
}
|
||||
}
|
||||
|
||||
func ProcessTorrent(d *engine.Engine, magnet *utils.Magnet, a *arr.Arr, isSymlink bool) (*torrent.Torrent, error) {
|
||||
debridTorrent := &torrent.Torrent{
|
||||
InfoHash: magnet.InfoHash,
|
||||
Magnet: magnet,
|
||||
Name: magnet.Name,
|
||||
Arr: a,
|
||||
Size: magnet.Size,
|
||||
}
|
||||
|
||||
errs := make([]error, 0)
|
||||
|
||||
for index, db := range d.Debrids {
|
||||
logger := db.GetLogger()
|
||||
logger.Info().Msgf("Processing debrid: %s", db.GetName())
|
||||
|
||||
logger.Info().Msgf("Torrent Hash: %s", debridTorrent.InfoHash)
|
||||
if db.GetCheckCached() {
|
||||
hash, exists := db.IsAvailable([]string{debridTorrent.InfoHash})[debridTorrent.InfoHash]
|
||||
if !exists || !hash {
|
||||
logger.Info().Msgf("Torrent: %s is not cached", debridTorrent.Name)
|
||||
continue
|
||||
} else {
|
||||
logger.Info().Msgf("Torrent: %s is cached(or downloading)", debridTorrent.Name)
|
||||
}
|
||||
}
|
||||
|
||||
dbt, err := db.SubmitMagnet(debridTorrent)
|
||||
if dbt != nil {
|
||||
dbt.Arr = a
|
||||
}
|
||||
if err != nil || dbt == nil || dbt.Id == "" {
|
||||
errs = append(errs, err)
|
||||
continue
|
||||
}
|
||||
logger.Info().Msgf("Torrent: %s(id=%s) submitted to %s", dbt.Name, dbt.Id, db.GetName())
|
||||
d.LastUsed = index
|
||||
return db.CheckStatus(dbt, isSymlink)
|
||||
}
|
||||
err := fmt.Errorf("failed to process torrent")
|
||||
for _, e := range errs {
|
||||
err = fmt.Errorf("%w\n%w", err, e)
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
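For reference, a sketch of submitting a magnet through the engine; only the magnet fields that the processing code reads are filled in, and the hash and other values are placeholders.

```go
package main

import (
	"log"

	"github.com/sirrobot01/debrid-blackhole/internal/utils"
	"github.com/sirrobot01/debrid-blackhole/pkg/arr"
	"github.com/sirrobot01/debrid-blackhole/pkg/debrid"
)

func main() {
	// Assumes the global config is already loaded.
	eng := debrid.New()
	a := arr.New("sonarr", "http://localhost:8989", "example-api-key", false)

	magnet := &utils.Magnet{
		InfoHash: "0123456789abcdef0123456789abcdef01234567",
		Name:     "Some.Release.2160p.WEB-DL",
		Link:     "magnet:?xt=urn:btih:0123456789abcdef0123456789abcdef01234567",
	}

	// ProcessTorrent tries each configured debrid in turn, optionally checking
	// cache availability first, and returns after the first successful submit.
	t, err := debrid.ProcessTorrent(eng, magnet, a, true)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("submitted %s to %s (id=%s)", t.Name, t.Debrid, t.Id)
}
```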
297 pkg/debrid/debrid_link/debrid_link.go Normal file @@ -0,0 +1,297 @@
|
||||
package debrid_link
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/sirrobot01/debrid-blackhole/internal/cache"
|
||||
"github.com/sirrobot01/debrid-blackhole/internal/config"
|
||||
"github.com/sirrobot01/debrid-blackhole/internal/logger"
|
||||
"github.com/sirrobot01/debrid-blackhole/internal/request"
|
||||
"github.com/sirrobot01/debrid-blackhole/internal/utils"
|
||||
"github.com/sirrobot01/debrid-blackhole/pkg/debrid/torrent"
|
||||
"slices"
|
||||
|
||||
"net/http"
|
||||
"os"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type DebridLink struct {
|
||||
Name string
|
||||
Host string `json:"host"`
|
||||
APIKey string
|
||||
DownloadUncached bool
|
||||
client *request.RLHTTPClient
|
||||
cache *cache.Cache
|
||||
MountPath string
|
||||
logger zerolog.Logger
|
||||
CheckCached bool
|
||||
}
|
||||
|
||||
func (dl *DebridLink) GetName() string {
|
||||
return dl.Name
|
||||
}
|
||||
|
||||
func (dl *DebridLink) GetLogger() zerolog.Logger {
|
||||
return dl.logger
|
||||
}
|
||||
|
||||
func (dl *DebridLink) IsAvailable(infohashes []string) map[string]bool {
|
||||
// Check if the infohashes are available in the local cache
|
||||
hashes, result := torrent.GetLocalCache(infohashes, dl.cache)
|
||||
|
||||
if len(hashes) == 0 {
|
||||
// Either all the infohashes are locally cached or none are
|
||||
dl.cache.AddMultiple(result)
|
||||
return result
|
||||
}
|
||||
|
||||
// Divide hashes into groups of 100
|
||||
for i := 0; i < len(hashes); i += 100 {
|
||||
end := i + 100
|
||||
if end > len(hashes) {
|
||||
end = len(hashes)
|
||||
}
|
||||
|
||||
// Filter out empty strings
|
||||
validHashes := make([]string, 0, end-i)
|
||||
for _, hash := range hashes[i:end] {
|
||||
if hash != "" {
|
||||
validHashes = append(validHashes, hash)
|
||||
}
|
||||
}
|
||||
|
||||
// If no valid hashes in this batch, continue to the next batch
|
||||
if len(validHashes) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
hashStr := strings.Join(validHashes, ",")
|
||||
url := fmt.Sprintf("%s/seedbox/cached/%s", dl.Host, hashStr)
|
||||
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
||||
resp, err := dl.client.MakeRequest(req)
|
||||
if err != nil {
|
||||
dl.logger.Info().Msgf("Error checking availability: %v", err)
|
||||
return result
|
||||
}
|
||||
var data AvailableResponse
|
||||
err = json.Unmarshal(resp, &data)
|
||||
if err != nil {
|
||||
dl.logger.Info().Msgf("Error marshalling availability: %v", err)
|
||||
return result
|
||||
}
|
||||
if data.Value == nil {
|
||||
return result
|
||||
}
|
||||
value := *data.Value
|
||||
for _, h := range hashes[i:end] {
|
||||
_, exists := value[h]
|
||||
if exists {
|
||||
result[h] = true
|
||||
}
|
||||
}
|
||||
}
|
||||
dl.cache.AddMultiple(result) // Add the results to the cache
|
||||
return result
|
||||
}
|
||||
|
||||
func (dl *DebridLink) GetTorrent(id string) (*torrent.Torrent, error) {
|
||||
t := &torrent.Torrent{}
|
||||
url := fmt.Sprintf("%s/seedbox/list?ids=%s", dl.Host, id)
|
||||
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
||||
resp, err := dl.client.MakeRequest(req)
|
||||
if err != nil {
|
||||
return t, err
|
||||
}
|
||||
var res TorrentInfo
|
||||
err = json.Unmarshal(resp, &res)
|
||||
if err != nil {
|
||||
return t, err
|
||||
}
|
||||
if !res.Success {
|
||||
return t, fmt.Errorf("error getting torrent")
|
||||
}
|
||||
if res.Value == nil {
|
||||
return t, fmt.Errorf("torrent not found")
|
||||
}
|
||||
dt := *res.Value
|
||||
|
||||
if len(dt) == 0 {
|
||||
return t, fmt.Errorf("torrent not found")
|
||||
}
|
||||
data := dt[0]
|
||||
status := "downloading"
|
||||
if data.Status == 100 {
|
||||
status = "downloaded"
|
||||
}
|
||||
name := utils.RemoveInvalidChars(data.Name)
|
||||
t.Id = data.ID
|
||||
t.Name = name
|
||||
t.Bytes = data.TotalSize
|
||||
t.Folder = name
|
||||
t.Progress = data.DownloadPercent
|
||||
t.Status = status
|
||||
t.Speed = data.DownloadSpeed
|
||||
t.Seeders = data.PeersConnected
|
||||
t.Filename = name
|
||||
t.OriginalFilename = name
|
||||
files := make([]torrent.File, 0, len(data.Files))
|
||||
cfg := config.GetConfig()
|
||||
for _, f := range data.Files {
|
||||
if !cfg.IsSizeAllowed(f.Size) {
|
||||
continue
|
||||
}
|
||||
files = append(files, torrent.File{
|
||||
Id: f.ID,
|
||||
Name: f.Name,
|
||||
Size: f.Size,
|
||||
Path: f.Name,
|
||||
})
|
||||
}
|
||||
t.Files = files
|
||||
return t, nil
|
||||
}
|
||||
|
||||
func (dl *DebridLink) SubmitMagnet(t *torrent.Torrent) (*torrent.Torrent, error) {
|
||||
url := fmt.Sprintf("%s/seedbox/add", dl.Host)
|
||||
payload := map[string]string{"url": t.Magnet.Link}
|
||||
jsonPayload, _ := json.Marshal(payload)
|
||||
req, _ := http.NewRequest(http.MethodPost, url, bytes.NewBuffer(jsonPayload))
|
||||
resp, err := dl.client.MakeRequest(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var res SubmitTorrentInfo
|
||||
err = json.Unmarshal(resp, &res)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !res.Success || res.Value == nil {
|
||||
return nil, fmt.Errorf("error adding torrent")
|
||||
}
|
||||
data := *res.Value
|
||||
status := "downloading"
|
||||
name := utils.RemoveInvalidChars(data.Name)
|
||||
t.Id = data.ID
|
||||
t.Name = name
|
||||
t.Bytes = data.TotalSize
|
||||
t.Folder = name
|
||||
t.Progress = data.DownloadPercent
|
||||
t.Status = status
|
||||
t.Speed = data.DownloadSpeed
|
||||
t.Seeders = data.PeersConnected
|
||||
t.Filename = name
|
||||
t.OriginalFilename = name
|
||||
t.MountPath = dl.MountPath
|
||||
t.Debrid = dl.Name
|
||||
t.DownloadLinks = make(map[string]torrent.DownloadLinks)
|
||||
files := make([]torrent.File, len(data.Files))
|
||||
for i, f := range data.Files {
|
||||
files[i] = torrent.File{
|
||||
Id: f.ID,
|
||||
Name: f.Name,
|
||||
Size: f.Size,
|
||||
Path: f.Name,
|
||||
Link: f.DownloadURL,
|
||||
}
|
||||
}
|
||||
t.Files = files
|
||||
|
||||
return t, nil
|
||||
}
|
||||
|
||||
func (dl *DebridLink) CheckStatus(torrent *torrent.Torrent, isSymlink bool) (*torrent.Torrent, error) {
|
||||
for {
|
||||
t, err := dl.GetTorrent(torrent.Id)
|
||||
torrent = t
|
||||
if err != nil || torrent == nil {
|
||||
return torrent, err
|
||||
}
|
||||
status := torrent.Status
|
||||
if status == "downloaded" {
|
||||
dl.logger.Info().Msgf("Torrent: %s downloaded", torrent.Name)
|
||||
err = dl.GetDownloadLinks(torrent)
|
||||
if err != nil {
|
||||
return torrent, err
|
||||
}
|
||||
break
|
||||
} else if slices.Contains(dl.GetDownloadingStatus(), status) {
|
||||
if !dl.DownloadUncached {
|
||||
return torrent, fmt.Errorf("torrent: %s not cached", torrent.Name)
|
||||
}
|
||||
// Break out of the loop if the torrent is downloading.
|
||||
// This is necessary to prevent infinite loop since we moved to sync downloading and async processing
|
||||
break
|
||||
} else {
|
||||
return torrent, fmt.Errorf("torrent: %s has error", torrent.Name)
|
||||
}
|
||||
|
||||
}
|
||||
return torrent, nil
|
||||
}
|
||||
|
||||
func (dl *DebridLink) DeleteTorrent(torrent *torrent.Torrent) {
|
||||
url := fmt.Sprintf("%s/seedbox/%s/remove", dl.Host, torrent.Id)
|
||||
req, _ := http.NewRequest(http.MethodDelete, url, nil)
|
||||
_, err := dl.client.MakeRequest(req)
|
||||
if err == nil {
|
||||
dl.logger.Info().Msgf("Torrent: %s deleted", torrent.Name)
|
||||
} else {
|
||||
dl.logger.Info().Msgf("Error deleting torrent: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func (dl *DebridLink) GetDownloadLinks(t *torrent.Torrent) error {
|
||||
downloadLinks := make(map[string]torrent.DownloadLinks)
|
||||
for _, f := range t.Files {
|
||||
link := torrent.DownloadLinks{
Link: f.Link,
Filename: f.Name,
}
downloadLinks[f.Id] = link
|
||||
}
|
||||
t.DownloadLinks = downloadLinks
|
||||
return nil
|
||||
}
|
||||
|
||||
func (dl *DebridLink) GetDownloadLink(t *torrent.Torrent, file *torrent.File) *torrent.DownloadLinks {
|
||||
dlLink, ok := t.DownloadLinks[file.Id]
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
return &dlLink
|
||||
}
|
||||
|
||||
func (dl *DebridLink) GetDownloadingStatus() []string {
|
||||
return []string{"downloading"}
|
||||
}
|
||||
|
||||
func (dl *DebridLink) GetCheckCached() bool {
|
||||
return dl.CheckCached
|
||||
}
|
||||
|
||||
func New(dc config.Debrid, cache *cache.Cache) *DebridLink {
|
||||
rl := request.ParseRateLimit(dc.RateLimit)
|
||||
headers := map[string]string{
|
||||
"Authorization": fmt.Sprintf("Bearer %s", dc.APIKey),
|
||||
"Content-Type": "application/json",
|
||||
}
|
||||
client := request.NewRLHTTPClient(rl, headers)
|
||||
return &DebridLink{
|
||||
Name: "debridlink",
|
||||
Host: dc.Host,
|
||||
APIKey: dc.APIKey,
|
||||
DownloadUncached: dc.DownloadUncached,
|
||||
client: client,
|
||||
cache: cache,
|
||||
MountPath: dc.Folder,
|
||||
logger: logger.NewLogger(dc.Name, config.GetConfig().LogLevel, os.Stdout),
|
||||
CheckCached: dc.CheckCached,
|
||||
}
|
||||
}
|
||||
|
||||
func (dl *DebridLink) GetTorrents() ([]*torrent.Torrent, error) {
|
||||
return nil, fmt.Errorf("not implemented")
|
||||
}
|
||||
pkg/debrid/debrid_link/types.go (new file, 45 lines)
@@ -0,0 +1,45 @@
package debrid_link

type APIResponse[T any] struct {
	Success bool `json:"success"`
	Value   *T   `json:"value"` // Use pointer to allow nil
}

type AvailableResponse APIResponse[map[string]map[string]struct {
	Name       string `json:"name"`
	HashString string `json:"hashString"`
	Files      []struct {
		Name string `json:"name"`
		Size int    `json:"size"`
	} `json:"files"`
}]

type debridLinkTorrentInfo struct {
	ID             string  `json:"id"`
	Name           string  `json:"name"`
	HashString     string  `json:"hashString"`
	UploadRatio    float64 `json:"uploadRatio"`
	ServerID       string  `json:"serverId"`
	Wait           bool    `json:"wait"`
	PeersConnected int     `json:"peersConnected"`
	Status         int     `json:"status"`
	TotalSize      int64   `json:"totalSize"`
	Files          []struct {
		ID              string `json:"id"`
		Name            string `json:"name"`
		DownloadURL     string `json:"downloadUrl"`
		Size            int64  `json:"size"`
		DownloadPercent int    `json:"downloadPercent"`
	} `json:"files"`
	Trackers []struct {
		Announce string `json:"announce"`
	} `json:"trackers"`
	Created         int64   `json:"created"`
	DownloadPercent float64 `json:"downloadPercent"`
	DownloadSpeed   int64   `json:"downloadSpeed"`
	UploadSpeed     int64   `json:"uploadSpeed"`
}

type TorrentInfo APIResponse[[]debridLinkTorrentInfo]

type SubmitTorrentInfo APIResponse[debridLinkTorrentInfo]
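The generic `APIResponse[T]` envelope is what every Debrid-Link call in `debrid_link.go` decodes into; the nil-able `Value` pointer is how callers such as `GetTorrent` distinguish "request succeeded but returned nothing" from a populated result. A minimal sketch of that decode path, written as a hypothetical test that is not part of this diff and uses an illustrative JSON payload rather than a captured API response:

```go
package debrid_link

import (
	"encoding/json"
	"testing"
)

// Hypothetical test showing how a success envelope decodes into TorrentInfo
// and how the nil-able Value is checked before use.
func TestTorrentInfoDecode(t *testing.T) {
	raw := []byte(`{"success":true,"value":[{"id":"abc","name":"example","status":100,"totalSize":1024}]}`)
	var info TorrentInfo
	if err := json.Unmarshal(raw, &info); err != nil {
		t.Fatal(err)
	}
	if !info.Success || info.Value == nil || len(*info.Value) != 1 || (*info.Value)[0].Name != "example" {
		t.Fatalf("unexpected decode result: %+v", info)
	}
}
```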
pkg/debrid/engine/engine.go (new file, 26 lines)
@@ -0,0 +1,26 @@
package engine

type Engine struct {
	Debrids  []Service
	LastUsed int
}

func (d *Engine) Get() Service {
	if d.LastUsed == 0 {
		return d.Debrids[0]
	}
	return d.Debrids[d.LastUsed]
}

func (d *Engine) GetByName(name string) Service {
	for _, deb := range d.Debrids {
		if deb.GetName() == name {
			return deb
		}
	}
	return nil
}

func (d *Engine) GetDebrids() []Service {
	return d.Debrids
}
pkg/debrid/engine/service.go (new file, 21 lines)
@@ -0,0 +1,21 @@
package engine

import (
	"github.com/rs/zerolog"
	"github.com/sirrobot01/debrid-blackhole/pkg/debrid/torrent"
)

type Service interface {
	SubmitMagnet(tr *torrent.Torrent) (*torrent.Torrent, error)
	CheckStatus(tr *torrent.Torrent, isSymlink bool) (*torrent.Torrent, error)
	GetDownloadLinks(tr *torrent.Torrent) error
	GetDownloadLink(tr *torrent.Torrent, file *torrent.File) *torrent.DownloadLinks
	DeleteTorrent(tr *torrent.Torrent)
	IsAvailable(infohashes []string) map[string]bool
	GetCheckCached() bool
	GetTorrent(id string) (*torrent.Torrent, error)
	GetTorrents() ([]*torrent.Torrent, error)
	GetName() string
	GetLogger() zerolog.Logger
	GetDownloadingStatus() []string
}
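The `Service` interface is the seam between the engine and the individual debrid providers: anything that satisfies it can be selected through `Engine`. A hedged sketch of a consumer (the package name and the fallback policy are illustrative, not part of this diff):

```go
package example

import (
	"github.com/sirrobot01/debrid-blackhole/pkg/debrid/engine"
	"github.com/sirrobot01/debrid-blackhole/pkg/debrid/torrent"
)

// submit picks a provider by name, falls back to the engine's default,
// and drives it purely through the Service interface.
func submit(e *engine.Engine, t *torrent.Torrent) (*torrent.Torrent, error) {
	svc := e.GetByName("realdebrid")
	if svc == nil {
		svc = e.Get()
	}
	added, err := svc.SubmitMagnet(t)
	if err != nil {
		return nil, err
	}
	return svc.CheckStatus(added, false)
}
```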
pkg/debrid/realdebrid/realdebrid.go (new file, 406 lines)
@@ -0,0 +1,406 @@
|
||||
package realdebrid
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/sirrobot01/debrid-blackhole/internal/cache"
|
||||
"github.com/sirrobot01/debrid-blackhole/internal/config"
|
||||
"github.com/sirrobot01/debrid-blackhole/internal/logger"
|
||||
"github.com/sirrobot01/debrid-blackhole/internal/request"
|
||||
"github.com/sirrobot01/debrid-blackhole/internal/utils"
|
||||
"github.com/sirrobot01/debrid-blackhole/pkg/debrid/torrent"
|
||||
"net/http"
|
||||
gourl "net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"slices"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type RealDebrid struct {
|
||||
Name string
|
||||
Host string `json:"host"`
|
||||
APIKey string
|
||||
DownloadUncached bool
|
||||
client *request.RLHTTPClient
|
||||
cache *cache.Cache
|
||||
MountPath string
|
||||
logger zerolog.Logger
|
||||
CheckCached bool
|
||||
}
|
||||
|
||||
func (r *RealDebrid) GetName() string {
|
||||
return r.Name
|
||||
}
|
||||
|
||||
func (r *RealDebrid) GetLogger() zerolog.Logger {
|
||||
return r.logger
|
||||
}
|
||||
|
||||
// GetTorrentFiles returns the list of torrent files from the torrent info.
// When validate is true, files are filtered against the configured sample, name, and size rules;
// when validate is false, only the files already selected on Real-Debrid are returned.
|
||||
func GetTorrentFiles(data TorrentInfo, validate bool) []torrent.File {
|
||||
files := make([]torrent.File, 0)
|
||||
cfg := config.GetConfig()
|
||||
idx := 0
|
||||
for _, f := range data.Files {
|
||||
|
||||
name := filepath.Base(f.Path)
|
||||
|
||||
if validate {
|
||||
if utils.RegexMatch(utils.SAMPLEMATCH, name) {
|
||||
// Skip sample files
|
||||
continue
|
||||
}
|
||||
if !cfg.IsAllowedFile(name) {
|
||||
continue
|
||||
}
|
||||
if !cfg.IsSizeAllowed(f.Bytes) {
|
||||
continue
|
||||
}
|
||||
} else {
|
||||
if f.Selected == 0 {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
fileId := f.ID
|
||||
_link := ""
|
||||
if len(data.Links) > idx {
|
||||
_link = data.Links[idx]
|
||||
}
|
||||
file := torrent.File{
|
||||
Name: name,
|
||||
Path: name,
|
||||
Size: f.Bytes,
|
||||
Id: strconv.Itoa(fileId),
|
||||
Link: _link,
|
||||
}
|
||||
files = append(files, file)
|
||||
idx++
|
||||
}
|
||||
return files
|
||||
}
|
||||
|
||||
func (r *RealDebrid) IsAvailable(infohashes []string) map[string]bool {
|
||||
// Check if the infohashes are available in the local cache
|
||||
hashes, result := torrent.GetLocalCache(infohashes, r.cache)
|
||||
|
||||
if len(hashes) == 0 {
|
||||
// Every infohash is already in the local cache (or none were provided)
|
||||
r.cache.AddMultiple(result)
|
||||
return result
|
||||
}
|
||||
|
||||
// Divide hashes into batches of 200
|
||||
for i := 0; i < len(hashes); i += 200 {
|
||||
end := i + 200
|
||||
if end > len(hashes) {
|
||||
end = len(hashes)
|
||||
}
|
||||
|
||||
// Filter out empty strings
|
||||
validHashes := make([]string, 0, end-i)
|
||||
for _, hash := range hashes[i:end] {
|
||||
if hash != "" {
|
||||
validHashes = append(validHashes, hash)
|
||||
}
|
||||
}
|
||||
|
||||
// If no valid hashes in this batch, continue to the next batch
|
||||
if len(validHashes) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
hashStr := strings.Join(validHashes, "/")
|
||||
url := fmt.Sprintf("%s/torrents/instantAvailability/%s", r.Host, hashStr)
|
||||
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
||||
resp, err := r.client.MakeRequest(req)
|
||||
if err != nil {
|
||||
r.logger.Info().Msgf("Error checking availability: %v", err)
|
||||
return result
|
||||
}
|
||||
var data AvailabilityResponse
|
||||
err = json.Unmarshal(resp, &data)
|
||||
if err != nil {
|
||||
r.logger.Info().Msgf("Error marshalling availability: %v", err)
|
||||
return result
|
||||
}
|
||||
for _, h := range hashes[i:end] {
|
||||
hosters, exists := data[strings.ToLower(h)]
|
||||
if exists && len(hosters.Rd) > 0 {
|
||||
result[h] = true
|
||||
}
|
||||
}
|
||||
}
|
||||
r.cache.AddMultiple(result) // Add the results to the cache
|
||||
return result
|
||||
}
|
||||
|
||||
func (r *RealDebrid) SubmitMagnet(t *torrent.Torrent) (*torrent.Torrent, error) {
|
||||
url := fmt.Sprintf("%s/torrents/addMagnet", r.Host)
|
||||
payload := gourl.Values{
|
||||
"magnet": {t.Magnet.Link},
|
||||
}
|
||||
var data AddMagnetSchema
|
||||
req, _ := http.NewRequest(http.MethodPost, url, strings.NewReader(payload.Encode()))
|
||||
resp, err := r.client.MakeRequest(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err = json.Unmarshal(resp, &data); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
t.Id = data.Id
|
||||
t.Debrid = r.Name
|
||||
t.MountPath = r.MountPath
|
||||
return t, nil
|
||||
}
|
||||
|
||||
func (r *RealDebrid) GetTorrent(id string) (*torrent.Torrent, error) {
|
||||
t := &torrent.Torrent{}
|
||||
url := fmt.Sprintf("%s/torrents/info/%s", r.Host, id)
|
||||
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
||||
resp, err := r.client.MakeRequest(req)
|
||||
if err != nil {
|
||||
return t, err
|
||||
}
|
||||
var data TorrentInfo
|
||||
err = json.Unmarshal(resp, &data)
|
||||
if err != nil {
|
||||
return t, err
|
||||
}
|
||||
name := utils.RemoveInvalidChars(data.OriginalFilename)
|
||||
t.Id = id
|
||||
t.Name = name
|
||||
t.Bytes = data.Bytes
|
||||
t.Folder = name
|
||||
t.Progress = data.Progress
|
||||
t.Status = data.Status
|
||||
t.Speed = data.Speed
|
||||
t.Seeders = data.Seeders
|
||||
t.Filename = data.Filename
|
||||
t.OriginalFilename = data.OriginalFilename
|
||||
t.Links = data.Links
|
||||
t.MountPath = r.MountPath
|
||||
t.Debrid = r.Name
|
||||
t.DownloadLinks = make(map[string]torrent.DownloadLinks)
|
||||
files := GetTorrentFiles(data, false) // Get selected files
|
||||
t.Files = files
|
||||
return t, nil
|
||||
}
|
||||
|
||||
func (r *RealDebrid) CheckStatus(t *torrent.Torrent, isSymlink bool) (*torrent.Torrent, error) {
|
||||
url := fmt.Sprintf("%s/torrents/info/%s", r.Host, t.Id)
|
||||
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
||||
for {
|
||||
resp, err := r.client.MakeRequest(req)
|
||||
if err != nil {
|
||||
r.logger.Info().Msgf("ERROR Checking file: %v", err)
|
||||
return t, err
|
||||
}
|
||||
var data TorrentInfo
|
||||
if err = json.Unmarshal(resp, &data); err != nil {
|
||||
return t, err
|
||||
}
|
||||
status := data.Status
|
||||
name := utils.RemoveInvalidChars(data.OriginalFilename)
|
||||
t.Name = name // Important because some magnet changes the name
|
||||
t.Folder = name
|
||||
t.Filename = data.Filename
|
||||
t.OriginalFilename = data.OriginalFilename
|
||||
t.Bytes = data.Bytes
|
||||
t.Progress = data.Progress
|
||||
t.Speed = data.Speed
|
||||
t.Seeders = data.Seeders
|
||||
t.Links = data.Links
|
||||
t.Status = status
|
||||
t.Debrid = r.Name
|
||||
t.MountPath = r.MountPath
|
||||
if status == "waiting_files_selection" {
|
||||
files := GetTorrentFiles(data, true) // Validate files to be selected
|
||||
t.Files = files
|
||||
if len(files) == 0 {
|
||||
return t, fmt.Errorf("no video files found")
|
||||
}
|
||||
filesId := make([]string, 0)
|
||||
for _, f := range files {
|
||||
filesId = append(filesId, f.Id)
|
||||
}
|
||||
p := gourl.Values{
|
||||
"files": {strings.Join(filesId, ",")},
|
||||
}
|
||||
payload := strings.NewReader(p.Encode())
|
||||
req, _ := http.NewRequest(http.MethodPost, fmt.Sprintf("%s/torrents/selectFiles/%s", r.Host, t.Id), payload)
|
||||
_, err = r.client.MakeRequest(req)
|
||||
if err != nil {
|
||||
return t, err
|
||||
}
|
||||
} else if status == "downloaded" {
|
||||
files := GetTorrentFiles(data, false) // Get selected files
|
||||
t.Files = files
|
||||
r.logger.Info().Msgf("Torrent: %s downloaded to RD", t.Name)
|
||||
if !isSymlink {
|
||||
err = r.GetDownloadLinks(t)
|
||||
if err != nil {
|
||||
return t, err
|
||||
}
|
||||
}
|
||||
break
|
||||
} else if slices.Contains(r.GetDownloadingStatus(), status) {
|
||||
if !r.DownloadUncached {
|
||||
return t, fmt.Errorf("torrent: %s not cached", t.Name)
|
||||
}
|
||||
// Break out of the loop if the torrent is downloading.
|
||||
// This is necessary to prevent infinite loop since we moved to sync downloading and async processing
|
||||
break
|
||||
} else {
|
||||
return t, fmt.Errorf("torrent: %s has error: %s", t.Name, status)
|
||||
}
|
||||
|
||||
}
|
||||
return t, nil
|
||||
}
|
||||
|
||||
func (r *RealDebrid) DeleteTorrent(torrent *torrent.Torrent) {
|
||||
url := fmt.Sprintf("%s/torrents/delete/%s", r.Host, torrent.Id)
|
||||
req, _ := http.NewRequest(http.MethodDelete, url, nil)
|
||||
_, err := r.client.MakeRequest(req)
|
||||
if err == nil {
|
||||
r.logger.Info().Msgf("Torrent: %s deleted", torrent.Name)
|
||||
} else {
|
||||
r.logger.Info().Msgf("Error deleting torrent: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func (r *RealDebrid) GetDownloadLinks(t *torrent.Torrent) error {
|
||||
url := fmt.Sprintf("%s/unrestrict/link/", r.Host)
|
||||
downloadLinks := make(map[string]torrent.DownloadLinks)
|
||||
for _, f := range t.Files {
|
||||
dlLink := t.DownloadLinks[f.Id]
|
||||
if f.Link == "" || dlLink.DownloadLink != "" {
|
||||
continue
|
||||
}
|
||||
payload := gourl.Values{
|
||||
"link": {f.Link},
|
||||
}
|
||||
req, _ := http.NewRequest(http.MethodPost, url, strings.NewReader(payload.Encode()))
|
||||
resp, err := r.client.MakeRequest(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var data UnrestrictResponse
|
||||
if err = json.Unmarshal(resp, &data); err != nil {
|
||||
return err
|
||||
}
|
||||
download := torrent.DownloadLinks{
|
||||
Link: data.Link,
|
||||
Filename: data.Filename,
|
||||
DownloadLink: data.Download,
|
||||
}
|
||||
downloadLinks[f.Id] = download
|
||||
}
|
||||
t.DownloadLinks = downloadLinks
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *RealDebrid) GetDownloadLink(t *torrent.Torrent, file *torrent.File) *torrent.DownloadLinks {
|
||||
url := fmt.Sprintf("%s/unrestrict/link/", r.Host)
|
||||
payload := gourl.Values{
|
||||
"link": {file.Link},
|
||||
}
|
||||
req, _ := http.NewRequest(http.MethodPost, url, strings.NewReader(payload.Encode()))
|
||||
resp, err := r.client.MakeRequest(req)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
var data UnrestrictResponse
|
||||
if err = json.Unmarshal(resp, &data); err != nil {
|
||||
return nil
|
||||
}
|
||||
return &torrent.DownloadLinks{
|
||||
Link: data.Link,
|
||||
Filename: data.Filename,
|
||||
DownloadLink: data.Download,
|
||||
}
|
||||
}
|
||||
|
||||
func (r *RealDebrid) GetCheckCached() bool {
|
||||
return r.CheckCached
|
||||
}
|
||||
|
||||
func (r *RealDebrid) getTorrents(offset int, limit int) ([]*torrent.Torrent, error) {
|
||||
url := fmt.Sprintf("%s/torrents?limit=%d", r.Host, limit)
|
||||
if offset > 0 {
|
||||
url = fmt.Sprintf("%s&offset=%d", url, offset)
|
||||
}
|
||||
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
||||
resp, err := r.client.MakeRequest(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var data []TorrentsResponse
|
||||
if err = json.Unmarshal(resp, &data); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
torrents := make([]*torrent.Torrent, 0)
|
||||
for _, t := range data {
|
||||
|
||||
torrents = append(torrents, &torrent.Torrent{
|
||||
Id: t.Id,
|
||||
Name: t.Filename,
|
||||
Bytes: t.Bytes,
|
||||
Progress: t.Progress,
|
||||
Status: t.Status,
|
||||
Filename: t.Filename,
|
||||
OriginalFilename: t.Filename,
|
||||
Links: t.Links,
|
||||
})
|
||||
}
|
||||
return torrents, nil
|
||||
}
|
||||
|
||||
func (r *RealDebrid) GetTorrents() ([]*torrent.Torrent, error) {
|
||||
torrents := make([]*torrent.Torrent, 0)
|
||||
offset := 0
|
||||
limit := 5000
|
||||
for {
|
||||
ts, err := r.getTorrents(offset, limit)
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
if len(ts) == 0 {
|
||||
break
|
||||
}
|
||||
torrents = append(torrents, ts...)
|
||||
offset = len(torrents)
|
||||
}
|
||||
return torrents, nil
|
||||
|
||||
}
|
||||
|
||||
func (r *RealDebrid) GetDownloadingStatus() []string {
|
||||
return []string{"downloading", "magnet_conversion", "queued", "compressing", "uploading"}
|
||||
}
|
||||
|
||||
func New(dc config.Debrid, cache *cache.Cache) *RealDebrid {
|
||||
rl := request.ParseRateLimit(dc.RateLimit)
|
||||
headers := map[string]string{
|
||||
"Authorization": fmt.Sprintf("Bearer %s", dc.APIKey),
|
||||
}
|
||||
client := request.NewRLHTTPClient(rl, headers)
|
||||
return &RealDebrid{
|
||||
Name: "realdebrid",
|
||||
Host: dc.Host,
|
||||
APIKey: dc.APIKey,
|
||||
DownloadUncached: dc.DownloadUncached,
|
||||
client: client,
|
||||
cache: cache,
|
||||
MountPath: dc.Folder,
|
||||
logger: logger.NewLogger(dc.Name, config.GetConfig().LogLevel, os.Stdout),
|
||||
CheckCached: dc.CheckCached,
|
||||
}
|
||||
}
|
||||
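Because `*RealDebrid` implements every method of the `engine.Service` interface above, the constructor's result can be handed straight to the engine. A minimal wiring sketch, assuming the config and cache come from the application's existing setup (their constructors are not shown in this diff):

```go
package example

import (
	"github.com/sirrobot01/debrid-blackhole/internal/cache"
	"github.com/sirrobot01/debrid-blackhole/internal/config"
	"github.com/sirrobot01/debrid-blackhole/pkg/debrid/engine"
	"github.com/sirrobot01/debrid-blackhole/pkg/debrid/realdebrid"
)

// newRealDebridService returns the concrete client as the interface the
// engine works with; dc and c are assumed to be built elsewhere.
func newRealDebridService(dc config.Debrid, c *cache.Cache) engine.Service {
	return realdebrid.New(dc, c)
}
```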
pkg/debrid/realdebrid/types.go (new file, 122 lines)
@@ -0,0 +1,122 @@
|
||||
package realdebrid
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"time"
|
||||
)
|
||||
|
||||
type AvailabilityResponse map[string]Hoster
|
||||
|
||||
func (r *AvailabilityResponse) UnmarshalJSON(data []byte) error {
|
||||
// First, try to unmarshal as an object
|
||||
var objectData map[string]Hoster
|
||||
err := json.Unmarshal(data, &objectData)
|
||||
if err == nil {
|
||||
*r = objectData
|
||||
return nil
|
||||
}
|
||||
|
||||
// If that fails, try to unmarshal as an array
|
||||
var arrayData []map[string]Hoster
|
||||
err = json.Unmarshal(data, &arrayData)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to unmarshal as both object and array: %v", err)
|
||||
}
|
||||
|
||||
// If it's an array, use the first element
|
||||
if len(arrayData) > 0 {
|
||||
*r = arrayData[0]
|
||||
return nil
|
||||
}
|
||||
|
||||
// If it's an empty array, initialize as an empty map
|
||||
*r = make(map[string]Hoster)
|
||||
return nil
|
||||
}
|
||||
|
||||
type Hoster struct {
|
||||
Rd []map[string]FileVariant `json:"rd"`
|
||||
}
|
||||
|
||||
func (h *Hoster) UnmarshalJSON(data []byte) error {
|
||||
// Attempt to unmarshal into the expected structure (an object with an "rd" key)
|
||||
type Alias Hoster
|
||||
var obj Alias
|
||||
if err := json.Unmarshal(data, &obj); err == nil {
|
||||
*h = Hoster(obj)
|
||||
return nil
|
||||
}
|
||||
|
||||
// If unmarshalling into an object fails, check if it's an empty array
|
||||
var arr []interface{}
|
||||
if err := json.Unmarshal(data, &arr); err == nil && len(arr) == 0 {
|
||||
// It's an empty array; initialize with no entries
|
||||
*h = Hoster{Rd: nil}
|
||||
return nil
|
||||
}
|
||||
|
||||
// If both attempts fail, return an error
|
||||
return fmt.Errorf("hoster: cannot unmarshal JSON data: %s", string(data))
|
||||
}
|
||||
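The two custom unmarshallers above exist because the instantAvailability endpoint sometimes returns an object keyed by infohash and sometimes an empty array. A hypothetical test for both shapes (not part of this diff):

```go
package realdebrid

import (
	"encoding/json"
	"testing"
)

func TestAvailabilityResponseShapes(t *testing.T) {
	// Object form: hash -> hoster -> file variants.
	var obj AvailabilityResponse
	if err := json.Unmarshal([]byte(`{"abcd":{"rd":[{"1":{"filename":"a.mkv","filesize":10}}]}}`), &obj); err != nil {
		t.Fatal(err)
	}
	if len(obj["abcd"].Rd) != 1 {
		t.Fatalf("expected one rd variant, got %+v", obj)
	}

	// Degenerate form: an empty array should decode to an empty map, not an error.
	var empty AvailabilityResponse
	if err := json.Unmarshal([]byte(`[]`), &empty); err != nil {
		t.Fatal(err)
	}
	if len(empty) != 0 {
		t.Fatalf("expected empty map, got %+v", empty)
	}
}
```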
|
||||
type FileVariant struct {
|
||||
Filename string `json:"filename"`
|
||||
Filesize int `json:"filesize"`
|
||||
}
|
||||
|
||||
type AddMagnetSchema struct {
|
||||
Id string `json:"id"`
|
||||
Uri string `json:"uri"`
|
||||
}
|
||||
|
||||
type TorrentInfo struct {
|
||||
ID string `json:"id"`
|
||||
Filename string `json:"filename"`
|
||||
OriginalFilename string `json:"original_filename"`
|
||||
Hash string `json:"hash"`
|
||||
Bytes int64 `json:"bytes"`
|
||||
OriginalBytes int64 `json:"original_bytes"`
|
||||
Host string `json:"host"`
|
||||
Split int `json:"split"`
|
||||
Progress float64 `json:"progress"`
|
||||
Status string `json:"status"`
|
||||
Added string `json:"added"`
|
||||
Files []struct {
|
||||
ID int `json:"id"`
|
||||
Path string `json:"path"`
|
||||
Bytes int64 `json:"bytes"`
|
||||
Selected int `json:"selected"`
|
||||
} `json:"files"`
|
||||
Links []string `json:"links"`
|
||||
Ended string `json:"ended,omitempty"`
|
||||
Speed int64 `json:"speed,omitempty"`
|
||||
Seeders int `json:"seeders,omitempty"`
|
||||
}
|
||||
|
||||
type UnrestrictResponse struct {
|
||||
Id string `json:"id"`
|
||||
Filename string `json:"filename"`
|
||||
MimeType string `json:"mimeType"`
|
||||
Filesize int `json:"filesize"`
|
||||
Link string `json:"link"`
|
||||
Host string `json:"host"`
|
||||
Chunks int `json:"chunks"`
|
||||
Crc int `json:"crc"`
|
||||
Download string `json:"download"`
|
||||
Streamable int `json:"streamable"`
|
||||
}
|
||||
|
||||
type TorrentsResponse struct {
|
||||
Id string `json:"id"`
|
||||
Filename string `json:"filename"`
|
||||
Hash string `json:"hash"`
|
||||
Bytes int64 `json:"bytes"`
|
||||
Host string `json:"host"`
|
||||
Split int64 `json:"split"`
|
||||
Progress float64 `json:"progress"`
|
||||
Status string `json:"status"`
|
||||
Added time.Time `json:"added"`
|
||||
Links []string `json:"links"`
|
||||
Ended time.Time `json:"ended"`
|
||||
}
|
||||
pkg/debrid/torbox/torbox.go (new file, 354 lines)
@@ -0,0 +1,354 @@
|
||||
package torbox
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/sirrobot01/debrid-blackhole/internal/cache"
|
||||
"github.com/sirrobot01/debrid-blackhole/internal/config"
|
||||
"github.com/sirrobot01/debrid-blackhole/internal/logger"
|
||||
"github.com/sirrobot01/debrid-blackhole/internal/request"
|
||||
"github.com/sirrobot01/debrid-blackhole/internal/utils"
|
||||
"github.com/sirrobot01/debrid-blackhole/pkg/debrid/torrent"
|
||||
|
||||
"mime/multipart"
|
||||
"net/http"
|
||||
gourl "net/url"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"slices"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type Torbox struct {
|
||||
Name string
|
||||
Host string `json:"host"`
|
||||
APIKey string
|
||||
DownloadUncached bool
|
||||
client *request.RLHTTPClient
|
||||
cache *cache.Cache
|
||||
MountPath string
|
||||
logger zerolog.Logger
|
||||
CheckCached bool
|
||||
}
|
||||
|
||||
func (tb *Torbox) GetName() string {
|
||||
return tb.Name
|
||||
}
|
||||
|
||||
func (tb *Torbox) GetLogger() zerolog.Logger {
|
||||
return tb.logger
|
||||
}
|
||||
|
||||
func (tb *Torbox) IsAvailable(infohashes []string) map[string]bool {
|
||||
// Check if the infohashes are available in the local cache
|
||||
hashes, result := torrent.GetLocalCache(infohashes, tb.cache)
|
||||
|
||||
if len(hashes) == 0 {
|
||||
// Every infohash is already in the local cache (or none were provided)
|
||||
tb.cache.AddMultiple(result)
|
||||
return result
|
||||
}
|
||||
|
||||
// Divide hashes into groups of 100
|
||||
for i := 0; i < len(hashes); i += 100 {
|
||||
end := i + 100
|
||||
if end > len(hashes) {
|
||||
end = len(hashes)
|
||||
}
|
||||
|
||||
// Filter out empty strings
|
||||
validHashes := make([]string, 0, end-i)
|
||||
for _, hash := range hashes[i:end] {
|
||||
if hash != "" {
|
||||
validHashes = append(validHashes, hash)
|
||||
}
|
||||
}
|
||||
|
||||
// If no valid hashes in this batch, continue to the next batch
|
||||
if len(validHashes) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
hashStr := strings.Join(validHashes, ",")
|
||||
url := fmt.Sprintf("%s/api/torrents/checkcached?hash=%s", tb.Host, hashStr)
|
||||
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
||||
resp, err := tb.client.MakeRequest(req)
|
||||
if err != nil {
|
||||
tb.logger.Info().Msgf("Error checking availability: %v", err)
|
||||
return result
|
||||
}
|
||||
var res AvailableResponse
|
||||
err = json.Unmarshal(resp, &res)
|
||||
if err != nil {
|
||||
tb.logger.Info().Msgf("Error marshalling availability: %v", err)
|
||||
return result
|
||||
}
|
||||
if res.Data == nil {
|
||||
return result
|
||||
}
|
||||
|
||||
for h, entry := range *res.Data {
if entry.Size > 0 {
result[strings.ToUpper(h)] = true
}
}
|
||||
}
|
||||
tb.cache.AddMultiple(result) // Add the results to the cache
|
||||
return result
|
||||
}
|
||||
|
||||
func (tb *Torbox) SubmitMagnet(torrent *torrent.Torrent) (*torrent.Torrent, error) {
|
||||
url := fmt.Sprintf("%s/api/torrents/createtorrent", tb.Host)
|
||||
payload := &bytes.Buffer{}
|
||||
writer := multipart.NewWriter(payload)
|
||||
_ = writer.WriteField("magnet", torrent.Magnet.Link)
|
||||
err := writer.Close()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req, _ := http.NewRequest(http.MethodPost, url, payload)
|
||||
req.Header.Set("Content-Type", writer.FormDataContentType())
|
||||
resp, err := tb.client.MakeRequest(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var data AddMagnetResponse
|
||||
err = json.Unmarshal(resp, &data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if data.Data == nil {
|
||||
return nil, fmt.Errorf("error adding torrent")
|
||||
}
|
||||
dt := *data.Data
|
||||
torrentId := strconv.Itoa(dt.Id)
|
||||
torrent.Id = torrentId
|
||||
torrent.MountPath = tb.MountPath
|
||||
torrent.Debrid = tb.Name
|
||||
|
||||
return torrent, nil
|
||||
}
|
||||
|
||||
func getTorboxStatus(status string, finished bool) string {
|
||||
if finished {
|
||||
return "downloaded"
|
||||
}
|
||||
downloading := []string{"completed", "cached", "paused", "downloading", "uploading",
|
||||
"checkingResumeData", "metaDL", "pausedUP", "queuedUP", "checkingUP",
|
||||
"forcedUP", "allocating", "downloading", "metaDL", "pausedDL",
|
||||
"queuedDL", "checkingDL", "forcedDL", "checkingResumeData", "moving"}
|
||||
switch {
|
||||
case slices.Contains(downloading, status):
|
||||
return "downloading"
|
||||
default:
|
||||
return "error"
|
||||
}
|
||||
}
|
||||
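A small illustration of the mapping above (a hypothetical helper, not part of this diff): the finished flag takes precedence, known client states collapse to "downloading", and anything unrecognised becomes "error".

```go
package torbox

import "fmt"

// demoTorboxStatus is illustrative only.
func demoTorboxStatus() {
	fmt.Println(getTorboxStatus("uploading", false)) // downloading
	fmt.Println(getTorboxStatus("stalled", true))    // downloaded
	fmt.Println(getTorboxStatus("stalled", false))   // error
}
```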
|
||||
func (tb *Torbox) GetTorrent(id string) (*torrent.Torrent, error) {
|
||||
t := &torrent.Torrent{}
|
||||
url := fmt.Sprintf("%s/api/torrents/mylist/?id=%s", tb.Host, id)
|
||||
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
||||
resp, err := tb.client.MakeRequest(req)
|
||||
if err != nil {
|
||||
return t, err
|
||||
}
|
||||
var res InfoResponse
|
||||
err = json.Unmarshal(resp, &res)
|
||||
if err != nil {
|
||||
return t, err
|
||||
}
|
||||
data := res.Data
|
||||
name := data.Name
|
||||
t.Id = id
|
||||
t.Name = name
|
||||
t.Bytes = data.Size
|
||||
t.Folder = name
|
||||
t.Progress = data.Progress * 100
|
||||
t.Status = getTorboxStatus(data.DownloadState, data.DownloadFinished)
|
||||
t.Speed = data.DownloadSpeed
|
||||
t.Seeders = data.Seeds
|
||||
t.Filename = name
|
||||
t.OriginalFilename = name
|
||||
t.MountPath = tb.MountPath
|
||||
t.Debrid = tb.Name
|
||||
t.DownloadLinks = make(map[string]torrent.DownloadLinks)
|
||||
files := make([]torrent.File, 0)
|
||||
cfg := config.GetConfig()
|
||||
for _, f := range data.Files {
|
||||
fileName := filepath.Base(f.Name)
|
||||
if utils.IsSampleFile(fileName) {
|
||||
// Skip sample files
|
||||
continue
|
||||
}
|
||||
if !cfg.IsAllowedFile(fileName) {
|
||||
continue
|
||||
}
|
||||
|
||||
if !cfg.IsSizeAllowed(f.Size) {
|
||||
continue
|
||||
}
|
||||
file := torrent.File{
|
||||
Id: strconv.Itoa(f.Id),
|
||||
Name: fileName,
|
||||
Size: f.Size,
|
||||
Path: fileName,
|
||||
}
|
||||
files = append(files, file)
|
||||
}
|
||||
var cleanPath string
|
||||
if len(files) > 0 {
|
||||
cleanPath = path.Clean(data.Files[0].Name)
|
||||
} else {
|
||||
cleanPath = path.Clean(data.Name)
|
||||
}
|
||||
|
||||
t.OriginalFilename = strings.Split(cleanPath, "/")[0]
|
||||
t.Files = files
|
||||
//t.Debrid = tb
|
||||
return t, nil
|
||||
}
|
||||
|
||||
func (tb *Torbox) CheckStatus(torrent *torrent.Torrent, isSymlink bool) (*torrent.Torrent, error) {
|
||||
for {
|
||||
t, err := tb.GetTorrent(torrent.Id)
|
||||
|
||||
torrent = t
|
||||
|
||||
if err != nil || t == nil {
|
||||
return t, err
|
||||
}
|
||||
status := torrent.Status
|
||||
if status == "downloaded" {
|
||||
tb.logger.Info().Msgf("Torrent: %s downloaded", torrent.Name)
|
||||
if !isSymlink {
|
||||
err = tb.GetDownloadLinks(torrent)
|
||||
if err != nil {
|
||||
return torrent, err
|
||||
}
|
||||
}
|
||||
break
|
||||
} else if slices.Contains(tb.GetDownloadingStatus(), status) {
|
||||
if !tb.DownloadUncached {
|
||||
return torrent, fmt.Errorf("torrent: %s not cached", torrent.Name)
|
||||
}
|
||||
// Break out of the loop if the torrent is downloading.
|
||||
// This is necessary to prevent infinite loop since we moved to sync downloading and async processing
|
||||
break
|
||||
} else {
|
||||
return torrent, fmt.Errorf("torrent: %s has error", torrent.Name)
|
||||
}
|
||||
|
||||
}
|
||||
return torrent, nil
|
||||
}
|
||||
|
||||
func (tb *Torbox) DeleteTorrent(torrent *torrent.Torrent) {
|
||||
url := fmt.Sprintf("%s/api/torrents/controltorrent/%s", tb.Host, torrent.Id)
|
||||
payload := map[string]string{"torrent_id": torrent.Id, "action": "Delete"}
|
||||
jsonPayload, _ := json.Marshal(payload)
|
||||
req, _ := http.NewRequest(http.MethodDelete, url, bytes.NewBuffer(jsonPayload))
|
||||
_, err := tb.client.MakeRequest(req)
|
||||
if err == nil {
|
||||
tb.logger.Info().Msgf("Torrent: %s deleted", torrent.Name)
|
||||
} else {
|
||||
tb.logger.Info().Msgf("Error deleting torrent: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func (tb *Torbox) GetDownloadLinks(t *torrent.Torrent) error {
|
||||
downloadLinks := make(map[string]torrent.DownloadLinks)
|
||||
for _, file := range t.Files {
|
||||
url := fmt.Sprintf("%s/api/torrents/requestdl/", tb.Host)
|
||||
query := gourl.Values{}
|
||||
query.Add("torrent_id", t.Id)
|
||||
query.Add("token", tb.APIKey)
|
||||
query.Add("file_id", file.Id)
|
||||
url += "?" + query.Encode()
|
||||
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
||||
resp, err := tb.client.MakeRequest(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var data DownloadLinksResponse
|
||||
if err = json.Unmarshal(resp, &data); err != nil {
|
||||
return err
|
||||
}
|
||||
if data.Data == nil {
|
||||
return fmt.Errorf("error getting download links")
|
||||
}
|
||||
link := *data.Data
// Use the file from the current loop iteration; indexing t.Files[0] here
// would attach every link to the first file's name.
downloadLinks[file.Id] = torrent.DownloadLinks{
Link: link,
Filename: file.Name,
DownloadLink: link,
}
|
||||
}
|
||||
t.DownloadLinks = downloadLinks
|
||||
return nil
|
||||
}
|
||||
|
||||
func (tb *Torbox) GetDownloadLink(t *torrent.Torrent, file *torrent.File) *torrent.DownloadLinks {
|
||||
url := fmt.Sprintf("%s/api/torrents/requestdl/", tb.Host)
|
||||
query := gourl.Values{}
|
||||
query.Add("torrent_id", t.Id)
|
||||
query.Add("token", tb.APIKey)
|
||||
query.Add("file_id", file.Id)
|
||||
url += "?" + query.Encode()
|
||||
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
||||
resp, err := tb.client.MakeRequest(req)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
var data DownloadLinksResponse
|
||||
if err = json.Unmarshal(resp, &data); err != nil {
|
||||
return nil
|
||||
}
|
||||
if data.Data == nil {
|
||||
return nil
|
||||
}
|
||||
link := *data.Data
|
||||
return &torrent.DownloadLinks{
|
||||
Link: file.Link,
|
||||
Filename: file.Name,
|
||||
DownloadLink: link,
|
||||
}
|
||||
}
|
||||
|
||||
func (tb *Torbox) GetDownloadingStatus() []string {
|
||||
return []string{"downloading"}
|
||||
}
|
||||
|
||||
func (tb *Torbox) GetCheckCached() bool {
|
||||
return tb.CheckCached
|
||||
}
|
||||
|
||||
func (tb *Torbox) GetTorrents() ([]*torrent.Torrent, error) {
|
||||
return nil, fmt.Errorf("not implemented")
|
||||
}
|
||||
|
||||
func New(dc config.Debrid, cache *cache.Cache) *Torbox {
|
||||
rl := request.ParseRateLimit(dc.RateLimit)
|
||||
headers := map[string]string{
|
||||
"Authorization": fmt.Sprintf("Bearer %s", dc.APIKey),
|
||||
}
|
||||
client := request.NewRLHTTPClient(rl, headers)
|
||||
return &Torbox{
|
||||
Name: "torbox",
|
||||
Host: dc.Host,
|
||||
APIKey: dc.APIKey,
|
||||
DownloadUncached: dc.DownloadUncached,
|
||||
client: client,
|
||||
cache: cache,
|
||||
MountPath: dc.Folder,
|
||||
logger: logger.NewLogger(dc.Name, config.GetConfig().LogLevel, os.Stdout),
|
||||
CheckCached: dc.CheckCached,
|
||||
}
|
||||
}
|
||||
pkg/debrid/torbox/types.go (new file, 75 lines)
@@ -0,0 +1,75 @@
|
||||
package torbox
|
||||
|
||||
import "time"
|
||||
|
||||
type APIResponse[T any] struct {
|
||||
Success bool `json:"success"`
|
||||
Error any `json:"error"`
|
||||
Detail string `json:"detail"`
|
||||
Data *T `json:"data"` // Use pointer to allow nil
|
||||
}
|
||||
|
||||
type AvailableResponse APIResponse[map[string]struct {
|
||||
Name string `json:"name"`
|
||||
Size int `json:"size"`
|
||||
Hash string `json:"hash"`
|
||||
}]
|
||||
|
||||
type AddMagnetResponse APIResponse[struct {
|
||||
Id int `json:"torrent_id"`
|
||||
Hash string `json:"hash"`
|
||||
}]
|
||||
|
||||
type torboxInfo struct {
|
||||
Id int `json:"id"`
|
||||
AuthId string `json:"auth_id"`
|
||||
Server int `json:"server"`
|
||||
Hash string `json:"hash"`
|
||||
Name string `json:"name"`
|
||||
Magnet interface{} `json:"magnet"`
|
||||
Size int64 `json:"size"`
|
||||
Active bool `json:"active"`
|
||||
CreatedAt time.Time `json:"created_at"`
|
||||
UpdatedAt time.Time `json:"updated_at"`
|
||||
DownloadState string `json:"download_state"`
|
||||
Seeds int `json:"seeds"`
|
||||
Peers int `json:"peers"`
|
||||
Ratio float64 `json:"ratio"`
|
||||
Progress float64 `json:"progress"`
|
||||
DownloadSpeed int64 `json:"download_speed"`
|
||||
UploadSpeed int `json:"upload_speed"`
|
||||
Eta int `json:"eta"`
|
||||
TorrentFile bool `json:"torrent_file"`
|
||||
ExpiresAt interface{} `json:"expires_at"`
|
||||
DownloadPresent bool `json:"download_present"`
|
||||
Files []struct {
|
||||
Id int `json:"id"`
|
||||
Md5 interface{} `json:"md5"`
|
||||
Hash string `json:"hash"`
|
||||
Name string `json:"name"`
|
||||
Size int64 `json:"size"`
|
||||
Zipped bool `json:"zipped"`
|
||||
S3Path string `json:"s3_path"`
|
||||
Infected bool `json:"infected"`
|
||||
Mimetype string `json:"mimetype"`
|
||||
ShortName string `json:"short_name"`
|
||||
AbsolutePath string `json:"absolute_path"`
|
||||
} `json:"files"`
|
||||
DownloadPath string `json:"download_path"`
|
||||
InactiveCheck int `json:"inactive_check"`
|
||||
Availability int `json:"availability"`
|
||||
DownloadFinished bool `json:"download_finished"`
|
||||
Tracker interface{} `json:"tracker"`
|
||||
TotalUploaded int `json:"total_uploaded"`
|
||||
TotalDownloaded int `json:"total_downloaded"`
|
||||
Cached bool `json:"cached"`
|
||||
Owner string `json:"owner"`
|
||||
SeedTorrent bool `json:"seed_torrent"`
|
||||
AllowZipped bool `json:"allow_zipped"`
|
||||
LongTermSeeding bool `json:"long_term_seeding"`
|
||||
TrackerMessage interface{} `json:"tracker_message"`
|
||||
}
|
||||
|
||||
type InfoResponse APIResponse[torboxInfo]
|
||||
|
||||
type DownloadLinksResponse APIResponse[string]
|
||||
pkg/debrid/torrent/torrent.go (new file, 135 lines)
@@ -0,0 +1,135 @@
|
||||
package torrent
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/sirrobot01/debrid-blackhole/internal/cache"
|
||||
"github.com/sirrobot01/debrid-blackhole/internal/utils"
|
||||
"github.com/sirrobot01/debrid-blackhole/pkg/arr"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
)
|
||||
|
||||
type Arr struct {
|
||||
Name string `json:"name"`
|
||||
Token string `json:"-"`
|
||||
Host string `json:"host"`
|
||||
}
|
||||
|
||||
type ArrHistorySchema struct {
|
||||
Page int `json:"page"`
|
||||
PageSize int `json:"pageSize"`
|
||||
SortKey string `json:"sortKey"`
|
||||
SortDirection string `json:"sortDirection"`
|
||||
TotalRecords int `json:"totalRecords"`
|
||||
Records []struct {
|
||||
ID int `json:"id"`
|
||||
DownloadID string `json:"downloadId"`
|
||||
} `json:"records"`
|
||||
}
|
||||
|
||||
type Torrent struct {
|
||||
Id string `json:"id"`
|
||||
InfoHash string `json:"info_hash"`
|
||||
Name string `json:"name"`
|
||||
Folder string `json:"folder"`
|
||||
Filename string `json:"filename"`
|
||||
OriginalFilename string `json:"original_filename"`
|
||||
Size int64 `json:"size"`
|
||||
Bytes int64 `json:"bytes"` // Size of only the files that are downloaded
|
||||
Magnet *utils.Magnet `json:"magnet"`
|
||||
Files []File `json:"files"`
|
||||
Status string `json:"status"`
|
||||
Added string `json:"added"`
|
||||
Progress float64 `json:"progress"`
|
||||
Speed int64 `json:"speed"`
|
||||
Seeders int `json:"seeders"`
|
||||
Links []string `json:"links"`
|
||||
DownloadLinks map[string]DownloadLinks `json:"download_links"`
|
||||
MountPath string `json:"mount_path"`
|
||||
|
||||
Debrid string `json:"debrid"`
|
||||
|
||||
Arr *arr.Arr `json:"arr"`
|
||||
Mu sync.Mutex `json:"-"`
|
||||
SizeDownloaded int64 `json:"-"` // This is used for local download
|
||||
}
|
||||
|
||||
type DownloadLinks struct {
|
||||
Filename string `json:"filename"`
|
||||
Link string `json:"link"`
|
||||
DownloadLink string `json:"download_link"`
|
||||
}
|
||||
|
||||
func (t *Torrent) GetSymlinkFolder(parent string) string {
|
||||
return filepath.Join(parent, t.Arr.Name, t.Folder)
|
||||
}
|
||||
|
||||
func (t *Torrent) GetMountFolder(rClonePath string) (string, error) {
|
||||
possiblePaths := []string{
|
||||
t.OriginalFilename,
|
||||
t.Filename,
|
||||
utils.RemoveExtension(t.OriginalFilename),
|
||||
}
|
||||
|
||||
for _, path := range possiblePaths {
|
||||
_, err := os.Stat(filepath.Join(rClonePath, path))
|
||||
if !os.IsNotExist(err) {
|
||||
return path, nil
|
||||
}
|
||||
}
|
||||
return "", fmt.Errorf("no path found")
|
||||
}
|
||||
|
||||
type File struct {
|
||||
Id string `json:"id"`
|
||||
Name string `json:"name"`
|
||||
Size int64 `json:"size"`
|
||||
Path string `json:"path"`
|
||||
Link string `json:"link"`
|
||||
}
|
||||
|
||||
func (t *Torrent) Cleanup(remove bool) {
|
||||
if remove {
|
||||
err := os.Remove(t.Filename)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (t *Torrent) GetFile(id string) *File {
|
||||
for _, f := range t.Files {
|
||||
if f.Id == id {
|
||||
return &f
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func GetLocalCache(infohashes []string, cache *cache.Cache) ([]string, map[string]bool) {
|
||||
result := make(map[string]bool)
|
||||
hashes := make([]string, 0)
|
||||
|
||||
if len(infohashes) == 0 {
|
||||
return hashes, result
|
||||
}
|
||||
if len(infohashes) == 1 {
|
||||
if cache.Exists(infohashes[0]) {
|
||||
return hashes, map[string]bool{infohashes[0]: true}
|
||||
}
|
||||
return infohashes, result
|
||||
}
|
||||
|
||||
cachedHashes := cache.GetMultiple(infohashes)
|
||||
for _, h := range infohashes {
|
||||
_, exists := cachedHashes[h]
|
||||
if !exists {
|
||||
hashes = append(hashes, h)
|
||||
} else {
|
||||
result[h] = true
|
||||
}
|
||||
}
|
||||
|
||||
// Return only the hashes that still need a remote availability check,
// together with the ones already confirmed by the local cache.
return hashes, result
|
||||
}
|
||||
pkg/proxy/proxy.go (new file, 345 lines)
@@ -0,0 +1,345 @@
|
||||
package proxy
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"cmp"
|
||||
"context"
|
||||
"encoding/xml"
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/elazarl/goproxy"
|
||||
"github.com/elazarl/goproxy/ext/auth"
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/sirrobot01/debrid-blackhole/internal/config"
|
||||
"github.com/sirrobot01/debrid-blackhole/internal/logger"
|
||||
"github.com/sirrobot01/debrid-blackhole/internal/utils"
|
||||
"github.com/sirrobot01/debrid-blackhole/pkg/service"
|
||||
"github.com/valyala/fastjson"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"regexp"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
type RSS struct {
|
||||
XMLName xml.Name `xml:"rss"`
|
||||
Text string `xml:",chardata"`
|
||||
Version string `xml:"version,attr"`
|
||||
Atom string `xml:"atom,attr"`
|
||||
Torznab string `xml:"torznab,attr"`
|
||||
Channel struct {
|
||||
Text string `xml:",chardata"`
|
||||
Link struct {
|
||||
Text string `xml:",chardata"`
|
||||
Rel string `xml:"rel,attr"`
|
||||
Type string `xml:"type,attr"`
|
||||
} `xml:"link"`
|
||||
Title string `xml:"title"`
|
||||
Items []Item `xml:"item"`
|
||||
} `xml:"channel"`
|
||||
}
|
||||
|
||||
type Item struct {
|
||||
Text string `xml:",chardata"`
|
||||
Title string `xml:"title"`
|
||||
Description string `xml:"description"`
|
||||
GUID string `xml:"guid"`
|
||||
ProwlarrIndexer struct {
|
||||
Text string `xml:",chardata"`
|
||||
ID string `xml:"id,attr"`
|
||||
Type string `xml:"type,attr"`
|
||||
} `xml:"prowlarrindexer"`
|
||||
Comments string `xml:"comments"`
|
||||
PubDate string `xml:"pubDate"`
|
||||
Size string `xml:"size"`
|
||||
Link string `xml:"link"`
|
||||
Category []string `xml:"category"`
|
||||
Enclosure struct {
|
||||
Text string `xml:",chardata"`
|
||||
URL string `xml:"url,attr"`
|
||||
Length string `xml:"length,attr"`
|
||||
Type string `xml:"type,attr"`
|
||||
} `xml:"enclosure"`
|
||||
TorznabAttrs []struct {
|
||||
Text string `xml:",chardata"`
|
||||
Name string `xml:"name,attr"`
|
||||
Value string `xml:"value,attr"`
|
||||
} `xml:"attr"`
|
||||
}
|
||||
|
||||
type Proxy struct {
|
||||
port string
|
||||
enabled bool
|
||||
debug bool
|
||||
username string
|
||||
password string
|
||||
cachedOnly bool
|
||||
logger zerolog.Logger
|
||||
}
|
||||
|
||||
func NewProxy() *Proxy {
|
||||
cfg := config.GetConfig().Proxy
|
||||
port := cmp.Or(os.Getenv("PORT"), cfg.Port, "8181")
|
||||
return &Proxy{
|
||||
port: port,
|
||||
enabled: cfg.Enabled,
|
||||
username: cfg.Username,
|
||||
password: cfg.Password,
|
||||
cachedOnly: cfg.CachedOnly,
|
||||
logger: logger.NewLogger("proxy", cfg.LogLevel, os.Stdout),
|
||||
}
|
||||
}
|
||||
|
||||
func (p *Proxy) ProcessJSONResponse(resp *http.Response) *http.Response {
|
||||
if resp == nil || resp.Body == nil {
|
||||
return resp
|
||||
}
|
||||
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return resp
|
||||
}
|
||||
err = resp.Body.Close()
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
var par fastjson.Parser
|
||||
v, err := par.ParseBytes(body)
|
||||
if err != nil {
|
||||
// If it's not JSON, return the original response
|
||||
resp.Body = io.NopCloser(bytes.NewReader(body))
|
||||
return resp
|
||||
}
|
||||
|
||||
// Modify the JSON
|
||||
|
||||
// Serialize the modified JSON back to bytes
|
||||
modifiedBody := v.MarshalTo(nil)
|
||||
|
||||
// Set the modified body back to the response
|
||||
resp.Body = io.NopCloser(bytes.NewReader(modifiedBody))
|
||||
resp.ContentLength = int64(len(modifiedBody))
|
||||
resp.Header.Set("Content-Length", string(rune(len(modifiedBody))))
|
||||
|
||||
return resp
|
||||
|
||||
}
|
||||
|
||||
func (p *Proxy) ProcessResponse(resp *http.Response) *http.Response {
|
||||
if resp == nil || resp.Body == nil {
|
||||
return resp
|
||||
}
|
||||
contentType := resp.Header.Get("Content-Type")
|
||||
switch contentType {
|
||||
case "application/json":
|
||||
return resp // p.ProcessJSONResponse(resp)
|
||||
case "application/xml":
|
||||
return p.ProcessXMLResponse(resp)
|
||||
case "application/rss+xml":
|
||||
return p.ProcessXMLResponse(resp)
|
||||
default:
|
||||
return resp
|
||||
}
|
||||
}
|
||||
|
||||
func getItemsHash(items []Item) map[string]string {
|
||||
|
||||
var wg sync.WaitGroup
|
||||
idHashMap := sync.Map{} // Use sync.Map for concurrent access
|
||||
|
||||
for _, item := range items {
|
||||
wg.Add(1)
|
||||
go func(item Item) {
|
||||
defer wg.Done()
|
||||
hash := strings.ToLower(item.getHash())
|
||||
if hash != "" {
|
||||
idHashMap.Store(item.GUID, hash) // Store directly into sync.Map
|
||||
}
|
||||
}(item)
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
// Convert sync.Map to regular map
|
||||
finalMap := make(map[string]string)
|
||||
idHashMap.Range(func(key, value interface{}) bool {
|
||||
finalMap[key.(string)] = value.(string)
|
||||
return true
|
||||
})
|
||||
|
||||
return finalMap
|
||||
}
|
||||
|
||||
func (item Item) getHash() string {
|
||||
infohash := ""
|
||||
|
||||
for _, attr := range item.TorznabAttrs {
|
||||
if attr.Name == "infohash" {
|
||||
return attr.Value
|
||||
}
|
||||
}
|
||||
|
||||
if strings.Contains(item.GUID, "magnet:?") {
|
||||
magnet, err := utils.GetMagnetInfo(item.GUID)
|
||||
if err == nil && magnet != nil && magnet.InfoHash != "" {
|
||||
return magnet.InfoHash
|
||||
}
|
||||
}
|
||||
|
||||
magnetLink := item.Link
|
||||
|
||||
if magnetLink == "" {
|
||||
// We can't check the availability of the torrent without a magnet link or infohash
|
||||
return ""
|
||||
}
|
||||
|
||||
if strings.Contains(magnetLink, "magnet:?") {
|
||||
magnet, err := utils.GetMagnetInfo(magnetLink)
|
||||
if err == nil && magnet != nil && magnet.InfoHash != "" {
|
||||
return magnet.InfoHash
|
||||
}
|
||||
}
|
||||
|
||||
// Check the description for an infohash
|
||||
hash := utils.ExtractInfoHash(item.Description)
|
||||
if hash == "" {
|
||||
// Fall back to the comments field for an infohash
|
||||
hash = utils.ExtractInfoHash(item.Comments)
|
||||
}
|
||||
infohash = hash
|
||||
if infohash == "" {
|
||||
if strings.Contains(magnetLink, "http") {
|
||||
h, _ := utils.GetInfohashFromURL(magnetLink)
|
||||
if h != "" {
|
||||
infohash = h
|
||||
}
|
||||
}
|
||||
}
|
||||
return infohash
|
||||
|
||||
}
|
||||
|
||||
func (p *Proxy) ProcessXMLResponse(resp *http.Response) *http.Response {
|
||||
if resp == nil || resp.Body == nil {
|
||||
return resp
|
||||
}
|
||||
|
||||
svc := service.GetService()
|
||||
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
p.logger.Info().Msgf("Error reading response body: %v", err)
|
||||
resp.Body = io.NopCloser(bytes.NewReader(body))
|
||||
return resp
|
||||
}
|
||||
err = resp.Body.Close()
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
var rss RSS
|
||||
err = xml.Unmarshal(body, &rss)
|
||||
if err != nil {
|
||||
p.logger.Info().Msgf("Error unmarshalling XML: %v", err)
|
||||
resp.Body = io.NopCloser(bytes.NewReader(body))
|
||||
return resp
|
||||
}
|
||||
indexer := ""
|
||||
if len(rss.Channel.Items) > 0 {
|
||||
indexer = rss.Channel.Items[0].ProwlarrIndexer.Text
|
||||
} else {
|
||||
resp.Body = io.NopCloser(bytes.NewReader(body))
|
||||
return resp
|
||||
}
|
||||
|
||||
// Step 4: Extract infohash or magnet URI, manipulate data
|
||||
IdsHashMap := getItemsHash(rss.Channel.Items)
|
||||
hashes := make([]string, 0)
|
||||
for _, hash := range IdsHashMap {
|
||||
if hash != "" {
|
||||
hashes = append(hashes, hash)
|
||||
}
|
||||
}
|
||||
availableHashesMap := svc.Debrid.Get().IsAvailable(hashes)
|
||||
newItems := make([]Item, 0, len(rss.Channel.Items))
|
||||
|
||||
if len(hashes) > 0 {
|
||||
for _, item := range rss.Channel.Items {
|
||||
hash := IdsHashMap[item.GUID]
|
||||
if hash == "" {
|
||||
continue
|
||||
}
|
||||
isCached, exists := availableHashesMap[hash]
|
||||
if !exists || !isCached {
|
||||
continue
|
||||
}
|
||||
newItems = append(newItems, item)
|
||||
}
|
||||
}
|
||||
|
||||
if len(newItems) > 0 {
|
||||
p.logger.Info().Msgf("[%s Report]: %d/%d items are cached || Found %d infohash", indexer, len(newItems), len(rss.Channel.Items), len(hashes))
|
||||
} else {
|
||||
// This will prevent the indexer from being disabled by the arr
|
||||
p.logger.Info().Msgf("[%s Report]: No Items are cached; Return only first item with [UnCached]", indexer)
|
||||
item := rss.Channel.Items[0]
|
||||
item.Title = fmt.Sprintf("%s [UnCached]", item.Title)
|
||||
newItems = append(newItems, item)
|
||||
}
|
||||
|
||||
rss.Channel.Items = newItems
|
||||
modifiedBody, err := xml.MarshalIndent(rss, "", " ")
|
||||
if err != nil {
|
||||
p.logger.Info().Msgf("Error marshalling XML: %v", err)
|
||||
resp.Body = io.NopCloser(bytes.NewReader(body))
|
||||
return resp
|
||||
}
|
||||
modifiedBody = append([]byte(xml.Header), modifiedBody...)
|
||||
|
||||
// Set the modified body back to the response
|
||||
resp.Body = io.NopCloser(bytes.NewReader(modifiedBody))
|
||||
return resp
|
||||
}
|
||||
|
||||
func UrlMatches(re *regexp.Regexp) goproxy.ReqConditionFunc {
|
||||
return func(req *http.Request, ctx *goproxy.ProxyCtx) bool {
|
||||
return re.MatchString(req.URL.String())
|
||||
}
|
||||
}
|
||||
|
||||
func (p *Proxy) Start(ctx context.Context) error {
|
||||
username, password := p.username, p.password
|
||||
proxy := goproxy.NewProxyHttpServer()
|
||||
if username != "" || password != "" {
|
||||
// Set up basic auth for proxy
|
||||
auth.ProxyBasic(proxy, "my_realm", func(user, pwd string) bool {
|
||||
return user == username && password == pwd
|
||||
})
|
||||
}
|
||||
|
||||
proxy.OnRequest(goproxy.ReqHostMatches(regexp.MustCompile("^.443$"))).HandleConnect(goproxy.AlwaysMitm)
|
||||
proxy.OnResponse(
|
||||
UrlMatches(regexp.MustCompile("^.*/api\\?t=(search|tvsearch|movie)(&.*)?$")),
|
||||
goproxy.StatusCodeIs(http.StatusOK, http.StatusAccepted)).DoFunc(
|
||||
func(resp *http.Response, ctx *goproxy.ProxyCtx) *http.Response {
|
||||
return p.ProcessResponse(resp)
|
||||
})
|
||||
|
||||
proxy.Verbose = p.debug
|
||||
portFmt := fmt.Sprintf(":%s", p.port)
|
||||
srv := &http.Server{
|
||||
Addr: portFmt,
|
||||
Handler: proxy,
|
||||
}
|
||||
p.logger.Info().Msgf("Starting proxy server on %s", portFmt)
|
||||
go func() {
|
||||
if err := srv.ListenAndServe(); err != nil && !errors.Is(err, http.ErrServerClosed) {
|
||||
p.logger.Info().Msgf("Error starting proxy server: %v", err)
|
||||
}
|
||||
}()
|
||||
<-ctx.Done()
|
||||
p.logger.Info().Msg("Shutting down gracefully...")
|
||||
return srv.Shutdown(context.Background())
|
||||
}
|
||||
pkg/qbit/downloader.go (new file, 205 lines)
@@ -0,0 +1,205 @@
|
||||
package qbit
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
"github.com/cavaliergopher/grab/v3"
|
||||
"github.com/sirrobot01/debrid-blackhole/internal/utils"
|
||||
debrid "github.com/sirrobot01/debrid-blackhole/pkg/debrid/torrent"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
func Download(client *grab.Client, url, filename string, progressCallback func(int64, int64)) error {
|
||||
req, err := grab.NewRequest(filename, url)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
resp := client.Do(req)
|
||||
|
||||
t := time.NewTicker(time.Second)
|
||||
defer t.Stop()
|
||||
|
||||
var lastReported int64
|
||||
Loop:
|
||||
for {
|
||||
select {
|
||||
case <-t.C:
|
||||
current := resp.BytesComplete()
|
||||
speed := int64(resp.BytesPerSecond())
|
||||
if current != lastReported {
|
||||
if progressCallback != nil {
|
||||
progressCallback(current-lastReported, speed)
|
||||
}
|
||||
lastReported = current
|
||||
}
|
||||
case <-resp.Done:
|
||||
break Loop
|
||||
}
|
||||
}
|
||||
|
||||
// Report final bytes
|
||||
if progressCallback != nil {
|
||||
progressCallback(resp.BytesComplete()-lastReported, 0)
|
||||
}
|
||||
|
||||
return resp.Err()
|
||||
}
|
||||
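`Download` wraps grab's asynchronous transfer and reports per-tick byte deltas and speed through the callback, which is how `downloadFiles` below keeps the torrent's progress and speed fields current. A standalone sketch of a caller (URL and destination path are placeholders):

```go
package main

import (
	"fmt"
	"log"

	"github.com/cavaliergopher/grab/v3"
	"github.com/sirrobot01/debrid-blackhole/pkg/qbit"
)

func main() {
	client := grab.NewClient()
	err := qbit.Download(client, "https://example.com/file.bin", "/tmp/file.bin",
		func(deltaBytes, bytesPerSecond int64) {
			fmt.Printf("+%d bytes (%d B/s)\n", deltaBytes, bytesPerSecond)
		})
	if err != nil {
		log.Fatal(err)
	}
}
```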
|
||||
func (q *QBit) ProcessManualFile(torrent *Torrent) (string, error) {
|
||||
debridTorrent := torrent.DebridTorrent
|
||||
q.logger.Info().Msgf("Downloading %d files...", len(debridTorrent.DownloadLinks))
|
||||
torrentPath := filepath.Join(q.DownloadFolder, debridTorrent.Arr.Name, utils.RemoveExtension(debridTorrent.OriginalFilename))
|
||||
torrentPath = utils.RemoveInvalidChars(torrentPath)
|
||||
err := os.MkdirAll(torrentPath, os.ModePerm)
|
||||
if err != nil {
|
||||
// add previous error to the error and return
|
||||
return "", fmt.Errorf("failed to create directory: %s: %v", torrentPath, err)
|
||||
}
|
||||
q.downloadFiles(torrent, torrentPath)
|
||||
return torrentPath, nil
|
||||
}
|
||||
|
||||
func (q *QBit) downloadFiles(torrent *Torrent, parent string) {
|
||||
debridTorrent := torrent.DebridTorrent
|
||||
var wg sync.WaitGroup
|
||||
semaphore := make(chan struct{}, 5)
|
||||
totalSize := int64(0)
|
||||
for _, file := range debridTorrent.Files {
|
||||
totalSize += file.Size
|
||||
}
|
||||
debridTorrent.Mu.Lock()
|
||||
debridTorrent.SizeDownloaded = 0 // Reset downloaded bytes
|
||||
debridTorrent.Progress = 0 // Reset progress
|
||||
debridTorrent.Mu.Unlock()
|
||||
progressCallback := func(downloaded int64, speed int64) {
|
||||
debridTorrent.Mu.Lock()
|
||||
defer debridTorrent.Mu.Unlock()
|
||||
torrent.Mu.Lock()
|
||||
defer torrent.Mu.Unlock()
|
||||
|
||||
// Update total downloaded bytes
|
||||
debridTorrent.SizeDownloaded += downloaded
|
||||
debridTorrent.Speed = speed
|
||||
|
||||
// Calculate overall progress
|
||||
if totalSize > 0 {
|
||||
debridTorrent.Progress = float64(debridTorrent.SizeDownloaded) / float64(totalSize) * 100
|
||||
}
|
||||
q.UpdateTorrentMin(torrent, debridTorrent)
|
||||
}
|
||||
|
||||
tr := &http.Transport{
|
||||
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
|
||||
Proxy: http.ProxyFromEnvironment,
|
||||
}
|
||||
client := &grab.Client{
|
||||
UserAgent: "qBitTorrent",
|
||||
HTTPClient: &http.Client{
|
||||
Transport: tr,
|
||||
},
|
||||
}
|
||||
for _, link := range debridTorrent.DownloadLinks {
|
||||
if link.DownloadLink == "" {
|
||||
q.logger.Info().Msgf("No download link found for %s", link.Filename)
|
||||
continue
|
||||
}
|
||||
wg.Add(1)
|
||||
semaphore <- struct{}{}
|
||||
go func(link debrid.DownloadLinks) {
|
||||
defer wg.Done()
|
||||
defer func() { <-semaphore }()
|
||||
filename := link.Filename
|
||||
|
||||
err := Download(
|
||||
client,
|
||||
link.DownloadLink,
|
||||
filepath.Join(parent, filename),
|
||||
progressCallback,
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
q.logger.Error().Msgf("Failed to download %s: %v", filename, err)
|
||||
} else {
|
||||
q.logger.Info().Msgf("Downloaded %s", filename)
|
||||
}
|
||||
}(link)
|
||||
}
|
||||
wg.Wait()
|
||||
q.logger.Info().Msgf("Downloaded all files for %s", debridTorrent.Name)
|
||||
}
|
||||
|
||||
func (q *QBit) ProcessSymlink(torrent *Torrent) (string, error) {
|
||||
debridTorrent := torrent.DebridTorrent
|
||||
files := debridTorrent.Files
|
||||
if len(files) == 0 {
|
||||
return "", fmt.Errorf("no video files found")
|
||||
}
|
||||
q.logger.Info().Msgf("Checking symlinks for %d files...", len(files))
|
||||
rCloneBase := debridTorrent.MountPath
|
||||
torrentPath, err := q.getTorrentPath(rCloneBase, debridTorrent) // /MyTVShow/
|
||||
// This returns filename.ext for alldebrid instead of the parent folder filename/
|
||||
torrentFolder := torrentPath
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to get torrent path: %v", err)
|
||||
}
|
||||
// Check if the torrent path is a file
|
||||
torrentRclonePath := filepath.Join(rCloneBase, torrentPath) // leave it as is
|
||||
if debridTorrent.Debrid == "alldebrid" && utils.IsMediaFile(torrentPath) {
|
||||
// Alldebrid hotfix for single file torrents
|
||||
torrentFolder = utils.RemoveExtension(torrentFolder)
|
||||
torrentRclonePath = rCloneBase // /mnt/rclone/magnets/ // Remove the filename since it's in the root folder
|
||||
}
|
||||
torrentSymlinkPath := filepath.Join(q.DownloadFolder, debridTorrent.Arr.Name, torrentFolder) // /mnt/symlinks/{category}/MyTVShow/
|
||||
err = os.MkdirAll(torrentSymlinkPath, os.ModePerm)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to create directory: %s: %v", torrentSymlinkPath, err)
|
||||
}
|
||||
|
||||
pending := make(map[string]debrid.File)
|
||||
for _, file := range files {
|
||||
pending[file.Path] = file
|
||||
}
|
||||
ticker := time.NewTicker(200 * time.Millisecond)
|
||||
defer ticker.Stop()
|
||||
|
||||
for len(pending) > 0 {
|
||||
<-ticker.C
|
||||
for path, file := range pending {
|
||||
fullFilePath := filepath.Join(torrentRclonePath, file.Path)
|
||||
if _, err := os.Stat(fullFilePath); !os.IsNotExist(err) {
|
||||
q.logger.Info().Msgf("File is ready: %s", file.Path)
|
||||
q.createSymLink(torrentSymlinkPath, torrentRclonePath, file)
|
||||
delete(pending, path)
|
||||
}
|
||||
}
|
||||
}
|
||||
return torrentSymlinkPath, nil
|
||||
}
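
ProcessSymlink polls the rclone mount until each file appears, then links it into the category folder. A small sketch of that wait-then-link step in isolation; the timeout is an illustrative addition (the function above polls indefinitely) and the paths are hypothetical:

package main

import (
	"fmt"
	"os"
	"time"
)

// waitAndLink polls until src exists on the mount, then symlinks it to dst.
func waitAndLink(src, dst string, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	ticker := time.NewTicker(200 * time.Millisecond)
	defer ticker.Stop()
	for {
		if _, err := os.Stat(src); err == nil {
			return os.Symlink(src, dst)
		}
		if time.Now().After(deadline) {
			return fmt.Errorf("timed out waiting for %s", src)
		}
		<-ticker.C
	}
}

func main() {
	err := waitAndLink(
		"/mnt/rclone/magnets/MyTVShow/MyTVShow.S01E01.mkv",  // hypothetical mount path
		"/mnt/symlinks/sonarr/MyTVShow/MyTVShow.S01E01.mkv", // hypothetical symlink target
		30*time.Second,
	)
	fmt.Println(err)
}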
|
||||
|
||||
func (q *QBit) getTorrentPath(rclonePath string, debridTorrent *debrid.Torrent) (string, error) {
|
||||
for {
|
||||
torrentPath, err := debridTorrent.GetMountFolder(rclonePath)
|
||||
if err == nil {
|
||||
q.logger.Debug().Msgf("Found torrent path: %s", torrentPath)
|
||||
return torrentPath, err
|
||||
}
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
}
|
||||
}
|
||||
|
||||
func (q *QBit) createSymLink(path string, torrentMountPath string, file debrid.File) {
|
||||
|
||||
// Combine the directory and filename to form a full path
|
||||
fullPath := filepath.Join(path, file.Name) // /mnt/symlinks/{category}/MyTVShow/MyTVShow.S01E01.720p.mkv
|
||||
// Create a symbolic link if file doesn't exist
|
||||
torrentFilePath := filepath.Join(torrentMountPath, file.Path) // debridFolder/MyTVShow/MyTVShow.S01E01.720p.mkv
|
||||
err := os.Symlink(torrentFilePath, fullPath)
|
||||
if err != nil {
|
||||
// It's okay if the symlink already exists
|
||||
q.logger.Debug().Msgf("Failed to create symlink: %s: %v", fullPath, err)
|
||||
}
|
||||
}
|
||||
401
pkg/qbit/http.go
Normal file
@@ -0,0 +1,401 @@
|
||||
package qbit
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"github.com/go-chi/chi/v5"
|
||||
"github.com/sirrobot01/debrid-blackhole/internal/request"
|
||||
"github.com/sirrobot01/debrid-blackhole/pkg/arr"
|
||||
"github.com/sirrobot01/debrid-blackhole/pkg/service"
|
||||
"net/http"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func decodeAuthHeader(header string) (string, string, error) {
|
||||
encodedTokens := strings.Split(header, " ")
|
||||
if len(encodedTokens) != 2 {
|
||||
return "", "", nil
|
||||
}
|
||||
encodedToken := encodedTokens[1]
|
||||
|
||||
bytes, err := base64.StdEncoding.DecodeString(encodedToken)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
bearer := string(bytes)
|
||||
|
||||
colonIndex := strings.LastIndex(bearer, ":")
if colonIndex < 0 {
// No "host:token" separator; return empty values rather than slicing out of range
return "", "", nil
}
host := bearer[:colonIndex]
token := bearer[colonIndex+1:]
|
||||
|
||||
return host, token, nil
|
||||
}
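
decodeAuthHeader expects an Authorization value of the form "Basic base64(host:token)" and splits on the last colon so the host may itself contain scheme and port colons. A runnable round-trip sketch (host and key are made up):

package main

import (
	"encoding/base64"
	"fmt"
	"strings"
)

func main() {
	// What an Arr would send: "Basic " + base64("host:token")
	header := "Basic " + base64.StdEncoding.EncodeToString([]byte("http://sonarr:8989:my-api-key"))

	raw, _ := base64.StdEncoding.DecodeString(strings.Split(header, " ")[1])
	bearer := string(raw)

	// Split on the LAST colon, mirroring decodeAuthHeader above,
	// so the port's colon stays inside the host part.
	i := strings.LastIndex(bearer, ":")
	host, token := bearer[:i], bearer[i+1:]
	fmt.Println(host)  // http://sonarr:8989
	fmt.Println(token) // my-api-key
}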
|
||||
|
||||
func (q *QBit) CategoryContext(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
category := strings.TrimSpace(r.URL.Query().Get("category"))
|
||||
if category == "" {
|
||||
// Get from form
|
||||
_ = r.ParseForm()
|
||||
category = r.Form.Get("category")
|
||||
if category == "" {
|
||||
// Get from multipart form
|
||||
_ = r.ParseMultipartForm(32 << 20)
|
||||
category = r.FormValue("category")
|
||||
}
|
||||
}
|
||||
ctx := context.WithValue(r.Context(), "category", strings.TrimSpace(category))
|
||||
next.ServeHTTP(w, r.WithContext(ctx))
|
||||
})
|
||||
}
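
Downstream handlers read the category the middleware stored on the request context. A minimal sketch of a consuming handler (the handler and port are hypothetical):

package main

import (
	"fmt"
	"net/http"
)

func exampleHandler(w http.ResponseWriter, r *http.Request) {
	// The comma-ok assertion avoids a panic if the middleware did not run.
	category, _ := r.Context().Value("category").(string)
	fmt.Fprintf(w, "category=%q\n", category)
}

func main() {
	http.HandleFunc("/example", exampleHandler)
	_ = http.ListenAndServe(":8080", nil) // port chosen for illustration only
}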
|
||||
|
||||
func (q *QBit) authContext(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
host, token, err := decodeAuthHeader(r.Header.Get("Authorization"))
|
||||
category := r.Context().Value("category").(string)
|
||||
svc := service.GetService()
|
||||
// Check if arr exists
|
||||
a := svc.Arr.Get(category)
|
||||
if a == nil {
|
||||
a = arr.New(category, "", "", false)
|
||||
}
|
||||
if err == nil {
|
||||
host = strings.TrimSpace(host)
|
||||
if host != "" {
|
||||
a.Host = host
|
||||
}
|
||||
token = strings.TrimSpace(token)
|
||||
if token != "" {
|
||||
a.Token = token
|
||||
}
|
||||
}
|
||||
|
||||
svc.Arr.AddOrUpdate(a)
|
||||
ctx := context.WithValue(r.Context(), "arr", a)
|
||||
next.ServeHTTP(w, r.WithContext(ctx))
|
||||
})
|
||||
}
|
||||
|
||||
func HashesCtx(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
_hashes := chi.URLParam(r, "hashes")
|
||||
var hashes []string
|
||||
if _hashes != "" {
|
||||
hashes = strings.Split(_hashes, "|")
|
||||
}
|
||||
if hashes == nil {
|
||||
// Get hashes from form
|
||||
_ = r.ParseForm()
|
||||
hashes = r.Form["hashes"]
|
||||
}
|
||||
for i, hash := range hashes {
|
||||
hashes[i] = strings.TrimSpace(hash)
|
||||
}
|
||||
ctx := context.WithValue(r.Context(), "hashes", hashes)
|
||||
next.ServeHTTP(w, r.WithContext(ctx))
|
||||
})
|
||||
}
|
||||
|
||||
func (q *QBit) handleLogin(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
_arr := ctx.Value("arr").(*arr.Arr)
|
||||
if _arr == nil {
|
||||
// No arr
|
||||
_, _ = w.Write([]byte("Ok."))
|
||||
return
|
||||
}
|
||||
if err := _arr.Validate(); err != nil {
|
||||
q.logger.Info().Msgf("Error validating arr: %v", err)
|
||||
}
|
||||
_, _ = w.Write([]byte("Ok."))
|
||||
}
|
||||
|
||||
func (q *QBit) handleVersion(w http.ResponseWriter, r *http.Request) {
|
||||
_, _ = w.Write([]byte("v4.3.2"))
|
||||
}
|
||||
|
||||
func (q *QBit) handleWebAPIVersion(w http.ResponseWriter, r *http.Request) {
|
||||
_, _ = w.Write([]byte("2.7"))
|
||||
}
|
||||
|
||||
func (q *QBit) handlePreferences(w http.ResponseWriter, r *http.Request) {
|
||||
preferences := NewAppPreferences()
|
||||
|
||||
preferences.WebUiUsername = q.Username
|
||||
preferences.SavePath = q.DownloadFolder
|
||||
preferences.TempPath = filepath.Join(q.DownloadFolder, "temp")
|
||||
|
||||
request.JSONResponse(w, preferences, http.StatusOK)
|
||||
}
|
||||
|
||||
func (q *QBit) handleBuildInfo(w http.ResponseWriter, r *http.Request) {
|
||||
res := BuildInfo{
|
||||
Bitness: 64,
|
||||
Boost: "1.75.0",
|
||||
Libtorrent: "1.2.11.0",
|
||||
Openssl: "1.1.1i",
|
||||
Qt: "5.15.2",
|
||||
Zlib: "1.2.11",
|
||||
}
|
||||
request.JSONResponse(w, res, http.StatusOK)
|
||||
}
|
||||
|
||||
func (q *QBit) handleShutdown(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}
|
||||
|
||||
func (q *QBit) handleTorrentsInfo(w http.ResponseWriter, r *http.Request) {
|
||||
//log all url params
|
||||
ctx := r.Context()
|
||||
category := ctx.Value("category").(string)
|
||||
filter := strings.TrimSpace(r.URL.Query().Get("filter"))
|
||||
hashes, _ := ctx.Value("hashes").([]string)
|
||||
torrents := q.Storage.GetAll(category, filter, hashes)
|
||||
request.JSONResponse(w, torrents, http.StatusOK)
|
||||
}
|
||||
|
||||
func (q *QBit) handleTorrentsAdd(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
|
||||
// Parse form based on content type
|
||||
contentType := r.Header.Get("Content-Type")
|
||||
if strings.Contains(contentType, "multipart/form-data") {
|
||||
if err := r.ParseMultipartForm(32 << 20); err != nil {
|
||||
q.logger.Info().Msgf("Error parsing multipart form: %v", err)
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
} else if strings.Contains(contentType, "application/x-www-form-urlencoded") {
|
||||
if err := r.ParseForm(); err != nil {
|
||||
q.logger.Info().Msgf("Error parsing form: %v", err)
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
} else {
|
||||
http.Error(w, "Invalid content type", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
isSymlink := strings.ToLower(r.FormValue("sequentialDownload")) != "true"
|
||||
category := r.FormValue("category")
|
||||
atleastOne := false
|
||||
ctx = context.WithValue(ctx, "isSymlink", isSymlink)
|
||||
|
||||
// Handle magnet URLs
|
||||
if urls := r.FormValue("urls"); urls != "" {
|
||||
var urlList []string
|
||||
for _, u := range strings.Split(urls, "\n") {
|
||||
urlList = append(urlList, strings.TrimSpace(u))
|
||||
}
|
||||
for _, url := range urlList {
|
||||
if err := q.AddMagnet(ctx, url, category); err != nil {
|
||||
q.logger.Info().Msgf("Error adding magnet: %v", err)
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
atleastOne = true
|
||||
}
|
||||
}
|
||||
|
||||
// Handle torrent files
|
||||
if r.MultipartForm != nil && r.MultipartForm.File != nil {
|
||||
if files := r.MultipartForm.File["torrents"]; len(files) > 0 {
|
||||
for _, fileHeader := range files {
|
||||
if err := q.AddTorrent(ctx, fileHeader, category); err != nil {
|
||||
q.logger.Info().Msgf("Error adding torrent: %v", err)
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
atleastOne = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if !atleastOne {
|
||||
http.Error(w, "No valid URLs or torrents provided", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}
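
A sketch of how an Arr (or any client) would hit this endpoint with a magnet; the base URL, the /api/v2 prefix and the magnet hash are assumptions for illustration, not values taken from this changeset:

package main

import (
	"fmt"
	"net/http"
	"net/url"
)

func main() {
	form := url.Values{}
	form.Set("urls", "magnet:?xt=urn:btih:0000000000000000000000000000000000000000") // placeholder hash
	form.Set("category", "sonarr")

	// Assumes the qBit routes are mounted under /api/v2 on the default port.
	resp, err := http.PostForm("http://localhost:8282/api/v2/torrents/add", form)
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}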
|
||||
|
||||
func (q *QBit) handleTorrentsDelete(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
hashes, _ := ctx.Value("hashes").([]string)
|
||||
if len(hashes) == 0 {
|
||||
http.Error(w, "No hashes provided", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
category := ctx.Value("category").(string)
|
||||
for _, hash := range hashes {
|
||||
q.Storage.Delete(hash, category)
|
||||
}
|
||||
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}
|
||||
|
||||
func (q *QBit) handleTorrentsPause(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
hashes, _ := ctx.Value("hashes").([]string)
|
||||
category := ctx.Value("category").(string)
|
||||
for _, hash := range hashes {
|
||||
torrent := q.Storage.Get(hash, category)
|
||||
if torrent == nil {
|
||||
continue
|
||||
}
|
||||
go q.PauseTorrent(torrent)
|
||||
}
|
||||
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}
|
||||
|
||||
func (q *QBit) handleTorrentsResume(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
hashes, _ := ctx.Value("hashes").([]string)
|
||||
category := ctx.Value("category").(string)
|
||||
for _, hash := range hashes {
|
||||
torrent := q.Storage.Get(hash, category)
|
||||
if torrent == nil {
|
||||
continue
|
||||
}
|
||||
go q.ResumeTorrent(torrent)
|
||||
}
|
||||
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}
|
||||
|
||||
func (q *QBit) handleTorrentRecheck(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
hashes, _ := ctx.Value("hashes").([]string)
|
||||
category := ctx.Value("category").(string)
|
||||
for _, hash := range hashes {
|
||||
torrent := q.Storage.Get(hash, category)
|
||||
if torrent == nil {
|
||||
continue
|
||||
}
|
||||
go q.RefreshTorrent(torrent)
|
||||
}
|
||||
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}
|
||||
|
||||
func (q *QBit) handleCategories(w http.ResponseWriter, r *http.Request) {
|
||||
var categories = map[string]TorrentCategory{}
|
||||
for _, cat := range q.Categories {
|
||||
path := filepath.Join(q.DownloadFolder, cat)
|
||||
categories[cat] = TorrentCategory{
|
||||
Name: cat,
|
||||
SavePath: path,
|
||||
}
|
||||
}
|
||||
request.JSONResponse(w, categories, http.StatusOK)
|
||||
}
|
||||
|
||||
func (q *QBit) handleCreateCategory(w http.ResponseWriter, r *http.Request) {
|
||||
err := r.ParseForm()
|
||||
if err != nil {
|
||||
http.Error(w, "Failed to parse form data", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
name := r.Form.Get("category")
|
||||
if name == "" {
|
||||
http.Error(w, "No name provided", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
q.Categories = append(q.Categories, name)
|
||||
|
||||
request.JSONResponse(w, nil, http.StatusOK)
|
||||
}
|
||||
|
||||
func (q *QBit) handleTorrentProperties(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
hash := r.URL.Query().Get("hash")
|
||||
torrent := q.Storage.Get(hash, ctx.Value("category").(string))
if torrent == nil {
return
}

properties := q.GetTorrentProperties(torrent)
|
||||
request.JSONResponse(w, properties, http.StatusOK)
|
||||
}
|
||||
|
||||
func (q *QBit) handleTorrentFiles(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
hash := r.URL.Query().Get("hash")
|
||||
torrent := q.Storage.Get(hash, ctx.Value("category").(string))
|
||||
if torrent == nil {
|
||||
return
|
||||
}
|
||||
files := q.GetTorrentFiles(torrent)
|
||||
request.JSONResponse(w, files, http.StatusOK)
|
||||
}
|
||||
|
||||
func (q *QBit) handleSetCategory(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
category := ctx.Value("category").(string)
|
||||
hashes, _ := ctx.Value("hashes").([]string)
|
||||
torrents := q.Storage.GetAll("", "", hashes)
|
||||
for _, torrent := range torrents {
|
||||
torrent.Category = category
|
||||
q.Storage.AddOrUpdate(torrent)
|
||||
}
|
||||
request.JSONResponse(w, nil, http.StatusOK)
|
||||
}
|
||||
|
||||
func (q *QBit) handleAddTorrentTags(w http.ResponseWriter, r *http.Request) {
|
||||
err := r.ParseForm()
|
||||
if err != nil {
|
||||
http.Error(w, "Failed to parse form data", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
ctx := r.Context()
|
||||
hashes, _ := ctx.Value("hashes").([]string)
|
||||
tags := strings.Split(r.FormValue("tags"), ",")
|
||||
for i, tag := range tags {
|
||||
tags[i] = strings.TrimSpace(tag)
|
||||
}
|
||||
torrents := q.Storage.GetAll("", "", hashes)
|
||||
for _, t := range torrents {
|
||||
q.SetTorrentTags(t, tags)
|
||||
}
|
||||
request.JSONResponse(w, nil, http.StatusOK)
|
||||
}
|
||||
|
||||
func (q *QBit) handleRemoveTorrentTags(w http.ResponseWriter, r *http.Request) {
|
||||
err := r.ParseForm()
|
||||
if err != nil {
|
||||
http.Error(w, "Failed to parse form data", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
ctx := r.Context()
|
||||
hashes, _ := ctx.Value("hashes").([]string)
|
||||
tags := strings.Split(r.FormValue("tags"), ",")
|
||||
for i, tag := range tags {
|
||||
tags[i] = strings.TrimSpace(tag)
|
||||
}
|
||||
torrents := q.Storage.GetAll("", "", hashes)
|
||||
for _, torrent := range torrents {
|
||||
q.RemoveTorrentTags(torrent, tags)
|
||||
|
||||
}
|
||||
request.JSONResponse(w, nil, http.StatusOK)
|
||||
}
|
||||
|
||||
func (q *QBit) handleGetTags(w http.ResponseWriter, r *http.Request) {
|
||||
request.JSONResponse(w, q.Tags, http.StatusOK)
|
||||
}
|
||||
|
||||
func (q *QBit) handleCreateTags(w http.ResponseWriter, r *http.Request) {
|
||||
err := r.ParseForm()
|
||||
if err != nil {
|
||||
http.Error(w, "Failed to parse form data", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
tags := strings.Split(r.FormValue("tags"), ",")
|
||||
for i, tag := range tags {
|
||||
tags[i] = strings.TrimSpace(tag)
|
||||
}
|
||||
q.AddTags(tags)
|
||||
request.JSONResponse(w, nil, http.StatusOK)
|
||||
}
|
||||
90
pkg/qbit/import.go
Normal file
@@ -0,0 +1,90 @@
|
||||
package qbit
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/sirrobot01/debrid-blackhole/internal/utils"
|
||||
"github.com/sirrobot01/debrid-blackhole/pkg/service"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/sirrobot01/debrid-blackhole/pkg/arr"
|
||||
"github.com/sirrobot01/debrid-blackhole/pkg/debrid"
|
||||
)
|
||||
|
||||
type ImportRequest struct {
|
||||
ID string `json:"id"`
|
||||
Path string `json:"path"`
|
||||
URI string `json:"uri"`
|
||||
Arr *arr.Arr `json:"arr"`
|
||||
IsSymlink bool `json:"isSymlink"`
|
||||
SeriesId int `json:"series"`
|
||||
Seasons []int `json:"seasons"`
|
||||
Episodes []string `json:"episodes"`
|
||||
|
||||
Failed bool `json:"failed"`
|
||||
FailedAt time.Time `json:"failedAt"`
|
||||
Reason string `json:"reason"`
|
||||
Completed bool `json:"completed"`
|
||||
CompletedAt time.Time `json:"completedAt"`
|
||||
Async bool `json:"async"`
|
||||
}
|
||||
|
||||
type ManualImportResponseSchema struct {
|
||||
Priority string `json:"priority"`
|
||||
Status string `json:"status"`
|
||||
Result string `json:"result"`
|
||||
Queued time.Time `json:"queued"`
|
||||
Trigger string `json:"trigger"`
|
||||
SendUpdatesToClient bool `json:"sendUpdatesToClient"`
|
||||
UpdateScheduledTask bool `json:"updateScheduledTask"`
|
||||
Id int `json:"id"`
|
||||
}
|
||||
|
||||
func NewImportRequest(uri string, arr *arr.Arr, isSymlink bool) *ImportRequest {
|
||||
return &ImportRequest{
|
||||
ID: uuid.NewString(),
|
||||
URI: uri,
|
||||
Arr: arr,
|
||||
Failed: false,
|
||||
Completed: false,
|
||||
Async: false,
|
||||
IsSymlink: isSymlink,
|
||||
}
|
||||
}
|
||||
|
||||
func (i *ImportRequest) Fail(reason string) {
|
||||
i.Failed = true
|
||||
i.FailedAt = time.Now()
|
||||
i.Reason = reason
|
||||
}
|
||||
|
||||
func (i *ImportRequest) Complete() {
|
||||
i.Completed = true
|
||||
i.CompletedAt = time.Now()
|
||||
}
|
||||
|
||||
func (i *ImportRequest) Process(q *QBit) (err error) {
|
||||
// Use this for now.
|
||||
// This sends the torrent to the arr
|
||||
svc := service.GetService()
|
||||
magnet, err := utils.GetMagnetFromUrl(i.URI)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error parsing magnet link: %w", err)
|
||||
}
|
||||
torrent := CreateTorrentFromMagnet(magnet, i.Arr.Name, "manual")
|
||||
debridTorrent, err := debrid.ProcessTorrent(svc.Debrid, magnet, i.Arr, i.IsSymlink)
|
||||
if err != nil || debridTorrent == nil {
|
||||
if debridTorrent != nil {
|
||||
dbClient := service.GetDebrid().GetByName(debridTorrent.Debrid)
|
||||
go dbClient.DeleteTorrent(debridTorrent)
|
||||
}
|
||||
if err == nil {
|
||||
err = fmt.Errorf("failed to process torrent")
|
||||
}
|
||||
return err
|
||||
}
|
||||
torrent = q.UpdateTorrentMin(torrent, debridTorrent)
|
||||
q.Storage.AddOrUpdate(torrent)
|
||||
go q.ProcessFiles(torrent, debridTorrent, i.Arr, i.IsSymlink)
|
||||
return nil
|
||||
}
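
A sketch of how an import request would be driven, assuming a configured *QBit (q) and *arr.Arr (a) are already in hand in the same package; the URI is a placeholder:

// Hypothetical caller.
req := NewImportRequest("magnet:?xt=urn:btih:0000000000000000000000000000000000000000", a, true)
if err := req.Process(q); err != nil {
	req.Fail(err.Error()) // records the failure reason and timestamp
} else {
	req.Complete() // marks the request done and stamps CompletedAt
}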
|
||||
28
pkg/qbit/misc.go
Normal file
@@ -0,0 +1,28 @@
package qbit

import (
	"github.com/google/uuid"
	"github.com/sirrobot01/debrid-blackhole/internal/utils"
	"strings"
)

func CreateTorrentFromMagnet(magnet *utils.Magnet, category, source string) *Torrent {
	torrent := &Torrent{
		ID:        uuid.NewString(),
		Hash:      strings.ToLower(magnet.InfoHash),
		Name:      magnet.Name,
		Size:      magnet.Size,
		Category:  category,
		Source:    source,
		State:     "downloading",
		MagnetUri: magnet.Link,

		Tracker:    "udp://tracker.opentrackr.org:1337",
		UpLimit:    -1,
		DlLimit:    -1,
		AutoTmm:    false,
		Ratio:      1,
		RatioLimit: 1,
	}
	return torrent
}
39
pkg/qbit/qbit.go
Normal file
@@ -0,0 +1,39 @@
|
||||
package qbit
|
||||
|
||||
import (
|
||||
"cmp"
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/sirrobot01/debrid-blackhole/internal/config"
|
||||
"github.com/sirrobot01/debrid-blackhole/internal/logger"
|
||||
"os"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
type QBit struct {
|
||||
Username string `json:"username"`
|
||||
Password string `json:"password"`
|
||||
Port string `json:"port"`
|
||||
DownloadFolder string `json:"download_folder"`
|
||||
Categories []string `json:"categories"`
|
||||
Storage *TorrentStorage
|
||||
logger zerolog.Logger
|
||||
Tags []string
|
||||
RefreshInterval int
|
||||
}
|
||||
|
||||
func New() *QBit {
|
||||
_cfg := config.GetConfig()
|
||||
cfg := _cfg.QBitTorrent
|
||||
port := cmp.Or(cfg.Port, os.Getenv("QBIT_PORT"), "8282")
|
||||
refreshInterval := cmp.Or(cfg.RefreshInterval, 10)
|
||||
return &QBit{
|
||||
Username: cfg.Username,
|
||||
Password: cfg.Password,
|
||||
Port: port,
|
||||
DownloadFolder: cfg.DownloadFolder,
|
||||
Categories: cfg.Categories,
|
||||
Storage: NewTorrentStorage(filepath.Join(_cfg.Path, "torrents.json")),
|
||||
logger: logger.NewLogger("qbit", cfg.LogLevel, os.Stdout),
|
||||
RefreshInterval: refreshInterval,
|
||||
}
|
||||
}
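
New uses cmp.Or (Go 1.22+) to take the first non-zero value, so an explicit config port wins over the environment variable, which wins over the default. A runnable illustration:

package main

import (
	"cmp"
	"fmt"
	"os"
)

func main() {
	// "" (no config value) falls through to the env var, then to the default.
	port := cmp.Or("", os.Getenv("QBIT_PORT"), "8282")
	fmt.Println(port) // "8282" unless QBIT_PORT is set

	interval := cmp.Or(0, 10) // a zero int falls through to 10
	fmt.Println(interval)
}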
|
||||
42
pkg/qbit/routes.go
Normal file
@@ -0,0 +1,42 @@
|
||||
package qbit
|
||||
|
||||
import (
|
||||
"github.com/go-chi/chi/v5"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
func (q *QBit) Routes() http.Handler {
|
||||
r := chi.NewRouter()
|
||||
r.Use(q.CategoryContext)
|
||||
r.Group(func(r chi.Router) {
|
||||
r.Use(q.authContext)
|
||||
r.Post("/auth/login", q.handleLogin)
|
||||
r.Route("/torrents", func(r chi.Router) {
|
||||
r.Use(HashesCtx)
|
||||
r.Get("/info", q.handleTorrentsInfo)
|
||||
r.Post("/add", q.handleTorrentsAdd)
|
||||
r.Post("/delete", q.handleTorrentsDelete)
|
||||
r.Get("/categories", q.handleCategories)
|
||||
r.Post("/createCategory", q.handleCreateCategory)
|
||||
r.Post("/setCategory", q.handleSetCategory)
|
||||
r.Post("/addTags", q.handleAddTorrentTags)
|
||||
r.Post("/removeTags", q.handleRemoveTorrentTags)
|
||||
r.Post("/createTags", q.handleCreateTags)
|
||||
r.Get("/tags", q.handleGetTags)
|
||||
r.Get("/pause", q.handleTorrentsPause)
|
||||
r.Get("/resume", q.handleTorrentsResume)
|
||||
r.Get("/recheck", q.handleTorrentRecheck)
|
||||
r.Get("/properties", q.handleTorrentProperties)
|
||||
r.Get("/files", q.handleTorrentFiles)
|
||||
})
|
||||
|
||||
r.Route("/app", func(r chi.Router) {
|
||||
r.Get("/version", q.handleVersion)
|
||||
r.Get("/webapiVersion", q.handleWebAPIVersion)
|
||||
r.Get("/preferences", q.handlePreferences)
|
||||
r.Get("/buildInfo", q.handleBuildInfo)
|
||||
r.Get("/shutdown", q.handleShutdown)
|
||||
})
|
||||
})
|
||||
return r
|
||||
}
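
A sketch of mounting this router into a server; the /api/v2 prefix mirrors qBittorrent's WebUI API but is an assumption about how the rest of the project wires it up:

// Hypothetical wiring, with the qbit package in scope.
q := New()
root := chi.NewRouter()
root.Mount("/api/v2", q.Routes())
if err := http.ListenAndServe(":"+q.Port, root); err != nil {
	log.Fatal(err)
}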
|
||||
194
pkg/qbit/storage.go
Normal file
@@ -0,0 +1,194 @@
|
||||
package qbit
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"sync"
|
||||
)
|
||||
|
||||
func keyPair(hash, category string) string {
|
||||
if category == "" {
|
||||
category = "uncategorized"
|
||||
}
|
||||
return fmt.Sprintf("%s|%s", hash, category)
|
||||
}
|
||||
|
||||
type Torrents = map[string]*Torrent
|
||||
|
||||
type TorrentStorage struct {
|
||||
torrents Torrents
|
||||
mu sync.RWMutex
|
||||
filename string // Added to store the filename for persistence
|
||||
}
|
||||
|
||||
func loadTorrentsFromJSON(filename string) (Torrents, error) {
|
||||
data, err := os.ReadFile(filename)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
torrents := make(Torrents)
|
||||
if err := json.Unmarshal(data, &torrents); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return torrents, nil
|
||||
}
|
||||
|
||||
func NewTorrentStorage(filename string) *TorrentStorage {
|
||||
// Open the JSON file and read the data
|
||||
torrents, err := loadTorrentsFromJSON(filename)
|
||||
if err != nil {
|
||||
torrents = make(Torrents)
|
||||
}
|
||||
// Create a new TorrentStorage
|
||||
return &TorrentStorage{
|
||||
torrents: torrents,
|
||||
filename: filename,
|
||||
}
|
||||
}
|
||||
|
||||
func (ts *TorrentStorage) Add(torrent *Torrent) {
|
||||
ts.mu.Lock()
|
||||
defer ts.mu.Unlock()
|
||||
ts.torrents[keyPair(torrent.Hash, torrent.Category)] = torrent
|
||||
go func() {
|
||||
err := ts.saveToFile()
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
func (ts *TorrentStorage) AddOrUpdate(torrent *Torrent) {
|
||||
ts.mu.Lock()
|
||||
defer ts.mu.Unlock()
|
||||
ts.torrents[keyPair(torrent.Hash, torrent.Category)] = torrent
|
||||
go func() {
|
||||
err := ts.saveToFile()
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
func (ts *TorrentStorage) Get(hash, category string) *Torrent {
|
||||
ts.mu.RLock()
|
||||
defer ts.mu.RUnlock()
|
||||
torrent, exists := ts.torrents[keyPair(hash, category)]
|
||||
if !exists && category == "" {
|
||||
// Try to find the torrent without knowing the category
|
||||
for _, t := range ts.torrents {
|
||||
if t.Hash == hash {
|
||||
return t
|
||||
}
|
||||
}
|
||||
}
|
||||
return torrent
|
||||
}
|
||||
|
||||
func (ts *TorrentStorage) GetAll(category string, filter string, hashes []string) []*Torrent {
|
||||
ts.mu.RLock()
|
||||
defer ts.mu.RUnlock()
|
||||
torrents := make([]*Torrent, 0)
|
||||
for _, torrent := range ts.torrents {
|
||||
if category != "" && torrent.Category != category {
|
||||
continue
|
||||
}
|
||||
if filter != "" && torrent.State != filter {
|
||||
continue
|
||||
}
|
||||
torrents = append(torrents, torrent)
|
||||
}
|
||||
|
||||
if len(hashes) > 0 {
|
||||
filtered := make([]*Torrent, 0)
|
||||
for _, hash := range hashes {
|
||||
for _, torrent := range torrents {
|
||||
if torrent.Hash == hash {
|
||||
filtered = append(filtered, torrent)
|
||||
}
|
||||
}
|
||||
}
|
||||
return filtered
|
||||
}
|
||||
return torrents
|
||||
}
|
||||
|
||||
func (ts *TorrentStorage) Update(torrent *Torrent) {
|
||||
ts.mu.Lock()
|
||||
defer ts.mu.Unlock()
|
||||
ts.torrents[keyPair(torrent.Hash, torrent.Category)] = torrent
|
||||
go func() {
|
||||
err := ts.saveToFile()
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
func (ts *TorrentStorage) Delete(hash, category string) {
|
||||
ts.mu.Lock()
|
||||
defer ts.mu.Unlock()
|
||||
key := keyPair(hash, category)
|
||||
torrent, exists := ts.torrents[key]
|
||||
if !exists && category == "" {
|
||||
// Remove the torrent without knowing the category
|
||||
for k, t := range ts.torrents {
|
||||
if t.Hash == hash {
|
||||
key = k
|
||||
torrent = t
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
delete(ts.torrents, key)
|
||||
if torrent == nil {
|
||||
return
|
||||
}
|
||||
// Delete the torrent folder
|
||||
if torrent.ContentPath != "" {
|
||||
err := os.RemoveAll(torrent.ContentPath)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
go func() {
|
||||
err := ts.saveToFile()
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
func (ts *TorrentStorage) DeleteMultiple(hashes []string) {
|
||||
ts.mu.Lock()
|
||||
defer ts.mu.Unlock()
|
||||
for _, hash := range hashes {
|
||||
for key, torrent := range ts.torrents {
|
||||
if torrent.Hash == hash {
|
||||
delete(ts.torrents, key)
|
||||
}
|
||||
}
|
||||
}
|
||||
go func() {
|
||||
err := ts.saveToFile()
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
func (ts *TorrentStorage) Save() error {
|
||||
ts.mu.RLock()
|
||||
defer ts.mu.RUnlock()
|
||||
return ts.saveToFile()
|
||||
}
|
||||
|
||||
// saveToFile is a helper function to write the current state to the JSON file
|
||||
func (ts *TorrentStorage) saveToFile() error {
|
||||
data, err := json.MarshalIndent(ts.torrents, "", " ")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return os.WriteFile(ts.filename, data, 0644)
|
||||
}
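
Typical use of the storage layer, assuming the qbit package is in scope; note that Get with an empty category falls back to a linear scan by hash:

// Hypothetical usage sketch.
store := NewTorrentStorage("/tmp/torrents.json")

t := &Torrent{Hash: "abc123", Category: "sonarr", Name: "My.Show.S01E01"}
store.AddOrUpdate(t)             // persists asynchronously to torrents.json
found := store.Get("abc123", "") // empty category falls back to a scan by hash
_ = found

store.Delete("abc123", "sonarr") // removes the entry and its ContentPath on disk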
|
||||
301
pkg/qbit/torrent.go
Normal file
@@ -0,0 +1,301 @@
|
||||
package qbit
|
||||
|
||||
import (
|
||||
"cmp"
|
||||
"context"
|
||||
"fmt"
|
||||
"github.com/sirrobot01/debrid-blackhole/internal/utils"
|
||||
"github.com/sirrobot01/debrid-blackhole/pkg/arr"
|
||||
db "github.com/sirrobot01/debrid-blackhole/pkg/debrid"
|
||||
debrid "github.com/sirrobot01/debrid-blackhole/pkg/debrid/torrent"
|
||||
"github.com/sirrobot01/debrid-blackhole/pkg/service"
|
||||
"io"
|
||||
"mime/multipart"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"slices"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// All torrent-related helpers go here
|
||||
|
||||
func (q *QBit) AddMagnet(ctx context.Context, url, category string) error {
|
||||
magnet, err := utils.GetMagnetFromUrl(url)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error parsing magnet link: %w", err)
|
||||
}
|
||||
err = q.Process(ctx, magnet, category)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to process torrent: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (q *QBit) AddTorrent(ctx context.Context, fileHeader *multipart.FileHeader, category string) error {
|
||||
file, _ := fileHeader.Open()
|
||||
defer file.Close()
|
||||
var reader io.Reader = file
|
||||
magnet, err := utils.GetMagnetFromFile(reader, fileHeader.Filename)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error reading file: %s \n %w", fileHeader.Filename, err)
|
||||
}
|
||||
err = q.Process(ctx, magnet, category)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to process torrent: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (q *QBit) Process(ctx context.Context, magnet *utils.Magnet, category string) error {
|
||||
svc := service.GetService()
|
||||
torrent := CreateTorrentFromMagnet(magnet, category, "auto")
|
||||
a, ok := ctx.Value("arr").(*arr.Arr)
|
||||
if !ok {
|
||||
return fmt.Errorf("arr not found in context")
|
||||
}
|
||||
isSymlink := ctx.Value("isSymlink").(bool)
|
||||
debridTorrent, err := db.ProcessTorrent(svc.Debrid, magnet, a, isSymlink)
|
||||
if err != nil || debridTorrent == nil {
|
||||
if debridTorrent != nil {
|
||||
dbClient := service.GetDebrid().GetByName(debridTorrent.Debrid)
|
||||
go dbClient.DeleteTorrent(debridTorrent)
|
||||
}
|
||||
if err == nil {
|
||||
err = fmt.Errorf("failed to process torrent")
|
||||
}
|
||||
return err
|
||||
}
|
||||
torrent = q.UpdateTorrentMin(torrent, debridTorrent)
|
||||
q.Storage.AddOrUpdate(torrent)
|
||||
go q.ProcessFiles(torrent, debridTorrent, a, isSymlink) // We can send async for file processing not to delay the response
|
||||
return nil
|
||||
}
|
||||
|
||||
func (q *QBit) ProcessFiles(torrent *Torrent, debridTorrent *debrid.Torrent, arr *arr.Arr, isSymlink bool) {
|
||||
debridClient := service.GetDebrid().GetByName(debridTorrent.Debrid)
|
||||
for debridTorrent.Status != "downloaded" {
|
||||
q.logger.Debug().Msgf("%s <- (%s) Download Progress: %.2f%%", debridTorrent.Debrid, debridTorrent.Name, debridTorrent.Progress)
|
||||
dbT, err := debridClient.CheckStatus(debridTorrent, isSymlink)
|
||||
if err != nil {
|
||||
q.logger.Error().Msgf("Error checking status: %v", err)
|
||||
go debridClient.DeleteTorrent(debridTorrent)
|
||||
q.MarkAsFailed(torrent)
|
||||
if err := arr.Refresh(); err != nil {
|
||||
q.logger.Error().Msgf("Error refreshing arr: %v", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
debridTorrent = dbT
|
||||
torrent = q.UpdateTorrentMin(torrent, debridTorrent)
|
||||
|
||||
// Exit the loop for downloading statuses to prevent memory buildup
|
||||
if !slices.Contains(debridClient.GetDownloadingStatus(), debridTorrent.Status) {
|
||||
break
|
||||
}
|
||||
time.Sleep(time.Duration(q.RefreshInterval) * time.Second)
|
||||
}
|
||||
var (
|
||||
torrentSymlinkPath string
|
||||
err error
|
||||
)
|
||||
debridTorrent.Arr = arr
|
||||
if isSymlink {
|
||||
torrentSymlinkPath, err = q.ProcessSymlink(torrent) // /mnt/symlinks/{category}/MyTVShow/
|
||||
} else {
|
||||
torrentSymlinkPath, err = q.ProcessManualFile(torrent)
|
||||
}
|
||||
if err != nil {
|
||||
q.MarkAsFailed(torrent)
|
||||
go debridClient.DeleteTorrent(debridTorrent)
|
||||
q.logger.Info().Msgf("Error: %v", err)
|
||||
return
|
||||
}
|
||||
torrent.TorrentPath = torrentSymlinkPath
|
||||
q.UpdateTorrent(torrent, debridTorrent)
|
||||
if err := arr.Refresh(); err != nil {
|
||||
q.logger.Error().Msgf("Error refreshing arr: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func (q *QBit) MarkAsFailed(t *Torrent) *Torrent {
|
||||
t.State = "error"
|
||||
q.Storage.AddOrUpdate(t)
|
||||
return t
|
||||
}
|
||||
|
||||
func (q *QBit) UpdateTorrentMin(t *Torrent, debridTorrent *debrid.Torrent) *Torrent {
|
||||
if debridTorrent == nil {
|
||||
return t
|
||||
}
|
||||
|
||||
addedOn, err := time.Parse(time.RFC3339, debridTorrent.Added)
|
||||
if err != nil {
|
||||
addedOn = time.Now()
|
||||
}
|
||||
totalSize := debridTorrent.Bytes
|
||||
progress := cmp.Or(debridTorrent.Progress, 100)
|
||||
progress = progress / 100.0
|
||||
sizeCompleted := int64(float64(totalSize) * progress)
|
||||
|
||||
var speed int64
|
||||
if debridTorrent.Speed != 0 {
|
||||
speed = debridTorrent.Speed
|
||||
}
|
||||
var eta int
|
||||
if speed != 0 {
|
||||
eta = int((totalSize - sizeCompleted) / speed)
|
||||
}
|
||||
t.ID = debridTorrent.Id
|
||||
t.Name = debridTorrent.Name
|
||||
t.AddedOn = addedOn.Unix()
|
||||
t.DebridTorrent = debridTorrent
|
||||
t.Debrid = debridTorrent.Debrid
|
||||
t.Size = totalSize
|
||||
t.Completed = sizeCompleted
|
||||
t.Downloaded = sizeCompleted
|
||||
t.DownloadedSession = sizeCompleted
|
||||
t.Uploaded = sizeCompleted
|
||||
t.UploadedSession = sizeCompleted
|
||||
t.AmountLeft = totalSize - sizeCompleted
|
||||
t.Progress = progress
|
||||
t.Eta = eta
|
||||
t.Dlspeed = speed
|
||||
t.Upspeed = speed
|
||||
t.SavePath = filepath.Join(q.DownloadFolder, t.Category) + string(os.PathSeparator)
|
||||
t.ContentPath = filepath.Join(t.SavePath, t.Name) + string(os.PathSeparator)
|
||||
return t
|
||||
}
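
The progress and ETA math above, in concrete numbers (all values invented for illustration):

totalSize := int64(2_000_000_000)                     // 2 GB torrent
progress := 25.0 / 100.0                              // debrid reports 25%
sizeCompleted := int64(float64(totalSize) * progress) // 500_000_000 bytes
speed := int64(10_000_000)                            // 10 MB/s
eta := int((totalSize - sizeCompleted) / speed)       // 150 seconds remaining
_ = eta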
|
||||
|
||||
func (q *QBit) UpdateTorrent(t *Torrent, debridTorrent *debrid.Torrent) *Torrent {
|
||||
if debridTorrent == nil {
|
||||
return t
|
||||
}
|
||||
_db := service.GetDebrid().GetByName(debridTorrent.Debrid)
|
||||
if debridTorrent.Status != "downloaded" {
|
||||
debridTorrent, _ = _db.GetTorrent(t.ID)
|
||||
}
|
||||
t = q.UpdateTorrentMin(t, debridTorrent)
|
||||
t.ContentPath = t.TorrentPath + string(os.PathSeparator)
|
||||
|
||||
if t.IsReady() {
|
||||
t.State = "pausedUP"
|
||||
q.Storage.Update(t)
|
||||
return t
|
||||
}
|
||||
|
||||
ticker := time.NewTicker(100 * time.Millisecond)
defer ticker.Stop()

// Create the timeout channel once, outside the loop; a time.After inside
// the select would be recreated on every tick and never fire.
timeout := time.After(10 * time.Minute)

for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
if t.IsReady() {
|
||||
t.State = "pausedUP"
|
||||
q.Storage.Update(t)
|
||||
return t
|
||||
}
|
||||
updatedT := q.UpdateTorrent(t, debridTorrent)
|
||||
t = updatedT
|
||||
|
||||
case <-timeout:
|
||||
return t
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (q *QBit) ResumeTorrent(t *Torrent) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (q *QBit) PauseTorrent(t *Torrent) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (q *QBit) RefreshTorrent(t *Torrent) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (q *QBit) GetTorrentProperties(t *Torrent) *TorrentProperties {
|
||||
return &TorrentProperties{
|
||||
AdditionDate: t.AddedOn,
|
||||
Comment: "Debrid Blackhole <https://github.com/sirrobot01/debrid-blackhole>",
|
||||
CreatedBy: "Debrid Blackhole <https://github.com/sirrobot01/debrid-blackhole>",
|
||||
CreationDate: t.AddedOn,
|
||||
DlLimit: -1,
|
||||
UpLimit: -1,
|
||||
DlSpeed: t.Dlspeed,
|
||||
UpSpeed: t.Upspeed,
|
||||
TotalSize: t.Size,
|
||||
TotalUploaded: t.Uploaded,
|
||||
TotalDownloaded: t.Downloaded,
|
||||
TotalUploadedSession: t.UploadedSession,
|
||||
TotalDownloadedSession: t.DownloadedSession,
|
||||
LastSeen: time.Now().Unix(),
|
||||
NbConnectionsLimit: 100,
|
||||
Peers: 0,
|
||||
PeersTotal: 2,
|
||||
SeedingTime: 1,
|
||||
Seeds: 100,
|
||||
ShareRatio: 100,
|
||||
}
|
||||
}
|
||||
|
||||
func (q *QBit) GetTorrentFiles(t *Torrent) []*TorrentFile {
|
||||
files := make([]*TorrentFile, 0)
|
||||
if t.DebridTorrent == nil {
|
||||
return files
|
||||
}
|
||||
for _, file := range t.DebridTorrent.Files {
|
||||
files = append(files, &TorrentFile{
|
||||
Name: file.Path,
|
||||
Size: file.Size,
|
||||
})
|
||||
}
|
||||
return files
|
||||
}
|
||||
|
||||
func (q *QBit) SetTorrentTags(t *Torrent, tags []string) bool {
|
||||
torrentTags := strings.Split(t.Tags, ",")
|
||||
for _, tag := range tags {
|
||||
if tag == "" {
|
||||
continue
|
||||
}
|
||||
if !slices.Contains(torrentTags, tag) {
|
||||
torrentTags = append(torrentTags, tag)
|
||||
}
|
||||
if !slices.Contains(q.Tags, tag) {
|
||||
q.Tags = append(q.Tags, tag)
|
||||
}
|
||||
}
|
||||
t.Tags = strings.Join(torrentTags, ",")
|
||||
q.Storage.Update(t)
|
||||
return true
|
||||
}
|
||||
|
||||
func (q *QBit) RemoveTorrentTags(t *Torrent, tags []string) bool {
|
||||
torrentTags := strings.Split(t.Tags, ",")
|
||||
newTorrentTags := utils.RemoveItem(torrentTags, tags...)
|
||||
q.Tags = utils.RemoveItem(q.Tags, tags...)
|
||||
t.Tags = strings.Join(newTorrentTags, ",")
|
||||
q.Storage.Update(t)
|
||||
return true
|
||||
}
|
||||
|
||||
func (q *QBit) AddTags(tags []string) bool {
|
||||
for _, tag := range tags {
|
||||
if tag == "" {
|
||||
continue
|
||||
}
|
||||
if !slices.Contains(q.Tags, tag) {
|
||||
q.Tags = append(q.Tags, tag)
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (q *QBit) RemoveTags(tags []string) bool {
|
||||
q.Tags = utils.RemoveItem(q.Tags, tags...)
|
||||
return true
|
||||
}
|
||||
430
pkg/qbit/types.go
Normal file
@@ -0,0 +1,430 @@
|
||||
package qbit
|
||||
|
||||
import (
|
||||
"github.com/sirrobot01/debrid-blackhole/pkg/debrid/torrent"
|
||||
"sync"
|
||||
)
|
||||
|
||||
type BuildInfo struct {
|
||||
Libtorrent string `json:"libtorrent"`
|
||||
Bitness int `json:"bitness"`
|
||||
Boost string `json:"boost"`
|
||||
Openssl string `json:"openssl"`
|
||||
Qt string `json:"qt"`
|
||||
Zlib string `json:"zlib"`
|
||||
}
|
||||
|
||||
type AppPreferences struct {
|
||||
AddTrackers string `json:"add_trackers"`
|
||||
AddTrackersEnabled bool `json:"add_trackers_enabled"`
|
||||
AltDlLimit int `json:"alt_dl_limit"`
|
||||
AltUpLimit int `json:"alt_up_limit"`
|
||||
AlternativeWebuiEnabled bool `json:"alternative_webui_enabled"`
|
||||
AlternativeWebuiPath string `json:"alternative_webui_path"`
|
||||
AnnounceIp string `json:"announce_ip"`
|
||||
AnnounceToAllTiers bool `json:"announce_to_all_tiers"`
|
||||
AnnounceToAllTrackers bool `json:"announce_to_all_trackers"`
|
||||
AnonymousMode bool `json:"anonymous_mode"`
|
||||
AsyncIoThreads int `json:"async_io_threads"`
|
||||
AutoDeleteMode int `json:"auto_delete_mode"`
|
||||
AutoTmmEnabled bool `json:"auto_tmm_enabled"`
|
||||
AutorunEnabled bool `json:"autorun_enabled"`
|
||||
AutorunProgram string `json:"autorun_program"`
|
||||
BannedIPs string `json:"banned_IPs"`
|
||||
BittorrentProtocol int `json:"bittorrent_protocol"`
|
||||
BypassAuthSubnetWhitelist string `json:"bypass_auth_subnet_whitelist"`
|
||||
BypassAuthSubnetWhitelistEnabled bool `json:"bypass_auth_subnet_whitelist_enabled"`
|
||||
BypassLocalAuth bool `json:"bypass_local_auth"`
|
||||
CategoryChangedTmmEnabled bool `json:"category_changed_tmm_enabled"`
|
||||
CheckingMemoryUse int `json:"checking_memory_use"`
|
||||
CreateSubfolderEnabled bool `json:"create_subfolder_enabled"`
|
||||
CurrentInterfaceAddress string `json:"current_interface_address"`
|
||||
CurrentNetworkInterface string `json:"current_network_interface"`
|
||||
Dht bool `json:"dht"`
|
||||
DiskCache int `json:"disk_cache"`
|
||||
DiskCacheTtl int `json:"disk_cache_ttl"`
|
||||
DlLimit int `json:"dl_limit"`
|
||||
DontCountSlowTorrents bool `json:"dont_count_slow_torrents"`
|
||||
DyndnsDomain string `json:"dyndns_domain"`
|
||||
DyndnsEnabled bool `json:"dyndns_enabled"`
|
||||
DyndnsPassword string `json:"dyndns_password"`
|
||||
DyndnsService int `json:"dyndns_service"`
|
||||
DyndnsUsername string `json:"dyndns_username"`
|
||||
EmbeddedTrackerPort int `json:"embedded_tracker_port"`
|
||||
EnableCoalesceReadWrite bool `json:"enable_coalesce_read_write"`
|
||||
EnableEmbeddedTracker bool `json:"enable_embedded_tracker"`
|
||||
EnableMultiConnectionsFromSameIp bool `json:"enable_multi_connections_from_same_ip"`
|
||||
EnableOsCache bool `json:"enable_os_cache"`
|
||||
EnablePieceExtentAffinity bool `json:"enable_piece_extent_affinity"`
|
||||
EnableSuperSeeding bool `json:"enable_super_seeding"`
|
||||
EnableUploadSuggestions bool `json:"enable_upload_suggestions"`
|
||||
Encryption int `json:"encryption"`
|
||||
ExportDir string `json:"export_dir"`
|
||||
ExportDirFin string `json:"export_dir_fin"`
|
||||
FilePoolSize int `json:"file_pool_size"`
|
||||
IncompleteFilesExt bool `json:"incomplete_files_ext"`
|
||||
IpFilterEnabled bool `json:"ip_filter_enabled"`
|
||||
IpFilterPath string `json:"ip_filter_path"`
|
||||
IpFilterTrackers bool `json:"ip_filter_trackers"`
|
||||
LimitLanPeers bool `json:"limit_lan_peers"`
|
||||
LimitTcpOverhead bool `json:"limit_tcp_overhead"`
|
||||
LimitUtpRate bool `json:"limit_utp_rate"`
|
||||
ListenPort int `json:"listen_port"`
|
||||
Locale string `json:"locale"`
|
||||
Lsd bool `json:"lsd"`
|
||||
MailNotificationAuthEnabled bool `json:"mail_notification_auth_enabled"`
|
||||
MailNotificationEmail string `json:"mail_notification_email"`
|
||||
MailNotificationEnabled bool `json:"mail_notification_enabled"`
|
||||
MailNotificationPassword string `json:"mail_notification_password"`
|
||||
MailNotificationSender string `json:"mail_notification_sender"`
|
||||
MailNotificationSmtp string `json:"mail_notification_smtp"`
|
||||
MailNotificationSslEnabled bool `json:"mail_notification_ssl_enabled"`
|
||||
MailNotificationUsername string `json:"mail_notification_username"`
|
||||
MaxActiveDownloads int `json:"max_active_downloads"`
|
||||
MaxActiveTorrents int `json:"max_active_torrents"`
|
||||
MaxActiveUploads int `json:"max_active_uploads"`
|
||||
MaxConnec int `json:"max_connec"`
|
||||
MaxConnecPerTorrent int `json:"max_connec_per_torrent"`
|
||||
MaxRatio int `json:"max_ratio"`
|
||||
MaxRatioAct int `json:"max_ratio_act"`
|
||||
MaxRatioEnabled bool `json:"max_ratio_enabled"`
|
||||
MaxSeedingTime int `json:"max_seeding_time"`
|
||||
MaxSeedingTimeEnabled bool `json:"max_seeding_time_enabled"`
|
||||
MaxUploads int `json:"max_uploads"`
|
||||
MaxUploadsPerTorrent int `json:"max_uploads_per_torrent"`
|
||||
OutgoingPortsMax int `json:"outgoing_ports_max"`
|
||||
OutgoingPortsMin int `json:"outgoing_ports_min"`
|
||||
Pex bool `json:"pex"`
|
||||
PreallocateAll bool `json:"preallocate_all"`
|
||||
ProxyAuthEnabled bool `json:"proxy_auth_enabled"`
|
||||
ProxyIp string `json:"proxy_ip"`
|
||||
ProxyPassword string `json:"proxy_password"`
|
||||
ProxyPeerConnections bool `json:"proxy_peer_connections"`
|
||||
ProxyPort int `json:"proxy_port"`
|
||||
ProxyTorrentsOnly bool `json:"proxy_torrents_only"`
|
||||
ProxyType int `json:"proxy_type"`
|
||||
ProxyUsername string `json:"proxy_username"`
|
||||
QueueingEnabled bool `json:"queueing_enabled"`
|
||||
RandomPort bool `json:"random_port"`
|
||||
RecheckCompletedTorrents bool `json:"recheck_completed_torrents"`
|
||||
ResolvePeerCountries bool `json:"resolve_peer_countries"`
|
||||
RssAutoDownloadingEnabled bool `json:"rss_auto_downloading_enabled"`
|
||||
RssMaxArticlesPerFeed int `json:"rss_max_articles_per_feed"`
|
||||
RssProcessingEnabled bool `json:"rss_processing_enabled"`
|
||||
RssRefreshInterval int `json:"rss_refresh_interval"`
|
||||
SavePath string `json:"save_path"`
|
||||
SavePathChangedTmmEnabled bool `json:"save_path_changed_tmm_enabled"`
|
||||
SaveResumeDataInterval int `json:"save_resume_data_interval"`
|
||||
ScanDirs ScanDirs `json:"scan_dirs"`
|
||||
ScheduleFromHour int `json:"schedule_from_hour"`
|
||||
ScheduleFromMin int `json:"schedule_from_min"`
|
||||
ScheduleToHour int `json:"schedule_to_hour"`
|
||||
ScheduleToMin int `json:"schedule_to_min"`
|
||||
SchedulerDays int `json:"scheduler_days"`
|
||||
SchedulerEnabled bool `json:"scheduler_enabled"`
|
||||
SendBufferLowWatermark int `json:"send_buffer_low_watermark"`
|
||||
SendBufferWatermark int `json:"send_buffer_watermark"`
|
||||
SendBufferWatermarkFactor int `json:"send_buffer_watermark_factor"`
|
||||
SlowTorrentDlRateThreshold int `json:"slow_torrent_dl_rate_threshold"`
|
||||
SlowTorrentInactiveTimer int `json:"slow_torrent_inactive_timer"`
|
||||
SlowTorrentUlRateThreshold int `json:"slow_torrent_ul_rate_threshold"`
|
||||
SocketBacklogSize int `json:"socket_backlog_size"`
|
||||
StartPausedEnabled bool `json:"start_paused_enabled"`
|
||||
StopTrackerTimeout int `json:"stop_tracker_timeout"`
|
||||
TempPath string `json:"temp_path"`
|
||||
TempPathEnabled bool `json:"temp_path_enabled"`
|
||||
TorrentChangedTmmEnabled bool `json:"torrent_changed_tmm_enabled"`
|
||||
UpLimit int `json:"up_limit"`
|
||||
UploadChokingAlgorithm int `json:"upload_choking_algorithm"`
|
||||
UploadSlotsBehavior int `json:"upload_slots_behavior"`
|
||||
Upnp bool `json:"upnp"`
|
||||
UpnpLeaseDuration int `json:"upnp_lease_duration"`
|
||||
UseHttps bool `json:"use_https"`
|
||||
UtpTcpMixedMode int `json:"utp_tcp_mixed_mode"`
|
||||
WebUiAddress string `json:"web_ui_address"`
|
||||
WebUiBanDuration int `json:"web_ui_ban_duration"`
|
||||
WebUiClickjackingProtectionEnabled bool `json:"web_ui_clickjacking_protection_enabled"`
|
||||
WebUiCsrfProtectionEnabled bool `json:"web_ui_csrf_protection_enabled"`
|
||||
WebUiDomainList string `json:"web_ui_domain_list"`
|
||||
WebUiHostHeaderValidationEnabled bool `json:"web_ui_host_header_validation_enabled"`
|
||||
WebUiHttpsCertPath string `json:"web_ui_https_cert_path"`
|
||||
WebUiHttpsKeyPath string `json:"web_ui_https_key_path"`
|
||||
WebUiMaxAuthFailCount int `json:"web_ui_max_auth_fail_count"`
|
||||
WebUiPort int `json:"web_ui_port"`
|
||||
WebUiSecureCookieEnabled bool `json:"web_ui_secure_cookie_enabled"`
|
||||
WebUiSessionTimeout int `json:"web_ui_session_timeout"`
|
||||
WebUiUpnp bool `json:"web_ui_upnp"`
|
||||
WebUiUsername string `json:"web_ui_username"`
|
||||
WebUiPassword string `json:"web_ui_password"`
|
||||
SSLKey string `json:"ssl_key"`
|
||||
SSLCert string `json:"ssl_cert"`
|
||||
RSSDownloadRepack string `json:"rss_download_repack_proper_episodes"`
|
||||
RSSSmartEpisodeFilters string `json:"rss_smart_episode_filters"`
|
||||
WebUiUseCustomHttpHeaders bool `json:"web_ui_use_custom_http_headers"`
|
||||
WebUiUseCustomHttpHeadersEnabled bool `json:"web_ui_use_custom_http_headers_enabled"`
|
||||
}
|
||||
|
||||
type ScanDirs struct{}
|
||||
|
||||
type TorrentCategory struct {
|
||||
Name string `json:"name"`
|
||||
SavePath string `json:"savePath"`
|
||||
}
|
||||
|
||||
type Torrent struct {
|
||||
ID string `json:"id"`
|
||||
DebridTorrent *torrent.Torrent `json:"-"`
|
||||
Debrid string `json:"debrid"`
|
||||
TorrentPath string `json:"-"`
|
||||
|
||||
AddedOn int64 `json:"added_on,omitempty"`
|
||||
AmountLeft int64 `json:"amount_left"`
|
||||
AutoTmm bool `json:"auto_tmm"`
|
||||
Availability float64 `json:"availability,omitempty"`
|
||||
Category string `json:"category,omitempty"`
|
||||
Completed int64 `json:"completed"`
|
||||
CompletionOn int `json:"completion_on,omitempty"`
|
||||
ContentPath string `json:"content_path"`
|
||||
DlLimit int `json:"dl_limit"`
|
||||
Dlspeed int64 `json:"dlspeed"`
|
||||
Downloaded int64 `json:"downloaded"`
|
||||
DownloadedSession int64 `json:"downloaded_session"`
|
||||
Eta int `json:"eta"`
|
||||
FlPiecePrio bool `json:"f_l_piece_prio,omitempty"`
|
||||
ForceStart bool `json:"force_start,omitempty"`
|
||||
Hash string `json:"hash"`
|
||||
LastActivity int64 `json:"last_activity,omitempty"`
|
||||
MagnetUri string `json:"magnet_uri,omitempty"`
|
||||
MaxRatio int `json:"max_ratio,omitempty"`
|
||||
MaxSeedingTime int `json:"max_seeding_time,omitempty"`
|
||||
Name string `json:"name,omitempty"`
|
||||
NumComplete int `json:"num_complete,omitempty"`
|
||||
NumIncomplete int `json:"num_incomplete,omitempty"`
|
||||
NumLeechs int `json:"num_leechs,omitempty"`
|
||||
NumSeeds int `json:"num_seeds,omitempty"`
|
||||
Priority int `json:"priority,omitempty"`
|
||||
Progress float64 `json:"progress"`
|
||||
Ratio int `json:"ratio,omitempty"`
|
||||
RatioLimit int `json:"ratio_limit,omitempty"`
|
||||
SavePath string `json:"save_path"`
|
||||
SeedingTimeLimit int `json:"seeding_time_limit,omitempty"`
|
||||
SeenComplete int64 `json:"seen_complete,omitempty"`
|
||||
SeqDl bool `json:"seq_dl"`
|
||||
Size int64 `json:"size,omitempty"`
|
||||
State string `json:"state,omitempty"`
|
||||
SuperSeeding bool `json:"super_seeding"`
|
||||
Tags string `json:"tags,omitempty"`
|
||||
TimeActive int `json:"time_active,omitempty"`
|
||||
TotalSize int64 `json:"total_size,omitempty"`
|
||||
Tracker string `json:"tracker,omitempty"`
|
||||
UpLimit int64 `json:"up_limit,omitempty"`
|
||||
Uploaded int64 `json:"uploaded,omitempty"`
|
||||
UploadedSession int64 `json:"uploaded_session,omitempty"`
|
||||
Upspeed int64 `json:"upspeed,omitempty"`
|
||||
Source string `json:"source,omitempty"`
|
||||
|
||||
Mu sync.Mutex `json:"-"`
|
||||
}
|
||||
|
||||
func (t *Torrent) IsReady() bool {
|
||||
return t.AmountLeft <= 0 && t.TorrentPath != ""
|
||||
}
|
||||
|
||||
type TorrentProperties struct {
|
||||
AdditionDate int64 `json:"addition_date,omitempty"`
|
||||
Comment string `json:"comment,omitempty"`
|
||||
CompletionDate int64 `json:"completion_date,omitempty"`
|
||||
CreatedBy string `json:"created_by,omitempty"`
|
||||
CreationDate int64 `json:"creation_date,omitempty"`
|
||||
DlLimit int `json:"dl_limit,omitempty"`
|
||||
DlSpeed int64 `json:"dl_speed,omitempty"`
|
||||
DlSpeedAvg int `json:"dl_speed_avg,omitempty"`
|
||||
Eta int `json:"eta,omitempty"`
|
||||
LastSeen int64 `json:"last_seen,omitempty"`
|
||||
NbConnections int `json:"nb_connections,omitempty"`
|
||||
NbConnectionsLimit int `json:"nb_connections_limit,omitempty"`
|
||||
Peers int `json:"peers,omitempty"`
|
||||
PeersTotal int `json:"peers_total,omitempty"`
|
||||
PieceSize int64 `json:"piece_size,omitempty"`
|
||||
PiecesHave int64 `json:"pieces_have,omitempty"`
|
||||
PiecesNum int64 `json:"pieces_num,omitempty"`
|
||||
Reannounce int `json:"reannounce,omitempty"`
|
||||
SavePath string `json:"save_path,omitempty"`
|
||||
SeedingTime int `json:"seeding_time,omitempty"`
|
||||
Seeds int `json:"seeds,omitempty"`
|
||||
SeedsTotal int `json:"seeds_total,omitempty"`
|
||||
ShareRatio int `json:"share_ratio,omitempty"`
|
||||
TimeElapsed int64 `json:"time_elapsed,omitempty"`
|
||||
TotalDownloaded int64 `json:"total_downloaded,omitempty"`
|
||||
TotalDownloadedSession int64 `json:"total_downloaded_session,omitempty"`
|
||||
TotalSize int64 `json:"total_size,omitempty"`
|
||||
TotalUploaded int64 `json:"total_uploaded,omitempty"`
|
||||
TotalUploadedSession int64 `json:"total_uploaded_session,omitempty"`
|
||||
TotalWasted int64 `json:"total_wasted,omitempty"`
|
||||
UpLimit int `json:"up_limit,omitempty"`
|
||||
UpSpeed int64 `json:"up_speed,omitempty"`
|
||||
UpSpeedAvg int `json:"up_speed_avg,omitempty"`
|
||||
}
|
||||
|
||||
type TorrentFile struct {
|
||||
Index int `json:"index,omitempty"`
|
||||
Name string `json:"name,omitempty"`
|
||||
Size int64 `json:"size,omitempty"`
|
||||
Progress int `json:"progress,omitempty"`
|
||||
Priority int `json:"priority,omitempty"`
|
||||
IsSeed bool `json:"is_seed,omitempty"`
|
||||
PieceRange []int `json:"piece_range,omitempty"`
|
||||
Availability float64 `json:"availability,omitempty"`
|
||||
}
|
||||
|
||||
func NewAppPreferences() *AppPreferences {
|
||||
preferences := &AppPreferences{
|
||||
AddTrackers: "",
|
||||
AddTrackersEnabled: false,
|
||||
AltDlLimit: 10240,
|
||||
AltUpLimit: 10240,
|
||||
AlternativeWebuiEnabled: false,
|
||||
AlternativeWebuiPath: "",
|
||||
AnnounceIp: "",
|
||||
AnnounceToAllTiers: true,
|
||||
AnnounceToAllTrackers: false,
|
||||
AnonymousMode: false,
|
||||
AsyncIoThreads: 4,
|
||||
AutoDeleteMode: 0,
|
||||
AutoTmmEnabled: false,
|
||||
AutorunEnabled: false,
|
||||
AutorunProgram: "",
|
||||
BannedIPs: "",
|
||||
BittorrentProtocol: 0,
|
||||
BypassAuthSubnetWhitelist: "",
|
||||
BypassAuthSubnetWhitelistEnabled: false,
|
||||
BypassLocalAuth: false,
|
||||
CategoryChangedTmmEnabled: false,
|
||||
CheckingMemoryUse: 32,
|
||||
CreateSubfolderEnabled: true,
|
||||
CurrentInterfaceAddress: "",
|
||||
CurrentNetworkInterface: "",
|
||||
Dht: true,
|
||||
DiskCache: -1,
|
||||
DiskCacheTtl: 60,
|
||||
DlLimit: 0,
|
||||
DontCountSlowTorrents: false,
|
||||
DyndnsDomain: "changeme.dyndns.org",
|
||||
DyndnsEnabled: false,
|
||||
DyndnsPassword: "",
|
||||
DyndnsService: 0,
|
||||
DyndnsUsername: "",
|
||||
EmbeddedTrackerPort: 9000,
|
||||
EnableCoalesceReadWrite: true,
|
||||
EnableEmbeddedTracker: false,
|
||||
EnableMultiConnectionsFromSameIp: false,
|
||||
EnableOsCache: true,
|
||||
EnablePieceExtentAffinity: false,
|
||||
EnableSuperSeeding: false,
|
||||
EnableUploadSuggestions: false,
|
||||
Encryption: 0,
|
||||
ExportDir: "",
|
||||
ExportDirFin: "",
|
||||
FilePoolSize: 40,
|
||||
IncompleteFilesExt: false,
|
||||
IpFilterEnabled: false,
|
||||
IpFilterPath: "",
|
||||
IpFilterTrackers: false,
|
||||
LimitLanPeers: true,
|
||||
LimitTcpOverhead: false,
|
||||
LimitUtpRate: true,
|
||||
ListenPort: 31193,
|
||||
Locale: "en",
|
||||
Lsd: true,
|
||||
MailNotificationAuthEnabled: false,
|
||||
MailNotificationEmail: "",
|
||||
MailNotificationEnabled: false,
|
||||
MailNotificationPassword: "",
|
||||
MailNotificationSender: "qBittorrentNotification@example.com",
|
||||
MailNotificationSmtp: "smtp.changeme.com",
|
||||
MailNotificationSslEnabled: false,
|
||||
MailNotificationUsername: "",
|
||||
MaxActiveDownloads: 3,
|
||||
MaxActiveTorrents: 5,
|
||||
MaxActiveUploads: 3,
|
||||
MaxConnec: 500,
|
||||
MaxConnecPerTorrent: 100,
|
||||
MaxRatio: -1,
|
||||
MaxRatioAct: 0,
|
||||
MaxRatioEnabled: false,
|
||||
MaxSeedingTime: -1,
|
||||
MaxSeedingTimeEnabled: false,
|
||||
MaxUploads: -1,
|
||||
MaxUploadsPerTorrent: -1,
|
||||
OutgoingPortsMax: 0,
|
||||
OutgoingPortsMin: 0,
|
||||
Pex: true,
|
||||
PreallocateAll: false,
|
||||
ProxyAuthEnabled: false,
|
||||
ProxyIp: "0.0.0.0",
|
||||
ProxyPassword: "",
|
||||
ProxyPeerConnections: false,
|
||||
ProxyPort: 8080,
|
||||
ProxyTorrentsOnly: false,
|
||||
ProxyType: 0,
|
||||
ProxyUsername: "",
|
||||
QueueingEnabled: false,
|
||||
RandomPort: false,
|
||||
RecheckCompletedTorrents: false,
|
||||
ResolvePeerCountries: true,
|
||||
RssAutoDownloadingEnabled: false,
|
||||
RssMaxArticlesPerFeed: 50,
|
||||
RssProcessingEnabled: false,
|
||||
RssRefreshInterval: 30,
|
||||
SavePathChangedTmmEnabled: false,
|
||||
SaveResumeDataInterval: 60,
|
||||
ScanDirs: ScanDirs{},
|
||||
ScheduleFromHour: 8,
|
||||
ScheduleFromMin: 0,
|
||||
ScheduleToHour: 20,
|
||||
ScheduleToMin: 0,
|
||||
SchedulerDays: 0,
|
||||
SchedulerEnabled: false,
|
||||
SendBufferLowWatermark: 10,
|
||||
SendBufferWatermark: 500,
|
||||
SendBufferWatermarkFactor: 50,
|
||||
SlowTorrentDlRateThreshold: 2,
|
||||
SlowTorrentInactiveTimer: 60,
|
||||
SlowTorrentUlRateThreshold: 2,
|
||||
SocketBacklogSize: 30,
|
||||
StartPausedEnabled: false,
|
||||
StopTrackerTimeout: 1,
|
||||
TempPathEnabled: false,
|
||||
TorrentChangedTmmEnabled: true,
|
||||
UpLimit: 0,
|
||||
UploadChokingAlgorithm: 1,
|
||||
UploadSlotsBehavior: 0,
|
||||
Upnp: true,
|
||||
UpnpLeaseDuration: 0,
|
||||
UseHttps: false,
|
||||
UtpTcpMixedMode: 0,
|
||||
WebUiAddress: "*",
|
||||
WebUiBanDuration: 3600,
|
||||
WebUiClickjackingProtectionEnabled: true,
|
||||
WebUiCsrfProtectionEnabled: true,
|
||||
WebUiDomainList: "*",
|
||||
WebUiHostHeaderValidationEnabled: true,
|
||||
WebUiHttpsCertPath: "",
|
||||
WebUiHttpsKeyPath: "",
|
||||
WebUiMaxAuthFailCount: 5,
|
||||
WebUiPort: 8080,
|
||||
WebUiSecureCookieEnabled: true,
|
||||
WebUiSessionTimeout: 3600,
|
||||
WebUiUpnp: false,
|
||||
|
||||
// Fields in the struct but not in the JSON (set to zero values):
|
||||
WebUiPassword: "",
|
||||
SSLKey: "",
|
||||
SSLCert: "",
|
||||
RSSDownloadRepack: "",
|
||||
RSSSmartEpisodeFilters: "",
|
||||
WebUiUseCustomHttpHeaders: false,
|
||||
WebUiUseCustomHttpHeadersEnabled: false,
|
||||
}
|
||||
return preferences
|
||||
}
|
||||
131
pkg/repair/misc.go
Normal file
@@ -0,0 +1,131 @@
|
||||
package repair
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
func parseSchedule(schedule string) (time.Duration, error) {
|
||||
if schedule == "" {
|
||||
return time.Hour, nil // default 60m
|
||||
}
|
||||
|
||||
// Check if it's a time-of-day format (HH:MM)
|
||||
if strings.Contains(schedule, ":") {
|
||||
return parseTimeOfDay(schedule)
|
||||
}
|
||||
|
||||
// Otherwise treat as duration interval
|
||||
return parseDurationInterval(schedule)
|
||||
}
|
||||
|
||||
func parseTimeOfDay(schedule string) (time.Duration, error) {
|
||||
now := time.Now()
|
||||
scheduledTime, err := time.Parse("15:04", schedule)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("invalid time format: %s. Use HH:MM in 24-hour format", schedule)
|
||||
}
|
||||
|
||||
// Convert scheduled time to today
|
||||
scheduleToday := time.Date(
|
||||
now.Year(), now.Month(), now.Day(),
|
||||
scheduledTime.Hour(), scheduledTime.Minute(), 0, 0,
|
||||
now.Location(),
|
||||
)
|
||||
|
||||
if scheduleToday.Before(now) {
|
||||
scheduleToday = scheduleToday.Add(24 * time.Hour)
|
||||
}
|
||||
|
||||
return scheduleToday.Sub(now), nil
|
||||
}
|
||||
|
||||
func parseDurationInterval(interval string) (time.Duration, error) {
|
||||
if len(interval) < 2 {
|
||||
return 0, fmt.Errorf("invalid interval format: %s", interval)
|
||||
}
|
||||
|
||||
numStr := interval[:len(interval)-1]
|
||||
unit := interval[len(interval)-1]
|
||||
|
||||
num, err := strconv.Atoi(numStr)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("invalid number in interval: %s", numStr)
|
||||
}
|
||||
|
||||
switch unit {
|
||||
case 'm':
|
||||
return time.Duration(num) * time.Minute, nil
|
||||
case 'h':
|
||||
return time.Duration(num) * time.Hour, nil
|
||||
case 'd':
|
||||
return time.Duration(num) * 24 * time.Hour, nil
|
||||
case 's':
|
||||
return time.Duration(num) * time.Second, nil
|
||||
default:
|
||||
return 0, fmt.Errorf("invalid unit in interval: %c", unit)
|
||||
}
|
||||
}
|
||||
|
||||
func fileIsSymlinked(file string) bool {
|
||||
info, err := os.Lstat(file)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return info.Mode()&os.ModeSymlink != 0
|
||||
}
|
||||
|
||||
func getSymlinkTarget(file string) string {
|
||||
if fileIsSymlinked(file) {
|
||||
target, err := os.Readlink(file)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
if !filepath.IsAbs(target) {
|
||||
dir := filepath.Dir(file)
|
||||
target = filepath.Join(dir, target)
|
||||
}
|
||||
return target
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func fileIsReadable(filePath string) error {
|
||||
// First check if file exists and is accessible
|
||||
info, err := os.Stat(filePath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Check if it's a regular file
|
||||
if !info.Mode().IsRegular() {
|
||||
return fmt.Errorf("not a regular file")
|
||||
}
|
||||
|
||||
// Try to read the first 1024 bytes
|
||||
err = checkFileStart(filePath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func checkFileStart(filePath string) error {
|
||||
f, err := os.Open(filePath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
// Read first 1kb
|
||||
buffer := make([]byte, 1024)
|
||||
_, err = f.Read(buffer)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
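A minimal table-test sketch for the interval parser above; the time-of-day branch depends on the wall clock, so only the fixed-duration forms are asserted here:

package repair

import (
    "testing"
    "time"
)

func TestParseDurationInterval(t *testing.T) {
    cases := map[string]time.Duration{
        "30s": 30 * time.Second,
        "15m": 15 * time.Minute,
        "6h":  6 * time.Hour,
        "2d":  48 * time.Hour,
    }
    for in, want := range cases {
        got, err := parseDurationInterval(in)
        if err != nil {
            t.Fatalf("parseDurationInterval(%q) returned error: %v", in, err)
        }
        if got != want {
            t.Errorf("parseDurationInterval(%q) = %v, want %v", in, got, want)
        }
    }
}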
352
pkg/repair/repair.go
Normal file
@@ -0,0 +1,352 @@
|
||||
package repair
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"github.com/google/uuid"
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/sirrobot01/debrid-blackhole/internal/config"
|
||||
"github.com/sirrobot01/debrid-blackhole/internal/logger"
|
||||
"github.com/sirrobot01/debrid-blackhole/pkg/arr"
|
||||
"github.com/sirrobot01/debrid-blackhole/pkg/debrid/engine"
|
||||
"log"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"os/signal"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
"syscall"
|
||||
"time"
|
||||
)
|
||||
|
||||
type Repair struct {
|
||||
Jobs []Job `json:"jobs"`
|
||||
arrs *arr.Storage
|
||||
deb engine.Service
|
||||
duration time.Duration
|
||||
runOnStart bool
|
||||
ZurgURL string
|
||||
IsZurg bool
|
||||
logger zerolog.Logger
|
||||
}
|
||||
|
||||
func New(deb *engine.Engine, arrs *arr.Storage) *Repair {
|
||||
cfg := config.GetConfig()
|
||||
duration, err := parseSchedule(cfg.Repair.Interval)
|
||||
if err != nil {
|
||||
duration = time.Hour * 24
|
||||
}
|
||||
r := &Repair{
|
||||
arrs: arrs,
|
||||
deb: deb.Get(),
|
||||
logger: logger.NewLogger("repair", cfg.LogLevel, os.Stdout),
|
||||
duration: duration,
|
||||
runOnStart: cfg.Repair.RunOnStart,
|
||||
ZurgURL: cfg.Repair.ZurgURL,
|
||||
}
|
||||
if r.ZurgURL != "" {
|
||||
r.IsZurg = true
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
type Job struct {
|
||||
ID string `json:"id"`
|
||||
Arrs []*arr.Arr `json:"arrs"`
|
||||
MediaIDs []string `json:"media_ids"`
|
||||
StartedAt time.Time `json:"created_at"`
|
||||
CompletedAt time.Time `json:"finished_at"`
|
||||
FailedAt time.Time `json:"failed_at"`
|
||||
|
||||
Error string `json:"error"`
|
||||
}
|
||||
|
||||
func (r *Repair) NewJob(arrs []*arr.Arr, mediaIDs []string) *Job {
|
||||
return &Job{
|
||||
ID: uuid.New().String(),
|
||||
Arrs: arrs,
|
||||
MediaIDs: mediaIDs,
|
||||
StartedAt: time.Now(),
|
||||
}
|
||||
}
|
||||
|
||||
func (r *Repair) PreRunChecks() error {
|
||||
// Check if zurg url is reachable
|
||||
if !r.IsZurg {
|
||||
return nil
|
||||
}
|
||||
resp, err := http.Get(fmt.Sprintf("%s/http/version.txt", r.ZurgURL))
if err != nil {
r.logger.Debug().Err(err).Msgf("Precheck failed: Failed to reach zurg at %s", r.ZurgURL)
return err
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
r.logger.Debug().Msgf("Precheck failed: Zurg returned %d", resp.StatusCode)
return fmt.Errorf("zurg precheck failed: unexpected status %d", resp.StatusCode)
}
return nil
|
||||
}
|
||||
|
||||
func (r *Repair) Repair(arrs []*arr.Arr, mediaIds []string) error {
|
||||
|
||||
j := r.NewJob(arrs, mediaIds)
|
||||
|
||||
if err := r.PreRunChecks(); err != nil {
|
||||
return err
|
||||
}
|
||||
var wg sync.WaitGroup
|
||||
// Buffered so that worker goroutines never block on send; wg.Wait() runs before any receive
errors := make(chan error, len(j.Arrs)*(len(j.MediaIDs)+1))
|
||||
for _, a := range j.Arrs {
|
||||
wg.Add(1)
|
||||
go func(a *arr.Arr) {
|
||||
defer wg.Done()
|
||||
if len(j.MediaIDs) == 0 {
|
||||
if err := r.RepairArr(a, ""); err != nil {
|
||||
log.Printf("Error repairing %s: %v", a.Name, err)
|
||||
errors <- err
|
||||
}
|
||||
} else {
|
||||
for _, id := range j.MediaIDs {
|
||||
if err := r.RepairArr(a, id); err != nil {
|
||||
log.Printf("Error repairing %s: %v", a.Name, err)
|
||||
errors <- err
|
||||
}
|
||||
}
|
||||
}
|
||||
}(a)
|
||||
}
|
||||
wg.Wait()
|
||||
close(errors)
|
||||
err := <-errors
|
||||
if err != nil {
|
||||
j.FailedAt = time.Now()
|
||||
j.Error = err.Error()
|
||||
return err
|
||||
}
|
||||
j.CompletedAt = time.Now()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *Repair) Start(ctx context.Context) error {
|
||||
ctx, stop := signal.NotifyContext(ctx, os.Interrupt, syscall.SIGTERM)
|
||||
defer stop()
|
||||
cfg := config.GetConfig()
|
||||
|
||||
if r.runOnStart {
|
||||
r.logger.Info().Msgf("Running initial repair")
|
||||
go func() {
|
||||
if err := r.Repair(r.arrs.GetAll(), []string{}); err != nil {
|
||||
r.logger.Info().Msgf("Error during initial repair: %v", err)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
ticker := time.NewTicker(r.duration)
|
||||
defer ticker.Stop()
|
||||
|
||||
r.logger.Info().Msgf("Starting repair worker with %v interval", r.duration)
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
r.logger.Info().Msg("Repair worker stopped")
|
||||
return nil
|
||||
case t := <-ticker.C:
|
||||
r.logger.Info().Msgf("Running repair at %v", t.Format("15:04:05"))
|
||||
err := r.Repair(r.arrs.GetAll(), []string{})
|
||||
if err != nil {
|
||||
r.logger.Info().Msgf("Error during repair: %v", err)
|
||||
}
|
||||
|
||||
// A time-of-day schedule's initial duration is only the offset to the first run;
// after that, fire every 24 hours.
if strings.Contains(cfg.Repair.Interval, ":") {
r.duration = 24 * time.Hour
ticker.Reset(r.duration)
}

r.logger.Info().Msgf("Next scheduled repair at %v", t.Add(r.duration).Format("15:04:05"))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (r *Repair) RepairArr(a *arr.Arr, tmdbId string) error {
|
||||
|
||||
cfg := config.GetConfig()
|
||||
|
||||
r.logger.Info().Msgf("Starting repair for %s", a.Name)
|
||||
media, err := a.GetMedia(tmdbId)
|
||||
if err != nil {
|
||||
r.logger.Info().Msgf("Failed to get %s media: %v", a.Name, err)
|
||||
return err
|
||||
}
|
||||
r.logger.Info().Msgf("Found %d %s media", len(media), a.Name)
|
||||
|
||||
if len(media) == 0 {
|
||||
r.logger.Info().Msgf("No %s media found", a.Name)
|
||||
return nil
|
||||
}
|
||||
// Check first media to confirm mounts are accessible
|
||||
if !r.isMediaAccessible(media[0]) {
|
||||
r.logger.Info().Msgf("Skipping repair. Parent directory not accessible for. Check your mounts")
|
||||
return nil
|
||||
}
|
||||
|
||||
semaphore := make(chan struct{}, runtime.NumCPU()*4)
|
||||
totalBrokenItems := 0
var totalMu sync.Mutex // guards totalBrokenItems, which is updated from multiple goroutines
|
||||
var wg sync.WaitGroup
|
||||
for _, m := range media {
|
||||
wg.Add(1)
|
||||
semaphore <- struct{}{}
|
||||
go func(m arr.Content) {
|
||||
defer wg.Done()
|
||||
defer func() { <-semaphore }()
|
||||
brokenItems := r.getBrokenFiles(m)
|
||||
if brokenItems != nil {
|
||||
r.logger.Debug().Msgf("Found %d broken files for %s", len(brokenItems), m.Title)
|
||||
if !cfg.Repair.SkipDeletion {
|
||||
if err := a.DeleteFiles(brokenItems); err != nil {
|
||||
r.logger.Info().Msgf("Failed to delete broken items for %s: %v", m.Title, err)
|
||||
}
|
||||
}
|
||||
if err := a.SearchMissing(brokenItems); err != nil {
|
||||
r.logger.Info().Msgf("Failed to search missing items for %s: %v", m.Title, err)
|
||||
}
|
||||
totalMu.Lock()
totalBrokenItems += len(brokenItems)
totalMu.Unlock()
|
||||
}
|
||||
}(m)
|
||||
}
|
||||
wg.Wait()
|
||||
r.logger.Info().Msgf("Repair completed for %s. %d broken items found", a.Name, totalBrokenItems)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *Repair) isMediaAccessible(m arr.Content) bool {
|
||||
files := m.Files
|
||||
if len(files) == 0 {
|
||||
return false
|
||||
}
|
||||
firstFile := files[0]
|
||||
r.logger.Debug().Msgf("Checking parent directory for %s", firstFile.Path)
|
||||
if _, err := os.Stat(firstFile.Path); os.IsNotExist(err) {
|
||||
return false
|
||||
}
|
||||
// Check symlink parent directory
|
||||
symlinkPath := getSymlinkTarget(firstFile.Path)
|
||||
|
||||
r.logger.Debug().Msgf("Checking symlink parent directory for %s", symlinkPath)
|
||||
|
||||
if symlinkPath != "" {
|
||||
parentSymlink := filepath.Dir(filepath.Dir(symlinkPath)) // /mnt/zurg/torrents/movie/movie.mkv -> /mnt/zurg/torrents
|
||||
if _, err := os.Stat(parentSymlink); os.IsNotExist(err) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (r *Repair) getBrokenFiles(media arr.Content) []arr.ContentFile {
|
||||
|
||||
if r.IsZurg {
|
||||
return r.getZurgBrokenFiles(media)
|
||||
} else {
|
||||
return r.getFileBrokenFiles(media)
|
||||
}
|
||||
}
|
||||
|
||||
func (r *Repair) getFileBrokenFiles(media arr.Content) []arr.ContentFile {
|
||||
// Resolve each symlink target and try to read a small chunk of the file
|
||||
|
||||
brokenFiles := make([]arr.ContentFile, 0)
|
||||
|
||||
uniqueParents := make(map[string][]arr.ContentFile)
|
||||
files := media.Files
|
||||
for _, file := range files {
|
||||
target := getSymlinkTarget(file.Path)
|
||||
if target != "" {
|
||||
file.IsSymlink = true
|
||||
dir, _ := filepath.Split(target)
|
||||
parent := filepath.Base(filepath.Clean(dir))
|
||||
uniqueParents[parent] = append(uniqueParents[parent], file)
|
||||
}
|
||||
}
|
||||
|
||||
for parent, f := range uniqueParents {
|
||||
// Check stat
|
||||
// Check file stat first
|
||||
firstFile := f[0]
|
||||
// Read a tiny bit of the file
|
||||
if err := fileIsReadable(firstFile.Path); err != nil {
|
||||
r.logger.Debug().Msgf("Broken file found at: %s", parent)
|
||||
brokenFiles = append(brokenFiles, f...)
|
||||
continue
|
||||
}
|
||||
}
|
||||
if len(brokenFiles) == 0 {
|
||||
r.logger.Debug().Msgf("No broken files found for %s", media.Title)
|
||||
return nil
|
||||
}
|
||||
r.logger.Debug().Msgf("%d broken files found for %s", len(brokenFiles), media.Title)
|
||||
return brokenFiles
|
||||
}
|
||||
|
||||
func (r *Repair) getZurgBrokenFiles(media arr.Content) []arr.ContentFile {
|
||||
// Use zurg setup to check file availability with zurg
|
||||
// This reduces bandwidth usage significantly
|
||||
|
||||
brokenFiles := make([]arr.ContentFile, 0)
|
||||
uniqueParents := make(map[string][]arr.ContentFile)
|
||||
files := media.Files
|
||||
for _, file := range files {
|
||||
target := getSymlinkTarget(file.Path)
|
||||
if target != "" {
|
||||
file.IsSymlink = true
|
||||
dir, f := filepath.Split(target)
|
||||
parent := filepath.Base(filepath.Clean(dir))
|
||||
// Keep only the file name of the symlink target; it is joined with the encoded parent below
|
||||
file.TargetPath = f
|
||||
uniqueParents[parent] = append(uniqueParents[parent], file)
|
||||
}
|
||||
}
|
||||
// Access zurg url + symlink folder + first file(encoded)
|
||||
for parent, f := range uniqueParents {
|
||||
r.logger.Debug().Msgf("Checking %s", parent)
|
||||
encodedParent := url.PathEscape(parent)
|
||||
encodedFile := url.PathEscape(f[0].TargetPath)
|
||||
fullURL := fmt.Sprintf("%s/http/__all__/%s/%s", r.ZurgURL, encodedParent, encodedFile)
|
||||
// Check file stat first
|
||||
if _, err := os.Stat(f[0].Path); os.IsNotExist(err) {
|
||||
r.logger.Debug().Msgf("Broken symlink found: %s", fullURL)
|
||||
brokenFiles = append(brokenFiles, f...)
|
||||
continue
|
||||
}
|
||||
|
||||
resp, err := http.Get(fullURL)
|
||||
if err != nil {
|
||||
r.logger.Debug().Err(err).Msgf("Failed to reach %s", fullURL)
|
||||
brokenFiles = append(brokenFiles, f...)
|
||||
continue
|
||||
}
|
||||
resp.Body.Close()
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
r.logger.Debug().Msgf("Failed to get download url for %s", fullURL)
|
||||
brokenFiles = append(brokenFiles, f...)
|
||||
continue
|
||||
}
|
||||
downloadUrl := resp.Request.URL.String()
|
||||
if downloadUrl != "" {
|
||||
r.logger.Debug().Msgf("Found download url: %s", downloadUrl)
|
||||
} else {
|
||||
r.logger.Debug().Msgf("Failed to get download url for %s", fullURL)
|
||||
brokenFiles = append(brokenFiles, f...)
|
||||
continue
|
||||
}
|
||||
}
|
||||
if len(brokenFiles) == 0 {
|
||||
r.logger.Debug().Msgf("No broken files found for %s", media.Title)
|
||||
return nil
|
||||
}
|
||||
r.logger.Debug().Msgf("%d broken files found for %s", len(brokenFiles), media.Title)
|
||||
return brokenFiles
|
||||
}
|
||||
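For orientation, a minimal sketch of driving the repair worker on its own, using only constructors that appear elsewhere in this changeset and assuming the application config has already been loaded:

package main

import (
    "context"

    "github.com/sirrobot01/debrid-blackhole/pkg/arr"
    "github.com/sirrobot01/debrid-blackhole/pkg/debrid"
    "github.com/sirrobot01/debrid-blackhole/pkg/repair"
)

func main() {
    arrs := arr.NewStorage()
    deb := debrid.New()
    r := repair.New(deb, arrs)
    // Start blocks until the context is cancelled or an interrupt is received.
    if err := r.Start(context.Background()); err != nil {
        panic(err)
    }
}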
101
pkg/server/server.go
Normal file
@@ -0,0 +1,101 @@
|
||||
package server
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/go-chi/chi/v5"
|
||||
"github.com/go-chi/chi/v5/middleware"
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/sirrobot01/debrid-blackhole/internal/config"
|
||||
"github.com/sirrobot01/debrid-blackhole/internal/logger"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
type Server struct {
|
||||
router *chi.Mux
|
||||
logger zerolog.Logger
|
||||
}
|
||||
|
||||
func New() *Server {
|
||||
cfg := config.GetConfig()
|
||||
l := logger.NewLogger("http", cfg.QBitTorrent.LogLevel, os.Stdout)
|
||||
r := chi.NewRouter()
|
||||
r.Use(middleware.Recoverer)
|
||||
r.Handle("/static/*", http.StripPrefix("/static/", http.FileServer(http.Dir("static"))))
|
||||
|
||||
return &Server{
|
||||
router: r,
|
||||
logger: l,
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Server) Start(ctx context.Context) error {
|
||||
cfg := config.GetConfig()
|
||||
// Register routes
|
||||
s.router.Get("/logs", s.getLogs)
|
||||
port := fmt.Sprintf(":%s", cfg.QBitTorrent.Port)
|
||||
s.logger.Info().Msgf("Starting server on %s", port)
|
||||
srv := &http.Server{
|
||||
Addr: port,
|
||||
Handler: s.router,
|
||||
}
|
||||
|
||||
ctx, stop := signal.NotifyContext(ctx, os.Interrupt, syscall.SIGTERM)
|
||||
defer stop()
|
||||
|
||||
go func() {
|
||||
if err := srv.ListenAndServe(); err != nil && !errors.Is(err, http.ErrServerClosed) {
|
||||
s.logger.Info().Msgf("Error starting server: %v", err)
|
||||
stop()
|
||||
}
|
||||
}()
|
||||
|
||||
<-ctx.Done()
|
||||
s.logger.Info().Msg("Shutting down gracefully...")
|
||||
return srv.Shutdown(context.Background())
|
||||
}
|
||||
|
||||
func (s *Server) AddRoutes(routes func(r chi.Router) http.Handler) {
|
||||
routes(s.router)
|
||||
}
|
||||
|
||||
func (s *Server) Mount(pattern string, handler http.Handler) {
|
||||
s.router.Mount(pattern, handler)
|
||||
}
|
||||
|
||||
func (s *Server) getLogs(w http.ResponseWriter, r *http.Request) {
|
||||
logFile := logger.GetLogPath()
|
||||
|
||||
// Open and read the file
|
||||
file, err := os.Open(logFile)
|
||||
if err != nil {
|
||||
http.Error(w, "Error reading log file", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
defer func(file *os.File) {
|
||||
err := file.Close()
|
||||
if err != nil {
|
||||
s.logger.Debug().Err(err).Msg("Error closing log file")
|
||||
}
|
||||
}(file)
|
||||
|
||||
// Set headers
|
||||
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
|
||||
w.Header().Set("Content-Disposition", "inline; filename=application.log")
|
||||
w.Header().Set("Cache-Control", "no-cache, no-store, must-revalidate")
|
||||
w.Header().Set("Pragma", "no-cache")
|
||||
w.Header().Set("Expires", "0")
|
||||
|
||||
// Stream the file
|
||||
_, err = io.Copy(w, file)
|
||||
if err != nil {
|
||||
s.logger.Debug().Err(err).Msg("Error streaming log file")
|
||||
http.Error(w, "Error streaming log file", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
}
|
||||
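A compile-time sketch of how these pieces are meant to compose; constructing the *qbit.QBit and the package name "wiring" are assumptions, not part of this changeset:

package wiring

import (
    "context"
    "log"

    "github.com/sirrobot01/debrid-blackhole/pkg/qbit"
    "github.com/sirrobot01/debrid-blackhole/pkg/server"
    "github.com/sirrobot01/debrid-blackhole/pkg/web"
)

// runHTTP shows the wiring only; building the *qbit.QBit instance is out of scope here.
func runHTTP(ctx context.Context, q *qbit.QBit) {
    srv := server.New()
    srv.Mount("/", web.New(q).Routes())
    if err := srv.Start(ctx); err != nil {
        log.Fatal(err)
    }
}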
56
pkg/service/service.go
Normal file
@@ -0,0 +1,56 @@
|
||||
package service
|
||||
|
||||
import (
|
||||
"github.com/sirrobot01/debrid-blackhole/pkg/arr"
|
||||
"github.com/sirrobot01/debrid-blackhole/pkg/debrid"
|
||||
"github.com/sirrobot01/debrid-blackhole/pkg/debrid/engine"
|
||||
"github.com/sirrobot01/debrid-blackhole/pkg/repair"
|
||||
"sync"
|
||||
)
|
||||
|
||||
type Service struct {
|
||||
Repair *repair.Repair
|
||||
Arr *arr.Storage
|
||||
Debrid *engine.Engine
|
||||
}
|
||||
|
||||
var (
|
||||
instance *Service
|
||||
once sync.Once
|
||||
)
|
||||
|
||||
func New() *Service {
|
||||
once.Do(func() {
|
||||
arrs := arr.NewStorage()
|
||||
deb := debrid.New()
|
||||
instance = &Service{
|
||||
Repair: repair.New(deb, arrs),
|
||||
Arr: arrs,
|
||||
Debrid: deb,
|
||||
}
|
||||
})
|
||||
return instance
|
||||
}
|
||||
|
||||
// GetService returns the singleton instance
|
||||
func GetService() *Service {
|
||||
if instance == nil {
|
||||
instance = New()
|
||||
}
|
||||
return instance
|
||||
}
|
||||
|
||||
func Update() *Service {
|
||||
arrs := arr.NewStorage()
|
||||
deb := debrid.New()
|
||||
instance = &Service{
|
||||
Repair: repair.New(deb, arrs),
|
||||
Arr: arrs,
|
||||
Debrid: deb,
|
||||
}
|
||||
return instance
|
||||
}
|
||||
|
||||
func GetDebrid() *engine.Engine {
|
||||
return GetService().Debrid
|
||||
}
|
||||
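A minimal sketch of consuming the singleton from elsewhere in the application; it assumes New() has already run during startup (GetService falls back to New() otherwise):

package main

import (
    "fmt"

    "github.com/sirrobot01/debrid-blackhole/pkg/service"
)

func main() {
    svc := service.GetService()
    for _, a := range svc.Arr.GetAll() {
        fmt.Println("configured arr:", a.Name)
    }
}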
24
pkg/version/version.go
Normal file
@@ -0,0 +1,24 @@
|
||||
package version
|
||||
|
||||
import "fmt"
|
||||
|
||||
type Info struct {
|
||||
Version string `json:"version"`
|
||||
Channel string `json:"channel"`
|
||||
}
|
||||
|
||||
func (i Info) String() string {
|
||||
return fmt.Sprintf("%s-%s", i.Version, i.Channel)
|
||||
}
|
||||
|
||||
var (
|
||||
Version = ""
|
||||
Channel = ""
|
||||
)
|
||||
|
||||
func GetInfo() Info {
|
||||
return Info{
|
||||
Version: Version,
|
||||
Channel: Channel,
|
||||
}
|
||||
}
|
||||
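These two package-level variables are meant to be injected at build time (typically via -ldflags "-X .../pkg/version.Version=x.y.z"); a minimal sketch of reading them back:

package main

import (
    "fmt"

    "github.com/sirrobot01/debrid-blackhole/pkg/version"
)

func main() {
    // Version and Channel default to empty strings unless set at build time.
    fmt.Println(version.GetInfo().String()) // prints "<version>-<channel>"
}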
35
pkg/web/routes.go
Normal file
@@ -0,0 +1,35 @@
|
||||
package web
|
||||
|
||||
import (
|
||||
"github.com/go-chi/chi/v5"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
func (ui *Handler) Routes() http.Handler {
|
||||
r := chi.NewRouter()
|
||||
|
||||
r.Get("/login", ui.LoginHandler)
|
||||
r.Post("/login", ui.LoginHandler)
|
||||
r.Get("/setup", ui.SetupHandler)
|
||||
r.Post("/setup", ui.SetupHandler)
|
||||
|
||||
r.Group(func(r chi.Router) {
|
||||
r.Use(ui.authMiddleware)
|
||||
r.Get("/", ui.IndexHandler)
|
||||
r.Get("/download", ui.DownloadHandler)
|
||||
r.Get("/repair", ui.RepairHandler)
|
||||
r.Get("/config", ui.ConfigHandler)
|
||||
r.Route("/internal", func(r chi.Router) {
|
||||
r.Get("/arrs", ui.handleGetArrs)
|
||||
r.Post("/add", ui.handleAddContent)
|
||||
r.Post("/repair", ui.handleRepairMedia)
|
||||
r.Get("/torrents", ui.handleGetTorrents)
|
||||
r.Delete("/torrents/{category}/{hash}", ui.handleDeleteTorrent)
|
||||
r.Delete("/torrents/", ui.handleDeleteTorrents)
|
||||
r.Get("/config", ui.handleGetConfig)
|
||||
r.Get("/version", ui.handleGetVersion)
|
||||
})
|
||||
})
|
||||
|
||||
return r
|
||||
}
|
||||
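A minimal client-side sketch of the /internal/repair route registered above; the port is a placeholder, the category name is illustrative, and authentication is assumed to be disabled:

package main

import (
    "bytes"
    "encoding/json"
    "fmt"
    "net/http"
)

func main() {
    body, _ := json.Marshal(map[string]interface{}{
        "arr":      "sonarr",   // category name as configured in the arrs
        "mediaIds": []string{}, // empty means repair everything for that arr
        "async":    true,       // return immediately; repair runs in the background
    })
    resp, err := http.Post("http://localhost:8282/internal/repair", "application/json", bytes.NewReader(body))
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()
    fmt.Println(resp.Status)
}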
443
pkg/web/ui.go
Normal file
@@ -0,0 +1,443 @@
|
||||
package web
|
||||
|
||||
import (
|
||||
"embed"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"github.com/gorilla/sessions"
|
||||
"github.com/sirrobot01/debrid-blackhole/internal/config"
|
||||
"github.com/sirrobot01/debrid-blackhole/internal/logger"
|
||||
"github.com/sirrobot01/debrid-blackhole/internal/request"
|
||||
"github.com/sirrobot01/debrid-blackhole/internal/utils"
|
||||
"github.com/sirrobot01/debrid-blackhole/pkg/qbit"
|
||||
"github.com/sirrobot01/debrid-blackhole/pkg/service"
|
||||
"golang.org/x/crypto/bcrypt"
|
||||
"html/template"
|
||||
"net/http"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/go-chi/chi/v5"
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/sirrobot01/debrid-blackhole/pkg/arr"
|
||||
"github.com/sirrobot01/debrid-blackhole/pkg/version"
|
||||
)
|
||||
|
||||
type AddRequest struct {
|
||||
Url string `json:"url"`
|
||||
Arr string `json:"arr"`
|
||||
File string `json:"file"`
|
||||
NotSymlink bool `json:"notSymlink"`
|
||||
Content string `json:"content"`
|
||||
Seasons []string `json:"seasons"`
|
||||
Episodes []string `json:"episodes"`
|
||||
}
|
||||
|
||||
type ArrResponse struct {
|
||||
Name string `json:"name"`
|
||||
Url string `json:"url"`
|
||||
}
|
||||
|
||||
type ContentResponse struct {
|
||||
ID string `json:"id"`
|
||||
Title string `json:"title"`
|
||||
Type string `json:"type"`
|
||||
ArrID string `json:"arr"`
|
||||
}
|
||||
|
||||
type RepairRequest struct {
|
||||
ArrName string `json:"arr"`
|
||||
MediaIds []string `json:"mediaIds"`
|
||||
Async bool `json:"async"`
|
||||
}
|
||||
|
||||
//go:embed web/*
|
||||
var content embed.FS
|
||||
|
||||
type Handler struct {
|
||||
qbit *qbit.QBit
|
||||
logger zerolog.Logger
|
||||
}
|
||||
|
||||
func New(qbit *qbit.QBit) *Handler {
|
||||
cfg := config.GetConfig()
|
||||
return &Handler{
|
||||
qbit: qbit,
|
||||
logger: logger.NewLogger("ui", cfg.LogLevel, os.Stdout),
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
store = sessions.NewCookieStore([]byte("your-secret-key")) // Change this to a secure key
|
||||
templates *template.Template
|
||||
)
|
||||
|
||||
func init() {
|
||||
templates = template.Must(template.ParseFS(
|
||||
content,
|
||||
"web/layout.html",
|
||||
"web/index.html",
|
||||
"web/download.html",
|
||||
"web/repair.html",
|
||||
"web/config.html",
|
||||
"web/login.html",
|
||||
"web/setup.html",
|
||||
))
|
||||
|
||||
store.Options = &sessions.Options{
|
||||
Path: "/",
|
||||
MaxAge: 86400 * 7,
|
||||
HttpOnly: false,
|
||||
}
|
||||
}
|
||||
|
||||
func (ui *Handler) authMiddleware(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
// Check if setup is needed
|
||||
cfg := config.GetConfig()
|
||||
if cfg.NeedsSetup() && r.URL.Path != "/setup" {
|
||||
http.Redirect(w, r, "/setup", http.StatusSeeOther)
|
||||
return
|
||||
}
|
||||
|
||||
if !cfg.UseAuth {
|
||||
next.ServeHTTP(w, r)
|
||||
return
|
||||
}
|
||||
|
||||
// Skip auth check for setup page
|
||||
if r.URL.Path == "/setup" {
|
||||
next.ServeHTTP(w, r)
|
||||
return
|
||||
}
|
||||
|
||||
session, _ := store.Get(r, "auth-session")
|
||||
auth, ok := session.Values["authenticated"].(bool)
|
||||
|
||||
if !ok || !auth {
|
||||
http.Redirect(w, r, "/login", http.StatusSeeOther)
|
||||
return
|
||||
}
|
||||
|
||||
next.ServeHTTP(w, r)
|
||||
})
|
||||
}
|
||||
|
||||
func (ui *Handler) verifyAuth(username, password string) bool {
|
||||
// Passwords are stored bcrypt-hashed, so compare with bcrypt rather than plain equality
|
||||
if username == "" {
|
||||
return false
|
||||
}
|
||||
auth := config.GetConfig().GetAuth()
|
||||
if auth == nil {
|
||||
return false
|
||||
}
|
||||
if username != auth.Username {
|
||||
return false
|
||||
}
|
||||
err := bcrypt.CompareHashAndPassword([]byte(auth.Password), []byte(password))
|
||||
return err == nil
|
||||
}
|
||||
|
||||
func (ui *Handler) LoginHandler(w http.ResponseWriter, r *http.Request) {
|
||||
if r.Method == "GET" {
|
||||
data := map[string]interface{}{
|
||||
"Page": "login",
|
||||
"Title": "Login",
|
||||
}
|
||||
if err := templates.ExecuteTemplate(w, "layout", data); err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
var credentials struct {
|
||||
Username string `json:"username"`
|
||||
Password string `json:"password"`
|
||||
}
|
||||
|
||||
if err := json.NewDecoder(r.Body).Decode(&credentials); err != nil {
|
||||
http.Error(w, "Invalid request", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
if ui.verifyAuth(credentials.Username, credentials.Password) {
|
||||
session, _ := store.Get(r, "auth-session")
|
||||
session.Values["authenticated"] = true
|
||||
session.Values["username"] = credentials.Username
|
||||
if err := session.Save(r, w); err != nil {
|
||||
http.Error(w, "Error saving session", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
http.Redirect(w, r, "/", http.StatusSeeOther)
|
||||
return
|
||||
}
|
||||
|
||||
http.Error(w, "Invalid credentials", http.StatusUnauthorized)
|
||||
}
|
||||
|
||||
func (ui *Handler) LogoutHandler(w http.ResponseWriter, r *http.Request) {
|
||||
session, _ := store.Get(r, "auth-session")
|
||||
session.Values["authenticated"] = false
|
||||
session.Options.MaxAge = -1
|
||||
err := session.Save(r, w)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
http.Redirect(w, r, "/login", http.StatusSeeOther)
|
||||
}
|
||||
|
||||
func (ui *Handler) SetupHandler(w http.ResponseWriter, r *http.Request) {
|
||||
cfg := config.GetConfig()
|
||||
authCfg := cfg.GetAuth()
|
||||
|
||||
if !cfg.NeedsSetup() {
|
||||
http.Redirect(w, r, "/", http.StatusSeeOther)
|
||||
return
|
||||
}
|
||||
|
||||
if r.Method == "GET" {
|
||||
data := map[string]interface{}{
|
||||
"Page": "setup",
|
||||
"Title": "Setup",
|
||||
}
|
||||
if err := templates.ExecuteTemplate(w, "layout", data); err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Handle POST (setup attempt)
|
||||
username := r.FormValue("username")
|
||||
password := r.FormValue("password")
|
||||
confirmPassword := r.FormValue("confirmPassword")
|
||||
|
||||
if password != confirmPassword {
|
||||
http.Error(w, "Passwords do not match", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
// Hash the password
|
||||
hashedPassword, err := bcrypt.GenerateFromPassword([]byte(password), bcrypt.DefaultCost)
|
||||
if err != nil {
|
||||
http.Error(w, "Error processing password", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
// Set the credentials
|
||||
authCfg.Username = username
|
||||
authCfg.Password = string(hashedPassword)
|
||||
|
||||
if err := cfg.SaveAuth(authCfg); err != nil {
|
||||
http.Error(w, "Error saving credentials", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
// Create a session
|
||||
session, _ := store.Get(r, "auth-session")
|
||||
session.Values["authenticated"] = true
|
||||
session.Values["username"] = username
|
||||
if err := session.Save(r, w); err != nil {
|
||||
http.Error(w, "Error saving session", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
http.Redirect(w, r, "/", http.StatusSeeOther)
|
||||
}
|
||||
|
||||
func (ui *Handler) IndexHandler(w http.ResponseWriter, r *http.Request) {
|
||||
data := map[string]interface{}{
|
||||
"Page": "index",
|
||||
"Title": "Torrents",
|
||||
}
|
||||
if err := templates.ExecuteTemplate(w, "layout", data); err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func (ui *Handler) DownloadHandler(w http.ResponseWriter, r *http.Request) {
|
||||
data := map[string]interface{}{
|
||||
"Page": "download",
|
||||
"Title": "Download",
|
||||
}
|
||||
if err := templates.ExecuteTemplate(w, "layout", data); err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func (ui *Handler) RepairHandler(w http.ResponseWriter, r *http.Request) {
|
||||
data := map[string]interface{}{
|
||||
"Page": "repair",
|
||||
"Title": "Repair",
|
||||
}
|
||||
if err := templates.ExecuteTemplate(w, "layout", data); err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func (ui *Handler) ConfigHandler(w http.ResponseWriter, r *http.Request) {
|
||||
data := map[string]interface{}{
|
||||
"Page": "config",
|
||||
"Title": "Config",
|
||||
}
|
||||
if err := templates.ExecuteTemplate(w, "layout", data); err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func (ui *Handler) handleGetArrs(w http.ResponseWriter, r *http.Request) {
|
||||
svc := service.GetService()
|
||||
request.JSONResponse(w, svc.Arr.GetAll(), http.StatusOK)
|
||||
}
|
||||
|
||||
func (ui *Handler) handleAddContent(w http.ResponseWriter, r *http.Request) {
|
||||
if err := r.ParseMultipartForm(32 << 20); err != nil {
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
svc := service.GetService()
|
||||
|
||||
results := make([]*qbit.ImportRequest, 0)
|
||||
errs := make([]string, 0)
|
||||
|
||||
arrName := r.FormValue("arr")
|
||||
notSymlink := r.FormValue("notSymlink") == "true"
|
||||
|
||||
_arr := svc.Arr.Get(arrName)
|
||||
if _arr == nil {
|
||||
_arr = arr.New(arrName, "", "", false)
|
||||
}
|
||||
|
||||
// Handle URLs
|
||||
if urls := r.FormValue("urls"); urls != "" {
|
||||
var urlList []string
|
||||
for _, u := range strings.Split(urls, "\n") {
|
||||
if trimmed := strings.TrimSpace(u); trimmed != "" {
|
||||
urlList = append(urlList, trimmed)
|
||||
}
|
||||
}
|
||||
|
||||
for _, url := range urlList {
|
||||
importReq := qbit.NewImportRequest(url, _arr, !notSymlink)
|
||||
err := importReq.Process(ui.qbit)
|
||||
if err != nil {
|
||||
errs = append(errs, fmt.Sprintf("URL %s: %v", url, err))
|
||||
continue
|
||||
}
|
||||
results = append(results, importReq)
|
||||
}
|
||||
}
|
||||
|
||||
// Handle torrent/magnet files
|
||||
if files := r.MultipartForm.File["files"]; len(files) > 0 {
|
||||
for _, fileHeader := range files {
|
||||
file, err := fileHeader.Open()
|
||||
if err != nil {
|
||||
errs = append(errs, fmt.Sprintf("Failed to open file %s: %v", fileHeader.Filename, err))
|
||||
continue
|
||||
}
|
||||
|
||||
magnet, err := utils.GetMagnetFromFile(file, fileHeader.Filename)
// Close the uploaded file as soon as it has been parsed so handles are not leaked
if closeErr := file.Close(); closeErr != nil {
ui.logger.Debug().Err(closeErr).Msgf("Failed to close uploaded file %s", fileHeader.Filename)
}
if err != nil {
errs = append(errs, fmt.Sprintf("Failed to parse torrent file %s: %v", fileHeader.Filename, err))
continue
}
|
||||
|
||||
importReq := qbit.NewImportRequest(magnet.Link, _arr, !notSymlink)
|
||||
err = importReq.Process(ui.qbit)
|
||||
if err != nil {
|
||||
errs = append(errs, fmt.Sprintf("File %s: %v", fileHeader.Filename, err))
|
||||
continue
|
||||
}
|
||||
results = append(results, importReq)
|
||||
}
|
||||
}
|
||||
|
||||
request.JSONResponse(w, struct {
|
||||
Results []*qbit.ImportRequest `json:"results"`
|
||||
Errors []string `json:"errors,omitempty"`
|
||||
}{
|
||||
Results: results,
|
||||
Errors: errs,
|
||||
}, http.StatusOK)
|
||||
}
|
||||
|
||||
func (ui *Handler) handleRepairMedia(w http.ResponseWriter, r *http.Request) {
|
||||
var req RepairRequest
|
||||
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
svc := service.GetService()
|
||||
|
||||
_arr := svc.Arr.Get(req.ArrName)
|
||||
if _arr == nil {
|
||||
http.Error(w, "No Arrs found to repair", http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
|
||||
if req.Async {
|
||||
go func() {
|
||||
if err := svc.Repair.Repair([]*arr.Arr{_arr}, req.MediaIds); err != nil {
|
||||
ui.logger.Error().Err(err).Msg("Failed to repair media")
|
||||
}
|
||||
}()
|
||||
request.JSONResponse(w, "Repair process started", http.StatusOK)
|
||||
return
|
||||
}
|
||||
|
||||
if err := svc.Repair.Repair([]*arr.Arr{_arr}, req.MediaIds); err != nil {
|
||||
http.Error(w, fmt.Sprintf("Failed to repair: %v", err), http.StatusInternalServerError)
|
||||
return
|
||||
|
||||
}
|
||||
|
||||
request.JSONResponse(w, "Repair completed", http.StatusOK)
|
||||
}
|
||||
|
||||
func (ui *Handler) handleGetVersion(w http.ResponseWriter, r *http.Request) {
|
||||
v := version.GetInfo()
|
||||
request.JSONResponse(w, v, http.StatusOK)
|
||||
}
|
||||
|
||||
func (ui *Handler) handleGetTorrents(w http.ResponseWriter, r *http.Request) {
|
||||
request.JSONResponse(w, ui.qbit.Storage.GetAll("", "", nil), http.StatusOK)
|
||||
}
|
||||
|
||||
func (ui *Handler) handleDeleteTorrent(w http.ResponseWriter, r *http.Request) {
|
||||
hash := chi.URLParam(r, "hash")
|
||||
category := r.URL.Query().Get("category")
|
||||
if hash == "" {
|
||||
http.Error(w, "No hash provided", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
ui.qbit.Storage.Delete(hash, category)
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}
|
||||
|
||||
func (ui *Handler) handleDeleteTorrents(w http.ResponseWriter, r *http.Request) {
|
||||
hashesStr := r.URL.Query().Get("hashes")
|
||||
if hashesStr == "" {
|
||||
http.Error(w, "No hashes provided", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
hashes := strings.Split(hashesStr, ",")
|
||||
ui.qbit.Storage.DeleteMultiple(hashes)
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}
|
||||
|
||||
func (ui *Handler) handleGetConfig(w http.ResponseWriter, r *http.Request) {
|
||||
cfg := config.GetConfig()
|
||||
arrCfgs := make([]config.Arr, 0)
|
||||
svc := service.GetService()
|
||||
for _, a := range svc.Arr.GetAll() {
|
||||
arrCfgs = append(arrCfgs, config.Arr{Host: a.Host, Name: a.Name, Token: a.Token, Cleanup: a.Cleanup})
|
||||
}
|
||||
cfg.Arrs = arrCfgs
|
||||
request.JSONResponse(w, cfg, http.StatusOK)
|
||||
}
|
||||
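The cookie store above ships with a hard-coded placeholder secret. A minimal sketch of deriving the key from the environment, falling back to a random key at startup (gorilla/securecookie provides the generator; the SESSION_SECRET variable name is an assumption):

package main

import (
    "fmt"
    "os"

    "github.com/gorilla/securecookie"
    "github.com/gorilla/sessions"
)

func main() {
    // Prefer a key from the environment so sessions survive restarts;
    // otherwise generate a random 32-byte key at startup.
    key := []byte(os.Getenv("SESSION_SECRET"))
    if len(key) == 0 {
        key = securecookie.GenerateRandomKey(32)
    }
    store := sessions.NewCookieStore(key)
    fmt.Printf("cookie store ready (key length %d)\n", len(key))
    _ = store
}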
409
pkg/web/web/config.html
Normal file
@@ -0,0 +1,409 @@
|
||||
{{ define "config" }}
|
||||
<div class="container mt-4">
|
||||
<div class="card">
|
||||
<div class="card-header">
|
||||
<h4 class="mb-0"><i class="bi bi-gear me-2"></i>Configuration</h4>
|
||||
</div>
|
||||
<div class="card-body">
|
||||
<form id="configForm">
|
||||
<div class="section mb-5">
|
||||
<h5 class="border-bottom pb-2">General Configuration</h5>
|
||||
<div class="row">
|
||||
<div class="col-md-6">
|
||||
<div class="form-group">
|
||||
<label for="qbitDebug">Log Level</label>
|
||||
<select class="form-select" name="qbit.log_level" id="log-level" disabled>
|
||||
<option value="info">Info</option>
|
||||
<option value="debug">Debug</option>
|
||||
<option value="warn">Warning</option>
|
||||
<option value="error">Error</option>
|
||||
<option value="trace">Trace</option>
|
||||
</select>
|
||||
</div>
|
||||
</div>
|
||||
<!-- Register Magnet Link Button -->
|
||||
<div class="col-md-6">
|
||||
<label>
|
||||
<!-- Empty label to keep the button aligned -->
|
||||
</label>
|
||||
<div class="btn btn-primary w-100" onclick="registerMagnetLinkHandler()" id="registerMagnetLink">
|
||||
Open Magnet Links in DecyphArr
|
||||
</div>
|
||||
</div>
|
||||
<div class="col-12 mt-3">
|
||||
<div class="form-group">
|
||||
<label for="allowedExtensions">Allowed File Extensions</label>
|
||||
<div class="input-group">
|
||||
<textarea type="text"
|
||||
class="form-control"
|
||||
id="allowedExtensions"
|
||||
name="allowed_file_types"
|
||||
disabled
|
||||
placeholder="mkv, mp4, avi, etc.">
|
||||
</textarea>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="col-md-6 mt-3">
|
||||
<div class="form-group">
|
||||
<label for="minFileSize">Minimum File Size</label>
|
||||
<input type="text"
|
||||
class="form-control"
|
||||
id="minFileSize"
|
||||
name="min_file_size"
|
||||
disabled
|
||||
placeholder="e.g., 10MB, 1GB">
|
||||
<small class="form-text text-muted">Minimum file size to download (0 for no limit)</small>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="col-md-6 mt-3">
|
||||
<div class="form-group">
|
||||
<label for="maxFileSize">Maximum File Size</label>
|
||||
<input type="text"
|
||||
class="form-control"
|
||||
id="maxFileSize"
|
||||
name="max_file_size"
|
||||
disabled
|
||||
placeholder="e.g., 50GB, 100MB">
|
||||
<small class="form-text text-muted">Maximum file size to download (0 for no limit)</small>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<!-- Debrid Configuration -->
|
||||
<div class="section mb-5">
|
||||
<h5 class="border-bottom pb-2">Debrid Configuration</h5>
|
||||
<div id="debridConfigs"></div>
|
||||
</div>
|
||||
|
||||
<!-- QBitTorrent Configuration -->
|
||||
<div class="section mb-5">
|
||||
<h5 class="border-bottom pb-2">QBitTorrent Configuration</h5>
|
||||
<div class="row">
|
||||
<div class="col-md-6 mb-3">
|
||||
<label class="form-label">Username</label>
|
||||
<input type="text" disabled class="form-control" name="qbit.username">
|
||||
</div>
|
||||
<div class="col-md-6 mb-3">
|
||||
<label class="form-label">Password</label>
|
||||
<input type="password" disabled class="form-control" name="qbit.password">
|
||||
</div>
|
||||
<div class="col-md-6 mb-3">
|
||||
<label class="form-label">Port</label>
|
||||
<input type="text" disabled class="form-control" name="qbit.port">
|
||||
</div>
|
||||
<div class="col-md-6 mb-3">
|
||||
<label class="form-label">Symlink/Download Folder</label>
|
||||
<input type="text" disabled class="form-control" name="qbit.download_folder">
|
||||
</div>
|
||||
<div class="col-md-6 mb-3">
|
||||
<label class="form-label">Refresh Interval (seconds)</label>
|
||||
<input type="number" class="form-control" name="qbit.refresh_interval">
|
||||
</div>
|
||||
<div class="col-12 mb-3">
|
||||
<div class="form-group">
|
||||
<label for="qbitDebug">Log Level</label>
|
||||
<select class="form-select" name="qbit.log_level" id="qbitDebug" disabled>
|
||||
<option value="info">Info</option>
|
||||
<option value="debug">Debug</option>
|
||||
<option value="warn">Warning</option>
|
||||
<option value="error">Error</option>
|
||||
<option value="trace">Trace</option>
|
||||
</select>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Arr Configurations -->
|
||||
<div class="section mb-5">
|
||||
<h5 class="border-bottom pb-2">Arr Configurations</h5>
|
||||
<div id="arrConfigs"></div>
|
||||
</div>
|
||||
|
||||
<!-- Repair Configuration -->
|
||||
<div class="section mb-5">
|
||||
<h5 class="border-bottom pb-2">Repair Configuration</h5>
|
||||
<div class="row">
|
||||
<div class="col-md-3 mb-3">
|
||||
<label class="form-label">Interval</label>
|
||||
<input type="text" disabled class="form-control" name="repair.interval" placeholder="e.g., 24h">
|
||||
</div>
|
||||
<div class="col-md-4 mb-3">
|
||||
<label class="form-label">Zurg URL</label>
|
||||
<input type="text" disabled class="form-control" name="repair.zurg_url" placeholder="http://zurg:9999">
|
||||
</div>
|
||||
</div>
|
||||
<div class="col-12">
|
||||
<div class="form-check me-3 d-inline-block">
|
||||
<input type="checkbox" disabled class="form-check-input" name="repair.enabled" id="repairEnabled">
|
||||
<label class="form-check-label" for="repairEnabled">Enable Repair</label>
|
||||
</div>
|
||||
<div class="form-check me-3 d-inline-block">
|
||||
<input type="checkbox" disabled class="form-check-input" name="repair.run_on_start" id="repairOnStart">
|
||||
<label class="form-check-label" for="repairOnStart">Run on Start</label>
|
||||
</div>
|
||||
<div class="form-check d-inline-block">
|
||||
<input type="checkbox" disabled class="form-check-input" name="repair.skip_deletion" id="skipDeletion">
|
||||
<label class="form-check-label" for="skipDeletion">Run on Start</label>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</form>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<script>
|
||||
// Templates for dynamic elements
|
||||
const debridTemplate = (index) => `
|
||||
<div class="config-item position-relative mb-3 p-3 border rounded">
|
||||
<div class="row">
|
||||
<div class="col-md-6 mb-3">
|
||||
<label class="form-label">Name</label>
|
||||
<input type="text" disabled class="form-control" name="debrid[${index}].name" required>
|
||||
</div>
|
||||
<div class="col-md-6 mb-3">
|
||||
<label class="form-label">Host</label>
|
||||
<input type="text" disabled class="form-control" name="debrid[${index}].host" required>
|
||||
</div>
|
||||
<div class="col-md-6 mb-3">
|
||||
<label class="form-label">API Key</label>
|
||||
<input type="password" disabled class="form-control" name="debrid[${index}].api_key" required>
|
||||
</div>
|
||||
<div class="col-md-6 mb-3">
|
||||
<label class="form-label">Mount Folder</label>
|
||||
<input type="text" disabled class="form-control" name="debrid[${index}].folder">
|
||||
</div>
|
||||
<div class="col-md-6 mb-3">
|
||||
<label class="form-label">Rate Limit</label>
|
||||
<input type="text" disabled class="form-control" name="debrid[${index}].rate_limit" placeholder="e.g., 200/minute">
|
||||
</div>
|
||||
<div class="col-12">
|
||||
<div class="form-check me-3 d-inline-block">
|
||||
<input type="checkbox" disabled class="form-check-input" name="debrid[${index}].download_uncached">
|
||||
<label class="form-check-label">Download Uncached</label>
|
||||
</div>
|
||||
<div class="form-check d-inline-block">
|
||||
<input type="checkbox" disabled class="form-check-input" name="debrid[${index}].check_cached">
|
||||
<label class="form-check-label">Check Cached</label>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
`;
|
||||
|
||||
const arrTemplate = (index) => `
|
||||
<div class="config-item position-relative mb-3 p-3 border rounded">
|
||||
<div class="row">
|
||||
<div class="col-md-4 mb-3">
|
||||
<label class="form-label">Name</label>
|
||||
<input type="text" disabled class="form-control" name="arr[${index}].name" required>
|
||||
</div>
|
||||
<div class="col-md-4 mb-3">
|
||||
<label class="form-label">Host</label>
|
||||
<input type="text" disabled class="form-control" name="arr[${index}].host" required>
|
||||
</div>
|
||||
<div class="col-md-4 mb-3">
|
||||
<label class="form-label">API Token</label>
|
||||
<input type="password" disabled class="form-control" name="arr[${index}].token" required>
|
||||
</div>
|
||||
</div>
|
||||
<div class="row">
|
||||
<div class="col-md-2 mb-3">
|
||||
<div class="form-check">
|
||||
<label class="form-check-label" for="repairOnStart">Cleanup Queue</label>
|
||||
<input type="checkbox" disabled class="form-check-input" name="arr[${index}].cleanup">
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
`;
|
||||
|
||||
// Main functionality
|
||||
document.addEventListener('DOMContentLoaded', function() {
|
||||
let debridCount = 0;
|
||||
let arrCount = 0;
|
||||
|
||||
// Load existing configuration
|
||||
fetch('/internal/config')
|
||||
.then(response => response.json())
|
||||
.then(config => {
|
||||
// Load Debrid configs
|
||||
config.debrids?.forEach(debrid => {
|
||||
addDebridConfig(debrid);
|
||||
});
|
||||
|
||||
// Load QBitTorrent config
|
||||
if (config.qbittorrent) {
|
||||
Object.entries(config.qbittorrent).forEach(([key, value]) => {
|
||||
const input = document.querySelector(`[name="qbit.${key}"]`);
|
||||
if (input) {
|
||||
if (input.type === 'checkbox') {
|
||||
input.checked = value;
|
||||
} else {
|
||||
input.value = value;
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
// Load Arr configs
|
||||
config.arrs?.forEach(arr => {
|
||||
addArrConfig(arr);
|
||||
});
|
||||
|
||||
// Load Repair config
|
||||
if (config.repair) {
|
||||
Object.entries(config.repair).forEach(([key, value]) => {
|
||||
const input = document.querySelector(`[name="repair.${key}"]`);
|
||||
if (input) {
|
||||
if (input.type === 'checkbox') {
|
||||
input.checked = value;
|
||||
} else {
|
||||
input.value = value;
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
// Load general config
|
||||
|
||||
const logLevel = document.getElementById('log-level');
|
||||
logLevel.value = config.log_level;
|
||||
if (config.allowed_file_types && Array.isArray(config.allowed_file_types)) {
|
||||
document.querySelector('[name="allowed_file_types"]').value = config.allowed_file_types.join(', ');
|
||||
}
|
||||
if (config.min_file_size) {
|
||||
document.querySelector('[name="min_file_size"]').value = config.min_file_size;
|
||||
}
|
||||
if (config.max_file_size) {
|
||||
document.querySelector('[name="max_file_size"]').value = config.max_file_size;
|
||||
}
|
||||
|
||||
});
|
||||
|
||||
// Handle form submission
|
||||
document.getElementById('configForm').addEventListener('submit', async (e) => {
|
||||
e.preventDefault();
|
||||
const formData = new FormData(e.target);
|
||||
const config = {
|
||||
debrids: [],
|
||||
qbittorrent: {},
|
||||
arrs: [],
|
||||
repair: {}
|
||||
};
|
||||
|
||||
// Process form data
|
||||
for (let [key, value] of formData.entries()) {
|
||||
if (key.startsWith('debrid[')) {
|
||||
const match = key.match(/debrid\[(\d+)\]\.(.+)/);
|
||||
if (match) {
|
||||
const [_, index, field] = match;
|
||||
if (!config.debrids[index]) config.debrids[index] = {};
|
||||
config.debrids[index][field] = value;
|
||||
}
|
||||
} else if (key.startsWith('qbit.')) {
|
||||
config.qbittorrent[key.replace('qbit.', '')] = value;
|
||||
} else if (key.startsWith('arr[')) {
|
||||
const match = key.match(/arr\[(\d+)\]\.(.+)/);
|
||||
if (match) {
|
||||
const [_, index, field] = match;
|
||||
if (!config.arrs[index]) config.arrs[index] = {};
|
||||
config.arrs[index][field] = value;
|
||||
}
|
||||
} else if (key.startsWith('repair.')) {
|
||||
config.repair[key.replace('repair.', '')] = value;
|
||||
}
|
||||
}
|
||||
|
||||
// Clean up arrays (remove empty entries)
|
||||
config.debrids = config.debrids.filter(Boolean);
|
||||
config.arrs = config.arrs.filter(Boolean);
|
||||
|
||||
try {
|
||||
const response = await fetch('/internal/config', {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json'
|
||||
},
|
||||
body: JSON.stringify(config)
|
||||
});
|
||||
|
||||
if (!response.ok) throw new Error(await response.text());
|
||||
|
||||
createToast('Configuration saved successfully!');
|
||||
} catch (error) {
|
||||
createToast(`Error saving configuration: ${error.message}`, 'error');
|
||||
}
|
||||
});
|
||||
|
||||
// Helper functions
|
||||
function addDebridConfig(data = {}) {
|
||||
const container = document.getElementById('debridConfigs');
|
||||
container.insertAdjacentHTML('beforeend', debridTemplate(debridCount));
|
||||
|
||||
if (data) {
|
||||
Object.entries(data).forEach(([key, value]) => {
|
||||
const input = container.querySelector(`[name="debrid[${debridCount}].${key}"]`);
|
||||
if (input) {
|
||||
if (input.type === 'checkbox') {
|
||||
input.checked = value;
|
||||
} else {
|
||||
input.value = value;
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
debridCount++;
|
||||
}
|
||||
|
||||
function addArrConfig(data = {}) {
|
||||
const container = document.getElementById('arrConfigs');
|
||||
container.insertAdjacentHTML('beforeend', arrTemplate(arrCount));
|
||||
|
||||
if (data) {
|
||||
Object.entries(data).forEach(([key, value]) => {
|
||||
const input = container.querySelector(`[name="arr[${arrCount}].${key}"]`);
|
||||
if (input) {
|
||||
if (input.type === 'checkbox') {
|
||||
input.checked = value;
|
||||
} else {
|
||||
input.value = value;
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
arrCount++;
|
||||
}
|
||||
});
|
||||
|
||||
|
||||
// Register magnet link handler
|
||||
function registerMagnetLinkHandler() {
|
||||
if ('registerProtocolHandler' in navigator) {
|
||||
try {
|
||||
navigator.registerProtocolHandler(
|
||||
'magnet',
|
||||
`${window.location.origin}/download?magnet=%s`,
|
||||
'DecyphArr'
|
||||
);
|
||||
localStorage.setItem('magnetHandler', 'true');
|
||||
document.getElementById('registerMagnetLink').innerText = '✅ DecyphArr Can Open Magnet Links';
|
||||
document.getElementById('registerMagnetLink').classList.add('bg-white', 'text-black');
|
||||
console.log('Registered magnet link handler successfully.');
|
||||
} catch (error) {
|
||||
console.error('Failed to register magnet link handler:', error);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var magnetHandler = localStorage.getItem('magnetHandler');
|
||||
if (magnetHandler === 'true') {
|
||||
document.getElementById('registerMagnetLink').innerText = '✅ DecyphArr Can Open Magnet Links';
|
||||
document.getElementById('registerMagnetLink').classList.add('bg-white', 'text-black');
|
||||
}
|
||||
</script>
|
||||
{{ end }}
|
||||
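The form above serializes everything into a single JSON document before POSTing it to /internal/config. Purely to illustrate that payload's shape, a hypothetical decoder (not part of this changeset; form values arrive as strings) could look like this:

package sketch

import (
    "encoding/json"
    "net/http"
)

// configPayload mirrors the JSON the config page builds from its form fields.
type configPayload struct {
    Debrids     []map[string]string `json:"debrids"`
    QBitTorrent map[string]string   `json:"qbittorrent"`
    Arrs        []map[string]string `json:"arrs"`
    Repair      map[string]string   `json:"repair"`
}

func decodeConfigPayload(r *http.Request) (*configPayload, error) {
    var p configPayload
    if err := json.NewDecoder(r.Body).Decode(&p); err != nil {
        return nil, err
    }
    return &p, nil
}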
141
pkg/web/web/download.html
Normal file
@@ -0,0 +1,141 @@
|
||||
{{ define "download" }}
|
||||
<div class="container mt-4">
|
||||
<div class="card">
|
||||
<div class="card-header">
|
||||
<h4 class="mb-0"><i class="bi bi-cloud-download me-2"></i>Add New Download</h4>
|
||||
</div>
|
||||
<div class="card-body">
|
||||
<form id="downloadForm" enctype="multipart/form-data">
|
||||
<div class="mb-2">
|
||||
<label for="magnetURI" class="form-label">Torrent(s)</label>
|
||||
<textarea class="form-control" id="magnetURI" name="urls" rows="8" placeholder="Paste your magnet links or torrent URLs here, one per line..."></textarea>
|
||||
</div>
|
||||
|
||||
<div class="mb-3">
|
||||
<input type="file" class="form-control" id="torrentFiles" name="torrents" multiple accept=".torrent,.magnet">
|
||||
</div>
|
||||
|
||||
<hr />
|
||||
|
||||
<div class="mb-3">
|
||||
<label for="category" class="form-label">Enter Category</label>
|
||||
<input type="text" class="form-control" id="category" name="arr" placeholder="Enter Category (e.g sonarr, radarr, radarr4k)">
|
||||
</div>
|
||||
|
||||
<div class="mb-3">
|
||||
<div class="form-check">
|
||||
<input class="form-check-input" type="checkbox" id="isSymlink" name="notSymlink">
|
||||
<label class="form-check-label" for="isSymlink">
|
||||
Download real files instead of symlinks
|
||||
</label>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<button type="submit" class="btn btn-primary" id="submitDownload">
|
||||
<i class="bi bi-cloud-upload me-2"></i>Add to Download Queue
|
||||
</button>
|
||||
</form>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<script>
|
||||
document.addEventListener('DOMContentLoaded', () => {
|
||||
const loadSavedDownloadOptions = () => {
|
||||
const savedCategory = localStorage.getItem('downloadCategory');
|
||||
const savedSymlink = localStorage.getItem('downloadSymlink');
|
||||
document.getElementById('category').value = savedCategory || '';
|
||||
document.getElementById('isSymlink').checked = savedSymlink === 'true'
|
||||
};
|
||||
|
||||
const saveCurrentDownloadOptions = () => {
|
||||
const category = document.getElementById('category').value;
|
||||
const isSymlink = document.getElementById('isSymlink').checked;
|
||||
localStorage.setItem('downloadCategory', category);
|
||||
localStorage.setItem('downloadSymlink', isSymlink.toString());
|
||||
};
|
||||
|
||||
// Load the last used download options from local storage
|
||||
loadSavedDownloadOptions();
|
||||
|
||||
// Handle form submission
|
||||
document.getElementById('downloadForm').addEventListener('submit', async (e) => {
|
||||
e.preventDefault();
|
||||
const submitBtn = document.getElementById('submitDownload');
|
||||
const originalText = submitBtn.innerHTML;
|
||||
|
||||
submitBtn.disabled = true;
|
||||
submitBtn.innerHTML = '<span class="spinner-border spinner-border-sm me-2"></span>Adding...';
|
||||
|
||||
try {
|
||||
const formData = new FormData();
|
||||
|
||||
// Add URLs if present
|
||||
const urls = document.getElementById('magnetURI').value
|
||||
.split('\n')
|
||||
.map(url => url.trim())
|
||||
.filter(url => url.length > 0);
|
||||
|
||||
if (urls.length > 0) {
|
||||
formData.append('urls', urls.join('\n'));
|
||||
}
|
||||
|
||||
// Add torrent files if present
|
||||
const fileInput = document.getElementById('torrentFiles');
|
||||
for (let i = 0; i < fileInput.files.length; i++) {
|
||||
formData.append('files', fileInput.files[i]);
|
||||
}
|
||||
|
||||
if (urls.length + fileInput.files.length === 0) {
|
||||
createToast('Please submit at least one torrent', 'warning');
|
||||
return;
|
||||
}
|
||||
|
||||
if (urls.length + fileInput.files.length > 100) {
|
||||
createToast('Please submit up to 100 torrents at a time', 'warning');
|
||||
return;
|
||||
}
|
||||
|
||||
formData.append('arr', document.getElementById('category').value);
|
||||
formData.append('notSymlink', document.getElementById('isSymlink').checked);
|
||||
|
||||
const response = await fetch('/internal/add', {
|
||||
method: 'POST',
|
||||
body: formData
|
||||
});
|
||||
|
||||
const result = await response.json();
|
||||
if (!response.ok) throw new Error(result.error || 'Unknown error');
|
||||
if (result.errors && result.errors.length > 0) {
|
||||
if (result.results.length > 0) {
|
||||
createToast(`Added ${result.results.length} torrents with ${result.errors.length} errors:\n${result.errors.join('\n')}`, 'warning');
|
||||
} else {
|
||||
createToast(`Failed to add torrents:\n${result.errors.join('\n')}`, 'error');
|
||||
}
|
||||
} else {
|
||||
createToast(`Successfully added ${result.results.length} torrents!`);
|
||||
document.getElementById('magnetURI').value = '';
|
||||
document.getElementById('torrentFiles').value = '';
|
||||
}
|
||||
} catch (error) {
|
||||
createToast(`Error adding downloads: ${error.message}`, 'error');
|
||||
} finally {
|
||||
submitBtn.disabled = false;
|
||||
submitBtn.innerHTML = originalText;
|
||||
}
|
||||
});
|
||||
|
||||
// Save the download options to local storage when they change
|
||||
document.getElementById('category').addEventListener('change', saveCurrentDownloadOptions);
|
||||
document.getElementById('isSymlink').addEventListener('change', saveCurrentDownloadOptions);
|
||||
|
||||
// Read the URL parameters for a magnet link and add it to the download queue if found
|
||||
const urlParams = new URLSearchParams(window.location.search);
|
||||
const magnetURI = urlParams.get('magnet');
|
||||
if (magnetURI) {
|
||||
document.getElementById('magnetURI').value = magnetURI;
|
||||
history.replaceState({}, document.title, window.location.pathname);
|
||||
}
|
||||
});
|
||||
</script>
|
||||
{{ end }}
|
||||
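What the download form submits can be reproduced with a plain multipart request; in this sketch the host, port and magnet link are placeholders and authentication is assumed to be disabled:

package main

import (
    "bytes"
    "fmt"
    "mime/multipart"
    "net/http"
)

func main() {
    var buf bytes.Buffer
    w := multipart.NewWriter(&buf)
    _ = w.WriteField("urls", "magnet:?xt=urn:btih:...") // placeholder magnet link
    _ = w.WriteField("arr", "radarr")                   // category, as in the form
    _ = w.WriteField("notSymlink", "false")             // symlink mode (the default)
    _ = w.Close()

    resp, err := http.Post("http://localhost:8282/internal/add", w.FormDataContentType(), &buf)
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()
    fmt.Println(resp.Status)
}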
246
pkg/web/web/index.html
Normal file
@@ -0,0 +1,246 @@
|
||||
{{ define "index" }}
|
||||
<div class="container mt-4">
|
||||
<div class="card">
|
||||
<div class="card-header d-flex justify-content-between align-items-center gap-4">
|
||||
<h4 class="mb-0 text-nowrap"><i class="bi bi-table me-2"></i>Active Torrents</h4>
|
||||
<div class="d-flex align-items-center overflow-auto" style="flex-wrap: nowrap; gap: 0.5rem;">
|
||||
<button class="btn btn-outline-danger btn-sm" id="batchDeleteBtn" style="display: none; flex-shrink: 0;">
|
||||
<i class="bi bi-trash me-1"></i>Delete Selected
|
||||
</button>
|
||||
<button class="btn btn-outline-secondary btn-sm me-2" id="refreshBtn" style="flex-shrink: 0;">
|
||||
<i class="bi bi-arrow-clockwise me-1"></i>Refresh
|
||||
</button>
|
||||
<select class="form-select form-select-sm d-inline-block w-auto me-2" id="stateFilter" style="flex-shrink: 0;">
|
||||
<option value="">All States</option>
|
||||
<option value="downloading">Downloading</option>
|
||||
<option value="pausedup">Paused</option>
|
||||
<option value="error">Error</option>
|
||||
</select>
|
||||
<select class="form-select form-select-sm d-inline-block w-auto" id="categoryFilter">
|
||||
<option value="">All Categories</option>
|
||||
</select>
|
||||
</div>
|
||||
</div>
|
||||
<div class="card-body p-0">
|
||||
<div class="table-responsive">
|
||||
<table class="table table-hover mb-0">
|
||||
<thead>
|
||||
<tr>
|
||||
<th>
|
||||
<input type="checkbox" class="form-check-input" id="selectAll">
|
||||
</th>
|
||||
<th>Name</th>
|
||||
<th>Size</th>
|
||||
<th>Progress</th>
|
||||
<th>Speed</th>
|
||||
<th>Category</th>
|
||||
<th>Debrid</th>
|
||||
<th>State</th>
|
||||
<th>Actions</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody id="torrentsList">
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<script>
|
||||
let refs = {
|
||||
torrentsList: document.getElementById('torrentsList'),
|
||||
categoryFilter: document.getElementById('categoryFilter'),
|
||||
stateFilter: document.getElementById('stateFilter'),
|
||||
selectAll: document.getElementById('selectAll'),
|
||||
batchDeleteBtn: document.getElementById('batchDeleteBtn'),
|
||||
refreshBtn: document.getElementById('refreshBtn'),
|
||||
};
|
||||
let state = {
|
||||
torrents: [],
|
||||
selectedTorrents: new Set(),
|
||||
categories: new Set(),
|
||||
states: new Set(['downloading', 'pausedup', 'error']),
|
||||
selectedCategory: refs.categoryFilter?.value || '',
|
||||
selectedState: refs.stateFilter?.value || '',
|
||||
};
|
||||
|
||||
const torrentRowTemplate = (torrent) => `
|
||||
<tr data-hash="${torrent.hash}">
|
||||
<td>
|
||||
<input type="checkbox" class="form-check-input torrent-select" data-hash="${torrent.hash}" ${state.selectedTorrents.has(torrent.hash) ? 'checked' : ''}>
|
||||
</td>
|
||||
<td class="text-nowrap text-truncate overflow-hidden" style="max-width: 350px;" title="${torrent.name}">${torrent.name}</td>
|
||||
<td class="text-nowrap">${formatBytes(torrent.size)}</td>
|
||||
<td style="min-width: 150px;">
|
||||
<div class="progress" style="height: 8px;">
|
||||
<div class="progress-bar" role="progressbar"
|
||||
style="width: ${(torrent.progress * 100).toFixed(1)}%"
|
||||
aria-valuenow="${(torrent.progress * 100).toFixed(1)}"
|
||||
aria-valuemin="0"
|
||||
aria-valuemax="100"></div>
|
||||
</div>
|
||||
<small class="text-muted">${(torrent.progress * 100).toFixed(1)}%</small>
|
||||
</td>
|
||||
<td>${formatSpeed(torrent.dlspeed)}</td>
|
||||
<td><span class="badge bg-secondary">${torrent.category || 'None'}</span></td>
|
||||
<td>${torrent.debrid || 'None'}</td>
|
||||
<td><span class="badge ${getStateColor(torrent.state)}">${torrent.state}</span></td>
|
||||
<td>
|
||||
<button class="btn btn-sm btn-outline-danger" onclick="deleteTorrent('${torrent.hash}', '${torrent.category}')">
|
||||
<i class="bi bi-trash"></i>
|
||||
</button>
|
||||
</td>
|
||||
</tr>
|
||||
`;
|
||||
|
||||
function formatBytes(bytes) {
|
||||
if (!bytes) return '0 B';
|
||||
const k = 1024;
|
||||
const sizes = ['B', 'KB', 'MB', 'GB', 'TB'];
|
||||
const i = Math.floor(Math.log(bytes) / Math.log(k));
|
||||
return `${parseFloat((bytes / Math.pow(k, i)).toFixed(2))} ${sizes[i]}`;
|
||||
}
|
||||
|
||||
function formatSpeed(speed) {
|
||||
return `${formatBytes(speed)}/s`;
|
||||
}
|
||||
|
||||
function getStateColor(state) {
|
||||
const stateColors = {
|
||||
'downloading': 'bg-primary',
|
||||
'pausedup': 'bg-success',
|
||||
'error': 'bg-danger',
|
||||
};
|
||||
return stateColors[state?.toLowerCase()] || 'bg-secondary';
|
||||
}
|
||||
|
||||
function updateUI() {
|
||||
// Filter torrents by selected category and state
|
||||
let filteredTorrents = state.torrents;
|
||||
if (state.selectedCategory) {
|
||||
filteredTorrents = filteredTorrents.filter(t => t.category === state.selectedCategory);
|
||||
}
|
||||
if (state.selectedState) {
|
||||
filteredTorrents = filteredTorrents.filter(t => t.state?.toLowerCase() === state.selectedState.toLowerCase());
|
||||
}
|
||||
|
||||
// Update the torrents list table
|
||||
refs.torrentsList.innerHTML = filteredTorrents.map(torrent => torrentRowTemplate(torrent)).join('');
|
||||
|
||||
// Update the category filter dropdown
|
||||
const currentCategories = Array.from(state.categories).sort();
|
||||
const categoryOptions = ['<option value="">All Categories</option>']
|
||||
.concat(currentCategories.map(cat =>
|
||||
`<option value="${cat}" ${cat === state.selectedCategory ? 'selected' : ''}>${cat}</option>`
|
||||
));
|
||||
refs.categoryFilter.innerHTML = categoryOptions.join('');
|
||||
|
||||
// Clean up selected torrents that no longer exist
|
||||
state.selectedTorrents = new Set(
|
||||
Array.from(state.selectedTorrents)
|
||||
.filter(hash => filteredTorrents.some(t => t.hash === hash))
|
||||
);
|
||||
|
||||
// Update batch delete button visibility
|
||||
refs.batchDeleteBtn.style.display = state.selectedTorrents.size > 0 ? '' : 'none';
|
||||
|
||||
// Update the select all checkbox state
|
||||
refs.selectAll.checked = filteredTorrents.length > 0 && filteredTorrents.every(torrent => state.selectedTorrents.has(torrent.hash));
|
||||
}
|
||||
|
||||
async function loadTorrents() {
|
||||
try {
|
||||
const response = await fetch('/internal/torrents');
|
||||
const torrents = await response.json();
|
||||
|
||||
state.torrents = torrents;
|
||||
state.categories = new Set(torrents.map(t => t.category).filter(Boolean));
|
||||
|
||||
updateUI();
|
||||
} catch (error) {
|
||||
console.error('Error loading torrents:', error);
|
||||
}
|
||||
}
|
||||
|
||||
async function deleteTorrent(hash, category) {
|
||||
if (!confirm('Are you sure you want to delete this torrent?')) return;
|
||||
|
||||
try {
|
||||
await fetch(`/internal/torrents/${category}/${hash}`, {
|
||||
method: 'DELETE'
|
||||
});
|
||||
await loadTorrents();
|
||||
createToast('Torrent deleted successfully');
|
||||
} catch (error) {
|
||||
console.error('Error deleting torrent:', error);
|
||||
createToast('Failed to delete torrent', 'error');
|
||||
}
|
||||
}
|
||||
|
||||
async function deleteSelectedTorrents() {
|
||||
if (!confirm(`Are you sure you want to delete ${state.selectedTorrents.size} selected torrents?`)) return;
|
||||
|
||||
try {
|
||||
// Comma-separated list of hashes
|
||||
const hashes = Array.from(state.selectedTorrents).join(',');
|
||||
await fetch(`/internal/torrents/?hashes=${encodeURIComponent(hashes)}`, {
|
||||
method: 'DELETE'
|
||||
});
|
||||
await loadTorrents();
|
||||
createToast('Selected torrents deleted successfully');
|
||||
} catch (error) {
|
||||
console.error('Error deleting torrents:', error);
|
||||
createToast('Failed to delete some torrents', 'error');
|
||||
}
|
||||
}
|
||||
|
||||
document.addEventListener('DOMContentLoaded', () => {
|
||||
loadTorrents();
|
||||
const refreshInterval = setInterval(loadTorrents, 5000);
|
||||
|
||||
refs.refreshBtn.addEventListener('click', loadTorrents);
|
||||
refs.batchDeleteBtn.addEventListener('click', deleteSelectedTorrents);
|
||||
|
||||
refs.selectAll.addEventListener('change', (e) => {
|
||||
const filteredTorrents = state.torrents.filter(t => {
|
||||
if (state.selectedCategory && t.category !== state.selectedCategory) return false;
|
||||
if (state.selectedState && t.state?.toLowerCase() !== state.selectedState.toLowerCase()) return false;
|
||||
return true;
|
||||
});
|
||||
|
||||
if (e.target.checked) {
|
||||
filteredTorrents.forEach(torrent => state.selectedTorrents.add(torrent.hash));
|
||||
} else {
|
||||
filteredTorrents.forEach(torrent => state.selectedTorrents.delete(torrent.hash));
|
||||
}
|
||||
updateUI();
|
||||
});
|
||||
|
||||
refs.torrentsList.addEventListener('change', (e) => {
|
||||
if (e.target.classList.contains('torrent-select')) {
|
||||
const hash = e.target.dataset.hash;
|
||||
if (e.target.checked) {
|
||||
state.selectedTorrents.add(hash);
|
||||
} else {
|
||||
state.selectedTorrents.delete(hash);
|
||||
}
|
||||
updateUI();
|
||||
}
|
||||
});
|
||||
|
||||
refs.categoryFilter.addEventListener('change', (e) => {
|
||||
state.selectedCategory = e.target.value;
|
||||
updateUI();
|
||||
});
|
||||
|
||||
refs.stateFilter.addEventListener('change', (e) => {
|
||||
state.selectedState = e.target.value;
|
||||
updateUI();
|
||||
});
|
||||
|
||||
window.addEventListener('beforeunload', () => {
|
||||
clearInterval(refreshInterval);
|
||||
});
|
||||
});
|
||||
</script>
|
||||
{{ end }}
|
||||
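The torrents page above polls `GET /internal/torrents` every five seconds and removes entries with `DELETE /internal/torrents/{category}/{hash}`. As a rough illustration of the payload the page consumes, here is a minimal Go sketch of a client for that listing endpoint; the struct fields mirror the JSON keys the JavaScript reads (hash, name, size, progress, dlspeed, category, debrid, state), while the base URL and port are assumptions, not part of the source.

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// Torrent mirrors the JSON fields the index page reads; the project's actual
// server-side struct may differ (this is an illustration, not its API definition).
type Torrent struct {
	Hash     string  `json:"hash"`
	Name     string  `json:"name"`
	Size     int64   `json:"size"`
	Progress float64 `json:"progress"`
	DlSpeed  int64   `json:"dlspeed"`
	Category string  `json:"category"`
	Debrid   string  `json:"debrid"`
	State    string  `json:"state"`
}

func listTorrents(baseURL string) ([]Torrent, error) {
	resp, err := http.Get(baseURL + "/internal/torrents")
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	var torrents []Torrent
	if err := json.NewDecoder(resp.Body).Decode(&torrents); err != nil {
		return nil, err
	}
	return torrents, nil
}

func main() {
	// The address is only an example; use wherever the web UI is served.
	torrents, err := listTorrents("http://localhost:8282")
	if err != nil {
		panic(err)
	}
	for _, t := range torrents {
		fmt.Printf("%s %.1f%% (%s)\n", t.Name, t.Progress*100, t.State)
	}
}
```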
196
pkg/web/web/layout.html
Normal file
@@ -0,0 +1,196 @@
|
||||
{{ define "layout" }}
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||
<title>DecyphArr - {{.Title}}</title>
|
||||
<link href="https://cdn.jsdelivr.net/npm/bootstrap@5.3.0-alpha1/dist/css/bootstrap.min.css" rel="stylesheet">
|
||||
<link href="https://cdn.jsdelivr.net/npm/bootstrap-icons@1.7.2/font/bootstrap-icons.css" rel="stylesheet">
|
||||
<link href="https://cdn.jsdelivr.net/npm/select2@4.1.0-rc.0/dist/css/select2.min.css" rel="stylesheet"/>
|
||||
<link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/select2-bootstrap-5-theme@1.3.0/dist/select2-bootstrap-5-theme.min.css"/>
|
||||
<style>
|
||||
:root {
|
||||
--primary-color: #2563eb;
|
||||
--secondary-color: #1e40af;
|
||||
}
|
||||
|
||||
body {
|
||||
background-color: #f8fafc;
|
||||
}
|
||||
|
||||
.navbar {
|
||||
padding: 1rem 0;
|
||||
background: #fff !important;
|
||||
box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1);
|
||||
}
|
||||
|
||||
.navbar-brand {
|
||||
color: var(--primary-color) !important;
|
||||
font-weight: 700;
|
||||
font-size: 1.5rem;
|
||||
}
|
||||
|
||||
.card {
|
||||
border: none;
|
||||
border-radius: 10px;
|
||||
box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1);
|
||||
}
|
||||
|
||||
.nav-link {
|
||||
padding: 0.5rem 1rem;
|
||||
color: #4b5563;
|
||||
}
|
||||
|
||||
.nav-link.active {
|
||||
color: var(--primary-color) !important;
|
||||
font-weight: 500;
|
||||
}
|
||||
|
||||
.badge#channel-badge {
|
||||
background-color: #0d6efd;
|
||||
}
|
||||
|
||||
.badge#channel-badge.beta {
|
||||
background-color: #fd7e14;
|
||||
}
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<div class="toast-container position-fixed bottom-0 end-0 p-3">
|
||||
<!-- Toast messages will be created dynamically here -->
|
||||
</div>
|
||||
<nav class="navbar navbar-expand-lg navbar-light mb-4">
|
||||
<div class="container">
|
||||
<a class="navbar-brand" href="/">
|
||||
<i class="bi bi-cloud-download me-2"></i>DecyphArr
|
||||
</a>
|
||||
<button class="navbar-toggler" type="button" data-bs-toggle="collapse" data-bs-target="#navbarNav">
|
||||
<span class="navbar-toggler-icon"></span>
|
||||
</button>
|
||||
<div class="collapse navbar-collapse" id="navbarNav">
|
||||
<ul class="navbar-nav me-auto">
|
||||
<li class="nav-item">
|
||||
<a class="nav-link {{if eq .Page "index"}}active{{end}}" href="/">
|
||||
<i class="bi bi-table me-1"></i>Torrents
|
||||
</a>
|
||||
</li>
|
||||
<li class="nav-item">
|
||||
<a class="nav-link {{if eq .Page "download"}}active{{end}}" href="/download">
|
||||
<i class="bi bi-cloud-download me-1"></i>Download
|
||||
</a>
|
||||
</li>
|
||||
<li class="nav-item">
|
||||
<a class="nav-link {{if eq .Page "repair"}}active{{end}}" href="/repair">
|
||||
<i class="bi bi-tools me-1"></i>Repair
|
||||
</a>
|
||||
</li>
|
||||
<li class="nav-item">
|
||||
<a class="nav-link {{if eq .Page "config"}}active{{end}}" href="/config">
|
||||
<i class="bi bi-gear me-1"></i>Config
|
||||
</a>
|
||||
</li>
|
||||
<li class="nav-item">
|
||||
<a class="nav-link" href="/logs" target="_blank">
|
||||
<i class="bi bi-journal me-1"></i>Logs
|
||||
</a>
|
||||
</li>
|
||||
</ul>
|
||||
<div class="d-flex align-items-center">
|
||||
<span class="badge me-2" id="channel-badge">Loading...</span>
|
||||
<span class="badge bg-primary" id="version-badge">Loading...</span>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</nav>
|
||||
|
||||
{{ if eq .Page "index" }}
|
||||
{{ template "index" . }}
|
||||
{{ else if eq .Page "download" }}
|
||||
{{ template "download" . }}
|
||||
{{ else if eq .Page "repair" }}
|
||||
{{ template "repair" . }}
|
||||
{{ else if eq .Page "config" }}
|
||||
{{ template "config" . }}
|
||||
{{ else if eq .Page "login" }}
|
||||
{{ template "login" . }}
|
||||
{{ else if eq .Page "setup" }}
|
||||
{{ template "setup" . }}
|
||||
{{ else }}
|
||||
{{ end }}
|
||||
|
||||
<script src="https://cdn.jsdelivr.net/npm/bootstrap@5.3.0-alpha1/dist/js/bootstrap.bundle.min.js"></script>
|
||||
<script src="https://code.jquery.com/jquery-3.6.0.min.js"></script>
|
||||
<script src="https://cdn.jsdelivr.net/npm/select2@4.1.0-rc.0/dist/js/select2.min.js"></script>
|
||||
<script>
|
||||
/**
|
||||
* Create a toast message
|
||||
* @param {string} message - The message to display
|
||||
* @param {string} [type='success'] - The type of toast (success, warning, error)
|
||||
*/
|
||||
const createToast = (message, type = 'success') => {
|
||||
type = ['success', 'warning', 'error'].includes(type) ? type : 'success';
|
||||
|
||||
const toastTimeouts = {
|
||||
success: 5000,
|
||||
warning: 10000,
|
||||
error: 15000
|
||||
}
|
||||
|
||||
const toastContainer = document.querySelector('.toast-container');
|
||||
const toastId = `toast-${Date.now()}`;
|
||||
|
||||
const toastHtml = `
|
||||
<div id="${toastId}" class="toast" role="alert" aria-live="assertive" aria-atomic="true">
|
||||
<div class="toast-header ${type === 'error' ? 'bg-danger text-white' : type === 'warning' ? 'bg-warning text-dark' : 'bg-success text-white'}">
|
||||
<strong class="me-auto">
|
||||
${type === 'error' ? 'Error' : type === 'warning' ? 'Warning' : 'Success'}
|
||||
</strong>
|
||||
<button type="button" class="btn-close ${type === 'warning' ? '' : 'btn-close-white'}" data-bs-dismiss="toast" aria-label="Close"></button>
|
||||
</div>
|
||||
<div class="toast-body">
|
||||
${message.replace(/\n/g, '<br>')}
|
||||
</div>
|
||||
</div>
|
||||
`;
|
||||
|
||||
toastContainer.insertAdjacentHTML('beforeend', toastHtml);
|
||||
|
||||
const toastElement = document.getElementById(toastId);
|
||||
const toast = new bootstrap.Toast(toastElement, {
|
||||
autohide: true,
|
||||
delay: toastTimeouts[type]
|
||||
});
|
||||
|
||||
toast.show();
|
||||
|
||||
toastElement.addEventListener('hidden.bs.toast', () => {
|
||||
toastElement.remove();
|
||||
});
|
||||
};
|
||||
|
||||
document.addEventListener('DOMContentLoaded', function() {
|
||||
fetch('/internal/version')
|
||||
.then(response => response.json())
|
||||
.then(data => {
|
||||
const versionBadge = document.getElementById('version-badge');
|
||||
const channelBadge = document.getElementById('channel-badge');
|
||||
|
||||
// Add url to version badge
|
||||
versionBadge.innerHTML = `<a href="https://github.com/sirrobot01/debrid-blackhole/releases/tag/${data.version}" target="_blank" class="text-white">${data.version}</a>`;
|
||||
channelBadge.textContent = data.channel.charAt(0).toUpperCase() + data.channel.slice(1);
|
||||
|
||||
if (data.channel === 'beta') {
|
||||
channelBadge.classList.add('beta');
|
||||
}
|
||||
})
|
||||
.catch(error => {
|
||||
console.error('Error fetching version:', error);
|
||||
document.getElementById('version-badge').textContent = 'Unknown';
|
||||
document.getElementById('channel-badge').textContent = 'Unknown';
|
||||
});
|
||||
});
|
||||
</script>
|
||||
</body>
|
||||
</html>
|
||||
{{ end }}
|
||||
131
pkg/web/web/login.html
Normal file
@@ -0,0 +1,131 @@
|
||||
{{ define "login" }}
|
||||
<div class="container mt-5">
|
||||
<div class="row justify-content-center">
|
||||
<div class="col-md-6 col-lg-4">
|
||||
<div class="card">
|
||||
<div class="card-header">
|
||||
<h4 class="mb-0 text-center">Login</h4>
|
||||
</div>
|
||||
<div class="card-body">
|
||||
<form id="loginForm">
|
||||
<div class="mb-3">
|
||||
<label for="username" class="form-label">Username</label>
|
||||
<input type="text" class="form-control" id="username" name="username" required>
|
||||
</div>
|
||||
<div class="mb-3">
|
||||
<label for="password" class="form-label">Password</label>
|
||||
<input type="password" class="form-control" id="password" name="password" required>
|
||||
</div>
|
||||
<div class="d-grid">
|
||||
<button type="submit" class="btn btn-primary">Login</button>
|
||||
</div>
|
||||
</form>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<script>
|
||||
document.getElementById('loginForm').addEventListener('submit', async (e) => {
|
||||
e.preventDefault();
|
||||
|
||||
const formData = {
|
||||
username: document.getElementById('username').value,
|
||||
password: document.getElementById('password').value
|
||||
};
|
||||
|
||||
try {
|
||||
const response = await fetch('/login', {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json'
|
||||
},
|
||||
body: JSON.stringify(formData)
|
||||
});
|
||||
|
||||
if (response.ok) {
|
||||
window.location.href = '/';
|
||||
} else {
|
||||
createToast('Invalid credentials', 'error');
|
||||
}
|
||||
} catch (error) {
|
||||
console.error('Login error:', error);
|
||||
createToast('Login failed', 'error');
|
||||
}
|
||||
});
|
||||
</script>
|
||||
{{ end }}
|
||||
|
||||
{{ define "setup" }}
|
||||
<div class="container mt-5">
|
||||
<div class="row justify-content-center">
|
||||
<div class="col-md-6 col-lg-4">
|
||||
<div class="card">
|
||||
<div class="card-header">
|
||||
<h4 class="mb-0 text-center">First Time Setup</h4>
|
||||
</div>
|
||||
<div class="card-body">
|
||||
<form id="setupForm">
|
||||
<div class="mb-3">
|
||||
<label for="username" class="form-label">Choose Username</label>
|
||||
<input type="text" class="form-control" id="username" name="username" required>
|
||||
</div>
|
||||
<div class="mb-3">
|
||||
<label for="password" class="form-label">Choose Password</label>
|
||||
<input type="password" class="form-control" id="password" name="password" required>
|
||||
</div>
|
||||
<div class="mb-3">
|
||||
<label for="confirmPassword" class="form-label">Confirm Password</label>
|
||||
<input type="password" class="form-control" id="confirmPassword" name="confirmPassword" required>
|
||||
</div>
|
||||
<div class="d-grid">
|
||||
<button type="submit" class="btn btn-primary">Set Credentials</button>
|
||||
</div>
|
||||
</form>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<script>
|
||||
document.getElementById('setupForm').addEventListener('submit', async (e) => {
|
||||
e.preventDefault();
|
||||
|
||||
const password = document.getElementById('password').value;
|
||||
const confirmPassword = document.getElementById('confirmPassword').value;
|
||||
|
||||
if (password !== confirmPassword) {
|
||||
createToast('Passwords do not match', 'error');
|
||||
return;
|
||||
}
|
||||
|
||||
const formData = {
|
||||
username: document.getElementById('username').value,
|
||||
password: password,
|
||||
confirmPassword: confirmPassword
|
||||
};
|
||||
|
||||
try {
|
||||
const response = await fetch('/setup', {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json'
|
||||
},
|
||||
body: JSON.stringify(formData)
|
||||
});
|
||||
|
||||
if (response.ok) {
|
||||
window.location.href = '/';
|
||||
} else {
|
||||
const error = await response.text();
|
||||
createToast(error, 'error');
|
||||
}
|
||||
} catch (error) {
|
||||
console.error('Setup error:', error);
|
||||
createToast('Setup failed', 'error');
|
||||
}
|
||||
});
|
||||
</script>
|
||||
{{ end }}
|
||||
94
pkg/web/web/repair.html
Normal file
@@ -0,0 +1,94 @@
|
||||
{{ define "repair" }}
|
||||
<div class="container mt-4">
|
||||
<div class="card">
|
||||
<div class="card-header">
|
||||
<h4 class="mb-0"><i class="bi bi-tools me-2"></i>Repair Media</h4>
|
||||
</div>
|
||||
<div class="card-body">
|
||||
<form id="repairForm">
|
||||
<div class="mb-3">
|
||||
<label for="arrSelect" class="form-label">Select Arr Instance</label>
|
||||
<select class="form-select" id="arrSelect" required>
|
||||
<option value="">Select an Arr instance</option>
|
||||
</select>
|
||||
</div>
|
||||
|
||||
<div class="mb-3">
|
||||
<label for="mediaIds" class="form-label">Media IDs</label>
|
||||
<input type="text" class="form-control" id="mediaIds"
|
||||
placeholder="Enter IDs (comma-separated)">
|
||||
<small class="text-muted">Enter TV DB ids for Sonarr, TM DB ids for Radarr</small>
|
||||
</div>
|
||||
|
||||
<div class="mb-3">
|
||||
<div class="form-check">
|
||||
<input class="form-check-input" type="checkbox" id="isAsync" checked>
|
||||
<label class="form-check-label" for="isAsync">
|
||||
Run repair in background
|
||||
</label>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<button type="submit" class="btn btn-primary" id="submitRepair">
|
||||
<i class="bi bi-wrench me-2"></i>Start Repair
|
||||
</button>
|
||||
</form>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<script>
|
||||
document.addEventListener('DOMContentLoaded', () => {
|
||||
// Load Arr instances
|
||||
fetch('/internal/arrs')
|
||||
.then(response => response.json())
|
||||
.then(arrs => {
|
||||
const select = document.getElementById('arrSelect');
|
||||
arrs.forEach(arr => {
|
||||
const option = document.createElement('option');
|
||||
option.value = arr.name;
|
||||
option.textContent = arr.name;
|
||||
select.appendChild(option);
|
||||
});
|
||||
});
|
||||
|
||||
// Handle form submission
|
||||
document.getElementById('repairForm').addEventListener('submit', async (e) => {
|
||||
e.preventDefault();
|
||||
const submitBtn = document.getElementById('submitRepair');
|
||||
const originalText = submitBtn.innerHTML;
|
||||
|
||||
submitBtn.disabled = true;
|
||||
submitBtn.innerHTML = '<span class="spinner-border spinner-border-sm me-2"></span>Repairing...';
|
||||
let mediaIds = document.getElementById('mediaIds').value.split(',').map(id => id.trim()).filter(Boolean);
|
||||
let arr = document.getElementById('arrSelect').value;
|
||||
if (!arr) {
|
||||
createToast('Please select an Arr instance', 'warning');
|
||||
submitBtn.disabled = false;
|
||||
submitBtn.innerHTML = originalText;
|
||||
return;
|
||||
}
|
||||
try {
|
||||
const response = await fetch('/internal/repair', {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json'
|
||||
},
|
||||
body: JSON.stringify({
|
||||
arr: document.getElementById('arrSelect').value,
|
||||
mediaIds: mediaIds,
|
||||
async: document.getElementById('isAsync').checked
|
||||
})
|
||||
});
|
||||
|
||||
if (!response.ok) throw new Error(await response.text());
|
||||
createToast('Repair process initiated successfully!');
|
||||
} catch (error) {
|
||||
createToast(`Error starting repair: ${error.message}`, 'error');
|
||||
} finally {
|
||||
submitBtn.disabled = false;
|
||||
submitBtn.innerHTML = originalText;
|
||||
}
|
||||
});
|
||||
});
|
||||
</script>
|
||||
{{ end }}
|
||||
32
pkg/web/web/setup.html
Normal file
@@ -0,0 +1,32 @@
|
||||
{{ define "setup" }}
|
||||
<div class="container mt-5">
|
||||
<div class="row justify-content-center">
|
||||
<div class="col-md-6 col-lg-4">
|
||||
<div class="card">
|
||||
<div class="card-header">
|
||||
<h4 class="mb-0 text-center">First Time Setup</h4>
|
||||
</div>
|
||||
<div class="card-body">
|
||||
<form id="setupForm" method="POST" action="/setup">
|
||||
<div class="mb-3">
|
||||
<label for="username" class="form-label">Choose Username</label>
|
||||
<input type="text" class="form-control" id="username" name="username" required>
|
||||
</div>
|
||||
<div class="mb-3">
|
||||
<label for="password" class="form-label">Choose Password</label>
|
||||
<input type="password" class="form-control" id="password" name="password" required>
|
||||
</div>
|
||||
<div class="mb-3">
|
||||
<label for="confirmPassword" class="form-label">Confirm Password</label>
|
||||
<input type="password" class="form-control" id="confirmPassword" name="confirmPassword" required>
|
||||
</div>
|
||||
<div class="d-grid">
|
||||
<button type="submit" class="btn btn-primary">Set Credentials</button>
|
||||
</div>
|
||||
</form>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
{{ end }}
|
||||
160
pkg/webdav/file.go
Normal file
@@ -0,0 +1,160 @@
|
||||
package webdav
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/sirrobot01/debrid-blackhole/pkg/debrid/cache"
|
||||
"github.com/sirrobot01/debrid-blackhole/pkg/debrid/torrent"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"time"
|
||||
)
|
||||
|
||||
type File struct {
|
||||
cache *cache.Cache
|
||||
cachedTorrent *cache.CachedTorrent
|
||||
file *torrent.File
|
||||
offset int64
|
||||
isDir bool
|
||||
children []os.FileInfo
|
||||
reader io.ReadCloser
|
||||
}
|
||||
|
||||
// File interface implementations for File
|
||||
|
||||
func (f *File) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *File) GetDownloadLink() string {
|
||||
file := f.file
|
||||
link, err := f.cache.GetFileDownloadLink(f.cachedTorrent, file)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
return link
|
||||
}
|
||||
|
||||
func (f *File) Read(p []byte) (n int, err error) {
|
||||
// Directories cannot be read as a byte stream.
|
||||
if f.isDir {
|
||||
return 0, os.ErrInvalid
|
||||
}
|
||||
|
||||
// If we haven't started streaming the file yet, open the HTTP connection.
|
||||
if f.reader == nil {
|
||||
// Create an HTTP GET request to the file's URL.
|
||||
req, err := http.NewRequest("GET", f.GetDownloadLink(), nil)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("failed to create HTTP request: %w", err)
|
||||
}
|
||||
|
||||
// If we've already read some data (f.offset > 0), request only the remaining bytes.
|
||||
if f.offset > 0 {
|
||||
req.Header.Set("Range", fmt.Sprintf("bytes=%d-", f.offset))
|
||||
}
|
||||
|
||||
// Execute the HTTP request.
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("HTTP request error: %w", err)
|
||||
}
|
||||
|
||||
// Accept a 200 (OK) or 206 (Partial Content) status.
|
||||
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent {
|
||||
resp.Body.Close()
|
||||
return 0, fmt.Errorf("unexpected HTTP status: %d", resp.StatusCode)
|
||||
}
|
||||
|
||||
// Store the response body as our reader.
|
||||
f.reader = resp.Body
|
||||
}
|
||||
|
||||
// Read data from the HTTP stream.
|
||||
n, err = f.reader.Read(p)
|
||||
f.offset += int64(n)
|
||||
|
||||
// When we reach the end of the stream, close the reader.
|
||||
if err == io.EOF {
|
||||
f.reader.Close()
|
||||
f.reader = nil
|
||||
}
|
||||
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (f *File) Seek(offset int64, whence int) (int64, error) {
|
||||
if f.isDir {
|
||||
return 0, os.ErrInvalid
|
||||
}
|
||||
|
||||
switch whence {
|
||||
case io.SeekStart:
|
||||
f.offset = offset
|
||||
case io.SeekCurrent:
|
||||
f.offset += offset
|
||||
case io.SeekEnd:
|
||||
f.offset = f.file.Size + offset // io.SeekEnd: offset is relative to the end (typically <= 0)
|
||||
default:
|
||||
return 0, os.ErrInvalid
|
||||
}
|
||||
|
||||
if f.offset < 0 {
|
||||
f.offset = 0
|
||||
}
|
||||
if f.offset > f.file.Size {
|
||||
f.offset = f.file.Size
|
||||
}
|
||||
|
||||
// Drop any open stream so the next Read re-issues the request at the new offset.
if f.reader != nil {
f.reader.Close()
f.reader = nil
}
return f.offset, nil
|
||||
}
|
||||
|
||||
func (f *File) Readdir(count int) ([]os.FileInfo, error) {
|
||||
if !f.isDir {
|
||||
return nil, os.ErrInvalid
|
||||
}
|
||||
|
||||
if count <= 0 {
|
||||
return f.children, nil
|
||||
}
|
||||
|
||||
if len(f.children) == 0 {
|
||||
return nil, io.EOF
|
||||
}
|
||||
|
||||
if count > len(f.children) {
|
||||
count = len(f.children)
|
||||
}
|
||||
|
||||
files := f.children[:count]
|
||||
f.children = f.children[count:]
|
||||
return files, nil
|
||||
}
|
||||
|
||||
func (f *File) Stat() (os.FileInfo, error) {
|
||||
if f.isDir {
|
||||
name := "/"
|
||||
if f.cachedTorrent != nil {
|
||||
name = f.cachedTorrent.Name
|
||||
}
|
||||
return &FileInfo{
|
||||
name: name,
|
||||
size: 0,
|
||||
mode: 0755 | os.ModeDir,
|
||||
modTime: time.Now(),
|
||||
isDir: true,
|
||||
}, nil
|
||||
}
|
||||
|
||||
return &FileInfo{
|
||||
name: f.file.Name,
|
||||
size: f.file.Size,
|
||||
mode: 0644,
|
||||
modTime: time.Now(),
|
||||
isDir: false,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (f *File) Write(p []byte) (n int, err error) {
|
||||
return 0, os.ErrPermission
|
||||
}
|
||||
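`File.Read` above opens the HTTP stream to the debrid download link lazily and sends a `Range` header so that reading resumes from the current offset rather than from byte zero. The same lazy-Range pattern is shown below as a small, self-contained sketch against an arbitrary URL; it is independent of the cache and torrent types used in `file.go`, and the example URL is a placeholder.

```go
package main

import (
	"fmt"
	"io"
	"net/http"
)

// rangedReader streams a URL over HTTP and resumes from offset using a Range
// header, mirroring the approach File.Read takes above.
type rangedReader struct {
	url    string
	offset int64
	body   io.ReadCloser
}

func (r *rangedReader) Read(p []byte) (int, error) {
	if r.body == nil {
		req, err := http.NewRequest(http.MethodGet, r.url, nil)
		if err != nil {
			return 0, err
		}
		if r.offset > 0 {
			req.Header.Set("Range", fmt.Sprintf("bytes=%d-", r.offset))
		}
		resp, err := http.DefaultClient.Do(req)
		if err != nil {
			return 0, err
		}
		if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent {
			resp.Body.Close()
			return 0, fmt.Errorf("unexpected HTTP status: %d", resp.StatusCode)
		}
		r.body = resp.Body
	}
	n, err := r.body.Read(p)
	r.offset += int64(n)
	if err == io.EOF {
		r.body.Close()
		r.body = nil
	}
	return n, err
}

func main() {
	// Placeholder URL; any server that honours Range requests works.
	r := &rangedReader{url: "https://example.com/file.bin", offset: 1024}
	buf := make([]byte, 4096)
	n, err := r.Read(buf)
	fmt.Println(n, err)
}
```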
22
pkg/webdav/file_info.go
Normal file
@@ -0,0 +1,22 @@
|
||||
package webdav
|
||||
|
||||
import (
|
||||
"os"
|
||||
"time"
|
||||
)
|
||||
|
||||
// FileInfo implements os.FileInfo for our WebDAV files
|
||||
type FileInfo struct {
|
||||
name string
|
||||
size int64
|
||||
mode os.FileMode
|
||||
modTime time.Time
|
||||
isDir bool
|
||||
}
|
||||
|
||||
func (fi *FileInfo) Name() string { return fi.name }
|
||||
func (fi *FileInfo) Size() int64 { return fi.size }
|
||||
func (fi *FileInfo) Mode() os.FileMode { return fi.mode }
|
||||
func (fi *FileInfo) ModTime() time.Time { return fi.modTime }
|
||||
func (fi *FileInfo) IsDir() bool { return fi.isDir }
|
||||
func (fi *FileInfo) Sys() interface{} { return nil }
|
||||
263
pkg/webdav/handler.go
Normal file
@@ -0,0 +1,263 @@
|
||||
package webdav
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/sirrobot01/debrid-blackhole/pkg/debrid/cache"
|
||||
"github.com/sirrobot01/debrid-blackhole/pkg/debrid/torrent"
|
||||
"golang.org/x/net/webdav"
|
||||
"html/template"
|
||||
"net/http"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
)
|
||||
|
||||
type Handler struct {
|
||||
Name string
|
||||
logger zerolog.Logger
|
||||
cache *cache.Cache
|
||||
rootListing atomic.Value
|
||||
lastRefresh time.Time
|
||||
refreshMutex sync.Mutex
|
||||
RootPath string
|
||||
}
|
||||
|
||||
func NewHandler(name string, cache *cache.Cache, logger zerolog.Logger) *Handler {
|
||||
h := &Handler{
|
||||
Name: name,
|
||||
cache: cache,
|
||||
logger: logger,
|
||||
RootPath: fmt.Sprintf("/%s", name),
|
||||
}
|
||||
|
||||
h.refreshRootListing()
|
||||
|
||||
// Start background refresh
|
||||
go h.backgroundRefresh()
|
||||
|
||||
return h
|
||||
}
|
||||
|
||||
func (h *Handler) backgroundRefresh() {
|
||||
ticker := time.NewTicker(5 * time.Minute)
|
||||
defer ticker.Stop()
|
||||
|
||||
for range ticker.C {
|
||||
h.refreshRootListing()
|
||||
}
|
||||
}
|
||||
|
||||
func (h *Handler) refreshRootListing() {
|
||||
h.refreshMutex.Lock()
|
||||
defer h.refreshMutex.Unlock()
|
||||
|
||||
if time.Since(h.lastRefresh) < time.Minute {
|
||||
return
|
||||
}
|
||||
|
||||
var files []os.FileInfo
|
||||
h.cache.GetTorrents().Range(func(key, value interface{}) bool {
|
||||
cachedTorrent := value.(*cache.CachedTorrent)
|
||||
if cachedTorrent != nil && cachedTorrent.Torrent != nil {
|
||||
files = append(files, &FileInfo{
|
||||
name: cachedTorrent.Torrent.Name,
|
||||
size: 0,
|
||||
mode: 0755 | os.ModeDir,
|
||||
modTime: time.Now(),
|
||||
isDir: true,
|
||||
})
|
||||
}
|
||||
return true
|
||||
})
|
||||
|
||||
h.rootListing.Store(files)
|
||||
h.lastRefresh = time.Now()
|
||||
}
|
||||
|
||||
func (h *Handler) getParentRootPath() string {
|
||||
return fmt.Sprintf("/webdav/%s", h.Name)
|
||||
}
|
||||
|
||||
func (h *Handler) getRootFileInfos() []os.FileInfo {
|
||||
if listing := h.rootListing.Load(); listing != nil {
|
||||
return listing.([]os.FileInfo)
|
||||
}
|
||||
return []os.FileInfo{}
|
||||
}
|
||||
|
||||
// Mkdir implements webdav.FileSystem
|
||||
func (h *Handler) Mkdir(ctx context.Context, name string, perm os.FileMode) error {
|
||||
return os.ErrPermission // Read-only filesystem
|
||||
}
|
||||
|
||||
// RemoveAll implements webdav.FileSystem
|
||||
func (h *Handler) RemoveAll(ctx context.Context, name string) error {
|
||||
return os.ErrPermission // Read-only filesystem
|
||||
}
|
||||
|
||||
// Rename implements webdav.FileSystem
|
||||
func (h *Handler) Rename(ctx context.Context, oldName, newName string) error {
|
||||
return os.ErrPermission // Read-only filesystem
|
||||
}
|
||||
|
||||
func (h *Handler) OpenFile(ctx context.Context, name string, flag int, perm os.FileMode) (webdav.File, error) {
|
||||
name = path.Clean("/" + name)
|
||||
|
||||
// Fast path for root directory
|
||||
if name == h.getParentRootPath() {
|
||||
return &File{
|
||||
cache: h.cache,
|
||||
isDir: true,
|
||||
children: h.getRootFileInfos(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Remove root directory from path
|
||||
name = strings.TrimPrefix(name, h.getParentRootPath())
|
||||
name = strings.TrimPrefix(name, "/")
|
||||
parts := strings.SplitN(name, "/", 2)
|
||||
|
||||
// Get torrent from cache using sync.Map
|
||||
cachedTorrent := h.cache.GetTorrentByName(parts[0])
|
||||
if cachedTorrent == nil {
|
||||
h.logger.Debug().Msgf("Torrent not found: %s", parts[0])
|
||||
return nil, os.ErrNotExist
|
||||
}
|
||||
|
||||
if len(parts) == 1 {
|
||||
return &File{
|
||||
cache: h.cache,
|
||||
cachedTorrent: cachedTorrent,
|
||||
isDir: true,
|
||||
children: h.getTorrentFileInfos(cachedTorrent.Torrent),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Use a map for faster file lookup
|
||||
fileMap := make(map[string]*torrent.File, len(cachedTorrent.Torrent.Files))
|
||||
for i := range cachedTorrent.Torrent.Files {
|
||||
fileMap[cachedTorrent.Torrent.Files[i].Name] = &cachedTorrent.Torrent.Files[i]
|
||||
}
|
||||
|
||||
if file, ok := fileMap[parts[1]]; ok {
|
||||
return &File{
|
||||
cache: h.cache,
|
||||
cachedTorrent: cachedTorrent,
|
||||
file: file,
|
||||
isDir: false,
|
||||
}, nil
|
||||
}
|
||||
|
||||
h.logger.Debug().Msgf("File not found: %s", name)
|
||||
return nil, os.ErrNotExist
|
||||
}
|
||||
|
||||
// Stat implements webdav.FileSystem
|
||||
func (h *Handler) Stat(ctx context.Context, name string) (os.FileInfo, error) {
|
||||
f, err := h.OpenFile(ctx, name, os.O_RDONLY, 0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return f.Stat()
|
||||
}
|
||||
|
||||
func (h *Handler) getTorrentFileInfos(torrent *torrent.Torrent) []os.FileInfo {
|
||||
files := make([]os.FileInfo, 0, len(torrent.Files))
|
||||
for _, file := range torrent.Files {
|
||||
files = append(files, &FileInfo{
|
||||
name: file.Name,
|
||||
size: file.Size,
|
||||
mode: 0644,
|
||||
modTime: time.Now(),
|
||||
isDir: false,
|
||||
})
|
||||
}
|
||||
return files
|
||||
}
|
||||
|
||||
func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
// Handle OPTIONS
|
||||
if r.Method == "OPTIONS" {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
return
|
||||
}
|
||||
|
||||
// Create WebDAV handler
|
||||
handler := &webdav.Handler{
|
||||
FileSystem: h,
|
||||
LockSystem: webdav.NewMemLS(),
|
||||
Logger: func(r *http.Request, err error) {
|
||||
if err != nil {
|
||||
h.logger.Error().
|
||||
Err(err).
|
||||
Str("method", r.Method).
|
||||
Str("path", r.URL.Path).
|
||||
Msg("WebDAV error")
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
// Special handling for GET requests on directories
|
||||
if r.Method == "GET" {
|
||||
if f, err := h.OpenFile(r.Context(), r.URL.Path, os.O_RDONLY, 0); err == nil {
|
||||
if fi, err := f.Stat(); err == nil && fi.IsDir() {
|
||||
h.serveDirectory(w, r, f)
|
||||
return
|
||||
}
|
||||
f.Close()
|
||||
}
|
||||
}
|
||||
handler.ServeHTTP(w, r)
|
||||
}
|
||||
|
||||
func (h *Handler) serveDirectory(w http.ResponseWriter, r *http.Request, file webdav.File) {
|
||||
var children []os.FileInfo
|
||||
if f, ok := file.(*File); ok {
|
||||
children = f.children
|
||||
} else {
|
||||
var err error
|
||||
children, err = file.Readdir(-1)
|
||||
if err != nil {
|
||||
http.Error(w, "Failed to list directory", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Clean and prepare the path
|
||||
cleanPath := path.Clean(r.URL.Path)
|
||||
parentPath := path.Dir(cleanPath)
|
||||
showParent := cleanPath != "/" && parentPath != "." && parentPath != cleanPath
|
||||
|
||||
// Prepare template data
|
||||
data := struct {
|
||||
Path string
|
||||
ParentPath string
|
||||
ShowParent bool
|
||||
Children []os.FileInfo
|
||||
}{
|
||||
Path: cleanPath,
|
||||
ParentPath: parentPath,
|
||||
ShowParent: showParent,
|
||||
Children: children,
|
||||
}
|
||||
|
||||
// Parse and execute template
|
||||
tmpl, err := template.New("directory").Parse(directoryTemplate)
|
||||
if err != nil {
|
||||
h.logger.Error().Err(err).Msg("Failed to parse directory template")
|
||||
http.Error(w, "Internal Server Error", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Type", "text/html; charset=utf-8")
|
||||
if err := tmpl.Execute(w, data); err != nil {
|
||||
h.logger.Error().Err(err).Msg("Failed to execute directory template")
|
||||
http.Error(w, "Internal Server Error", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
}
|
||||
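Path resolution in `OpenFile` follows a simple scheme: after stripping the handler's mount prefix, the first path segment names a torrent (served as a directory listing) and the remainder names a file inside it (served as a ranged stream). The sketch below isolates just that resolution step; the handler name and paths are hypothetical examples, not values taken from the source.

```go
package main

import (
	"fmt"
	"path"
	"strings"
)

// resolve mimics how Handler.OpenFile above turns a WebDAV request path into a
// (torrent, file) pair: strip the mount prefix, then split once on "/".
func resolve(requestPath, handlerName string) (torrentName, fileName string) {
	prefix := fmt.Sprintf("/webdav/%s", handlerName)
	p := path.Clean("/" + requestPath)
	p = strings.TrimPrefix(p, prefix)
	p = strings.TrimPrefix(p, "/")
	parts := strings.SplitN(p, "/", 2)
	torrentName = parts[0]
	if len(parts) == 2 {
		fileName = parts[1]
	}
	return torrentName, fileName
}

func main() {
	t, f := resolve("/webdav/realdebrid/Some.Show.S01/Some.Show.S01E01.mkv", "realdebrid")
	fmt.Println(t) // "Some.Show.S01"          -> looked up in the torrent cache, served as a directory
	fmt.Println(f) // "Some.Show.S01E01.mkv"   -> looked up in the torrent's file map, served as a file
}
```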
112
pkg/webdav/templates.go
Normal file
@@ -0,0 +1,112 @@
|
||||
package webdav
|
||||
|
||||
const rootTemplate = `
|
||||
<!DOCTYPE html>
|
||||
<html>
|
||||
<head>
|
||||
<title>WebDAV Shares</title>
|
||||
<style>
|
||||
body {
|
||||
font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Helvetica, Arial, sans-serif;
|
||||
margin: 0 auto;
|
||||
padding: 20px;
|
||||
}
|
||||
h1 {
|
||||
color: #2c3e50;
|
||||
}
|
||||
ul {
|
||||
list-style: none;
|
||||
padding: 0;
|
||||
}
|
||||
li {
|
||||
margin: 10px 0;
|
||||
}
|
||||
a {
|
||||
color: #3498db;
|
||||
text-decoration: none;
|
||||
padding: 10px;
|
||||
display: block;
|
||||
border: 1px solid #eee;
|
||||
border-radius: 4px;
|
||||
}
|
||||
a:hover {
|
||||
background-color: #f7f9fa;
|
||||
}
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<h1>Available WebDAV Shares</h1>
|
||||
<ul>
|
||||
{{range .Handlers}}
|
||||
<li><a href="{{$.Prefix}}{{.RootPath}}">{{$.Prefix}}{{.RootPath}}</a></li>
|
||||
{{end}}
|
||||
</ul>
|
||||
</body>
|
||||
</html>
|
||||
`
|
||||
|
||||
const directoryTemplate = `
|
||||
<!DOCTYPE html>
|
||||
<html>
|
||||
<head>
|
||||
<title>Index of {{.Path}}</title>
|
||||
<style>
|
||||
body {
|
||||
font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Helvetica, Arial, sans-serif;
|
||||
margin: 0 auto;
|
||||
padding: 20px;
|
||||
}
|
||||
h1 {
|
||||
color: #2c3e50;
|
||||
}
|
||||
ul {
|
||||
list-style: none;
|
||||
padding: 0;
|
||||
}
|
||||
li {
|
||||
margin: 10px 0;
|
||||
}
|
||||
a {
|
||||
color: #3498db;
|
||||
text-decoration: none;
|
||||
padding: 10px;
|
||||
display: block;
|
||||
border: 1px solid #eee;
|
||||
border-radius: 4px;
|
||||
}
|
||||
a:hover {
|
||||
background-color: #f7f9fa;
|
||||
}
|
||||
.file-info {
|
||||
color: #666;
|
||||
font-size: 0.9em;
|
||||
float: right;
|
||||
}
|
||||
.parent-dir {
|
||||
background-color: #f8f9fa;
|
||||
}
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<h1>Index of {{.Path}}</h1>
|
||||
<ul>
|
||||
{{if .ShowParent}}
|
||||
<li><a href="{{.ParentPath}}" class="parent-dir">Parent Directory</a></li>
|
||||
{{end}}
|
||||
{{range .Children}}
|
||||
<li>
|
||||
<a href="{{$.Path}}/{{.Name}}">
|
||||
{{.Name}}{{if .IsDir}}/{{end}}
|
||||
<span class="file-info">
|
||||
{{if not .IsDir}}
|
||||
{{.Size}} bytes
|
||||
{{end}}
|
||||
{{.ModTime.Format "2006-01-02 15:04:05"}}
|
||||
</span>
|
||||
</a>
|
||||
</li>
|
||||
{{end}}
|
||||
</ul>
|
||||
</body>
|
||||
</html>
|
||||
`
|
||||
126
pkg/webdav/webdav.go
Normal file
@@ -0,0 +1,126 @@
|
||||
package webdav
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"github.com/go-chi/chi/v5"
|
||||
"html/template"
|
||||
"net/http"
|
||||
"sync"
|
||||
)
|
||||
|
||||
type WebDav struct {
|
||||
Handlers []*Handler
|
||||
}
|
||||
|
||||
func New() *WebDav {
|
||||
//svc := service.GetService()
|
||||
//cfg := config.GetConfig()
|
||||
w := &WebDav{
|
||||
Handlers: make([]*Handler, 0),
|
||||
}
|
||||
//for name, c := range svc.DebridCache.GetCaches() {
|
||||
// h := NewHandler(name, c, logger.NewLogger(fmt.Sprintf("%s-webdav", name), cfg.LogLevel, os.Stdout))
|
||||
// w.Handlers = append(w.Handlers, h)
|
||||
//}
|
||||
return w
|
||||
}
|
||||
|
||||
func (wd *WebDav) Routes() http.Handler {
|
||||
chi.RegisterMethod("PROPFIND")
|
||||
chi.RegisterMethod("PROPPATCH")
|
||||
chi.RegisterMethod("MKCOL") // Note: it was "MKOL" in your example, should be "MKCOL"
|
||||
chi.RegisterMethod("COPY")
|
||||
chi.RegisterMethod("MOVE")
|
||||
chi.RegisterMethod("LOCK")
|
||||
chi.RegisterMethod("UNLOCK")
|
||||
wr := chi.NewRouter()
|
||||
wr.Use(wd.commonMiddleware)
|
||||
|
||||
wd.setupRootHandler(wr)
|
||||
wd.mountHandlers(wr)
|
||||
|
||||
return wr
|
||||
}
|
||||
|
||||
func (wd *WebDav) Start(ctx context.Context) error {
|
||||
wg := sync.WaitGroup{}
|
||||
errChan := make(chan error, len(wd.Handlers))
|
||||
|
||||
for _, h := range wd.Handlers {
|
||||
wg.Add(1)
|
||||
go func(h *Handler) {
|
||||
defer wg.Done()
|
||||
if err := h.cache.Start(); err != nil {
|
||||
select {
|
||||
case errChan <- err:
|
||||
default:
|
||||
}
|
||||
}
|
||||
}(h)
|
||||
}
|
||||
|
||||
// Use a separate goroutine to close channel after WaitGroup
|
||||
go func() {
|
||||
wg.Wait()
|
||||
close(errChan)
|
||||
}()
|
||||
|
||||
// Collect all errors
|
||||
var errors []error
|
||||
for err := range errChan {
|
||||
if err != nil {
|
||||
errors = append(errors, err)
|
||||
}
|
||||
}
|
||||
|
||||
if len(errors) > 0 {
|
||||
return fmt.Errorf("multiple handlers failed: %v", errors)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (wd *WebDav) mountHandlers(r chi.Router) {
|
||||
for _, h := range wd.Handlers {
|
||||
r.Mount(h.RootPath, h)
|
||||
}
|
||||
}
|
||||
|
||||
func (wd *WebDav) setupRootHandler(r chi.Router) {
|
||||
r.Get("/", wd.handleRoot())
|
||||
}
|
||||
|
||||
func (wd *WebDav) commonMiddleware(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("DAV", "1, 2")
|
||||
w.Header().Set("Allow", "OPTIONS, PROPFIND, GET, HEAD, POST, PUT, DELETE, MKCOL, PROPPATCH, COPY, MOVE, LOCK, UNLOCK")
|
||||
w.Header().Set("Access-Control-Allow-Origin", "*")
|
||||
w.Header().Set("Access-Control-Allow-Methods", "OPTIONS, PROPFIND, GET, HEAD, POST, PUT, DELETE, MKCOL, PROPPATCH, COPY, MOVE, LOCK, UNLOCK")
|
||||
w.Header().Set("Access-Control-Allow-Headers", "Depth, Content-Type, Authorization")
|
||||
|
||||
next.ServeHTTP(w, r)
|
||||
})
|
||||
}
|
||||
|
||||
func (wd *WebDav) handleRoot() http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "text/html; charset=utf-8")
|
||||
|
||||
tmpl, err := template.New("root").Parse(rootTemplate)
|
||||
if err != nil {
|
||||
http.Error(w, "Internal Server Error", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
data := struct {
|
||||
Handlers []*Handler
|
||||
Prefix string
|
||||
}{
|
||||
Handlers: wd.Handlers,
|
||||
Prefix: "/webdav",
|
||||
}
|
||||
if err := tmpl.Execute(w, data); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
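`Routes` registers the non-standard WebDAV verbs with chi and mounts one sub-handler per configured debrid cache under the router, while `handleRoot` lists the shares using the `/webdav` prefix. A minimal sketch of wiring the resulting handler into an HTTP server is shown below; the listen address is an assumption, and in the project this mounting is presumably done by the main web server rather than a standalone program.

```go
package main

import (
	"log"
	"net/http"

	"github.com/go-chi/chi/v5"
	"github.com/sirrobot01/debrid-blackhole/pkg/webdav"
)

func main() {
	wd := webdav.New()

	r := chi.NewRouter()
	// Mounting under /webdav matches the Prefix used by handleRoot and the
	// per-handler parent path (/webdav/<name>).
	r.Mount("/webdav", wd.Routes())

	// :8282 is only an example address.
	log.Fatal(http.ListenAndServe(":8282", r))
}
```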
108
pkg/worker/worker.go
Normal file
@@ -0,0 +1,108 @@
|
||||
package worker
|
||||
|
||||
import (
|
||||
"context"
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/sirrobot01/debrid-blackhole/internal/config"
|
||||
"github.com/sirrobot01/debrid-blackhole/internal/logger"
|
||||
"github.com/sirrobot01/debrid-blackhole/pkg/service"
|
||||
"os"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
|
||||
_logInstance zerolog.Logger
|
||||
once sync.Once
|
||||
)
|
||||
|
||||
func getLogger() zerolog.Logger {
|
||||
|
||||
once.Do(func() {
|
||||
cfg := config.GetConfig()
|
||||
_logInstance = logger.NewLogger("worker", cfg.LogLevel, os.Stdout)
|
||||
})
|
||||
return _logInstance
|
||||
}
|
||||
|
||||
func Start(ctx context.Context) error {
|
||||
cfg := config.GetConfig()
|
||||
// Start Arr Refresh Worker
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
cleanUpQueuesWorker(ctx, cfg)
|
||||
}()
|
||||
wg.Wait()
|
||||
return nil
|
||||
}
|
||||
|
||||
//func arrRefreshWorker(ctx context.Context, cfg *config.Config) {
|
||||
// // Start Arr Refresh Worker
|
||||
// _logger := getLogger()
|
||||
// _logger.Debug().Msg("Refresh Worker started")
|
||||
// refreshCtx := context.WithValue(ctx, "worker", "refresh")
|
||||
// refreshTicker := time.NewTicker(time.Duration(cfg.QBitTorrent.RefreshInterval) * time.Second)
|
||||
//
|
||||
// for {
|
||||
// select {
|
||||
// case <-refreshCtx.Done():
|
||||
// _logger.Debug().Msg("Refresh Worker stopped")
|
||||
// return
|
||||
// case <-refreshTicker.C:
|
||||
// refreshArrs()
|
||||
// }
|
||||
// }
|
||||
//}
|
||||
|
||||
func cleanUpQueuesWorker(ctx context.Context, cfg *config.Config) {
|
||||
// Start Clean up Queues Worker
|
||||
_logger := getLogger()
|
||||
_logger.Debug().Msg("Clean up Queues Worker started")
|
||||
cleanupCtx := context.WithValue(ctx, "worker", "cleanup")
|
||||
cleanupTicker := time.NewTicker(10 * time.Second)
|
||||
|
||||
var cleanupMutex sync.Mutex
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-cleanupCtx.Done():
|
||||
_logger.Debug().Msg("Clean up Queues Worker stopped")
|
||||
return
|
||||
case <-cleanupTicker.C:
|
||||
if cleanupMutex.TryLock() {
|
||||
go func() {
|
||||
defer cleanupMutex.Unlock()
|
||||
cleanUpQueues()
|
||||
}()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
//func refreshArrs() {
|
||||
// for _, a := range service.GetService().Arr.GetAll() {
|
||||
// err := a.Refresh()
|
||||
// if err != nil {
|
||||
// _logger := getLogger()
|
||||
// _logger.Debug().Err(err).Msg("Error refreshing arr")
|
||||
// return
|
||||
// }
|
||||
// }
|
||||
//}
|
||||
|
||||
func cleanUpQueues() {
|
||||
// Clean up queues
|
||||
_logger := getLogger()
|
||||
for _, a := range service.GetService().Arr.GetAll() {
|
||||
if !a.Cleanup {
|
||||
continue
|
||||
}
|
||||
_logger.Debug().Msgf("Cleaning up queue for %s", a.Name)
|
||||
if err := a.CleanupQueue(); err != nil {
|
||||
_logger.Debug().Err(err).Msg("Error cleaning up queue")
|
||||
}
|
||||
}
|
||||
}
|
||||
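The cleanup worker above combines a `time.Ticker` with `sync.Mutex.TryLock` so that a slow cleanup pass is skipped rather than stacked when the next tick fires. The same pattern in isolation, as a generic sketch (the work function and durations are placeholders):

```go
package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

// runPeriodically invokes work on every tick, but skips a tick if the previous
// invocation is still running -- the TryLock pattern used by cleanUpQueuesWorker.
func runPeriodically(ctx context.Context, interval time.Duration, work func()) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()

	var mu sync.Mutex
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			if mu.TryLock() {
				go func() {
					defer mu.Unlock()
					work()
				}()
			}
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()
	runPeriodically(ctx, time.Second, func() { fmt.Println("cleanup pass") })
}
```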
57
scripts/deploy.sh
Executable file
@@ -0,0 +1,57 @@
|
||||
#!/bin/bash
|
||||
|
||||
# deploy.sh
|
||||
|
||||
# Function to display usage
|
||||
usage() {
|
||||
echo "Usage: $0 [-b|--beta] <version>"
|
||||
echo "Example for main: $0 v1.0.0"
|
||||
echo "Example for beta: $0 -b v1.0.0"
|
||||
exit 1
|
||||
}
|
||||
|
||||
# Parse arguments
|
||||
BETA=false
|
||||
|
||||
while [[ "$#" -gt 0 ]]; do
|
||||
case $1 in
|
||||
-b|--beta) BETA=true; shift ;;
|
||||
-*) echo "Unknown parameter: $1"; usage ;;
|
||||
*) VERSION="$1"; shift ;;
|
||||
esac
|
||||
done
|
||||
|
||||
# Check if version is provided
|
||||
if [ -z "$VERSION" ]; then
|
||||
echo "Error: Version is required"
|
||||
usage
|
||||
fi
|
||||
|
||||
# Validate version format
|
||||
if ! [[ $VERSION =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
|
||||
echo "Error: Version must be in format v1.0.0"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Set tag based on branch
|
||||
if [ "$BETA" = true ]; then
|
||||
TAG="$VERSION-beta"
|
||||
BRANCH="beta"
|
||||
else
|
||||
TAG="$VERSION"
|
||||
BRANCH="main"
|
||||
fi
|
||||
|
||||
echo "Deploying version $VERSION to $BRANCH branch..."
|
||||
|
||||
# Ensure we're on the right branch
|
||||
git checkout $BRANCH || exit 1
|
||||
|
||||
# Create and push tag
|
||||
echo "Creating tag $TAG..."
|
||||
git tag "$TAG" || exit 1
|
||||
git push origin "$TAG" || exit 1
|
||||
|
||||
echo "Deployment initiated successfully!"
|
||||
echo "GitHub Actions will handle the release process."
|
||||
echo "Check the progress at: https://github.com/sirrobot01/debrid-blackhole/actions"
|
||||