Compare commits
60 Commits
v0.4.1
...
experiment
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
39945616f3 | ||
|
|
8029cd3840 | ||
|
|
19b8664146 | ||
|
|
8ea128446c | ||
|
|
391900e93d | ||
|
|
5987028f05 | ||
|
|
7492f629f9 | ||
|
|
101ae4197e | ||
|
|
a357897222 | ||
|
|
92177b150b | ||
|
|
9011420ac3 | ||
|
|
4b5e18df94 | ||
|
|
4659cd4273 | ||
|
|
7d954052ae | ||
|
|
8bf164451c | ||
|
|
5792305a66 | ||
|
|
f9addaed36 | ||
|
|
face86e151 | ||
|
|
cf28f42db4 | ||
|
|
dc2301eb98 | ||
|
|
f9bc7ad914 | ||
|
|
4ae5de99e8 | ||
|
|
d49fbea60f | ||
|
|
7bd38736b1 | ||
|
|
56bca562f4 | ||
|
|
9469c98df7 | ||
|
|
8c13da5d30 | ||
|
|
e2f792d5ab | ||
|
|
49875446b4 | ||
|
|
738474be16 | ||
|
|
d10b679584 | ||
|
|
f93d489956 | ||
|
|
8d494fc277 | ||
|
|
0c68364a6a | ||
|
|
50c775ca74 | ||
|
|
0d178992ef | ||
|
|
5d2fabe20b | ||
|
|
fa469c64c6 | ||
|
|
26f6f384a3 | ||
|
|
b91aa1db38 | ||
|
|
e2ff3b26de | ||
|
|
2d29996d2c | ||
|
|
b4e4db27fb | ||
|
|
c0589d4ad2 | ||
|
|
a30861984c | ||
|
|
4f92b135d4 | ||
|
|
2b2a682218 | ||
|
|
a83f3d72ce | ||
|
|
1c06407900 | ||
|
|
b1a3d8b762 | ||
|
|
0e25de0e3c | ||
|
|
e741a0e32b | ||
|
|
84bd93805f | ||
|
|
fce2ce28c7 | ||
|
|
302a461efd | ||
|
|
7eb021aac1 | ||
|
|
7a989ccf2b | ||
|
|
f04d7ac86e | ||
|
|
65fb2d1e7c | ||
|
|
46beac7227 |
@@ -5,7 +5,7 @@ tmp_dir = "tmp"
|
|||||||
[build]
|
[build]
|
||||||
args_bin = ["--config", "data/"]
|
args_bin = ["--config", "data/"]
|
||||||
bin = "./tmp/main"
|
bin = "./tmp/main"
|
||||||
cmd = "bash -c 'go build -ldflags \"-X github.com/sirrobot01/debrid-blackhole/pkg/version.Version=0.0.4 -X github.com/sirrobot01/debrid-blackhole/pkg/version.Channel=beta\" -o ./tmp/main .'"
|
cmd = "bash -c 'go build -ldflags \"-X github.com/sirrobot01/decypharr/pkg/version.Version=0.0.0 -X github.com/sirrobot01/decypharr/pkg/version.Channel=dev\" -o ./tmp/main .'"
|
||||||
delay = 1000
|
delay = 1000
|
||||||
exclude_dir = ["assets", "tmp", "vendor", "testdata", "data"]
|
exclude_dir = ["assets", "tmp", "vendor", "testdata", "data"]
|
||||||
exclude_file = []
|
exclude_file = []
|
||||||
|
|||||||
@@ -9,3 +9,4 @@ docker-compose.yml
|
|||||||
torrents.json
|
torrents.json
|
||||||
**/dist/
|
**/dist/
|
||||||
*.json
|
*.json
|
||||||
|
.ven/**
|
||||||
|
|||||||
85
.github/workflows/beta-docker.yml
vendored
Normal file
85
.github/workflows/beta-docker.yml
vendored
Normal file
@@ -0,0 +1,85 @@
|
|||||||
|
name: Beta Docker Build
|
||||||
|
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
branches:
|
||||||
|
- beta
|
||||||
|
|
||||||
|
permissions:
|
||||||
|
contents: read
|
||||||
|
packages: write
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
docker:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- name: Checkout repository
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
with:
|
||||||
|
fetch-depth: 0
|
||||||
|
|
||||||
|
- name: Calculate beta version
|
||||||
|
id: calculate_version
|
||||||
|
run: |
|
||||||
|
LATEST_TAG=$(git tag | grep -v 'beta' | sort -V | tail -n1)
|
||||||
|
echo "Found latest tag: ${LATEST_TAG}"
|
||||||
|
|
||||||
|
IFS='.' read -r -a VERSION_PARTS <<< "$LATEST_TAG"
|
||||||
|
MAJOR="${VERSION_PARTS[0]}"
|
||||||
|
MINOR="${VERSION_PARTS[1]}"
|
||||||
|
PATCH="${VERSION_PARTS[2]}"
|
||||||
|
|
||||||
|
NEW_PATCH=$((PATCH + 1))
|
||||||
|
BETA_VERSION="${MAJOR}.${MINOR}.${NEW_PATCH}"
|
||||||
|
|
||||||
|
echo "Calculated beta version: ${BETA_VERSION}"
|
||||||
|
echo "beta_version=${BETA_VERSION}" >> $GITHUB_ENV
|
||||||
|
|
||||||
|
- name: Set up QEMU
|
||||||
|
uses: docker/setup-qemu-action@v3
|
||||||
|
|
||||||
|
- name: Set up Docker Buildx
|
||||||
|
uses: docker/setup-buildx-action@v3
|
||||||
|
|
||||||
|
- name: Cache Docker layers
|
||||||
|
uses: actions/cache@v3
|
||||||
|
with:
|
||||||
|
path: /tmp/.buildx-cache
|
||||||
|
key: ${{ runner.os }}-buildx-${{ github.sha }}
|
||||||
|
restore-keys: |
|
||||||
|
${{ runner.os }}-buildx-
|
||||||
|
|
||||||
|
# Login to Docker Hub
|
||||||
|
- name: Login to Docker Hub
|
||||||
|
uses: docker/login-action@v3
|
||||||
|
with:
|
||||||
|
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||||
|
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||||
|
|
||||||
|
# Login to GitHub Container Registry
|
||||||
|
- name: Login to GitHub Container Registry
|
||||||
|
uses: docker/login-action@v3
|
||||||
|
with:
|
||||||
|
registry: ghcr.io
|
||||||
|
username: ${{ github.repository_owner }}
|
||||||
|
password: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
|
||||||
|
- name: Build and push beta Docker image
|
||||||
|
uses: docker/build-push-action@v5
|
||||||
|
with:
|
||||||
|
context: .
|
||||||
|
platforms: linux/amd64,linux/arm64,linux/arm/v7
|
||||||
|
push: true
|
||||||
|
tags: |
|
||||||
|
cy01/blackhole:beta
|
||||||
|
ghcr.io/${{ github.repository_owner }}/decypharr:beta
|
||||||
|
cache-from: type=local,src=/tmp/.buildx-cache
|
||||||
|
cache-to: type=local,dest=/tmp/.buildx-cache-new,mode=max
|
||||||
|
build-args: |
|
||||||
|
VERSION=${{ env.beta_version }}
|
||||||
|
CHANNEL=beta
|
||||||
|
|
||||||
|
- name: Move cache
|
||||||
|
run: |
|
||||||
|
rm -rf /tmp/.buildx-cache
|
||||||
|
mv /tmp/.buildx-cache-new /tmp/.buildx-cache
|
||||||
29
.github/workflows/deploy-docs.yml
vendored
Normal file
29
.github/workflows/deploy-docs.yml
vendored
Normal file
@@ -0,0 +1,29 @@
|
|||||||
|
name: ci
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
branches:
|
||||||
|
- main
|
||||||
|
- beta
|
||||||
|
permissions:
|
||||||
|
contents: write
|
||||||
|
jobs:
|
||||||
|
deploy:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
- name: Configure Git Credentials
|
||||||
|
run: |
|
||||||
|
git config user.name github-actions[bot]
|
||||||
|
git config user.email 41898282+github-actions[bot]@users.noreply.github.com
|
||||||
|
- uses: actions/setup-python@v5
|
||||||
|
with:
|
||||||
|
python-version: 3.x
|
||||||
|
- run: echo "cache_id=$(date --utc '+%V')" >> $GITHUB_ENV
|
||||||
|
- uses: actions/cache@v4
|
||||||
|
with:
|
||||||
|
key: mkdocs-material-${{ env.cache_id }}
|
||||||
|
path: .cache
|
||||||
|
restore-keys: |
|
||||||
|
mkdocs-material-
|
||||||
|
- run: pip install mkdocs-material
|
||||||
|
- run: cd docs && mkdocs gh-deploy --force
|
||||||
69
.github/workflows/docker.yml
vendored
69
.github/workflows/docker.yml
vendored
@@ -1,69 +0,0 @@
|
|||||||
name: Docker Build and Push
|
|
||||||
|
|
||||||
on:
|
|
||||||
push:
|
|
||||||
branches:
|
|
||||||
- main
|
|
||||||
- beta
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
docker:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- name: Checkout repository
|
|
||||||
uses: actions/checkout@v4
|
|
||||||
with:
|
|
||||||
fetch-depth: 0
|
|
||||||
|
|
||||||
- name: Get version
|
|
||||||
id: get_version
|
|
||||||
run: |
|
|
||||||
LATEST_TAG=$(git tag | sort -V | tail -n1)
|
|
||||||
echo "latest_tag=${LATEST_TAG}" >> $GITHUB_ENV
|
|
||||||
|
|
||||||
- name: Set channel
|
|
||||||
id: set_channel
|
|
||||||
run: |
|
|
||||||
if [[ ${{ github.ref }} == 'refs/heads/beta' ]]; then
|
|
||||||
echo "CHANNEL=beta" >> $GITHUB_ENV
|
|
||||||
else
|
|
||||||
echo "CHANNEL=stable" >> $GITHUB_ENV
|
|
||||||
fi
|
|
||||||
|
|
||||||
- name: Set up QEMU
|
|
||||||
uses: docker/setup-qemu-action@v3
|
|
||||||
|
|
||||||
- name: Set up Docker Buildx
|
|
||||||
uses: docker/setup-buildx-action@v3
|
|
||||||
|
|
||||||
- name: Login to Docker Hub
|
|
||||||
uses: docker/login-action@v3
|
|
||||||
with:
|
|
||||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
|
||||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
|
||||||
|
|
||||||
- name: Build and push for beta branch
|
|
||||||
if: github.ref == 'refs/heads/beta'
|
|
||||||
uses: docker/build-push-action@v5
|
|
||||||
with:
|
|
||||||
context: .
|
|
||||||
platforms: linux/amd64,linux/arm64,linux/arm/v7
|
|
||||||
push: true
|
|
||||||
tags: cy01/blackhole:beta
|
|
||||||
build-args: |
|
|
||||||
VERSION=${{ env.latest_tag }}
|
|
||||||
CHANNEL=${{ env.CHANNEL }}
|
|
||||||
|
|
||||||
- name: Build and push for main branch
|
|
||||||
if: github.ref == 'refs/heads/main'
|
|
||||||
uses: docker/build-push-action@v5
|
|
||||||
with:
|
|
||||||
context: .
|
|
||||||
platforms: linux/amd64,linux/arm64,linux/arm/v7
|
|
||||||
push: true
|
|
||||||
tags: |
|
|
||||||
cy01/blackhole:latest
|
|
||||||
cy01/blackhole:${{ env.latest_tag }}
|
|
||||||
build-args: |
|
|
||||||
VERSION=${{ env.latest_tag }}
|
|
||||||
CHANNEL=${{ env.CHANNEL }}
|
|
||||||
@@ -1,4 +1,4 @@
|
|||||||
name: Release
|
name: GoReleaser
|
||||||
|
|
||||||
on:
|
on:
|
||||||
push:
|
push:
|
||||||
@@ -22,14 +22,6 @@ jobs:
|
|||||||
with:
|
with:
|
||||||
go-version: '1.22'
|
go-version: '1.22'
|
||||||
|
|
||||||
- name: Set Release Channel
|
|
||||||
run: |
|
|
||||||
if [[ ${{ github.ref }} == refs/tags/beta* ]]; then
|
|
||||||
echo "RELEASE_CHANNEL=beta" >> $GITHUB_ENV
|
|
||||||
else
|
|
||||||
echo "RELEASE_CHANNEL=stable" >> $GITHUB_ENV
|
|
||||||
fi
|
|
||||||
|
|
||||||
- name: Run GoReleaser
|
- name: Run GoReleaser
|
||||||
uses: goreleaser/goreleaser-action@v5
|
uses: goreleaser/goreleaser-action@v5
|
||||||
with:
|
with:
|
||||||
@@ -38,3 +30,4 @@ jobs:
|
|||||||
args: release --clean
|
args: release --clean
|
||||||
env:
|
env:
|
||||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
RELEASE_CHANNEL: stable
|
||||||
77
.github/workflows/release-docker.yml
vendored
Normal file
77
.github/workflows/release-docker.yml
vendored
Normal file
@@ -0,0 +1,77 @@
|
|||||||
|
name: Release Docker Build
|
||||||
|
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
tags:
|
||||||
|
- '*'
|
||||||
|
|
||||||
|
permissions:
|
||||||
|
contents: read
|
||||||
|
packages: write
|
||||||
|
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
docker:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- name: Checkout repository
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
with:
|
||||||
|
fetch-depth: 1
|
||||||
|
|
||||||
|
- name: Get tag name
|
||||||
|
id: get_tag
|
||||||
|
run: |
|
||||||
|
TAG_NAME=${GITHUB_REF#refs/tags/}
|
||||||
|
echo "tag_name=${TAG_NAME}" >> $GITHUB_ENV
|
||||||
|
|
||||||
|
- name: Set up QEMU
|
||||||
|
uses: docker/setup-qemu-action@v3
|
||||||
|
|
||||||
|
- name: Set up Docker Buildx
|
||||||
|
uses: docker/setup-buildx-action@v3
|
||||||
|
|
||||||
|
- name: Cache Docker layers
|
||||||
|
uses: actions/cache@v3
|
||||||
|
with:
|
||||||
|
path: /tmp/.buildx-cache
|
||||||
|
key: ${{ runner.os }}-buildx-${{ github.sha }}
|
||||||
|
restore-keys: |
|
||||||
|
${{ runner.os }}-buildx-
|
||||||
|
|
||||||
|
# Login to Docker Hub
|
||||||
|
- name: Login to Docker Hub
|
||||||
|
uses: docker/login-action@v3
|
||||||
|
with:
|
||||||
|
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||||
|
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||||
|
|
||||||
|
# Login to GitHub Container Registry
|
||||||
|
- name: Login to GitHub Container Registry
|
||||||
|
uses: docker/login-action@v3
|
||||||
|
with:
|
||||||
|
registry: ghcr.io
|
||||||
|
username: ${{ github.repository_owner }}
|
||||||
|
password: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
|
||||||
|
- name: Build and push release Docker image
|
||||||
|
uses: docker/build-push-action@v5
|
||||||
|
with:
|
||||||
|
context: .
|
||||||
|
platforms: linux/amd64,linux/arm64,linux/arm/v7
|
||||||
|
push: true
|
||||||
|
tags: |
|
||||||
|
cy01/blackhole:latest
|
||||||
|
cy01/blackhole:${{ env.tag_name }}
|
||||||
|
ghcr.io/${{ github.repository_owner }}/decypharr:latest
|
||||||
|
ghcr.io/${{ github.repository_owner }}/decypharr:${{ env.tag_name }}
|
||||||
|
cache-from: type=local,src=/tmp/.buildx-cache
|
||||||
|
cache-to: type=local,dest=/tmp/.buildx-cache-new,mode=max
|
||||||
|
build-args: |
|
||||||
|
VERSION=${{ env.tag_name }}
|
||||||
|
CHANNEL=stable
|
||||||
|
|
||||||
|
- name: Move cache
|
||||||
|
run: |
|
||||||
|
rm -rf /tmp/.buildx-cache
|
||||||
|
mv /tmp/.buildx-cache-new /tmp/.buildx-cache
|
||||||
2
.gitignore
vendored
2
.gitignore
vendored
@@ -1,6 +1,5 @@
|
|||||||
data/
|
data/
|
||||||
config.json
|
config.json
|
||||||
docker-compose.yml
|
|
||||||
.idea/
|
.idea/
|
||||||
.DS_Store
|
.DS_Store
|
||||||
*.torrent
|
*.torrent
|
||||||
@@ -13,3 +12,4 @@ tmp/**
|
|||||||
torrents.json
|
torrents.json
|
||||||
logs/**
|
logs/**
|
||||||
auth.json
|
auth.json
|
||||||
|
.ven/
|
||||||
@@ -17,21 +17,20 @@ builds:
|
|||||||
- arm64
|
- arm64
|
||||||
ldflags:
|
ldflags:
|
||||||
- -s -w
|
- -s -w
|
||||||
- -X github.com/sirrobot01/debrid-blackhole/pkg/version.Version={{.Version}}
|
- -X github.com/sirrobot01/decypharr/pkg/version.Version={{.Version}}
|
||||||
- -X github.com/sirrobot01/debrid-blackhole/pkg/version.Channel={{.Env.RELEASE_CHANNEL}}
|
- -X github.com/sirrobot01/decypharr/pkg/version.Channel={{.Env.RELEASE_CHANNEL}}
|
||||||
|
|
||||||
|
|
||||||
archives:
|
archives:
|
||||||
- format: tar.gz
|
- format: tar.gz
|
||||||
# this name template makes the OS and Arch compatible with the results of `uname`.
|
# this name template makes the OS and Arch compatible with the results of `uname`.
|
||||||
name_template: >-
|
name_template: >-
|
||||||
{{ .ProjectName }}_
|
decypharr_
|
||||||
{{- title .Os }}_
|
{{- title .Os }}_
|
||||||
{{- if eq .Arch "amd64" }}x86_64
|
{{- if eq .Arch "amd64" }}x86_64
|
||||||
{{- else if eq .Arch "386" }}i386
|
{{- else if eq .Arch "386" }}i386
|
||||||
{{- else }}{{ .Arch }}{{ end }}
|
{{- else }}{{ .Arch }}{{ end }}
|
||||||
{{- if .Arm }}v{{ .Arm }}{{ end }}
|
{{- if .Arm }}v{{ .Arm }}{{ end }}
|
||||||
# use zip for windows archives
|
|
||||||
format_overrides:
|
format_overrides:
|
||||||
- goos: windows
|
- goos: windows
|
||||||
format: zip
|
format: zip
|
||||||
|
|||||||
15
Dockerfile
15
Dockerfile
@@ -19,8 +19,8 @@ RUN --mount=type=cache,target=/go/pkg/mod \
|
|||||||
--mount=type=cache,target=/root/.cache/go-build \
|
--mount=type=cache,target=/root/.cache/go-build \
|
||||||
CGO_ENABLED=0 GOOS=$TARGETOS GOARCH=$TARGETARCH \
|
CGO_ENABLED=0 GOOS=$TARGETOS GOARCH=$TARGETARCH \
|
||||||
go build -trimpath \
|
go build -trimpath \
|
||||||
-ldflags="-w -s -X github.com/sirrobot01/debrid-blackhole/pkg/version.Version=${VERSION} -X github.com/sirrobot01/debrid-blackhole/pkg/version.Channel=${CHANNEL}" \
|
-ldflags="-w -s -X github.com/sirrobot01/decypharr/pkg/version.Version=${VERSION} -X github.com/sirrobot01/decypharr/pkg/version.Channel=${CHANNEL}" \
|
||||||
-o /blackhole
|
-o /decypharr
|
||||||
|
|
||||||
# Build healthcheck (optimized)
|
# Build healthcheck (optimized)
|
||||||
RUN --mount=type=cache,target=/go/pkg/mod \
|
RUN --mount=type=cache,target=/go/pkg/mod \
|
||||||
@@ -32,6 +32,7 @@ RUN --mount=type=cache,target=/go/pkg/mod \
|
|||||||
# Stage 2: Create directory structure
|
# Stage 2: Create directory structure
|
||||||
FROM alpine:3.19 as dirsetup
|
FROM alpine:3.19 as dirsetup
|
||||||
RUN mkdir -p /app/logs && \
|
RUN mkdir -p /app/logs && \
|
||||||
|
mkdir -p /app/cache && \
|
||||||
chmod 777 /app/logs && \
|
chmod 777 /app/logs && \
|
||||||
touch /app/logs/decypharr.log && \
|
touch /app/logs/decypharr.log && \
|
||||||
chmod 666 /app/logs/decypharr.log
|
chmod 666 /app/logs/decypharr.log
|
||||||
@@ -41,13 +42,13 @@ FROM gcr.io/distroless/static-debian12:nonroot
|
|||||||
|
|
||||||
LABEL version = "${VERSION}-${CHANNEL}"
|
LABEL version = "${VERSION}-${CHANNEL}"
|
||||||
|
|
||||||
LABEL org.opencontainers.image.source = "https://github.com/sirrobot01/debrid-blackhole"
|
LABEL org.opencontainers.image.source = "https://github.com/sirrobot01/decypharr"
|
||||||
LABEL org.opencontainers.image.title = "debrid-blackhole"
|
LABEL org.opencontainers.image.title = "decypharr"
|
||||||
LABEL org.opencontainers.image.authors = "sirrobot01"
|
LABEL org.opencontainers.image.authors = "sirrobot01"
|
||||||
LABEL org.opencontainers.image.documentation = "https://github.com/sirrobot01/debrid-blackhole/blob/main/README.md"
|
LABEL org.opencontainers.image.documentation = "https://github.com/sirrobot01/decypharr/blob/main/README.md"
|
||||||
|
|
||||||
# Copy binaries
|
# Copy binaries
|
||||||
COPY --from=builder --chown=nonroot:nonroot /blackhole /usr/bin/blackhole
|
COPY --from=builder --chown=nonroot:nonroot /decypharr /usr/bin/decypharr
|
||||||
COPY --from=builder --chown=nonroot:nonroot /healthcheck /usr/bin/healthcheck
|
COPY --from=builder --chown=nonroot:nonroot /healthcheck /usr/bin/healthcheck
|
||||||
|
|
||||||
# Copy pre-made directory structure
|
# Copy pre-made directory structure
|
||||||
@@ -62,4 +63,4 @@ USER nonroot:nonroot
|
|||||||
|
|
||||||
HEALTHCHECK CMD ["/usr/bin/healthcheck"]
|
HEALTHCHECK CMD ["/usr/bin/healthcheck"]
|
||||||
|
|
||||||
CMD ["/usr/bin/blackhole", "--config", "/app"]
|
CMD ["/usr/bin/decypharr", "--config", "/app"]
|
||||||
21
LICENSE
Normal file
21
LICENSE
Normal file
@@ -0,0 +1,21 @@
|
|||||||
|
MIT License
|
||||||
|
|
||||||
|
Copyright (c) 2025 Mukhtar Akere
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
in the Software without restriction, including without limitation the rights
|
||||||
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
copies of the Software, and to permit persons to whom the Software is
|
||||||
|
furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in all
|
||||||
|
copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||||
|
SOFTWARE.
|
||||||
205
README.md
205
README.md
@@ -1,126 +1,78 @@
|
|||||||
### DecyphArr(Qbittorent, but with Debrid Proxy Support)
|
# DecyphArr
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
This is an implementation of QbitTorrent with a **Multiple Debrid service support**. Written in Go.
|
**DecyphArr** is an implementation of QbitTorrent with **Multiple Debrid service support**, written in Go.
|
||||||
|
|
||||||
### Table of Contents
|
## What is DecyphArr?
|
||||||
|
|
||||||
- [Features](#features)
|
DecyphArr combines the power of QBittorrent with popular Debrid services to enhance your media management. It provides a familiar interface for Sonarr, Radarr, and other \*Arr applications while leveraging the capabilities of Debrid providers.
|
||||||
- [Supported Debrid Providers](#supported-debrid-providers)
|
|
||||||
- [Installation](#installation)
|
|
||||||
- [Docker Compose](#docker-compose)
|
|
||||||
- [Binary](#binary)
|
|
||||||
- [Usage](#usage)
|
|
||||||
- [Connecting to Sonarr/Radarr](#connecting-to-sonarrradarr)
|
|
||||||
- [Sample Config](#sample-config)
|
|
||||||
- [Config Notes](#config-notes)
|
|
||||||
- [Log Level](#log-level)
|
|
||||||
- [Max Cache Size](#max-cache-size)
|
|
||||||
- [Debrid Config](#debrid-config)
|
|
||||||
- [Proxy Config](#proxy-config)
|
|
||||||
- [Qbittorrent Config](#qbittorrent-config)
|
|
||||||
- [Arrs Config](#arrs-config)
|
|
||||||
- [Proxy](#proxy)
|
|
||||||
- [Repair Worker](#repair-worker)
|
|
||||||
- [Changelog](#changelog)
|
|
||||||
- [TODO](#todo)
|
|
||||||
|
|
||||||
### Features
|
## Features
|
||||||
|
|
||||||
- Mock Qbittorent API that supports the Arrs(Sonarr, Radarr, etc)
|
- 🔄 Mock Qbittorent API that supports the Arrs (Sonarr, Radarr, Lidarr etc)
|
||||||
- A Full-fledged UI for managing torrents
|
- 🖥️ Full-fledged UI for managing torrents
|
||||||
- Proxy support for the Arrs
|
- 🛡️ Proxy support for filtering out un-cached Debrid torrents
|
||||||
- Real Debrid Support
|
- 🔌 Multiple Debrid providers support
|
||||||
- Torbox Support
|
- 📁 WebDAV server support for each debrid provider
|
||||||
- Debrid Link Support
|
- 🔧 Repair Worker for missing files
|
||||||
- Multi-Debrid Providers support
|
|
||||||
- Repair Worker for missing files (**NEW**)
|
|
||||||
|
|
||||||
The proxy is useful for filtering out un-cached Debrid torrents
|
## Supported Debrid Providers
|
||||||
|
|
||||||
### Supported Debrid Providers
|
|
||||||
- [Real Debrid](https://real-debrid.com)
|
- [Real Debrid](https://real-debrid.com)
|
||||||
- [Torbox](https://torbox.app)
|
- [Torbox](https://torbox.app)
|
||||||
- [Debrid Link](https://debrid-link.com)
|
- [Debrid Link](https://debrid-link.com)
|
||||||
- [All Debrid](https://alldebrid.com)
|
- [All Debrid](https://alldebrid.com)
|
||||||
|
|
||||||
|
## Quick Start
|
||||||
|
|
||||||
### Installation
|
### Docker (Recommended)
|
||||||
|
|
||||||
##### Docker Compose
|
|
||||||
```yaml
|
```yaml
|
||||||
version: '3.7'
|
version: '3.7'
|
||||||
services:
|
services:
|
||||||
blackhole:
|
decypharr:
|
||||||
image: cy01/blackhole:latest # or cy01/blackhole:beta
|
image: cy01/blackhole:latest # or cy01/blackhole:beta
|
||||||
container_name: blackhole
|
container_name: decypharr
|
||||||
ports:
|
ports:
|
||||||
- "8282:8282" # qBittorrent
|
- "8282:8282" # qBittorrent
|
||||||
- "8181:8181" # Proxy
|
|
||||||
user: "1000:1000"
|
user: "1000:1000"
|
||||||
volumes:
|
volumes:
|
||||||
- /mnt/:/mnt
|
- /mnt/:/mnt
|
||||||
- ~/plex/configs/blackhole/:/app # config.json must be in this directory
|
- ./configs/:/app # config.json must be in this directory
|
||||||
environment:
|
environment:
|
||||||
- PUID=1000
|
- PUID=1000
|
||||||
- PGID=1000
|
- PGID=1000
|
||||||
- UMASK=002
|
- UMASK=002
|
||||||
- QBIT_PORT=8282 # qBittorrent Port. This is optional. You can set this in the config file
|
|
||||||
- PORT=8181 # Proxy Port. This is optional. You can set this in the config file
|
|
||||||
restart: unless-stopped
|
restart: unless-stopped
|
||||||
depends_on:
|
|
||||||
- rclone # If you are using rclone with docker
|
|
||||||
|
|
||||||
```
|
```
|
||||||
|
|
||||||
##### Binary
|
## Documentation
|
||||||
Download the binary from the releases page and run it with the config file.
|
|
||||||
|
|
||||||
```bash
|
For complete documentation, please visit our [Documentation](https://sirrobot01.github.io/debrid-blackhole/).
|
||||||
./blackhole --config /app
|
|
||||||
```
|
|
||||||
|
|
||||||
### Usage
|
The documentation includes:
|
||||||
- The UI is available at `http://localhost:8282`
|
|
||||||
- Setup the config.json file. Scroll down for the sample config file
|
|
||||||
- Setup docker compose/ binary with the config file
|
|
||||||
- Start the service
|
|
||||||
- Connect to Sonarr/Radarr/Lidarr
|
|
||||||
|
|
||||||
#### Connecting to Sonarr/Radarr
|
- Detailed installation instructions
|
||||||
|
- Configuration guide
|
||||||
|
- Usage with Sonarr/Radarr
|
||||||
|
- WebDAV setup
|
||||||
|
- Repair Worker information
|
||||||
|
- ...and more!
|
||||||
|
|
||||||
- Sonarr/Radarr
|
## Basic Configuration
|
||||||
- Settings -> Download Client -> Add Client -> qBittorrent
|
|
||||||
- Host: `localhost` # or the IP of the server
|
|
||||||
- Port: `8282` # or the port set in the config file/ docker-compose env
|
|
||||||
- Username: `http://sonarr:8989` # Your arr host with http/https
|
|
||||||
- Password: `sonarr_token` # Your arr token
|
|
||||||
- Category: e.g `sonarr`, `radarr`
|
|
||||||
- Use SSL -> `No`
|
|
||||||
- Sequential Download -> `No`|`Yes` (If you want to download the torrents locally instead of symlink)
|
|
||||||
- Click Test
|
|
||||||
- Click Save
|
|
||||||
|
|
||||||
#### Basic Sample Config
|
|
||||||
|
|
||||||
This is the default config file. You can create a `config.json` file in the root directory of the project or mount it to /app in the docker-compose file.
|
|
||||||
```json
|
```json
|
||||||
{
|
{
|
||||||
"debrids": [
|
"debrids": [
|
||||||
{
|
{
|
||||||
"name": "realdebrid",
|
"name": "realdebrid",
|
||||||
"host": "https://api.real-debrid.com/rest/1.0",
|
"host": "https://api.real-debrid.com/rest/1.0",
|
||||||
"api_key": "realdebrid_key",
|
"api_key": "your_api_key_here",
|
||||||
"folder": "/mnt/remote/realdebrid/__all__/"
|
"folder": "/mnt/remote/realdebrid/__all__/",
|
||||||
|
"use_webdav": true
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
"proxy": {
|
|
||||||
"enabled": true,
|
|
||||||
"port": "8100",
|
|
||||||
"username": "username",
|
|
||||||
"password": "password"
|
|
||||||
},
|
|
||||||
"qbittorrent": {
|
"qbittorrent": {
|
||||||
"port": "8282",
|
"port": "8282",
|
||||||
"download_folder": "/mnt/symlinks/",
|
"download_folder": "/mnt/symlinks/",
|
||||||
@@ -131,97 +83,14 @@ This is the default config file. You can create a `config.json` file in the root
|
|||||||
"interval": "12h",
|
"interval": "12h",
|
||||||
"run_on_start": false
|
"run_on_start": false
|
||||||
},
|
},
|
||||||
"use_auth": false
|
"use_auth": false,
|
||||||
|
"log_level": "info"
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
Full config are [here](doc/config.full.json)
|
## Contributing
|
||||||
|
|
||||||
<details>
|
Contributions are welcome! Please feel free to submit a Pull Request.
|
||||||
|
|
||||||
<summary>
|
## License
|
||||||
Click Here for the full config notes
|
This project is licensed under the MIT License. See the [LICENSE](LICENSE) file for details.
|
||||||
</summary>
|
|
||||||
|
|
||||||
- The `log_level` key is used to set the log level of the application. The default value is `info`. log level can be set to `debug`, `info`, `warn`, `error`
|
|
||||||
- The `max_cache_size` key is used to set the maximum number of infohashes that can be stored in the availability cache. This is used to prevent round trip to the debrid provider when using the proxy/Qbittorrent. The default value is `1000`
|
|
||||||
- The `allowed_file_types` key is an array of allowed file types that can be downloaded. By default, all movie, tv show and music file types are allowed
|
|
||||||
- The `use_auth` is used to enable basic authentication for the UI. The default value is `false`
|
|
||||||
|
|
||||||
##### Debrid Config
|
|
||||||
- The `debrids` key is an array of debrid providers
|
|
||||||
- The `name` key is the name of the debrid provider
|
|
||||||
- The `host` key is the API endpoint of the debrid provider
|
|
||||||
- The `api_key` key is the API key of the debrid provider
|
|
||||||
- The `folder` key is the folder where your debrid folder is mounted(webdav, rclone, zurg etc). e.g `data/realdebrid/torrents/`, `/media/remote/alldebrid/magnets/`
|
|
||||||
- The `rate_limit` key is the rate limit of the debrid provider(null by default)
|
|
||||||
- The `download_uncached` bool key is used to download uncached torrents(disabled by default)
|
|
||||||
- The `check_cached` bool key is used to check if the torrent is cached(disabled by default)
|
|
||||||
|
|
||||||
##### Repair Config (**BETA**)
|
|
||||||
The `repair` key is used to enable the repair worker
|
|
||||||
- The `enabled` key is used to enable the repair worker
|
|
||||||
- The `interval` key is the interval in either minutes, seconds, hours, days. Use any of this format, e.g 12:00, 5:00, 1h, 1d, 1m, 1s.
|
|
||||||
- The `run_on_start` key is used to run the repair worker on start
|
|
||||||
- The `zurg_url` is the url of the zurg server. Typically `http://localhost:9999` or `http://zurg:9999`
|
|
||||||
- The `skip_deletion`: true if you don't want to delete the files
|
|
||||||
|
|
||||||
##### Proxy Config
|
|
||||||
- The `enabled` key is used to enable the proxy
|
|
||||||
- The `port` key is the port the proxy will listen on
|
|
||||||
- The `log_level` key is used to set the log level of the proxy. The default value is `info`
|
|
||||||
- The `username` and `password` keys are used for basic authentication
|
|
||||||
- The `cached_only` means only cached torrents will be returned
|
|
||||||
|
|
||||||
|
|
||||||
##### Qbittorrent Config
|
|
||||||
- The `port` key is the port the qBittorrent will listen on
|
|
||||||
- The `download_folder` is the folder where the torrents will be downloaded. e.g `/media/symlinks/`
|
|
||||||
- The `categories` key is used to filter out torrents based on the category. e.g `sonarr`, `radarr`
|
|
||||||
- The `refresh_interval` key is used to set the interval in minutes to refresh the Arrs Monitored Downloads(it's in seconds). The default value is `5` seconds
|
|
||||||
|
|
||||||
|
|
||||||
##### Arrs Config
|
|
||||||
This is an array of Arrs(Sonarr, Radarr, etc) that will be used to download the torrents. This is not required if you already set up the Qbittorrent in the Arrs with the host, token.
|
|
||||||
This is particularly useful if you want to use the Repair tool without using Qbittorent
|
|
||||||
- The `name` key is the name of the Arr/ Category
|
|
||||||
- The `host` key is the host of the Arr
|
|
||||||
- The `token` key is the API token of the Arr
|
|
||||||
- THe `cleanup` key is used to cleanup your arr queues. This is usually for removing dangling queues(downloads that all the files have been import, sometimes, some incomplete season packs)
|
|
||||||
|
|
||||||
</details>
|
|
||||||
|
|
||||||
|
|
||||||
### Proxy
|
|
||||||
|
|
||||||
**Note**: Proxy has stopped working for Real Debrid, Debrid Link, and All Debrid. It still works for Torbox. This is due to the changes in the API of the Debrid Providers.
|
|
||||||
|
|
||||||
The proxy is useful in filtering out un-cached Debrid torrents.
|
|
||||||
The proxy is a simple HTTP proxy that requires basic authentication. The proxy can be enabled by setting the `proxy.enabled` to `true` in the config file.
|
|
||||||
The proxy listens on the port `8181` by default. The username and password can be set in the config file.
|
|
||||||
|
|
||||||
### Repair Worker
|
|
||||||
|
|
||||||
The repair worker is a simple worker that checks for missing files in the Arrs(Sonarr, Radarr, etc). It's particularly useful for files either deleted by the Debrid provider or files with bad symlinks.
|
|
||||||
|
|
||||||
**Note**: If you're using zurg, set the `zurg_url` under repair config. This will speed up the repair process, exponentially.
|
|
||||||
|
|
||||||
- Search for broken symlinks/files
|
|
||||||
- Search for missing files
|
|
||||||
- Search for deleted/unreadable files
|
|
||||||
|
|
||||||
|
|
||||||
### Changelog
|
|
||||||
|
|
||||||
- View the [CHANGELOG.md](CHANGELOG.md) for the latest changes
|
|
||||||
|
|
||||||
|
|
||||||
### TODO
|
|
||||||
- [x] A proper name!!!!
|
|
||||||
- [x] Debrid
|
|
||||||
- [x] Add more Debrid Providers
|
|
||||||
|
|
||||||
- [x] Qbittorrent
|
|
||||||
- [x] Add more Qbittorrent features
|
|
||||||
- [x] Persist torrents on restart/server crash
|
|
||||||
- [ ] Add tests
|
|
||||||
@@ -2,75 +2,100 @@ package decypharr
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"github.com/sirrobot01/debrid-blackhole/internal/config"
|
"fmt"
|
||||||
"github.com/sirrobot01/debrid-blackhole/internal/logger"
|
"github.com/sirrobot01/decypharr/internal/config"
|
||||||
"github.com/sirrobot01/debrid-blackhole/pkg/proxy"
|
"github.com/sirrobot01/decypharr/internal/logger"
|
||||||
"github.com/sirrobot01/debrid-blackhole/pkg/qbit"
|
"github.com/sirrobot01/decypharr/pkg/qbit"
|
||||||
"github.com/sirrobot01/debrid-blackhole/pkg/server"
|
"github.com/sirrobot01/decypharr/pkg/server"
|
||||||
"github.com/sirrobot01/debrid-blackhole/pkg/service"
|
"github.com/sirrobot01/decypharr/pkg/service"
|
||||||
"github.com/sirrobot01/debrid-blackhole/pkg/version"
|
"github.com/sirrobot01/decypharr/pkg/version"
|
||||||
"github.com/sirrobot01/debrid-blackhole/pkg/web"
|
"github.com/sirrobot01/decypharr/pkg/web"
|
||||||
"github.com/sirrobot01/debrid-blackhole/pkg/worker"
|
"github.com/sirrobot01/decypharr/pkg/webdav"
|
||||||
"log"
|
"github.com/sirrobot01/decypharr/pkg/worker"
|
||||||
|
"os"
|
||||||
|
"runtime/debug"
|
||||||
|
"strconv"
|
||||||
"sync"
|
"sync"
|
||||||
|
"syscall"
|
||||||
)
|
)
|
||||||
|
|
||||||
func Start(ctx context.Context) error {
|
func Start(ctx context.Context) error {
|
||||||
cfg := config.GetConfig()
|
|
||||||
|
if umaskStr := os.Getenv("UMASK"); umaskStr != "" {
|
||||||
|
umask, err := strconv.ParseInt(umaskStr, 8, 32)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("invalid UMASK value: %s", umaskStr)
|
||||||
|
}
|
||||||
|
// Set umask
|
||||||
|
syscall.Umask(int(umask))
|
||||||
|
}
|
||||||
|
|
||||||
|
cfg := config.Get()
|
||||||
var wg sync.WaitGroup
|
var wg sync.WaitGroup
|
||||||
errChan := make(chan error)
|
errChan := make(chan error)
|
||||||
|
|
||||||
_log := logger.GetLogger(cfg.LogLevel)
|
_log := logger.GetDefaultLogger()
|
||||||
|
|
||||||
_log.Info().Msgf("Version: %s", version.GetInfo().String())
|
_log.Info().Msgf("Starting Decypher (%s)", version.GetInfo().String())
|
||||||
_log.Debug().Msgf("Config Loaded: %s", cfg.JsonFile())
|
_log.Info().Msgf("Default Log Level: %s", cfg.LogLevel)
|
||||||
_log.Debug().Msgf("Default Log Level: %s", cfg.LogLevel)
|
|
||||||
|
|
||||||
svc := service.New()
|
svc := service.New()
|
||||||
_qbit := qbit.New()
|
_qbit := qbit.New()
|
||||||
srv := server.New()
|
srv := server.New()
|
||||||
webRoutes := web.New(_qbit).Routes()
|
_webdav := webdav.New()
|
||||||
|
|
||||||
|
ui := web.New(_qbit).Routes()
|
||||||
|
webdavRoutes := _webdav.Routes()
|
||||||
qbitRoutes := _qbit.Routes()
|
qbitRoutes := _qbit.Routes()
|
||||||
|
|
||||||
// Register routes
|
// Register routes
|
||||||
srv.Mount("/", webRoutes)
|
srv.Mount("/", ui)
|
||||||
srv.Mount("/api/v2", qbitRoutes)
|
srv.Mount("/api/v2", qbitRoutes)
|
||||||
|
srv.Mount("/webdav", webdavRoutes)
|
||||||
|
|
||||||
if cfg.Proxy.Enabled {
|
safeGo := func(f func() error) {
|
||||||
wg.Add(1)
|
wg.Add(1)
|
||||||
go func() {
|
go func() {
|
||||||
defer wg.Done()
|
defer wg.Done()
|
||||||
if err := proxy.NewProxy().Start(ctx); err != nil {
|
defer func() {
|
||||||
|
if r := recover(); r != nil {
|
||||||
|
stack := debug.Stack()
|
||||||
|
_log.Error().
|
||||||
|
Interface("panic", r).
|
||||||
|
Str("stack", string(stack)).
|
||||||
|
Msg("Recovered from panic in goroutine")
|
||||||
|
|
||||||
|
// Send error to channel so the main goroutine is aware
|
||||||
|
errChan <- fmt.Errorf("panic: %v", r)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
if err := f(); err != nil {
|
||||||
errChan <- err
|
errChan <- err
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
}
|
}
|
||||||
|
|
||||||
wg.Add(1)
|
safeGo(func() error {
|
||||||
go func() {
|
return _webdav.Start(ctx)
|
||||||
defer wg.Done()
|
})
|
||||||
if err := srv.Start(ctx); err != nil {
|
|
||||||
errChan <- err
|
|
||||||
}
|
|
||||||
|
|
||||||
}()
|
safeGo(func() error {
|
||||||
|
return srv.Start(ctx)
|
||||||
|
})
|
||||||
|
|
||||||
wg.Add(1)
|
safeGo(func() error {
|
||||||
go func() {
|
return worker.Start(ctx)
|
||||||
defer wg.Done()
|
})
|
||||||
if err := worker.Start(ctx); err != nil {
|
|
||||||
errChan <- err
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
if cfg.Repair.Enabled {
|
if cfg.Repair.Enabled {
|
||||||
wg.Add(1)
|
safeGo(func() error {
|
||||||
go func() {
|
err := svc.Repair.Start(ctx)
|
||||||
defer wg.Done()
|
if err != nil {
|
||||||
if err := svc.Repair.Start(ctx); err != nil {
|
_log.Error().Err(err).Msg("Error during repair")
|
||||||
log.Printf("Error during repair: %v", err)
|
|
||||||
}
|
}
|
||||||
}()
|
return nil // Not propagating repair errors to terminate the app
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
go func() {
|
go func() {
|
||||||
|
|||||||
BIN
doc/download.png
BIN
doc/download.png
Binary file not shown.
|
Before Width: | Height: | Size: 185 KiB |
BIN
doc/main.png
BIN
doc/main.png
Binary file not shown.
|
Before Width: | Height: | Size: 156 KiB |
@@ -1,119 +1,40 @@
|
|||||||
#### 0.1.0
|
# Changelog
|
||||||
- Initial Release
|
|
||||||
- Added Real Debrid Support
|
|
||||||
- Added Arrs Support
|
|
||||||
- Added Proxy Support
|
|
||||||
- Added Basic Authentication for Proxy
|
|
||||||
- Added Rate Limiting for Debrid Providers
|
|
||||||
|
|
||||||
#### 0.1.1
|
## 0.5.0
|
||||||
- Added support for "No Blackhole" for Arrs
|
|
||||||
- Added support for "Cached Only" for Proxy
|
|
||||||
- Bug Fixes
|
|
||||||
|
|
||||||
#### 0.1.2
|
- A more refined repair worker (with more control)
|
||||||
- Bug fixes
|
- UI Improvements
|
||||||
- Code cleanup
|
- Pagination for torrents
|
||||||
- Get available hashes at once
|
- Dark mode
|
||||||
|
- Ordered torrents table
|
||||||
|
- Fix Arr API flaky behavior
|
||||||
|
- Discord Notifications
|
||||||
|
- Minor bug fixes
|
||||||
|
- Add Tautulli support
|
||||||
|
- playback_failed event triggers a repair
|
||||||
|
- Miscellaneous improvements
|
||||||
|
- Add an option to skip the repair worker for a specific arr
|
||||||
|
- Arr specific uncached downloading option
|
||||||
|
- Option to download uncached torrents from UI
|
||||||
|
- Remove QbitTorrent Log level (Use the global log level)
|
||||||
|
|
||||||
#### 0.1.3
|
## 0.4.2
|
||||||
|
|
||||||
- Searching for infohashes in the xml description/summary/comments
|
- Hotfixes
|
||||||
- Added local cache support
|
- Fix saving torrents error
|
||||||
- Added max cache size
|
- Fix bugs with the UI
|
||||||
- Rewrite blackhole.go
|
- Speed improvements
|
||||||
- Bug fixes
|
|
||||||
- Fixed indexer getting disabled
|
|
||||||
- Fixed blackhole not working
|
|
||||||
|
|
||||||
#### 0.1.4
|
## 0.4.1
|
||||||
|
|
||||||
- Rewrote Report log
|
- Adds optional UI authentication
|
||||||
- Fix YTS, 1337x not grabbing infohash
|
- Downloaded Torrents persist on restart
|
||||||
- Fix Torrent symlink bug
|
- Fixes
|
||||||
|
- Fix Alldebrid struggling to find the correct file
|
||||||
|
- Minor bug fixes or speed-gains
|
||||||
|
- A new cleanup worker to clean up ARR queues
|
||||||
|
|
||||||
|
## 0.4.0
|
||||||
#### 0.2.0-beta
|
|
||||||
|
|
||||||
- Switch to QbitTorrent API instead of Blackhole
|
|
||||||
- Rewrote the whole codebase
|
|
||||||
|
|
||||||
|
|
||||||
### 0.2.0
|
|
||||||
- Implement 0.2.0-beta changes
|
|
||||||
- Removed Blackhole
|
|
||||||
- Added QbitTorrent API
|
|
||||||
- Cleaned up the code
|
|
||||||
|
|
||||||
#### 0.2.1
|
|
||||||
|
|
||||||
- Fix Uncached torrents not being downloaded/downloaded
|
|
||||||
- Minor bug fixed
|
|
||||||
- Fix Race condition in the cache and file system
|
|
||||||
|
|
||||||
#### 0.2.2
|
|
||||||
- Fix name mismatch in the cache
|
|
||||||
- Fix directory mapping with mounts
|
|
||||||
- Add Support for refreshing the *arrs
|
|
||||||
|
|
||||||
#### 0.2.3
|
|
||||||
|
|
||||||
- Delete uncached items from RD
|
|
||||||
- Fail if the torrent is not cached(optional)
|
|
||||||
- Fix cache not being updated
|
|
||||||
|
|
||||||
#### 0.2.4
|
|
||||||
|
|
||||||
- Add file download support(Sequential Download)
|
|
||||||
- Fix http handler error
|
|
||||||
- Fix *arrs map failing concurrently
|
|
||||||
- Fix cache not being updated
|
|
||||||
|
|
||||||
#### 0.2.5
|
|
||||||
- Fix ContentPath not being set prior
|
|
||||||
- Rewrote Readme
|
|
||||||
- Cleaned up the code
|
|
||||||
|
|
||||||
#### 0.2.6
|
|
||||||
- Delete torrent for empty matched files
|
|
||||||
- Update Readme
|
|
||||||
|
|
||||||
#### 0.2.7
|
|
||||||
|
|
||||||
- Add support for multiple debrid providers
|
|
||||||
- Add Torbox support
|
|
||||||
- Add support for configurable debrid cache checks
|
|
||||||
- Add support for configurable debrid download uncached torrents
|
|
||||||
|
|
||||||
#### 0.3.0
|
|
||||||
|
|
||||||
- Add UI for adding torrents
|
|
||||||
- Refraction of the code
|
|
||||||
- -Fix Torbox bug
|
|
||||||
- Update CI/CD
|
|
||||||
- Update Readme
|
|
||||||
|
|
||||||
#### 0.3.1
|
|
||||||
|
|
||||||
- Add DebridLink Support
|
|
||||||
- Refactor error handling
|
|
||||||
|
|
||||||
#### 0.3.2
|
|
||||||
|
|
||||||
- Fix DebridLink not downloading
|
|
||||||
- Fix Torbox with uncached torrents
|
|
||||||
- Add new /internal/cached endpoint to check if an hash is cached
|
|
||||||
- implement per-debrid local cache
|
|
||||||
- Fix file check for torbox
|
|
||||||
- Other minor bug fixes
|
|
||||||
|
|
||||||
#### 0.3.3
|
|
||||||
|
|
||||||
- Add AllDebrid Support
|
|
||||||
- Fix Torbox not downloading uncached torrents
|
|
||||||
- Fix Rar files being downloaded
|
|
||||||
|
|
||||||
#### 0.4.0
|
|
||||||
|
|
||||||
- Add support for multiple debrid providers
|
- Add support for multiple debrid providers
|
||||||
- A full-fledged UI for adding torrents, repairing files, viewing config and managing torrents
|
- A full-fledged UI for adding torrents, repairing files, viewing config and managing torrents
|
||||||
@@ -127,13 +48,124 @@
|
|||||||
- Qbittorrent
|
- Qbittorrent
|
||||||
- Add support for tags (creating, deleting, listing)
|
- Add support for tags (creating, deleting, listing)
|
||||||
- Add support for categories (creating, deleting, listing)
|
- Add support for categories (creating, deleting, listing)
|
||||||
- Fix issues with arr sending torrents using a different content type.
|
- Fix issues with arr sending torrents using a different content type
|
||||||
|
|
||||||
#### 0.4.1
|
## 0.3.3
|
||||||
|
|
||||||
- Adds optional UI authentication
|
- Add AllDebrid Support
|
||||||
- Downloaded Torrents persist on restart
|
- Fix Torbox not downloading uncached torrents
|
||||||
- Fixes
|
- Fix Rar files being downloaded
|
||||||
- Fix Alldebrid struggling to find the correct file
|
|
||||||
- Minor bug fixes or speed-gains
|
## 0.3.2
|
||||||
- A new cleanup worker to clean up ARR queues
|
|
||||||
|
- Fix DebridLink not downloading
|
||||||
|
- Fix Torbox with uncached torrents
|
||||||
|
- Add new /internal/cached endpoint to check if an hash is cached
|
||||||
|
- Implement per-debrid local cache
|
||||||
|
- Fix file check for torbox
|
||||||
|
- Other minor bug fixes
|
||||||
|
|
||||||
|
## 0.3.1
|
||||||
|
|
||||||
|
- Add DebridLink Support
|
||||||
|
- Refactor error handling
|
||||||
|
|
||||||
|
## 0.3.0
|
||||||
|
|
||||||
|
- Add UI for adding torrents
|
||||||
|
- Refraction of the code
|
||||||
|
- Fix Torbox bug
|
||||||
|
- Update CI/CD
|
||||||
|
- Update Readme
|
||||||
|
|
||||||
|
## 0.2.7
|
||||||
|
|
||||||
|
- Add support for multiple debrid providers
|
||||||
|
- Add Torbox support
|
||||||
|
- Add support for configurable debrid cache checks
|
||||||
|
- Add support for configurable debrid download uncached torrents
|
||||||
|
|
||||||
|
## 0.2.6
|
||||||
|
|
||||||
|
- Delete torrent for empty matched files
|
||||||
|
- Update Readme
|
||||||
|
|
||||||
|
## 0.2.5
|
||||||
|
|
||||||
|
- Fix ContentPath not being set prior
|
||||||
|
- Rewrote Readme
|
||||||
|
- Cleaned up the code
|
||||||
|
|
||||||
|
## 0.2.4
|
||||||
|
|
||||||
|
- Add file download support (Sequential Download)
|
||||||
|
- Fix http handler error
|
||||||
|
- Fix *arrs map failing concurrently
|
||||||
|
- Fix cache not being updated
|
||||||
|
|
||||||
|
## 0.2.3
|
||||||
|
|
||||||
|
- Delete uncached items from RD
|
||||||
|
- Fail if the torrent is not cached (optional)
|
||||||
|
- Fix cache not being updated
|
||||||
|
|
||||||
|
## 0.2.2
|
||||||
|
|
||||||
|
- Fix name mismatch in the cache
|
||||||
|
- Fix directory mapping with mounts
|
||||||
|
- Add Support for refreshing the *arrs
|
||||||
|
|
||||||
|
## 0.2.1
|
||||||
|
|
||||||
|
- Fix Uncached torrents not being downloaded/downloaded
|
||||||
|
- Minor bug fixed
|
||||||
|
- Fix Race condition in the cache and file system
|
||||||
|
|
||||||
|
## 0.2.0
|
||||||
|
|
||||||
|
- Implement 0.2.0-beta changes
|
||||||
|
- Removed Blackhole
|
||||||
|
- Added QbitTorrent API
|
||||||
|
- Cleaned up the code
|
||||||
|
|
||||||
|
## 0.2.0-beta
|
||||||
|
|
||||||
|
- Switch to QbitTorrent API instead of Blackhole
|
||||||
|
- Rewrote the whole codebase
|
||||||
|
|
||||||
|
## 0.1.4
|
||||||
|
|
||||||
|
- Rewrote Report log
|
||||||
|
- Fix YTS, 1337x not grabbing infohash
|
||||||
|
- Fix Torrent symlink bug
|
||||||
|
|
||||||
|
## 0.1.3
|
||||||
|
|
||||||
|
- Searching for infohashes in the xml description/summary/comments
|
||||||
|
- Added local cache support
|
||||||
|
- Added max cache size
|
||||||
|
- Rewrite blackhole.go
|
||||||
|
- Bug fixes
|
||||||
|
- Fixed indexer getting disabled
|
||||||
|
- Fixed blackhole not working
|
||||||
|
|
||||||
|
## 0.1.2
|
||||||
|
|
||||||
|
- Bug fixes
|
||||||
|
- Code cleanup
|
||||||
|
- Get available hashes at once
|
||||||
|
|
||||||
|
## 0.1.1
|
||||||
|
|
||||||
|
- Added support for "No Blackhole" for Arrs
|
||||||
|
- Added support for "Cached Only" for Proxy
|
||||||
|
- Bug Fixes
|
||||||
|
|
||||||
|
## 0.1.0
|
||||||
|
|
||||||
|
- Initial Release
|
||||||
|
- Added Real Debrid Support
|
||||||
|
- Added Arrs Support
|
||||||
|
- Added Proxy Support
|
||||||
|
- Added Basic Authentication for Proxy
|
||||||
|
- Added Rate Limiting for Debrid Providers
|
||||||
75
docs/docs/configuration/arrs.md
Normal file
75
docs/docs/configuration/arrs.md
Normal file
@@ -0,0 +1,75 @@
|
|||||||
|
# Arr Applications Configuration
|
||||||
|
|
||||||
|
DecyphArr can integrate directly with Sonarr, Radarr, and other Arr applications. This section explains how to configure the Arr integration in your `config.json` file.
|
||||||
|
|
||||||
|
## Basic Configuration
|
||||||
|
|
||||||
|
The Arr applications are configured under the `arrs` key:
|
||||||
|
|
||||||
|
```json
|
||||||
|
"arrs": [
|
||||||
|
{
|
||||||
|
"name": "sonarr",
|
||||||
|
"host": "http://sonarr:8989",
|
||||||
|
"token": "your-sonarr-api-key",
|
||||||
|
"cleanup": true
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "radarr",
|
||||||
|
"host": "http://radarr:7878",
|
||||||
|
"token": "your-radarr-api-key",
|
||||||
|
"cleanup": true
|
||||||
|
}
|
||||||
|
]
|
||||||
|
```
|
||||||
|
|
||||||
|
### !!! note
|
||||||
|
This configuration is optional if you've already set up the qBittorrent client in your Arr applications with the correct host and token information. It's particularly useful for the Repair Worker functionality.
|
||||||
|
|
||||||
|
|
||||||
|
### Configuration Options
|
||||||
|
Each Arr application supports the following options:
|
||||||
|
|
||||||
|
- `name`: The name of the Arr application, which should match the category in qBittorrent
|
||||||
|
- `host`: The host URL of the Arr application, including protocol and port
|
||||||
|
- `token`: The API token/key of the Arr application
|
||||||
|
- `cleanup`: Whether to clean up the Arr queue (removes completed downloads). This is only useful for Sonarr.
|
||||||
|
|
||||||
|
### Finding Your API Key
|
||||||
|
#### Sonarr/Radarr/Lidarr
|
||||||
|
|
||||||
|
1. Go to Sonarr > Settings > General
|
||||||
|
2. Look for "API Key" in the "General" section
|
||||||
|
3. Copy the API key
|
||||||
|
|
||||||
|
### Multiple Arr Applications
|
||||||
|
You can configure multiple Arr applications by adding more entries to the arrs array:
|
||||||
|
|
||||||
|
```json
|
||||||
|
"arrs": [
|
||||||
|
{
|
||||||
|
"name": "sonarr",
|
||||||
|
"host": "http://sonarr:8989",
|
||||||
|
"token": "your-sonarr-api-key",
|
||||||
|
"cleanup": true
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "sonarr-anime",
|
||||||
|
"host": "http://sonarr-anime:8989",
|
||||||
|
"token": "your-sonarr-anime-api-key",
|
||||||
|
"cleanup": true
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "radarr",
|
||||||
|
"host": "http://radarr:7878",
|
||||||
|
"token": "your-radarr-api-key",
|
||||||
|
"cleanup": false
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "lidarr",
|
||||||
|
"host": "http://lidarr:8686",
|
||||||
|
"token": "your-lidarr-api-key",
|
||||||
|
"cleanup": false
|
||||||
|
}
|
||||||
|
]
|
||||||
|
```
|
||||||
123
docs/docs/configuration/debrid.md
Normal file
123
docs/docs/configuration/debrid.md
Normal file
@@ -0,0 +1,123 @@
|
|||||||
|
|
||||||
|
# Debrid Providers Configuration
|
||||||
|
|
||||||
|
DecyphArr supports multiple Debrid providers. This section explains how to configure each provider in your `config.json` file.
|
||||||
|
|
||||||
|
## Basic Configuration
|
||||||
|
|
||||||
|
Each Debrid provider is configured in the `debrids` array:
|
||||||
|
|
||||||
|
```json
|
||||||
|
"debrids": [
|
||||||
|
{
|
||||||
|
"name": "realdebrid",
|
||||||
|
"host": "https://api.real-debrid.com/rest/1.0",
|
||||||
|
"api_key": "your-api-key",
|
||||||
|
"folder": "/mnt/remote/realdebrid/__all__/"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "alldebrid",
|
||||||
|
"host": "https://api.alldebrid.com/v4",
|
||||||
|
"api_key": "your-api-key",
|
||||||
|
"folder": "/mnt/remote/alldebrid/downloads/"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
```
|
||||||
|
|
||||||
|
### Provider Options
|
||||||
|
|
||||||
|
Each Debrid provider accepts the following configuration options:
|
||||||
|
|
||||||
|
|
||||||
|
#### Basic Options
|
||||||
|
|
||||||
|
- `name`: The name of the Debrid provider (realdebrid, alldebrid, debridlink, torbox)
|
||||||
|
- `host`: The API endpoint of the Debrid provider
|
||||||
|
- `api_key`: Your API key for the Debrid service (can be comma-separated for multiple keys)
|
||||||
|
- `folder`: The folder where your Debrid content is mounted (via webdav, rclone, zurg, etc.)
|
||||||
|
|
||||||
|
#### Advanced Options
|
||||||
|
|
||||||
|
- `download_api_keys`: Array of API keys used specifically for downloading torrents (defaults to the same as api_key)
|
||||||
|
- `rate_limit`: Rate limit for API requests (null by default)
|
||||||
|
- `download_uncached`: Whether to download uncached torrents (disabled by default)
|
||||||
|
- `check_cached`: Whether to check if torrents are cached (disabled by default)
|
||||||
|
- `use_webdav`: Whether to create a WebDAV server for this Debrid provider (disabled by default)
|
||||||
|
|
||||||
|
|
||||||
|
### Using Multiple API Keys
|
||||||
|
For services that support it, you can provide multiple download API keys for better load balancing:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"name": "realdebrid",
|
||||||
|
"host": "https://api.real-debrid.com/rest/1.0",
|
||||||
|
"api_key": "key1",
|
||||||
|
"download_api_keys": ["key1", "key2", "key3"],
|
||||||
|
"folder": "/mnt/remote/realdebrid/__all__/"
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
```
|
||||||
|
|
||||||
|
### Example Configuration
|
||||||
|
|
||||||
|
#### Real Debrid
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"name": "realdebrid",
|
||||||
|
"host": "https://api.real-debrid.com/rest/1.0",
|
||||||
|
"api_key": "your-api-key",
|
||||||
|
"folder": "/mnt/remote/realdebrid/__all__/",
|
||||||
|
"rate_limit": null,
|
||||||
|
"download_uncached": false,
|
||||||
|
"check_cached": true,
|
||||||
|
"use_webdav": true
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### All Debrid
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"name": "alldebrid",
|
||||||
|
"host": "https://api.alldebrid.com/v4",
|
||||||
|
"api_key": "your-api-key",
|
||||||
|
"folder": "/mnt/remote/alldebrid/torrents/",
|
||||||
|
"rate_limit": null,
|
||||||
|
"download_uncached": false,
|
||||||
|
"check_cached": true,
|
||||||
|
"use_webdav": true
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Debrid Link
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"name": "debridlink",
|
||||||
|
"host": "https://debrid-link.com/api/v2",
|
||||||
|
"api_key": "your-api-key",
|
||||||
|
"folder": "/mnt/remote/debridlink/torrents/",
|
||||||
|
"rate_limit": null,
|
||||||
|
"download_uncached": false,
|
||||||
|
"check_cached": true,
|
||||||
|
"use_webdav": true
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Torbox
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"name": "torbox",
|
||||||
|
"host": "https://api.torbox.com/v1",
|
||||||
|
"api_key": "your-api-key",
|
||||||
|
"folder": "/mnt/remote/torbox/torrents/",
|
||||||
|
"rate_limit": null,
|
||||||
|
"download_uncached": false,
|
||||||
|
"check_cached": true,
|
||||||
|
"use_webdav": true
|
||||||
|
}
|
||||||
|
```
|
||||||
69
docs/docs/configuration/general.md
Normal file
69
docs/docs/configuration/general.md
Normal file
@@ -0,0 +1,69 @@
|
|||||||
|
# General Configuration
|
||||||
|
|
||||||
|
This section covers the basic configuration options for DecyphArr that apply to the entire application.
|
||||||
|
|
||||||
|
## Basic Settings
|
||||||
|
|
||||||
|
Here are the fundamental configuration options:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"use_auth": false,
|
||||||
|
"log_level": "info",
|
||||||
|
"discord_webhook_url": "",
|
||||||
|
"min_file_size": 0,
|
||||||
|
"max_file_size": 0,
|
||||||
|
"allowed_file_types": [".mp4", ".mkv", ".avi", ...]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Configuration Options
|
||||||
|
|
||||||
|
#### Log Level
|
||||||
|
The `log_level` setting determines how verbose the application logs will be:
|
||||||
|
|
||||||
|
- `debug`: Detailed information, useful for troubleshooting
|
||||||
|
- `info`: General operational information (default)
|
||||||
|
- `warn`: Warning messages
|
||||||
|
- `error`: Error messages only
|
||||||
|
- `trace`: Very detailed information, including all requests and responses
|
||||||
|
|
||||||
|
|
||||||
|
#### Authentication
|
||||||
|
The `use_auth` option enables basic authentication for the UI:
|
||||||
|
|
||||||
|
```json
|
||||||
|
"use_auth": true
|
||||||
|
```
|
||||||
|
|
||||||
|
When enabled, you'll need to provide a username and password to access the DecyphArr interface.
|
||||||
|
|
||||||
|
|
||||||
|
#### File Size Limits
|
||||||
|
|
||||||
|
You can set minimum and maximum file size limits for torrents:
|
||||||
|
```json
|
||||||
|
"min_file_size": 0, // Minimum file size in bytes (0 = no minimum)
|
||||||
|
"max_file_size": 0 // Maximum file size in bytes (0 = no maximum)
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Allowed File Types
|
||||||
|
You can restrict the types of files that DecyphArr will process by specifying allowed file extensions. This is useful for filtering out unwanted file types.
|
||||||
|
|
||||||
|
```json
|
||||||
|
"allowed_file_types": [
|
||||||
|
".mp4", ".mkv", ".avi", ".mov",
|
||||||
|
".m4v", ".mpg", ".mpeg", ".wmv",
|
||||||
|
".m4a", ".mp3", ".flac", ".wav"
|
||||||
|
]
|
||||||
|
```
|
||||||
|
|
||||||
|
If not specified, all movie, TV show, and music file types are allowed by default.
|
||||||
|
|
||||||
|
|
||||||
|
#### Discord Notifications
|
||||||
|
To receive notifications on Discord, add your webhook URL:
|
||||||
|
```json
|
||||||
|
"discord_webhook_url": "https://discord.com/api/webhooks/..."
|
||||||
|
```
|
||||||
|
This will send notifications for various events, such as successful downloads or errors.
|
||||||
44
docs/docs/configuration/index.md
Normal file
44
docs/docs/configuration/index.md
Normal file
@@ -0,0 +1,44 @@
|
|||||||
|
# Configuration Overview
|
||||||
|
|
||||||
|
DecyphArr uses a JSON configuration file to manage its settings. This file should be named `config.json` and placed in your configured directory.
|
||||||
|
|
||||||
|
## Basic Configuration
|
||||||
|
|
||||||
|
Here's a minimal configuration to get started:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"debrids": [
|
||||||
|
{
|
||||||
|
"name": "realdebrid",
|
||||||
|
"host": "https://api.real-debrid.com/rest/1.0",
|
||||||
|
"api_key": "realdebrid_key",
|
||||||
|
"folder": "/mnt/remote/realdebrid/__all__/"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"qbittorrent": {
|
||||||
|
"port": "8282",
|
||||||
|
"download_folder": "/mnt/symlinks/",
|
||||||
|
"categories": ["sonarr", "radarr"]
|
||||||
|
},
|
||||||
|
"repair": {
|
||||||
|
"enabled": false,
|
||||||
|
"interval": "12h",
|
||||||
|
"run_on_start": false
|
||||||
|
},
|
||||||
|
"use_auth": false,
|
||||||
|
"log_level": "info"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Configuration Sections
|
||||||
|
|
||||||
|
DecyphArr's configuration is divided into several sections:
|
||||||
|
|
||||||
|
- [General Configuration](general.md) - Basic settings like logging and authentication
|
||||||
|
- [Debrid Providers](debrid.md) - Configure one or more Debrid services
|
||||||
|
- [qBittorrent Settings](qbittorrent.md) - Settings for the qBittorrent API
|
||||||
|
- [Arr Integration](arrs.md) - Configuration for Sonarr, Radarr, etc.
|
||||||
|
|
||||||
|
Full Configuration Example
|
||||||
|
For a complete configuration file with all available options, see our [full configuration example](../extras/config.full.json).
|
||||||
74
docs/docs/configuration/qbittorrent.md
Normal file
74
docs/docs/configuration/qbittorrent.md
Normal file
@@ -0,0 +1,74 @@
|
|||||||
|
# qBittorrent Configuration
|
||||||
|
|
||||||
|
DecyphArr emulates a qBittorrent instance to integrate with Arr applications. This section explains how to configure the qBittorrent settings in your `config.json` file.
|
||||||
|
|
||||||
|
## Basic Configuration
|
||||||
|
|
||||||
|
The qBittorrent functionality is configured under the `qbittorrent` key:
|
||||||
|
|
||||||
|
```json
|
||||||
|
"qbittorrent": {
|
||||||
|
"port": "8282",
|
||||||
|
"download_folder": "/mnt/symlinks/",
|
||||||
|
"categories": ["sonarr", "radarr", "lidarr"],
|
||||||
|
"refresh_interval": 5
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Configuration Options
|
||||||
|
#### Essential Settings
|
||||||
|
|
||||||
|
- `port`: The port on which the qBittorrent API will listen (default: 8282)
|
||||||
|
- `download_folder`: The folder where symlinks or downloaded files will be placed
|
||||||
|
- `categories`: An array of categories to organize downloads (usually matches your Arr applications)
|
||||||
|
|
||||||
|
#### Advanced Settings
|
||||||
|
|
||||||
|
- `refresh_interval`: How often (in seconds) to refresh the Arrs Monitored Downloads (default: 5)
|
||||||
|
|
||||||
|
#### Categories
|
||||||
|
Categories help organize your downloads and match them to specific Arr applications. Typically, you'll want to configure categories that match your Sonarr, Radarr, or other Arr applications:
|
||||||
|
|
||||||
|
```json
|
||||||
|
"categories": ["sonarr", "radarr", "lidarr", "readarr"]
|
||||||
|
```
|
||||||
|
|
||||||
|
When setting up your Arr applications to connect to DecyphArr, you'll specify these same category names.
|
||||||
|
|
||||||
|
#### Download Folder
|
||||||
|
|
||||||
|
The `download_folder` setting specifies where DecyphArr will place downloaded files or create symlinks:
|
||||||
|
|
||||||
|
```json
|
||||||
|
"download_folder": "/mnt/symlinks/"
|
||||||
|
```
|
||||||
|
|
||||||
|
This folder should be:
|
||||||
|
|
||||||
|
- Accessible to DecyphArr
|
||||||
|
- Accessible to your Arr applications
|
||||||
|
- Have sufficient space if downloading files locally
|
||||||
|
|
||||||
|
|
||||||
|
#### Port Configuration
|
||||||
|
The `port` setting determines which port the qBittorrent API will listen on:
|
||||||
|
|
||||||
|
```json
|
||||||
|
"port": "8282"
|
||||||
|
```
|
||||||
|
|
||||||
|
Ensure this port:
|
||||||
|
|
||||||
|
- Is not used by other applications
|
||||||
|
- Is accessible to your Arr applications
|
||||||
|
- Is properly exposed if using Docker (see the Docker Compose example in the Installation guide)
|
||||||
|
|
||||||
|
#### Refresh Interval
|
||||||
|
The refresh_interval setting controls how often DecyphArr checks for updates from your Arr applications:
|
||||||
|
|
||||||
|
```json
|
||||||
|
"refresh_interval": 5
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
This value is in seconds. Lower values provide more responsive updates but may increase CPU usage.
|
||||||
@@ -1,5 +1,23 @@
|
|||||||
{
|
{
|
||||||
"debrids": [
|
"debrids": [
|
||||||
|
{
|
||||||
|
"name": "realdebrid",
|
||||||
|
"host": "https://api.real-debrid.com/rest/1.0",
|
||||||
|
"api_key": "realdebrid_key",
|
||||||
|
"folder": "/mnt/remote/realdebrid/__all__/",
|
||||||
|
"download_api_keys": [],
|
||||||
|
"proxy": "",
|
||||||
|
"rate_limit": "250/minute",
|
||||||
|
"download_uncached": false,
|
||||||
|
"check_cached": false,
|
||||||
|
"use_webdav": true,
|
||||||
|
"torrents_refresh_interval": "15s",
|
||||||
|
"folder_naming": "original_no_ext",
|
||||||
|
"auto_expire_links_after": "3d",
|
||||||
|
"rc_url": "http://your-ip-address:9990",
|
||||||
|
"rc_user": "your_rclone_rc_user",
|
||||||
|
"rc_pass": "your_rclone_rc_pass"
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"name": "torbox",
|
"name": "torbox",
|
||||||
"host": "https://api.torbox.app/v1",
|
"host": "https://api.torbox.app/v1",
|
||||||
@@ -9,15 +27,6 @@
|
|||||||
"download_uncached": false,
|
"download_uncached": false,
|
||||||
"check_cached": true
|
"check_cached": true
|
||||||
},
|
},
|
||||||
{
|
|
||||||
"name": "realdebrid",
|
|
||||||
"host": "https://api.real-debrid.com/rest/1.0",
|
|
||||||
"api_key": "realdebrid_key",
|
|
||||||
"folder": "/mnt/remote/realdebrid/__all__/",
|
|
||||||
"rate_limit": "250/minute",
|
|
||||||
"download_uncached": false,
|
|
||||||
"check_cached": false
|
|
||||||
},
|
|
||||||
{
|
{
|
||||||
"name": "debridlink",
|
"name": "debridlink",
|
||||||
"host": "https://debrid-link.com/api/v2",
|
"host": "https://debrid-link.com/api/v2",
|
||||||
@@ -37,46 +46,51 @@
|
|||||||
"check_cached": false
|
"check_cached": false
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
"proxy": {
|
|
||||||
"enabled": true,
|
|
||||||
"port": "8100",
|
|
||||||
"log_level": "info",
|
|
||||||
"username": "username",
|
|
||||||
"password": "password",
|
|
||||||
"cached_only": true
|
|
||||||
},
|
|
||||||
"max_cache_size": 1000,
|
"max_cache_size": 1000,
|
||||||
"qbittorrent": {
|
"qbittorrent": {
|
||||||
"port": "8282",
|
"port": "8282",
|
||||||
"download_folder": "/mnt/symlinks/",
|
"download_folder": "/mnt/symlinks/",
|
||||||
"categories": ["sonarr", "radarr"],
|
"categories": ["sonarr", "radarr"],
|
||||||
"refresh_interval": 5,
|
"refresh_interval": 5,
|
||||||
"log_level": "info"
|
"skip_pre_cache": false
|
||||||
},
|
},
|
||||||
"arrs": [
|
"arrs": [
|
||||||
{
|
{
|
||||||
"name": "sonarr",
|
"name": "sonarr",
|
||||||
"host": "http://host:8989",
|
"host": "http://radarr:8989",
|
||||||
"token": "arr_key",
|
"token": "arr_key",
|
||||||
"cleanup": false
|
"cleanup": true,
|
||||||
|
"skip_repair": true,
|
||||||
|
"download_uncached": false
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"name": "radarr",
|
"name": "radarr",
|
||||||
"host": "http://host:7878",
|
"host": "http://radarr:7878",
|
||||||
"token": "arr_key",
|
"token": "arr_key",
|
||||||
"cleanup": false
|
"cleanup": false,
|
||||||
|
"download_uncached": false
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "lidarr",
|
||||||
|
"host": "http://lidarr:7878",
|
||||||
|
"token": "arr_key",
|
||||||
|
"cleanup": false,
|
||||||
|
"skip_repair": true,
|
||||||
|
"download_uncached": false
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
"repair": {
|
"repair": {
|
||||||
"enabled": false,
|
"enabled": false,
|
||||||
"interval": "12h",
|
"interval": "12h",
|
||||||
"run_on_start": false,
|
"run_on_start": false,
|
||||||
"zurg_url": "http://zurg:9999",
|
"zurg_url": "",
|
||||||
"skip_deletion": false
|
"use_webdav": false,
|
||||||
|
"auto_process": false
|
||||||
},
|
},
|
||||||
"log_level": "info",
|
"log_level": "info",
|
||||||
"min_file_size": "",
|
"min_file_size": "",
|
||||||
"max_file_size": "",
|
"max_file_size": "",
|
||||||
"allowed_file_types": [],
|
"allowed_file_types": [],
|
||||||
"use_auth": false
|
"use_auth": false,
|
||||||
|
"discord_webhook_url": "https://discord.com/api/webhooks/..."
|
||||||
}
|
}
|
||||||
5
docs/docs/extras/rclone.conf
Normal file
5
docs/docs/extras/rclone.conf
Normal file
@@ -0,0 +1,5 @@
|
|||||||
|
[decypharr]
|
||||||
|
type = webdav
|
||||||
|
url = http://decypharr:8282/webdav/realdebrid
|
||||||
|
vendor = other
|
||||||
|
pacer_min_sleep = 0
|
||||||
40
docs/docs/features/index.md
Normal file
40
docs/docs/features/index.md
Normal file
@@ -0,0 +1,40 @@
|
|||||||
|
# Features Overview
|
||||||
|
|
||||||
|
DecyphArr extends the functionality of qBittorrent by integrating with Debrid services, providing several powerful features that enhance your media management experience.
|
||||||
|
|
||||||
|
## Core Features
|
||||||
|
|
||||||
|
### Mock qBittorrent API
|
||||||
|
|
||||||
|
DecyphArr implements a complete qBittorrent-compatible API that can be used with Sonarr, Radarr, Lidarr, and other Arr applications. This allows you to:
|
||||||
|
|
||||||
|
- Seamlessly integrate with your existing Arr setup
|
||||||
|
- Use familiar interfaces to manage your downloads
|
||||||
|
- Benefit from Debrid services without changing your workflow
|
||||||
|
|
||||||
|
### Comprehensive UI
|
||||||
|
|
||||||
|
The DecyphArr user interface provides:
|
||||||
|
|
||||||
|
- Torrent management capabilities
|
||||||
|
- Status monitoring
|
||||||
|
- Configuration options
|
||||||
|
- Multiple Debrid provider integration
|
||||||
|
|
||||||
|
## Advanced Features
|
||||||
|
|
||||||
|
DecyphArr includes several advanced features that extend its capabilities:
|
||||||
|
|
||||||
|
- [Repair Worker](repair-worker.md): Identifies and fixes issues with your media files
|
||||||
|
- [WebDAV Server](webdav.md): Provides direct access to your Debrid files
|
||||||
|
|
||||||
|
## Supported Debrid Providers
|
||||||
|
|
||||||
|
DecyphArr supports multiple Debrid providers:
|
||||||
|
|
||||||
|
- Real Debrid
|
||||||
|
- Torbox
|
||||||
|
- Debrid Link
|
||||||
|
- All Debrid
|
||||||
|
|
||||||
|
Each provider can be configured separately, allowing you to use one or multiple services simultaneously.
|
||||||
41
docs/docs/features/repair-worker.md
Normal file
41
docs/docs/features/repair-worker.md
Normal file
@@ -0,0 +1,41 @@
|
|||||||
|
# Repair Worker
|
||||||
|
|
||||||
|
The Repair Worker is a powerful feature that helps maintain the health of your media library by scanning for and fixing issues with files.
|
||||||
|
|
||||||
|
## What It Does
|
||||||
|
|
||||||
|
The Repair Worker performs the following tasks:
|
||||||
|
|
||||||
|
- Searches for broken symlinks or file references
|
||||||
|
- Identifies missing files in your library
|
||||||
|
- Locates deleted or unreadable files
|
||||||
|
- Automatically repairs issues when possible
|
||||||
|
|
||||||
|
## Configuration
|
||||||
|
|
||||||
|
To enable and configure the Repair Worker, add the following to your `config.json`:
|
||||||
|
|
||||||
|
```json
|
||||||
|
"repair": {
|
||||||
|
"enabled": true,
|
||||||
|
"interval": "12h",
|
||||||
|
"run_on_start": false,
|
||||||
|
"use_webdav": false,
|
||||||
|
"zurg_url": "http://localhost:9999",
|
||||||
|
"auto_process": true
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Configuration Options
|
||||||
|
|
||||||
|
- `enabled`: Set to `true` to enable the Repair Worker.
|
||||||
|
- `interval`: The time interval for the Repair Worker to run (e.g., `12h`, `1d`).
|
||||||
|
- `run_on_start`: If set to `true`, the Repair Worker will run immediately after DecyphArr starts.
|
||||||
|
- `use_webdav`: If set to `true`, the Repair Worker will use WebDAV for file operations.
|
||||||
|
- `zurg_url`: The URL for the Zurg service (if using).
|
||||||
|
- `auto_process`: If set to `true`, the Repair Worker will automatically process files that it finds issues with.
|
||||||
|
|
||||||
|
|
||||||
|
### Performance Tips
|
||||||
|
- For users of the WebDAV server, enable `use_webdav` for exponentially faster repair processes
|
||||||
|
- If using Zurg, set the `zurg_url` parameter to greatly improve repair speed
|
||||||
60
docs/docs/features/webdav.md
Normal file
60
docs/docs/features/webdav.md
Normal file
@@ -0,0 +1,60 @@
|
|||||||
|
# WebDAV Server
|
||||||
|
|
||||||
|
DecyphArr includes a built-in WebDAV server that provides direct access to your Debrid files, making them easily accessible to media players and other applications.
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
While most Debrid providers have their own WebDAV servers, DecyphArr's implementation offers faster access and additional features. The WebDAV server listens on port `8080` by default.
|
||||||
|
|
||||||
|
## Accessing the WebDAV Server
|
||||||
|
|
||||||
|
- URL: `http://localhost:8282/webdav` or `http://<your-server-ip>:8080/webdav`
|
||||||
|
|
||||||
|
## Configuration
|
||||||
|
|
||||||
|
You can configure WebDAV settings either globally or per-Debrid provider in your `config.json`:
|
||||||
|
|
||||||
|
```json
|
||||||
|
"webdav": {
|
||||||
|
"torrents_refresh_interval": "15s",
|
||||||
|
"download_links_refresh_interval": "40m",
|
||||||
|
"folder_naming": "original_no_ext",
|
||||||
|
"auto_expire_links_after": "3d",
|
||||||
|
"rc_url": "http://localhost:5572",
|
||||||
|
"rc_user": "username",
|
||||||
|
"rc_pass": "password"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Configuration Options
|
||||||
|
|
||||||
|
- `torrents_refresh_interval`: Interval for refreshing torrent data (e.g., `15s`, `1m`, `1h`).
|
||||||
|
- `download_links_refresh_interval`: Interval for refreshing download links (e.g., `40m`, `1h`).
|
||||||
|
- `workers`: Number of concurrent workers for processing requests.
|
||||||
|
- folder_naming: Naming convention for folders:
|
||||||
|
- `original_no_ext`: Original file name without extension
|
||||||
|
- `original`: Original file name with extension
|
||||||
|
- `filename`: Torrent filename
|
||||||
|
- `filename_no_ext`: Torrent filename without extension
|
||||||
|
- `id`: Torrent ID
|
||||||
|
- `auto_expire_links_after`: Time after which download links will expire (e.g., `3d`, `1w`).
|
||||||
|
- `rc_url`, `rc_user`, `rc_pass`: Rclone RC configuration for VFS refreshes
|
||||||
|
|
||||||
|
### Using with Media Players
|
||||||
|
The WebDAV server works well with media players like:
|
||||||
|
|
||||||
|
- Infuse
|
||||||
|
- VidHub
|
||||||
|
- Plex (via mounting)
|
||||||
|
- Kodi
|
||||||
|
|
||||||
|
### Mounting with Rclone
|
||||||
|
You can mount the WebDAV server locally using Rclone. Example configuration:
|
||||||
|
|
||||||
|
```conf
|
||||||
|
[decypharr]
|
||||||
|
type = webdav
|
||||||
|
url = http://localhost:8080/webdav/realdebrid
|
||||||
|
vendor = other
|
||||||
|
```
|
||||||
|
For a complete Rclone configuration example, see our [sample rclone.conf](../extras/rclone.conf).
|
||||||
BIN
docs/docs/images/logo.png
Normal file
BIN
docs/docs/images/logo.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 1.2 MiB |
BIN
docs/docs/images/main.png
Normal file
BIN
docs/docs/images/main.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 188 KiB |
BIN
docs/docs/images/sonarr-setup.png
Normal file
BIN
docs/docs/images/sonarr-setup.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 264 KiB |
28
docs/docs/index.md
Normal file
28
docs/docs/index.md
Normal file
@@ -0,0 +1,28 @@
|
|||||||
|
# DecyphArr
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
**DecyphArr** is an implementation of QbitTorrent with **Multiple Debrid service support**, written in Go.
|
||||||
|
|
||||||
|
## What is DecyphArr?
|
||||||
|
|
||||||
|
DecyphArr combines the power of QBittorrent with popular Debrid services to enhance your media management. It provides a familiar interface for Sonarr, Radarr, and other \*Arr applications while leveraging the capabilities of Debrid providers.
|
||||||
|
|
||||||
|
## Key Features
|
||||||
|
|
||||||
|
- 🔄 Mock Qbittorent API that supports Sonarr, Radarr, Lidarr and other Arr applications
|
||||||
|
- 🖥️ Full-fledged UI for managing torrents
|
||||||
|
- 🔌 Multiple Debrid providers support
|
||||||
|
- 📁 WebDAV server support for each Debrid provider
|
||||||
|
- 🔧 Repair Worker for missing files
|
||||||
|
|
||||||
|
## Supported Debrid Providers
|
||||||
|
|
||||||
|
- [Real Debrid](https://real-debrid.com)
|
||||||
|
- [Torbox](https://torbox.app)
|
||||||
|
- [Debrid Link](https://debrid-link.com)
|
||||||
|
- [All Debrid](https://alldebrid.com)
|
||||||
|
|
||||||
|
## Getting Started
|
||||||
|
|
||||||
|
Check out our [Installation Guide](installation.md) to get started with DecyphArr.
|
||||||
71
docs/docs/installation.md
Normal file
71
docs/docs/installation.md
Normal file
@@ -0,0 +1,71 @@
|
|||||||
|
# Installation
|
||||||
|
|
||||||
|
There are multiple ways to install and run DecyphArr. Choose the method that works best for your setup.
|
||||||
|
|
||||||
|
## Docker Installation (Recommended)
|
||||||
|
|
||||||
|
Docker is the easiest way to get started with DecyphArr.
|
||||||
|
|
||||||
|
### Available Docker Registries
|
||||||
|
|
||||||
|
You can use either Docker Hub or GitHub Container Registry to pull the image:
|
||||||
|
|
||||||
|
- Docker Hub: `cy01/blackhole:latest`
|
||||||
|
- GitHub Container Registry: `ghcr.io/sirrobot01/decypharr:latest`
|
||||||
|
|
||||||
|
### Docker Tags
|
||||||
|
|
||||||
|
- `latest`: The latest stable release
|
||||||
|
- `beta`: The latest beta release
|
||||||
|
- `vX.Y.Z`: A specific version (e.g., `v0.1.0`)
|
||||||
|
- `nightly`: The latest nightly build (usually unstable)
|
||||||
|
- `experimental`: The latest experimental build (highly unstable)
|
||||||
|
|
||||||
|
### Docker Compose Setup
|
||||||
|
|
||||||
|
Create a `docker-compose.yml` file with the following content:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
version: '3.7'
|
||||||
|
services:
|
||||||
|
decypharr:
|
||||||
|
image: cy01/blackhole:latest # or cy01/blackhole:beta
|
||||||
|
container_name: decypharr
|
||||||
|
ports:
|
||||||
|
- "8282:8282" # qBittorrent
|
||||||
|
- "8181:8181" # Proxy
|
||||||
|
user: "1000:1000"
|
||||||
|
volumes:
|
||||||
|
- /mnt/:/mnt
|
||||||
|
- ./configs/:/app # config.json must be in this directory
|
||||||
|
environment:
|
||||||
|
- PUID=1000
|
||||||
|
- PGID=1000
|
||||||
|
- UMASK=002
|
||||||
|
- QBIT_PORT=8282 # qBittorrent Port (optional)
|
||||||
|
- PORT=8181 # Proxy Port (optional)
|
||||||
|
restart: unless-stopped
|
||||||
|
depends_on:
|
||||||
|
- rclone # If you are using rclone with docker
|
||||||
|
|
||||||
|
|
||||||
|
```
|
||||||
|
|
||||||
|
Run the Docker Compose setup:
|
||||||
|
```bash
|
||||||
|
docker-compose up -d
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
## Binary Installation
|
||||||
|
If you prefer not to use Docker, you can download and run the binary directly.
|
||||||
|
|
||||||
|
Download the binary from the releases page
|
||||||
|
Create a configuration file (see Configuration)
|
||||||
|
Run the binary:
|
||||||
|
```bash
|
||||||
|
chmod +x decypharr
|
||||||
|
./decypharr --config /path/to/config
|
||||||
|
```
|
||||||
|
|
||||||
|
The config directory should contain your config.json file.
|
||||||
39
docs/docs/usage.md
Normal file
39
docs/docs/usage.md
Normal file
@@ -0,0 +1,39 @@
|
|||||||
|
# Usage Guide
|
||||||
|
|
||||||
|
This guide will help you get started with DecyphArr after installation.
|
||||||
|
|
||||||
|
## Basic Setup
|
||||||
|
|
||||||
|
1. Create your `config.json` file (see [Configuration](configuration/index.md) for details)
|
||||||
|
2. Start the DecyphArr service using Docker or binary
|
||||||
|
3. Access the UI at `http://localhost:8282` (or your configured host/port)
|
||||||
|
4. Connect your Arr applications (Sonarr, Radarr, etc.)
|
||||||
|
|
||||||
|
## Connecting to Sonarr/Radarr
|
||||||
|
|
||||||
|
To connect DecyphArr to your Sonarr or Radarr instance:
|
||||||
|
|
||||||
|
1. In Sonarr/Radarr, go to **Settings → Download Client → Add Client → qBittorrent**
|
||||||
|
2. Configure the following settings:
|
||||||
|
- **Host**: `localhost` (or the IP of your DecyphArr server)
|
||||||
|
- **Port**: `8282` (or your configured qBittorrent port)
|
||||||
|
- **Username**: `http://sonarr:8989` (your Arr host with http/https)
|
||||||
|
- **Password**: `sonarr_token` (your Arr API token)
|
||||||
|
- **Category**: e.g., `sonarr`, `radarr` (match what you configured in DecyphArr)
|
||||||
|
- **Use SSL**: `No`
|
||||||
|
- **Sequential Download**: `No` or `Yes` (if you want to download torrents locally instead of symlink)
|
||||||
|
3. Click **Test** to verify the connection
|
||||||
|
4. Click **Save** to add the download client
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
## Using the UI
|
||||||
|
|
||||||
|
The DecyphArr UI provides a familiar qBittorrent-like interface with additional features for Debrid services:
|
||||||
|
|
||||||
|
- View and manage all your torrents
|
||||||
|
- Monitor download status
|
||||||
|
- Check cache status across different Debrid providers
|
||||||
|
- Access WebDAV functionality
|
||||||
|
|
||||||
|
Access the UI at `http://localhost:8282` or your configured host/port.
|
||||||
77
docs/mkdocs.yml
Normal file
77
docs/mkdocs.yml
Normal file
@@ -0,0 +1,77 @@
|
|||||||
|
site_name: Decypharr
|
||||||
|
site_url: https://sirrobot01.github.io/decypharr
|
||||||
|
site_description: QbitTorrent with Debrid Support
|
||||||
|
repo_url: https://github.com/sirrobot01/decypharr
|
||||||
|
repo_name: sirrobot01/decypharr
|
||||||
|
edit_uri: blob/main/docs
|
||||||
|
|
||||||
|
|
||||||
|
theme:
|
||||||
|
name: material
|
||||||
|
logo: images/logo.png
|
||||||
|
font:
|
||||||
|
text: Roboto
|
||||||
|
code: Roboto Mono
|
||||||
|
palette:
|
||||||
|
- media: "(prefers-color-scheme: light)"
|
||||||
|
scheme: default
|
||||||
|
primary: indigo
|
||||||
|
accent: indigo
|
||||||
|
toggle:
|
||||||
|
icon: material/weather-night
|
||||||
|
name: Switch to dark mode
|
||||||
|
- media: "(prefers-color-scheme: dark)"
|
||||||
|
scheme: slate
|
||||||
|
primary: indigo
|
||||||
|
accent: indigo
|
||||||
|
toggle:
|
||||||
|
icon: material/weather-sunny
|
||||||
|
name: Switch to light mode
|
||||||
|
features:
|
||||||
|
- navigation.search.highlight
|
||||||
|
- navigation.search.suggest
|
||||||
|
- navigation.search.share
|
||||||
|
- navigation.search.suggest
|
||||||
|
- navigation.search.share
|
||||||
|
- navigation.search.highlight
|
||||||
|
- navigation.search.suggest
|
||||||
|
- navigation.search.share
|
||||||
|
icon:
|
||||||
|
repo: fontawesome/brands/github
|
||||||
|
|
||||||
|
markdown_extensions:
|
||||||
|
- admonition
|
||||||
|
- pymdownx.details
|
||||||
|
- pymdownx.superfences
|
||||||
|
- pymdownx.highlight
|
||||||
|
- pymdownx.inlinehilite
|
||||||
|
- pymdownx.tabbed
|
||||||
|
- pymdownx.emoji:
|
||||||
|
emoji_index: !!python/name:material.extensions.emoji.twemoji
|
||||||
|
emoji_generator: !!python/name:materialx.emoji.to_svg
|
||||||
|
- attr_list
|
||||||
|
- md_in_html
|
||||||
|
- def_list
|
||||||
|
- toc:
|
||||||
|
permalink: true
|
||||||
|
|
||||||
|
nav:
|
||||||
|
- Home: index.md
|
||||||
|
- Installation: installation.md
|
||||||
|
- Usage: usage.md
|
||||||
|
- Configuration:
|
||||||
|
- Overview: configuration/index.md
|
||||||
|
- General: configuration/general.md
|
||||||
|
- Debrid Providers: configuration/debrid.md
|
||||||
|
- qBittorrent: configuration/qbittorrent.md
|
||||||
|
- Arr Integration: configuration/arrs.md
|
||||||
|
- Features:
|
||||||
|
- Overview: features/index.md
|
||||||
|
- Repair Worker: features/repair-worker.md
|
||||||
|
- WebDAV: features/webdav.md
|
||||||
|
- Changelog: changelog.md
|
||||||
|
|
||||||
|
|
||||||
|
plugins:
|
||||||
|
- search
|
||||||
|
- tags
|
||||||
12
go.mod
12
go.mod
@@ -1,21 +1,25 @@
|
|||||||
module github.com/sirrobot01/debrid-blackhole
|
module github.com/sirrobot01/decypharr
|
||||||
|
|
||||||
go 1.23
|
go 1.23.0
|
||||||
|
|
||||||
toolchain go1.23.2
|
toolchain go1.23.2
|
||||||
|
|
||||||
require (
|
require (
|
||||||
github.com/anacrolix/torrent v1.55.0
|
github.com/anacrolix/torrent v1.55.0
|
||||||
|
github.com/beevik/etree v1.5.0
|
||||||
github.com/cavaliergopher/grab/v3 v3.0.1
|
github.com/cavaliergopher/grab/v3 v3.0.1
|
||||||
github.com/elazarl/goproxy v0.0.0-20240726154733-8b0c20506380
|
github.com/elazarl/goproxy v0.0.0-20240726154733-8b0c20506380
|
||||||
github.com/elazarl/goproxy/ext v0.0.0-20190711103511-473e67f1d7d2
|
github.com/elazarl/goproxy/ext v0.0.0-20190711103511-473e67f1d7d2
|
||||||
github.com/go-chi/chi/v5 v5.1.0
|
github.com/go-chi/chi/v5 v5.1.0
|
||||||
|
github.com/goccy/go-json v0.10.5
|
||||||
github.com/google/uuid v1.6.0
|
github.com/google/uuid v1.6.0
|
||||||
github.com/gorilla/sessions v1.4.0
|
github.com/gorilla/sessions v1.4.0
|
||||||
|
github.com/puzpuzpuz/xsync/v3 v3.5.1
|
||||||
github.com/rs/zerolog v1.33.0
|
github.com/rs/zerolog v1.33.0
|
||||||
github.com/valyala/fastjson v1.6.4
|
github.com/valyala/fastjson v1.6.4
|
||||||
golang.org/x/crypto v0.33.0
|
golang.org/x/crypto v0.33.0
|
||||||
golang.org/x/net v0.33.0
|
golang.org/x/net v0.35.0
|
||||||
|
golang.org/x/sync v0.12.0
|
||||||
golang.org/x/time v0.8.0
|
golang.org/x/time v0.8.0
|
||||||
gopkg.in/natefinch/lumberjack.v2 v2.2.1
|
gopkg.in/natefinch/lumberjack.v2 v2.2.1
|
||||||
)
|
)
|
||||||
@@ -31,7 +35,7 @@ require (
|
|||||||
github.com/mattn/go-colorable v0.1.13 // indirect
|
github.com/mattn/go-colorable v0.1.13 // indirect
|
||||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
||||||
github.com/rogpeppe/go-internal v1.12.0 // indirect
|
github.com/rogpeppe/go-internal v1.13.1 // indirect
|
||||||
github.com/stretchr/testify v1.10.0 // indirect
|
github.com/stretchr/testify v1.10.0 // indirect
|
||||||
golang.org/x/sys v0.30.0 // indirect
|
golang.org/x/sys v0.30.0 // indirect
|
||||||
golang.org/x/text v0.22.0 // indirect
|
golang.org/x/text v0.22.0 // indirect
|
||||||
|
|||||||
16
go.sum
16
go.sum
@@ -36,6 +36,8 @@ github.com/anacrolix/tagflag v1.1.0/go.mod h1:Scxs9CV10NQatSmbyjqmqmeQNwGzlNe0CM
|
|||||||
github.com/anacrolix/torrent v1.55.0 h1:s9yh/YGdPmbN9dTa+0Inh2dLdrLQRvEAj1jdFW/Hdd8=
|
github.com/anacrolix/torrent v1.55.0 h1:s9yh/YGdPmbN9dTa+0Inh2dLdrLQRvEAj1jdFW/Hdd8=
|
||||||
github.com/anacrolix/torrent v1.55.0/go.mod h1:sBdZHBSZNj4de0m+EbYg7vvs/G/STubxu/GzzNbojsE=
|
github.com/anacrolix/torrent v1.55.0/go.mod h1:sBdZHBSZNj4de0m+EbYg7vvs/G/STubxu/GzzNbojsE=
|
||||||
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
|
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
|
||||||
|
github.com/beevik/etree v1.5.0 h1:iaQZFSDS+3kYZiGoc9uKeOkUY3nYMXOKLl6KIJxiJWs=
|
||||||
|
github.com/beevik/etree v1.5.0/go.mod h1:gPNJNaBGVZ9AwsidazFZyygnd+0pAU38N4D+WemwKNs=
|
||||||
github.com/benbjohnson/immutable v0.2.0/go.mod h1:uc6OHo6PN2++n98KHLxW8ef4W42ylHiQSENghE1ezxI=
|
github.com/benbjohnson/immutable v0.2.0/go.mod h1:uc6OHo6PN2++n98KHLxW8ef4W42ylHiQSENghE1ezxI=
|
||||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||||
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||||||
@@ -79,6 +81,8 @@ github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2
|
|||||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||||
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
|
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
|
||||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||||
|
github.com/goccy/go-json v0.10.5 h1:Fq85nIqj+gXn/S5ahsiTlK3TmC85qgirsdTP/+DeaC4=
|
||||||
|
github.com/goccy/go-json v0.10.5/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M=
|
||||||
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
||||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||||
github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||||
@@ -183,10 +187,12 @@ github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R
|
|||||||
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||||
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
|
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
|
||||||
github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
|
github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
|
||||||
|
github.com/puzpuzpuz/xsync/v3 v3.5.1 h1:GJYJZwO6IdxN/IKbneznS6yPkVC+c3zyY/j19c++5Fg=
|
||||||
|
github.com/puzpuzpuz/xsync/v3 v3.5.1/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA=
|
||||||
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
|
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
|
||||||
github.com/rogpeppe/go-charset v0.0.0-20180617210344-2471d30d28b4/go.mod h1:qgYeAmZ5ZIpBWTGllZSQnw97Dj+woV0toclVaRGI8pc=
|
github.com/rogpeppe/go-charset v0.0.0-20180617210344-2471d30d28b4/go.mod h1:qgYeAmZ5ZIpBWTGllZSQnw97Dj+woV0toclVaRGI8pc=
|
||||||
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
|
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
|
||||||
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
|
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
|
||||||
github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
|
github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
|
||||||
github.com/rs/zerolog v1.33.0 h1:1cU2KZkvPxNyfgEmhHAz/1A9Bz+llsdYzklWFzgp0r8=
|
github.com/rs/zerolog v1.33.0 h1:1cU2KZkvPxNyfgEmhHAz/1A9Bz+llsdYzklWFzgp0r8=
|
||||||
github.com/rs/zerolog v1.33.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss=
|
github.com/rs/zerolog v1.33.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss=
|
||||||
@@ -234,8 +240,8 @@ golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73r
|
|||||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||||
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I=
|
golang.org/x/net v0.35.0 h1:T5GQRQb2y08kTAByq9L4/bz8cipCdA8FbRTXewonqY8=
|
||||||
golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
|
golang.org/x/net v0.35.0/go.mod h1:EglIi67kWsHKlRzzVMUD93VMSWGFOMSZgxFjparz1Qk=
|
||||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
@@ -243,6 +249,8 @@ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJ
|
|||||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
|
||||||
|
golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
|
||||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
|
|||||||
90
internal/cache/cache.go
vendored
90
internal/cache/cache.go
vendored
@@ -1,90 +0,0 @@
|
|||||||
package cache
|
|
||||||
|
|
||||||
import (
|
|
||||||
"sync"
|
|
||||||
)
|
|
||||||
|
|
||||||
type Cache struct {
|
|
||||||
data map[string]struct{}
|
|
||||||
order []string
|
|
||||||
maxItems int
|
|
||||||
mu sync.RWMutex
|
|
||||||
}
|
|
||||||
|
|
||||||
func New(maxItems int) *Cache {
|
|
||||||
if maxItems <= 0 {
|
|
||||||
maxItems = 1000
|
|
||||||
}
|
|
||||||
return &Cache{
|
|
||||||
data: make(map[string]struct{}, maxItems),
|
|
||||||
order: make([]string, 0, maxItems),
|
|
||||||
maxItems: maxItems,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Cache) Add(value string) {
|
|
||||||
c.mu.Lock()
|
|
||||||
defer c.mu.Unlock()
|
|
||||||
|
|
||||||
if _, exists := c.data[value]; !exists {
|
|
||||||
if len(c.order) >= c.maxItems {
|
|
||||||
delete(c.data, c.order[0])
|
|
||||||
c.order = c.order[1:]
|
|
||||||
}
|
|
||||||
c.data[value] = struct{}{}
|
|
||||||
c.order = append(c.order, value)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Cache) AddMultiple(values map[string]bool) {
|
|
||||||
c.mu.Lock()
|
|
||||||
defer c.mu.Unlock()
|
|
||||||
|
|
||||||
for value, exists := range values {
|
|
||||||
if !exists {
|
|
||||||
if _, exists := c.data[value]; !exists {
|
|
||||||
if len(c.order) >= c.maxItems {
|
|
||||||
delete(c.data, c.order[0])
|
|
||||||
c.order = c.order[1:]
|
|
||||||
}
|
|
||||||
c.data[value] = struct{}{}
|
|
||||||
c.order = append(c.order, value)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Cache) Get(index int) (string, bool) {
|
|
||||||
c.mu.RLock()
|
|
||||||
defer c.mu.RUnlock()
|
|
||||||
if index < 0 || index >= len(c.order) {
|
|
||||||
return "", false
|
|
||||||
}
|
|
||||||
return c.order[index], true
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Cache) GetMultiple(values []string) map[string]bool {
|
|
||||||
c.mu.RLock()
|
|
||||||
defer c.mu.RUnlock()
|
|
||||||
|
|
||||||
result := make(map[string]bool, len(values))
|
|
||||||
for _, value := range values {
|
|
||||||
if _, exists := c.data[value]; exists {
|
|
||||||
result[value] = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return result
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Cache) Exists(value string) bool {
|
|
||||||
c.mu.RLock()
|
|
||||||
defer c.mu.RUnlock()
|
|
||||||
_, exists := c.data[value]
|
|
||||||
return exists
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Cache) Len() int {
|
|
||||||
c.mu.RLock()
|
|
||||||
defer c.mu.RUnlock()
|
|
||||||
return len(c.order)
|
|
||||||
}
|
|
||||||
@@ -1,9 +1,10 @@
|
|||||||
package config
|
package config
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/json"
|
"cmp"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"github.com/goccy/go-json"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"sync"
|
"sync"
|
||||||
@@ -19,29 +20,25 @@ type Debrid struct {
|
|||||||
Name string `json:"name"`
|
Name string `json:"name"`
|
||||||
Host string `json:"host"`
|
Host string `json:"host"`
|
||||||
APIKey string `json:"api_key"`
|
APIKey string `json:"api_key"`
|
||||||
|
DownloadAPIKeys []string `json:"download_api_keys"`
|
||||||
Folder string `json:"folder"`
|
Folder string `json:"folder"`
|
||||||
DownloadUncached bool `json:"download_uncached"`
|
DownloadUncached bool `json:"download_uncached"`
|
||||||
CheckCached bool `json:"check_cached"`
|
CheckCached bool `json:"check_cached"`
|
||||||
RateLimit string `json:"rate_limit"` // 200/minute or 10/second
|
RateLimit string `json:"rate_limit"` // 200/minute or 10/second
|
||||||
}
|
Proxy string `json:"proxy"`
|
||||||
|
|
||||||
type Proxy struct {
|
UseWebDav bool `json:"use_webdav"`
|
||||||
Port string `json:"port"`
|
WebDav
|
||||||
Enabled bool `json:"enabled"`
|
|
||||||
LogLevel string `json:"log_level"`
|
|
||||||
Username string `json:"username"`
|
|
||||||
Password string `json:"password"`
|
|
||||||
CachedOnly bool `json:"cached_only"`
|
|
||||||
}
|
}
|
||||||
|
|
||||||
type QBitTorrent struct {
|
type QBitTorrent struct {
|
||||||
Username string `json:"username"`
|
Username string `json:"username"`
|
||||||
Password string `json:"password"`
|
Password string `json:"password"`
|
||||||
Port string `json:"port"`
|
Port string `json:"port"`
|
||||||
LogLevel string `json:"log_level"`
|
|
||||||
DownloadFolder string `json:"download_folder"`
|
DownloadFolder string `json:"download_folder"`
|
||||||
Categories []string `json:"categories"`
|
Categories []string `json:"categories"`
|
||||||
RefreshInterval int `json:"refresh_interval"`
|
RefreshInterval int `json:"refresh_interval"`
|
||||||
|
SkipPreCache bool `json:"skip_pre_cache"`
|
||||||
}
|
}
|
||||||
|
|
||||||
type Arr struct {
|
type Arr struct {
|
||||||
@@ -49,6 +46,8 @@ type Arr struct {
|
|||||||
Host string `json:"host"`
|
Host string `json:"host"`
|
||||||
Token string `json:"token"`
|
Token string `json:"token"`
|
||||||
Cleanup bool `json:"cleanup"`
|
Cleanup bool `json:"cleanup"`
|
||||||
|
SkipRepair bool `json:"skip_repair"`
|
||||||
|
DownloadUncached *bool `json:"download_uncached"`
|
||||||
}
|
}
|
||||||
|
|
||||||
type Repair struct {
|
type Repair struct {
|
||||||
@@ -56,7 +55,9 @@ type Repair struct {
|
|||||||
Interval string `json:"interval"`
|
Interval string `json:"interval"`
|
||||||
RunOnStart bool `json:"run_on_start"`
|
RunOnStart bool `json:"run_on_start"`
|
||||||
ZurgURL string `json:"zurg_url"`
|
ZurgURL string `json:"zurg_url"`
|
||||||
SkipDeletion bool `json:"skip_deletion"`
|
AutoProcess bool `json:"auto_process"`
|
||||||
|
UseWebDav bool `json:"use_webdav"`
|
||||||
|
Workers int `json:"workers"`
|
||||||
}
|
}
|
||||||
|
|
||||||
type Auth struct {
|
type Auth struct {
|
||||||
@@ -64,21 +65,36 @@ type Auth struct {
|
|||||||
Password string `json:"password"`
|
Password string `json:"password"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type WebDav struct {
|
||||||
|
TorrentsRefreshInterval string `json:"torrents_refresh_interval"`
|
||||||
|
DownloadLinksRefreshInterval string `json:"download_links_refresh_interval"`
|
||||||
|
Workers int `json:"workers"`
|
||||||
|
AutoExpireLinksAfter string `json:"auto_expire_links_after"`
|
||||||
|
|
||||||
|
// Folder
|
||||||
|
FolderNaming string `json:"folder_naming"`
|
||||||
|
|
||||||
|
// Rclone
|
||||||
|
RcUrl string `json:"rc_url"`
|
||||||
|
RcUser string `json:"rc_user"`
|
||||||
|
RcPass string `json:"rc_pass"`
|
||||||
|
}
|
||||||
|
|
||||||
type Config struct {
|
type Config struct {
|
||||||
LogLevel string `json:"log_level"`
|
LogLevel string `json:"log_level"`
|
||||||
Debrid Debrid `json:"debrid"`
|
|
||||||
Debrids []Debrid `json:"debrids"`
|
Debrids []Debrid `json:"debrids"`
|
||||||
Proxy Proxy `json:"proxy"`
|
|
||||||
MaxCacheSize int `json:"max_cache_size"`
|
MaxCacheSize int `json:"max_cache_size"`
|
||||||
QBitTorrent QBitTorrent `json:"qbittorrent"`
|
QBitTorrent QBitTorrent `json:"qbittorrent"`
|
||||||
Arrs []Arr `json:"arrs"`
|
Arrs []Arr `json:"arrs"`
|
||||||
Repair Repair `json:"repair"`
|
Repair Repair `json:"repair"`
|
||||||
|
WebDav WebDav `json:"webdav"`
|
||||||
AllowedExt []string `json:"allowed_file_types"`
|
AllowedExt []string `json:"allowed_file_types"`
|
||||||
MinFileSize string `json:"min_file_size"` // Minimum file size to download, 10MB, 1GB, etc
|
MinFileSize string `json:"min_file_size"` // Minimum file size to download, 10MB, 1GB, etc
|
||||||
MaxFileSize string `json:"max_file_size"` // Maximum file size to download (0 means no limit)
|
MaxFileSize string `json:"max_file_size"` // Maximum file size to download (0 means no limit)
|
||||||
Path string `json:"-"` // Path to save the config file
|
Path string `json:"-"` // Path to save the config file
|
||||||
UseAuth bool `json:"use_auth"`
|
UseAuth bool `json:"use_auth"`
|
||||||
Auth *Auth `json:"-"`
|
Auth *Auth `json:"-"`
|
||||||
|
DiscordWebhook string `json:"discord_webhook_url"`
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *Config) JsonFile() string {
|
func (c *Config) JsonFile() string {
|
||||||
@@ -103,8 +119,8 @@ func (c *Config) loadConfig() error {
|
|||||||
return fmt.Errorf("error unmarshaling config: %w", err)
|
return fmt.Errorf("error unmarshaling config: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if c.Debrid.Name != "" {
|
for i, debrid := range c.Debrids {
|
||||||
c.Debrids = append(c.Debrids, c.Debrid)
|
c.Debrids[i] = c.updateDebrid(debrid)
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(c.AllowedExt) == 0 {
|
if len(c.AllowedExt) == 0 {
|
||||||
@@ -115,9 +131,9 @@ func (c *Config) loadConfig() error {
|
|||||||
c.Auth = c.GetAuth()
|
c.Auth = c.GetAuth()
|
||||||
|
|
||||||
//Validate the config
|
//Validate the config
|
||||||
//if err := validateConfig(c); err != nil {
|
if err := validateConfig(c); err != nil {
|
||||||
// return err
|
return err
|
||||||
//}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@@ -142,14 +158,14 @@ func validateDebrids(debrids []Debrid) error {
|
|||||||
return errors.New("debrid folder is required")
|
return errors.New("debrid folder is required")
|
||||||
}
|
}
|
||||||
|
|
||||||
// Check folder existence concurrently
|
// Check folder existence
|
||||||
wg.Add(1)
|
//wg.Add(1)
|
||||||
go func(folder string) {
|
//go func(folder string) {
|
||||||
defer wg.Done()
|
// defer wg.Done()
|
||||||
if _, err := os.Stat(folder); os.IsNotExist(err) {
|
// if _, err := os.Stat(folder); os.IsNotExist(err) {
|
||||||
errChan <- fmt.Errorf("debrid folder does not exist: %s", folder)
|
// errChan <- fmt.Errorf("debrid folder does not exist: %s", folder)
|
||||||
}
|
// }
|
||||||
}(debrid.Folder)
|
//}(debrid.Folder)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Wait for all checks to complete
|
// Wait for all checks to complete
|
||||||
@@ -166,33 +182,21 @@ func validateDebrids(debrids []Debrid) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func validateQbitTorrent(config *QBitTorrent) error {
|
//func validateQbitTorrent(config *QBitTorrent) error {
|
||||||
if config.DownloadFolder == "" {
|
// if config.DownloadFolder == "" {
|
||||||
return errors.New("qbittorent download folder is required")
|
// return errors.New("qbittorent download folder is required")
|
||||||
}
|
// }
|
||||||
if _, err := os.Stat(config.DownloadFolder); os.IsNotExist(err) {
|
// if _, err := os.Stat(config.DownloadFolder); os.IsNotExist(err) {
|
||||||
return errors.New("qbittorent download folder does not exist")
|
// return fmt.Errorf("qbittorent download folder(%s) does not exist", config.DownloadFolder)
|
||||||
}
|
// }
|
||||||
return nil
|
// return nil
|
||||||
}
|
//}
|
||||||
|
|
||||||
func validateConfig(config *Config) error {
|
func validateConfig(config *Config) error {
|
||||||
// Run validations concurrently
|
// Run validations concurrently
|
||||||
errChan := make(chan error, 2)
|
|
||||||
|
|
||||||
go func() {
|
if err := validateDebrids(config.Debrids); err != nil {
|
||||||
errChan <- validateDebrids(config.Debrids)
|
return fmt.Errorf("debrids validation error: %w", err)
|
||||||
}()
|
|
||||||
|
|
||||||
go func() {
|
|
||||||
errChan <- validateQbitTorrent(&config.QBitTorrent)
|
|
||||||
}()
|
|
||||||
|
|
||||||
// Check for errors
|
|
||||||
for i := 0; i < 2; i++ {
|
|
||||||
if err := <-errChan; err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
@@ -203,14 +207,11 @@ func SetConfigPath(path string) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func GetConfig() *Config {
|
func Get() *Config {
|
||||||
once.Do(func() {
|
once.Do(func() {
|
||||||
instance = &Config{} // Initialize instance first
|
instance = &Config{} // Initialize instance first
|
||||||
if err := instance.loadConfig(); err != nil {
|
if err := instance.loadConfig(); err != nil {
|
||||||
_, err := fmt.Fprintf(os.Stderr, "configuration Error: %v\n", err)
|
fmt.Fprintf(os.Stderr, "configuration Error: %v\n", err)
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
os.Exit(1)
|
os.Exit(1)
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
@@ -285,3 +286,31 @@ func (c *Config) NeedsSetup() bool {
|
|||||||
}
|
}
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (c *Config) updateDebrid(d Debrid) Debrid {
|
||||||
|
|
||||||
|
if len(d.DownloadAPIKeys) == 0 {
|
||||||
|
d.DownloadAPIKeys = append(d.DownloadAPIKeys, d.APIKey)
|
||||||
|
}
|
||||||
|
|
||||||
|
if !d.UseWebDav {
|
||||||
|
return d
|
||||||
|
}
|
||||||
|
|
||||||
|
if d.TorrentsRefreshInterval == "" {
|
||||||
|
d.TorrentsRefreshInterval = cmp.Or(c.WebDav.TorrentsRefreshInterval, "15s") // 15 seconds
|
||||||
|
}
|
||||||
|
if d.WebDav.DownloadLinksRefreshInterval == "" {
|
||||||
|
d.DownloadLinksRefreshInterval = cmp.Or(c.WebDav.DownloadLinksRefreshInterval, "40m") // 40 minutes
|
||||||
|
}
|
||||||
|
if d.Workers == 0 {
|
||||||
|
d.Workers = cmp.Or(c.WebDav.Workers, 30) // 30 workers
|
||||||
|
}
|
||||||
|
if d.FolderNaming == "" {
|
||||||
|
d.FolderNaming = cmp.Or(c.WebDav.FolderNaming, "original_no_ext")
|
||||||
|
}
|
||||||
|
if d.AutoExpireLinksAfter == "" {
|
||||||
|
d.AutoExpireLinksAfter = cmp.Or(c.WebDav.AutoExpireLinksAfter, "3d") // 2 days
|
||||||
|
}
|
||||||
|
return d
|
||||||
|
}
|
||||||
|
|||||||
@@ -24,8 +24,8 @@ func (c *Config) IsAllowedFile(filename string) bool {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func getDefaultExtensions() []string {
|
func getDefaultExtensions() []string {
|
||||||
videoExts := strings.Split("YUV,WMV,WEBM,VOB,VIV,SVI,ROQ,RMVB,RM,OGV,OGG,NSV,MXF,MPG,MPEG,M2V,MP2,MPE,MPV,MP4,M4P,M4V,MOV,QT,MNG,MKV,FLV,DRC,AVI,ASF,AMV,MKA,F4V,3GP,3G2,DIVX,X264,X265", ",")
|
videoExts := strings.Split("webm,m4v,3gp,nsv,ty,strm,rm,rmvb,m3u,ifo,mov,qt,divx,xvid,bivx,nrg,pva,wmv,asf,asx,ogm,ogv,m2v,avi,bin,dat,dvr-ms,mpg,mpeg,mp4,avc,vp3,svq3,nuv,viv,dv,fli,flv,wpl,img,iso,vob,mkv,mk3d,ts,wtv,m2ts'", ",")
|
||||||
musicExts := strings.Split("MP3,WAV,FLAC,AAC,OGG,WMA,AIFF,ALAC,M4A,APE,AC3,DTS,M4P,MID,MIDI,MKA,MP2,MPA,RA,VOC,WV,AMR", ",")
|
musicExts := strings.Split("MP3,WAV,FLAC,OGG,WMA,AIFF,ALAC,M4A,APE,AC3,DTS,M4P,MID,MIDI,MKA,MP2,MPA,RA,VOC,WV,AMR", ",")
|
||||||
|
|
||||||
// Combine both slices
|
// Combine both slices
|
||||||
allExts := append(videoExts, musicExts...)
|
allExts := append(videoExts, musicExts...)
|
||||||
@@ -36,12 +36,12 @@ func getDefaultExtensions() []string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Remove duplicates
|
// Remove duplicates
|
||||||
seen := make(map[string]bool)
|
seen := make(map[string]struct{})
|
||||||
var unique []string
|
var unique []string
|
||||||
|
|
||||||
for _, ext := range allExts {
|
for _, ext := range allExts {
|
||||||
if !seen[ext] {
|
if _, ok := seen[ext]; !ok {
|
||||||
seen[ext] = true
|
seen[ext] = struct{}{}
|
||||||
unique = append(unique, ext)
|
unique = append(unique, ext)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -3,7 +3,7 @@ package logger
|
|||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"github.com/rs/zerolog"
|
"github.com/rs/zerolog"
|
||||||
"github.com/sirrobot01/debrid-blackhole/internal/config"
|
"github.com/sirrobot01/decypharr/internal/config"
|
||||||
"gopkg.in/natefinch/lumberjack.v2"
|
"gopkg.in/natefinch/lumberjack.v2"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
@@ -17,7 +17,7 @@ var (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func GetLogPath() string {
|
func GetLogPath() string {
|
||||||
cfg := config.GetConfig()
|
cfg := config.Get()
|
||||||
logsDir := filepath.Join(cfg.Path, "logs")
|
logsDir := filepath.Join(cfg.Path, "logs")
|
||||||
|
|
||||||
if _, err := os.Stat(logsDir); os.IsNotExist(err) {
|
if _, err := os.Stat(logsDir); os.IsNotExist(err) {
|
||||||
@@ -29,18 +29,19 @@ func GetLogPath() string {
|
|||||||
return filepath.Join(logsDir, "decypharr.log")
|
return filepath.Join(logsDir, "decypharr.log")
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewLogger(prefix string, level string, output *os.File) zerolog.Logger {
|
func New(prefix string) zerolog.Logger {
|
||||||
|
|
||||||
|
level := config.Get().LogLevel
|
||||||
|
|
||||||
rotatingLogFile := &lumberjack.Logger{
|
rotatingLogFile := &lumberjack.Logger{
|
||||||
Filename: GetLogPath(),
|
Filename: GetLogPath(),
|
||||||
MaxSize: 2,
|
MaxSize: 10,
|
||||||
MaxBackups: 2,
|
MaxAge: 15,
|
||||||
MaxAge: 28,
|
|
||||||
Compress: true,
|
Compress: true,
|
||||||
}
|
}
|
||||||
|
|
||||||
consoleWriter := zerolog.ConsoleWriter{
|
consoleWriter := zerolog.ConsoleWriter{
|
||||||
Out: output,
|
Out: os.Stdout,
|
||||||
TimeFormat: "2006-01-02 15:04:05",
|
TimeFormat: "2006-01-02 15:04:05",
|
||||||
NoColor: false, // Set to true if you don't want colors
|
NoColor: false, // Set to true if you don't want colors
|
||||||
FormatLevel: func(i interface{}) string {
|
FormatLevel: func(i interface{}) string {
|
||||||
@@ -72,6 +73,7 @@ func NewLogger(prefix string, level string, output *os.File) zerolog.Logger {
|
|||||||
Level(zerolog.InfoLevel)
|
Level(zerolog.InfoLevel)
|
||||||
|
|
||||||
// Set the log level
|
// Set the log level
|
||||||
|
level = strings.ToLower(level)
|
||||||
switch level {
|
switch level {
|
||||||
case "debug":
|
case "debug":
|
||||||
logger = logger.Level(zerolog.DebugLevel)
|
logger = logger.Level(zerolog.DebugLevel)
|
||||||
@@ -81,13 +83,15 @@ func NewLogger(prefix string, level string, output *os.File) zerolog.Logger {
|
|||||||
logger = logger.Level(zerolog.WarnLevel)
|
logger = logger.Level(zerolog.WarnLevel)
|
||||||
case "error":
|
case "error":
|
||||||
logger = logger.Level(zerolog.ErrorLevel)
|
logger = logger.Level(zerolog.ErrorLevel)
|
||||||
|
case "trace":
|
||||||
|
logger = logger.Level(zerolog.TraceLevel)
|
||||||
}
|
}
|
||||||
return logger
|
return logger
|
||||||
}
|
}
|
||||||
|
|
||||||
func GetLogger(level string) zerolog.Logger {
|
func GetDefaultLogger() zerolog.Logger {
|
||||||
once.Do(func() {
|
once.Do(func() {
|
||||||
logger = NewLogger("decypharr", level, os.Stdout)
|
logger = New("decypharr")
|
||||||
})
|
})
|
||||||
return logger
|
return logger
|
||||||
}
|
}
|
||||||
|
|||||||
100
internal/request/discord.go
Normal file
100
internal/request/discord.go
Normal file
@@ -0,0 +1,100 @@
|
|||||||
|
package request
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"github.com/goccy/go-json"
|
||||||
|
"github.com/sirrobot01/decypharr/internal/config"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
type DiscordEmbed struct {
|
||||||
|
Title string `json:"title"`
|
||||||
|
Description string `json:"description"`
|
||||||
|
Color int `json:"color"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type DiscordWebhook struct {
|
||||||
|
Embeds []DiscordEmbed `json:"embeds"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func getDiscordColor(status string) int {
|
||||||
|
switch status {
|
||||||
|
case "success":
|
||||||
|
return 3066993
|
||||||
|
case "error":
|
||||||
|
return 15158332
|
||||||
|
case "warning":
|
||||||
|
return 15844367
|
||||||
|
case "pending":
|
||||||
|
return 3447003
|
||||||
|
default:
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func getDiscordHeader(event string) string {
|
||||||
|
switch event {
|
||||||
|
case "download_complete":
|
||||||
|
return "[Decypharr] Download Completed"
|
||||||
|
case "download_failed":
|
||||||
|
return "[Decypharr] Download Failed"
|
||||||
|
case "repair_pending":
|
||||||
|
return "[Decypharr] Repair Completed, Awaiting action"
|
||||||
|
case "repair_complete":
|
||||||
|
return "[Decypharr] Repair Complete"
|
||||||
|
default:
|
||||||
|
// split the event string and capitalize the first letter of each word
|
||||||
|
evs := strings.Split(event, "_")
|
||||||
|
for i, ev := range evs {
|
||||||
|
evs[i] = strings.ToTitle(ev)
|
||||||
|
}
|
||||||
|
return "[Decypharr] %s" + strings.Join(evs, " ")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func SendDiscordMessage(event string, status string, message string) error {
|
||||||
|
cfg := config.Get()
|
||||||
|
webhookURL := cfg.DiscordWebhook
|
||||||
|
if webhookURL == "" {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create the proper Discord webhook structure
|
||||||
|
|
||||||
|
webhook := DiscordWebhook{
|
||||||
|
Embeds: []DiscordEmbed{
|
||||||
|
{
|
||||||
|
Title: getDiscordHeader(event),
|
||||||
|
Description: message,
|
||||||
|
Color: getDiscordColor(status),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
payload, err := json.Marshal(webhook)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to marshal discord payload: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
req, err := http.NewRequest(http.MethodPost, webhookURL, bytes.NewReader(payload))
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to create discord request: %v", err)
|
||||||
|
}
|
||||||
|
req.Header.Set("Content-Type", "application/json")
|
||||||
|
|
||||||
|
resp, err := http.DefaultClient.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to send discord message: %v", err)
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
if resp.StatusCode < 200 || resp.StatusCode >= 300 {
|
||||||
|
bodyBytes, _ := io.ReadAll(resp.Body)
|
||||||
|
return fmt.Errorf("discord returned error status code: %s, body: %s", resp.Status, string(bodyBytes))
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
29
internal/request/errors.go
Normal file
29
internal/request/errors.go
Normal file
@@ -0,0 +1,29 @@
|
|||||||
|
package request
|
||||||
|
|
||||||
|
type HTTPError struct {
|
||||||
|
StatusCode int
|
||||||
|
Message string
|
||||||
|
Code string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *HTTPError) Error() string {
|
||||||
|
return e.Message
|
||||||
|
}
|
||||||
|
|
||||||
|
var HosterUnavailableError = &HTTPError{
|
||||||
|
StatusCode: 503,
|
||||||
|
Message: "Hoster is unavailable",
|
||||||
|
Code: "hoster_unavailable",
|
||||||
|
}
|
||||||
|
|
||||||
|
var TrafficExceededError = &HTTPError{
|
||||||
|
StatusCode: 503,
|
||||||
|
Message: "Traffic exceeded",
|
||||||
|
Code: "traffic_exceeded",
|
||||||
|
}
|
||||||
|
|
||||||
|
var ErrLinkBroken = &HTTPError{
|
||||||
|
StatusCode: 404,
|
||||||
|
Message: "File is unavailable",
|
||||||
|
Code: "file_unavailable",
|
||||||
|
}
|
||||||
@@ -1,17 +1,26 @@
|
|||||||
package request
|
package request
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"bytes"
|
||||||
|
"compress/gzip"
|
||||||
|
"context"
|
||||||
"crypto/tls"
|
"crypto/tls"
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"github.com/goccy/go-json"
|
||||||
|
"github.com/rs/zerolog"
|
||||||
|
"github.com/sirrobot01/decypharr/internal/logger"
|
||||||
|
"golang.org/x/net/proxy"
|
||||||
"golang.org/x/time/rate"
|
"golang.org/x/time/rate"
|
||||||
"io"
|
"io"
|
||||||
"log"
|
"math"
|
||||||
|
"math/rand"
|
||||||
|
"net"
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/url"
|
"net/url"
|
||||||
"regexp"
|
"regexp"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -34,100 +43,373 @@ func JoinURL(base string, paths ...string) (string, error) {
|
|||||||
return joined, nil
|
return joined, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
type RLHTTPClient struct {
|
var (
|
||||||
|
once sync.Once
|
||||||
|
instance *Client
|
||||||
|
)
|
||||||
|
|
||||||
|
type ClientOption func(*Client)
|
||||||
|
|
||||||
|
// Client represents an HTTP client with additional capabilities
|
||||||
|
type Client struct {
|
||||||
client *http.Client
|
client *http.Client
|
||||||
Ratelimiter *rate.Limiter
|
rateLimiter *rate.Limiter
|
||||||
Headers map[string]string
|
headers map[string]string
|
||||||
|
headersMu sync.RWMutex
|
||||||
|
maxRetries int
|
||||||
|
timeout time.Duration
|
||||||
|
skipTLSVerify bool
|
||||||
|
retryableStatus map[int]struct{}
|
||||||
|
logger zerolog.Logger
|
||||||
|
proxy string
|
||||||
|
|
||||||
|
// cooldown
|
||||||
|
statusCooldowns map[int]time.Duration
|
||||||
|
statusCooldownsMu sync.RWMutex
|
||||||
|
lastStatusTime map[int]time.Time
|
||||||
|
lastStatusTimeMu sync.RWMutex
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *RLHTTPClient) Doer(req *http.Request) (*http.Response, error) {
|
func WithStatusCooldown(statusCode int, cooldown time.Duration) ClientOption {
|
||||||
if c.Ratelimiter != nil {
|
return func(c *Client) {
|
||||||
err := c.Ratelimiter.Wait(req.Context())
|
c.statusCooldownsMu.Lock()
|
||||||
if err != nil {
|
if c.statusCooldowns == nil {
|
||||||
return nil, err
|
c.statusCooldowns = make(map[int]time.Duration)
|
||||||
}
|
}
|
||||||
|
c.statusCooldowns[statusCode] = cooldown
|
||||||
|
c.statusCooldownsMu.Unlock()
|
||||||
}
|
}
|
||||||
resp, err := c.client.Do(req)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return resp, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *RLHTTPClient) Do(req *http.Request) (*http.Response, error) {
|
// WithMaxRetries sets the maximum number of retry attempts
|
||||||
var resp *http.Response
|
func WithMaxRetries(maxRetries int) ClientOption {
|
||||||
|
return func(c *Client) {
|
||||||
|
c.maxRetries = maxRetries
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithTimeout sets the request timeout
|
||||||
|
func WithTimeout(timeout time.Duration) ClientOption {
|
||||||
|
return func(c *Client) {
|
||||||
|
c.timeout = timeout
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func WithRedirectPolicy(policy func(req *http.Request, via []*http.Request) error) ClientOption {
|
||||||
|
return func(c *Client) {
|
||||||
|
c.client.CheckRedirect = policy
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithRateLimiter sets a rate limiter
|
||||||
|
func WithRateLimiter(rl *rate.Limiter) ClientOption {
|
||||||
|
return func(c *Client) {
|
||||||
|
c.rateLimiter = rl
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithHeaders sets default headers
|
||||||
|
func WithHeaders(headers map[string]string) ClientOption {
|
||||||
|
return func(c *Client) {
|
||||||
|
c.headersMu.Lock()
|
||||||
|
c.headers = headers
|
||||||
|
c.headersMu.Unlock()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) SetHeader(key, value string) {
|
||||||
|
c.headersMu.Lock()
|
||||||
|
c.headers[key] = value
|
||||||
|
c.headersMu.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
func WithLogger(logger zerolog.Logger) ClientOption {
|
||||||
|
return func(c *Client) {
|
||||||
|
c.logger = logger
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func WithTransport(transport *http.Transport) ClientOption {
|
||||||
|
return func(c *Client) {
|
||||||
|
c.client.Transport = transport
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithRetryableStatus adds status codes that should trigger a retry
|
||||||
|
func WithRetryableStatus(statusCodes ...int) ClientOption {
|
||||||
|
return func(c *Client) {
|
||||||
|
for _, code := range statusCodes {
|
||||||
|
c.retryableStatus[code] = struct{}{}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func WithProxy(proxyURL string) ClientOption {
|
||||||
|
return func(c *Client) {
|
||||||
|
c.proxy = proxyURL
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// doRequest performs a single HTTP request with rate limiting
|
||||||
|
func (c *Client) doRequest(req *http.Request) (*http.Response, error) {
|
||||||
|
if c.rateLimiter != nil {
|
||||||
|
err := c.rateLimiter.Wait(req.Context())
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("rate limiter wait: %w", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return c.client.Do(req)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Do performs an HTTP request with retries for certain status codes
|
||||||
|
func (c *Client) Do(req *http.Request) (*http.Response, error) {
|
||||||
|
// Save the request body for reuse in retries
|
||||||
|
var bodyBytes []byte
|
||||||
var err error
|
var err error
|
||||||
backoff := time.Millisecond * 500
|
|
||||||
|
|
||||||
for i := 0; i < 3; i++ {
|
if req.Body != nil {
|
||||||
resp, err = c.Doer(req)
|
bodyBytes, err = io.ReadAll(req.Body)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("reading request body: %w", err)
|
||||||
|
}
|
||||||
|
req.Body.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
backoff := time.Millisecond * 500
|
||||||
|
var resp *http.Response
|
||||||
|
|
||||||
|
for attempt := 0; attempt <= c.maxRetries; attempt++ {
|
||||||
|
// Reset the request body if it exists
|
||||||
|
if bodyBytes != nil {
|
||||||
|
req.Body = io.NopCloser(bytes.NewReader(bodyBytes))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Apply headers
|
||||||
|
c.headersMu.RLock()
|
||||||
|
if c.headers != nil {
|
||||||
|
for key, value := range c.headers {
|
||||||
|
req.Header.Set(key, value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
c.headersMu.RUnlock()
|
||||||
|
|
||||||
|
if attempt > 0 && resp != nil {
|
||||||
|
c.statusCooldownsMu.RLock()
|
||||||
|
cooldown, exists := c.statusCooldowns[resp.StatusCode]
|
||||||
|
c.statusCooldownsMu.RUnlock()
|
||||||
|
|
||||||
|
if exists {
|
||||||
|
c.lastStatusTimeMu.RLock()
|
||||||
|
lastTime, timeExists := c.lastStatusTime[resp.StatusCode]
|
||||||
|
c.lastStatusTimeMu.RUnlock()
|
||||||
|
|
||||||
|
if timeExists {
|
||||||
|
elapsed := time.Since(lastTime)
|
||||||
|
if elapsed < cooldown {
|
||||||
|
// We need to wait longer for this status code
|
||||||
|
waitTime := cooldown - elapsed
|
||||||
|
select {
|
||||||
|
case <-req.Context().Done():
|
||||||
|
return nil, req.Context().Err()
|
||||||
|
case <-time.After(waitTime):
|
||||||
|
// Continue after waiting
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
resp, err = c.doRequest(req)
|
||||||
|
|
||||||
|
if err == nil {
|
||||||
|
c.lastStatusTimeMu.Lock()
|
||||||
|
c.lastStatusTime[resp.StatusCode] = time.Now()
|
||||||
|
c.lastStatusTimeMu.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
// Check if this is a network error that might be worth retrying
|
||||||
|
if attempt < c.maxRetries {
|
||||||
|
// Apply backoff with jitter
|
||||||
|
jitter := time.Duration(rand.Int63n(int64(backoff / 4)))
|
||||||
|
sleepTime := backoff + jitter
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-req.Context().Done():
|
||||||
|
return nil, req.Context().Err()
|
||||||
|
case <-time.After(sleepTime):
|
||||||
|
// Continue to next retry attempt
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exponential backoff
|
||||||
|
backoff *= 2
|
||||||
|
continue
|
||||||
|
}
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
if resp.StatusCode != http.StatusTooManyRequests {
|
// Check if the status code is retryable
|
||||||
|
if _, ok := c.retryableStatus[resp.StatusCode]; !ok || attempt == c.maxRetries {
|
||||||
return resp, nil
|
return resp, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Close the response body to prevent resource leakage
|
// Close the response body before retrying
|
||||||
resp.Body.Close()
|
resp.Body.Close()
|
||||||
|
|
||||||
// Wait for the backoff duration before retrying
|
// Apply backoff with jitter
|
||||||
time.Sleep(backoff)
|
jitter := time.Duration(rand.Int63n(int64(backoff / 4)))
|
||||||
|
sleepTime := backoff + jitter
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-req.Context().Done():
|
||||||
|
return nil, req.Context().Err()
|
||||||
|
case <-time.After(sleepTime):
|
||||||
|
// Continue to next retry attempt
|
||||||
|
}
|
||||||
|
|
||||||
// Exponential backoff
|
// Exponential backoff
|
||||||
backoff *= 2
|
backoff *= 2
|
||||||
}
|
}
|
||||||
|
|
||||||
return resp, fmt.Errorf("max retries exceeded")
|
return nil, fmt.Errorf("max retries exceeded")
|
||||||
}
|
|
||||||
|
|
||||||
func (c *RLHTTPClient) MakeRequest(req *http.Request) ([]byte, error) {
|
|
||||||
if c.Headers != nil {
|
|
||||||
for key, value := range c.Headers {
|
|
||||||
req.Header.Set(key, value)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// MakeRequest performs an HTTP request and returns the response body as bytes
|
||||||
|
func (c *Client) MakeRequest(req *http.Request) ([]byte, error) {
|
||||||
res, err := c.Do(req)
|
res, err := c.Do(req)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
defer func(Body io.ReadCloser) {
|
defer func() {
|
||||||
err := Body.Close()
|
if err := res.Body.Close(); err != nil {
|
||||||
|
c.logger.Printf("Failed to close response body: %v", err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
bodyBytes, err := io.ReadAll(res.Body)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Println(err)
|
return nil, fmt.Errorf("reading response body: %w", err)
|
||||||
}
|
}
|
||||||
}(res.Body)
|
|
||||||
|
|
||||||
b, err := io.ReadAll(res.Body)
|
if res.StatusCode < 200 || res.StatusCode >= 300 {
|
||||||
|
return nil, fmt.Errorf("HTTP error %d: %s", res.StatusCode, string(bodyBytes))
|
||||||
|
}
|
||||||
|
|
||||||
|
return bodyBytes, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) Get(url string) (*http.Response, error) {
|
||||||
|
req, err := http.NewRequest(http.MethodGet, url, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, fmt.Errorf("creating GET request: %w", err)
|
||||||
}
|
|
||||||
statusOk := res.StatusCode >= 200 && res.StatusCode < 300
|
|
||||||
if !statusOk {
|
|
||||||
// Add status code error to the body
|
|
||||||
b = append(b, []byte(fmt.Sprintf("\nstatus code: %d", res.StatusCode))...)
|
|
||||||
return nil, fmt.Errorf(string(b))
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return b, nil
|
return c.Do(req)
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewRLHTTPClient(rl *rate.Limiter, headers map[string]string) *RLHTTPClient {
|
// New creates a new HTTP client with the specified options
|
||||||
tr := &http.Transport{
|
func New(options ...ClientOption) *Client {
|
||||||
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
|
client := &Client{
|
||||||
Proxy: http.ProxyFromEnvironment,
|
maxRetries: 3,
|
||||||
}
|
skipTLSVerify: true,
|
||||||
c := &RLHTTPClient{
|
retryableStatus: map[int]struct{}{
|
||||||
client: &http.Client{
|
http.StatusTooManyRequests: struct{}{},
|
||||||
Transport: tr,
|
http.StatusInternalServerError: struct{}{},
|
||||||
|
http.StatusBadGateway: struct{}{},
|
||||||
|
http.StatusServiceUnavailable: struct{}{},
|
||||||
|
http.StatusGatewayTimeout: struct{}{},
|
||||||
},
|
},
|
||||||
Ratelimiter: rl,
|
logger: logger.New("request"),
|
||||||
Headers: headers,
|
timeout: 60 * time.Second,
|
||||||
|
proxy: "",
|
||||||
|
headers: make(map[string]string), // Initialize headers map
|
||||||
|
statusCooldowns: make(map[int]time.Duration),
|
||||||
|
lastStatusTime: make(map[int]time.Time),
|
||||||
}
|
}
|
||||||
return c
|
|
||||||
|
// default http client
|
||||||
|
client.client = &http.Client{
|
||||||
|
Timeout: client.timeout,
|
||||||
|
}
|
||||||
|
|
||||||
|
// Apply options before configuring transport
|
||||||
|
for _, option := range options {
|
||||||
|
option(client)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if transport was set by WithTransport option
|
||||||
|
if client.client.Transport == nil {
|
||||||
|
transport := &http.Transport{
|
||||||
|
TLSClientConfig: &tls.Config{
|
||||||
|
InsecureSkipVerify: client.skipTLSVerify,
|
||||||
|
},
|
||||||
|
// Connection pooling
|
||||||
|
MaxIdleConns: 100,
|
||||||
|
MaxIdleConnsPerHost: 50,
|
||||||
|
MaxConnsPerHost: 100,
|
||||||
|
|
||||||
|
// Timeouts
|
||||||
|
IdleConnTimeout: 90 * time.Second,
|
||||||
|
TLSHandshakeTimeout: 10 * time.Second,
|
||||||
|
ResponseHeaderTimeout: 10 * time.Second,
|
||||||
|
ExpectContinueTimeout: 1 * time.Second,
|
||||||
|
|
||||||
|
// TCP keep-alive
|
||||||
|
DialContext: (&net.Dialer{
|
||||||
|
Timeout: 30 * time.Second,
|
||||||
|
KeepAlive: 30 * time.Second,
|
||||||
|
}).DialContext,
|
||||||
|
|
||||||
|
// Enable HTTP/2
|
||||||
|
ForceAttemptHTTP2: true,
|
||||||
|
|
||||||
|
// Disable compression to save CPU
|
||||||
|
DisableCompression: false,
|
||||||
|
}
|
||||||
|
|
||||||
|
// Configure proxy if needed
|
||||||
|
if client.proxy != "" {
|
||||||
|
if strings.HasPrefix(client.proxy, "socks5://") {
|
||||||
|
// Handle SOCKS5 proxy
|
||||||
|
socksURL, err := url.Parse(client.proxy)
|
||||||
|
if err != nil {
|
||||||
|
client.logger.Error().Msgf("Failed to parse SOCKS5 proxy URL: %v", err)
|
||||||
|
} else {
|
||||||
|
auth := &proxy.Auth{}
|
||||||
|
if socksURL.User != nil {
|
||||||
|
auth.User = socksURL.User.Username()
|
||||||
|
password, _ := socksURL.User.Password()
|
||||||
|
auth.Password = password
|
||||||
|
}
|
||||||
|
|
||||||
|
dialer, err := proxy.SOCKS5("tcp", socksURL.Host, auth, proxy.Direct)
|
||||||
|
if err != nil {
|
||||||
|
client.logger.Error().Msgf("Failed to create SOCKS5 dialer: %v", err)
|
||||||
|
} else {
|
||||||
|
transport.DialContext = func(ctx context.Context, network, addr string) (net.Conn, error) {
|
||||||
|
return dialer.Dial(network, addr)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
proxyURL, err := url.Parse(client.proxy)
|
||||||
|
if err != nil {
|
||||||
|
client.logger.Error().Msgf("Failed to parse proxy URL: %v", err)
|
||||||
|
} else {
|
||||||
|
transport.Proxy = http.ProxyURL(proxyURL)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
transport.Proxy = http.ProxyFromEnvironment
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set the transport to the client
|
||||||
|
client.client.Transport = transport
|
||||||
|
}
|
||||||
|
|
||||||
|
return client
|
||||||
}
|
}
|
||||||
|
|
||||||
func ParseRateLimit(rateStr string) *rate.Limiter {
|
func ParseRateLimit(rateStr string) *rate.Limiter {
|
||||||
@@ -149,9 +431,11 @@ func ParseRateLimit(rateStr string) *rate.Limiter {
|
|||||||
switch unit {
|
switch unit {
|
||||||
case "minute":
|
case "minute":
|
||||||
reqsPerSecond := float64(count) / 60.0
|
reqsPerSecond := float64(count) / 60.0
|
||||||
return rate.NewLimiter(rate.Limit(reqsPerSecond), 5)
|
burstSize := int(math.Max(30, float64(count)*0.25))
|
||||||
|
return rate.NewLimiter(rate.Limit(reqsPerSecond), burstSize)
|
||||||
case "second":
|
case "second":
|
||||||
return rate.NewLimiter(rate.Limit(float64(count)), 5)
|
burstSize := int(math.Max(30, float64(count)*5))
|
||||||
|
return rate.NewLimiter(rate.Limit(float64(count)), burstSize)
|
||||||
default:
|
default:
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@@ -160,5 +444,33 @@ func ParseRateLimit(rateStr string) *rate.Limiter {
|
|||||||
func JSONResponse(w http.ResponseWriter, data interface{}, code int) {
|
func JSONResponse(w http.ResponseWriter, data interface{}, code int) {
|
||||||
w.Header().Set("Content-Type", "application/json")
|
w.Header().Set("Content-Type", "application/json")
|
||||||
w.WriteHeader(code)
|
w.WriteHeader(code)
|
||||||
json.NewEncoder(w).Encode(data)
|
err := json.NewEncoder(w).Encode(data)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func Gzip(body []byte) []byte {
|
||||||
|
|
||||||
|
var b bytes.Buffer
|
||||||
|
if len(body) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
gz := gzip.NewWriter(&b)
|
||||||
|
_, err := gz.Write(body)
|
||||||
|
if err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
err = gz.Close()
|
||||||
|
if err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return b.Bytes()
|
||||||
|
}
|
||||||
|
|
||||||
|
func Default() *Client {
|
||||||
|
once.Do(func() {
|
||||||
|
instance = New()
|
||||||
|
})
|
||||||
|
return instance
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -8,6 +8,7 @@ import (
|
|||||||
"encoding/hex"
|
"encoding/hex"
|
||||||
"fmt"
|
"fmt"
|
||||||
"github.com/anacrolix/torrent/metainfo"
|
"github.com/anacrolix/torrent/metainfo"
|
||||||
|
"github.com/sirrobot01/decypharr/internal/request"
|
||||||
"io"
|
"io"
|
||||||
"log"
|
"log"
|
||||||
"net/http"
|
"net/http"
|
||||||
@@ -24,21 +25,38 @@ type Magnet struct {
|
|||||||
InfoHash string
|
InfoHash string
|
||||||
Size int64
|
Size int64
|
||||||
Link string
|
Link string
|
||||||
|
File []byte
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Magnet) IsTorrent() bool {
|
||||||
|
return m.File != nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func GetMagnetFromFile(file io.Reader, filePath string) (*Magnet, error) {
|
func GetMagnetFromFile(file io.Reader, filePath string) (*Magnet, error) {
|
||||||
|
var (
|
||||||
|
m *Magnet
|
||||||
|
err error
|
||||||
|
)
|
||||||
if filepath.Ext(filePath) == ".torrent" {
|
if filepath.Ext(filePath) == ".torrent" {
|
||||||
torrentData, err := io.ReadAll(file)
|
torrentData, err := io.ReadAll(file)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return GetMagnetFromBytes(torrentData)
|
m, err = GetMagnetFromBytes(torrentData)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
} else {
|
} else {
|
||||||
// .magnet file
|
// .magnet file
|
||||||
magnetLink := ReadMagnetFile(file)
|
magnetLink := ReadMagnetFile(file)
|
||||||
return GetMagnetInfo(magnetLink)
|
m, err = GetMagnetInfo(magnetLink)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
m.Name = strings.TrimSuffix(filePath, filepath.Ext(filePath))
|
||||||
|
return m, nil
|
||||||
|
}
|
||||||
|
|
||||||
func GetMagnetFromUrl(url string) (*Magnet, error) {
|
func GetMagnetFromUrl(url string) (*Magnet, error) {
|
||||||
if strings.HasPrefix(url, "magnet:") {
|
if strings.HasPrefix(url, "magnet:") {
|
||||||
@@ -67,6 +85,7 @@ func GetMagnetFromBytes(torrentData []byte) (*Magnet, error) {
|
|||||||
Name: info.Name,
|
Name: info.Name,
|
||||||
Size: info.Length,
|
Size: info.Length,
|
||||||
Link: mi.Magnet(&hash, &info).String(),
|
Link: mi.Magnet(&hash, &info).String(),
|
||||||
|
File: torrentData,
|
||||||
}
|
}
|
||||||
return magnet, nil
|
return magnet, nil
|
||||||
}
|
}
|
||||||
@@ -198,9 +217,7 @@ func GetInfohashFromURL(url string) (string, error) {
|
|||||||
var magnetLink string
|
var magnetLink string
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
client := &http.Client{
|
redirectFunc := func(req *http.Request, via []*http.Request) error {
|
||||||
Timeout: 30 * time.Second,
|
|
||||||
CheckRedirect: func(req *http.Request, via []*http.Request) error {
|
|
||||||
if len(via) >= 3 {
|
if len(via) >= 3 {
|
||||||
return fmt.Errorf("stopped after 3 redirects")
|
return fmt.Errorf("stopped after 3 redirects")
|
||||||
}
|
}
|
||||||
@@ -210,8 +227,11 @@ func GetInfohashFromURL(url string) (string, error) {
|
|||||||
return http.ErrUseLastResponse
|
return http.ErrUseLastResponse
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
},
|
|
||||||
}
|
}
|
||||||
|
client := request.New(
|
||||||
|
request.WithTimeout(30*time.Second),
|
||||||
|
request.WithRedirectPolicy(redirectFunc),
|
||||||
|
)
|
||||||
req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
|
req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
@@ -233,3 +253,15 @@ func GetInfohashFromURL(url string) (string, error) {
|
|||||||
infoHash := hash.HexString()
|
infoHash := hash.HexString()
|
||||||
return infoHash, nil
|
return infoHash, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func ConstructMagnet(infoHash, name string) *Magnet {
|
||||||
|
// Create a magnet link from the infohash and name
|
||||||
|
name = url.QueryEscape(strings.TrimSpace(name))
|
||||||
|
magnetUri := fmt.Sprintf("magnet:?xt=urn:btih:%s&dn=%s", infoHash, name)
|
||||||
|
return &Magnet{
|
||||||
|
InfoHash: infoHash,
|
||||||
|
Name: name,
|
||||||
|
Size: 0,
|
||||||
|
Link: magnetUri,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
@@ -7,11 +7,11 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
VIDEOMATCH = "(?i)(\\.)(YUV|WMV|WEBM|VOB|VIV|SVI|ROQ|RMVB|RM|OGV|OGG|NSV|MXF|MPG|MPEG|M2V|MP2|MPE|MPV|MP4|M4P|M4V|MOV|QT|MNG|MKV|FLV|DRC|AVI|ASF|AMV|MKA|F4V|3GP|3G2|DIVX|X264|X265)$"
|
VIDEOMATCH = "(?i)(\\.)(webm|m4v|3gp|nsv|ty|strm|rm|rmvb|m3u|ifo|mov|qt|divx|xvid|bivx|nrg|pva|wmv|asf|asx|ogm|ogv|m2v|avi|bin|dat|dvr-ms|mpg|mpeg|mp4|avc|vp3|svq3|nuv|viv|dv|fli|flv|wpl|img|iso|vob|mkv|mk3d|ts|wtv|m2ts)$"
|
||||||
MUSICMATCH = "(?i)(\\.)(?:MP3|WAV|FLAC|AAC|OGG|WMA|AIFF|ALAC|M4A|APE|AC3|DTS|M4P|MID|MIDI|MKA|MP2|MPA|RA|VOC|WV|AMR)$"
|
MUSICMATCH = "(?i)(\\.)(mp2|mp3|m4a|m4b|m4p|ogg|oga|opus|wma|wav|wv|flac|ape|aif|aiff|aifc)$"
|
||||||
)
|
)
|
||||||
|
|
||||||
var SAMPLEMATCH = `(?i)(^|[\\/]|[._-])(sample|trailer|thumb)s?([._-]|$)`
|
var SAMPLEMATCH = `(?i)(^|[\\/])(sample|trailer|thumb|special|extras?)s?([\s._-]|$|/)|(\(sample\))|(-\s*sample)`
|
||||||
|
|
||||||
func RegexMatch(regex string, value string) bool {
|
func RegexMatch(regex string, value string) bool {
|
||||||
re := regexp.MustCompile(regex)
|
re := regexp.MustCompile(regex)
|
||||||
@@ -37,7 +37,7 @@ func RemoveInvalidChars(value string) string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func RemoveExtension(value string) string {
|
func RemoveExtension(value string) string {
|
||||||
re := regexp.MustCompile(VIDEOMATCH + "|" + SAMPLEMATCH + "|" + MUSICMATCH)
|
re := regexp.MustCompile(VIDEOMATCH + "|" + MUSICMATCH)
|
||||||
|
|
||||||
// Find the last index of the matched extension
|
// Find the last index of the matched extension
|
||||||
loc := re.FindStringIndex(value)
|
loc := re.FindStringIndex(value)
|
||||||
|
|||||||
36
main.go
36
main.go
@@ -3,12 +3,34 @@ package main
|
|||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"flag"
|
"flag"
|
||||||
"github.com/sirrobot01/debrid-blackhole/cmd/decypharr"
|
"github.com/sirrobot01/decypharr/cmd/decypharr"
|
||||||
"github.com/sirrobot01/debrid-blackhole/internal/config"
|
"github.com/sirrobot01/decypharr/internal/config"
|
||||||
|
"github.com/sirrobot01/decypharr/pkg/version"
|
||||||
"log"
|
"log"
|
||||||
|
"net/http"
|
||||||
|
_ "net/http/pprof" // registers pprof handlers
|
||||||
|
"os"
|
||||||
|
"os/signal"
|
||||||
|
"runtime/debug"
|
||||||
|
"syscall"
|
||||||
)
|
)
|
||||||
|
|
||||||
func main() {
|
func main() {
|
||||||
|
defer func() {
|
||||||
|
if r := recover(); r != nil {
|
||||||
|
log.Printf("FATAL: Recovered from panic in main: %v\n", r)
|
||||||
|
debug.PrintStack()
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
if version.GetInfo().Channel == "dev" {
|
||||||
|
log.Println("Running in dev mode")
|
||||||
|
go func() {
|
||||||
|
if err := http.ListenAndServe(":6060", nil); err != nil {
|
||||||
|
log.Fatalf("pprof server failed: %v", err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
}
|
||||||
var configPath string
|
var configPath string
|
||||||
flag.StringVar(&configPath, "config", "/data", "path to the data folder")
|
flag.StringVar(&configPath, "config", "/data", "path to the data folder")
|
||||||
flag.Parse()
|
flag.Parse()
|
||||||
@@ -16,10 +38,14 @@ func main() {
|
|||||||
if err := config.SetConfigPath(configPath); err != nil {
|
if err := config.SetConfigPath(configPath); err != nil {
|
||||||
log.Fatal(err)
|
log.Fatal(err)
|
||||||
}
|
}
|
||||||
config.GetConfig()
|
|
||||||
ctx := context.Background()
|
config.Get()
|
||||||
|
|
||||||
|
// Create a context that's cancelled on SIGINT/SIGTERM
|
||||||
|
ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
|
||||||
|
defer stop()
|
||||||
|
|
||||||
if err := decypharr.Start(ctx); err != nil {
|
if err := decypharr.Start(ctx); err != nil {
|
||||||
log.Fatal(err)
|
log.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -2,12 +2,16 @@ package arr
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"encoding/json"
|
"fmt"
|
||||||
"github.com/sirrobot01/debrid-blackhole/internal/config"
|
"github.com/goccy/go-json"
|
||||||
"github.com/sirrobot01/debrid-blackhole/internal/request"
|
"github.com/sirrobot01/decypharr/internal/config"
|
||||||
|
"github.com/sirrobot01/decypharr/internal/request"
|
||||||
|
"io"
|
||||||
"net/http"
|
"net/http"
|
||||||
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Type is a type of arr
|
// Type is a type of arr
|
||||||
@@ -20,51 +24,92 @@ const (
|
|||||||
Readarr Type = "readarr"
|
Readarr Type = "readarr"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
|
||||||
client *request.RLHTTPClient = request.NewRLHTTPClient(nil, nil)
|
|
||||||
)
|
|
||||||
|
|
||||||
type Arr struct {
|
type Arr struct {
|
||||||
Name string `json:"name"`
|
Name string `json:"name"`
|
||||||
Host string `json:"host"`
|
Host string `json:"host"`
|
||||||
Token string `json:"token"`
|
Token string `json:"token"`
|
||||||
Type Type `json:"type"`
|
Type Type `json:"type"`
|
||||||
Cleanup bool `json:"cleanup"`
|
Cleanup bool `json:"cleanup"`
|
||||||
|
SkipRepair bool `json:"skip_repair"`
|
||||||
|
DownloadUncached *bool `json:"download_uncached"`
|
||||||
|
client *request.Client
|
||||||
}
|
}
|
||||||
|
|
||||||
func New(name, host, token string, cleanup bool) *Arr {
|
func New(name, host, token string, cleanup, skipRepair bool, downloadUncached *bool) *Arr {
|
||||||
return &Arr{
|
return &Arr{
|
||||||
Name: name,
|
Name: name,
|
||||||
Host: host,
|
Host: host,
|
||||||
Token: token,
|
Token: strings.TrimSpace(token),
|
||||||
Type: InferType(host, name),
|
Type: InferType(host, name),
|
||||||
Cleanup: cleanup,
|
Cleanup: cleanup,
|
||||||
|
SkipRepair: skipRepair,
|
||||||
|
DownloadUncached: downloadUncached,
|
||||||
|
client: request.New(),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (a *Arr) Request(method, endpoint string, payload interface{}) (*http.Response, error) {
|
func (a *Arr) Request(method, endpoint string, payload interface{}) (*http.Response, error) {
|
||||||
if a.Token == "" || a.Host == "" {
|
if a.Token == "" || a.Host == "" {
|
||||||
return nil, nil
|
return nil, fmt.Errorf("arr not configured")
|
||||||
}
|
}
|
||||||
url, err := request.JoinURL(a.Host, endpoint)
|
url, err := request.JoinURL(a.Host, endpoint)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
var jsonPayload []byte
|
var body io.Reader
|
||||||
|
|
||||||
if payload != nil {
|
if payload != nil {
|
||||||
jsonPayload, err = json.Marshal(payload)
|
b, err := json.Marshal(payload)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
body = bytes.NewReader(b)
|
||||||
}
|
}
|
||||||
req, err := http.NewRequest(method, url, bytes.NewBuffer(jsonPayload))
|
req, err := http.NewRequest(method, url, body)
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
req.Header.Set("Content-Type", "application/json")
|
req.Header.Set("Content-Type", "application/json")
|
||||||
req.Header.Set("X-Api-Key", a.Token)
|
req.Header.Set("X-Api-Key", a.Token)
|
||||||
return client.Do(req)
|
if a.client == nil {
|
||||||
|
a.client = request.New()
|
||||||
|
}
|
||||||
|
|
||||||
|
var resp *http.Response
|
||||||
|
|
||||||
|
for attempts := 0; attempts < 5; attempts++ {
|
||||||
|
resp, err = a.client.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// If we got a 401, wait briefly and retry
|
||||||
|
if resp.StatusCode == http.StatusUnauthorized {
|
||||||
|
resp.Body.Close() // Don't leak response bodies
|
||||||
|
if attempts < 4 { // Don't sleep on the last attempt
|
||||||
|
time.Sleep(time.Duration(attempts+1) * 100 * time.Millisecond)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return resp, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return resp, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *Arr) Validate() error {
|
||||||
|
if a.Token == "" || a.Host == "" {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
resp, err := a.Request("GET", "/api/v3/health", nil)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if resp.StatusCode != http.StatusOK {
|
||||||
|
return fmt.Errorf("arr test failed: %s", resp.Status)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
type Storage struct {
|
type Storage struct {
|
||||||
@@ -89,9 +134,9 @@ func InferType(host, name string) Type {
|
|||||||
|
|
||||||
func NewStorage() *Storage {
|
func NewStorage() *Storage {
|
||||||
arrs := make(map[string]*Arr)
|
arrs := make(map[string]*Arr)
|
||||||
for _, a := range config.GetConfig().Arrs {
|
for _, a := range config.Get().Arrs {
|
||||||
name := a.Name
|
name := a.Name
|
||||||
arrs[name] = New(name, a.Host, a.Token, a.Cleanup)
|
arrs[name] = New(name, a.Host, a.Token, a.Cleanup, a.SkipRepair, a.DownloadUncached)
|
||||||
}
|
}
|
||||||
return &Storage{
|
return &Storage{
|
||||||
Arrs: arrs,
|
Arrs: arrs,
|
||||||
@@ -124,3 +169,21 @@ func (as *Storage) GetAll() []*Arr {
|
|||||||
}
|
}
|
||||||
return arrs
|
return arrs
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (a *Arr) Refresh() error {
|
||||||
|
payload := struct {
|
||||||
|
Name string `json:"name"`
|
||||||
|
}{
|
||||||
|
Name: "RefreshMonitoredDownloads",
|
||||||
|
}
|
||||||
|
|
||||||
|
resp, err := a.Request(http.MethodPost, "api/v3/command", payload)
|
||||||
|
if err == nil && resp != nil {
|
||||||
|
statusOk := strconv.Itoa(resp.StatusCode)[0] == '2'
|
||||||
|
if statusOk {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return fmt.Errorf("failed to refresh: %v", err)
|
||||||
|
}
|
||||||
|
|||||||
@@ -1,31 +1,58 @@
|
|||||||
package arr
|
package arr
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/json"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"github.com/goccy/go-json"
|
||||||
|
"golang.org/x/sync/errgroup"
|
||||||
"net/http"
|
"net/http"
|
||||||
"strconv"
|
"strconv"
|
||||||
|
"strings"
|
||||||
)
|
)
|
||||||
|
|
||||||
func (a *Arr) GetMedia(tvId string) ([]Content, error) {
|
type episode struct {
|
||||||
|
Id int `json:"id"`
|
||||||
|
EpisodeFileID int `json:"episodeFileId"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type sonarrSearch struct {
|
||||||
|
Name string `json:"name"`
|
||||||
|
SeasonNumber int `json:"seasonNumber"`
|
||||||
|
SeriesId int `json:"seriesId"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type radarrSearch struct {
|
||||||
|
Name string `json:"name"`
|
||||||
|
MovieIds []int `json:"movieIds"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *Arr) GetMedia(mediaId string) ([]Content, error) {
|
||||||
// Get series
|
// Get series
|
||||||
resp, err := a.Request(http.MethodGet, fmt.Sprintf("api/v3/series?tvdbId=%s", tvId), nil)
|
if a.Type == Radarr {
|
||||||
|
return GetMovies(a, mediaId)
|
||||||
|
}
|
||||||
|
// This is likely Sonarr
|
||||||
|
resp, err := a.Request(http.MethodGet, fmt.Sprintf("api/v3/series?tvdbId=%s", mediaId), nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
if resp.StatusCode == http.StatusNotFound {
|
if resp.StatusCode == http.StatusNotFound {
|
||||||
// This is likely Radarr
|
// This is likely Radarr
|
||||||
return GetMovies(a, tvId)
|
return GetMovies(a, mediaId)
|
||||||
}
|
}
|
||||||
a.Type = Sonarr
|
a.Type = Sonarr
|
||||||
defer resp.Body.Close()
|
|
||||||
type series struct {
|
type series struct {
|
||||||
Title string `json:"title"`
|
Title string `json:"title"`
|
||||||
Id int `json:"id"`
|
Id int `json:"id"`
|
||||||
}
|
}
|
||||||
|
if resp.StatusCode != http.StatusOK {
|
||||||
|
return nil, fmt.Errorf("failed to get series: %s", resp.Status)
|
||||||
|
}
|
||||||
var data []series
|
var data []series
|
||||||
if err = json.NewDecoder(resp.Body).Decode(&data); err != nil {
|
if err = json.NewDecoder(resp.Body).Decode(&data); err != nil {
|
||||||
return nil, err
|
return nil, fmt.Errorf("failed to decode series: %v", err)
|
||||||
}
|
}
|
||||||
// Get series files
|
// Get series files
|
||||||
contents := make([]Content, 0)
|
contents := make([]Content, 0)
|
||||||
@@ -34,45 +61,56 @@ func (a *Arr) GetMedia(tvId string) ([]Content, error) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
defer resp.Body.Close()
|
var ct Content
|
||||||
var seriesFiles []seriesFile
|
var seriesFiles []seriesFile
|
||||||
|
episodeFileIDMap := make(map[int]int)
|
||||||
|
func() {
|
||||||
|
defer resp.Body.Close()
|
||||||
if err = json.NewDecoder(resp.Body).Decode(&seriesFiles); err != nil {
|
if err = json.NewDecoder(resp.Body).Decode(&seriesFiles); err != nil {
|
||||||
continue
|
return
|
||||||
}
|
}
|
||||||
ct := Content{
|
ct = Content{
|
||||||
Title: d.Title,
|
Title: d.Title,
|
||||||
Id: d.Id,
|
Id: d.Id,
|
||||||
}
|
}
|
||||||
|
}()
|
||||||
type episode struct {
|
|
||||||
Id int `json:"id"`
|
|
||||||
EpisodeFileID int `json:"episodeFileId"`
|
|
||||||
}
|
|
||||||
resp, err = a.Request(http.MethodGet, fmt.Sprintf("api/v3/episode?seriesId=%d", d.Id), nil)
|
resp, err = a.Request(http.MethodGet, fmt.Sprintf("api/v3/episode?seriesId=%d", d.Id), nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
func() {
|
||||||
defer resp.Body.Close()
|
defer resp.Body.Close()
|
||||||
var episodes []episode
|
var episodes []episode
|
||||||
if err = json.NewDecoder(resp.Body).Decode(&episodes); err != nil {
|
if err = json.NewDecoder(resp.Body).Decode(&episodes); err != nil {
|
||||||
continue
|
return
|
||||||
}
|
}
|
||||||
episodeFileIDMap := make(map[int]int)
|
|
||||||
for _, e := range episodes {
|
for _, e := range episodes {
|
||||||
episodeFileIDMap[e.EpisodeFileID] = e.Id
|
episodeFileIDMap[e.EpisodeFileID] = e.Id
|
||||||
}
|
}
|
||||||
|
}()
|
||||||
files := make([]ContentFile, 0)
|
files := make([]ContentFile, 0)
|
||||||
for _, file := range seriesFiles {
|
for _, file := range seriesFiles {
|
||||||
eId, ok := episodeFileIDMap[file.Id]
|
eId, ok := episodeFileIDMap[file.Id]
|
||||||
if !ok {
|
if !ok {
|
||||||
eId = 0
|
eId = 0
|
||||||
}
|
}
|
||||||
|
if file.Id == 0 || file.Path == "" {
|
||||||
|
// Skip files without path
|
||||||
|
continue
|
||||||
|
}
|
||||||
files = append(files, ContentFile{
|
files = append(files, ContentFile{
|
||||||
FileId: file.Id,
|
FileId: file.Id,
|
||||||
Path: file.Path,
|
Path: file.Path,
|
||||||
Id: eId,
|
Id: d.Id,
|
||||||
|
EpisodeId: eId,
|
||||||
|
SeasonNumber: file.SeasonNumber,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
if len(files) == 0 {
|
||||||
|
// Skip series without files
|
||||||
|
continue
|
||||||
|
}
|
||||||
ct.Files = files
|
ct.Files = files
|
||||||
contents = append(contents, ct)
|
contents = append(contents, ct)
|
||||||
}
|
}
|
||||||
@@ -92,15 +130,20 @@ func GetMovies(a *Arr, tvId string) ([]Content, error) {
|
|||||||
defer resp.Body.Close()
|
defer resp.Body.Close()
|
||||||
var movies []Movie
|
var movies []Movie
|
||||||
if err = json.NewDecoder(resp.Body).Decode(&movies); err != nil {
|
if err = json.NewDecoder(resp.Body).Decode(&movies); err != nil {
|
||||||
return nil, err
|
return nil, fmt.Errorf("failed to decode movies: %v", err)
|
||||||
}
|
}
|
||||||
contents := make([]Content, 0)
|
contents := make([]Content, 0)
|
||||||
for _, movie := range movies {
|
for _, movie := range movies {
|
||||||
|
if movie.MovieFile.Id == 0 || movie.MovieFile.Path == "" {
|
||||||
|
// Skip movies without files
|
||||||
|
continue
|
||||||
|
}
|
||||||
ct := Content{
|
ct := Content{
|
||||||
Title: movie.Title,
|
Title: movie.Title,
|
||||||
Id: movie.Id,
|
Id: movie.Id,
|
||||||
}
|
}
|
||||||
files := make([]ContentFile, 0)
|
files := make([]ContentFile, 0)
|
||||||
|
|
||||||
files = append(files, ContentFile{
|
files = append(files, ContentFile{
|
||||||
FileId: movie.MovieFile.Id,
|
FileId: movie.MovieFile.Id,
|
||||||
Id: movie.Id,
|
Id: movie.Id,
|
||||||
@@ -112,29 +155,72 @@ func GetMovies(a *Arr, tvId string) ([]Content, error) {
|
|||||||
return contents, nil
|
return contents, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (a *Arr) search(ids []int) error {
|
// searchSonarr searches for missing files in the arr
|
||||||
var payload interface{}
|
// map ids are series id and season number
|
||||||
switch a.Type {
|
func (a *Arr) searchSonarr(files []ContentFile) error {
|
||||||
case Sonarr:
|
ids := make(map[string]any)
|
||||||
payload = struct {
|
for _, f := range files {
|
||||||
Name string `json:"name"`
|
// Join series id and season number
|
||||||
EpisodeIds []int `json:"episodeIds"`
|
id := fmt.Sprintf("%d-%d", f.Id, f.SeasonNumber)
|
||||||
}{
|
ids[id] = nil
|
||||||
Name: "EpisodeSearch",
|
|
||||||
EpisodeIds: ids,
|
|
||||||
}
|
}
|
||||||
case Radarr:
|
|
||||||
payload = struct {
|
g, ctx := errgroup.WithContext(context.Background())
|
||||||
Name string `json:"name"`
|
|
||||||
MovieIds []int `json:"movieIds"`
|
// Limit concurrent goroutines
|
||||||
}{
|
g.SetLimit(10)
|
||||||
|
for id := range ids {
|
||||||
|
id := id
|
||||||
|
g.Go(func() error {
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
return ctx.Err()
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
|
||||||
|
parts := strings.Split(id, "-")
|
||||||
|
if len(parts) != 2 {
|
||||||
|
return fmt.Errorf("invalid id: %s", id)
|
||||||
|
}
|
||||||
|
seriesId, err := strconv.Atoi(parts[0])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
seasonNumber, err := strconv.Atoi(parts[1])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
payload := sonarrSearch{
|
||||||
|
Name: "SeasonSearch",
|
||||||
|
SeasonNumber: seasonNumber,
|
||||||
|
SeriesId: seriesId,
|
||||||
|
}
|
||||||
|
resp, err := a.Request(http.MethodPost, "api/v3/command", payload)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to automatic search: %v", err)
|
||||||
|
}
|
||||||
|
if resp.StatusCode >= 300 || resp.StatusCode < 200 {
|
||||||
|
return fmt.Errorf("failed to automatic search. Status Code: %s", resp.Status)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
}
|
||||||
|
if err := g.Wait(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *Arr) searchRadarr(files []ContentFile) error {
|
||||||
|
ids := make([]int, 0)
|
||||||
|
for _, f := range files {
|
||||||
|
ids = append(ids, f.Id)
|
||||||
|
}
|
||||||
|
payload := radarrSearch{
|
||||||
Name: "MoviesSearch",
|
Name: "MoviesSearch",
|
||||||
MovieIds: ids,
|
MovieIds: ids,
|
||||||
}
|
}
|
||||||
default:
|
|
||||||
return fmt.Errorf("unknown arr type: %s", a.Type)
|
|
||||||
}
|
|
||||||
|
|
||||||
resp, err := a.Request(http.MethodPost, "api/v3/command", payload)
|
resp, err := a.Request(http.MethodPost, "api/v3/command", payload)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("failed to automatic search: %v", err)
|
return fmt.Errorf("failed to automatic search: %v", err)
|
||||||
@@ -146,16 +232,14 @@ func (a *Arr) search(ids []int) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (a *Arr) SearchMissing(files []ContentFile) error {
|
func (a *Arr) SearchMissing(files []ContentFile) error {
|
||||||
|
switch a.Type {
|
||||||
ids := make([]int, 0)
|
case Sonarr:
|
||||||
for _, f := range files {
|
return a.searchSonarr(files)
|
||||||
ids = append(ids, f.Id)
|
case Radarr:
|
||||||
|
return a.searchRadarr(files)
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("unknown arr type: %s", a.Type)
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(ids) == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return a.search(ids)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (a *Arr) DeleteFiles(files []ContentFile) error {
|
func (a *Arr) DeleteFiles(files []ContentFile) error {
|
||||||
|
|||||||
@@ -1,9 +1,11 @@
|
|||||||
package arr
|
package arr
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/json"
|
"github.com/goccy/go-json"
|
||||||
|
"io"
|
||||||
"net/http"
|
"net/http"
|
||||||
gourl "net/url"
|
gourl "net/url"
|
||||||
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -77,24 +79,43 @@ func (a *Arr) GetQueue() []QueueSchema {
|
|||||||
query.Add("page", "1")
|
query.Add("page", "1")
|
||||||
query.Add("pageSize", "200")
|
query.Add("pageSize", "200")
|
||||||
results := make([]QueueSchema, 0)
|
results := make([]QueueSchema, 0)
|
||||||
|
|
||||||
for {
|
for {
|
||||||
url := "api/v3/queue" + "?" + query.Encode()
|
url := "api/v3/queue" + "?" + query.Encode()
|
||||||
resp, err := a.Request(http.MethodGet, url, nil)
|
resp, err := a.Request(http.MethodGet, url, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
defer resp.Body.Close()
|
|
||||||
|
func() {
|
||||||
|
defer func(Body io.ReadCloser) {
|
||||||
|
err := Body.Close()
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}(resp.Body)
|
||||||
|
|
||||||
var data QueueResponseScheme
|
var data QueueResponseScheme
|
||||||
if err = json.NewDecoder(resp.Body).Decode(&data); err != nil {
|
if err = json.NewDecoder(resp.Body).Decode(&data); err != nil {
|
||||||
break
|
return
|
||||||
}
|
}
|
||||||
if len(results) < data.TotalRecords {
|
|
||||||
results = append(results, data.Records...)
|
results = append(results, data.Records...)
|
||||||
query.Set("page", string(rune(data.Page+1)))
|
|
||||||
} else {
|
if len(results) >= data.TotalRecords {
|
||||||
|
// We've fetched all records
|
||||||
|
err = io.EOF // Signal to exit the loop
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
query.Set("page", strconv.Itoa(data.Page+1))
|
||||||
|
}()
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return results
|
return results
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -116,6 +137,10 @@ func (a *Arr) CleanupQueue() error {
|
|||||||
isMessedUp = true
|
isMessedUp = true
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
|
if strings.Contains(m.Title, "One or more episodes expected in this release were not imported or missing from the release") {
|
||||||
|
isMessedUp = true
|
||||||
|
break
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -133,13 +158,11 @@ func (a *Arr) CleanupQueue() error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
queueIds := make([]int, 0)
|
queueIds := make([]int, 0)
|
||||||
episodesIds := make([]int, 0)
|
|
||||||
|
|
||||||
for _, c := range cleanups {
|
for _, c := range cleanups {
|
||||||
// Delete the messed up episodes from queue
|
// Delete the messed up episodes from queue
|
||||||
for _, m := range c {
|
for _, m := range c {
|
||||||
queueIds = append(queueIds, m.id)
|
queueIds = append(queueIds, m.id)
|
||||||
episodesIds = append(episodesIds, m.episodeId)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -1,8 +1,8 @@
|
|||||||
package arr
|
package arr
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"github.com/goccy/go-json"
|
||||||
"io"
|
"io"
|
||||||
"net/http"
|
"net/http"
|
||||||
gourl "net/url"
|
gourl "net/url"
|
||||||
|
|||||||
@@ -1,54 +0,0 @@
|
|||||||
package arr
|
|
||||||
|
|
||||||
import (
|
|
||||||
"cmp"
|
|
||||||
"fmt"
|
|
||||||
"github.com/sirrobot01/debrid-blackhole/internal/request"
|
|
||||||
"net/http"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
func (a *Arr) Refresh() error {
|
|
||||||
payload := map[string]string{"name": "RefreshMonitoredDownloads"}
|
|
||||||
|
|
||||||
resp, err := a.Request(http.MethodPost, "api/v3/command", payload)
|
|
||||||
if err == nil && resp != nil {
|
|
||||||
statusOk := strconv.Itoa(resp.StatusCode)[0] == '2'
|
|
||||||
if statusOk {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return fmt.Errorf("failed to refresh monitored downloads for %s", cmp.Or(a.Name, a.Host))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *Arr) Blacklist(infoHash string) error {
|
|
||||||
downloadId := strings.ToUpper(infoHash)
|
|
||||||
history := a.GetHistory(downloadId, "grabbed")
|
|
||||||
if history == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
torrentId := 0
|
|
||||||
for _, record := range history.Records {
|
|
||||||
if strings.EqualFold(record.DownloadID, downloadId) {
|
|
||||||
torrentId = record.ID
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if torrentId != 0 {
|
|
||||||
url, err := request.JoinURL(a.Host, "history/failed/", strconv.Itoa(torrentId))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
req, err := http.NewRequest(http.MethodPost, url, nil)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
client := &http.Client{}
|
|
||||||
_, err = client.Do(req)
|
|
||||||
if err == nil {
|
|
||||||
return fmt.Errorf("failed to mark %s as failed: %v", cmp.Or(a.Name, a.Host), err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
@@ -1,31 +0,0 @@
|
|||||||
package arr
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"net/http"
|
|
||||||
url2 "net/url"
|
|
||||||
)
|
|
||||||
|
|
||||||
type TMDBResponse struct {
|
|
||||||
Page int `json:"page"`
|
|
||||||
Results []struct {
|
|
||||||
ID int `json:"id"`
|
|
||||||
Name string `json:"name"`
|
|
||||||
MediaType string `json:"media_type"`
|
|
||||||
PosterPath string `json:"poster_path"`
|
|
||||||
} `json:"results"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func SearchTMDB(term string) (*TMDBResponse, error) {
|
|
||||||
resp, err := http.Get("https://api.themoviedb.org/3/search/multi?api_key=key&query=" + url2.QueryEscape(term))
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
var data *TMDBResponse
|
|
||||||
if err = json.NewDecoder(resp.Body).Decode(&data); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return data, nil
|
|
||||||
}
|
|
||||||
@@ -8,7 +8,6 @@ type Movie struct {
|
|||||||
MovieId int `json:"movieId"`
|
MovieId int `json:"movieId"`
|
||||||
RelativePath string `json:"relativePath"`
|
RelativePath string `json:"relativePath"`
|
||||||
Path string `json:"path"`
|
Path string `json:"path"`
|
||||||
Size int `json:"size"`
|
|
||||||
Id int `json:"id"`
|
Id int `json:"id"`
|
||||||
} `json:"movieFile"`
|
} `json:"movieFile"`
|
||||||
Id int `json:"id"`
|
Id int `json:"id"`
|
||||||
@@ -18,10 +17,12 @@ type ContentFile struct {
|
|||||||
Name string `json:"name"`
|
Name string `json:"name"`
|
||||||
Path string `json:"path"`
|
Path string `json:"path"`
|
||||||
Id int `json:"id"`
|
Id int `json:"id"`
|
||||||
|
EpisodeId int `json:"showId"`
|
||||||
FileId int `json:"fileId"`
|
FileId int `json:"fileId"`
|
||||||
TargetPath string `json:"targetPath"`
|
TargetPath string `json:"targetPath"`
|
||||||
IsSymlink bool `json:"isSymlink"`
|
IsSymlink bool `json:"isSymlink"`
|
||||||
IsBroken bool `json:"isBroken"`
|
IsBroken bool `json:"isBroken"`
|
||||||
|
SeasonNumber int `json:"seasonNumber"`
|
||||||
}
|
}
|
||||||
|
|
||||||
type Content struct {
|
type Content struct {
|
||||||
|
|||||||
@@ -1,5 +0,0 @@
|
|||||||
package arr
|
|
||||||
|
|
||||||
func Readfile(path string) error {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
@@ -1,35 +1,73 @@
|
|||||||
package alldebrid
|
package alldebrid
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"github.com/goccy/go-json"
|
||||||
|
"github.com/puzpuzpuz/xsync/v3"
|
||||||
"github.com/rs/zerolog"
|
"github.com/rs/zerolog"
|
||||||
"github.com/sirrobot01/debrid-blackhole/internal/cache"
|
"github.com/sirrobot01/decypharr/internal/config"
|
||||||
"github.com/sirrobot01/debrid-blackhole/internal/config"
|
"github.com/sirrobot01/decypharr/internal/logger"
|
||||||
"github.com/sirrobot01/debrid-blackhole/internal/logger"
|
"github.com/sirrobot01/decypharr/internal/request"
|
||||||
"github.com/sirrobot01/debrid-blackhole/internal/request"
|
"github.com/sirrobot01/decypharr/internal/utils"
|
||||||
"github.com/sirrobot01/debrid-blackhole/internal/utils"
|
"github.com/sirrobot01/decypharr/pkg/debrid/types"
|
||||||
"github.com/sirrobot01/debrid-blackhole/pkg/debrid/torrent"
|
|
||||||
|
|
||||||
"net/http"
|
"net/http"
|
||||||
gourl "net/url"
|
gourl "net/url"
|
||||||
"os"
|
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
|
"slices"
|
||||||
"strconv"
|
"strconv"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
type AllDebrid struct {
|
type AllDebrid struct {
|
||||||
Name string
|
Name string
|
||||||
Host string `json:"host"`
|
Host string `json:"host"`
|
||||||
APIKey string
|
APIKey string
|
||||||
|
DownloadKeys *xsync.MapOf[string, types.Account]
|
||||||
DownloadUncached bool
|
DownloadUncached bool
|
||||||
client *request.RLHTTPClient
|
client *request.Client
|
||||||
cache *cache.Cache
|
|
||||||
MountPath string
|
MountPath string
|
||||||
logger zerolog.Logger
|
logger zerolog.Logger
|
||||||
CheckCached bool
|
CheckCached bool
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func New(dc config.Debrid) *AllDebrid {
|
||||||
|
rl := request.ParseRateLimit(dc.RateLimit)
|
||||||
|
|
||||||
|
headers := map[string]string{
|
||||||
|
"Authorization": fmt.Sprintf("Bearer %s", dc.APIKey),
|
||||||
|
}
|
||||||
|
_log := logger.New(dc.Name)
|
||||||
|
client := request.New(
|
||||||
|
request.WithHeaders(headers),
|
||||||
|
request.WithLogger(_log),
|
||||||
|
request.WithRateLimiter(rl),
|
||||||
|
request.WithProxy(dc.Proxy),
|
||||||
|
)
|
||||||
|
|
||||||
|
accounts := xsync.NewMapOf[string, types.Account]()
|
||||||
|
for idx, key := range dc.DownloadAPIKeys {
|
||||||
|
id := strconv.Itoa(idx)
|
||||||
|
accounts.Store(id, types.Account{
|
||||||
|
Name: key,
|
||||||
|
ID: id,
|
||||||
|
Token: key,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
return &AllDebrid{
|
||||||
|
Name: "alldebrid",
|
||||||
|
Host: dc.Host,
|
||||||
|
APIKey: dc.APIKey,
|
||||||
|
DownloadKeys: accounts,
|
||||||
|
DownloadUncached: dc.DownloadUncached,
|
||||||
|
client: client,
|
||||||
|
MountPath: dc.Folder,
|
||||||
|
logger: logger.New(dc.Name),
|
||||||
|
CheckCached: dc.CheckCached,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func (ad *AllDebrid) GetName() string {
|
func (ad *AllDebrid) GetName() string {
|
||||||
return ad.Name
|
return ad.Name
|
||||||
}
|
}
|
||||||
@@ -38,22 +76,16 @@ func (ad *AllDebrid) GetLogger() zerolog.Logger {
|
|||||||
return ad.logger
|
return ad.logger
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ad *AllDebrid) IsAvailable(infohashes []string) map[string]bool {
|
func (ad *AllDebrid) IsAvailable(hashes []string) map[string]bool {
|
||||||
// Check if the infohashes are available in the local cache
|
// Check if the infohashes are available in the local cache
|
||||||
hashes, result := torrent.GetLocalCache(infohashes, ad.cache)
|
result := make(map[string]bool)
|
||||||
|
|
||||||
if len(hashes) == 0 {
|
|
||||||
// Either all the infohashes are locally cached or none are
|
|
||||||
ad.cache.AddMultiple(result)
|
|
||||||
return result
|
|
||||||
}
|
|
||||||
|
|
||||||
// Divide hashes into groups of 100
|
// Divide hashes into groups of 100
|
||||||
// AllDebrid does not support checking cached infohashes
|
// AllDebrid does not support checking cached infohashes
|
||||||
return result
|
return result
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ad *AllDebrid) SubmitMagnet(torrent *torrent.Torrent) (*torrent.Torrent, error) {
|
func (ad *AllDebrid) SubmitMagnet(torrent *types.Torrent) (*types.Torrent, error) {
|
||||||
url := fmt.Sprintf("%s/magnet/upload", ad.Host)
|
url := fmt.Sprintf("%s/magnet/upload", ad.Host)
|
||||||
query := gourl.Values{}
|
query := gourl.Values{}
|
||||||
query.Add("magnets[]", torrent.Magnet.Link)
|
query.Add("magnets[]", torrent.Magnet.Link)
|
||||||
@@ -90,10 +122,10 @@ func getAlldebridStatus(statusCode int) string {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func flattenFiles(files []MagnetFile, parentPath string, index *int) []torrent.File {
|
func flattenFiles(files []MagnetFile, parentPath string, index *int) map[string]types.File {
|
||||||
result := make([]torrent.File, 0)
|
result := make(map[string]types.File)
|
||||||
|
|
||||||
cfg := config.GetConfig()
|
cfg := config.Get()
|
||||||
|
|
||||||
for _, f := range files {
|
for _, f := range files {
|
||||||
currentPath := f.Name
|
currentPath := f.Name
|
||||||
@@ -103,13 +135,21 @@ func flattenFiles(files []MagnetFile, parentPath string, index *int) []torrent.F
|
|||||||
|
|
||||||
if f.Elements != nil {
|
if f.Elements != nil {
|
||||||
// This is a folder, recurse into it
|
// This is a folder, recurse into it
|
||||||
result = append(result, flattenFiles(f.Elements, currentPath, index)...)
|
subFiles := flattenFiles(f.Elements, currentPath, index)
|
||||||
|
for k, v := range subFiles {
|
||||||
|
if _, ok := result[k]; ok {
|
||||||
|
// File already exists, use path as key
|
||||||
|
result[v.Path] = v
|
||||||
|
} else {
|
||||||
|
result[k] = v
|
||||||
|
}
|
||||||
|
}
|
||||||
} else {
|
} else {
|
||||||
// This is a file
|
// This is a file
|
||||||
fileName := filepath.Base(f.Name)
|
fileName := filepath.Base(f.Name)
|
||||||
|
|
||||||
// Skip sample files
|
// Skip sample files
|
||||||
if utils.IsSampleFile(fileName) {
|
if utils.IsSampleFile(f.Name) {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if !cfg.IsAllowedFile(fileName) {
|
if !cfg.IsAllowedFile(fileName) {
|
||||||
@@ -121,37 +161,36 @@ func flattenFiles(files []MagnetFile, parentPath string, index *int) []torrent.F
|
|||||||
}
|
}
|
||||||
|
|
||||||
*index++
|
*index++
|
||||||
file := torrent.File{
|
file := types.File{
|
||||||
Id: strconv.Itoa(*index),
|
Id: strconv.Itoa(*index),
|
||||||
Name: fileName,
|
Name: fileName,
|
||||||
Size: f.Size,
|
Size: f.Size,
|
||||||
Path: currentPath,
|
Path: currentPath,
|
||||||
|
Link: f.Link,
|
||||||
}
|
}
|
||||||
result = append(result, file)
|
result[file.Name] = file
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return result
|
return result
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ad *AllDebrid) GetTorrent(id string) (*torrent.Torrent, error) {
|
func (ad *AllDebrid) UpdateTorrent(t *types.Torrent) error {
|
||||||
t := &torrent.Torrent{}
|
url := fmt.Sprintf("%s/magnet/status?id=%s", ad.Host, t.Id)
|
||||||
url := fmt.Sprintf("%s/magnet/status?id=%s", ad.Host, id)
|
|
||||||
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
||||||
resp, err := ad.client.MakeRequest(req)
|
resp, err := ad.client.MakeRequest(req)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return t, err
|
return err
|
||||||
}
|
}
|
||||||
var res TorrentInfoResponse
|
var res TorrentInfoResponse
|
||||||
err = json.Unmarshal(resp, &res)
|
err = json.Unmarshal(resp, &res)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
ad.logger.Info().Msgf("Error unmarshalling torrent info: %s", err)
|
ad.logger.Info().Msgf("Error unmarshalling torrent info: %s", err)
|
||||||
return t, err
|
return err
|
||||||
}
|
}
|
||||||
data := res.Data.Magnets
|
data := res.Data.Magnets
|
||||||
status := getAlldebridStatus(data.StatusCode)
|
status := getAlldebridStatus(data.StatusCode)
|
||||||
name := data.Filename
|
name := data.Filename
|
||||||
t.Id = id
|
|
||||||
t.Name = name
|
t.Name = name
|
||||||
t.Status = status
|
t.Status = status
|
||||||
t.Filename = name
|
t.Filename = name
|
||||||
@@ -159,7 +198,6 @@ func (ad *AllDebrid) GetTorrent(id string) (*torrent.Torrent, error) {
|
|||||||
t.Folder = name
|
t.Folder = name
|
||||||
t.MountPath = ad.MountPath
|
t.MountPath = ad.MountPath
|
||||||
t.Debrid = ad.Name
|
t.Debrid = ad.Name
|
||||||
t.DownloadLinks = make(map[string]torrent.DownloadLinks)
|
|
||||||
if status == "downloaded" {
|
if status == "downloaded" {
|
||||||
t.Bytes = data.Size
|
t.Bytes = data.Size
|
||||||
|
|
||||||
@@ -170,36 +208,33 @@ func (ad *AllDebrid) GetTorrent(id string) (*torrent.Torrent, error) {
|
|||||||
files := flattenFiles(data.Files, "", &index)
|
files := flattenFiles(data.Files, "", &index)
|
||||||
t.Files = files
|
t.Files = files
|
||||||
}
|
}
|
||||||
return t, nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ad *AllDebrid) CheckStatus(torrent *torrent.Torrent, isSymlink bool) (*torrent.Torrent, error) {
|
func (ad *AllDebrid) CheckStatus(torrent *types.Torrent, isSymlink bool) (*types.Torrent, error) {
|
||||||
for {
|
for {
|
||||||
tb, err := ad.GetTorrent(torrent.Id)
|
err := ad.UpdateTorrent(torrent)
|
||||||
|
|
||||||
torrent = tb
|
if err != nil || torrent == nil {
|
||||||
|
return torrent, err
|
||||||
if err != nil || tb == nil {
|
|
||||||
return tb, err
|
|
||||||
}
|
}
|
||||||
status := torrent.Status
|
status := torrent.Status
|
||||||
if status == "downloaded" {
|
if status == "downloaded" {
|
||||||
ad.logger.Info().Msgf("Torrent: %s downloaded", torrent.Name)
|
ad.logger.Info().Msgf("Torrent: %s downloaded", torrent.Name)
|
||||||
if !isSymlink {
|
if !isSymlink {
|
||||||
err = ad.GetDownloadLinks(torrent)
|
err = ad.GenerateDownloadLinks(torrent)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return torrent, err
|
return torrent, err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
break
|
break
|
||||||
} else if status == "downloading" {
|
} else if slices.Contains(ad.GetDownloadingStatus(), status) {
|
||||||
if !ad.DownloadUncached {
|
if !torrent.DownloadUncached {
|
||||||
go ad.DeleteTorrent(torrent)
|
|
||||||
return torrent, fmt.Errorf("torrent: %s not cached", torrent.Name)
|
return torrent, fmt.Errorf("torrent: %s not cached", torrent.Name)
|
||||||
}
|
}
|
||||||
// Break out of the loop if the torrent is downloading.
|
// Break out of the loop if the torrent is downloading.
|
||||||
// This is necessary to prevent infinite loop since we moved to sync downloading and async processing
|
// This is necessary to prevent infinite loop since we moved to sync downloading and async processing
|
||||||
break
|
return torrent, nil
|
||||||
} else {
|
} else {
|
||||||
return torrent, fmt.Errorf("torrent: %s has error", torrent.Name)
|
return torrent, fmt.Errorf("torrent: %s has error", torrent.Name)
|
||||||
}
|
}
|
||||||
@@ -208,20 +243,62 @@ func (ad *AllDebrid) CheckStatus(torrent *torrent.Torrent, isSymlink bool) (*tor
|
|||||||
return torrent, nil
|
return torrent, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ad *AllDebrid) DeleteTorrent(torrent *torrent.Torrent) {
|
func (ad *AllDebrid) DeleteTorrent(torrentId string) error {
|
||||||
url := fmt.Sprintf("%s/magnet/delete?id=%s", ad.Host, torrent.Id)
|
url := fmt.Sprintf("%s/magnet/delete?id=%s", ad.Host, torrentId)
|
||||||
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
||||||
_, err := ad.client.MakeRequest(req)
|
if _, err := ad.client.MakeRequest(req); err != nil {
|
||||||
if err == nil {
|
return err
|
||||||
ad.logger.Info().Msgf("Torrent: %s deleted", torrent.Name)
|
|
||||||
} else {
|
|
||||||
ad.logger.Info().Msgf("Error deleting torrent: %s", err)
|
|
||||||
}
|
}
|
||||||
|
ad.logger.Info().Msgf("Torrent %s deleted from AD", torrentId)
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ad *AllDebrid) GetDownloadLinks(t *torrent.Torrent) error {
|
func (ad *AllDebrid) GenerateDownloadLinks(t *types.Torrent) error {
|
||||||
downloadLinks := make(map[string]torrent.DownloadLinks)
|
filesCh := make(chan types.File, len(t.Files))
|
||||||
|
errCh := make(chan error, len(t.Files))
|
||||||
|
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
wg.Add(len(t.Files))
|
||||||
for _, file := range t.Files {
|
for _, file := range t.Files {
|
||||||
|
go func(file types.File) {
|
||||||
|
defer wg.Done()
|
||||||
|
link, accountId, err := ad.GetDownloadLink(t, &file)
|
||||||
|
if err != nil {
|
||||||
|
errCh <- err
|
||||||
|
return
|
||||||
|
}
|
||||||
|
file.DownloadLink = link
|
||||||
|
file.Generated = time.Now()
|
||||||
|
file.AccountId = accountId
|
||||||
|
if link == "" {
|
||||||
|
errCh <- fmt.Errorf("error getting download links %w", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
filesCh <- file
|
||||||
|
}(file)
|
||||||
|
}
|
||||||
|
go func() {
|
||||||
|
wg.Wait()
|
||||||
|
close(filesCh)
|
||||||
|
close(errCh)
|
||||||
|
}()
|
||||||
|
files := make(map[string]types.File, len(t.Files))
|
||||||
|
for file := range filesCh {
|
||||||
|
files[file.Name] = file
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check for errors
|
||||||
|
for err := range errCh {
|
||||||
|
if err != nil {
|
||||||
|
return err // Return the first error encountered
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Files = files
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ad *AllDebrid) GetDownloadLink(t *types.Torrent, file *types.File) (string, string, error) {
|
||||||
url := fmt.Sprintf("%s/link/unlock", ad.Host)
|
url := fmt.Sprintf("%s/link/unlock", ad.Host)
|
||||||
query := gourl.Values{}
|
query := gourl.Values{}
|
||||||
query.Add("link", file.Link)
|
query.Add("link", file.Link)
|
||||||
@@ -229,70 +306,78 @@ func (ad *AllDebrid) GetDownloadLinks(t *torrent.Torrent) error {
|
|||||||
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
||||||
resp, err := ad.client.MakeRequest(req)
|
resp, err := ad.client.MakeRequest(req)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return "", "", err
|
||||||
}
|
}
|
||||||
var data DownloadLink
|
var data DownloadLink
|
||||||
if err = json.Unmarshal(resp, &data); err != nil {
|
if err = json.Unmarshal(resp, &data); err != nil {
|
||||||
return err
|
return "", "", err
|
||||||
}
|
}
|
||||||
link := data.Data.Link
|
link := data.Data.Link
|
||||||
|
if link == "" {
|
||||||
dl := torrent.DownloadLinks{
|
return "", "", fmt.Errorf("error getting download links %s", data.Error.Message)
|
||||||
Link: file.Link,
|
|
||||||
Filename: data.Data.Filename,
|
|
||||||
DownloadLink: link,
|
|
||||||
}
|
|
||||||
downloadLinks[file.Id] = dl
|
|
||||||
}
|
|
||||||
t.DownloadLinks = downloadLinks
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ad *AllDebrid) GetDownloadLink(t *torrent.Torrent, file *torrent.File) *torrent.DownloadLinks {
|
|
||||||
url := fmt.Sprintf("%s/link/unlock", ad.Host)
|
|
||||||
query := gourl.Values{}
|
|
||||||
query.Add("link", file.Link)
|
|
||||||
url += "?" + query.Encode()
|
|
||||||
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
|
||||||
resp, err := ad.client.MakeRequest(req)
|
|
||||||
if err != nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
var data DownloadLink
|
|
||||||
if err = json.Unmarshal(resp, &data); err != nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
link := data.Data.Link
|
|
||||||
return &torrent.DownloadLinks{
|
|
||||||
DownloadLink: link,
|
|
||||||
Link: file.Link,
|
|
||||||
Filename: data.Data.Filename,
|
|
||||||
}
|
}
|
||||||
|
return link, "0", nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ad *AllDebrid) GetCheckCached() bool {
|
func (ad *AllDebrid) GetCheckCached() bool {
|
||||||
return ad.CheckCached
|
return ad.CheckCached
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ad *AllDebrid) GetTorrents() ([]*torrent.Torrent, error) {
|
func (ad *AllDebrid) GetTorrents() ([]*types.Torrent, error) {
|
||||||
return nil, fmt.Errorf("not implemented")
|
url := fmt.Sprintf("%s/magnet/status?status=ready", ad.Host)
|
||||||
|
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
||||||
|
resp, err := ad.client.MakeRequest(req)
|
||||||
|
torrents := make([]*types.Torrent, 0)
|
||||||
|
if err != nil {
|
||||||
|
return torrents, err
|
||||||
|
}
|
||||||
|
var res TorrentsListResponse
|
||||||
|
err = json.Unmarshal(resp, &res)
|
||||||
|
if err != nil {
|
||||||
|
ad.logger.Info().Msgf("Error unmarshalling torrent info: %s", err)
|
||||||
|
return torrents, err
|
||||||
|
}
|
||||||
|
for _, magnet := range res.Data.Magnets {
|
||||||
|
torrents = append(torrents, &types.Torrent{
|
||||||
|
Id: strconv.Itoa(magnet.Id),
|
||||||
|
Name: magnet.Filename,
|
||||||
|
Bytes: magnet.Size,
|
||||||
|
Status: getAlldebridStatus(magnet.StatusCode),
|
||||||
|
Filename: magnet.Filename,
|
||||||
|
OriginalFilename: magnet.Filename,
|
||||||
|
Files: make(map[string]types.File),
|
||||||
|
InfoHash: magnet.Hash,
|
||||||
|
Debrid: ad.Name,
|
||||||
|
MountPath: ad.MountPath,
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func New(dc config.Debrid, cache *cache.Cache) *AllDebrid {
|
return torrents, nil
|
||||||
rl := request.ParseRateLimit(dc.RateLimit)
|
|
||||||
headers := map[string]string{
|
|
||||||
"Authorization": fmt.Sprintf("Bearer %s", dc.APIKey),
|
|
||||||
}
|
}
|
||||||
client := request.NewRLHTTPClient(rl, headers)
|
|
||||||
return &AllDebrid{
|
func (ad *AllDebrid) GetDownloads() (map[string]types.DownloadLinks, error) {
|
||||||
Name: "alldebrid",
|
return nil, nil
|
||||||
Host: dc.Host,
|
|
||||||
APIKey: dc.APIKey,
|
|
||||||
DownloadUncached: dc.DownloadUncached,
|
|
||||||
client: client,
|
|
||||||
cache: cache,
|
|
||||||
MountPath: dc.Folder,
|
|
||||||
logger: logger.NewLogger(dc.Name, config.GetConfig().LogLevel, os.Stdout),
|
|
||||||
CheckCached: dc.CheckCached,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (ad *AllDebrid) GetDownloadingStatus() []string {
|
||||||
|
return []string{"downloading"}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ad *AllDebrid) GetDownloadUncached() bool {
|
||||||
|
return ad.DownloadUncached
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ad *AllDebrid) CheckLink(link string) error {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ad *AllDebrid) GetMountPath() string {
|
||||||
|
return ad.MountPath
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ad *AllDebrid) DisableAccount(accountId string) {
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ad *AllDebrid) ResetActiveDownloadKeys() {
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -40,6 +40,14 @@ type TorrentInfoResponse struct {
|
|||||||
Error *errorResponse `json:"error"`
|
Error *errorResponse `json:"error"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type TorrentsListResponse struct {
|
||||||
|
Status string `json:"status"`
|
||||||
|
Data struct {
|
||||||
|
Magnets []magnetInfo `json:"magnets"`
|
||||||
|
} `json:"data"`
|
||||||
|
Error *errorResponse `json:"error"`
|
||||||
|
}
|
||||||
|
|
||||||
type UploadMagnetResponse struct {
|
type UploadMagnetResponse struct {
|
||||||
Status string `json:"status"`
|
Status string `json:"status"`
|
||||||
Data struct {
|
Data struct {
|
||||||
|
|||||||
360
pkg/debrid/cache/cache.go
vendored
360
pkg/debrid/cache/cache.go
vendored
@@ -1,360 +0,0 @@
|
|||||||
package cache
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bufio"
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"github.com/rs/zerolog"
|
|
||||||
"github.com/sirrobot01/debrid-blackhole/internal/logger"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"runtime"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/sirrobot01/debrid-blackhole/internal/config"
|
|
||||||
"github.com/sirrobot01/debrid-blackhole/pkg/debrid/engine"
|
|
||||||
"github.com/sirrobot01/debrid-blackhole/pkg/debrid/torrent"
|
|
||||||
)
|
|
||||||
|
|
||||||
type DownloadLinkCache struct {
|
|
||||||
Link string `json:"download_link"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type CachedTorrent struct {
|
|
||||||
*torrent.Torrent
|
|
||||||
LastRead time.Time `json:"last_read"`
|
|
||||||
IsComplete bool `json:"is_complete"`
|
|
||||||
DownloadLinks map[string]DownloadLinkCache `json:"download_links"`
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
_logInstance zerolog.Logger
|
|
||||||
once sync.Once
|
|
||||||
)
|
|
||||||
|
|
||||||
func getLogger() zerolog.Logger {
|
|
||||||
once.Do(func() {
|
|
||||||
_logInstance = logger.NewLogger("cache", "info", os.Stdout)
|
|
||||||
})
|
|
||||||
return _logInstance
|
|
||||||
}
|
|
||||||
|
|
||||||
type Cache struct {
|
|
||||||
dir string
|
|
||||||
client engine.Service
|
|
||||||
torrents *sync.Map // key: torrent.Id, value: *CachedTorrent
|
|
||||||
torrentsNames *sync.Map // key: torrent.Name, value: torrent.Id
|
|
||||||
LastUpdated time.Time `json:"last_updated"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type Manager struct {
|
|
||||||
caches map[string]*Cache
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewManager(debridService *engine.Engine) *Manager {
|
|
||||||
cfg := config.GetConfig()
|
|
||||||
cm := &Manager{
|
|
||||||
caches: make(map[string]*Cache),
|
|
||||||
}
|
|
||||||
for _, debrid := range debridService.GetDebrids() {
|
|
||||||
c := New(debrid, cfg.Path)
|
|
||||||
cm.caches[debrid.GetName()] = c
|
|
||||||
}
|
|
||||||
return cm
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Manager) GetCaches() map[string]*Cache {
|
|
||||||
return m.caches
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Manager) GetCache(debridName string) *Cache {
|
|
||||||
return m.caches[debridName]
|
|
||||||
}
|
|
||||||
|
|
||||||
func New(debridService engine.Service, basePath string) *Cache {
|
|
||||||
return &Cache{
|
|
||||||
dir: filepath.Join(basePath, "cache", debridService.GetName(), "torrents"),
|
|
||||||
torrents: &sync.Map{},
|
|
||||||
torrentsNames: &sync.Map{},
|
|
||||||
client: debridService,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Cache) Start() error {
|
|
||||||
_logger := getLogger()
|
|
||||||
_logger.Info().Msg("Starting cache for: " + c.client.GetName())
|
|
||||||
if err := c.Load(); err != nil {
|
|
||||||
return fmt.Errorf("failed to load cache: %v", err)
|
|
||||||
}
|
|
||||||
if err := c.Sync(); err != nil {
|
|
||||||
return fmt.Errorf("failed to sync cache: %v", err)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Cache) Load() error {
|
|
||||||
_logger := getLogger()
|
|
||||||
|
|
||||||
if err := os.MkdirAll(c.dir, 0755); err != nil {
|
|
||||||
return fmt.Errorf("failed to create cache directory: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
files, err := os.ReadDir(c.dir)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to read cache directory: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, file := range files {
|
|
||||||
if file.IsDir() || filepath.Ext(file.Name()) != ".json" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
filePath := filepath.Join(c.dir, file.Name())
|
|
||||||
data, err := os.ReadFile(filePath)
|
|
||||||
if err != nil {
|
|
||||||
_logger.Debug().Err(err).Msgf("Failed to read file: %s", filePath)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
var ct CachedTorrent
|
|
||||||
if err := json.Unmarshal(data, &ct); err != nil {
|
|
||||||
_logger.Debug().Err(err).Msgf("Failed to unmarshal file: %s", filePath)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if len(ct.Files) > 0 {
|
|
||||||
c.torrents.Store(ct.Torrent.Id, &ct)
|
|
||||||
c.torrentsNames.Store(ct.Torrent.Name, ct.Torrent.Id)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Cache) GetTorrent(id string) *CachedTorrent {
|
|
||||||
if value, ok := c.torrents.Load(id); ok {
|
|
||||||
return value.(*CachedTorrent)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Cache) GetTorrentByName(name string) *CachedTorrent {
|
|
||||||
if id, ok := c.torrentsNames.Load(name); ok {
|
|
||||||
return c.GetTorrent(id.(string))
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Cache) SaveTorrent(ct *CachedTorrent) error {
|
|
||||||
data, err := json.MarshalIndent(ct, "", " ")
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to marshal torrent: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
fileName := ct.Torrent.Id + ".json"
|
|
||||||
filePath := filepath.Join(c.dir, fileName)
|
|
||||||
tmpFile := filePath + ".tmp"
|
|
||||||
|
|
||||||
f, err := os.Create(tmpFile)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to create temp file: %w", err)
|
|
||||||
}
|
|
||||||
defer f.Close()
|
|
||||||
|
|
||||||
w := bufio.NewWriter(f)
|
|
||||||
if _, err := w.Write(data); err != nil {
|
|
||||||
return fmt.Errorf("failed to write data: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := w.Flush(); err != nil {
|
|
||||||
return fmt.Errorf("failed to flush data: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return os.Rename(tmpFile, filePath)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Cache) SaveAll() error {
|
|
||||||
const batchSize = 100
|
|
||||||
var wg sync.WaitGroup
|
|
||||||
_logger := getLogger()
|
|
||||||
|
|
||||||
tasks := make(chan *CachedTorrent, batchSize)
|
|
||||||
|
|
||||||
for i := 0; i < runtime.NumCPU(); i++ {
|
|
||||||
wg.Add(1)
|
|
||||||
go func() {
|
|
||||||
defer wg.Done()
|
|
||||||
for ct := range tasks {
|
|
||||||
if err := c.SaveTorrent(ct); err != nil {
|
|
||||||
_logger.Error().Err(err).Msg("failed to save torrent")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
}
|
|
||||||
|
|
||||||
c.torrents.Range(func(_, value interface{}) bool {
|
|
||||||
tasks <- value.(*CachedTorrent)
|
|
||||||
return true
|
|
||||||
})
|
|
||||||
|
|
||||||
close(tasks)
|
|
||||||
wg.Wait()
|
|
||||||
c.LastUpdated = time.Now()
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Cache) Sync() error {
|
|
||||||
_logger := getLogger()
|
|
||||||
torrents, err := c.client.GetTorrents()
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to sync torrents: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
workers := runtime.NumCPU() * 200
|
|
||||||
workChan := make(chan *torrent.Torrent, len(torrents))
|
|
||||||
errChan := make(chan error, len(torrents))
|
|
||||||
|
|
||||||
var wg sync.WaitGroup
|
|
||||||
|
|
||||||
for i := 0; i < workers; i++ {
|
|
||||||
wg.Add(1)
|
|
||||||
go func() {
|
|
||||||
defer wg.Done()
|
|
||||||
for t := range workChan {
|
|
||||||
if err := c.processTorrent(t); err != nil {
|
|
||||||
errChan <- err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, t := range torrents {
|
|
||||||
workChan <- t
|
|
||||||
}
|
|
||||||
close(workChan)
|
|
||||||
|
|
||||||
wg.Wait()
|
|
||||||
close(errChan)
|
|
||||||
|
|
||||||
for err := range errChan {
|
|
||||||
_logger.Error().Err(err).Msg("sync error")
|
|
||||||
}
|
|
||||||
|
|
||||||
_logger.Info().Msgf("Synced %d torrents", len(torrents))
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Cache) processTorrent(t *torrent.Torrent) error {
|
|
||||||
if existing, ok := c.torrents.Load(t.Id); ok {
|
|
||||||
ct := existing.(*CachedTorrent)
|
|
||||||
if ct.IsComplete {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
c.AddTorrent(t)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Cache) AddTorrent(t *torrent.Torrent) {
|
|
||||||
_logger := getLogger()
|
|
||||||
|
|
||||||
if len(t.Files) == 0 {
|
|
||||||
tNew, err := c.client.GetTorrent(t.Id)
|
|
||||||
_logger.Debug().Msgf("Getting torrent files for %s", t.Id)
|
|
||||||
if err != nil {
|
|
||||||
_logger.Debug().Msgf("Failed to get torrent files for %s: %v", t.Id, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
t = tNew
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(t.Files) == 0 {
|
|
||||||
_logger.Debug().Msgf("No files found for %s", t.Id)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
ct := &CachedTorrent{
|
|
||||||
Torrent: t,
|
|
||||||
LastRead: time.Now(),
|
|
||||||
IsComplete: len(t.Files) > 0,
|
|
||||||
DownloadLinks: make(map[string]DownloadLinkCache),
|
|
||||||
}
|
|
||||||
|
|
||||||
c.torrents.Store(t.Id, ct)
|
|
||||||
c.torrentsNames.Store(t.Name, t.Id)
|
|
||||||
|
|
||||||
go func() {
|
|
||||||
if err := c.SaveTorrent(ct); err != nil {
|
|
||||||
_logger.Debug().Err(err).Msgf("Failed to save torrent %s", t.Id)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Cache) RefreshTorrent(torrentId string) *CachedTorrent {
|
|
||||||
_logger := getLogger()
|
|
||||||
|
|
||||||
t, err := c.client.GetTorrent(torrentId)
|
|
||||||
if err != nil {
|
|
||||||
_logger.Debug().Msgf("Failed to get torrent files for %s: %v", torrentId, err)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
if len(t.Files) == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
ct := &CachedTorrent{
|
|
||||||
Torrent: t,
|
|
||||||
LastRead: time.Now(),
|
|
||||||
IsComplete: len(t.Files) > 0,
|
|
||||||
DownloadLinks: make(map[string]DownloadLinkCache),
|
|
||||||
}
|
|
||||||
|
|
||||||
c.torrents.Store(t.Id, ct)
|
|
||||||
c.torrentsNames.Store(t.Name, t.Id)
|
|
||||||
|
|
||||||
go func() {
|
|
||||||
if err := c.SaveTorrent(ct); err != nil {
|
|
||||||
_logger.Debug().Err(err).Msgf("Failed to save torrent %s", t.Id)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
return ct
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Cache) GetFileDownloadLink(t *CachedTorrent, file *torrent.File) (string, error) {
|
|
||||||
_logger := getLogger()
|
|
||||||
|
|
||||||
if linkCache, ok := t.DownloadLinks[file.Id]; ok {
|
|
||||||
return linkCache.Link, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if file.Link == "" {
|
|
||||||
t = c.RefreshTorrent(t.Id)
|
|
||||||
if t == nil {
|
|
||||||
return "", fmt.Errorf("torrent not found")
|
|
||||||
}
|
|
||||||
file = t.Torrent.GetFile(file.Id)
|
|
||||||
}
|
|
||||||
|
|
||||||
_logger.Debug().Msgf("Getting download link for %s", t.Name)
|
|
||||||
link := c.client.GetDownloadLink(t.Torrent, file)
|
|
||||||
if link == nil {
|
|
||||||
return "", fmt.Errorf("download link not found")
|
|
||||||
}
|
|
||||||
|
|
||||||
t.DownloadLinks[file.Id] = DownloadLinkCache{
|
|
||||||
Link: link.DownloadLink,
|
|
||||||
}
|
|
||||||
|
|
||||||
go func() {
|
|
||||||
if err := c.SaveTorrent(t); err != nil {
|
|
||||||
_logger.Debug().Err(err).Msgf("Failed to save torrent %s", t.Id)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
return link.DownloadLink, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Cache) GetTorrents() *sync.Map {
|
|
||||||
return c.torrents
|
|
||||||
}
|
|
||||||
@@ -1,93 +0,0 @@
|
|||||||
package debrid
|
|
||||||
|
|
||||||
import (
|
|
||||||
"cmp"
|
|
||||||
"fmt"
|
|
||||||
"github.com/sirrobot01/debrid-blackhole/internal/cache"
|
|
||||||
"github.com/sirrobot01/debrid-blackhole/internal/config"
|
|
||||||
"github.com/sirrobot01/debrid-blackhole/internal/utils"
|
|
||||||
"github.com/sirrobot01/debrid-blackhole/pkg/arr"
|
|
||||||
"github.com/sirrobot01/debrid-blackhole/pkg/debrid/alldebrid"
|
|
||||||
"github.com/sirrobot01/debrid-blackhole/pkg/debrid/debrid_link"
|
|
||||||
"github.com/sirrobot01/debrid-blackhole/pkg/debrid/engine"
|
|
||||||
"github.com/sirrobot01/debrid-blackhole/pkg/debrid/realdebrid"
|
|
||||||
"github.com/sirrobot01/debrid-blackhole/pkg/debrid/torbox"
|
|
||||||
"github.com/sirrobot01/debrid-blackhole/pkg/debrid/torrent"
|
|
||||||
)
|
|
||||||
|
|
||||||
func New() *engine.Engine {
|
|
||||||
cfg := config.GetConfig()
|
|
||||||
maxCachedSize := cmp.Or(cfg.MaxCacheSize, 1000)
|
|
||||||
debrids := make([]engine.Service, 0)
|
|
||||||
// Divide the cache size by the number of debrids
|
|
||||||
maxCacheSize := maxCachedSize / len(cfg.Debrids)
|
|
||||||
|
|
||||||
for _, dc := range cfg.Debrids {
|
|
||||||
d := createDebrid(dc, cache.New(maxCacheSize))
|
|
||||||
logger := d.GetLogger()
|
|
||||||
logger.Info().Msg("Debrid Service started")
|
|
||||||
debrids = append(debrids, d)
|
|
||||||
}
|
|
||||||
d := &engine.Engine{Debrids: debrids, LastUsed: 0}
|
|
||||||
return d
|
|
||||||
}
|
|
||||||
|
|
||||||
func createDebrid(dc config.Debrid, cache *cache.Cache) engine.Service {
|
|
||||||
switch dc.Name {
|
|
||||||
case "realdebrid":
|
|
||||||
return realdebrid.New(dc, cache)
|
|
||||||
case "torbox":
|
|
||||||
return torbox.New(dc, cache)
|
|
||||||
case "debridlink":
|
|
||||||
return debrid_link.New(dc, cache)
|
|
||||||
case "alldebrid":
|
|
||||||
return alldebrid.New(dc, cache)
|
|
||||||
default:
|
|
||||||
return realdebrid.New(dc, cache)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ProcessTorrent submits a magnet to the first debrid service that accepts
// it, trying each configured service in order. On success it records the
// index of the service used (d.LastUsed) and returns the provider's status
// check result. When every service fails, all collected errors are wrapped
// into a single error chain.
func ProcessTorrent(d *engine.Engine, magnet *utils.Magnet, a *arr.Arr, isSymlink bool) (*torrent.Torrent, error) {
	debridTorrent := &torrent.Torrent{
		InfoHash: magnet.InfoHash,
		Magnet:   magnet,
		Name:     magnet.Name,
		Arr:      a,
		Size:     magnet.Size,
	}

	// Collects one error per failed service for the final wrapped error.
	errs := make([]error, 0)

	for index, db := range d.Debrids {
		logger := db.GetLogger()
		logger.Info().Msgf("Processing debrid: %s", db.GetName())

		logger.Info().Msgf("Torrent Hash: %s", debridTorrent.InfoHash)
		// Optionally skip services that report the hash as not cached.
		if db.GetCheckCached() {
			hash, exists := db.IsAvailable([]string{debridTorrent.InfoHash})[debridTorrent.InfoHash]
			if !exists || !hash {
				logger.Info().Msgf("Torrent: %s is not cached", debridTorrent.Name)
				continue
			} else {
				logger.Info().Msgf("Torrent: %s is cached(or downloading)", debridTorrent.Name)
			}
		}

		dbt, err := db.SubmitMagnet(debridTorrent)
		// Tag the returned torrent with the arr even on partial failure so
		// later cleanup knows its origin.
		if dbt != nil {
			dbt.Arr = a
		}
		// A nil torrent or empty id counts as a failed submission; fall
		// through to the next service.
		if err != nil || dbt == nil || dbt.Id == "" {
			errs = append(errs, err)
			continue
		}
		logger.Info().Msgf("Torrent: %s(id=%s) submitted to %s", dbt.Name, dbt.Id, db.GetName())
		d.LastUsed = index
		return db.CheckStatus(dbt, isSymlink)
	}
	// All services failed: chain every collected error (multi-%w wrapping).
	err := fmt.Errorf("failed to process torrent")
	for _, e := range errs {
		err = fmt.Errorf("%w\n%w", err, e)
	}
	return nil, err
}
|
|
||||||
803
pkg/debrid/debrid/cache.go
Normal file
803
pkg/debrid/debrid/cache.go
Normal file
@@ -0,0 +1,803 @@
|
|||||||
|
package debrid
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"github.com/goccy/go-json"
|
||||||
|
"github.com/puzpuzpuz/xsync/v3"
|
||||||
|
"github.com/rs/zerolog"
|
||||||
|
"github.com/sirrobot01/decypharr/internal/config"
|
||||||
|
"github.com/sirrobot01/decypharr/internal/logger"
|
||||||
|
"github.com/sirrobot01/decypharr/internal/request"
|
||||||
|
"github.com/sirrobot01/decypharr/internal/utils"
|
||||||
|
"github.com/sirrobot01/decypharr/pkg/debrid/types"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"runtime"
|
||||||
|
"strconv"
|
||||||
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// WebDavFolderNaming selects how a torrent's WebDAV folder is named.
type WebDavFolderNaming string

const (
	// WebDavUseFileName names folders after the torrent's filename.
	WebDavUseFileName WebDavFolderNaming = "filename"
	// WebDavUseOriginalName names folders after the original filename.
	WebDavUseOriginalName WebDavFolderNaming = "original"
	// WebDavUseFileNameNoExt is the filename with its extension stripped.
	WebDavUseFileNameNoExt WebDavFolderNaming = "filename_no_ext"
	// WebDavUseOriginalNameNoExt is the original name without extension.
	WebDavUseOriginalNameNoExt WebDavFolderNaming = "original_no_ext"
	// WebDavUseID names folders after the torrent id.
	WebDavUseID WebDavFolderNaming = "id"
)

// PropfindResponse is a cached WebDAV PROPFIND reply, kept both raw and
// gzipped, with the timestamp it was produced.
type PropfindResponse struct {
	Data        []byte
	GzippedData []byte
	Ts          time.Time
}

// CachedTorrent decorates a provider torrent with cache bookkeeping.
type CachedTorrent struct {
	*types.Torrent
	AddedOn    time.Time `json:"added_on"`    // when the torrent entered the cache
	IsComplete bool      `json:"is_complete"` // true when every file has a restricted link
}

// downloadLinkCache is one unrestricted link plus the account that produced
// it and when it should be considered stale.
type downloadLinkCache struct {
	Link      string
	AccountId string
	ExpiresAt time.Time
}

// RepairType distinguishes the kinds of repair work the repair worker does.
type RepairType string

const (
	RepairTypeReinsert RepairType = "reinsert"
	RepairTypeDelete   RepairType = "delete"
)

// RepairRequest is a unit of work for the repair worker.
type RepairRequest struct {
	Type      RepairType
	TorrentID string
	Priority  int
	FileName  string
}

// Cache is the per-provider torrent cache backing the WebDAV server. It
// mirrors the provider's torrent list in memory, persists each torrent as a
// JSON file under dir, and caches unrestricted download links.
type Cache struct {
	dir    string       // on-disk cache directory for this provider
	client types.Client // provider API client
	logger zerolog.Logger

	torrents             *xsync.MapOf[string, *CachedTorrent] // key: torrent.Id, value: *CachedTorrent
	torrentsNames        *xsync.MapOf[string, *CachedTorrent] // key: torrent.Name, value: torrent
	listings             atomic.Value                         // holds the latest []os.FileInfo listing
	downloadLinks        *xsync.MapOf[string, downloadLinkCache]
	invalidDownloadLinks *xsync.MapOf[string, string] // downloadLink -> reason it was flagged
	PropfindResp         *xsync.MapOf[string, PropfindResponse]
	folderNaming         WebDavFolderNaming

	// repair
	repairChan        chan RepairRequest
	repairsInProgress *xsync.MapOf[string, struct{}]

	// config
	workers                      int
	torrentRefreshInterval       time.Duration
	downloadLinksRefreshInterval time.Duration
	autoExpiresLinksAfter        time.Duration

	// refresh mutex
	listingRefreshMu       sync.RWMutex // for refreshing torrents
	downloadLinksRefreshMu sync.RWMutex // for refreshing download links
	torrentsRefreshMu      sync.RWMutex // for refreshing torrents

	saveSemaphore chan struct{} // bounds concurrent async saves
	ctx           context.Context
}
|
||||||
|
|
||||||
|
// New constructs a Cache for a single debrid provider. Interval and expiry
// settings are parsed from the provider config; unparseable values fall
// back to built-in defaults. Background work is not started here — call
// Start to bring the cache online.
func New(dc config.Debrid, client types.Client) *Cache {
	cfg := config.Get()
	torrentRefreshInterval, err := time.ParseDuration(dc.TorrentsRefreshInterval)
	if err != nil {
		// Default: refresh the torrent list every 15 seconds.
		torrentRefreshInterval = time.Second * 15
	}
	downloadLinksRefreshInterval, err := time.ParseDuration(dc.DownloadLinksRefreshInterval)
	if err != nil {
		// Default: refresh download links every 40 minutes.
		downloadLinksRefreshInterval = time.Minute * 40
	}
	autoExpiresLinksAfter, err := time.ParseDuration(dc.AutoExpireLinksAfter)
	if err != nil {
		// Default: cached links are considered stale after 24 hours.
		autoExpiresLinksAfter = time.Hour * 24
	}
	// Worker pool sized from CPU count unless explicitly configured.
	workers := runtime.NumCPU() * 50
	if dc.Workers > 0 {
		workers = dc.Workers
	}
	return &Cache{
		dir:                          filepath.Join(cfg.Path, "cache", dc.Name), // path to save cache files
		torrents:                     xsync.NewMapOf[string, *CachedTorrent](),
		torrentsNames:                xsync.NewMapOf[string, *CachedTorrent](),
		invalidDownloadLinks:         xsync.NewMapOf[string, string](),
		client:                       client,
		logger:                       logger.New(fmt.Sprintf("%s-webdav", client.GetName())),
		workers:                      workers,
		downloadLinks:                xsync.NewMapOf[string, downloadLinkCache](),
		torrentRefreshInterval:       torrentRefreshInterval,
		downloadLinksRefreshInterval: downloadLinksRefreshInterval,
		PropfindResp:                 xsync.NewMapOf[string, PropfindResponse](),
		folderNaming:                 WebDavFolderNaming(dc.FolderNaming),
		autoExpiresLinksAfter:        autoExpiresLinksAfter,
		repairsInProgress:            xsync.NewMapOf[string, struct{}](),
		saveSemaphore:                make(chan struct{}, 50),
		ctx:                          context.Background(),
	}
}
|
||||||
|
|
||||||
|
// Start brings the cache online: it creates the on-disk cache directory,
// performs an initial synchronous Sync against the provider, then launches
// the background refresh loops and the repair worker. ctx bounds the
// lifetime of all background work.
func (c *Cache) Start(ctx context.Context) error {
	if err := os.MkdirAll(c.dir, 0755); err != nil {
		return fmt.Errorf("failed to create cache directory: %w", err)
	}
	c.ctx = ctx

	// Initial sync is synchronous so callers see a populated cache on return.
	if err := c.Sync(); err != nil {
		return fmt.Errorf("failed to sync cache: %w", err)
	}

	// initial download links
	go func() {
		c.refreshDownloadLinks()
	}()

	// Long-running refresh loop; errors here are logged, not fatal.
	go func() {
		err := c.Refresh()
		if err != nil {
			c.logger.Error().Err(err).Msg("Failed to start cache refresh worker")
		}
	}()

	// Buffered so producers of repair requests don't block on a busy worker.
	c.repairChan = make(chan RepairRequest, 100)
	go c.repairWorker()

	return nil
}
|
||||||
|
|
||||||
|
// load reads all persisted torrent JSON files from the cache directory in
// parallel and returns the complete ones keyed by torrent id. Torrents with
// any file missing its restricted link are treated as incomplete and are
// skipped (they will be re-fetched by Sync). Unreadable or unparsable files
// are logged and skipped, never fatal.
func (c *Cache) load() (map[string]*CachedTorrent, error) {
	torrents := make(map[string]*CachedTorrent)
	// results collects completed torrents from the worker goroutines.
	var results sync.Map

	if err := os.MkdirAll(c.dir, 0755); err != nil {
		return torrents, fmt.Errorf("failed to create cache directory: %w", err)
	}

	files, err := os.ReadDir(c.dir)
	if err != nil {
		return torrents, fmt.Errorf("failed to read cache directory: %w", err)
	}

	// Get only json files
	var jsonFiles []os.DirEntry
	for _, file := range files {
		if !file.IsDir() && filepath.Ext(file.Name()) == ".json" {
			jsonFiles = append(jsonFiles, file)
		}
	}

	if len(jsonFiles) == 0 {
		return torrents, nil
	}

	// Create channels with appropriate buffering
	workChan := make(chan os.DirEntry, min(c.workers, len(jsonFiles)))

	// Create a wait group for workers
	var wg sync.WaitGroup

	// Start workers
	for i := 0; i < c.workers; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// Captured once per worker; used as the AddedOn fallback below.
			now := time.Now()

			for {
				file, ok := <-workChan
				if !ok {
					return // Channel closed, exit goroutine
				}

				fileName := file.Name()
				filePath := filepath.Join(c.dir, fileName)
				data, err := os.ReadFile(filePath)
				if err != nil {
					c.logger.Debug().Err(err).Msgf("Failed to read file: %s", filePath)
					continue
				}

				var ct CachedTorrent
				if err := json.Unmarshal(data, &ct); err != nil {
					c.logger.Debug().Err(err).Msgf("Failed to unmarshal file: %s", filePath)
					continue
				}

				isComplete := true
				if len(ct.Files) != 0 {
					// Check if all files are valid, if not, delete the file.json and remove from cache.
					for _, f := range ct.Files {
						if f.Link == "" {
							isComplete = false
							break
						}
					}

					if isComplete {
						// Parse the provider's added timestamp, falling back
						// to load time when it is missing or malformed.
						addedOn, err := time.Parse(time.RFC3339, ct.Added)
						if err != nil {
							addedOn = now
						}
						ct.AddedOn = addedOn
						ct.IsComplete = true
						results.Store(ct.Id, &ct)
					}
				}
			}
		}()
	}

	// Feed work to workers
	for _, file := range jsonFiles {
		workChan <- file
	}

	// Signal workers that no more work is coming
	close(workChan)

	// Wait for all workers to complete
	wg.Wait()

	// Convert sync.Map to regular map
	results.Range(func(key, value interface{}) bool {
		id, _ := key.(string)
		torrent, _ := value.(*CachedTorrent)
		torrents[id] = torrent
		return true
	})

	return torrents, nil
}
|
||||||
|
|
||||||
|
// Sync reconciles the on-disk cache with the provider's current torrent
// list: torrents missing locally are fetched and cached; locally cached
// torrents that no longer exist at the provider are removed (moved to
// trash). A failed disk load is only logged — the provider list is the
// source of truth.
func (c *Cache) Sync() error {
	defer c.logger.Info().Msg("WebDav server sync complete")
	cachedTorrents, err := c.load()
	if err != nil {
		c.logger.Debug().Err(err).Msg("Failed to load cache")
	}

	torrents, err := c.client.GetTorrents()
	if err != nil {
		return fmt.Errorf("failed to sync torrents: %v", err)
	}

	c.logger.Info().Msgf("Got %d torrents from %s", len(torrents), c.client.GetName())

	// Torrents present at the provider but not yet cached locally.
	newTorrents := make([]*types.Torrent, 0)
	idStore := make(map[string]struct{}, len(torrents))
	for _, t := range torrents {
		idStore[t.Id] = struct{}{}
		if _, ok := cachedTorrents[t.Id]; !ok {
			newTorrents = append(newTorrents, t)
		}
	}

	// Check for deleted torrents
	deletedTorrents := make([]string, 0)
	for _, t := range cachedTorrents {
		if _, ok := idStore[t.Id]; !ok {
			deletedTorrents = append(deletedTorrents, t.Id)
		}
	}

	if len(deletedTorrents) > 0 {
		c.logger.Info().Msgf("Found %d deleted torrents", len(deletedTorrents))
		for _, id := range deletedTorrents {
			if _, ok := cachedTorrents[id]; ok {
				delete(cachedTorrents, id)
				// Moves the persisted JSON into the trash directory.
				c.removeFromDB(id)
			}
		}
	}

	// Write these torrents to the cache
	c.setTorrents(cachedTorrents)
	c.logger.Info().Msgf("Loaded %d torrents from cache", len(cachedTorrents))

	if len(newTorrents) > 0 {
		c.logger.Info().Msgf("Found %d new torrents", len(newTorrents))
		// Fetch and cache the new torrents via the worker pool.
		if err := c.sync(newTorrents); err != nil {
			return fmt.Errorf("failed to sync torrents: %v", err)
		}
	}

	return nil
}
|
||||||
|
|
||||||
|
func (c *Cache) sync(torrents []*types.Torrent) error {
|
||||||
|
|
||||||
|
// Create channels with appropriate buffering
|
||||||
|
workChan := make(chan *types.Torrent, min(c.workers, len(torrents)))
|
||||||
|
|
||||||
|
// Use an atomic counter for progress tracking
|
||||||
|
var processed int64
|
||||||
|
var errorCount int64
|
||||||
|
|
||||||
|
// Create a wait group for workers
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
|
||||||
|
// Start workers
|
||||||
|
for i := 0; i < c.workers; i++ {
|
||||||
|
wg.Add(1)
|
||||||
|
go func() {
|
||||||
|
defer wg.Done()
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case t, ok := <-workChan:
|
||||||
|
if !ok {
|
||||||
|
return // Channel closed, exit goroutine
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := c.ProcessTorrent(t, false); err != nil {
|
||||||
|
c.logger.Error().Err(err).Str("torrent", t.Name).Msg("sync error")
|
||||||
|
atomic.AddInt64(&errorCount, 1)
|
||||||
|
}
|
||||||
|
|
||||||
|
count := atomic.AddInt64(&processed, 1)
|
||||||
|
if count%1000 == 0 {
|
||||||
|
c.refreshListings()
|
||||||
|
c.logger.Info().Msgf("Progress: %d/%d torrents processed", count, len(torrents))
|
||||||
|
}
|
||||||
|
|
||||||
|
case <-c.ctx.Done():
|
||||||
|
return // Context cancelled, exit goroutine
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Feed work to workers
|
||||||
|
for _, t := range torrents {
|
||||||
|
select {
|
||||||
|
case workChan <- t:
|
||||||
|
// Work sent successfully
|
||||||
|
case <-c.ctx.Done():
|
||||||
|
break // Context cancelled
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Signal workers that no more work is coming
|
||||||
|
close(workChan)
|
||||||
|
|
||||||
|
// Wait for all workers to complete
|
||||||
|
wg.Wait()
|
||||||
|
|
||||||
|
c.refreshListings()
|
||||||
|
c.logger.Info().Msgf("Sync complete: %d torrents processed, %d errors", len(torrents), errorCount)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Cache) GetTorrentFolder(torrent *types.Torrent) string {
|
||||||
|
switch c.folderNaming {
|
||||||
|
case WebDavUseFileName:
|
||||||
|
return torrent.Filename
|
||||||
|
case WebDavUseOriginalName:
|
||||||
|
return torrent.OriginalFilename
|
||||||
|
case WebDavUseFileNameNoExt:
|
||||||
|
return utils.RemoveExtension(torrent.Filename)
|
||||||
|
case WebDavUseOriginalNameNoExt:
|
||||||
|
return utils.RemoveExtension(torrent.OriginalFilename)
|
||||||
|
case WebDavUseID:
|
||||||
|
return torrent.Id
|
||||||
|
default:
|
||||||
|
return torrent.Filename
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Cache) setTorrent(t *CachedTorrent) {
|
||||||
|
c.torrents.Store(t.Id, t)
|
||||||
|
|
||||||
|
c.torrentsNames.Store(c.GetTorrentFolder(t.Torrent), t)
|
||||||
|
|
||||||
|
c.SaveTorrent(t)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Cache) setTorrents(torrents map[string]*CachedTorrent) {
|
||||||
|
for _, t := range torrents {
|
||||||
|
c.torrents.Store(t.Id, t)
|
||||||
|
c.torrentsNames.Store(c.GetTorrentFolder(t.Torrent), t)
|
||||||
|
}
|
||||||
|
|
||||||
|
c.refreshListings()
|
||||||
|
|
||||||
|
c.SaveTorrents()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Cache) GetListing() []os.FileInfo {
|
||||||
|
if v, ok := c.listings.Load().([]os.FileInfo); ok {
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Cache) Close() error {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Cache) GetTorrents() map[string]*CachedTorrent {
|
||||||
|
torrents := make(map[string]*CachedTorrent)
|
||||||
|
c.torrents.Range(func(key string, value *CachedTorrent) bool {
|
||||||
|
torrents[key] = value
|
||||||
|
return true
|
||||||
|
})
|
||||||
|
return torrents
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Cache) GetTorrent(id string) *CachedTorrent {
|
||||||
|
if t, ok := c.torrents.Load(id); ok {
|
||||||
|
return t
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Cache) GetTorrentByName(name string) *CachedTorrent {
|
||||||
|
if t, ok := c.torrentsNames.Load(name); ok {
|
||||||
|
return t
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Cache) SaveTorrents() {
|
||||||
|
c.torrents.Range(func(key string, value *CachedTorrent) bool {
|
||||||
|
c.SaveTorrent(value)
|
||||||
|
return true
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// SaveTorrent serialises a cached torrent to indented JSON and writes it to
// disk. Writes go through a bounded semaphore: when a slot is free the
// write runs on a background goroutine; when the semaphore is full it falls
// back to a synchronous write so saves are never dropped.
func (c *Cache) SaveTorrent(ct *CachedTorrent) {
	marshaled, err := json.MarshalIndent(ct, "", " ")
	if err != nil {
		c.logger.Debug().Err(err).Msgf("Failed to marshal torrent: %s", ct.Id)
		return
	}

	// Store just the essential info needed for the file operation
	saveInfo := struct {
		id       string
		jsonData []byte
	}{
		id:       ct.Torrent.Id,
		jsonData: marshaled,
	}

	// Try to acquire semaphore without blocking
	select {
	case c.saveSemaphore <- struct{}{}:
		// Slot acquired: write asynchronously, releasing the slot when done.
		go func() {
			defer func() { <-c.saveSemaphore }()
			c.saveTorrent(saveInfo.id, saveInfo.jsonData)
		}()
	default:
		// Semaphore full: write synchronously rather than dropping the save.
		c.saveTorrent(saveInfo.id, saveInfo.jsonData)
	}
}
|
||||||
|
|
||||||
|
// saveTorrent atomically writes a torrent's JSON payload to <id>.json via a
// uniquely named temp file followed by a rename, so readers never observe a
// partially written file. All failures are logged at debug level and
// otherwise ignored (best-effort persistence).
func (c *Cache) saveTorrent(id string, data []byte) {

	fileName := id + ".json"
	filePath := filepath.Join(c.dir, fileName)

	// Use a unique temporary filename for concurrent safety
	tmpFile := filePath + ".tmp." + strconv.FormatInt(time.Now().UnixNano(), 10)

	f, err := os.Create(tmpFile)
	if err != nil {
		c.logger.Debug().Err(err).Msgf("Failed to create file: %s", tmpFile)
		return
	}

	// Track if we've closed the file
	fileClosed := false
	defer func() {
		// Only close if not already closed
		if !fileClosed {
			_ = f.Close()
		}
		// Clean up the temp file if it still exists and rename failed
		_ = os.Remove(tmpFile)
	}()

	w := bufio.NewWriter(f)
	if _, err := w.Write(data); err != nil {
		c.logger.Debug().Err(err).Msgf("Failed to write data: %s", tmpFile)
		return
	}

	if err := w.Flush(); err != nil {
		c.logger.Debug().Err(err).Msgf("Failed to flush data: %s", tmpFile)
		return
	}

	// Close the file before renaming
	_ = f.Close()
	fileClosed = true

	// Rename is atomic on POSIX filesystems; on success the deferred
	// os.Remove of tmpFile becomes a harmless no-op.
	if err := os.Rename(tmpFile, filePath); err != nil {
		c.logger.Debug().Err(err).Msgf("Failed to rename file: %s", tmpFile)
		return
	}
}
|
||||||
|
|
||||||
|
// ProcessTorrent validates that a torrent has a full set of restricted file
// links, refreshing it from the provider when it does not, and caches it
// when complete. Incomplete torrents are logged (re-insert is currently
// disabled) and not cached. When refreshRclone is true the WebDAV listing
// is refreshed afterwards.
func (c *Cache) ProcessTorrent(t *types.Torrent, refreshRclone bool) error {

	// A torrent is complete when it has at least one file and every file
	// carries a restricted link.
	isComplete := func(files map[string]types.File) bool {
		_complete := len(files) > 0
		for _, file := range files {
			if file.Link == "" {
				_complete = false
				break
			}
		}
		return _complete
	}

	if !isComplete(t.Files) {
		// Re-fetch file details from the provider to fill in missing links.
		if err := c.client.UpdateTorrent(t); err != nil {
			return fmt.Errorf("failed to update torrent: %w", err)
		}
	}

	if !isComplete(t.Files) {
		c.logger.Debug().Msgf("Torrent %s is still not complete. Triggering a reinsert(disabled)", t.Id)
		//ct, err := c.reInsertTorrent(t)
		//if err != nil {
		//	c.logger.Debug().Err(err).Msgf("Failed to reinsert torrent %s", t.Id)
		//	return err
		//}
		//c.logger.Debug().Msgf("Reinserted torrent %s", ct.Id)

	} else {
		// Parse the provider's added timestamp; fall back to now when absent.
		addedOn, err := time.Parse(time.RFC3339, t.Added)
		if err != nil {
			addedOn = time.Now()
		}
		ct := &CachedTorrent{
			Torrent:    t,
			IsComplete: len(t.Files) > 0,
			AddedOn:    addedOn,
		}
		c.setTorrent(ct)
	}

	if refreshRclone {
		c.refreshListings()
	}
	return nil
}
|
||||||
|
|
||||||
|
func (c *Cache) GetDownloadLink(torrentId, filename, fileLink string) string {
|
||||||
|
|
||||||
|
// Check link cache
|
||||||
|
if dl := c.checkDownloadLink(fileLink); dl != "" {
|
||||||
|
return dl
|
||||||
|
}
|
||||||
|
|
||||||
|
ct := c.GetTorrent(torrentId)
|
||||||
|
if ct == nil {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
file := ct.Files[filename]
|
||||||
|
|
||||||
|
if file.Link == "" {
|
||||||
|
// file link is empty, refresh the torrent to get restricted links
|
||||||
|
ct = c.refreshTorrent(ct) // Refresh the torrent from the debrid
|
||||||
|
if ct == nil {
|
||||||
|
return ""
|
||||||
|
} else {
|
||||||
|
file = ct.Files[filename]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// If file.Link is still empty, return
|
||||||
|
if file.Link == "" {
|
||||||
|
c.logger.Debug().Msgf("File link is empty for %s. Release is probably nerfed", filename)
|
||||||
|
// Try to reinsert the torrent?
|
||||||
|
ct, err := c.reInsertTorrent(ct.Torrent)
|
||||||
|
if err != nil {
|
||||||
|
c.logger.Debug().Err(err).Msgf("Failed to reinsert torrent %s", ct.Name)
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
file = ct.Files[filename]
|
||||||
|
c.logger.Debug().Msgf("Reinserted torrent %s", ct.Name)
|
||||||
|
}
|
||||||
|
|
||||||
|
c.logger.Trace().Msgf("Getting download link for %s", filename)
|
||||||
|
downloadLink, accountId, err := c.client.GetDownloadLink(ct.Torrent, &file)
|
||||||
|
if err != nil {
|
||||||
|
if errors.Is(err, request.HosterUnavailableError) {
|
||||||
|
c.logger.Debug().Err(err).Msgf("Hoster is unavailable. Triggering repair for %s", ct.Name)
|
||||||
|
ct, err := c.reInsertTorrent(ct.Torrent)
|
||||||
|
if err != nil {
|
||||||
|
c.logger.Debug().Err(err).Msgf("Failed to reinsert torrent %s", ct.Name)
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
c.logger.Debug().Msgf("Reinserted torrent %s", ct.Name)
|
||||||
|
file = ct.Files[filename]
|
||||||
|
// Retry getting the download link
|
||||||
|
downloadLink, accountId, err = c.client.GetDownloadLink(ct.Torrent, &file)
|
||||||
|
if err != nil {
|
||||||
|
c.logger.Debug().Err(err).Msgf("Failed to get download link for %s", file.Link)
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
if downloadLink == "" {
|
||||||
|
c.logger.Debug().Msgf("Download link is empty for %s", file.Link)
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
file.DownloadLink = downloadLink
|
||||||
|
file.Generated = time.Now()
|
||||||
|
file.AccountId = accountId
|
||||||
|
ct.Files[filename] = file
|
||||||
|
go func() {
|
||||||
|
c.updateDownloadLink(file.Link, downloadLink, accountId)
|
||||||
|
c.setTorrent(ct)
|
||||||
|
}()
|
||||||
|
return file.DownloadLink
|
||||||
|
} else if errors.Is(err, request.TrafficExceededError) {
|
||||||
|
// This is likely a fair usage limit error
|
||||||
|
} else {
|
||||||
|
c.logger.Debug().Err(err).Msgf("Failed to get download link for %s", file.Link)
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
}
|
||||||
|
file.DownloadLink = downloadLink
|
||||||
|
file.Generated = time.Now()
|
||||||
|
file.AccountId = accountId
|
||||||
|
ct.Files[filename] = file
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
c.updateDownloadLink(file.Link, downloadLink, file.AccountId)
|
||||||
|
c.setTorrent(ct)
|
||||||
|
}()
|
||||||
|
return file.DownloadLink
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Cache) GenerateDownloadLinks(t *CachedTorrent) {
|
||||||
|
if err := c.client.GenerateDownloadLinks(t.Torrent); err != nil {
|
||||||
|
c.logger.Error().Err(err).Msg("Failed to generate download links")
|
||||||
|
}
|
||||||
|
for _, file := range t.Files {
|
||||||
|
c.updateDownloadLink(file.Link, file.DownloadLink, file.AccountId)
|
||||||
|
}
|
||||||
|
|
||||||
|
c.SaveTorrent(t)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Cache) AddTorrent(t *types.Torrent) error {
|
||||||
|
if len(t.Files) == 0 {
|
||||||
|
if err := c.client.UpdateTorrent(t); err != nil {
|
||||||
|
return fmt.Errorf("failed to update torrent: %w", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
addedOn, err := time.Parse(time.RFC3339, t.Added)
|
||||||
|
if err != nil {
|
||||||
|
addedOn = time.Now()
|
||||||
|
}
|
||||||
|
ct := &CachedTorrent{
|
||||||
|
Torrent: t,
|
||||||
|
IsComplete: len(t.Files) > 0,
|
||||||
|
AddedOn: addedOn,
|
||||||
|
}
|
||||||
|
c.setTorrent(ct)
|
||||||
|
c.refreshListings()
|
||||||
|
go c.GenerateDownloadLinks(ct)
|
||||||
|
return nil
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Cache) updateDownloadLink(link, downloadLink string, accountId string) {
|
||||||
|
c.downloadLinks.Store(link, downloadLinkCache{
|
||||||
|
Link: downloadLink,
|
||||||
|
ExpiresAt: time.Now().Add(c.autoExpiresLinksAfter),
|
||||||
|
AccountId: accountId,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Cache) checkDownloadLink(link string) string {
|
||||||
|
if dl, ok := c.downloadLinks.Load(link); ok {
|
||||||
|
if dl.ExpiresAt.After(time.Now()) && !c.IsDownloadLinkInvalid(dl.Link) {
|
||||||
|
return dl.Link
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarkDownloadLinkAsInvalid flags an unrestricted link as unusable and
// evicts its cache entry. When the reason is bandwidth exhaustion, the
// account that produced the link is also disabled.
func (c *Cache) MarkDownloadLinkAsInvalid(link, downloadLink, reason string) {
	c.invalidDownloadLinks.Store(downloadLink, reason)
	// Remove the download api key from active
	if reason == "bandwidth_exceeded" {
		if dl, ok := c.downloadLinks.Load(link); ok {
			// Only disable the account when this cache entry actually
			// produced the offending unrestricted link.
			if dl.AccountId != "" && dl.Link == downloadLink {
				c.client.DisableAccount(dl.AccountId)
			}
		}
	}
	c.downloadLinks.Delete(link) // Remove the download link from cache
}
|
||||||
|
|
||||||
|
func (c *Cache) IsDownloadLinkInvalid(downloadLink string) bool {
|
||||||
|
if reason, ok := c.invalidDownloadLinks.Load(downloadLink); ok {
|
||||||
|
c.logger.Debug().Msgf("Download link %s is invalid: %s", downloadLink, reason)
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Cache) GetClient() types.Client {
|
||||||
|
return c.client
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Cache) DeleteTorrent(id string) error {
|
||||||
|
c.logger.Info().Msgf("Deleting torrent %s", id)
|
||||||
|
c.torrentsRefreshMu.Lock()
|
||||||
|
defer c.torrentsRefreshMu.Unlock()
|
||||||
|
|
||||||
|
if t, ok := c.torrents.Load(id); ok {
|
||||||
|
_ = c.client.DeleteTorrent(id) // SKip error handling, we don't care if it fails
|
||||||
|
c.torrents.Delete(id)
|
||||||
|
c.torrentsNames.Delete(c.GetTorrentFolder(t.Torrent))
|
||||||
|
c.removeFromDB(id)
|
||||||
|
c.refreshListings()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeleteTorrents removes a batch of torrents from the in-memory maps and
// the on-disk cache, refreshing the listing once at the end.
// NOTE(review): unlike DeleteTorrent this does not call the provider's
// delete endpoint and takes no torrentsRefreshMu lock — confirm both are
// intentional before relying on this for provider-side deletion.
func (c *Cache) DeleteTorrents(ids []string) {
	c.logger.Info().Msgf("Deleting %d torrents", len(ids))
	for _, id := range ids {
		if t, ok := c.torrents.Load(id); ok {
			c.torrents.Delete(id)
			c.torrentsNames.Delete(c.GetTorrentFolder(t.Torrent))
			c.removeFromDB(id)
		}
	}
	c.refreshListings()
}
|
||||||
|
|
||||||
|
func (c *Cache) removeFromDB(torrentId string) {
|
||||||
|
// Moves the torrent file to the trash
|
||||||
|
filePath := filepath.Join(c.dir, torrentId+".json")
|
||||||
|
|
||||||
|
// Check if the file exists
|
||||||
|
if _, err := os.Stat(filePath); errors.Is(err, os.ErrNotExist) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Move the file to the trash
|
||||||
|
trashPath := filepath.Join(c.dir, "trash", torrentId+".json")
|
||||||
|
if err := os.MkdirAll(filepath.Dir(trashPath), 0755); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if err := os.Rename(filePath, trashPath); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Cache) OnRemove(torrentId string) {
|
||||||
|
c.logger.Debug().Msgf("OnRemove triggered for %s", torrentId)
|
||||||
|
err := c.DeleteTorrent(torrentId)
|
||||||
|
if err != nil {
|
||||||
|
c.logger.Error().Err(err).Msgf("Failed to delete torrent: %s", torrentId)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Cache) GetLogger() zerolog.Logger {
|
||||||
|
return c.logger
|
||||||
|
}
|
||||||
86
pkg/debrid/debrid/debrid.go
Normal file
86
pkg/debrid/debrid/debrid.go
Normal file
@@ -0,0 +1,86 @@
|
|||||||
|
package debrid
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"github.com/sirrobot01/decypharr/internal/config"
|
||||||
|
"github.com/sirrobot01/decypharr/internal/utils"
|
||||||
|
"github.com/sirrobot01/decypharr/pkg/arr"
|
||||||
|
"github.com/sirrobot01/decypharr/pkg/debrid/alldebrid"
|
||||||
|
"github.com/sirrobot01/decypharr/pkg/debrid/debrid_link"
|
||||||
|
"github.com/sirrobot01/decypharr/pkg/debrid/realdebrid"
|
||||||
|
"github.com/sirrobot01/decypharr/pkg/debrid/torbox"
|
||||||
|
"github.com/sirrobot01/decypharr/pkg/debrid/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
func createDebridClient(dc config.Debrid) types.Client {
|
||||||
|
switch dc.Name {
|
||||||
|
case "realdebrid":
|
||||||
|
return realdebrid.New(dc)
|
||||||
|
case "torbox":
|
||||||
|
return torbox.New(dc)
|
||||||
|
case "debridlink":
|
||||||
|
return debrid_link.New(dc)
|
||||||
|
case "alldebrid":
|
||||||
|
return alldebrid.New(dc)
|
||||||
|
default:
|
||||||
|
return realdebrid.New(dc)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func ProcessTorrent(d *Engine, magnet *utils.Magnet, a *arr.Arr, isSymlink, overrideDownloadUncached bool) (*types.Torrent, error) {
|
||||||
|
|
||||||
|
debridTorrent := &types.Torrent{
|
||||||
|
InfoHash: magnet.InfoHash,
|
||||||
|
Magnet: magnet,
|
||||||
|
Name: magnet.Name,
|
||||||
|
Arr: a,
|
||||||
|
Size: magnet.Size,
|
||||||
|
Files: make(map[string]types.File),
|
||||||
|
}
|
||||||
|
|
||||||
|
errs := make([]error, 0)
|
||||||
|
|
||||||
|
for index, db := range d.Clients {
|
||||||
|
logger := db.GetLogger()
|
||||||
|
logger.Info().Msgf("Processing debrid: %s", db.GetName())
|
||||||
|
|
||||||
|
// Override first, arr second, debrid third
|
||||||
|
|
||||||
|
if overrideDownloadUncached {
|
||||||
|
debridTorrent.DownloadUncached = true
|
||||||
|
} else if a.DownloadUncached != nil {
|
||||||
|
// Arr cached is set
|
||||||
|
debridTorrent.DownloadUncached = *a.DownloadUncached
|
||||||
|
} else {
|
||||||
|
debridTorrent.DownloadUncached = db.GetDownloadUncached()
|
||||||
|
}
|
||||||
|
|
||||||
|
logger.Info().Msgf("Torrent Hash: %s", debridTorrent.InfoHash)
|
||||||
|
if db.GetCheckCached() {
|
||||||
|
hash, exists := db.IsAvailable([]string{debridTorrent.InfoHash})[debridTorrent.InfoHash]
|
||||||
|
if !exists || !hash {
|
||||||
|
logger.Info().Msgf("Torrent: %s is not cached", debridTorrent.Name)
|
||||||
|
continue
|
||||||
|
} else {
|
||||||
|
logger.Info().Msgf("Torrent: %s is cached(or downloading)", debridTorrent.Name)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
dbt, err := db.SubmitMagnet(debridTorrent)
|
||||||
|
if dbt != nil {
|
||||||
|
dbt.Arr = a
|
||||||
|
}
|
||||||
|
if err != nil || dbt == nil || dbt.Id == "" {
|
||||||
|
errs = append(errs, err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
logger.Info().Msgf("Torrent: %s(id=%s) submitted to %s", dbt.Name, dbt.Id, db.GetName())
|
||||||
|
d.LastUsed = index
|
||||||
|
return db.CheckStatus(dbt, isSymlink)
|
||||||
|
}
|
||||||
|
err := fmt.Errorf("failed to process torrent")
|
||||||
|
for _, e := range errs {
|
||||||
|
err = fmt.Errorf("%w\n%w", err, e)
|
||||||
|
}
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
55
pkg/debrid/debrid/engine.go
Normal file
55
pkg/debrid/debrid/engine.go
Normal file
@@ -0,0 +1,55 @@
|
|||||||
|
package debrid
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/sirrobot01/decypharr/internal/config"
|
||||||
|
"github.com/sirrobot01/decypharr/pkg/debrid/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Engine holds one debrid client (and optionally one WebDAV cache) per
// configured provider, plus the name of the provider used for the most
// recent submission.
type Engine struct {
	Clients  map[string]types.Client // keyed by provider name (e.g. "realdebrid")
	Caches   map[string]*Cache       // populated only for providers with UseWebDav enabled
	LastUsed string                  // provider name of the last-used client; "" before first use
}
|
||||||
|
|
||||||
|
func NewEngine() *Engine {
|
||||||
|
cfg := config.Get()
|
||||||
|
clients := make(map[string]types.Client)
|
||||||
|
|
||||||
|
caches := make(map[string]*Cache)
|
||||||
|
|
||||||
|
for _, dc := range cfg.Debrids {
|
||||||
|
client := createDebridClient(dc)
|
||||||
|
logger := client.GetLogger()
|
||||||
|
if dc.UseWebDav {
|
||||||
|
caches[dc.Name] = New(dc, client)
|
||||||
|
logger.Info().Msg("Debrid Service started with WebDAV")
|
||||||
|
} else {
|
||||||
|
logger.Info().Msg("Debrid Service started")
|
||||||
|
}
|
||||||
|
clients[dc.Name] = client
|
||||||
|
}
|
||||||
|
|
||||||
|
d := &Engine{
|
||||||
|
Clients: clients,
|
||||||
|
LastUsed: "",
|
||||||
|
Caches: caches,
|
||||||
|
}
|
||||||
|
return d
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Engine) Get() types.Client {
|
||||||
|
if d.LastUsed == "" {
|
||||||
|
for _, c := range d.Clients {
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return d.Clients[d.LastUsed]
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Engine) GetByName(name string) types.Client {
|
||||||
|
return d.Clients[name]
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Engine) GetDebrids() map[string]types.Client {
|
||||||
|
return d.Clients
|
||||||
|
}
|
||||||
249
pkg/debrid/debrid/refresh.go
Normal file
249
pkg/debrid/debrid/refresh.go
Normal file
@@ -0,0 +1,249 @@
|
|||||||
|
package debrid
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"github.com/sirrobot01/decypharr/internal/config"
|
||||||
|
"github.com/sirrobot01/decypharr/internal/request"
|
||||||
|
"github.com/sirrobot01/decypharr/pkg/debrid/types"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"os"
|
||||||
|
"slices"
|
||||||
|
"sort"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// fileInfo is a minimal os.FileInfo implementation used to present cached
// torrents as virtual directory entries in the WebDAV listing.
type fileInfo struct {
	name    string      // entry (torrent folder) name
	size    int64       // 0 in current usage — entries are directories
	mode    os.FileMode // permission bits; os.ModeDir is set for directories
	modTime time.Time   // torrent's added-on time
	isDir   bool
}
|
||||||
|
|
||||||
|
// os.FileInfo implementation for fileInfo.
func (fi *fileInfo) Name() string       { return fi.name }
func (fi *fileInfo) Size() int64        { return fi.size }
func (fi *fileInfo) Mode() os.FileMode  { return fi.mode }
func (fi *fileInfo) ModTime() time.Time { return fi.modTime }
func (fi *fileInfo) IsDir() bool        { return fi.isDir }
func (fi *fileInfo) Sys() interface{}   { return nil } // no underlying data source
|
||||||
|
|
||||||
|
func (c *Cache) refreshListings() {
|
||||||
|
if c.listingRefreshMu.TryLock() {
|
||||||
|
defer c.listingRefreshMu.Unlock()
|
||||||
|
} else {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
// COpy the torrents to a string|time map
|
||||||
|
torrentsTime := make(map[string]time.Time, c.torrents.Size())
|
||||||
|
torrents := make([]string, 0, c.torrents.Size())
|
||||||
|
c.torrentsNames.Range(func(key string, value *CachedTorrent) bool {
|
||||||
|
torrentsTime[key] = value.AddedOn
|
||||||
|
torrents = append(torrents, key)
|
||||||
|
return true
|
||||||
|
})
|
||||||
|
|
||||||
|
// Sort the torrents by name
|
||||||
|
sort.Strings(torrents)
|
||||||
|
|
||||||
|
files := make([]os.FileInfo, 0, len(torrents))
|
||||||
|
for _, t := range torrents {
|
||||||
|
files = append(files, &fileInfo{
|
||||||
|
name: t,
|
||||||
|
size: 0,
|
||||||
|
mode: 0755 | os.ModeDir,
|
||||||
|
modTime: torrentsTime[t],
|
||||||
|
isDir: true,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
// Atomic store of the complete ready-to-use slice
|
||||||
|
c.listings.Store(files)
|
||||||
|
_ = c.refreshXml()
|
||||||
|
if err := c.RefreshRclone(); err != nil {
|
||||||
|
c.logger.Trace().Err(err).Msg("Failed to refresh rclone") // silent error
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Cache) refreshTorrents() {
|
||||||
|
if c.torrentsRefreshMu.TryLock() {
|
||||||
|
defer c.torrentsRefreshMu.Unlock()
|
||||||
|
} else {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
// Create a copy of the current torrents to avoid concurrent issues
|
||||||
|
torrents := make(map[string]string, c.torrents.Size()) // a mpa of id and name
|
||||||
|
c.torrents.Range(func(key string, t *CachedTorrent) bool {
|
||||||
|
torrents[t.Id] = t.Name
|
||||||
|
return true
|
||||||
|
})
|
||||||
|
|
||||||
|
// Get new torrents from the debrid service
|
||||||
|
debTorrents, err := c.client.GetTorrents()
|
||||||
|
if err != nil {
|
||||||
|
c.logger.Debug().Err(err).Msg("Failed to get torrents")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(debTorrents) == 0 {
|
||||||
|
// Maybe an error occurred
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get the newly added torrents only
|
||||||
|
_newTorrents := make([]*types.Torrent, 0)
|
||||||
|
idStore := make(map[string]struct{}, len(debTorrents))
|
||||||
|
for _, t := range debTorrents {
|
||||||
|
idStore[t.Id] = struct{}{}
|
||||||
|
if _, ok := torrents[t.Id]; !ok {
|
||||||
|
_newTorrents = append(_newTorrents, t)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check for deleted torrents
|
||||||
|
deletedTorrents := make([]string, 0)
|
||||||
|
for id := range torrents {
|
||||||
|
if _, ok := idStore[id]; !ok {
|
||||||
|
deletedTorrents = append(deletedTorrents, id)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
newTorrents := make([]*types.Torrent, 0)
|
||||||
|
for _, t := range _newTorrents {
|
||||||
|
if !slices.Contains(deletedTorrents, t.Id) {
|
||||||
|
newTorrents = append(newTorrents, t)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(deletedTorrents) > 0 {
|
||||||
|
c.DeleteTorrents(deletedTorrents)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(newTorrents) == 0 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
c.logger.Info().Msgf("Found %d new torrents", len(newTorrents))
|
||||||
|
|
||||||
|
workChan := make(chan *types.Torrent, min(100, len(newTorrents)))
|
||||||
|
errChan := make(chan error, len(newTorrents))
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
|
||||||
|
for i := 0; i < c.workers; i++ {
|
||||||
|
wg.Add(1)
|
||||||
|
go func() {
|
||||||
|
defer wg.Done()
|
||||||
|
for t := range workChan {
|
||||||
|
select {
|
||||||
|
case <-c.ctx.Done():
|
||||||
|
return
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
if err := c.ProcessTorrent(t, true); err != nil {
|
||||||
|
c.logger.Debug().Err(err).Msgf("Failed to process new torrent %s", t.Id)
|
||||||
|
errChan <- err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, t := range newTorrents {
|
||||||
|
select {
|
||||||
|
case <-c.ctx.Done():
|
||||||
|
break
|
||||||
|
default:
|
||||||
|
workChan <- t
|
||||||
|
}
|
||||||
|
}
|
||||||
|
close(workChan)
|
||||||
|
wg.Wait()
|
||||||
|
|
||||||
|
c.logger.Debug().Msgf("Processed %d new torrents", len(newTorrents))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Cache) RefreshRclone() error {
|
||||||
|
client := request.Default()
|
||||||
|
cfg := config.Get().WebDav
|
||||||
|
|
||||||
|
if cfg.RcUrl == "" {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
// Create form data
|
||||||
|
data := "dir=__all__&dir2=torrents"
|
||||||
|
|
||||||
|
// Create a POST request with form URL-encoded content
|
||||||
|
forgetReq, err := http.NewRequest("POST", fmt.Sprintf("%s/vfs/forget", cfg.RcUrl), strings.NewReader(data))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if cfg.RcUser != "" && cfg.RcPass != "" {
|
||||||
|
forgetReq.SetBasicAuth(cfg.RcUser, cfg.RcPass)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set the appropriate content type for form data
|
||||||
|
forgetReq.Header.Set("Content-Type", "application/x-www-form-urlencoded")
|
||||||
|
|
||||||
|
// Send the request
|
||||||
|
forgetResp, err := client.Do(forgetReq)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer forgetResp.Body.Close()
|
||||||
|
|
||||||
|
if forgetResp.StatusCode != 200 {
|
||||||
|
body, _ := io.ReadAll(forgetResp.Body)
|
||||||
|
return fmt.Errorf("failed to forget rclone: %s - %s", forgetResp.Status, string(body))
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Cache) refreshTorrent(t *CachedTorrent) *CachedTorrent {
|
||||||
|
_torrent := t.Torrent
|
||||||
|
err := c.client.UpdateTorrent(_torrent)
|
||||||
|
if err != nil {
|
||||||
|
c.logger.Debug().Msgf("Failed to get torrent files for %s: %v", t.Id, err)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if len(t.Files) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
addedOn, err := time.Parse(time.RFC3339, _torrent.Added)
|
||||||
|
if err != nil {
|
||||||
|
addedOn = time.Now()
|
||||||
|
}
|
||||||
|
ct := &CachedTorrent{
|
||||||
|
Torrent: _torrent,
|
||||||
|
AddedOn: addedOn,
|
||||||
|
IsComplete: len(t.Files) > 0,
|
||||||
|
}
|
||||||
|
c.setTorrent(ct)
|
||||||
|
|
||||||
|
return ct
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Cache) refreshDownloadLinks() {
|
||||||
|
if c.downloadLinksRefreshMu.TryLock() {
|
||||||
|
defer c.downloadLinksRefreshMu.Unlock()
|
||||||
|
} else {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
downloadLinks, err := c.client.GetDownloads()
|
||||||
|
if err != nil {
|
||||||
|
c.logger.Debug().Err(err).Msg("Failed to get download links")
|
||||||
|
}
|
||||||
|
for k, v := range downloadLinks {
|
||||||
|
// if link is generated in the last 24 hours, add it to cache
|
||||||
|
timeSince := time.Since(v.Generated)
|
||||||
|
if timeSince < c.autoExpiresLinksAfter {
|
||||||
|
c.downloadLinks.Store(k, downloadLinkCache{
|
||||||
|
Link: v.DownloadLink,
|
||||||
|
ExpiresAt: v.Generated.Add(c.autoExpiresLinksAfter - timeSince),
|
||||||
|
})
|
||||||
|
} else {
|
||||||
|
c.downloadLinks.Delete(k)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
c.logger.Debug().Msgf("Refreshed %d download links", len(downloadLinks))
|
||||||
|
|
||||||
|
}
|
||||||
168
pkg/debrid/debrid/repair.go
Normal file
168
pkg/debrid/debrid/repair.go
Normal file
@@ -0,0 +1,168 @@
|
|||||||
|
package debrid
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"github.com/puzpuzpuz/xsync/v3"
|
||||||
|
"github.com/sirrobot01/decypharr/internal/request"
|
||||||
|
"github.com/sirrobot01/decypharr/internal/utils"
|
||||||
|
"github.com/sirrobot01/decypharr/pkg/debrid/types"
|
||||||
|
"slices"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
func (c *Cache) IsTorrentBroken(t *CachedTorrent, filenames []string) bool {
|
||||||
|
// Check torrent files
|
||||||
|
|
||||||
|
isBroken := false
|
||||||
|
files := make(map[string]types.File)
|
||||||
|
if len(filenames) > 0 {
|
||||||
|
for name, f := range t.Files {
|
||||||
|
if slices.Contains(filenames, name) {
|
||||||
|
files[name] = f
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
files = t.Files
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check empty links
|
||||||
|
for _, f := range files {
|
||||||
|
// Check if file is missing
|
||||||
|
if f.Link == "" {
|
||||||
|
// refresh torrent and then break
|
||||||
|
t = c.refreshTorrent(t)
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
files = t.Files
|
||||||
|
|
||||||
|
for _, f := range files {
|
||||||
|
// Check if file link is still missing
|
||||||
|
if f.Link == "" {
|
||||||
|
isBroken = true
|
||||||
|
break
|
||||||
|
} else {
|
||||||
|
// Check if file.Link not in the downloadLink Cache
|
||||||
|
if err := c.client.CheckLink(f.Link); err != nil {
|
||||||
|
if errors.Is(err, request.HosterUnavailableError) {
|
||||||
|
isBroken = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return isBroken
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Cache) repairWorker() {
|
||||||
|
// This watches a channel for torrents to repair
|
||||||
|
for req := range c.repairChan {
|
||||||
|
torrentId := req.TorrentID
|
||||||
|
if _, inProgress := c.repairsInProgress.Load(torrentId); inProgress {
|
||||||
|
c.logger.Debug().Str("torrentId", torrentId).Msg("Skipping duplicate repair request")
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Mark as in progress
|
||||||
|
c.repairsInProgress.Store(torrentId, struct{}{})
|
||||||
|
c.logger.Debug().Str("torrentId", req.TorrentID).Msg("Received repair request")
|
||||||
|
|
||||||
|
// Get the torrent from the cache
|
||||||
|
cachedTorrent, ok := c.torrents.Load(torrentId)
|
||||||
|
if !ok || cachedTorrent == nil {
|
||||||
|
c.logger.Warn().Str("torrentId", torrentId).Msg("Torrent not found in cache")
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
switch req.Type {
|
||||||
|
case RepairTypeReinsert:
|
||||||
|
c.logger.Debug().Str("torrentId", torrentId).Msg("Reinserting torrent")
|
||||||
|
var err error
|
||||||
|
cachedTorrent, err = c.reInsertTorrent(cachedTorrent.Torrent)
|
||||||
|
if err != nil {
|
||||||
|
c.logger.Error().Err(err).Str("torrentId", cachedTorrent.Id).Msg("Failed to reinsert torrent")
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
case RepairTypeDelete:
|
||||||
|
c.logger.Debug().Str("torrentId", torrentId).Msg("Deleting torrent")
|
||||||
|
if err := c.DeleteTorrent(torrentId); err != nil {
|
||||||
|
c.logger.Error().Err(err).Str("torrentId", torrentId).Msg("Failed to delete torrent")
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
c.repairsInProgress.Delete(torrentId)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// reInsertTorrent re-submits a torrent's magnet to the debrid service under a
// new id, waits for it to complete, and replaces the cache entry. The old
// torrent is always deleted (deferred) regardless of outcome.
//
// NOTE(review): repairWorker stores the id in repairsInProgress *before*
// calling this function, so the in-progress check below would appear to
// always fail for that call path — verify against all callers.
func (c *Cache) reInsertTorrent(torrent *types.Torrent) (*CachedTorrent, error) {
	// Refuse to run two repairs of the same torrent concurrently.
	if _, ok := c.repairsInProgress.Load(torrent.Id); ok {
		return nil, fmt.Errorf("repair already in progress for torrent %s", torrent.Id)
	}

	// Check if Magnet is not empty, if empty, reconstruct the magnet
	if torrent.Magnet == nil {
		torrent.Magnet = utils.ConstructMagnet(torrent.InfoHash, torrent.Name)
	}

	// Always delete the old torrent on the way out, success or failure.
	oldID := torrent.Id
	defer func() {
		err := c.DeleteTorrent(oldID)
		if err != nil {
			c.logger.Error().Err(err).Str("torrentId", oldID).Msg("Failed to delete old torrent")
		}
	}()

	// Submit the magnet to the debrid service (blank id forces a fresh add).
	torrent.Id = ""
	var err error
	torrent, err = c.client.SubmitMagnet(torrent)
	if err != nil {
		// Remove the old torrent from the cache and debrid service
		return nil, fmt.Errorf("failed to submit magnet: %w", err)
	}

	// Check if the torrent was submitted
	if torrent == nil || torrent.Id == "" {
		return nil, fmt.Errorf("failed to submit magnet: empty torrent")
	}
	torrent.DownloadUncached = false // Set to false, avoid re-downloading
	torrent, err = c.client.CheckStatus(torrent, true)
	if err != nil && torrent != nil {
		// Torrent is likely in progress — remove the half-added copy.
		_ = c.DeleteTorrent(torrent.Id)

		return nil, fmt.Errorf("failed to check status: %w", err)
	}

	if torrent == nil {
		return nil, fmt.Errorf("failed to check status: empty torrent")
	}

	// Update the torrent in the cache.
	// NOTE(review): the parse error is deliberately examined only after the
	// link-validation loop below — err is not clobbered in between.
	addedOn, err := time.Parse(time.RFC3339, torrent.Added)
	for _, f := range torrent.Files {
		if f.Link == "" {
			// A file without a link means the reinsert failed — delete the
			// new torrent and report.
			_ = c.DeleteTorrent(torrent.Id)
			return nil, fmt.Errorf("failed to reinsert torrent: empty link")
		}
	}
	if err != nil {
		addedOn = time.Now()
	}
	ct := &CachedTorrent{
		Torrent:    torrent,
		IsComplete: len(torrent.Files) > 0,
		AddedOn:    addedOn,
	}
	c.setTorrent(ct)
	c.refreshListings()
	return ct, nil
}
|
||||||
|
|
||||||
|
// resetInvalidLinks discards the invalid-download-link blacklist by swapping
// in a fresh map, and resets the client's active download keys (run at the
// daily reset boundary by resetInvalidLinksWorker).
func (c *Cache) resetInvalidLinks() {
	c.invalidDownloadLinks = xsync.NewMapOf[string, string]()
	c.client.ResetActiveDownloadKeys() // Reset the active download keys
}
|
||||||
75
pkg/debrid/debrid/worker.go
Normal file
75
pkg/debrid/debrid/worker.go
Normal file
@@ -0,0 +1,75 @@
|
|||||||
|
package debrid
|
||||||
|
|
||||||
|
import "time"
|
||||||
|
|
||||||
|
// Refresh launches the cache's background maintenance workers (torrent sync
// and the nightly invalid-link reset). The download-link refresher is
// currently disabled. Always returns nil.
func (c *Cache) Refresh() error {
	// For now, we just want to refresh the listing and download links
	//go c.refreshDownloadLinksWorker()
	go c.refreshTorrentsWorker()
	go c.resetInvalidLinksWorker()
	return nil
}
|
||||||
|
|
||||||
|
func (c *Cache) refreshDownloadLinksWorker() {
|
||||||
|
refreshTicker := time.NewTicker(c.downloadLinksRefreshInterval)
|
||||||
|
defer refreshTicker.Stop()
|
||||||
|
|
||||||
|
for range refreshTicker.C {
|
||||||
|
c.refreshDownloadLinks()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Cache) refreshTorrentsWorker() {
|
||||||
|
refreshTicker := time.NewTicker(c.torrentRefreshInterval)
|
||||||
|
defer refreshTicker.Stop()
|
||||||
|
|
||||||
|
for range refreshTicker.C {
|
||||||
|
c.refreshTorrents()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Cache) resetInvalidLinksWorker() {
|
||||||
|
// Calculate time until next 00:00 CET
|
||||||
|
now := time.Now()
|
||||||
|
loc, err := time.LoadLocation("CET")
|
||||||
|
if err != nil {
|
||||||
|
// Fallback if CET timezone can't be loaded
|
||||||
|
c.logger.Error().Err(err).Msg("Failed to load CET timezone, using local time")
|
||||||
|
loc = time.Local
|
||||||
|
}
|
||||||
|
|
||||||
|
nowInCET := now.In(loc)
|
||||||
|
next := time.Date(
|
||||||
|
nowInCET.Year(),
|
||||||
|
nowInCET.Month(),
|
||||||
|
nowInCET.Day(),
|
||||||
|
0, 0, 0, 0,
|
||||||
|
loc,
|
||||||
|
)
|
||||||
|
|
||||||
|
// If it's already past 12:00 CET today, schedule for tomorrow
|
||||||
|
if nowInCET.After(next) {
|
||||||
|
next = next.Add(24 * time.Hour)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Duration until next 12:00 CET
|
||||||
|
initialWait := next.Sub(nowInCET)
|
||||||
|
|
||||||
|
// Set up initial timer
|
||||||
|
timer := time.NewTimer(initialWait)
|
||||||
|
defer timer.Stop()
|
||||||
|
|
||||||
|
c.logger.Debug().Msgf("Scheduled Links Reset at %s (in %s)", next.Format("2006-01-02 15:04:05 MST"), initialWait)
|
||||||
|
|
||||||
|
// Wait for the first execution
|
||||||
|
<-timer.C
|
||||||
|
c.resetInvalidLinks()
|
||||||
|
|
||||||
|
// Now set up the daily ticker
|
||||||
|
refreshTicker := time.NewTicker(24 * time.Hour)
|
||||||
|
defer refreshTicker.Stop()
|
||||||
|
|
||||||
|
for range refreshTicker.C {
|
||||||
|
c.resetInvalidLinks()
|
||||||
|
}
|
||||||
|
}
|
||||||
125
pkg/debrid/debrid/xml.go
Normal file
125
pkg/debrid/debrid/xml.go
Normal file
@@ -0,0 +1,125 @@
|
|||||||
|
package debrid
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"github.com/beevik/etree"
|
||||||
|
"github.com/sirrobot01/decypharr/internal/request"
|
||||||
|
"net/http"
|
||||||
|
"os"
|
||||||
|
path "path/filepath"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
func (c *Cache) refreshXml() error {
|
||||||
|
parents := []string{"__all__", "torrents"}
|
||||||
|
torrents := c.GetListing()
|
||||||
|
for _, parent := range parents {
|
||||||
|
if err := c.refreshParentXml(torrents, parent); err != nil {
|
||||||
|
return fmt.Errorf("failed to refresh XML for %s: %v", parent, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
c.logger.Trace().Msgf("Refreshed XML cache for %s", c.client.GetName())
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Cache) refreshParentXml(torrents []os.FileInfo, parent string) error {
|
||||||
|
// Define the WebDAV namespace
|
||||||
|
davNS := "DAV:"
|
||||||
|
|
||||||
|
// Create the root multistatus element
|
||||||
|
doc := etree.NewDocument()
|
||||||
|
doc.CreateProcInst("xml", `version="1.0" encoding="UTF-8"`)
|
||||||
|
|
||||||
|
multistatus := doc.CreateElement("D:multistatus")
|
||||||
|
multistatus.CreateAttr("xmlns:D", davNS)
|
||||||
|
|
||||||
|
// Get the current timestamp in RFC1123 format (WebDAV format)
|
||||||
|
currentTime := time.Now().UTC().Format(http.TimeFormat)
|
||||||
|
|
||||||
|
// Add the parent directory
|
||||||
|
baseUrl := path.Clean(fmt.Sprintf("/webdav/%s/%s", c.client.GetName(), parent))
|
||||||
|
parentPath := fmt.Sprintf("%s/", baseUrl)
|
||||||
|
addDirectoryResponse(multistatus, parentPath, parent, currentTime)
|
||||||
|
|
||||||
|
// Add torrents to the XML
|
||||||
|
for _, torrent := range torrents {
|
||||||
|
name := torrent.Name()
|
||||||
|
// Note the path structure change - parent first, then torrent name
|
||||||
|
torrentPath := fmt.Sprintf("/webdav/%s/%s/%s/",
|
||||||
|
c.client.GetName(),
|
||||||
|
parent,
|
||||||
|
name,
|
||||||
|
)
|
||||||
|
|
||||||
|
addDirectoryResponse(multistatus, torrentPath, name, currentTime)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Convert to XML string
|
||||||
|
xmlData, err := doc.WriteToBytes()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to generate XML: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Store in cache
|
||||||
|
key0 := fmt.Sprintf("propfind:%s:0", baseUrl)
|
||||||
|
key1 := fmt.Sprintf("propfind:%s:1", baseUrl)
|
||||||
|
|
||||||
|
res := PropfindResponse{
|
||||||
|
Data: xmlData,
|
||||||
|
GzippedData: request.Gzip(xmlData),
|
||||||
|
Ts: time.Now(),
|
||||||
|
}
|
||||||
|
c.PropfindResp.Store(key0, res)
|
||||||
|
c.PropfindResp.Store(key1, res)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func addDirectoryResponse(multistatus *etree.Element, href, displayName, modTime string) *etree.Element {
|
||||||
|
responseElem := multistatus.CreateElement("D:response")
|
||||||
|
|
||||||
|
// Add href - ensure it's properly formatted
|
||||||
|
hrefElem := responseElem.CreateElement("D:href")
|
||||||
|
hrefElem.SetText(href)
|
||||||
|
|
||||||
|
// Add propstat
|
||||||
|
propstatElem := responseElem.CreateElement("D:propstat")
|
||||||
|
|
||||||
|
// Add prop
|
||||||
|
propElem := propstatElem.CreateElement("D:prop")
|
||||||
|
|
||||||
|
// Add resource type (collection = directory)
|
||||||
|
resourceTypeElem := propElem.CreateElement("D:resourcetype")
|
||||||
|
resourceTypeElem.CreateElement("D:collection")
|
||||||
|
|
||||||
|
// Add display name
|
||||||
|
displayNameElem := propElem.CreateElement("D:displayname")
|
||||||
|
displayNameElem.SetText(displayName)
|
||||||
|
|
||||||
|
// Add last modified time
|
||||||
|
lastModElem := propElem.CreateElement("D:getlastmodified")
|
||||||
|
lastModElem.SetText(modTime)
|
||||||
|
|
||||||
|
// Add content type for directories
|
||||||
|
contentTypeElem := propElem.CreateElement("D:getcontenttype")
|
||||||
|
contentTypeElem.SetText("httpd/unix-directory")
|
||||||
|
|
||||||
|
// Add length (size) - directories typically have zero size
|
||||||
|
contentLengthElem := propElem.CreateElement("D:getcontentlength")
|
||||||
|
contentLengthElem.SetText("0")
|
||||||
|
|
||||||
|
// Add supported lock
|
||||||
|
lockElem := propElem.CreateElement("D:supportedlock")
|
||||||
|
lockEntryElem := lockElem.CreateElement("D:lockentry")
|
||||||
|
|
||||||
|
lockScopeElem := lockEntryElem.CreateElement("D:lockscope")
|
||||||
|
lockScopeElem.CreateElement("D:exclusive")
|
||||||
|
|
||||||
|
lockTypeElem := lockEntryElem.CreateElement("D:locktype")
|
||||||
|
lockTypeElem.CreateElement("D:write")
|
||||||
|
|
||||||
|
// Add status
|
||||||
|
statusElem := propstatElem.CreateElement("D:status")
|
||||||
|
statusElem.SetText("HTTP/1.1 200 OK")
|
||||||
|
|
||||||
|
return responseElem
|
||||||
|
}
|
||||||
@@ -2,18 +2,20 @@ package debrid_link
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"github.com/goccy/go-json"
|
||||||
|
"github.com/puzpuzpuz/xsync/v3"
|
||||||
"github.com/rs/zerolog"
|
"github.com/rs/zerolog"
|
||||||
"github.com/sirrobot01/debrid-blackhole/internal/cache"
|
"github.com/sirrobot01/decypharr/internal/config"
|
||||||
"github.com/sirrobot01/debrid-blackhole/internal/config"
|
"github.com/sirrobot01/decypharr/internal/logger"
|
||||||
"github.com/sirrobot01/debrid-blackhole/internal/logger"
|
"github.com/sirrobot01/decypharr/internal/request"
|
||||||
"github.com/sirrobot01/debrid-blackhole/internal/request"
|
"github.com/sirrobot01/decypharr/internal/utils"
|
||||||
"github.com/sirrobot01/debrid-blackhole/internal/utils"
|
"github.com/sirrobot01/decypharr/pkg/debrid/types"
|
||||||
"github.com/sirrobot01/debrid-blackhole/pkg/debrid/torrent"
|
"slices"
|
||||||
|
"strconv"
|
||||||
|
"time"
|
||||||
|
|
||||||
"net/http"
|
"net/http"
|
||||||
"os"
|
|
||||||
"strings"
|
"strings"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -21,9 +23,10 @@ type DebridLink struct {
|
|||||||
Name string
|
Name string
|
||||||
Host string `json:"host"`
|
Host string `json:"host"`
|
||||||
APIKey string
|
APIKey string
|
||||||
|
DownloadKeys *xsync.MapOf[string, types.Account]
|
||||||
DownloadUncached bool
|
DownloadUncached bool
|
||||||
client *request.RLHTTPClient
|
client *request.Client
|
||||||
cache *cache.Cache
|
|
||||||
MountPath string
|
MountPath string
|
||||||
logger zerolog.Logger
|
logger zerolog.Logger
|
||||||
CheckCached bool
|
CheckCached bool
|
||||||
@@ -37,15 +40,9 @@ func (dl *DebridLink) GetLogger() zerolog.Logger {
|
|||||||
return dl.logger
|
return dl.logger
|
||||||
}
|
}
|
||||||
|
|
||||||
func (dl *DebridLink) IsAvailable(infohashes []string) map[string]bool {
|
func (dl *DebridLink) IsAvailable(hashes []string) map[string]bool {
|
||||||
// Check if the infohashes are available in the local cache
|
// Check if the infohashes are available in the local cache
|
||||||
hashes, result := torrent.GetLocalCache(infohashes, dl.cache)
|
result := make(map[string]bool)
|
||||||
|
|
||||||
if len(hashes) == 0 {
|
|
||||||
// Either all the infohashes are locally cached or none are
|
|
||||||
dl.cache.AddMultiple(result)
|
|
||||||
return result
|
|
||||||
}
|
|
||||||
|
|
||||||
// Divide hashes into groups of 100
|
// Divide hashes into groups of 100
|
||||||
for i := 0; i < len(hashes); i += 100 {
|
for i := 0; i < len(hashes); i += 100 {
|
||||||
@@ -92,33 +89,31 @@ func (dl *DebridLink) IsAvailable(infohashes []string) map[string]bool {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
dl.cache.AddMultiple(result) // Add the results to the cache
|
|
||||||
return result
|
return result
|
||||||
}
|
}
|
||||||
|
|
||||||
func (dl *DebridLink) GetTorrent(id string) (*torrent.Torrent, error) {
|
func (dl *DebridLink) UpdateTorrent(t *types.Torrent) error {
|
||||||
t := &torrent.Torrent{}
|
url := fmt.Sprintf("%s/seedbox/list?ids=%s", dl.Host, t.Id)
|
||||||
url := fmt.Sprintf("%s/seedbox/list?ids=%s", dl.Host, id)
|
|
||||||
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
||||||
resp, err := dl.client.MakeRequest(req)
|
resp, err := dl.client.MakeRequest(req)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return t, err
|
return err
|
||||||
}
|
}
|
||||||
var res TorrentInfo
|
var res TorrentInfo
|
||||||
err = json.Unmarshal(resp, &res)
|
err = json.Unmarshal(resp, &res)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return t, err
|
return err
|
||||||
}
|
}
|
||||||
if res.Success == false {
|
if !res.Success {
|
||||||
return t, fmt.Errorf("error getting torrent")
|
return fmt.Errorf("error getting torrent")
|
||||||
}
|
}
|
||||||
if res.Value == nil {
|
if res.Value == nil {
|
||||||
return t, fmt.Errorf("torrent not found")
|
return fmt.Errorf("torrent not found")
|
||||||
}
|
}
|
||||||
dt := *res.Value
|
dt := *res.Value
|
||||||
|
|
||||||
if len(dt) == 0 {
|
if len(dt) == 0 {
|
||||||
return t, fmt.Errorf("torrent not found")
|
return fmt.Errorf("torrent not found")
|
||||||
}
|
}
|
||||||
data := dt[0]
|
data := dt[0]
|
||||||
status := "downloading"
|
status := "downloading"
|
||||||
@@ -136,24 +131,25 @@ func (dl *DebridLink) GetTorrent(id string) (*torrent.Torrent, error) {
|
|||||||
t.Seeders = data.PeersConnected
|
t.Seeders = data.PeersConnected
|
||||||
t.Filename = name
|
t.Filename = name
|
||||||
t.OriginalFilename = name
|
t.OriginalFilename = name
|
||||||
files := make([]torrent.File, len(data.Files))
|
cfg := config.Get()
|
||||||
cfg := config.GetConfig()
|
for _, f := range data.Files {
|
||||||
for i, f := range data.Files {
|
|
||||||
if !cfg.IsSizeAllowed(f.Size) {
|
if !cfg.IsSizeAllowed(f.Size) {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
files[i] = torrent.File{
|
file := types.File{
|
||||||
Id: f.ID,
|
Id: f.ID,
|
||||||
Name: f.Name,
|
Name: f.Name,
|
||||||
Size: f.Size,
|
Size: f.Size,
|
||||||
Path: f.Name,
|
Path: f.Name,
|
||||||
|
DownloadLink: f.DownloadURL,
|
||||||
|
Link: f.DownloadURL,
|
||||||
}
|
}
|
||||||
|
t.Files[f.Name] = file
|
||||||
}
|
}
|
||||||
t.Files = files
|
return nil
|
||||||
return t, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (dl *DebridLink) SubmitMagnet(t *torrent.Torrent) (*torrent.Torrent, error) {
|
func (dl *DebridLink) SubmitMagnet(t *types.Torrent) (*types.Torrent, error) {
|
||||||
url := fmt.Sprintf("%s/seedbox/add", dl.Host)
|
url := fmt.Sprintf("%s/seedbox/add", dl.Host)
|
||||||
payload := map[string]string{"url": t.Magnet.Link}
|
payload := map[string]string{"url": t.Magnet.Link}
|
||||||
jsonPayload, _ := json.Marshal(payload)
|
jsonPayload, _ := json.Marshal(payload)
|
||||||
@@ -167,7 +163,7 @@ func (dl *DebridLink) SubmitMagnet(t *torrent.Torrent) (*torrent.Torrent, error)
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
if res.Success == false || res.Value == nil {
|
if !res.Success || res.Value == nil {
|
||||||
return nil, fmt.Errorf("error adding torrent")
|
return nil, fmt.Errorf("error adding torrent")
|
||||||
}
|
}
|
||||||
data := *res.Value
|
data := *res.Value
|
||||||
@@ -185,45 +181,43 @@ func (dl *DebridLink) SubmitMagnet(t *torrent.Torrent) (*torrent.Torrent, error)
|
|||||||
t.OriginalFilename = name
|
t.OriginalFilename = name
|
||||||
t.MountPath = dl.MountPath
|
t.MountPath = dl.MountPath
|
||||||
t.Debrid = dl.Name
|
t.Debrid = dl.Name
|
||||||
t.DownloadLinks = make(map[string]torrent.DownloadLinks)
|
for _, f := range data.Files {
|
||||||
files := make([]torrent.File, len(data.Files))
|
file := types.File{
|
||||||
for i, f := range data.Files {
|
|
||||||
files[i] = torrent.File{
|
|
||||||
Id: f.ID,
|
Id: f.ID,
|
||||||
Name: f.Name,
|
Name: f.Name,
|
||||||
Size: f.Size,
|
Size: f.Size,
|
||||||
Path: f.Name,
|
Path: f.Name,
|
||||||
Link: f.DownloadURL,
|
Link: f.DownloadURL,
|
||||||
|
DownloadLink: f.DownloadURL,
|
||||||
|
Generated: time.Now(),
|
||||||
}
|
}
|
||||||
|
t.Files[f.Name] = file
|
||||||
}
|
}
|
||||||
t.Files = files
|
|
||||||
|
|
||||||
return t, nil
|
return t, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (dl *DebridLink) CheckStatus(torrent *torrent.Torrent, isSymlink bool) (*torrent.Torrent, error) {
|
func (dl *DebridLink) CheckStatus(torrent *types.Torrent, isSymlink bool) (*types.Torrent, error) {
|
||||||
for {
|
for {
|
||||||
t, err := dl.GetTorrent(torrent.Id)
|
err := dl.UpdateTorrent(torrent)
|
||||||
torrent = t
|
|
||||||
if err != nil || torrent == nil {
|
if err != nil || torrent == nil {
|
||||||
return torrent, err
|
return torrent, err
|
||||||
}
|
}
|
||||||
status := torrent.Status
|
status := torrent.Status
|
||||||
if status == "downloaded" {
|
if status == "downloaded" {
|
||||||
dl.logger.Info().Msgf("Torrent: %s downloaded", torrent.Name)
|
dl.logger.Info().Msgf("Torrent: %s downloaded", torrent.Name)
|
||||||
err = dl.GetDownloadLinks(torrent)
|
err = dl.GenerateDownloadLinks(torrent)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return torrent, err
|
return torrent, err
|
||||||
}
|
}
|
||||||
break
|
break
|
||||||
} else if status == "downloading" {
|
} else if slices.Contains(dl.GetDownloadingStatus(), status) {
|
||||||
if !dl.DownloadUncached {
|
if !torrent.DownloadUncached {
|
||||||
go dl.DeleteTorrent(torrent)
|
|
||||||
return torrent, fmt.Errorf("torrent: %s not cached", torrent.Name)
|
return torrent, fmt.Errorf("torrent: %s not cached", torrent.Name)
|
||||||
}
|
}
|
||||||
// Break out of the loop if the torrent is downloading.
|
// Break out of the loop if the torrent is downloading.
|
||||||
// This is necessary to prevent infinite loop since we moved to sync downloading and async processing
|
// This is necessary to prevent infinite loop since we moved to sync downloading and async processing
|
||||||
break
|
return torrent, nil
|
||||||
} else {
|
} else {
|
||||||
return torrent, fmt.Errorf("torrent: %s has error", torrent.Name)
|
return torrent, fmt.Errorf("torrent: %s has error", torrent.Name)
|
||||||
}
|
}
|
||||||
@@ -232,62 +226,162 @@ func (dl *DebridLink) CheckStatus(torrent *torrent.Torrent, isSymlink bool) (*to
|
|||||||
return torrent, nil
|
return torrent, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (dl *DebridLink) DeleteTorrent(torrent *torrent.Torrent) {
|
func (dl *DebridLink) DeleteTorrent(torrentId string) error {
|
||||||
url := fmt.Sprintf("%s/seedbox/%s/remove", dl.Host, torrent.Id)
|
url := fmt.Sprintf("%s/seedbox/%s/remove", dl.Host, torrentId)
|
||||||
req, _ := http.NewRequest(http.MethodDelete, url, nil)
|
req, _ := http.NewRequest(http.MethodDelete, url, nil)
|
||||||
_, err := dl.client.MakeRequest(req)
|
if _, err := dl.client.MakeRequest(req); err != nil {
|
||||||
if err == nil {
|
return err
|
||||||
dl.logger.Info().Msgf("Torrent: %s deleted", torrent.Name)
|
|
||||||
} else {
|
|
||||||
dl.logger.Info().Msgf("Error deleting torrent: %s", err)
|
|
||||||
}
|
}
|
||||||
}
|
dl.logger.Info().Msgf("Torrent: %s deleted from DebridLink", torrentId)
|
||||||
|
|
||||||
func (dl *DebridLink) GetDownloadLinks(t *torrent.Torrent) error {
|
|
||||||
downloadLinks := make(map[string]torrent.DownloadLinks)
|
|
||||||
for _, f := range t.Files {
|
|
||||||
dl := torrent.DownloadLinks{
|
|
||||||
Link: f.Link,
|
|
||||||
Filename: f.Name,
|
|
||||||
}
|
|
||||||
downloadLinks[f.Id] = dl
|
|
||||||
}
|
|
||||||
t.DownloadLinks = downloadLinks
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (dl *DebridLink) GetDownloadLink(t *torrent.Torrent, file *torrent.File) *torrent.DownloadLinks {
|
func (dl *DebridLink) GenerateDownloadLinks(t *types.Torrent) error {
|
||||||
dlLink, ok := t.DownloadLinks[file.Id]
|
// Download links are already generated
|
||||||
if !ok {
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
return &dlLink
|
|
||||||
|
func (dl *DebridLink) GetDownloads() (map[string]types.DownloadLinks, error) {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (dl *DebridLink) GetDownloadLink(t *types.Torrent, file *types.File) (string, string, error) {
|
||||||
|
return file.DownloadLink, "0", nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (dl *DebridLink) GetDownloadingStatus() []string {
|
||||||
|
return []string{"downloading"}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (dl *DebridLink) GetCheckCached() bool {
|
func (dl *DebridLink) GetCheckCached() bool {
|
||||||
return dl.CheckCached
|
return dl.CheckCached
|
||||||
}
|
}
|
||||||
|
|
||||||
func New(dc config.Debrid, cache *cache.Cache) *DebridLink {
|
func (dl *DebridLink) GetDownloadUncached() bool {
|
||||||
|
return dl.DownloadUncached
|
||||||
|
}
|
||||||
|
|
||||||
|
func New(dc config.Debrid) *DebridLink {
|
||||||
rl := request.ParseRateLimit(dc.RateLimit)
|
rl := request.ParseRateLimit(dc.RateLimit)
|
||||||
|
|
||||||
headers := map[string]string{
|
headers := map[string]string{
|
||||||
"Authorization": fmt.Sprintf("Bearer %s", dc.APIKey),
|
"Authorization": fmt.Sprintf("Bearer %s", dc.APIKey),
|
||||||
"Content-Type": "application/json",
|
"Content-Type": "application/json",
|
||||||
}
|
}
|
||||||
client := request.NewRLHTTPClient(rl, headers)
|
_log := logger.New(dc.Name)
|
||||||
|
client := request.New(
|
||||||
|
request.WithHeaders(headers),
|
||||||
|
request.WithLogger(_log),
|
||||||
|
request.WithRateLimiter(rl),
|
||||||
|
request.WithProxy(dc.Proxy),
|
||||||
|
)
|
||||||
|
|
||||||
|
accounts := xsync.NewMapOf[string, types.Account]()
|
||||||
|
for idx, key := range dc.DownloadAPIKeys {
|
||||||
|
id := strconv.Itoa(idx)
|
||||||
|
accounts.Store(id, types.Account{
|
||||||
|
Name: key,
|
||||||
|
ID: id,
|
||||||
|
Token: key,
|
||||||
|
})
|
||||||
|
}
|
||||||
return &DebridLink{
|
return &DebridLink{
|
||||||
Name: "debridlink",
|
Name: "debridlink",
|
||||||
Host: dc.Host,
|
Host: dc.Host,
|
||||||
APIKey: dc.APIKey,
|
APIKey: dc.APIKey,
|
||||||
|
DownloadKeys: accounts,
|
||||||
DownloadUncached: dc.DownloadUncached,
|
DownloadUncached: dc.DownloadUncached,
|
||||||
client: client,
|
client: client,
|
||||||
cache: cache,
|
|
||||||
MountPath: dc.Folder,
|
MountPath: dc.Folder,
|
||||||
logger: logger.NewLogger(dc.Name, config.GetConfig().LogLevel, os.Stdout),
|
logger: logger.New(dc.Name),
|
||||||
CheckCached: dc.CheckCached,
|
CheckCached: dc.CheckCached,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (dl *DebridLink) GetTorrents() ([]*torrent.Torrent, error) {
|
func (dl *DebridLink) GetTorrents() ([]*types.Torrent, error) {
|
||||||
return nil, fmt.Errorf("not implemented")
|
page := 0
|
||||||
|
perPage := 100
|
||||||
|
torrents := make([]*types.Torrent, 0)
|
||||||
|
for {
|
||||||
|
t, err := dl.getTorrents(page, perPage)
|
||||||
|
if err != nil {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if len(t) == 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
torrents = append(torrents, t...)
|
||||||
|
page++
|
||||||
|
}
|
||||||
|
return torrents, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (dl *DebridLink) getTorrents(page, perPage int) ([]*types.Torrent, error) {
|
||||||
|
url := fmt.Sprintf("%s/seedbox/list?page=%d&perPage=%d", dl.Host, page, perPage)
|
||||||
|
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
||||||
|
resp, err := dl.client.MakeRequest(req)
|
||||||
|
torrents := make([]*types.Torrent, 0)
|
||||||
|
if err != nil {
|
||||||
|
return torrents, err
|
||||||
|
}
|
||||||
|
var res TorrentInfo
|
||||||
|
err = json.Unmarshal(resp, &res)
|
||||||
|
if err != nil {
|
||||||
|
dl.logger.Info().Msgf("Error unmarshalling torrent info: %s", err)
|
||||||
|
return torrents, err
|
||||||
|
}
|
||||||
|
|
||||||
|
data := *res.Value
|
||||||
|
|
||||||
|
if len(data) == 0 {
|
||||||
|
return torrents, nil
|
||||||
|
}
|
||||||
|
for _, t := range data {
|
||||||
|
if t.Status != 100 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
torrent := &types.Torrent{
|
||||||
|
Id: t.ID,
|
||||||
|
Name: t.Name,
|
||||||
|
Bytes: t.TotalSize,
|
||||||
|
Status: "downloaded",
|
||||||
|
Filename: t.Name,
|
||||||
|
OriginalFilename: t.Name,
|
||||||
|
InfoHash: t.HashString,
|
||||||
|
Files: make(map[string]types.File),
|
||||||
|
Debrid: dl.Name,
|
||||||
|
MountPath: dl.MountPath,
|
||||||
|
}
|
||||||
|
cfg := config.Get()
|
||||||
|
for _, f := range t.Files {
|
||||||
|
if !cfg.IsSizeAllowed(f.Size) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
file := types.File{
|
||||||
|
Id: f.ID,
|
||||||
|
Name: f.Name,
|
||||||
|
Size: f.Size,
|
||||||
|
Path: f.Name,
|
||||||
|
DownloadLink: f.DownloadURL,
|
||||||
|
Link: f.DownloadURL,
|
||||||
|
}
|
||||||
|
torrent.Files[f.Name] = file
|
||||||
|
}
|
||||||
|
torrents = append(torrents, torrent)
|
||||||
|
}
|
||||||
|
return torrents, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (dl *DebridLink) CheckLink(link string) error {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (dl *DebridLink) GetMountPath() string {
|
||||||
|
return dl.MountPath
|
||||||
|
}
|
||||||
|
|
||||||
|
func (dl *DebridLink) DisableAccount(accountId string) {
|
||||||
|
}
|
||||||
|
|
||||||
|
func (dl *DebridLink) ResetActiveDownloadKeys() {
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1 +0,0 @@
|
|||||||
package debrid
|
|
||||||
@@ -1,26 +0,0 @@
|
|||||||
package engine
|
|
||||||
|
|
||||||
type Engine struct {
|
|
||||||
Debrids []Service
|
|
||||||
LastUsed int
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Engine) Get() Service {
|
|
||||||
if d.LastUsed == 0 {
|
|
||||||
return d.Debrids[0]
|
|
||||||
}
|
|
||||||
return d.Debrids[d.LastUsed]
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Engine) GetByName(name string) Service {
|
|
||||||
for _, deb := range d.Debrids {
|
|
||||||
if deb.GetName() == name {
|
|
||||||
return deb
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Engine) GetDebrids() []Service {
|
|
||||||
return d.Debrids
|
|
||||||
}
|
|
||||||
@@ -1,20 +0,0 @@
|
|||||||
package engine
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/rs/zerolog"
|
|
||||||
"github.com/sirrobot01/debrid-blackhole/pkg/debrid/torrent"
|
|
||||||
)
|
|
||||||
|
|
||||||
type Service interface {
|
|
||||||
SubmitMagnet(tr *torrent.Torrent) (*torrent.Torrent, error)
|
|
||||||
CheckStatus(tr *torrent.Torrent, isSymlink bool) (*torrent.Torrent, error)
|
|
||||||
GetDownloadLinks(tr *torrent.Torrent) error
|
|
||||||
GetDownloadLink(tr *torrent.Torrent, file *torrent.File) *torrent.DownloadLinks
|
|
||||||
DeleteTorrent(tr *torrent.Torrent)
|
|
||||||
IsAvailable(infohashes []string) map[string]bool
|
|
||||||
GetCheckCached() bool
|
|
||||||
GetTorrent(id string) (*torrent.Torrent, error)
|
|
||||||
GetTorrents() ([]*torrent.Torrent, error)
|
|
||||||
GetName() string
|
|
||||||
GetLogger() zerolog.Logger
|
|
||||||
}
|
|
||||||
@@ -1,36 +1,101 @@
|
|||||||
package realdebrid
|
package realdebrid
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/json"
|
"bytes"
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"github.com/goccy/go-json"
|
||||||
|
"github.com/puzpuzpuz/xsync/v3"
|
||||||
"github.com/rs/zerolog"
|
"github.com/rs/zerolog"
|
||||||
"github.com/sirrobot01/debrid-blackhole/internal/cache"
|
"github.com/sirrobot01/decypharr/internal/config"
|
||||||
"github.com/sirrobot01/debrid-blackhole/internal/config"
|
"github.com/sirrobot01/decypharr/internal/logger"
|
||||||
"github.com/sirrobot01/debrid-blackhole/internal/logger"
|
"github.com/sirrobot01/decypharr/internal/request"
|
||||||
"github.com/sirrobot01/debrid-blackhole/internal/request"
|
"github.com/sirrobot01/decypharr/internal/utils"
|
||||||
"github.com/sirrobot01/debrid-blackhole/internal/utils"
|
"github.com/sirrobot01/decypharr/pkg/debrid/types"
|
||||||
"github.com/sirrobot01/debrid-blackhole/pkg/debrid/torrent"
|
"io"
|
||||||
"net/http"
|
"net/http"
|
||||||
gourl "net/url"
|
gourl "net/url"
|
||||||
"os"
|
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"slices"
|
"slices"
|
||||||
|
"sort"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
type RealDebrid struct {
|
type RealDebrid struct {
|
||||||
Name string
|
Name string
|
||||||
Host string `json:"host"`
|
Host string `json:"host"`
|
||||||
|
|
||||||
APIKey string
|
APIKey string
|
||||||
|
DownloadKeys *xsync.MapOf[string, types.Account] // index | Account
|
||||||
|
|
||||||
DownloadUncached bool
|
DownloadUncached bool
|
||||||
client *request.RLHTTPClient
|
client *request.Client
|
||||||
cache *cache.Cache
|
downloadClient *request.Client
|
||||||
|
|
||||||
MountPath string
|
MountPath string
|
||||||
logger zerolog.Logger
|
logger zerolog.Logger
|
||||||
CheckCached bool
|
CheckCached bool
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func New(dc config.Debrid) *RealDebrid {
|
||||||
|
rl := request.ParseRateLimit(dc.RateLimit)
|
||||||
|
|
||||||
|
headers := map[string]string{
|
||||||
|
"Authorization": fmt.Sprintf("Bearer %s", dc.APIKey),
|
||||||
|
}
|
||||||
|
_log := logger.New(dc.Name)
|
||||||
|
|
||||||
|
accounts := xsync.NewMapOf[string, types.Account]()
|
||||||
|
firstDownloadKey := dc.DownloadAPIKeys[0]
|
||||||
|
for idx, key := range dc.DownloadAPIKeys {
|
||||||
|
id := strconv.Itoa(idx)
|
||||||
|
accounts.Store(id, types.Account{
|
||||||
|
Name: key,
|
||||||
|
ID: id,
|
||||||
|
Token: key,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
downloadHeaders := map[string]string{
|
||||||
|
"Authorization": fmt.Sprintf("Bearer %s", firstDownloadKey),
|
||||||
|
}
|
||||||
|
|
||||||
|
downloadClient := request.New(
|
||||||
|
request.WithHeaders(downloadHeaders),
|
||||||
|
request.WithRateLimiter(rl),
|
||||||
|
request.WithLogger(_log),
|
||||||
|
request.WithMaxRetries(5),
|
||||||
|
request.WithRetryableStatus(429, 447),
|
||||||
|
request.WithProxy(dc.Proxy),
|
||||||
|
request.WithStatusCooldown(447, 10*time.Second), // 447 is a fair use error
|
||||||
|
)
|
||||||
|
|
||||||
|
client := request.New(
|
||||||
|
request.WithHeaders(headers),
|
||||||
|
request.WithRateLimiter(rl),
|
||||||
|
request.WithLogger(_log),
|
||||||
|
request.WithMaxRetries(5),
|
||||||
|
request.WithRetryableStatus(429),
|
||||||
|
request.WithProxy(dc.Proxy),
|
||||||
|
)
|
||||||
|
|
||||||
|
return &RealDebrid{
|
||||||
|
Name: "realdebrid",
|
||||||
|
Host: dc.Host,
|
||||||
|
APIKey: dc.APIKey,
|
||||||
|
DownloadKeys: accounts,
|
||||||
|
DownloadUncached: dc.DownloadUncached,
|
||||||
|
client: client,
|
||||||
|
downloadClient: downloadClient,
|
||||||
|
MountPath: dc.Folder,
|
||||||
|
logger: logger.New(dc.Name),
|
||||||
|
CheckCached: dc.CheckCached,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func (r *RealDebrid) GetName() string {
|
func (r *RealDebrid) GetName() string {
|
||||||
return r.Name
|
return r.Name
|
||||||
}
|
}
|
||||||
@@ -39,61 +104,68 @@ func (r *RealDebrid) GetLogger() zerolog.Logger {
|
|||||||
return r.logger
|
return r.logger
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetTorrentFiles returns a list of torrent files from the torrent info
|
func getSelectedFiles(t *types.Torrent, data TorrentInfo) map[string]types.File {
|
||||||
|
selectedFiles := make([]types.File, 0)
|
||||||
|
for _, f := range data.Files {
|
||||||
|
if f.Selected == 1 {
|
||||||
|
name := filepath.Base(f.Path)
|
||||||
|
file := types.File{
|
||||||
|
Name: name,
|
||||||
|
Path: name,
|
||||||
|
Size: f.Bytes,
|
||||||
|
Id: strconv.Itoa(f.ID),
|
||||||
|
}
|
||||||
|
selectedFiles = append(selectedFiles, file)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
files := make(map[string]types.File)
|
||||||
|
for index, f := range selectedFiles {
|
||||||
|
if index >= len(data.Links) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
f.Link = data.Links[index]
|
||||||
|
files[f.Name] = f
|
||||||
|
}
|
||||||
|
return files
|
||||||
|
}
|
||||||
|
|
||||||
|
// getTorrentFiles returns a list of torrent files from the torrent info
|
||||||
// validate is used to determine if the files should be validated
|
// validate is used to determine if the files should be validated
|
||||||
// if validate is false, selected files will be returned
|
// if validate is false, selected files will be returned
|
||||||
func GetTorrentFiles(data TorrentInfo, validate bool) []torrent.File {
|
func getTorrentFiles(t *types.Torrent, data TorrentInfo) map[string]types.File {
|
||||||
files := make([]torrent.File, 0)
|
files := make(map[string]types.File)
|
||||||
cfg := config.GetConfig()
|
cfg := config.Get()
|
||||||
idx := 0
|
idx := 0
|
||||||
|
|
||||||
for _, f := range data.Files {
|
for _, f := range data.Files {
|
||||||
|
|
||||||
name := filepath.Base(f.Path)
|
name := filepath.Base(f.Path)
|
||||||
|
if utils.IsSampleFile(f.Path) {
|
||||||
if validate {
|
|
||||||
if utils.RegexMatch(utils.SAMPLEMATCH, name) {
|
|
||||||
// Skip sample files
|
// Skip sample files
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
if !cfg.IsAllowedFile(name) {
|
if !cfg.IsAllowedFile(name) {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if !cfg.IsSizeAllowed(f.Bytes) {
|
if !cfg.IsSizeAllowed(f.Bytes) {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
} else {
|
|
||||||
if f.Selected == 0 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fileId := f.ID
|
file := types.File{
|
||||||
_link := ""
|
|
||||||
if len(data.Links) > idx {
|
|
||||||
_link = data.Links[idx]
|
|
||||||
}
|
|
||||||
file := torrent.File{
|
|
||||||
Name: name,
|
Name: name,
|
||||||
Path: name,
|
Path: name,
|
||||||
Size: f.Bytes,
|
Size: f.Bytes,
|
||||||
Id: strconv.Itoa(fileId),
|
Id: strconv.Itoa(f.ID),
|
||||||
Link: _link,
|
|
||||||
}
|
}
|
||||||
files = append(files, file)
|
files[name] = file
|
||||||
idx++
|
idx++
|
||||||
}
|
}
|
||||||
return files
|
return files
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *RealDebrid) IsAvailable(infohashes []string) map[string]bool {
|
func (r *RealDebrid) IsAvailable(hashes []string) map[string]bool {
|
||||||
// Check if the infohashes are available in the local cache
|
// Check if the infohashes are available in the local cache
|
||||||
hashes, result := torrent.GetLocalCache(infohashes, r.cache)
|
result := make(map[string]bool)
|
||||||
|
|
||||||
if len(hashes) == 0 {
|
|
||||||
// Either all the infohashes are locally cached or none are
|
|
||||||
r.cache.AddMultiple(result)
|
|
||||||
return result
|
|
||||||
}
|
|
||||||
|
|
||||||
// Divide hashes into groups of 100
|
// Divide hashes into groups of 100
|
||||||
for i := 0; i < len(hashes); i += 200 {
|
for i := 0; i < len(hashes); i += 200 {
|
||||||
@@ -136,11 +208,39 @@ func (r *RealDebrid) IsAvailable(infohashes []string) map[string]bool {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
r.cache.AddMultiple(result) // Add the results to the cache
|
|
||||||
return result
|
return result
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *RealDebrid) SubmitMagnet(t *torrent.Torrent) (*torrent.Torrent, error) {
|
func (r *RealDebrid) SubmitMagnet(t *types.Torrent) (*types.Torrent, error) {
|
||||||
|
if t.Magnet.IsTorrent() {
|
||||||
|
return r.addTorrent(t)
|
||||||
|
}
|
||||||
|
return r.addMagnet(t)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *RealDebrid) addTorrent(t *types.Torrent) (*types.Torrent, error) {
|
||||||
|
url := fmt.Sprintf("%s/torrents/addTorrent", r.Host)
|
||||||
|
var data AddMagnetSchema
|
||||||
|
req, err := http.NewRequest(http.MethodPut, url, bytes.NewReader(t.Magnet.File))
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
req.Header.Add("Content-Type", "application/x-bittorrent")
|
||||||
|
resp, err := r.client.MakeRequest(req)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if err = json.Unmarshal(resp, &data); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
t.Id = data.Id
|
||||||
|
t.Debrid = r.Name
|
||||||
|
t.MountPath = r.MountPath
|
||||||
|
return t, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *RealDebrid) addMagnet(t *types.Torrent) (*types.Torrent, error) {
|
||||||
url := fmt.Sprintf("%s/torrents/addMagnet", r.Host)
|
url := fmt.Sprintf("%s/torrents/addMagnet", r.Host)
|
||||||
payload := gourl.Values{
|
payload := gourl.Values{
|
||||||
"magnet": {t.Magnet.Link},
|
"magnet": {t.Magnet.Link},
|
||||||
@@ -151,31 +251,30 @@ func (r *RealDebrid) SubmitMagnet(t *torrent.Torrent) (*torrent.Torrent, error)
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
err = json.Unmarshal(resp, &data)
|
if err = json.Unmarshal(resp, &data); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
t.Id = data.Id
|
t.Id = data.Id
|
||||||
t.Debrid = r.Name
|
t.Debrid = r.Name
|
||||||
t.MountPath = r.MountPath
|
t.MountPath = r.MountPath
|
||||||
return t, nil
|
return t, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *RealDebrid) GetTorrent(id string) (*torrent.Torrent, error) {
|
func (r *RealDebrid) UpdateTorrent(t *types.Torrent) error {
|
||||||
t := &torrent.Torrent{}
|
url := fmt.Sprintf("%s/torrents/info/%s", r.Host, t.Id)
|
||||||
url := fmt.Sprintf("%s/torrents/info/%s", r.Host, id)
|
|
||||||
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
||||||
resp, err := r.client.MakeRequest(req)
|
resp, err := r.client.MakeRequest(req)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return t, err
|
return err
|
||||||
}
|
}
|
||||||
var data TorrentInfo
|
var data TorrentInfo
|
||||||
err = json.Unmarshal(resp, &data)
|
err = json.Unmarshal(resp, &data)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return t, err
|
return err
|
||||||
}
|
}
|
||||||
name := utils.RemoveInvalidChars(data.OriginalFilename)
|
t.Name = data.Filename
|
||||||
t.Id = id
|
|
||||||
t.Name = name
|
|
||||||
t.Bytes = data.Bytes
|
t.Bytes = data.Bytes
|
||||||
t.Folder = name
|
t.Folder = data.OriginalFilename
|
||||||
t.Progress = data.Progress
|
t.Progress = data.Progress
|
||||||
t.Status = data.Status
|
t.Status = data.Status
|
||||||
t.Speed = data.Speed
|
t.Speed = data.Speed
|
||||||
@@ -185,13 +284,12 @@ func (r *RealDebrid) GetTorrent(id string) (*torrent.Torrent, error) {
|
|||||||
t.Links = data.Links
|
t.Links = data.Links
|
||||||
t.MountPath = r.MountPath
|
t.MountPath = r.MountPath
|
||||||
t.Debrid = r.Name
|
t.Debrid = r.Name
|
||||||
t.DownloadLinks = make(map[string]torrent.DownloadLinks)
|
t.Added = data.Added
|
||||||
files := GetTorrentFiles(data, false) // Get selected files
|
t.Files = getSelectedFiles(t, data) // Get selected files
|
||||||
t.Files = files
|
return nil
|
||||||
return t, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *RealDebrid) CheckStatus(t *torrent.Torrent, isSymlink bool) (*torrent.Torrent, error) {
|
func (r *RealDebrid) CheckStatus(t *types.Torrent, isSymlink bool) (*types.Torrent, error) {
|
||||||
url := fmt.Sprintf("%s/torrents/info/%s", r.Host, t.Id)
|
url := fmt.Sprintf("%s/torrents/info/%s", r.Host, t.Id)
|
||||||
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
||||||
for {
|
for {
|
||||||
@@ -201,11 +299,12 @@ func (r *RealDebrid) CheckStatus(t *torrent.Torrent, isSymlink bool) (*torrent.T
|
|||||||
return t, err
|
return t, err
|
||||||
}
|
}
|
||||||
var data TorrentInfo
|
var data TorrentInfo
|
||||||
err = json.Unmarshal(resp, &data)
|
if err = json.Unmarshal(resp, &data); err != nil {
|
||||||
|
return t, err
|
||||||
|
}
|
||||||
status := data.Status
|
status := data.Status
|
||||||
name := utils.RemoveInvalidChars(data.OriginalFilename)
|
t.Name = data.Filename // Important because some magnet changes the name
|
||||||
t.Name = name // Important because some magnet changes the name
|
t.Folder = data.OriginalFilename
|
||||||
t.Folder = name
|
|
||||||
t.Filename = data.Filename
|
t.Filename = data.Filename
|
||||||
t.OriginalFilename = data.OriginalFilename
|
t.OriginalFilename = data.OriginalFilename
|
||||||
t.Bytes = data.Bytes
|
t.Bytes = data.Bytes
|
||||||
@@ -216,15 +315,13 @@ func (r *RealDebrid) CheckStatus(t *torrent.Torrent, isSymlink bool) (*torrent.T
|
|||||||
t.Status = status
|
t.Status = status
|
||||||
t.Debrid = r.Name
|
t.Debrid = r.Name
|
||||||
t.MountPath = r.MountPath
|
t.MountPath = r.MountPath
|
||||||
downloadingStatus := []string{"downloading", "magnet_conversion", "queued", "compressing", "uploading"}
|
|
||||||
if status == "waiting_files_selection" {
|
if status == "waiting_files_selection" {
|
||||||
files := GetTorrentFiles(data, true) // Validate files to be selected
|
t.Files = getTorrentFiles(t, data)
|
||||||
t.Files = files
|
if len(t.Files) == 0 {
|
||||||
if len(files) == 0 {
|
|
||||||
return t, fmt.Errorf("no video files found")
|
return t, fmt.Errorf("no video files found")
|
||||||
}
|
}
|
||||||
filesId := make([]string, 0)
|
filesId := make([]string, 0)
|
||||||
for _, f := range files {
|
for _, f := range t.Files {
|
||||||
filesId = append(filesId, f.Id)
|
filesId = append(filesId, f.Id)
|
||||||
}
|
}
|
||||||
p := gourl.Values{
|
p := gourl.Values{
|
||||||
@@ -237,24 +334,20 @@ func (r *RealDebrid) CheckStatus(t *torrent.Torrent, isSymlink bool) (*torrent.T
|
|||||||
return t, err
|
return t, err
|
||||||
}
|
}
|
||||||
} else if status == "downloaded" {
|
} else if status == "downloaded" {
|
||||||
files := GetTorrentFiles(data, false) // Get selected files
|
t.Files = getSelectedFiles(t, data) // Get selected files
|
||||||
t.Files = files
|
|
||||||
r.logger.Info().Msgf("Torrent: %s downloaded to RD", t.Name)
|
r.logger.Info().Msgf("Torrent: %s downloaded to RD", t.Name)
|
||||||
if !isSymlink {
|
if !isSymlink {
|
||||||
err = r.GetDownloadLinks(t)
|
err = r.GenerateDownloadLinks(t)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return t, err
|
return t, err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
break
|
break
|
||||||
} else if slices.Contains(downloadingStatus, status) {
|
} else if slices.Contains(r.GetDownloadingStatus(), status) {
|
||||||
if !r.DownloadUncached {
|
if !t.DownloadUncached {
|
||||||
go r.DeleteTorrent(t)
|
|
||||||
return t, fmt.Errorf("torrent: %s not cached", t.Name)
|
return t, fmt.Errorf("torrent: %s not cached", t.Name)
|
||||||
}
|
}
|
||||||
// Break out of the loop if the torrent is downloading.
|
return t, nil
|
||||||
// This is necessary to prevent infinite loop since we moved to sync downloading and async processing
|
|
||||||
break
|
|
||||||
} else {
|
} else {
|
||||||
return t, fmt.Errorf("torrent: %s has error: %s", t.Name, status)
|
return t, fmt.Errorf("torrent: %s has error: %s", t.Name, status)
|
||||||
}
|
}
|
||||||
@@ -263,91 +356,213 @@ func (r *RealDebrid) CheckStatus(t *torrent.Torrent, isSymlink bool) (*torrent.T
|
|||||||
return t, nil
|
return t, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *RealDebrid) DeleteTorrent(torrent *torrent.Torrent) {
|
func (r *RealDebrid) DeleteTorrent(torrentId string) error {
|
||||||
url := fmt.Sprintf("%s/torrents/delete/%s", r.Host, torrent.Id)
|
url := fmt.Sprintf("%s/torrents/delete/%s", r.Host, torrentId)
|
||||||
req, _ := http.NewRequest(http.MethodDelete, url, nil)
|
req, _ := http.NewRequest(http.MethodDelete, url, nil)
|
||||||
_, err := r.client.MakeRequest(req)
|
if _, err := r.client.MakeRequest(req); err != nil {
|
||||||
if err == nil {
|
|
||||||
r.logger.Info().Msgf("Torrent: %s deleted", torrent.Name)
|
|
||||||
} else {
|
|
||||||
r.logger.Info().Msgf("Error deleting torrent: %s", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *RealDebrid) GetDownloadLinks(t *torrent.Torrent) error {
|
|
||||||
url := fmt.Sprintf("%s/unrestrict/link/", r.Host)
|
|
||||||
downloadLinks := make(map[string]torrent.DownloadLinks)
|
|
||||||
for _, f := range t.Files {
|
|
||||||
dlLink := t.DownloadLinks[f.Id]
|
|
||||||
if f.Link == "" || dlLink.DownloadLink != "" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
payload := gourl.Values{
|
|
||||||
"link": {f.Link},
|
|
||||||
}
|
|
||||||
req, _ := http.NewRequest(http.MethodPost, url, strings.NewReader(payload.Encode()))
|
|
||||||
resp, err := r.client.MakeRequest(req)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
var data UnrestrictResponse
|
r.logger.Info().Msgf("Torrent: %s deleted from RD", torrentId)
|
||||||
if err = json.Unmarshal(resp, &data); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
download := torrent.DownloadLinks{
|
|
||||||
Link: data.Link,
|
|
||||||
Filename: data.Filename,
|
|
||||||
DownloadLink: data.Download,
|
|
||||||
}
|
|
||||||
downloadLinks[f.Id] = download
|
|
||||||
}
|
|
||||||
t.DownloadLinks = downloadLinks
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *RealDebrid) GetDownloadLink(t *torrent.Torrent, file *torrent.File) *torrent.DownloadLinks {
|
func (r *RealDebrid) GenerateDownloadLinks(t *types.Torrent) error {
|
||||||
|
filesCh := make(chan types.File, len(t.Files))
|
||||||
|
errCh := make(chan error, len(t.Files))
|
||||||
|
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
wg.Add(len(t.Files))
|
||||||
|
for _, f := range t.Files {
|
||||||
|
go func(file types.File) {
|
||||||
|
defer wg.Done()
|
||||||
|
|
||||||
|
link, accountId, err := r.GetDownloadLink(t, &file)
|
||||||
|
if err != nil {
|
||||||
|
errCh <- err
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
file.DownloadLink = link
|
||||||
|
file.AccountId = accountId
|
||||||
|
filesCh <- file
|
||||||
|
}(f)
|
||||||
|
}
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
wg.Wait()
|
||||||
|
close(filesCh)
|
||||||
|
close(errCh)
|
||||||
|
}()
|
||||||
|
|
||||||
|
// Collect results
|
||||||
|
files := make(map[string]types.File, len(t.Files))
|
||||||
|
for file := range filesCh {
|
||||||
|
files[file.Name] = file
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check for errors
|
||||||
|
for err := range errCh {
|
||||||
|
if err != nil {
|
||||||
|
return err // Return the first error encountered
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Files = files
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *RealDebrid) CheckLink(link string) error {
|
||||||
|
url := fmt.Sprintf("%s/unrestrict/check", r.Host)
|
||||||
|
payload := gourl.Values{
|
||||||
|
"link": {link},
|
||||||
|
}
|
||||||
|
req, _ := http.NewRequest(http.MethodPost, url, strings.NewReader(payload.Encode()))
|
||||||
|
resp, err := r.client.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if resp.StatusCode == http.StatusNotFound {
|
||||||
|
return request.HosterUnavailableError // File has been removed
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *RealDebrid) _getDownloadLink(file *types.File) (string, error) {
|
||||||
url := fmt.Sprintf("%s/unrestrict/link/", r.Host)
|
url := fmt.Sprintf("%s/unrestrict/link/", r.Host)
|
||||||
payload := gourl.Values{
|
payload := gourl.Values{
|
||||||
"link": {file.Link},
|
"link": {file.Link},
|
||||||
}
|
}
|
||||||
req, _ := http.NewRequest(http.MethodPost, url, strings.NewReader(payload.Encode()))
|
req, _ := http.NewRequest(http.MethodPost, url, strings.NewReader(payload.Encode()))
|
||||||
resp, err := r.client.MakeRequest(req)
|
resp, err := r.downloadClient.Do(req)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil
|
return "", err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
if resp.StatusCode != http.StatusOK {
|
||||||
|
// Read the response body to get the error message
|
||||||
|
b, err := io.ReadAll(resp.Body)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
var data ErrorResponse
|
||||||
|
if err = json.Unmarshal(b, &data); err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
switch data.ErrorCode {
|
||||||
|
case 23:
|
||||||
|
return "", request.TrafficExceededError
|
||||||
|
case 24:
|
||||||
|
return "", request.HosterUnavailableError // Link has been nerfed
|
||||||
|
case 19:
|
||||||
|
return "", request.HosterUnavailableError // File has been removed
|
||||||
|
case 36:
|
||||||
|
return "", request.TrafficExceededError // traffic exceeded
|
||||||
|
case 34:
|
||||||
|
return "", request.TrafficExceededError // traffic exceeded
|
||||||
|
default:
|
||||||
|
return "", fmt.Errorf("realdebrid API error: Status: %d || Code: %d", resp.StatusCode, data.ErrorCode)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
b, err := io.ReadAll(resp.Body)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
}
|
}
|
||||||
var data UnrestrictResponse
|
var data UnrestrictResponse
|
||||||
if err = json.Unmarshal(resp, &data); err != nil {
|
if err = json.Unmarshal(b, &data); err != nil {
|
||||||
return nil
|
return "", err
|
||||||
}
|
}
|
||||||
return &torrent.DownloadLinks{
|
return data.Download, nil
|
||||||
Link: data.Link,
|
|
||||||
Filename: data.Filename,
|
|
||||||
DownloadLink: data.Download,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (r *RealDebrid) GetDownloadLink(t *types.Torrent, file *types.File) (string, string, error) {
|
||||||
|
defer r.downloadClient.SetHeader("Authorization", fmt.Sprintf("Bearer %s", r.APIKey))
|
||||||
|
var (
|
||||||
|
downloadLink string
|
||||||
|
accountId string
|
||||||
|
err error
|
||||||
|
)
|
||||||
|
accounts := r.getActiveAccounts()
|
||||||
|
if len(accounts) < 1 {
|
||||||
|
// No active download keys. It's likely that the key has reached bandwidth limit
|
||||||
|
return "", "", fmt.Errorf("no active download keys")
|
||||||
|
}
|
||||||
|
for _, account := range accounts {
|
||||||
|
r.downloadClient.SetHeader("Authorization", fmt.Sprintf("Bearer %s", account.Token))
|
||||||
|
downloadLink, err = r._getDownloadLink(file)
|
||||||
|
if err != nil {
|
||||||
|
if errors.Is(err, request.TrafficExceededError) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
// If the error is not traffic exceeded, skip generating the link with a new key
|
||||||
|
return "", "", err
|
||||||
|
} else {
|
||||||
|
// If we successfully generated a link, break the loop
|
||||||
|
accountId = account.ID
|
||||||
|
file.AccountId = accountId
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
if downloadLink != "" {
|
||||||
|
// If we successfully generated a link, return it
|
||||||
|
return downloadLink, accountId, nil
|
||||||
|
}
|
||||||
|
// If we reach here, it means all keys are disabled or traffic exceeded
|
||||||
|
if err != nil {
|
||||||
|
if errors.Is(err, request.TrafficExceededError) {
|
||||||
|
return "", "", request.TrafficExceededError
|
||||||
|
}
|
||||||
|
return "", "", fmt.Errorf("error generating download link: %v", err)
|
||||||
|
}
|
||||||
|
return "", "", fmt.Errorf("error generating download link: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *RealDebrid) GetCheckCached() bool {
|
func (r *RealDebrid) GetCheckCached() bool {
|
||||||
return r.CheckCached
|
return r.CheckCached
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *RealDebrid) getTorrents(offset int, limit int) ([]*torrent.Torrent, error) {
|
func (r *RealDebrid) getTorrents(offset int, limit int) (int, []*types.Torrent, error) {
|
||||||
url := fmt.Sprintf("%s/torrents?limit=%d", r.Host, limit)
|
url := fmt.Sprintf("%s/torrents?limit=%d", r.Host, limit)
|
||||||
|
torrents := make([]*types.Torrent, 0)
|
||||||
if offset > 0 {
|
if offset > 0 {
|
||||||
url = fmt.Sprintf("%s&offset=%d", url, offset)
|
url = fmt.Sprintf("%s&offset=%d", url, offset)
|
||||||
}
|
}
|
||||||
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
||||||
resp, err := r.client.MakeRequest(req)
|
resp, err := r.client.Do(req)
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
var data []TorrentsResponse
|
|
||||||
if err = json.Unmarshal(resp, &data); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
torrents := make([]*torrent.Torrent, 0)
|
|
||||||
for _, t := range data {
|
|
||||||
|
|
||||||
torrents = append(torrents, &torrent.Torrent{
|
if err != nil {
|
||||||
|
return 0, torrents, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if resp.StatusCode == http.StatusNoContent {
|
||||||
|
return 0, torrents, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if resp.StatusCode != http.StatusOK {
|
||||||
|
resp.Body.Close()
|
||||||
|
return 0, torrents, fmt.Errorf("realdebrid API error: %d", resp.StatusCode)
|
||||||
|
}
|
||||||
|
|
||||||
|
defer resp.Body.Close()
|
||||||
|
body, err := io.ReadAll(resp.Body)
|
||||||
|
if err != nil {
|
||||||
|
return 0, torrents, err
|
||||||
|
}
|
||||||
|
totalItems, _ := strconv.Atoi(resp.Header.Get("X-Total-Count"))
|
||||||
|
var data []TorrentsResponse
|
||||||
|
if err = json.Unmarshal(body, &data); err != nil {
|
||||||
|
return 0, nil, err
|
||||||
|
}
|
||||||
|
filenames := map[string]struct{}{}
|
||||||
|
for _, t := range data {
|
||||||
|
if t.Status != "downloaded" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if _, exists := filenames[t.Filename]; exists {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
torrents = append(torrents, &types.Torrent{
|
||||||
Id: t.Id,
|
Id: t.Id,
|
||||||
Name: t.Filename,
|
Name: t.Filename,
|
||||||
Bytes: t.Bytes,
|
Bytes: t.Bytes,
|
||||||
@@ -356,45 +571,158 @@ func (r *RealDebrid) getTorrents(offset int, limit int) ([]*torrent.Torrent, err
|
|||||||
Filename: t.Filename,
|
Filename: t.Filename,
|
||||||
OriginalFilename: t.Filename,
|
OriginalFilename: t.Filename,
|
||||||
Links: t.Links,
|
Links: t.Links,
|
||||||
|
Files: make(map[string]types.File),
|
||||||
|
InfoHash: t.Hash,
|
||||||
|
Debrid: r.Name,
|
||||||
|
MountPath: r.MountPath,
|
||||||
|
Added: t.Added.Format(time.RFC3339),
|
||||||
})
|
})
|
||||||
|
filenames[t.Filename] = struct{}{}
|
||||||
}
|
}
|
||||||
return torrents, nil
|
return totalItems, torrents, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *RealDebrid) GetTorrents() ([]*torrent.Torrent, error) {
|
func (r *RealDebrid) GetTorrents() ([]*types.Torrent, error) {
|
||||||
torrents := make([]*torrent.Torrent, 0)
|
|
||||||
offset := 0
|
|
||||||
limit := 5000
|
limit := 5000
|
||||||
|
|
||||||
|
// Get first batch and total count
|
||||||
|
totalItems, firstBatch, err := r.getTorrents(0, limit)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
allTorrents := firstBatch
|
||||||
|
|
||||||
|
// Calculate remaining requests
|
||||||
|
remaining := totalItems - len(firstBatch)
|
||||||
|
if remaining <= 0 {
|
||||||
|
return allTorrents, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Prepare for concurrent fetching
|
||||||
|
var fetchError error
|
||||||
|
|
||||||
|
// Calculate how many more requests we need
|
||||||
|
batchCount := (remaining + limit - 1) / limit // ceiling division
|
||||||
|
|
||||||
|
for i := 1; i <= batchCount; i++ {
|
||||||
|
_, batch, err := r.getTorrents(i*limit, limit)
|
||||||
|
if err != nil {
|
||||||
|
fetchError = err
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
allTorrents = append(allTorrents, batch...)
|
||||||
|
}
|
||||||
|
|
||||||
|
if fetchError != nil {
|
||||||
|
return nil, fetchError
|
||||||
|
}
|
||||||
|
|
||||||
|
return allTorrents, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *RealDebrid) GetDownloads() (map[string]types.DownloadLinks, error) {
|
||||||
|
links := make(map[string]types.DownloadLinks)
|
||||||
|
offset := 0
|
||||||
|
limit := 1000
|
||||||
|
|
||||||
|
accounts := r.getActiveAccounts()
|
||||||
|
|
||||||
|
if len(accounts) < 1 {
|
||||||
|
// No active download keys. It's likely that the key has reached bandwidth limit
|
||||||
|
return nil, fmt.Errorf("no active download keys")
|
||||||
|
}
|
||||||
|
r.downloadClient.SetHeader("Authorization", fmt.Sprintf("Bearer %s", accounts[0].Token))
|
||||||
for {
|
for {
|
||||||
ts, err := r.getTorrents(offset, limit)
|
dl, err := r._getDownloads(offset, limit)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
if len(ts) == 0 {
|
if len(dl) == 0 {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
torrents = append(torrents, ts...)
|
|
||||||
offset = len(torrents)
|
|
||||||
}
|
|
||||||
return torrents, nil
|
|
||||||
|
|
||||||
|
for _, d := range dl {
|
||||||
|
if _, exists := links[d.Link]; exists {
|
||||||
|
// This is ordered by date, so we can skip the rest
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
links[d.Link] = d
|
||||||
}
|
}
|
||||||
|
|
||||||
func New(dc config.Debrid, cache *cache.Cache) *RealDebrid {
|
offset += len(dl)
|
||||||
rl := request.ParseRateLimit(dc.RateLimit)
|
|
||||||
headers := map[string]string{
|
|
||||||
"Authorization": fmt.Sprintf("Bearer %s", dc.APIKey),
|
|
||||||
}
|
}
|
||||||
client := request.NewRLHTTPClient(rl, headers)
|
return links, nil
|
||||||
return &RealDebrid{
|
}
|
||||||
Name: "realdebrid",
|
|
||||||
Host: dc.Host,
|
func (r *RealDebrid) _getDownloads(offset int, limit int) ([]types.DownloadLinks, error) {
|
||||||
APIKey: dc.APIKey,
|
url := fmt.Sprintf("%s/downloads?limit=%d", r.Host, limit)
|
||||||
DownloadUncached: dc.DownloadUncached,
|
if offset > 0 {
|
||||||
client: client,
|
url = fmt.Sprintf("%s&offset=%d", url, offset)
|
||||||
cache: cache,
|
}
|
||||||
MountPath: dc.Folder,
|
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
||||||
logger: logger.NewLogger(dc.Name, config.GetConfig().LogLevel, os.Stdout),
|
resp, err := r.downloadClient.MakeRequest(req)
|
||||||
CheckCached: dc.CheckCached,
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
var data []DownloadsResponse
|
||||||
|
if err = json.Unmarshal(resp, &data); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
links := make([]types.DownloadLinks, 0)
|
||||||
|
for _, d := range data {
|
||||||
|
links = append(links, types.DownloadLinks{
|
||||||
|
Filename: d.Filename,
|
||||||
|
Size: d.Filesize,
|
||||||
|
Link: d.Link,
|
||||||
|
DownloadLink: d.Download,
|
||||||
|
Generated: d.Generated,
|
||||||
|
Id: d.Id,
|
||||||
|
})
|
||||||
|
|
||||||
|
}
|
||||||
|
return links, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *RealDebrid) GetDownloadingStatus() []string {
|
||||||
|
return []string{"downloading", "magnet_conversion", "queued", "compressing", "uploading"}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *RealDebrid) GetDownloadUncached() bool {
|
||||||
|
return r.DownloadUncached
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *RealDebrid) GetMountPath() string {
|
||||||
|
return r.MountPath
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *RealDebrid) DisableAccount(accountId string) {
|
||||||
|
if value, ok := r.DownloadKeys.Load(accountId); ok {
|
||||||
|
value.Disabled = true
|
||||||
|
r.DownloadKeys.Store(accountId, value)
|
||||||
|
r.logger.Info().Msgf("Disabled account Index: %s", value.ID)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (r *RealDebrid) ResetActiveDownloadKeys() {
|
||||||
|
r.DownloadKeys.Range(func(key string, value types.Account) bool {
|
||||||
|
value.Disabled = false
|
||||||
|
r.DownloadKeys.Store(key, value)
|
||||||
|
return true
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *RealDebrid) getActiveAccounts() []types.Account {
|
||||||
|
accounts := make([]types.Account, 0)
|
||||||
|
r.DownloadKeys.Range(func(key string, value types.Account) bool {
|
||||||
|
if value.Disabled {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
accounts = append(accounts, value)
|
||||||
|
return true
|
||||||
|
})
|
||||||
|
sort.Slice(accounts, func(i, j int) bool {
|
||||||
|
return accounts[i].ID < accounts[j].ID
|
||||||
|
})
|
||||||
|
return accounts
|
||||||
|
}
|
||||||
|
|||||||
@@ -1,8 +1,8 @@
|
|||||||
package realdebrid
|
package realdebrid
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"github.com/goccy/go-json"
|
||||||
"time"
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -98,7 +98,7 @@ type UnrestrictResponse struct {
|
|||||||
Id string `json:"id"`
|
Id string `json:"id"`
|
||||||
Filename string `json:"filename"`
|
Filename string `json:"filename"`
|
||||||
MimeType string `json:"mimeType"`
|
MimeType string `json:"mimeType"`
|
||||||
Filesize int `json:"filesize"`
|
Filesize int64 `json:"filesize"`
|
||||||
Link string `json:"link"`
|
Link string `json:"link"`
|
||||||
Host string `json:"host"`
|
Host string `json:"host"`
|
||||||
Chunks int `json:"chunks"`
|
Chunks int `json:"chunks"`
|
||||||
@@ -120,3 +120,22 @@ type TorrentsResponse struct {
|
|||||||
Links []string `json:"links"`
|
Links []string `json:"links"`
|
||||||
Ended time.Time `json:"ended"`
|
Ended time.Time `json:"ended"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type DownloadsResponse struct {
|
||||||
|
Id string `json:"id"`
|
||||||
|
Filename string `json:"filename"`
|
||||||
|
MimeType string `json:"mimeType"`
|
||||||
|
Filesize int64 `json:"filesize"`
|
||||||
|
Link string `json:"link"`
|
||||||
|
Host string `json:"host"`
|
||||||
|
HostIcon string `json:"host_icon"`
|
||||||
|
Chunks int64 `json:"chunks"`
|
||||||
|
Download string `json:"download"`
|
||||||
|
Streamable int `json:"streamable"`
|
||||||
|
Generated time.Time `json:"generated"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type ErrorResponse struct {
|
||||||
|
Error string `json:"error"`
|
||||||
|
ErrorCode int `json:"error_code"`
|
||||||
|
}
|
||||||
|
|||||||
@@ -2,39 +2,76 @@ package torbox
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"github.com/goccy/go-json"
|
||||||
|
"github.com/puzpuzpuz/xsync/v3"
|
||||||
"github.com/rs/zerolog"
|
"github.com/rs/zerolog"
|
||||||
"github.com/sirrobot01/debrid-blackhole/internal/cache"
|
"github.com/sirrobot01/decypharr/internal/config"
|
||||||
"github.com/sirrobot01/debrid-blackhole/internal/config"
|
"github.com/sirrobot01/decypharr/internal/logger"
|
||||||
"github.com/sirrobot01/debrid-blackhole/internal/logger"
|
"github.com/sirrobot01/decypharr/internal/request"
|
||||||
"github.com/sirrobot01/debrid-blackhole/internal/request"
|
"github.com/sirrobot01/decypharr/internal/utils"
|
||||||
"github.com/sirrobot01/debrid-blackhole/internal/utils"
|
"github.com/sirrobot01/decypharr/pkg/debrid/types"
|
||||||
"github.com/sirrobot01/debrid-blackhole/pkg/debrid/torrent"
|
|
||||||
|
|
||||||
"mime/multipart"
|
"mime/multipart"
|
||||||
"net/http"
|
"net/http"
|
||||||
gourl "net/url"
|
gourl "net/url"
|
||||||
"os"
|
|
||||||
"path"
|
"path"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"slices"
|
"slices"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
"sync"
|
||||||
)
|
)
|
||||||
|
|
||||||
type Torbox struct {
|
type Torbox struct {
|
||||||
Name string
|
Name string
|
||||||
Host string `json:"host"`
|
Host string `json:"host"`
|
||||||
APIKey string
|
APIKey string
|
||||||
|
DownloadKeys *xsync.MapOf[string, types.Account]
|
||||||
DownloadUncached bool
|
DownloadUncached bool
|
||||||
client *request.RLHTTPClient
|
client *request.Client
|
||||||
cache *cache.Cache
|
|
||||||
MountPath string
|
MountPath string
|
||||||
logger zerolog.Logger
|
logger zerolog.Logger
|
||||||
CheckCached bool
|
CheckCached bool
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func New(dc config.Debrid) *Torbox {
|
||||||
|
rl := request.ParseRateLimit(dc.RateLimit)
|
||||||
|
|
||||||
|
headers := map[string]string{
|
||||||
|
"Authorization": fmt.Sprintf("Bearer %s", dc.APIKey),
|
||||||
|
}
|
||||||
|
_log := logger.New(dc.Name)
|
||||||
|
client := request.New(
|
||||||
|
request.WithHeaders(headers),
|
||||||
|
request.WithRateLimiter(rl),
|
||||||
|
request.WithLogger(_log),
|
||||||
|
request.WithProxy(dc.Proxy),
|
||||||
|
)
|
||||||
|
|
||||||
|
accounts := xsync.NewMapOf[string, types.Account]()
|
||||||
|
for idx, key := range dc.DownloadAPIKeys {
|
||||||
|
id := strconv.Itoa(idx)
|
||||||
|
accounts.Store(id, types.Account{
|
||||||
|
Name: key,
|
||||||
|
ID: id,
|
||||||
|
Token: key,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
return &Torbox{
|
||||||
|
Name: "torbox",
|
||||||
|
Host: dc.Host,
|
||||||
|
APIKey: dc.APIKey,
|
||||||
|
DownloadKeys: accounts,
|
||||||
|
DownloadUncached: dc.DownloadUncached,
|
||||||
|
client: client,
|
||||||
|
MountPath: dc.Folder,
|
||||||
|
logger: _log,
|
||||||
|
CheckCached: dc.CheckCached,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func (tb *Torbox) GetName() string {
|
func (tb *Torbox) GetName() string {
|
||||||
return tb.Name
|
return tb.Name
|
||||||
}
|
}
|
||||||
@@ -43,15 +80,9 @@ func (tb *Torbox) GetLogger() zerolog.Logger {
|
|||||||
return tb.logger
|
return tb.logger
|
||||||
}
|
}
|
||||||
|
|
||||||
func (tb *Torbox) IsAvailable(infohashes []string) map[string]bool {
|
func (tb *Torbox) IsAvailable(hashes []string) map[string]bool {
|
||||||
// Check if the infohashes are available in the local cache
|
// Check if the infohashes are available in the local cache
|
||||||
hashes, result := torrent.GetLocalCache(infohashes, tb.cache)
|
result := make(map[string]bool)
|
||||||
|
|
||||||
if len(hashes) == 0 {
|
|
||||||
// Either all the infohashes are locally cached or none are
|
|
||||||
tb.cache.AddMultiple(result)
|
|
||||||
return result
|
|
||||||
}
|
|
||||||
|
|
||||||
// Divide hashes into groups of 100
|
// Divide hashes into groups of 100
|
||||||
for i := 0; i < len(hashes); i += 100 {
|
for i := 0; i < len(hashes); i += 100 {
|
||||||
@@ -91,17 +122,16 @@ func (tb *Torbox) IsAvailable(infohashes []string) map[string]bool {
|
|||||||
return result
|
return result
|
||||||
}
|
}
|
||||||
|
|
||||||
for h, cache := range *res.Data {
|
for h, c := range *res.Data {
|
||||||
if cache.Size > 0 {
|
if c.Size > 0 {
|
||||||
result[strings.ToUpper(h)] = true
|
result[strings.ToUpper(h)] = true
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
tb.cache.AddMultiple(result) // Add the results to the cache
|
|
||||||
return result
|
return result
|
||||||
}
|
}
|
||||||
|
|
||||||
func (tb *Torbox) SubmitMagnet(torrent *torrent.Torrent) (*torrent.Torrent, error) {
|
func (tb *Torbox) SubmitMagnet(torrent *types.Torrent) (*types.Torrent, error) {
|
||||||
url := fmt.Sprintf("%s/api/torrents/createtorrent", tb.Host)
|
url := fmt.Sprintf("%s/api/torrents/createtorrent", tb.Host)
|
||||||
payload := &bytes.Buffer{}
|
payload := &bytes.Buffer{}
|
||||||
writer := multipart.NewWriter(payload)
|
writer := multipart.NewWriter(payload)
|
||||||
@@ -149,22 +179,20 @@ func getTorboxStatus(status string, finished bool) string {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (tb *Torbox) GetTorrent(id string) (*torrent.Torrent, error) {
|
func (tb *Torbox) UpdateTorrent(t *types.Torrent) error {
|
||||||
t := &torrent.Torrent{}
|
url := fmt.Sprintf("%s/api/torrents/mylist/?id=%s", tb.Host, t.Id)
|
||||||
url := fmt.Sprintf("%s/api/torrents/mylist/?id=%s", tb.Host, id)
|
|
||||||
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
||||||
resp, err := tb.client.MakeRequest(req)
|
resp, err := tb.client.MakeRequest(req)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return t, err
|
return err
|
||||||
}
|
}
|
||||||
var res InfoResponse
|
var res InfoResponse
|
||||||
err = json.Unmarshal(resp, &res)
|
err = json.Unmarshal(resp, &res)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return t, err
|
return err
|
||||||
}
|
}
|
||||||
data := res.Data
|
data := res.Data
|
||||||
name := data.Name
|
name := data.Name
|
||||||
t.Id = id
|
|
||||||
t.Name = name
|
t.Name = name
|
||||||
t.Bytes = data.Size
|
t.Bytes = data.Size
|
||||||
t.Folder = name
|
t.Folder = name
|
||||||
@@ -176,12 +204,10 @@ func (tb *Torbox) GetTorrent(id string) (*torrent.Torrent, error) {
|
|||||||
t.OriginalFilename = name
|
t.OriginalFilename = name
|
||||||
t.MountPath = tb.MountPath
|
t.MountPath = tb.MountPath
|
||||||
t.Debrid = tb.Name
|
t.Debrid = tb.Name
|
||||||
t.DownloadLinks = make(map[string]torrent.DownloadLinks)
|
cfg := config.Get()
|
||||||
files := make([]torrent.File, 0)
|
|
||||||
cfg := config.GetConfig()
|
|
||||||
for _, f := range data.Files {
|
for _, f := range data.Files {
|
||||||
fileName := filepath.Base(f.Name)
|
fileName := filepath.Base(f.Name)
|
||||||
if utils.IsSampleFile(fileName) {
|
if utils.IsSampleFile(f.AbsolutePath) {
|
||||||
// Skip sample files
|
// Skip sample files
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
@@ -192,54 +218,50 @@ func (tb *Torbox) GetTorrent(id string) (*torrent.Torrent, error) {
|
|||||||
if !cfg.IsSizeAllowed(f.Size) {
|
if !cfg.IsSizeAllowed(f.Size) {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
file := torrent.File{
|
file := types.File{
|
||||||
Id: strconv.Itoa(f.Id),
|
Id: strconv.Itoa(f.Id),
|
||||||
Name: fileName,
|
Name: fileName,
|
||||||
Size: f.Size,
|
Size: f.Size,
|
||||||
Path: fileName,
|
Path: fileName,
|
||||||
}
|
}
|
||||||
files = append(files, file)
|
t.Files[fileName] = file
|
||||||
}
|
}
|
||||||
var cleanPath string
|
var cleanPath string
|
||||||
if len(files) > 0 {
|
if len(t.Files) > 0 {
|
||||||
cleanPath = path.Clean(data.Files[0].Name)
|
cleanPath = path.Clean(data.Files[0].Name)
|
||||||
} else {
|
} else {
|
||||||
cleanPath = path.Clean(data.Name)
|
cleanPath = path.Clean(data.Name)
|
||||||
}
|
}
|
||||||
|
|
||||||
t.OriginalFilename = strings.Split(cleanPath, "/")[0]
|
t.OriginalFilename = strings.Split(cleanPath, "/")[0]
|
||||||
t.Files = files
|
t.Debrid = tb.Name
|
||||||
//t.Debrid = tb
|
return nil
|
||||||
return t, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (tb *Torbox) CheckStatus(torrent *torrent.Torrent, isSymlink bool) (*torrent.Torrent, error) {
|
func (tb *Torbox) CheckStatus(torrent *types.Torrent, isSymlink bool) (*types.Torrent, error) {
|
||||||
for {
|
for {
|
||||||
t, err := tb.GetTorrent(torrent.Id)
|
err := tb.UpdateTorrent(torrent)
|
||||||
|
|
||||||
torrent = t
|
if err != nil || torrent == nil {
|
||||||
|
return torrent, err
|
||||||
if err != nil || t == nil {
|
|
||||||
return t, err
|
|
||||||
}
|
}
|
||||||
status := torrent.Status
|
status := torrent.Status
|
||||||
if status == "downloaded" {
|
if status == "downloaded" {
|
||||||
tb.logger.Info().Msgf("Torrent: %s downloaded", torrent.Name)
|
tb.logger.Info().Msgf("Torrent: %s downloaded", torrent.Name)
|
||||||
if !isSymlink {
|
if !isSymlink {
|
||||||
err = tb.GetDownloadLinks(torrent)
|
err = tb.GenerateDownloadLinks(torrent)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return torrent, err
|
return torrent, err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
break
|
break
|
||||||
} else if status == "downloading" {
|
} else if slices.Contains(tb.GetDownloadingStatus(), status) {
|
||||||
if !tb.DownloadUncached {
|
if !torrent.DownloadUncached {
|
||||||
go tb.DeleteTorrent(torrent)
|
|
||||||
return torrent, fmt.Errorf("torrent: %s not cached", torrent.Name)
|
return torrent, fmt.Errorf("torrent: %s not cached", torrent.Name)
|
||||||
}
|
}
|
||||||
// Break out of the loop if the torrent is downloading.
|
// Break out of the loop if the torrent is downloading.
|
||||||
// This is necessary to prevent infinite loop since we moved to sync downloading and async processing
|
// This is necessary to prevent infinite loop since we moved to sync downloading and async processing
|
||||||
break
|
return torrent, nil
|
||||||
} else {
|
} else {
|
||||||
return torrent, fmt.Errorf("torrent: %s has error", torrent.Name)
|
return torrent, fmt.Errorf("torrent: %s has error", torrent.Name)
|
||||||
}
|
}
|
||||||
@@ -248,22 +270,61 @@ func (tb *Torbox) CheckStatus(torrent *torrent.Torrent, isSymlink bool) (*torren
|
|||||||
return torrent, nil
|
return torrent, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (tb *Torbox) DeleteTorrent(torrent *torrent.Torrent) {
|
func (tb *Torbox) DeleteTorrent(torrentId string) error {
|
||||||
url := fmt.Sprintf("%s/api/torrents/controltorrent/%s", tb.Host, torrent.Id)
|
url := fmt.Sprintf("%s/api/torrents/controltorrent/%s", tb.Host, torrentId)
|
||||||
payload := map[string]string{"torrent_id": torrent.Id, "action": "Delete"}
|
payload := map[string]string{"torrent_id": torrentId, "action": "Delete"}
|
||||||
jsonPayload, _ := json.Marshal(payload)
|
jsonPayload, _ := json.Marshal(payload)
|
||||||
req, _ := http.NewRequest(http.MethodDelete, url, bytes.NewBuffer(jsonPayload))
|
req, _ := http.NewRequest(http.MethodDelete, url, bytes.NewBuffer(jsonPayload))
|
||||||
_, err := tb.client.MakeRequest(req)
|
if _, err := tb.client.MakeRequest(req); err != nil {
|
||||||
if err == nil {
|
return err
|
||||||
tb.logger.Info().Msgf("Torrent: %s deleted", torrent.Name)
|
|
||||||
} else {
|
|
||||||
tb.logger.Info().Msgf("Error deleting torrent: %s", err)
|
|
||||||
}
|
}
|
||||||
|
tb.logger.Info().Msgf("Torrent %s deleted from Torbox", torrentId)
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (tb *Torbox) GetDownloadLinks(t *torrent.Torrent) error {
|
func (tb *Torbox) GenerateDownloadLinks(t *types.Torrent) error {
|
||||||
downloadLinks := make(map[string]torrent.DownloadLinks)
|
filesCh := make(chan types.File, len(t.Files))
|
||||||
|
errCh := make(chan error, len(t.Files))
|
||||||
|
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
wg.Add(len(t.Files))
|
||||||
for _, file := range t.Files {
|
for _, file := range t.Files {
|
||||||
|
go func() {
|
||||||
|
defer wg.Done()
|
||||||
|
link, accountId, err := tb.GetDownloadLink(t, &file)
|
||||||
|
if err != nil {
|
||||||
|
errCh <- err
|
||||||
|
return
|
||||||
|
}
|
||||||
|
file.DownloadLink = link
|
||||||
|
file.AccountId = accountId
|
||||||
|
filesCh <- file
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
go func() {
|
||||||
|
wg.Wait()
|
||||||
|
close(filesCh)
|
||||||
|
close(errCh)
|
||||||
|
}()
|
||||||
|
|
||||||
|
// Collect results
|
||||||
|
files := make(map[string]types.File, len(t.Files))
|
||||||
|
for file := range filesCh {
|
||||||
|
files[file.Name] = file
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check for errors
|
||||||
|
for err := range errCh {
|
||||||
|
if err != nil {
|
||||||
|
return err // Return the first error encountered
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Files = files
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (tb *Torbox) GetDownloadLink(t *types.Torrent, file *types.File) (string, string, error) {
|
||||||
url := fmt.Sprintf("%s/api/torrents/requestdl/", tb.Host)
|
url := fmt.Sprintf("%s/api/torrents/requestdl/", tb.Host)
|
||||||
query := gourl.Values{}
|
query := gourl.Values{}
|
||||||
query.Add("torrent_id", t.Id)
|
query.Add("torrent_id", t.Id)
|
||||||
@@ -273,79 +334,50 @@ func (tb *Torbox) GetDownloadLinks(t *torrent.Torrent) error {
|
|||||||
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
||||||
resp, err := tb.client.MakeRequest(req)
|
resp, err := tb.client.MakeRequest(req)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return "", "", err
|
||||||
}
|
}
|
||||||
var data DownloadLinksResponse
|
var data DownloadLinksResponse
|
||||||
if err = json.Unmarshal(resp, &data); err != nil {
|
if err = json.Unmarshal(resp, &data); err != nil {
|
||||||
return err
|
return "", "", err
|
||||||
}
|
}
|
||||||
if data.Data == nil {
|
if data.Data == nil {
|
||||||
return fmt.Errorf("error getting download links")
|
return "", "", fmt.Errorf("error getting download links")
|
||||||
}
|
|
||||||
idx := 0
|
|
||||||
link := *data.Data
|
|
||||||
|
|
||||||
dl := torrent.DownloadLinks{
|
|
||||||
Link: link,
|
|
||||||
Filename: t.Files[idx].Name,
|
|
||||||
DownloadLink: link,
|
|
||||||
}
|
|
||||||
downloadLinks[file.Id] = dl
|
|
||||||
}
|
|
||||||
t.DownloadLinks = downloadLinks
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (tb *Torbox) GetDownloadLink(t *torrent.Torrent, file *torrent.File) *torrent.DownloadLinks {
|
|
||||||
url := fmt.Sprintf("%s/api/torrents/requestdl/", tb.Host)
|
|
||||||
query := gourl.Values{}
|
|
||||||
query.Add("torrent_id", t.Id)
|
|
||||||
query.Add("token", tb.APIKey)
|
|
||||||
query.Add("file_id", file.Id)
|
|
||||||
url += "?" + query.Encode()
|
|
||||||
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
|
||||||
resp, err := tb.client.MakeRequest(req)
|
|
||||||
if err != nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
var data DownloadLinksResponse
|
|
||||||
if err = json.Unmarshal(resp, &data); err != nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
if data.Data == nil {
|
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
link := *data.Data
|
link := *data.Data
|
||||||
return &torrent.DownloadLinks{
|
return link, "0", nil
|
||||||
Link: file.Link,
|
|
||||||
Filename: file.Name,
|
|
||||||
DownloadLink: link,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (tb *Torbox) GetDownloadingStatus() []string {
|
||||||
|
return []string{"downloading"}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (tb *Torbox) GetCheckCached() bool {
|
func (tb *Torbox) GetCheckCached() bool {
|
||||||
return tb.CheckCached
|
return tb.CheckCached
|
||||||
}
|
}
|
||||||
|
|
||||||
func (tb *Torbox) GetTorrents() ([]*torrent.Torrent, error) {
|
func (tb *Torbox) GetTorrents() ([]*types.Torrent, error) {
|
||||||
return nil, fmt.Errorf("not implemented")
|
return nil, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func New(dc config.Debrid, cache *cache.Cache) *Torbox {
|
func (tb *Torbox) GetDownloadUncached() bool {
|
||||||
rl := request.ParseRateLimit(dc.RateLimit)
|
return tb.DownloadUncached
|
||||||
headers := map[string]string{
|
|
||||||
"Authorization": fmt.Sprintf("Bearer %s", dc.APIKey),
|
|
||||||
}
|
}
|
||||||
client := request.NewRLHTTPClient(rl, headers)
|
|
||||||
return &Torbox{
|
func (tb *Torbox) GetDownloads() (map[string]types.DownloadLinks, error) {
|
||||||
Name: "torbox",
|
return nil, nil
|
||||||
Host: dc.Host,
|
|
||||||
APIKey: dc.APIKey,
|
|
||||||
DownloadUncached: dc.DownloadUncached,
|
|
||||||
client: client,
|
|
||||||
cache: cache,
|
|
||||||
MountPath: dc.Folder,
|
|
||||||
logger: logger.NewLogger(dc.Name, config.GetConfig().LogLevel, os.Stdout),
|
|
||||||
CheckCached: dc.CheckCached,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (tb *Torbox) CheckLink(link string) error {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (tb *Torbox) GetMountPath() string {
|
||||||
|
return tb.MountPath
|
||||||
|
}
|
||||||
|
|
||||||
|
func (tb *Torbox) DisableAccount(accountId string) {
|
||||||
|
}
|
||||||
|
|
||||||
|
func (tb *Torbox) ResetActiveDownloadKeys() {
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,135 +0,0 @@
|
|||||||
package torrent
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"github.com/sirrobot01/debrid-blackhole/internal/cache"
|
|
||||||
"github.com/sirrobot01/debrid-blackhole/internal/utils"
|
|
||||||
"github.com/sirrobot01/debrid-blackhole/pkg/arr"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"sync"
|
|
||||||
)
|
|
||||||
|
|
||||||
type Arr struct {
|
|
||||||
Name string `json:"name"`
|
|
||||||
Token string `json:"-"`
|
|
||||||
Host string `json:"host"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type ArrHistorySchema struct {
|
|
||||||
Page int `json:"page"`
|
|
||||||
PageSize int `json:"pageSize"`
|
|
||||||
SortKey string `json:"sortKey"`
|
|
||||||
SortDirection string `json:"sortDirection"`
|
|
||||||
TotalRecords int `json:"totalRecords"`
|
|
||||||
Records []struct {
|
|
||||||
ID int `json:"id"`
|
|
||||||
DownloadID string `json:"downloadId"`
|
|
||||||
} `json:"records"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type Torrent struct {
|
|
||||||
Id string `json:"id"`
|
|
||||||
InfoHash string `json:"info_hash"`
|
|
||||||
Name string `json:"name"`
|
|
||||||
Folder string `json:"folder"`
|
|
||||||
Filename string `json:"filename"`
|
|
||||||
OriginalFilename string `json:"original_filename"`
|
|
||||||
Size int64 `json:"size"`
|
|
||||||
Bytes int64 `json:"bytes"` // Size of only the files that are downloaded
|
|
||||||
Magnet *utils.Magnet `json:"magnet"`
|
|
||||||
Files []File `json:"files"`
|
|
||||||
Status string `json:"status"`
|
|
||||||
Added string `json:"added"`
|
|
||||||
Progress float64 `json:"progress"`
|
|
||||||
Speed int64 `json:"speed"`
|
|
||||||
Seeders int `json:"seeders"`
|
|
||||||
Links []string `json:"links"`
|
|
||||||
DownloadLinks map[string]DownloadLinks `json:"download_links"`
|
|
||||||
MountPath string `json:"mount_path"`
|
|
||||||
|
|
||||||
Debrid string `json:"debrid"`
|
|
||||||
|
|
||||||
Arr *arr.Arr `json:"arr"`
|
|
||||||
Mu sync.Mutex `json:"-"`
|
|
||||||
SizeDownloaded int64 `json:"-"` // This is used for local download
|
|
||||||
}
|
|
||||||
|
|
||||||
type DownloadLinks struct {
|
|
||||||
Filename string `json:"filename"`
|
|
||||||
Link string `json:"link"`
|
|
||||||
DownloadLink string `json:"download_link"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *Torrent) GetSymlinkFolder(parent string) string {
|
|
||||||
return filepath.Join(parent, t.Arr.Name, t.Folder)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *Torrent) GetMountFolder(rClonePath string) (string, error) {
|
|
||||||
possiblePaths := []string{
|
|
||||||
t.OriginalFilename,
|
|
||||||
t.Filename,
|
|
||||||
utils.RemoveExtension(t.OriginalFilename),
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, path := range possiblePaths {
|
|
||||||
_, err := os.Stat(filepath.Join(rClonePath, path))
|
|
||||||
if !os.IsNotExist(err) {
|
|
||||||
return path, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return "", fmt.Errorf("no path found")
|
|
||||||
}
|
|
||||||
|
|
||||||
type File struct {
|
|
||||||
Id string `json:"id"`
|
|
||||||
Name string `json:"name"`
|
|
||||||
Size int64 `json:"size"`
|
|
||||||
Path string `json:"path"`
|
|
||||||
Link string `json:"link"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *Torrent) Cleanup(remove bool) {
|
|
||||||
if remove {
|
|
||||||
err := os.Remove(t.Filename)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *Torrent) GetFile(id string) *File {
|
|
||||||
for _, f := range t.Files {
|
|
||||||
if f.Id == id {
|
|
||||||
return &f
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func GetLocalCache(infohashes []string, cache *cache.Cache) ([]string, map[string]bool) {
|
|
||||||
result := make(map[string]bool)
|
|
||||||
hashes := make([]string, 0)
|
|
||||||
|
|
||||||
if len(infohashes) == 0 {
|
|
||||||
return hashes, result
|
|
||||||
}
|
|
||||||
if len(infohashes) == 1 {
|
|
||||||
if cache.Exists(infohashes[0]) {
|
|
||||||
return hashes, map[string]bool{infohashes[0]: true}
|
|
||||||
}
|
|
||||||
return infohashes, result
|
|
||||||
}
|
|
||||||
|
|
||||||
cachedHashes := cache.GetMultiple(infohashes)
|
|
||||||
for _, h := range infohashes {
|
|
||||||
_, exists := cachedHashes[h]
|
|
||||||
if !exists {
|
|
||||||
hashes = append(hashes, h)
|
|
||||||
} else {
|
|
||||||
result[h] = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return infohashes, result
|
|
||||||
}
|
|
||||||
26
pkg/debrid/types/client.go
Normal file
26
pkg/debrid/types/client.go
Normal file
@@ -0,0 +1,26 @@
|
|||||||
|
package types
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/rs/zerolog"
|
||||||
|
)
|
||||||
|
|
||||||
|
type Client interface {
|
||||||
|
SubmitMagnet(tr *Torrent) (*Torrent, error)
|
||||||
|
CheckStatus(tr *Torrent, isSymlink bool) (*Torrent, error)
|
||||||
|
GenerateDownloadLinks(tr *Torrent) error
|
||||||
|
GetDownloadLink(tr *Torrent, file *File) (string, string, error)
|
||||||
|
DeleteTorrent(torrentId string) error
|
||||||
|
IsAvailable(infohashes []string) map[string]bool
|
||||||
|
GetCheckCached() bool
|
||||||
|
GetDownloadUncached() bool
|
||||||
|
UpdateTorrent(torrent *Torrent) error
|
||||||
|
GetTorrents() ([]*Torrent, error)
|
||||||
|
GetName() string
|
||||||
|
GetLogger() zerolog.Logger
|
||||||
|
GetDownloadingStatus() []string
|
||||||
|
GetDownloads() (map[string]DownloadLinks, error)
|
||||||
|
CheckLink(link string) error
|
||||||
|
GetMountPath() string
|
||||||
|
DisableAccount(string)
|
||||||
|
ResetActiveDownloadKeys()
|
||||||
|
}
|
||||||
128
pkg/debrid/types/torrent.go
Normal file
128
pkg/debrid/types/torrent.go
Normal file
@@ -0,0 +1,128 @@
|
|||||||
|
package types
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"github.com/sirrobot01/decypharr/internal/config"
|
||||||
|
"github.com/sirrobot01/decypharr/internal/logger"
|
||||||
|
"github.com/sirrobot01/decypharr/internal/utils"
|
||||||
|
"github.com/sirrobot01/decypharr/pkg/arr"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
type Torrent struct {
|
||||||
|
Id string `json:"id"`
|
||||||
|
InfoHash string `json:"info_hash"`
|
||||||
|
Name string `json:"name"`
|
||||||
|
Folder string `json:"folder"`
|
||||||
|
Filename string `json:"filename"`
|
||||||
|
OriginalFilename string `json:"original_filename"`
|
||||||
|
Size int64 `json:"size"`
|
||||||
|
Bytes int64 `json:"bytes"` // Size of only the files that are downloaded
|
||||||
|
Magnet *utils.Magnet `json:"magnet"`
|
||||||
|
Files map[string]File `json:"files"`
|
||||||
|
Status string `json:"status"`
|
||||||
|
Added string `json:"added"`
|
||||||
|
Progress float64 `json:"progress"`
|
||||||
|
Speed int64 `json:"speed"`
|
||||||
|
Seeders int `json:"seeders"`
|
||||||
|
Links []string `json:"links"`
|
||||||
|
MountPath string `json:"mount_path"`
|
||||||
|
|
||||||
|
Debrid string `json:"debrid"`
|
||||||
|
|
||||||
|
Arr *arr.Arr `json:"arr"`
|
||||||
|
Mu sync.Mutex `json:"-"`
|
||||||
|
SizeDownloaded int64 `json:"-"` // This is used for local download
|
||||||
|
DownloadUncached bool `json:"-"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type DownloadLinks struct {
|
||||||
|
Filename string `json:"filename"`
|
||||||
|
Link string `json:"link"`
|
||||||
|
DownloadLink string `json:"download_link"`
|
||||||
|
Generated time.Time `json:"generated"`
|
||||||
|
Size int64 `json:"size"`
|
||||||
|
Id string `json:"id"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *Torrent) GetSymlinkFolder(parent string) string {
|
||||||
|
return filepath.Join(parent, t.Arr.Name, t.Folder)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *Torrent) GetMountFolder(rClonePath string) (string, error) {
|
||||||
|
_log := logger.GetDefaultLogger()
|
||||||
|
possiblePaths := []string{
|
||||||
|
t.OriginalFilename,
|
||||||
|
t.Filename,
|
||||||
|
utils.RemoveExtension(t.OriginalFilename),
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, path := range possiblePaths {
|
||||||
|
_p := filepath.Join(rClonePath, path)
|
||||||
|
_log.Trace().Msgf("Checking path: %s", _p)
|
||||||
|
_, err := os.Stat(_p)
|
||||||
|
if !os.IsNotExist(err) {
|
||||||
|
return path, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return "", fmt.Errorf("no path found")
|
||||||
|
}
|
||||||
|
|
||||||
|
type File struct {
|
||||||
|
Id string `json:"id"`
|
||||||
|
Name string `json:"name"`
|
||||||
|
Size int64 `json:"size"`
|
||||||
|
Path string `json:"path"`
|
||||||
|
Link string `json:"link"`
|
||||||
|
DownloadLink string `json:"download_link"`
|
||||||
|
AccountId string `json:"account_id"`
|
||||||
|
Generated time.Time `json:"generated"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *File) IsValid() bool {
|
||||||
|
cfg := config.Get()
|
||||||
|
name := filepath.Base(f.Path)
|
||||||
|
if utils.IsSampleFile(f.Path) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
if !cfg.IsAllowedFile(name) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if !cfg.IsSizeAllowed(f.Size) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
if f.Link == "" {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *Torrent) Cleanup(remove bool) {
|
||||||
|
if remove {
|
||||||
|
err := os.Remove(t.Filename)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *Torrent) GetFile(id string) *File {
|
||||||
|
for _, f := range t.Files {
|
||||||
|
if f.Id == id {
|
||||||
|
return &f
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type Account struct {
|
||||||
|
ID string `json:"id"`
|
||||||
|
Disabled bool `json:"disabled"`
|
||||||
|
Name string `json:"name"`
|
||||||
|
Token string `json:"token"`
|
||||||
|
}
|
||||||
@@ -1,2 +0,0 @@
|
|||||||
package downloader
|
|
||||||
|
|
||||||
@@ -1,345 +0,0 @@
|
|||||||
package proxy
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"cmp"
|
|
||||||
"context"
|
|
||||||
"encoding/xml"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"github.com/elazarl/goproxy"
|
|
||||||
"github.com/elazarl/goproxy/ext/auth"
|
|
||||||
"github.com/rs/zerolog"
|
|
||||||
"github.com/sirrobot01/debrid-blackhole/internal/config"
|
|
||||||
"github.com/sirrobot01/debrid-blackhole/internal/logger"
|
|
||||||
"github.com/sirrobot01/debrid-blackhole/internal/utils"
|
|
||||||
"github.com/sirrobot01/debrid-blackhole/pkg/service"
|
|
||||||
"github.com/valyala/fastjson"
|
|
||||||
"io"
|
|
||||||
"net/http"
|
|
||||||
"os"
|
|
||||||
"regexp"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
)
|
|
||||||
|
|
||||||
type RSS struct {
|
|
||||||
XMLName xml.Name `xml:"rss"`
|
|
||||||
Text string `xml:",chardata"`
|
|
||||||
Version string `xml:"version,attr"`
|
|
||||||
Atom string `xml:"atom,attr"`
|
|
||||||
Torznab string `xml:"torznab,attr"`
|
|
||||||
Channel struct {
|
|
||||||
Text string `xml:",chardata"`
|
|
||||||
Link struct {
|
|
||||||
Text string `xml:",chardata"`
|
|
||||||
Rel string `xml:"rel,attr"`
|
|
||||||
Type string `xml:"type,attr"`
|
|
||||||
} `xml:"link"`
|
|
||||||
Title string `xml:"title"`
|
|
||||||
Items []Item `xml:"item"`
|
|
||||||
} `xml:"channel"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type Item struct {
|
|
||||||
Text string `xml:",chardata"`
|
|
||||||
Title string `xml:"title"`
|
|
||||||
Description string `xml:"description"`
|
|
||||||
GUID string `xml:"guid"`
|
|
||||||
ProwlarrIndexer struct {
|
|
||||||
Text string `xml:",chardata"`
|
|
||||||
ID string `xml:"id,attr"`
|
|
||||||
Type string `xml:"type,attr"`
|
|
||||||
} `xml:"prowlarrindexer"`
|
|
||||||
Comments string `xml:"comments"`
|
|
||||||
PubDate string `xml:"pubDate"`
|
|
||||||
Size string `xml:"size"`
|
|
||||||
Link string `xml:"link"`
|
|
||||||
Category []string `xml:"category"`
|
|
||||||
Enclosure struct {
|
|
||||||
Text string `xml:",chardata"`
|
|
||||||
URL string `xml:"url,attr"`
|
|
||||||
Length string `xml:"length,attr"`
|
|
||||||
Type string `xml:"type,attr"`
|
|
||||||
} `xml:"enclosure"`
|
|
||||||
TorznabAttrs []struct {
|
|
||||||
Text string `xml:",chardata"`
|
|
||||||
Name string `xml:"name,attr"`
|
|
||||||
Value string `xml:"value,attr"`
|
|
||||||
} `xml:"attr"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type Proxy struct {
|
|
||||||
port string
|
|
||||||
enabled bool
|
|
||||||
debug bool
|
|
||||||
username string
|
|
||||||
password string
|
|
||||||
cachedOnly bool
|
|
||||||
logger zerolog.Logger
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewProxy() *Proxy {
|
|
||||||
cfg := config.GetConfig().Proxy
|
|
||||||
port := cmp.Or(os.Getenv("PORT"), cfg.Port, "8181")
|
|
||||||
return &Proxy{
|
|
||||||
port: port,
|
|
||||||
enabled: cfg.Enabled,
|
|
||||||
username: cfg.Username,
|
|
||||||
password: cfg.Password,
|
|
||||||
cachedOnly: cfg.CachedOnly,
|
|
||||||
logger: logger.NewLogger("proxy", cfg.LogLevel, os.Stdout),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *Proxy) ProcessJSONResponse(resp *http.Response) *http.Response {
|
|
||||||
if resp == nil || resp.Body == nil {
|
|
||||||
return resp
|
|
||||||
}
|
|
||||||
|
|
||||||
body, err := io.ReadAll(resp.Body)
|
|
||||||
if err != nil {
|
|
||||||
return resp
|
|
||||||
}
|
|
||||||
err = resp.Body.Close()
|
|
||||||
if err != nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
var par fastjson.Parser
|
|
||||||
v, err := par.ParseBytes(body)
|
|
||||||
if err != nil {
|
|
||||||
// If it's not JSON, return the original response
|
|
||||||
resp.Body = io.NopCloser(bytes.NewReader(body))
|
|
||||||
return resp
|
|
||||||
}
|
|
||||||
|
|
||||||
// Modify the JSON
|
|
||||||
|
|
||||||
// Serialize the modified JSON back to bytes
|
|
||||||
modifiedBody := v.MarshalTo(nil)
|
|
||||||
|
|
||||||
// Set the modified body back to the response
|
|
||||||
resp.Body = io.NopCloser(bytes.NewReader(modifiedBody))
|
|
||||||
resp.ContentLength = int64(len(modifiedBody))
|
|
||||||
resp.Header.Set("Content-Length", string(rune(len(modifiedBody))))
|
|
||||||
|
|
||||||
return resp
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *Proxy) ProcessResponse(resp *http.Response) *http.Response {
|
|
||||||
if resp == nil || resp.Body == nil {
|
|
||||||
return resp
|
|
||||||
}
|
|
||||||
contentType := resp.Header.Get("Content-Type")
|
|
||||||
switch contentType {
|
|
||||||
case "application/json":
|
|
||||||
return resp // p.ProcessJSONResponse(resp)
|
|
||||||
case "application/xml":
|
|
||||||
return p.ProcessXMLResponse(resp)
|
|
||||||
case "application/rss+xml":
|
|
||||||
return p.ProcessXMLResponse(resp)
|
|
||||||
default:
|
|
||||||
return resp
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func getItemsHash(items []Item) map[string]string {
|
|
||||||
|
|
||||||
var wg sync.WaitGroup
|
|
||||||
idHashMap := sync.Map{} // Use sync.Map for concurrent access
|
|
||||||
|
|
||||||
for _, item := range items {
|
|
||||||
wg.Add(1)
|
|
||||||
go func(item Item) {
|
|
||||||
defer wg.Done()
|
|
||||||
hash := strings.ToLower(item.getHash())
|
|
||||||
if hash != "" {
|
|
||||||
idHashMap.Store(item.GUID, hash) // Store directly into sync.Map
|
|
||||||
}
|
|
||||||
}(item)
|
|
||||||
}
|
|
||||||
wg.Wait()
|
|
||||||
|
|
||||||
// Convert sync.Map to regular map
|
|
||||||
finalMap := make(map[string]string)
|
|
||||||
idHashMap.Range(func(key, value interface{}) bool {
|
|
||||||
finalMap[key.(string)] = value.(string)
|
|
||||||
return true
|
|
||||||
})
|
|
||||||
|
|
||||||
return finalMap
|
|
||||||
}
|
|
||||||
|
|
||||||
func (item Item) getHash() string {
|
|
||||||
infohash := ""
|
|
||||||
|
|
||||||
for _, attr := range item.TorznabAttrs {
|
|
||||||
if attr.Name == "infohash" {
|
|
||||||
return attr.Value
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if strings.Contains(item.GUID, "magnet:?") {
|
|
||||||
magnet, err := utils.GetMagnetInfo(item.GUID)
|
|
||||||
if err == nil && magnet != nil && magnet.InfoHash != "" {
|
|
||||||
return magnet.InfoHash
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
magnetLink := item.Link
|
|
||||||
|
|
||||||
if magnetLink == "" {
|
|
||||||
// We can't check the availability of the torrent without a magnet link or infohash
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
if strings.Contains(magnetLink, "magnet:?") {
|
|
||||||
magnet, err := utils.GetMagnetInfo(magnetLink)
|
|
||||||
if err == nil && magnet != nil && magnet.InfoHash != "" {
|
|
||||||
return magnet.InfoHash
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
//Check Description for infohash
|
|
||||||
hash := utils.ExtractInfoHash(item.Description)
|
|
||||||
if hash == "" {
|
|
||||||
// Check Title for infohash
|
|
||||||
hash = utils.ExtractInfoHash(item.Comments)
|
|
||||||
}
|
|
||||||
infohash = hash
|
|
||||||
if infohash == "" {
|
|
||||||
if strings.Contains(magnetLink, "http") {
|
|
||||||
h, _ := utils.GetInfohashFromURL(magnetLink)
|
|
||||||
if h != "" {
|
|
||||||
infohash = h
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return infohash
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *Proxy) ProcessXMLResponse(resp *http.Response) *http.Response {
|
|
||||||
if resp == nil || resp.Body == nil {
|
|
||||||
return resp
|
|
||||||
}
|
|
||||||
|
|
||||||
svc := service.GetService()
|
|
||||||
|
|
||||||
body, err := io.ReadAll(resp.Body)
|
|
||||||
if err != nil {
|
|
||||||
p.logger.Info().Msgf("Error reading response body: %v", err)
|
|
||||||
resp.Body = io.NopCloser(bytes.NewReader(body))
|
|
||||||
return resp
|
|
||||||
}
|
|
||||||
err = resp.Body.Close()
|
|
||||||
if err != nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
var rss RSS
|
|
||||||
err = xml.Unmarshal(body, &rss)
|
|
||||||
if err != nil {
|
|
||||||
p.logger.Info().Msgf("Error unmarshalling XML: %v", err)
|
|
||||||
resp.Body = io.NopCloser(bytes.NewReader(body))
|
|
||||||
return resp
|
|
||||||
}
|
|
||||||
indexer := ""
|
|
||||||
if len(rss.Channel.Items) > 0 {
|
|
||||||
indexer = rss.Channel.Items[0].ProwlarrIndexer.Text
|
|
||||||
} else {
|
|
||||||
resp.Body = io.NopCloser(bytes.NewReader(body))
|
|
||||||
return resp
|
|
||||||
}
|
|
||||||
|
|
||||||
// Step 4: Extract infohash or magnet URI, manipulate data
|
|
||||||
IdsHashMap := getItemsHash(rss.Channel.Items)
|
|
||||||
hashes := make([]string, 0)
|
|
||||||
for _, hash := range IdsHashMap {
|
|
||||||
if hash != "" {
|
|
||||||
hashes = append(hashes, hash)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
availableHashesMap := svc.Debrid.Get().IsAvailable(hashes)
|
|
||||||
newItems := make([]Item, 0, len(rss.Channel.Items))
|
|
||||||
|
|
||||||
if len(hashes) > 0 {
|
|
||||||
for _, item := range rss.Channel.Items {
|
|
||||||
hash := IdsHashMap[item.GUID]
|
|
||||||
if hash == "" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
isCached, exists := availableHashesMap[hash]
|
|
||||||
if !exists || !isCached {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
newItems = append(newItems, item)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(newItems) > 0 {
|
|
||||||
p.logger.Info().Msgf("[%s Report]: %d/%d items are cached || Found %d infohash", indexer, len(newItems), len(rss.Channel.Items), len(hashes))
|
|
||||||
} else {
|
|
||||||
// This will prevent the indexer from being disabled by the arr
|
|
||||||
p.logger.Info().Msgf("[%s Report]: No Items are cached; Return only first item with [UnCached]", indexer)
|
|
||||||
item := rss.Channel.Items[0]
|
|
||||||
item.Title = fmt.Sprintf("%s [UnCached]", item.Title)
|
|
||||||
newItems = append(newItems, item)
|
|
||||||
}
|
|
||||||
|
|
||||||
rss.Channel.Items = newItems
|
|
||||||
modifiedBody, err := xml.MarshalIndent(rss, "", " ")
|
|
||||||
if err != nil {
|
|
||||||
p.logger.Info().Msgf("Error marshalling XML: %v", err)
|
|
||||||
resp.Body = io.NopCloser(bytes.NewReader(body))
|
|
||||||
return resp
|
|
||||||
}
|
|
||||||
modifiedBody = append([]byte(xml.Header), modifiedBody...)
|
|
||||||
|
|
||||||
// Set the modified body back to the response
|
|
||||||
resp.Body = io.NopCloser(bytes.NewReader(modifiedBody))
|
|
||||||
return resp
|
|
||||||
}
|
|
||||||
|
|
||||||
func UrlMatches(re *regexp.Regexp) goproxy.ReqConditionFunc {
|
|
||||||
return func(req *http.Request, ctx *goproxy.ProxyCtx) bool {
|
|
||||||
return re.MatchString(req.URL.String())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *Proxy) Start(ctx context.Context) error {
|
|
||||||
username, password := p.username, p.password
|
|
||||||
proxy := goproxy.NewProxyHttpServer()
|
|
||||||
if username != "" || password != "" {
|
|
||||||
// Set up basic auth for proxy
|
|
||||||
auth.ProxyBasic(proxy, "my_realm", func(user, pwd string) bool {
|
|
||||||
return user == username && password == pwd
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
proxy.OnRequest(goproxy.ReqHostMatches(regexp.MustCompile("^.443$"))).HandleConnect(goproxy.AlwaysMitm)
|
|
||||||
proxy.OnResponse(
|
|
||||||
UrlMatches(regexp.MustCompile("^.*/api\\?t=(search|tvsearch|movie)(&.*)?$")),
|
|
||||||
goproxy.StatusCodeIs(http.StatusOK, http.StatusAccepted)).DoFunc(
|
|
||||||
func(resp *http.Response, ctx *goproxy.ProxyCtx) *http.Response {
|
|
||||||
return p.ProcessResponse(resp)
|
|
||||||
})
|
|
||||||
|
|
||||||
proxy.Verbose = p.debug
|
|
||||||
portFmt := fmt.Sprintf(":%s", p.port)
|
|
||||||
srv := &http.Server{
|
|
||||||
Addr: portFmt,
|
|
||||||
Handler: proxy,
|
|
||||||
}
|
|
||||||
p.logger.Info().Msgf("Starting proxy server on %s", portFmt)
|
|
||||||
go func() {
|
|
||||||
if err := srv.ListenAndServe(); err != nil && !errors.Is(err, http.ErrServerClosed) {
|
|
||||||
p.logger.Info().Msgf("Error starting proxy server: %v", err)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
<-ctx.Done()
|
|
||||||
p.logger.Info().Msg("Shutting down gracefully...")
|
|
||||||
return srv.Shutdown(context.Background())
|
|
||||||
}
|
|
||||||
@@ -1,12 +1,12 @@
|
|||||||
package qbit
|
package qbit
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"crypto/tls"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
"github.com/cavaliergopher/grab/v3"
|
"github.com/cavaliergopher/grab/v3"
|
||||||
"github.com/sirrobot01/debrid-blackhole/internal/utils"
|
"github.com/sirrobot01/decypharr/internal/request"
|
||||||
debrid "github.com/sirrobot01/debrid-blackhole/pkg/debrid/torrent"
|
"github.com/sirrobot01/decypharr/internal/utils"
|
||||||
"net/http"
|
debrid "github.com/sirrobot01/decypharr/pkg/debrid/types"
|
||||||
|
"io"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"sync"
|
"sync"
|
||||||
@@ -51,7 +51,7 @@ Loop:
|
|||||||
|
|
||||||
func (q *QBit) ProcessManualFile(torrent *Torrent) (string, error) {
|
func (q *QBit) ProcessManualFile(torrent *Torrent) (string, error) {
|
||||||
debridTorrent := torrent.DebridTorrent
|
debridTorrent := torrent.DebridTorrent
|
||||||
q.logger.Info().Msgf("Downloading %d files...", len(debridTorrent.DownloadLinks))
|
q.logger.Info().Msgf("Downloading %d files...", len(debridTorrent.Files))
|
||||||
torrentPath := filepath.Join(q.DownloadFolder, debridTorrent.Arr.Name, utils.RemoveExtension(debridTorrent.OriginalFilename))
|
torrentPath := filepath.Join(q.DownloadFolder, debridTorrent.Arr.Name, utils.RemoveExtension(debridTorrent.OriginalFilename))
|
||||||
torrentPath = utils.RemoveInvalidChars(torrentPath)
|
torrentPath = utils.RemoveInvalidChars(torrentPath)
|
||||||
err := os.MkdirAll(torrentPath, os.ModePerm)
|
err := os.MkdirAll(torrentPath, os.ModePerm)
|
||||||
@@ -91,32 +91,25 @@ func (q *QBit) downloadFiles(torrent *Torrent, parent string) {
|
|||||||
}
|
}
|
||||||
q.UpdateTorrentMin(torrent, debridTorrent)
|
q.UpdateTorrentMin(torrent, debridTorrent)
|
||||||
}
|
}
|
||||||
|
|
||||||
tr := &http.Transport{
|
|
||||||
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
|
|
||||||
Proxy: http.ProxyFromEnvironment,
|
|
||||||
}
|
|
||||||
client := &grab.Client{
|
client := &grab.Client{
|
||||||
UserAgent: "qBitTorrent",
|
UserAgent: "qBitTorrent",
|
||||||
HTTPClient: &http.Client{
|
HTTPClient: request.New(request.WithTimeout(0)),
|
||||||
Transport: tr,
|
|
||||||
},
|
|
||||||
}
|
}
|
||||||
for _, link := range debridTorrent.DownloadLinks {
|
for _, file := range debridTorrent.Files {
|
||||||
if link.DownloadLink == "" {
|
if file.DownloadLink == "" {
|
||||||
q.logger.Info().Msgf("No download link found for %s", link.Filename)
|
q.logger.Info().Msgf("No download link found for %s", file.Name)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
wg.Add(1)
|
wg.Add(1)
|
||||||
semaphore <- struct{}{}
|
semaphore <- struct{}{}
|
||||||
go func(link debrid.DownloadLinks) {
|
go func(file debrid.File) {
|
||||||
defer wg.Done()
|
defer wg.Done()
|
||||||
defer func() { <-semaphore }()
|
defer func() { <-semaphore }()
|
||||||
filename := link.Filename
|
filename := file.Link
|
||||||
|
|
||||||
err := Download(
|
err := Download(
|
||||||
client,
|
client,
|
||||||
link.DownloadLink,
|
file.DownloadLink,
|
||||||
filepath.Join(parent, filename),
|
filepath.Join(parent, filename),
|
||||||
progressCallback,
|
progressCallback,
|
||||||
)
|
)
|
||||||
@@ -126,7 +119,7 @@ func (q *QBit) downloadFiles(torrent *Torrent, parent string) {
|
|||||||
} else {
|
} else {
|
||||||
q.logger.Info().Msgf("Downloaded %s", filename)
|
q.logger.Info().Msgf("Downloaded %s", filename)
|
||||||
}
|
}
|
||||||
}(link)
|
}(file)
|
||||||
}
|
}
|
||||||
wg.Wait()
|
wg.Wait()
|
||||||
q.logger.Info().Msgf("Downloaded all files for %s", debridTorrent.Name)
|
q.logger.Info().Msgf("Downloaded all files for %s", debridTorrent.Name)
|
||||||
@@ -153,8 +146,13 @@ func (q *QBit) ProcessSymlink(torrent *Torrent) (string, error) {
|
|||||||
torrentFolder = utils.RemoveExtension(torrentFolder)
|
torrentFolder = utils.RemoveExtension(torrentFolder)
|
||||||
torrentRclonePath = rCloneBase // /mnt/rclone/magnets/ // Remove the filename since it's in the root folder
|
torrentRclonePath = rCloneBase // /mnt/rclone/magnets/ // Remove the filename since it's in the root folder
|
||||||
}
|
}
|
||||||
torrentSymlinkPath := filepath.Join(q.DownloadFolder, debridTorrent.Arr.Name, torrentFolder) // /mnt/symlinks/{category}/MyTVShow/
|
return q.createSymlinks(debridTorrent, torrentRclonePath, torrentFolder) // verify cos we're using external webdav
|
||||||
err = os.MkdirAll(torrentSymlinkPath, os.ModePerm)
|
}
|
||||||
|
|
||||||
|
func (q *QBit) createSymlinks(debridTorrent *debrid.Torrent, rclonePath, torrentFolder string) (string, error) {
|
||||||
|
files := debridTorrent.Files
|
||||||
|
torrentSymlinkPath := filepath.Join(q.DownloadFolder, debridTorrent.Arr.Name, torrentFolder)
|
||||||
|
err := os.MkdirAll(torrentSymlinkPath, os.ModePerm)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", fmt.Errorf("failed to create directory: %s: %v", torrentSymlinkPath, err)
|
return "", fmt.Errorf("failed to create directory: %s: %v", torrentSymlinkPath, err)
|
||||||
}
|
}
|
||||||
@@ -163,20 +161,34 @@ func (q *QBit) ProcessSymlink(torrent *Torrent) (string, error) {
|
|||||||
for _, file := range files {
|
for _, file := range files {
|
||||||
pending[file.Path] = file
|
pending[file.Path] = file
|
||||||
}
|
}
|
||||||
ticker := time.NewTicker(200 * time.Millisecond)
|
ticker := time.NewTicker(100 * time.Millisecond)
|
||||||
defer ticker.Stop()
|
defer ticker.Stop()
|
||||||
|
filePaths := make([]string, 0, len(pending))
|
||||||
|
|
||||||
for len(pending) > 0 {
|
for len(pending) > 0 {
|
||||||
<-ticker.C
|
<-ticker.C
|
||||||
for path, file := range pending {
|
for path, file := range pending {
|
||||||
fullFilePath := filepath.Join(torrentRclonePath, file.Path)
|
fullFilePath := filepath.Join(rclonePath, file.Path)
|
||||||
if _, err := os.Stat(fullFilePath); !os.IsNotExist(err) {
|
if _, err := os.Stat(fullFilePath); !os.IsNotExist(err) {
|
||||||
q.logger.Info().Msgf("File is ready: %s", file.Path)
|
q.logger.Info().Msgf("File is ready: %s", file.Path)
|
||||||
q.createSymLink(torrentSymlinkPath, torrentRclonePath, file)
|
_filePath := q.createSymLink(torrentSymlinkPath, rclonePath, file)
|
||||||
|
filePaths = append(filePaths, _filePath)
|
||||||
delete(pending, path)
|
delete(pending, path)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if q.SkipPreCache {
|
||||||
|
return torrentSymlinkPath, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
|
||||||
|
if err := q.preCacheFile(debridTorrent.Name, filePaths); err != nil {
|
||||||
|
q.logger.Error().Msgf("Failed to pre-cache file: %s", err)
|
||||||
|
}
|
||||||
|
}() // Pre-cache the files in the background
|
||||||
|
// Pre-cache the first 256KB and 1MB of the file
|
||||||
return torrentSymlinkPath, nil
|
return torrentSymlinkPath, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -191,7 +203,7 @@ func (q *QBit) getTorrentPath(rclonePath string, debridTorrent *debrid.Torrent)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (q *QBit) createSymLink(path string, torrentMountPath string, file debrid.File) {
|
func (q *QBit) createSymLink(path string, torrentMountPath string, file debrid.File) string {
|
||||||
|
|
||||||
// Combine the directory and filename to form a full path
|
// Combine the directory and filename to form a full path
|
||||||
fullPath := filepath.Join(path, file.Name) // /mnt/symlinks/{category}/MyTVShow/MyTVShow.S01E01.720p.mkv
|
fullPath := filepath.Join(path, file.Name) // /mnt/symlinks/{category}/MyTVShow/MyTVShow.S01E01.720p.mkv
|
||||||
@@ -202,4 +214,55 @@ func (q *QBit) createSymLink(path string, torrentMountPath string, file debrid.F
|
|||||||
// It's okay if the symlink already exists
|
// It's okay if the symlink already exists
|
||||||
q.logger.Debug().Msgf("Failed to create symlink: %s: %v", fullPath, err)
|
q.logger.Debug().Msgf("Failed to create symlink: %s: %v", fullPath, err)
|
||||||
}
|
}
|
||||||
|
return torrentFilePath
|
||||||
|
}
|
||||||
|
|
||||||
|
func (q *QBit) preCacheFile(name string, filePaths []string) error {
|
||||||
|
q.logger.Trace().Msgf("Pre-caching file: %s", name)
|
||||||
|
if len(filePaths) == 0 {
|
||||||
|
return fmt.Errorf("no file paths provided")
|
||||||
|
}
|
||||||
|
for _, filePath := range filePaths {
|
||||||
|
func() {
|
||||||
|
file, err := os.Open(filePath)
|
||||||
|
defer func(file *os.File) {
|
||||||
|
_ = file.Close()
|
||||||
|
}(file)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
// Pre-cache the file header (first 256KB) using 16KB chunks.
|
||||||
|
q.readSmallChunks(file, 0, 256*1024, 16*1024)
|
||||||
|
q.readSmallChunks(file, 1024*1024, 64*1024, 16*1024)
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (q *QBit) readSmallChunks(file *os.File, startPos int64, totalToRead int, chunkSize int) {
|
||||||
|
_, err := file.Seek(startPos, 0)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
buf := make([]byte, chunkSize)
|
||||||
|
bytesRemaining := totalToRead
|
||||||
|
|
||||||
|
for bytesRemaining > 0 {
|
||||||
|
toRead := chunkSize
|
||||||
|
if bytesRemaining < chunkSize {
|
||||||
|
toRead = bytesRemaining
|
||||||
|
}
|
||||||
|
|
||||||
|
n, err := file.Read(buf[:toRead])
|
||||||
|
if err != nil {
|
||||||
|
if err == io.EOF {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
bytesRemaining -= n
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -4,9 +4,9 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
"encoding/base64"
|
"encoding/base64"
|
||||||
"github.com/go-chi/chi/v5"
|
"github.com/go-chi/chi/v5"
|
||||||
"github.com/sirrobot01/debrid-blackhole/internal/request"
|
"github.com/sirrobot01/decypharr/internal/request"
|
||||||
"github.com/sirrobot01/debrid-blackhole/pkg/arr"
|
"github.com/sirrobot01/decypharr/pkg/arr"
|
||||||
"github.com/sirrobot01/debrid-blackhole/pkg/service"
|
"github.com/sirrobot01/decypharr/pkg/service"
|
||||||
"net/http"
|
"net/http"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"strings"
|
"strings"
|
||||||
@@ -46,8 +46,7 @@ func (q *QBit) CategoryContext(next http.Handler) http.Handler {
|
|||||||
category = r.FormValue("category")
|
category = r.FormValue("category")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
ctx := r.Context()
|
ctx := context.WithValue(r.Context(), "category", strings.TrimSpace(category))
|
||||||
ctx = context.WithValue(r.Context(), "category", strings.TrimSpace(category))
|
|
||||||
next.ServeHTTP(w, r.WithContext(ctx))
|
next.ServeHTTP(w, r.WithContext(ctx))
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
@@ -60,11 +59,18 @@ func (q *QBit) authContext(next http.Handler) http.Handler {
|
|||||||
// Check if arr exists
|
// Check if arr exists
|
||||||
a := svc.Arr.Get(category)
|
a := svc.Arr.Get(category)
|
||||||
if a == nil {
|
if a == nil {
|
||||||
a = arr.New(category, "", "", false)
|
downloadUncached := false
|
||||||
|
a = arr.New(category, "", "", false, false, &downloadUncached)
|
||||||
}
|
}
|
||||||
if err == nil {
|
if err == nil {
|
||||||
a.Host = strings.TrimSpace(host)
|
host = strings.TrimSpace(host)
|
||||||
a.Token = strings.TrimSpace(token)
|
if host != "" {
|
||||||
|
a.Host = host
|
||||||
|
}
|
||||||
|
token = strings.TrimSpace(token)
|
||||||
|
if token != "" {
|
||||||
|
a.Token = token
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
svc.Arr.AddOrUpdate(a)
|
svc.Arr.AddOrUpdate(a)
|
||||||
@@ -94,6 +100,16 @@ func HashesCtx(next http.Handler) http.Handler {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (q *QBit) handleLogin(w http.ResponseWriter, r *http.Request) {
|
func (q *QBit) handleLogin(w http.ResponseWriter, r *http.Request) {
|
||||||
|
ctx := r.Context()
|
||||||
|
_arr := ctx.Value("arr").(*arr.Arr)
|
||||||
|
if _arr == nil {
|
||||||
|
// No arr
|
||||||
|
_, _ = w.Write([]byte("Ok."))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if err := _arr.Validate(); err != nil {
|
||||||
|
q.logger.Info().Msgf("Error validating arr: %v", err)
|
||||||
|
}
|
||||||
_, _ = w.Write([]byte("Ok."))
|
_, _ = w.Write([]byte("Ok."))
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -137,7 +153,7 @@ func (q *QBit) handleTorrentsInfo(w http.ResponseWriter, r *http.Request) {
|
|||||||
category := ctx.Value("category").(string)
|
category := ctx.Value("category").(string)
|
||||||
filter := strings.Trim(r.URL.Query().Get("filter"), "")
|
filter := strings.Trim(r.URL.Query().Get("filter"), "")
|
||||||
hashes, _ := ctx.Value("hashes").([]string)
|
hashes, _ := ctx.Value("hashes").([]string)
|
||||||
torrents := q.Storage.GetAll(category, filter, hashes)
|
torrents := q.Storage.GetAllSorted(category, filter, hashes, "added_on", false)
|
||||||
request.JSONResponse(w, torrents, http.StatusOK)
|
request.JSONResponse(w, torrents, http.StatusOK)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -2,24 +2,25 @@ package qbit
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"github.com/sirrobot01/debrid-blackhole/internal/utils"
|
"github.com/sirrobot01/decypharr/internal/utils"
|
||||||
"github.com/sirrobot01/debrid-blackhole/pkg/service"
|
"github.com/sirrobot01/decypharr/pkg/debrid/debrid"
|
||||||
|
"github.com/sirrobot01/decypharr/pkg/service"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/google/uuid"
|
"github.com/google/uuid"
|
||||||
"github.com/sirrobot01/debrid-blackhole/pkg/arr"
|
"github.com/sirrobot01/decypharr/pkg/arr"
|
||||||
"github.com/sirrobot01/debrid-blackhole/pkg/debrid"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
type ImportRequest struct {
|
type ImportRequest struct {
|
||||||
ID string `json:"id"`
|
ID string `json:"id"`
|
||||||
Path string `json:"path"`
|
Path string `json:"path"`
|
||||||
URI string `json:"uri"`
|
Magnet *utils.Magnet `json:"magnet"`
|
||||||
Arr *arr.Arr `json:"arr"`
|
Arr *arr.Arr `json:"arr"`
|
||||||
IsSymlink bool `json:"isSymlink"`
|
IsSymlink bool `json:"isSymlink"`
|
||||||
SeriesId int `json:"series"`
|
SeriesId int `json:"series"`
|
||||||
Seasons []int `json:"seasons"`
|
Seasons []int `json:"seasons"`
|
||||||
Episodes []string `json:"episodes"`
|
Episodes []string `json:"episodes"`
|
||||||
|
DownloadUncached bool `json:"downloadUncached"`
|
||||||
|
|
||||||
Failed bool `json:"failed"`
|
Failed bool `json:"failed"`
|
||||||
FailedAt time.Time `json:"failedAt"`
|
FailedAt time.Time `json:"failedAt"`
|
||||||
@@ -40,15 +41,16 @@ type ManualImportResponseSchema struct {
|
|||||||
Id int `json:"id"`
|
Id int `json:"id"`
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewImportRequest(uri string, arr *arr.Arr, isSymlink bool) *ImportRequest {
|
func NewImportRequest(magnet *utils.Magnet, arr *arr.Arr, isSymlink, downloadUncached bool) *ImportRequest {
|
||||||
return &ImportRequest{
|
return &ImportRequest{
|
||||||
ID: uuid.NewString(),
|
ID: uuid.NewString(),
|
||||||
URI: uri,
|
Magnet: magnet,
|
||||||
Arr: arr,
|
Arr: arr,
|
||||||
Failed: false,
|
Failed: false,
|
||||||
Completed: false,
|
Completed: false,
|
||||||
Async: false,
|
Async: false,
|
||||||
IsSymlink: isSymlink,
|
IsSymlink: isSymlink,
|
||||||
|
DownloadUncached: downloadUncached,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -67,17 +69,14 @@ func (i *ImportRequest) Process(q *QBit) (err error) {
|
|||||||
// Use this for now.
|
// Use this for now.
|
||||||
// This sends the torrent to the arr
|
// This sends the torrent to the arr
|
||||||
svc := service.GetService()
|
svc := service.GetService()
|
||||||
magnet, err := utils.GetMagnetFromUrl(i.URI)
|
torrent := createTorrentFromMagnet(i.Magnet, i.Arr.Name, "manual")
|
||||||
if err != nil {
|
debridTorrent, err := debrid.ProcessTorrent(svc.Debrid, i.Magnet, i.Arr, i.IsSymlink, i.DownloadUncached)
|
||||||
return fmt.Errorf("error parsing magnet link: %w", err)
|
|
||||||
}
|
|
||||||
torrent := CreateTorrentFromMagnet(magnet, i.Arr.Name, "manual")
|
|
||||||
debridTorrent, err := debrid.ProcessTorrent(svc.Debrid, magnet, i.Arr, i.IsSymlink)
|
|
||||||
if err != nil || debridTorrent == nil {
|
if err != nil || debridTorrent == nil {
|
||||||
fmt.Println("Error deleting torrent: ", err)
|
|
||||||
if debridTorrent != nil {
|
if debridTorrent != nil {
|
||||||
dbClient := service.GetDebrid().GetByName(debridTorrent.Debrid)
|
dbClient := service.GetDebrid().GetByName(debridTorrent.Debrid)
|
||||||
go dbClient.DeleteTorrent(debridTorrent)
|
go func() {
|
||||||
|
_ = dbClient.DeleteTorrent(debridTorrent.Id)
|
||||||
|
}()
|
||||||
}
|
}
|
||||||
if err == nil {
|
if err == nil {
|
||||||
err = fmt.Errorf("failed to process torrent")
|
err = fmt.Errorf("failed to process torrent")
|
||||||
|
|||||||
@@ -2,33 +2,11 @@ package qbit
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"github.com/google/uuid"
|
"github.com/google/uuid"
|
||||||
"github.com/sirrobot01/debrid-blackhole/internal/utils"
|
"github.com/sirrobot01/decypharr/internal/utils"
|
||||||
debrid "github.com/sirrobot01/debrid-blackhole/pkg/debrid/torrent"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func checkFileLoop(wg *sync.WaitGroup, dir string, file debrid.File, ready chan<- debrid.File) {
|
func createTorrentFromMagnet(magnet *utils.Magnet, category, source string) *Torrent {
|
||||||
defer wg.Done()
|
|
||||||
ticker := time.NewTicker(1 * time.Second) // Check every second
|
|
||||||
defer ticker.Stop()
|
|
||||||
path := filepath.Join(dir, file.Path)
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case <-ticker.C:
|
|
||||||
_, err := os.Stat(path)
|
|
||||||
if !os.IsNotExist(err) {
|
|
||||||
ready <- file
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func CreateTorrentFromMagnet(magnet *utils.Magnet, category, source string) *Torrent {
|
|
||||||
torrent := &Torrent{
|
torrent := &Torrent{
|
||||||
ID: uuid.NewString(),
|
ID: uuid.NewString(),
|
||||||
Hash: strings.ToLower(magnet.InfoHash),
|
Hash: strings.ToLower(magnet.InfoHash),
|
||||||
|
|||||||
@@ -3,8 +3,8 @@ package qbit
|
|||||||
import (
|
import (
|
||||||
"cmp"
|
"cmp"
|
||||||
"github.com/rs/zerolog"
|
"github.com/rs/zerolog"
|
||||||
"github.com/sirrobot01/debrid-blackhole/internal/config"
|
"github.com/sirrobot01/decypharr/internal/config"
|
||||||
"github.com/sirrobot01/debrid-blackhole/internal/logger"
|
"github.com/sirrobot01/decypharr/internal/logger"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
)
|
)
|
||||||
@@ -16,14 +16,14 @@ type QBit struct {
|
|||||||
DownloadFolder string `json:"download_folder"`
|
DownloadFolder string `json:"download_folder"`
|
||||||
Categories []string `json:"categories"`
|
Categories []string `json:"categories"`
|
||||||
Storage *TorrentStorage
|
Storage *TorrentStorage
|
||||||
debug bool
|
|
||||||
logger zerolog.Logger
|
logger zerolog.Logger
|
||||||
Tags []string
|
Tags []string
|
||||||
RefreshInterval int
|
RefreshInterval int
|
||||||
|
SkipPreCache bool
|
||||||
}
|
}
|
||||||
|
|
||||||
func New() *QBit {
|
func New() *QBit {
|
||||||
_cfg := config.GetConfig()
|
_cfg := config.Get()
|
||||||
cfg := _cfg.QBitTorrent
|
cfg := _cfg.QBitTorrent
|
||||||
port := cmp.Or(cfg.Port, os.Getenv("QBIT_PORT"), "8282")
|
port := cmp.Or(cfg.Port, os.Getenv("QBIT_PORT"), "8282")
|
||||||
refreshInterval := cmp.Or(cfg.RefreshInterval, 10)
|
refreshInterval := cmp.Or(cfg.RefreshInterval, 10)
|
||||||
@@ -34,7 +34,8 @@ func New() *QBit {
|
|||||||
DownloadFolder: cfg.DownloadFolder,
|
DownloadFolder: cfg.DownloadFolder,
|
||||||
Categories: cfg.Categories,
|
Categories: cfg.Categories,
|
||||||
Storage: NewTorrentStorage(filepath.Join(_cfg.Path, "torrents.json")),
|
Storage: NewTorrentStorage(filepath.Join(_cfg.Path, "torrents.json")),
|
||||||
logger: logger.NewLogger("qbit", cfg.LogLevel, os.Stdout),
|
logger: logger.New("qbit"),
|
||||||
RefreshInterval: refreshInterval,
|
RefreshInterval: refreshInterval,
|
||||||
|
SkipPreCache: cfg.SkipPreCache,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -8,10 +8,9 @@ import (
|
|||||||
func (q *QBit) Routes() http.Handler {
|
func (q *QBit) Routes() http.Handler {
|
||||||
r := chi.NewRouter()
|
r := chi.NewRouter()
|
||||||
r.Use(q.CategoryContext)
|
r.Use(q.CategoryContext)
|
||||||
r.Post("/auth/login", q.handleLogin)
|
|
||||||
|
|
||||||
r.Group(func(r chi.Router) {
|
r.Group(func(r chi.Router) {
|
||||||
r.Use(q.authContext)
|
r.Use(q.authContext)
|
||||||
|
r.Post("/auth/login", q.handleLogin)
|
||||||
r.Route("/torrents", func(r chi.Router) {
|
r.Route("/torrents", func(r chi.Router) {
|
||||||
r.Use(HashesCtx)
|
r.Use(HashesCtx)
|
||||||
r.Get("/info", q.handleTorrentsInfo)
|
r.Get("/info", q.handleTorrentsInfo)
|
||||||
|
|||||||
@@ -1,9 +1,10 @@
|
|||||||
package qbit
|
package qbit
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"github.com/goccy/go-json"
|
||||||
"os"
|
"os"
|
||||||
|
"sort"
|
||||||
"sync"
|
"sync"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -51,14 +52,24 @@ func (ts *TorrentStorage) Add(torrent *Torrent) {
|
|||||||
ts.mu.Lock()
|
ts.mu.Lock()
|
||||||
defer ts.mu.Unlock()
|
defer ts.mu.Unlock()
|
||||||
ts.torrents[keyPair(torrent.Hash, torrent.Category)] = torrent
|
ts.torrents[keyPair(torrent.Hash, torrent.Category)] = torrent
|
||||||
_ = ts.saveToFile()
|
go func() {
|
||||||
|
err := ts.saveToFile()
|
||||||
|
if err != nil {
|
||||||
|
fmt.Println(err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ts *TorrentStorage) AddOrUpdate(torrent *Torrent) {
|
func (ts *TorrentStorage) AddOrUpdate(torrent *Torrent) {
|
||||||
ts.mu.Lock()
|
ts.mu.Lock()
|
||||||
defer ts.mu.Unlock()
|
defer ts.mu.Unlock()
|
||||||
ts.torrents[keyPair(torrent.Hash, torrent.Category)] = torrent
|
ts.torrents[keyPair(torrent.Hash, torrent.Category)] = torrent
|
||||||
_ = ts.saveToFile()
|
go func() {
|
||||||
|
err := ts.saveToFile()
|
||||||
|
if err != nil {
|
||||||
|
fmt.Println(err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ts *TorrentStorage) Get(hash, category string) *Torrent {
|
func (ts *TorrentStorage) Get(hash, category string) *Torrent {
|
||||||
@@ -99,7 +110,46 @@ func (ts *TorrentStorage) GetAll(category string, filter string, hashes []string
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return filtered
|
torrents = filtered
|
||||||
|
}
|
||||||
|
return torrents
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ts *TorrentStorage) GetAllSorted(category string, filter string, hashes []string, sortBy string, ascending bool) []*Torrent {
|
||||||
|
torrents := ts.GetAll(category, filter, hashes)
|
||||||
|
if sortBy != "" {
|
||||||
|
sort.Slice(torrents, func(i, j int) bool {
|
||||||
|
// If ascending is false, swap i and j to get descending order
|
||||||
|
if !ascending {
|
||||||
|
i, j = j, i
|
||||||
|
}
|
||||||
|
|
||||||
|
switch sortBy {
|
||||||
|
case "name":
|
||||||
|
return torrents[i].Name < torrents[j].Name
|
||||||
|
case "size":
|
||||||
|
return torrents[i].Size < torrents[j].Size
|
||||||
|
case "added_on":
|
||||||
|
return torrents[i].AddedOn < torrents[j].AddedOn
|
||||||
|
case "completed":
|
||||||
|
return torrents[i].Completed < torrents[j].Completed
|
||||||
|
case "progress":
|
||||||
|
return torrents[i].Progress < torrents[j].Progress
|
||||||
|
case "state":
|
||||||
|
return torrents[i].State < torrents[j].State
|
||||||
|
case "category":
|
||||||
|
return torrents[i].Category < torrents[j].Category
|
||||||
|
case "dlspeed":
|
||||||
|
return torrents[i].Dlspeed < torrents[j].Dlspeed
|
||||||
|
case "upspeed":
|
||||||
|
return torrents[i].Upspeed < torrents[j].Upspeed
|
||||||
|
case "ratio":
|
||||||
|
return torrents[i].Ratio < torrents[j].Ratio
|
||||||
|
default:
|
||||||
|
// Default sort by added_on
|
||||||
|
return torrents[i].AddedOn < torrents[j].AddedOn
|
||||||
|
}
|
||||||
|
})
|
||||||
}
|
}
|
||||||
return torrents
|
return torrents
|
||||||
}
|
}
|
||||||
@@ -108,7 +158,12 @@ func (ts *TorrentStorage) Update(torrent *Torrent) {
|
|||||||
ts.mu.Lock()
|
ts.mu.Lock()
|
||||||
defer ts.mu.Unlock()
|
defer ts.mu.Unlock()
|
||||||
ts.torrents[keyPair(torrent.Hash, torrent.Category)] = torrent
|
ts.torrents[keyPair(torrent.Hash, torrent.Category)] = torrent
|
||||||
_ = ts.saveToFile()
|
go func() {
|
||||||
|
err := ts.saveToFile()
|
||||||
|
if err != nil {
|
||||||
|
fmt.Println(err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ts *TorrentStorage) Delete(hash, category string) {
|
func (ts *TorrentStorage) Delete(hash, category string) {
|
||||||
@@ -127,6 +182,9 @@ func (ts *TorrentStorage) Delete(hash, category string) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
delete(ts.torrents, key)
|
delete(ts.torrents, key)
|
||||||
|
if torrent == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
// Delete the torrent folder
|
// Delete the torrent folder
|
||||||
if torrent.ContentPath != "" {
|
if torrent.ContentPath != "" {
|
||||||
err := os.RemoveAll(torrent.ContentPath)
|
err := os.RemoveAll(torrent.ContentPath)
|
||||||
@@ -134,7 +192,12 @@ func (ts *TorrentStorage) Delete(hash, category string) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
_ = ts.saveToFile()
|
go func() {
|
||||||
|
err := ts.saveToFile()
|
||||||
|
if err != nil {
|
||||||
|
fmt.Println(err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ts *TorrentStorage) DeleteMultiple(hashes []string) {
|
func (ts *TorrentStorage) DeleteMultiple(hashes []string) {
|
||||||
@@ -147,18 +210,23 @@ func (ts *TorrentStorage) DeleteMultiple(hashes []string) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
_ = ts.saveToFile()
|
go func() {
|
||||||
|
err := ts.saveToFile()
|
||||||
|
if err != nil {
|
||||||
|
fmt.Println(err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ts *TorrentStorage) Save() error {
|
func (ts *TorrentStorage) Save() error {
|
||||||
ts.mu.RLock()
|
|
||||||
defer ts.mu.RUnlock()
|
|
||||||
return ts.saveToFile()
|
return ts.saveToFile()
|
||||||
}
|
}
|
||||||
|
|
||||||
// saveToFile is a helper function to write the current state to the JSON file
|
// saveToFile is a helper function to write the current state to the JSON file
|
||||||
func (ts *TorrentStorage) saveToFile() error {
|
func (ts *TorrentStorage) saveToFile() error {
|
||||||
|
ts.mu.RLock()
|
||||||
data, err := json.MarshalIndent(ts.torrents, "", " ")
|
data, err := json.MarshalIndent(ts.torrents, "", " ")
|
||||||
|
ts.mu.RUnlock()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -4,11 +4,12 @@ import (
|
|||||||
"cmp"
|
"cmp"
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"github.com/sirrobot01/debrid-blackhole/internal/utils"
|
"github.com/sirrobot01/decypharr/internal/request"
|
||||||
"github.com/sirrobot01/debrid-blackhole/pkg/arr"
|
"github.com/sirrobot01/decypharr/internal/utils"
|
||||||
db "github.com/sirrobot01/debrid-blackhole/pkg/debrid"
|
"github.com/sirrobot01/decypharr/pkg/arr"
|
||||||
debrid "github.com/sirrobot01/debrid-blackhole/pkg/debrid/torrent"
|
db "github.com/sirrobot01/decypharr/pkg/debrid/debrid"
|
||||||
"github.com/sirrobot01/debrid-blackhole/pkg/service"
|
debrid "github.com/sirrobot01/decypharr/pkg/debrid/types"
|
||||||
|
"github.com/sirrobot01/decypharr/pkg/service"
|
||||||
"io"
|
"io"
|
||||||
"mime/multipart"
|
"mime/multipart"
|
||||||
"os"
|
"os"
|
||||||
@@ -49,17 +50,19 @@ func (q *QBit) AddTorrent(ctx context.Context, fileHeader *multipart.FileHeader,
|
|||||||
|
|
||||||
func (q *QBit) Process(ctx context.Context, magnet *utils.Magnet, category string) error {
|
func (q *QBit) Process(ctx context.Context, magnet *utils.Magnet, category string) error {
|
||||||
svc := service.GetService()
|
svc := service.GetService()
|
||||||
torrent := CreateTorrentFromMagnet(magnet, category, "auto")
|
torrent := createTorrentFromMagnet(magnet, category, "auto")
|
||||||
a, ok := ctx.Value("arr").(*arr.Arr)
|
a, ok := ctx.Value("arr").(*arr.Arr)
|
||||||
if !ok {
|
if !ok {
|
||||||
return fmt.Errorf("arr not found in context")
|
return fmt.Errorf("arr not found in context")
|
||||||
}
|
}
|
||||||
isSymlink := ctx.Value("isSymlink").(bool)
|
isSymlink := ctx.Value("isSymlink").(bool)
|
||||||
debridTorrent, err := db.ProcessTorrent(svc.Debrid, magnet, a, isSymlink)
|
debridTorrent, err := db.ProcessTorrent(svc.Debrid, magnet, a, isSymlink, false)
|
||||||
if err != nil || debridTorrent == nil {
|
if err != nil || debridTorrent == nil {
|
||||||
if debridTorrent != nil {
|
if debridTorrent != nil {
|
||||||
dbClient := service.GetDebrid().GetByName(debridTorrent.Debrid)
|
dbClient := service.GetDebrid().GetByName(debridTorrent.Debrid)
|
||||||
go dbClient.DeleteTorrent(debridTorrent)
|
go func() {
|
||||||
|
_ = dbClient.DeleteTorrent(debridTorrent.Id)
|
||||||
|
}()
|
||||||
}
|
}
|
||||||
if err == nil {
|
if err == nil {
|
||||||
err = fmt.Errorf("failed to process torrent")
|
err = fmt.Errorf("failed to process torrent")
|
||||||
@@ -73,46 +76,96 @@ func (q *QBit) Process(ctx context.Context, magnet *utils.Magnet, category strin
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (q *QBit) ProcessFiles(torrent *Torrent, debridTorrent *debrid.Torrent, arr *arr.Arr, isSymlink bool) {
|
func (q *QBit) ProcessFiles(torrent *Torrent, debridTorrent *debrid.Torrent, arr *arr.Arr, isSymlink bool) {
|
||||||
debridClient := service.GetDebrid().GetByName(debridTorrent.Debrid)
|
svc := service.GetService()
|
||||||
|
client := svc.Debrid.GetByName(debridTorrent.Debrid)
|
||||||
for debridTorrent.Status != "downloaded" {
|
for debridTorrent.Status != "downloaded" {
|
||||||
progress := debridTorrent.Progress
|
q.logger.Debug().Msgf("%s <- (%s) Download Progress: %.2f%%", debridTorrent.Debrid, debridTorrent.Name, debridTorrent.Progress)
|
||||||
q.logger.Debug().Msgf("%s -> (%s) Download Progress: %.2f%%", debridTorrent.Debrid, debridTorrent.Name, progress)
|
dbT, err := client.CheckStatus(debridTorrent, isSymlink)
|
||||||
time.Sleep(10 * time.Second)
|
|
||||||
dbT, err := debridClient.CheckStatus(debridTorrent, isSymlink)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
q.logger.Error().Msgf("Error checking status: %v", err)
|
q.logger.Error().Msgf("Error checking status: %v", err)
|
||||||
go debridClient.DeleteTorrent(debridTorrent)
|
go func() {
|
||||||
|
err := client.DeleteTorrent(debridTorrent.Id)
|
||||||
|
if err != nil {
|
||||||
|
q.logger.Error().Msgf("Error deleting torrent: %v", err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
q.MarkAsFailed(torrent)
|
q.MarkAsFailed(torrent)
|
||||||
_ = arr.Refresh()
|
if err := arr.Refresh(); err != nil {
|
||||||
|
q.logger.Error().Msgf("Error refreshing arr: %v", err)
|
||||||
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
debridTorrent = dbT
|
debridTorrent = dbT
|
||||||
torrent = q.UpdateTorrentMin(torrent, debridTorrent)
|
torrent = q.UpdateTorrentMin(torrent, debridTorrent)
|
||||||
|
|
||||||
|
// Exit the loop for downloading statuses to prevent memory buildup
|
||||||
|
if !slices.Contains(client.GetDownloadingStatus(), debridTorrent.Status) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
time.Sleep(time.Duration(q.RefreshInterval) * time.Second)
|
||||||
}
|
}
|
||||||
var (
|
var (
|
||||||
torrentSymlinkPath string
|
torrentSymlinkPath string
|
||||||
err error
|
err error
|
||||||
)
|
)
|
||||||
debridTorrent.Arr = arr
|
debridTorrent.Arr = arr
|
||||||
|
|
||||||
|
// File is done downloading at this stage
|
||||||
|
|
||||||
|
// Check if debrid supports webdav by checking cache
|
||||||
if isSymlink {
|
if isSymlink {
|
||||||
|
cache, ok := svc.Debrid.Caches[debridTorrent.Debrid]
|
||||||
|
if ok {
|
||||||
|
q.logger.Info().Msgf("Using internal webdav for %s", debridTorrent.Debrid)
|
||||||
|
// Use webdav to download the file
|
||||||
|
if err := cache.AddTorrent(debridTorrent); err != nil {
|
||||||
|
q.logger.Error().Msgf("Error adding torrent to cache: %v", err)
|
||||||
|
q.MarkAsFailed(torrent)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
rclonePath := filepath.Join(debridTorrent.MountPath, cache.GetTorrentFolder(debridTorrent)) // /mnt/remote/realdebrid/MyTVShow
|
||||||
|
torrentFolderNoExt := utils.RemoveExtension(debridTorrent.Name)
|
||||||
|
torrentSymlinkPath, err = q.createSymlinks(debridTorrent, rclonePath, torrentFolderNoExt) // /mnt/symlinks/{category}/MyTVShow/
|
||||||
|
|
||||||
|
} else {
|
||||||
|
// User is using either zurg or debrid webdav
|
||||||
torrentSymlinkPath, err = q.ProcessSymlink(torrent) // /mnt/symlinks/{category}/MyTVShow/
|
torrentSymlinkPath, err = q.ProcessSymlink(torrent) // /mnt/symlinks/{category}/MyTVShow/
|
||||||
|
}
|
||||||
} else {
|
} else {
|
||||||
torrentSymlinkPath, err = q.ProcessManualFile(torrent)
|
torrentSymlinkPath, err = q.ProcessManualFile(torrent)
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
q.MarkAsFailed(torrent)
|
q.MarkAsFailed(torrent)
|
||||||
go debridClient.DeleteTorrent(debridTorrent)
|
go func() {
|
||||||
|
err := client.DeleteTorrent(debridTorrent.Id)
|
||||||
|
if err != nil {
|
||||||
|
q.logger.Error().Msgf("Error deleting torrent: %v", err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
q.logger.Info().Msgf("Error: %v", err)
|
q.logger.Info().Msgf("Error: %v", err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
torrent.TorrentPath = torrentSymlinkPath
|
torrent.TorrentPath = torrentSymlinkPath
|
||||||
q.UpdateTorrent(torrent, debridTorrent)
|
q.UpdateTorrent(torrent, debridTorrent)
|
||||||
_ = arr.Refresh()
|
go func() {
|
||||||
|
if err := request.SendDiscordMessage("download_complete", "success", torrent.discordContext()); err != nil {
|
||||||
|
q.logger.Error().Msgf("Error sending discord message: %v", err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
if err := arr.Refresh(); err != nil {
|
||||||
|
q.logger.Error().Msgf("Error refreshing arr: %v", err)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (q *QBit) MarkAsFailed(t *Torrent) *Torrent {
|
func (q *QBit) MarkAsFailed(t *Torrent) *Torrent {
|
||||||
t.State = "error"
|
t.State = "error"
|
||||||
q.Storage.AddOrUpdate(t)
|
q.Storage.AddOrUpdate(t)
|
||||||
|
go func() {
|
||||||
|
if err := request.SendDiscordMessage("download_failed", "error", t.discordContext()); err != nil {
|
||||||
|
q.logger.Error().Msgf("Error sending discord message: %v", err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
return t
|
return t
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -160,16 +213,12 @@ func (q *QBit) UpdateTorrentMin(t *Torrent, debridTorrent *debrid.Torrent) *Torr
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (q *QBit) UpdateTorrent(t *Torrent, debridTorrent *debrid.Torrent) *Torrent {
|
func (q *QBit) UpdateTorrent(t *Torrent, debridTorrent *debrid.Torrent) *Torrent {
|
||||||
_db := service.GetDebrid().GetByName(debridTorrent.Debrid)
|
|
||||||
if debridTorrent == nil && t.ID != "" {
|
|
||||||
debridTorrent, _ = _db.GetTorrent(t.ID)
|
|
||||||
}
|
|
||||||
if debridTorrent == nil {
|
if debridTorrent == nil {
|
||||||
q.logger.Info().Msgf("Torrent with ID %s not found in %s", t.ID, _db.GetName())
|
|
||||||
return t
|
return t
|
||||||
}
|
}
|
||||||
|
_db := service.GetDebrid().GetByName(debridTorrent.Debrid)
|
||||||
if debridTorrent.Status != "downloaded" {
|
if debridTorrent.Status != "downloaded" {
|
||||||
debridTorrent, _ = _db.GetTorrent(t.ID)
|
_ = _db.UpdateTorrent(debridTorrent)
|
||||||
}
|
}
|
||||||
t = q.UpdateTorrentMin(t, debridTorrent)
|
t = q.UpdateTorrentMin(t, debridTorrent)
|
||||||
t.ContentPath = t.TorrentPath + string(os.PathSeparator)
|
t.ContentPath = t.TorrentPath + string(os.PathSeparator)
|
||||||
@@ -180,7 +229,7 @@ func (q *QBit) UpdateTorrent(t *Torrent, debridTorrent *debrid.Torrent) *Torrent
|
|||||||
return t
|
return t
|
||||||
}
|
}
|
||||||
|
|
||||||
ticker := time.NewTicker(2 * time.Second)
|
ticker := time.NewTicker(100 * time.Millisecond)
|
||||||
defer ticker.Stop()
|
defer ticker.Stop()
|
||||||
|
|
||||||
for {
|
for {
|
||||||
@@ -215,8 +264,8 @@ func (q *QBit) RefreshTorrent(t *Torrent) bool {
|
|||||||
func (q *QBit) GetTorrentProperties(t *Torrent) *TorrentProperties {
|
func (q *QBit) GetTorrentProperties(t *Torrent) *TorrentProperties {
|
||||||
return &TorrentProperties{
|
return &TorrentProperties{
|
||||||
AdditionDate: t.AddedOn,
|
AdditionDate: t.AddedOn,
|
||||||
Comment: "Debrid Blackhole <https://github.com/sirrobot01/debrid-blackhole>",
|
Comment: "Debrid Blackhole <https://github.com/sirrobot01/decypharr>",
|
||||||
CreatedBy: "Debrid Blackhole <https://github.com/sirrobot01/debrid-blackhole>",
|
CreatedBy: "Debrid Blackhole <https://github.com/sirrobot01/decypharr>",
|
||||||
CreationDate: t.AddedOn,
|
CreationDate: t.AddedOn,
|
||||||
DlLimit: -1,
|
DlLimit: -1,
|
||||||
UpLimit: -1,
|
UpLimit: -1,
|
||||||
|
|||||||
@@ -1,7 +1,8 @@
|
|||||||
package qbit
|
package qbit
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"github.com/sirrobot01/debrid-blackhole/pkg/debrid/torrent"
|
"fmt"
|
||||||
|
"github.com/sirrobot01/decypharr/pkg/debrid/types"
|
||||||
"sync"
|
"sync"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -173,7 +174,7 @@ type TorrentCategory struct {
|
|||||||
|
|
||||||
type Torrent struct {
|
type Torrent struct {
|
||||||
ID string `json:"id"`
|
ID string `json:"id"`
|
||||||
DebridTorrent *torrent.Torrent `json:"-"`
|
DebridTorrent *types.Torrent `json:"-"`
|
||||||
Debrid string `json:"debrid"`
|
Debrid string `json:"debrid"`
|
||||||
TorrentPath string `json:"-"`
|
TorrentPath string `json:"-"`
|
||||||
|
|
||||||
@@ -230,6 +231,17 @@ func (t *Torrent) IsReady() bool {
|
|||||||
return t.AmountLeft <= 0 && t.TorrentPath != ""
|
return t.AmountLeft <= 0 && t.TorrentPath != ""
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (t *Torrent) discordContext() string {
|
||||||
|
format := `
|
||||||
|
**Name:** %s
|
||||||
|
**Arr:** %s
|
||||||
|
**Hash:** %s
|
||||||
|
**MagnetURI:** %s
|
||||||
|
**Debrid:** %s
|
||||||
|
`
|
||||||
|
return fmt.Sprintf(format, t.Name, t.Category, t.Hash, t.MagnetUri, t.Debrid)
|
||||||
|
}
|
||||||
|
|
||||||
type TorrentProperties struct {
|
type TorrentProperties struct {
|
||||||
AdditionDate int64 `json:"addition_date,omitempty"`
|
AdditionDate int64 `json:"addition_date,omitempty"`
|
||||||
Comment string `json:"comment,omitempty"`
|
Comment string `json:"comment,omitempty"`
|
||||||
|
|||||||
@@ -1,39 +0,0 @@
|
|||||||
package qbit
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"github.com/sirrobot01/debrid-blackhole/pkg/service"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
func (q *QBit) StartWorker(ctx context.Context) {
|
|
||||||
q.logger.Info().Msg("Qbit Worker started")
|
|
||||||
q.StartRefreshWorker(ctx)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (q *QBit) StartRefreshWorker(ctx context.Context) {
|
|
||||||
refreshCtx := context.WithValue(ctx, "worker", "refresh")
|
|
||||||
refreshTicker := time.NewTicker(time.Duration(q.RefreshInterval) * time.Second)
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case <-refreshCtx.Done():
|
|
||||||
q.logger.Info().Msg("Qbit Refresh Worker stopped")
|
|
||||||
return
|
|
||||||
case <-refreshTicker.C:
|
|
||||||
torrents := q.Storage.GetAll("", "", nil)
|
|
||||||
if len(torrents) > 0 {
|
|
||||||
q.RefreshArrs()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (q *QBit) RefreshArrs() {
|
|
||||||
arrs := service.GetService().Arr
|
|
||||||
for _, arr := range arrs.GetAll() {
|
|
||||||
err := arr.Refresh()
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,283 +0,0 @@
|
|||||||
package rclone
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bufio"
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"github.com/rs/zerolog"
|
|
||||||
"github.com/sirrobot01/debrid-blackhole/internal/config"
|
|
||||||
"github.com/sirrobot01/debrid-blackhole/internal/logger"
|
|
||||||
"github.com/sirrobot01/debrid-blackhole/pkg/webdav"
|
|
||||||
"os"
|
|
||||||
"os/exec"
|
|
||||||
"path/filepath"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
type Remote struct {
|
|
||||||
Type string `json:"type"`
|
|
||||||
Name string `json:"name"`
|
|
||||||
Url string `json:"url"`
|
|
||||||
MountPoint string `json:"mount_point"`
|
|
||||||
Flags map[string]string `json:"flags"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (rc *Rclone) Config() string {
|
|
||||||
var content string
|
|
||||||
|
|
||||||
for _, remote := range rc.Remotes {
|
|
||||||
content += fmt.Sprintf("[%s]\n", remote.Name)
|
|
||||||
content += fmt.Sprintf("type = %s\n", remote.Type)
|
|
||||||
content += fmt.Sprintf("url = %s\n", remote.Url)
|
|
||||||
content += fmt.Sprintf("vendor = other\n")
|
|
||||||
|
|
||||||
for key, value := range remote.Flags {
|
|
||||||
content += fmt.Sprintf("%s = %s\n", key, value)
|
|
||||||
}
|
|
||||||
content += "\n\n"
|
|
||||||
}
|
|
||||||
|
|
||||||
return content
|
|
||||||
}
|
|
||||||
|
|
||||||
type Rclone struct {
|
|
||||||
Remotes map[string]Remote `json:"remotes"`
|
|
||||||
logger zerolog.Logger
|
|
||||||
cmd *exec.Cmd
|
|
||||||
configPath string
|
|
||||||
}
|
|
||||||
|
|
||||||
func New(webdav *webdav.WebDav) (*Rclone, error) {
|
|
||||||
// Check if rclone is installed
|
|
||||||
cfg := config.GetConfig()
|
|
||||||
configPath := fmt.Sprintf("%s/rclone.conf", cfg.Path)
|
|
||||||
|
|
||||||
if _, err := exec.LookPath("rclone"); err != nil {
|
|
||||||
return nil, fmt.Errorf("rclone is not installed: %w", err)
|
|
||||||
}
|
|
||||||
remotes := make(map[string]Remote)
|
|
||||||
for _, handler := range webdav.Handlers {
|
|
||||||
url := fmt.Sprintf("http://localhost:%s/webdav/%s/", cfg.QBitTorrent.Port, strings.ToLower(handler.Name))
|
|
||||||
rmt := Remote{
|
|
||||||
Type: "webdav",
|
|
||||||
Name: handler.Name,
|
|
||||||
Url: url,
|
|
||||||
MountPoint: filepath.Join("/mnt/rclone/", handler.Name),
|
|
||||||
Flags: map[string]string{},
|
|
||||||
}
|
|
||||||
remotes[handler.Name] = rmt
|
|
||||||
}
|
|
||||||
|
|
||||||
rc := &Rclone{
|
|
||||||
logger: logger.NewLogger("rclone", "info", os.Stdout),
|
|
||||||
Remotes: remotes,
|
|
||||||
configPath: configPath,
|
|
||||||
}
|
|
||||||
if err := rc.WriteConfig(); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return rc, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (rc *Rclone) WriteConfig() error {
|
|
||||||
|
|
||||||
// Create config directory if it doesn't exist
|
|
||||||
configDir := filepath.Dir(rc.configPath)
|
|
||||||
if err := os.MkdirAll(configDir, 0755); err != nil {
|
|
||||||
return fmt.Errorf("failed to create config directory: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Write the config file
|
|
||||||
if err := os.WriteFile(rc.configPath, []byte(rc.Config()), 0600); err != nil {
|
|
||||||
return fmt.Errorf("failed to write config file: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
rc.logger.Info().Msgf("Wrote rclone config with %d remotes to %s", len(rc.Remotes), rc.configPath)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (rc *Rclone) Start(ctx context.Context) error {
|
|
||||||
var wg sync.WaitGroup
|
|
||||||
errChan := make(chan error)
|
|
||||||
for _, remote := range rc.Remotes {
|
|
||||||
wg.Add(1)
|
|
||||||
go func(remote Remote) {
|
|
||||||
defer wg.Done()
|
|
||||||
if err := rc.Mount(ctx, &remote); err != nil {
|
|
||||||
rc.logger.Error().Err(err).Msgf("failed to mount %s", remote.Name)
|
|
||||||
select {
|
|
||||||
case errChan <- err:
|
|
||||||
default:
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}(remote)
|
|
||||||
}
|
|
||||||
return <-errChan
|
|
||||||
}
|
|
||||||
|
|
||||||
func (rc *Rclone) testConnection(ctx context.Context, remote *Remote) error {
|
|
||||||
testArgs := []string{
|
|
||||||
"ls",
|
|
||||||
"--config", rc.configPath,
|
|
||||||
"--log-level", "DEBUG",
|
|
||||||
remote.Name + ":",
|
|
||||||
}
|
|
||||||
|
|
||||||
cmd := exec.CommandContext(ctx, "rclone", testArgs...)
|
|
||||||
output, err := cmd.CombinedOutput()
|
|
||||||
if err != nil {
|
|
||||||
rc.logger.Error().Err(err).Str("output", string(output)).Msg("Connection test failed")
|
|
||||||
return fmt.Errorf("connection test failed: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
rc.logger.Info().Msg("Connection test successful")
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (rc *Rclone) Mount(ctx context.Context, remote *Remote) error {
|
|
||||||
// Ensure the mount point directory exists
|
|
||||||
if err := os.MkdirAll(remote.MountPoint, 0755); err != nil {
|
|
||||||
rc.logger.Info().Err(err).Msgf("failed to create mount point directory: %s", remote.MountPoint)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
//if err := rc.testConnection(ctx, remote); err != nil {
|
|
||||||
// return err
|
|
||||||
//}
|
|
||||||
|
|
||||||
// Basic arguments
|
|
||||||
args := []string{
|
|
||||||
"mount",
|
|
||||||
remote.Name + ":",
|
|
||||||
remote.MountPoint,
|
|
||||||
"--config", rc.configPath,
|
|
||||||
"--vfs-cache-mode", "full",
|
|
||||||
"--log-level", "DEBUG", // Keep this, remove -vv
|
|
||||||
"--allow-other", // Keep this
|
|
||||||
"--allow-root", // Add this
|
|
||||||
"--default-permissions", // Add this
|
|
||||||
"--vfs-cache-max-age", "24h",
|
|
||||||
"--timeout", "1m",
|
|
||||||
"--transfers", "4",
|
|
||||||
"--buffer-size", "32M",
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add any additional flags
|
|
||||||
for key, value := range remote.Flags {
|
|
||||||
args = append(args, "--"+key, value)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create command
|
|
||||||
rc.cmd = exec.CommandContext(ctx, "rclone", args...)
|
|
||||||
|
|
||||||
// Set up pipes for stdout and stderr
|
|
||||||
stdout, err := rc.cmd.StdoutPipe()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
stderr, err := rc.cmd.StderrPipe()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Start the command
|
|
||||||
if err := rc.cmd.Start(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Channel to signal mount success
|
|
||||||
mountReady := make(chan bool)
|
|
||||||
mountError := make(chan error)
|
|
||||||
|
|
||||||
// Monitor stdout
|
|
||||||
go func() {
|
|
||||||
scanner := bufio.NewScanner(stdout)
|
|
||||||
for scanner.Scan() {
|
|
||||||
text := scanner.Text()
|
|
||||||
rc.logger.Info().Msg("stdout: " + text)
|
|
||||||
if strings.Contains(text, "Mount succeeded") {
|
|
||||||
mountReady <- true
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
// Monitor stderr
|
|
||||||
go func() {
|
|
||||||
scanner := bufio.NewScanner(stderr)
|
|
||||||
for scanner.Scan() {
|
|
||||||
text := scanner.Text()
|
|
||||||
rc.logger.Info().Msg("stderr: " + text)
|
|
||||||
if strings.Contains(text, "error") {
|
|
||||||
mountError <- fmt.Errorf("mount error: %s", text)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
// Wait for mount with timeout
|
|
||||||
select {
|
|
||||||
case <-mountReady:
|
|
||||||
rc.logger.Info().Msgf("Successfully mounted %s at %s", remote.Name, remote.MountPoint)
|
|
||||||
return nil
|
|
||||||
case err := <-mountError:
|
|
||||||
err = rc.cmd.Process.Kill()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
case <-ctx.Done():
|
|
||||||
err := rc.cmd.Process.Kill()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return ctx.Err()
|
|
||||||
case <-time.After(30 * time.Second):
|
|
||||||
err := rc.cmd.Process.Kill()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return fmt.Errorf("mount timeout after 30 seconds")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (rc *Rclone) Unmount(ctx context.Context, remote *Remote) error {
|
|
||||||
if rc.cmd != nil && rc.cmd.Process != nil {
|
|
||||||
// First try graceful shutdown
|
|
||||||
if err := rc.cmd.Process.Signal(os.Interrupt); err != nil {
|
|
||||||
rc.logger.Warn().Err(err).Msg("failed to send interrupt signal")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Wait for a bit to allow graceful shutdown
|
|
||||||
done := make(chan error)
|
|
||||||
go func() {
|
|
||||||
done <- rc.cmd.Wait()
|
|
||||||
}()
|
|
||||||
|
|
||||||
select {
|
|
||||||
case err := <-done:
|
|
||||||
if err != nil {
|
|
||||||
rc.logger.Warn().Err(err).Msg("process exited with error")
|
|
||||||
}
|
|
||||||
case <-time.After(5 * time.Second):
|
|
||||||
// Force kill if it doesn't shut down gracefully
|
|
||||||
if err := rc.cmd.Process.Kill(); err != nil {
|
|
||||||
rc.logger.Error().Err(err).Msg("failed to kill process")
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Use fusermount to ensure the mountpoint is unmounted
|
|
||||||
cmd := exec.CommandContext(ctx, "fusermount", "-u", remote.MountPoint)
|
|
||||||
if err := cmd.Run(); err != nil {
|
|
||||||
rc.logger.Warn().Err(err).Msg("fusermount unmount failed")
|
|
||||||
// Don't return error here as the process might already be dead
|
|
||||||
}
|
|
||||||
|
|
||||||
rc.logger.Info().Msgf("Successfully unmounted %s", remote.MountPoint)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
159
pkg/repair/clean.go
Normal file
159
pkg/repair/clean.go
Normal file
@@ -0,0 +1,159 @@
|
|||||||
|
package repair
|
||||||
|
|
||||||
|
//func (r *Repair) clean(job *Job) error {
|
||||||
|
// // Create a new error group
|
||||||
|
// g, ctx := errgroup.WithContext(context.Background())
|
||||||
|
//
|
||||||
|
// uniqueItems := make(map[string]string)
|
||||||
|
// mu := sync.Mutex{}
|
||||||
|
//
|
||||||
|
// // Limit concurrent goroutines
|
||||||
|
// g.SetLimit(10)
|
||||||
|
//
|
||||||
|
// for _, a := range job.Arrs {
|
||||||
|
// a := a // Capture range variable
|
||||||
|
// g.Go(func() error {
|
||||||
|
// // Check if context was canceled
|
||||||
|
// select {
|
||||||
|
// case <-ctx.Done():
|
||||||
|
// return ctx.Err()
|
||||||
|
// default:
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// items, err := r.cleanArr(job, a, "")
|
||||||
|
// if err != nil {
|
||||||
|
// r.logger.Error().Err(err).Msgf("Error cleaning %s", a)
|
||||||
|
// return err
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// // Safely append the found items to the shared slice
|
||||||
|
// if len(items) > 0 {
|
||||||
|
// mu.Lock()
|
||||||
|
// for k, v := range items {
|
||||||
|
// uniqueItems[k] = v
|
||||||
|
// }
|
||||||
|
// mu.Unlock()
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// return nil
|
||||||
|
// })
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// if err := g.Wait(); err != nil {
|
||||||
|
// return err
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// if len(uniqueItems) == 0 {
|
||||||
|
// job.CompletedAt = time.Now()
|
||||||
|
// job.Status = JobCompleted
|
||||||
|
//
|
||||||
|
// go func() {
|
||||||
|
// if err := request.SendDiscordMessage("repair_clean_complete", "success", job.discordContext()); err != nil {
|
||||||
|
// r.logger.Error().Msgf("Error sending discord message: %v", err)
|
||||||
|
// }
|
||||||
|
// }()
|
||||||
|
//
|
||||||
|
// return nil
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// cache := r.deb.Caches["realdebrid"]
|
||||||
|
// if cache == nil {
|
||||||
|
// return fmt.Errorf("cache not found")
|
||||||
|
// }
|
||||||
|
// torrents := cache.GetTorrents()
|
||||||
|
//
|
||||||
|
// dangling := make([]string, 0)
|
||||||
|
// for _, t := range torrents {
|
||||||
|
// if _, ok := uniqueItems[t.Name]; !ok {
|
||||||
|
// dangling = append(dangling, t.Id)
|
||||||
|
// }
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// r.logger.Info().Msgf("Found %d delapitated items", len(dangling))
|
||||||
|
//
|
||||||
|
// if len(dangling) == 0 {
|
||||||
|
// job.CompletedAt = time.Now()
|
||||||
|
// job.Status = JobCompleted
|
||||||
|
// return nil
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// client := r.deb.Clients["realdebrid"]
|
||||||
|
// if client == nil {
|
||||||
|
// return fmt.Errorf("client not found")
|
||||||
|
// }
|
||||||
|
// for _, id := range dangling {
|
||||||
|
// err := client.DeleteTorrent(id)
|
||||||
|
// if err != nil {
|
||||||
|
// return err
|
||||||
|
// }
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// return nil
|
||||||
|
//}
|
||||||
|
//
|
||||||
|
//func (r *Repair) cleanArr(j *Job, _arr string, tmdbId string) (map[string]string, error) {
|
||||||
|
// uniqueItems := make(map[string]string)
|
||||||
|
// a := r.arrs.Get(_arr)
|
||||||
|
//
|
||||||
|
// r.logger.Info().Msgf("Starting repair for %s", a.Name)
|
||||||
|
// media, err := a.GetMedia(tmdbId)
|
||||||
|
// if err != nil {
|
||||||
|
// r.logger.Info().Msgf("Failed to get %s media: %v", a.Name, err)
|
||||||
|
// return uniqueItems, err
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// // Create a new error group
|
||||||
|
// g, ctx := errgroup.WithContext(context.Background())
|
||||||
|
//
|
||||||
|
// mu := sync.Mutex{}
|
||||||
|
//
|
||||||
|
// // Limit concurrent goroutines
|
||||||
|
// g.SetLimit(runtime.NumCPU() * 4)
|
||||||
|
//
|
||||||
|
// for _, m := range media {
|
||||||
|
// m := m // Create a new variable scoped to the loop iteration
|
||||||
|
// g.Go(func() error {
|
||||||
|
// // Check if context was canceled
|
||||||
|
// select {
|
||||||
|
// case <-ctx.Done():
|
||||||
|
// return ctx.Err()
|
||||||
|
// default:
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// u := r.getUniquePaths(m)
|
||||||
|
// for k, v := range u {
|
||||||
|
// mu.Lock()
|
||||||
|
// uniqueItems[k] = v
|
||||||
|
// mu.Unlock()
|
||||||
|
// }
|
||||||
|
// return nil
|
||||||
|
// })
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// if err := g.Wait(); err != nil {
|
||||||
|
// return uniqueItems, err
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// r.logger.Info().Msgf("Repair completed for %s. %d unique items", a.Name, len(uniqueItems))
|
||||||
|
// return uniqueItems, nil
|
||||||
|
//}
|
||||||
|
|
||||||
|
//func (r *Repair) getUniquePaths(media arr.Content) map[string]string {
|
||||||
|
// // Use zurg setup to check file availability with zurg
|
||||||
|
// // This reduces bandwidth usage significantly
|
||||||
|
//
|
||||||
|
// uniqueParents := make(map[string]string)
|
||||||
|
// files := media.Files
|
||||||
|
// for _, file := range files {
|
||||||
|
// target := getSymlinkTarget(file.Path)
|
||||||
|
// if target != "" {
|
||||||
|
// file.IsSymlink = true
|
||||||
|
// dir, f := filepath.Split(target)
|
||||||
|
// parent := filepath.Base(filepath.Clean(dir))
|
||||||
|
// // Set target path folder/file.mkv
|
||||||
|
// file.TargetPath = f
|
||||||
|
// uniqueParents[parent] = target
|
||||||
|
// }
|
||||||
|
// }
|
||||||
|
// return uniqueParents
|
||||||
|
//}
|
||||||
@@ -2,6 +2,7 @@ package repair
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"github.com/sirrobot01/decypharr/pkg/arr"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"strconv"
|
"strconv"
|
||||||
@@ -129,3 +130,20 @@ func checkFileStart(filePath string) error {
|
|||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func collectFiles(media arr.Content) map[string][]arr.ContentFile {
|
||||||
|
uniqueParents := make(map[string][]arr.ContentFile)
|
||||||
|
files := media.Files
|
||||||
|
for _, file := range files {
|
||||||
|
target := getSymlinkTarget(file.Path)
|
||||||
|
if target != "" {
|
||||||
|
file.IsSymlink = true
|
||||||
|
dir, f := filepath.Split(target)
|
||||||
|
torrentNamePath := filepath.Clean(dir)
|
||||||
|
// Set target path folder/file.mkv
|
||||||
|
file.TargetPath = f
|
||||||
|
uniqueParents[torrentNamePath] = append(uniqueParents[torrentNamePath], file)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return uniqueParents
|
||||||
|
}
|
||||||
|
|||||||
@@ -3,77 +3,214 @@ package repair
|
|||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"github.com/goccy/go-json"
|
||||||
"github.com/google/uuid"
|
"github.com/google/uuid"
|
||||||
"github.com/rs/zerolog"
|
"github.com/rs/zerolog"
|
||||||
"github.com/sirrobot01/debrid-blackhole/internal/config"
|
"github.com/sirrobot01/decypharr/internal/config"
|
||||||
"github.com/sirrobot01/debrid-blackhole/internal/logger"
|
"github.com/sirrobot01/decypharr/internal/logger"
|
||||||
"github.com/sirrobot01/debrid-blackhole/pkg/arr"
|
"github.com/sirrobot01/decypharr/internal/request"
|
||||||
"github.com/sirrobot01/debrid-blackhole/pkg/debrid/engine"
|
"github.com/sirrobot01/decypharr/pkg/arr"
|
||||||
"log"
|
"github.com/sirrobot01/decypharr/pkg/debrid/debrid"
|
||||||
|
"golang.org/x/sync/errgroup"
|
||||||
|
"net"
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/url"
|
"net/url"
|
||||||
"os"
|
"os"
|
||||||
"os/signal"
|
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"runtime"
|
"runtime"
|
||||||
|
"sort"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
"syscall"
|
|
||||||
"time"
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
type Repair struct {
|
type Repair struct {
|
||||||
Jobs []Job `json:"jobs"`
|
Jobs map[string]*Job
|
||||||
arrs *arr.Storage
|
arrs *arr.Storage
|
||||||
deb engine.Service
|
deb *debrid.Engine
|
||||||
duration time.Duration
|
duration time.Duration
|
||||||
runOnStart bool
|
runOnStart bool
|
||||||
ZurgURL string
|
ZurgURL string
|
||||||
IsZurg bool
|
IsZurg bool
|
||||||
|
useWebdav bool
|
||||||
|
autoProcess bool
|
||||||
logger zerolog.Logger
|
logger zerolog.Logger
|
||||||
|
filename string
|
||||||
|
workers int
|
||||||
|
ctx context.Context
|
||||||
}
|
}
|
||||||
|
|
||||||
func New(deb *engine.Engine, arrs *arr.Storage) *Repair {
|
func New(arrs *arr.Storage, engine *debrid.Engine) *Repair {
|
||||||
cfg := config.GetConfig()
|
cfg := config.Get()
|
||||||
duration, err := parseSchedule(cfg.Repair.Interval)
|
duration, err := parseSchedule(cfg.Repair.Interval)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
duration = time.Hour * 24
|
duration = time.Hour * 24
|
||||||
}
|
}
|
||||||
|
workers := runtime.NumCPU() * 20
|
||||||
|
if cfg.Repair.Workers > 0 {
|
||||||
|
workers = cfg.Repair.Workers
|
||||||
|
}
|
||||||
r := &Repair{
|
r := &Repair{
|
||||||
arrs: arrs,
|
arrs: arrs,
|
||||||
deb: deb.Get(),
|
logger: logger.New("repair"),
|
||||||
logger: logger.NewLogger("repair", cfg.LogLevel, os.Stdout),
|
|
||||||
duration: duration,
|
duration: duration,
|
||||||
runOnStart: cfg.Repair.RunOnStart,
|
runOnStart: cfg.Repair.RunOnStart,
|
||||||
ZurgURL: cfg.Repair.ZurgURL,
|
ZurgURL: cfg.Repair.ZurgURL,
|
||||||
|
useWebdav: cfg.Repair.UseWebDav,
|
||||||
|
autoProcess: cfg.Repair.AutoProcess,
|
||||||
|
filename: filepath.Join(cfg.Path, "repair.json"),
|
||||||
|
deb: engine,
|
||||||
|
workers: workers,
|
||||||
|
ctx: context.Background(),
|
||||||
}
|
}
|
||||||
if r.ZurgURL != "" {
|
if r.ZurgURL != "" {
|
||||||
r.IsZurg = true
|
r.IsZurg = true
|
||||||
}
|
}
|
||||||
|
// Load jobs from file
|
||||||
|
r.loadFromFile()
|
||||||
|
|
||||||
return r
|
return r
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (r *Repair) Start(ctx context.Context) error {
|
||||||
|
cfg := config.Get()
|
||||||
|
r.ctx = ctx
|
||||||
|
if r.runOnStart {
|
||||||
|
r.logger.Info().Msgf("Running initial repair")
|
||||||
|
go func() {
|
||||||
|
if err := r.AddJob([]string{}, []string{}, r.autoProcess, true); err != nil {
|
||||||
|
r.logger.Error().Err(err).Msg("Error running initial repair")
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
|
||||||
|
ticker := time.NewTicker(r.duration)
|
||||||
|
defer ticker.Stop()
|
||||||
|
|
||||||
|
r.logger.Info().Msgf("Starting repair worker with %v interval", r.duration)
|
||||||
|
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-r.ctx.Done():
|
||||||
|
r.logger.Info().Msg("Repair worker stopped")
|
||||||
|
return nil
|
||||||
|
case t := <-ticker.C:
|
||||||
|
r.logger.Info().Msgf("Running repair at %v", t.Format("15:04:05"))
|
||||||
|
if err := r.AddJob([]string{}, []string{}, r.autoProcess, true); err != nil {
|
||||||
|
r.logger.Error().Err(err).Msg("Error running repair")
|
||||||
|
}
|
||||||
|
|
||||||
|
// If using time-of-day schedule, reset the ticker for next day
|
||||||
|
if strings.Contains(cfg.Repair.Interval, ":") {
|
||||||
|
ticker.Reset(r.duration)
|
||||||
|
}
|
||||||
|
|
||||||
|
r.logger.Info().Msgf("Next scheduled repair at %v", t.Add(r.duration).Format("15:04:05"))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type JobStatus string
|
||||||
|
|
||||||
|
const (
|
||||||
|
JobStarted JobStatus = "started"
|
||||||
|
JobPending JobStatus = "pending"
|
||||||
|
JobFailed JobStatus = "failed"
|
||||||
|
JobCompleted JobStatus = "completed"
|
||||||
|
JobProcessing JobStatus = "processing"
|
||||||
|
)
|
||||||
|
|
||||||
type Job struct {
|
type Job struct {
|
||||||
ID string `json:"id"`
|
ID string `json:"id"`
|
||||||
Arrs []*arr.Arr `json:"arrs"`
|
Arrs []string `json:"arrs"`
|
||||||
MediaIDs []string `json:"media_ids"`
|
MediaIDs []string `json:"media_ids"`
|
||||||
StartedAt time.Time `json:"created_at"`
|
StartedAt time.Time `json:"created_at"`
|
||||||
|
BrokenItems map[string][]arr.ContentFile `json:"broken_items"`
|
||||||
|
Status JobStatus `json:"status"`
|
||||||
CompletedAt time.Time `json:"finished_at"`
|
CompletedAt time.Time `json:"finished_at"`
|
||||||
FailedAt time.Time `json:"failed_at"`
|
FailedAt time.Time `json:"failed_at"`
|
||||||
|
AutoProcess bool `json:"auto_process"`
|
||||||
|
Recurrent bool `json:"recurrent"`
|
||||||
|
|
||||||
Error string `json:"error"`
|
Error string `json:"error"`
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *Repair) NewJob(arrs []*arr.Arr, mediaIDs []string) *Job {
|
func (j *Job) discordContext() string {
|
||||||
|
format := `
|
||||||
|
**ID**: %s
|
||||||
|
**Arrs**: %s
|
||||||
|
**Media IDs**: %s
|
||||||
|
**Status**: %s
|
||||||
|
**Started At**: %s
|
||||||
|
**Completed At**: %s
|
||||||
|
`
|
||||||
|
|
||||||
|
dateFmt := "2006-01-02 15:04:05"
|
||||||
|
|
||||||
|
return fmt.Sprintf(format, j.ID, strings.Join(j.Arrs, ","), strings.Join(j.MediaIDs, ", "), j.Status, j.StartedAt.Format(dateFmt), j.CompletedAt.Format(dateFmt))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *Repair) getArrs(arrNames []string) []string {
|
||||||
|
arrs := make([]string, 0)
|
||||||
|
if len(arrNames) == 0 {
|
||||||
|
// No specific arrs, get all
|
||||||
|
// Also check if any arrs are set to skip repair
|
||||||
|
_arrs := r.arrs.GetAll()
|
||||||
|
for _, a := range _arrs {
|
||||||
|
if a.SkipRepair {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
arrs = append(arrs, a.Name)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
for _, name := range arrNames {
|
||||||
|
a := r.arrs.Get(name)
|
||||||
|
if a == nil || a.Host == "" || a.Token == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
arrs = append(arrs, a.Name)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return arrs
|
||||||
|
}
|
||||||
|
|
||||||
|
func jobKey(arrNames []string, mediaIDs []string) string {
|
||||||
|
return fmt.Sprintf("%s-%s", strings.Join(arrNames, ","), strings.Join(mediaIDs, ","))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *Repair) reset(j *Job) {
|
||||||
|
// Update job for rerun
|
||||||
|
j.Status = JobStarted
|
||||||
|
j.StartedAt = time.Now()
|
||||||
|
j.CompletedAt = time.Time{}
|
||||||
|
j.FailedAt = time.Time{}
|
||||||
|
j.BrokenItems = nil
|
||||||
|
j.Error = ""
|
||||||
|
if j.Recurrent || j.Arrs == nil {
|
||||||
|
j.Arrs = r.getArrs([]string{}) // Get new arrs
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *Repair) newJob(arrsNames []string, mediaIDs []string) *Job {
|
||||||
|
arrs := r.getArrs(arrsNames)
|
||||||
return &Job{
|
return &Job{
|
||||||
ID: uuid.New().String(),
|
ID: uuid.New().String(),
|
||||||
Arrs: arrs,
|
Arrs: arrs,
|
||||||
MediaIDs: mediaIDs,
|
MediaIDs: mediaIDs,
|
||||||
StartedAt: time.Now(),
|
StartedAt: time.Now(),
|
||||||
|
Status: JobStarted,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *Repair) PreRunChecks() error {
|
func (r *Repair) preRunChecks() error {
|
||||||
|
|
||||||
|
if r.useWebdav {
|
||||||
|
if len(r.deb.Caches) == 0 {
|
||||||
|
return fmt.Errorf("no caches found")
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
// Check if zurg url is reachable
|
// Check if zurg url is reachable
|
||||||
if !r.IsZurg {
|
if !r.IsZurg {
|
||||||
return nil
|
return nil
|
||||||
@@ -90,136 +227,211 @@ func (r *Repair) PreRunChecks() error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *Repair) Repair(arrs []*arr.Arr, mediaIds []string) error {
|
func (r *Repair) AddJob(arrsNames []string, mediaIDs []string, autoProcess, recurrent bool) error {
|
||||||
|
key := jobKey(arrsNames, mediaIDs)
|
||||||
j := r.NewJob(arrs, mediaIds)
|
job, ok := r.Jobs[key]
|
||||||
|
if job != nil && job.Status == JobStarted {
|
||||||
if err := r.PreRunChecks(); err != nil {
|
return fmt.Errorf("job already running")
|
||||||
return err
|
|
||||||
}
|
}
|
||||||
var wg sync.WaitGroup
|
if !ok {
|
||||||
errors := make(chan error)
|
job = r.newJob(arrsNames, mediaIDs)
|
||||||
for _, a := range j.Arrs {
|
|
||||||
wg.Add(1)
|
|
||||||
go func(a *arr.Arr) {
|
|
||||||
defer wg.Done()
|
|
||||||
if len(j.MediaIDs) == 0 {
|
|
||||||
if err := r.RepairArr(a, ""); err != nil {
|
|
||||||
log.Printf("Error repairing %s: %v", a.Name, err)
|
|
||||||
errors <- err
|
|
||||||
}
|
}
|
||||||
} else {
|
job.AutoProcess = autoProcess
|
||||||
for _, id := range j.MediaIDs {
|
job.Recurrent = recurrent
|
||||||
if err := r.RepairArr(a, id); err != nil {
|
r.reset(job)
|
||||||
log.Printf("Error repairing %s: %v", a.Name, err)
|
r.Jobs[key] = job
|
||||||
errors <- err
|
go r.saveToFile()
|
||||||
|
go func() {
|
||||||
|
if err := r.repair(job); err != nil {
|
||||||
|
r.logger.Error().Err(err).Msg("Error running repair")
|
||||||
|
r.logger.Error().Err(err).Msg("Error running repair")
|
||||||
|
job.FailedAt = time.Now()
|
||||||
|
job.Error = err.Error()
|
||||||
|
job.Status = JobFailed
|
||||||
|
job.CompletedAt = time.Now()
|
||||||
}
|
}
|
||||||
}
|
}()
|
||||||
}
|
|
||||||
}(a)
|
|
||||||
}
|
|
||||||
wg.Wait()
|
|
||||||
close(errors)
|
|
||||||
err := <-errors
|
|
||||||
if err != nil {
|
|
||||||
j.FailedAt = time.Now()
|
|
||||||
j.Error = err.Error()
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
j.CompletedAt = time.Now()
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *Repair) Start(ctx context.Context) error {
|
func (r *Repair) repair(job *Job) error {
|
||||||
ctx, stop := signal.NotifyContext(ctx, os.Interrupt, syscall.SIGTERM)
|
defer r.saveToFile()
|
||||||
defer stop()
|
if err := r.preRunChecks(); err != nil {
|
||||||
cfg := config.GetConfig()
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
if r.runOnStart {
|
// Use a mutex to protect concurrent access to brokenItems
|
||||||
r.logger.Info().Msgf("Running initial repair")
|
var mu sync.Mutex
|
||||||
|
brokenItems := map[string][]arr.ContentFile{}
|
||||||
|
g, ctx := errgroup.WithContext(r.ctx)
|
||||||
|
|
||||||
|
for _, a := range job.Arrs {
|
||||||
|
a := a // Capture range variable
|
||||||
|
g.Go(func() error {
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
return ctx.Err()
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
var items []arr.ContentFile
|
||||||
|
var err error
|
||||||
|
|
||||||
|
if len(job.MediaIDs) == 0 {
|
||||||
|
items, err = r.repairArr(job, a, "")
|
||||||
|
if err != nil {
|
||||||
|
r.logger.Error().Err(err).Msgf("Error repairing %s", a)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
for _, id := range job.MediaIDs {
|
||||||
|
someItems, err := r.repairArr(job, a, id)
|
||||||
|
if err != nil {
|
||||||
|
r.logger.Error().Err(err).Msgf("Error repairing %s with ID %s", a, id)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
items = append(items, someItems...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Safely append the found items to the shared slice
|
||||||
|
if len(items) > 0 {
|
||||||
|
mu.Lock()
|
||||||
|
brokenItems[a] = items
|
||||||
|
mu.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Wait for all goroutines to complete and check for errors
|
||||||
|
if err := g.Wait(); err != nil {
|
||||||
|
job.FailedAt = time.Now()
|
||||||
|
job.Error = err.Error()
|
||||||
|
job.Status = JobFailed
|
||||||
|
job.CompletedAt = time.Now()
|
||||||
go func() {
|
go func() {
|
||||||
if err := r.Repair(r.arrs.GetAll(), []string{}); err != nil {
|
if err := request.SendDiscordMessage("repair_failed", "error", job.discordContext()); err != nil {
|
||||||
r.logger.Info().Msgf("Error during initial repair: %v", err)
|
r.logger.Error().Msgf("Error sending discord message: %v", err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(brokenItems) == 0 {
|
||||||
|
job.CompletedAt = time.Now()
|
||||||
|
job.Status = JobCompleted
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
if err := request.SendDiscordMessage("repair_complete", "success", job.discordContext()); err != nil {
|
||||||
|
r.logger.Error().Msgf("Error sending discord message: %v", err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
job.BrokenItems = brokenItems
|
||||||
|
if job.AutoProcess {
|
||||||
|
// Job is already processed
|
||||||
|
job.CompletedAt = time.Now() // Mark as completed
|
||||||
|
job.Status = JobCompleted
|
||||||
|
go func() {
|
||||||
|
if err := request.SendDiscordMessage("repair_complete", "success", job.discordContext()); err != nil {
|
||||||
|
r.logger.Error().Msgf("Error sending discord message: %v", err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
} else {
|
||||||
|
job.Status = JobPending
|
||||||
|
go func() {
|
||||||
|
if err := request.SendDiscordMessage("repair_pending", "pending", job.discordContext()); err != nil {
|
||||||
|
r.logger.Error().Msgf("Error sending discord message: %v", err)
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
}
|
}
|
||||||
|
|
||||||
ticker := time.NewTicker(r.duration)
|
|
||||||
defer ticker.Stop()
|
|
||||||
|
|
||||||
r.logger.Info().Msgf("Starting repair worker with %v interval", r.duration)
|
|
||||||
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case <-ctx.Done():
|
|
||||||
r.logger.Info().Msg("Repair worker stopped")
|
|
||||||
return nil
|
return nil
|
||||||
case t := <-ticker.C:
|
|
||||||
r.logger.Info().Msgf("Running repair at %v", t.Format("15:04:05"))
|
|
||||||
err := r.Repair(r.arrs.GetAll(), []string{})
|
|
||||||
if err != nil {
|
|
||||||
r.logger.Info().Msgf("Error during repair: %v", err)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// If using time-of-day schedule, reset the ticker for next day
|
func (r *Repair) repairArr(j *Job, _arr string, tmdbId string) ([]arr.ContentFile, error) {
|
||||||
if strings.Contains(cfg.Repair.Interval, ":") {
|
brokenItems := make([]arr.ContentFile, 0)
|
||||||
ticker.Reset(r.duration)
|
a := r.arrs.Get(_arr)
|
||||||
}
|
|
||||||
|
|
||||||
r.logger.Info().Msgf("Next scheduled repair at %v", t.Add(r.duration).Format("15:04:05"))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *Repair) RepairArr(a *arr.Arr, tmdbId string) error {
|
|
||||||
|
|
||||||
cfg := config.GetConfig()
|
|
||||||
|
|
||||||
r.logger.Info().Msgf("Starting repair for %s", a.Name)
|
r.logger.Info().Msgf("Starting repair for %s", a.Name)
|
||||||
media, err := a.GetMedia(tmdbId)
|
media, err := a.GetMedia(tmdbId)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
r.logger.Info().Msgf("Failed to get %s media: %v", a.Type, err)
|
r.logger.Info().Msgf("Failed to get %s media: %v", a.Name, err)
|
||||||
return err
|
return brokenItems, err
|
||||||
}
|
}
|
||||||
r.logger.Info().Msgf("Found %d %s media", len(media), a.Type)
|
r.logger.Info().Msgf("Found %d %s media", len(media), a.Name)
|
||||||
|
|
||||||
if len(media) == 0 {
|
if len(media) == 0 {
|
||||||
r.logger.Info().Msgf("No %s media found", a.Type)
|
r.logger.Info().Msgf("No %s media found", a.Name)
|
||||||
return nil
|
return brokenItems, nil
|
||||||
}
|
}
|
||||||
// Check first media to confirm mounts are accessible
|
// Check first media to confirm mounts are accessible
|
||||||
if !r.isMediaAccessible(media[0]) {
|
if !r.isMediaAccessible(media[0]) {
|
||||||
r.logger.Info().Msgf("Skipping repair. Parent directory not accessible for. Check your mounts")
|
r.logger.Info().Msgf("Skipping repair. Parent directory not accessible for. Check your mounts")
|
||||||
return nil
|
return brokenItems, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
semaphore := make(chan struct{}, runtime.NumCPU()*4)
|
// Mutex for brokenItems
|
||||||
totalBrokenItems := 0
|
var mu sync.Mutex
|
||||||
var wg sync.WaitGroup
|
var wg sync.WaitGroup
|
||||||
for _, m := range media {
|
workerChan := make(chan arr.Content, min(len(media), r.workers))
|
||||||
|
|
||||||
|
for i := 0; i < r.workers; i++ {
|
||||||
wg.Add(1)
|
wg.Add(1)
|
||||||
semaphore <- struct{}{}
|
go func() {
|
||||||
go func(m arr.Content) {
|
|
||||||
defer wg.Done()
|
defer wg.Done()
|
||||||
defer func() { <-semaphore }()
|
for m := range workerChan {
|
||||||
brokenItems := r.getBrokenFiles(m)
|
select {
|
||||||
if brokenItems != nil {
|
case <-r.ctx.Done():
|
||||||
r.logger.Debug().Msgf("Found %d broken files for %s", len(brokenItems), m.Title)
|
return
|
||||||
if !cfg.Repair.SkipDeletion {
|
default:
|
||||||
if err := a.DeleteFiles(brokenItems); err != nil {
|
}
|
||||||
r.logger.Info().Msgf("Failed to delete broken items for %s: %v", m.Title, err)
|
items := r.getBrokenFiles(m)
|
||||||
|
if items != nil {
|
||||||
|
r.logger.Debug().Msgf("Found %d broken files for %s", len(items), m.Title)
|
||||||
|
if j.AutoProcess {
|
||||||
|
r.logger.Info().Msgf("Auto processing %d broken items for %s", len(items), m.Title)
|
||||||
|
|
||||||
|
// Delete broken items
|
||||||
|
if err := a.DeleteFiles(items); err != nil {
|
||||||
|
r.logger.Debug().Msgf("Failed to delete broken items for %s: %v", m.Title, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Search for missing items
|
||||||
|
if err := a.SearchMissing(items); err != nil {
|
||||||
|
r.logger.Debug().Msgf("Failed to search missing items for %s: %v", m.Title, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if err := a.SearchMissing(brokenItems); err != nil {
|
|
||||||
r.logger.Info().Msgf("Failed to search missing items for %s: %v", m.Title, err)
|
mu.Lock()
|
||||||
|
brokenItems = append(brokenItems, items...)
|
||||||
|
mu.Unlock()
|
||||||
}
|
}
|
||||||
totalBrokenItems += len(brokenItems)
|
|
||||||
}
|
}
|
||||||
}(m)
|
}()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
for _, m := range media {
|
||||||
|
select {
|
||||||
|
case <-r.ctx.Done():
|
||||||
|
break
|
||||||
|
default:
|
||||||
|
workerChan <- m
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
close(workerChan)
|
||||||
wg.Wait()
|
wg.Wait()
|
||||||
r.logger.Info().Msgf("Repair completed for %s. %d broken items found", a.Name, totalBrokenItems)
|
if len(brokenItems) == 0 {
|
||||||
return nil
|
r.logger.Info().Msgf("No broken items found for %s", a.Name)
|
||||||
|
return brokenItems, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
r.logger.Info().Msgf("Repair completed for %s. %d broken items found", a.Name, len(brokenItems))
|
||||||
|
return brokenItems, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *Repair) isMediaAccessible(m arr.Content) bool {
|
func (r *Repair) isMediaAccessible(m arr.Content) bool {
|
||||||
@@ -229,9 +441,10 @@ func (r *Repair) isMediaAccessible(m arr.Content) bool {
|
|||||||
}
|
}
|
||||||
firstFile := files[0]
|
firstFile := files[0]
|
||||||
r.logger.Debug().Msgf("Checking parent directory for %s", firstFile.Path)
|
r.logger.Debug().Msgf("Checking parent directory for %s", firstFile.Path)
|
||||||
if _, err := os.Stat(firstFile.Path); os.IsNotExist(err) {
|
//if _, err := os.Stat(firstFile.Path); os.IsNotExist(err) {
|
||||||
return false
|
// r.logger.Debug().Msgf("Parent directory not accessible for %s", firstFile.Path)
|
||||||
}
|
// return false
|
||||||
|
//}
|
||||||
// Check symlink parent directory
|
// Check symlink parent directory
|
||||||
symlinkPath := getSymlinkTarget(firstFile.Path)
|
symlinkPath := getSymlinkTarget(firstFile.Path)
|
||||||
|
|
||||||
@@ -248,7 +461,9 @@ func (r *Repair) isMediaAccessible(m arr.Content) bool {
|
|||||||
|
|
||||||
func (r *Repair) getBrokenFiles(media arr.Content) []arr.ContentFile {
|
func (r *Repair) getBrokenFiles(media arr.Content) []arr.ContentFile {
|
||||||
|
|
||||||
if r.IsZurg {
|
if r.useWebdav {
|
||||||
|
return r.getWebdavBrokenFiles(media)
|
||||||
|
} else if r.IsZurg {
|
||||||
return r.getZurgBrokenFiles(media)
|
return r.getZurgBrokenFiles(media)
|
||||||
} else {
|
} else {
|
||||||
return r.getFileBrokenFiles(media)
|
return r.getFileBrokenFiles(media)
|
||||||
@@ -260,17 +475,7 @@ func (r *Repair) getFileBrokenFiles(media arr.Content) []arr.ContentFile {
|
|||||||
|
|
||||||
brokenFiles := make([]arr.ContentFile, 0)
|
brokenFiles := make([]arr.ContentFile, 0)
|
||||||
|
|
||||||
uniqueParents := make(map[string][]arr.ContentFile)
|
uniqueParents := collectFiles(media)
|
||||||
files := media.Files
|
|
||||||
for _, file := range files {
|
|
||||||
target := getSymlinkTarget(file.Path)
|
|
||||||
if target != "" {
|
|
||||||
file.IsSymlink = true
|
|
||||||
dir, _ := filepath.Split(target)
|
|
||||||
parent := filepath.Base(filepath.Clean(dir))
|
|
||||||
uniqueParents[parent] = append(uniqueParents[parent], file)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for parent, f := range uniqueParents {
|
for parent, f := range uniqueParents {
|
||||||
// Check stat
|
// Check stat
|
||||||
@@ -296,25 +501,21 @@ func (r *Repair) getZurgBrokenFiles(media arr.Content) []arr.ContentFile {
|
|||||||
// This reduces bandwidth usage significantly
|
// This reduces bandwidth usage significantly
|
||||||
|
|
||||||
brokenFiles := make([]arr.ContentFile, 0)
|
brokenFiles := make([]arr.ContentFile, 0)
|
||||||
uniqueParents := make(map[string][]arr.ContentFile)
|
uniqueParents := collectFiles(media)
|
||||||
files := media.Files
|
tr := &http.Transport{
|
||||||
for _, file := range files {
|
TLSHandshakeTimeout: 60 * time.Second,
|
||||||
target := getSymlinkTarget(file.Path)
|
DialContext: (&net.Dialer{
|
||||||
if target != "" {
|
Timeout: 20 * time.Second,
|
||||||
file.IsSymlink = true
|
KeepAlive: 30 * time.Second,
|
||||||
dir, f := filepath.Split(target)
|
}).DialContext,
|
||||||
parent := filepath.Base(filepath.Clean(dir))
|
|
||||||
// Set target path folder/file.mkv
|
|
||||||
file.TargetPath = f
|
|
||||||
uniqueParents[parent] = append(uniqueParents[parent], file)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
client := request.New(request.WithTimeout(0), request.WithTransport(tr))
|
||||||
// Access zurg url + symlink folder + first file(encoded)
|
// Access zurg url + symlink folder + first file(encoded)
|
||||||
for parent, f := range uniqueParents {
|
for parent, f := range uniqueParents {
|
||||||
r.logger.Debug().Msgf("Checking %s", parent)
|
r.logger.Debug().Msgf("Checking %s", parent)
|
||||||
encodedParent := url.PathEscape(parent)
|
torrentName := url.PathEscape(filepath.Base(parent))
|
||||||
encodedFile := url.PathEscape(f[0].TargetPath)
|
encodedFile := url.PathEscape(f[0].TargetPath)
|
||||||
fullURL := fmt.Sprintf("%s/http/__all__/%s/%s", r.ZurgURL, encodedParent, encodedFile)
|
fullURL := fmt.Sprintf("%s/http/__all__/%s/%s", r.ZurgURL, torrentName, encodedFile)
|
||||||
// Check file stat first
|
// Check file stat first
|
||||||
if _, err := os.Stat(f[0].Path); os.IsNotExist(err) {
|
if _, err := os.Stat(f[0].Path); os.IsNotExist(err) {
|
||||||
r.logger.Debug().Msgf("Broken symlink found: %s", fullURL)
|
r.logger.Debug().Msgf("Broken symlink found: %s", fullURL)
|
||||||
@@ -322,21 +523,25 @@ func (r *Repair) getZurgBrokenFiles(media arr.Content) []arr.ContentFile {
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
resp, err := http.Get(fullURL)
|
resp, err := client.Get(fullURL)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
r.logger.Debug().Err(err).Msgf("Failed to reach %s", fullURL)
|
r.logger.Debug().Err(err).Msgf("Failed to reach %s", fullURL)
|
||||||
brokenFiles = append(brokenFiles, f...)
|
brokenFiles = append(brokenFiles, f...)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
resp.Body.Close()
|
|
||||||
if resp.StatusCode != http.StatusOK {
|
if resp.StatusCode < 200 || resp.StatusCode >= 300 {
|
||||||
r.logger.Debug().Msgf("Failed to get download url for %s", fullURL)
|
r.logger.Debug().Msgf("Failed to get download url for %s", fullURL)
|
||||||
|
resp.Body.Close()
|
||||||
brokenFiles = append(brokenFiles, f...)
|
brokenFiles = append(brokenFiles, f...)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
downloadUrl := resp.Request.URL.String()
|
downloadUrl := resp.Request.URL.String()
|
||||||
|
resp.Body.Close()
|
||||||
|
|
||||||
if downloadUrl != "" {
|
if downloadUrl != "" {
|
||||||
r.logger.Debug().Msgf("Found download url: %s", downloadUrl)
|
r.logger.Trace().Msgf("Found download url: %s", downloadUrl)
|
||||||
} else {
|
} else {
|
||||||
r.logger.Debug().Msgf("Failed to get download url for %s", fullURL)
|
r.logger.Debug().Msgf("Failed to get download url for %s", fullURL)
|
||||||
brokenFiles = append(brokenFiles, f...)
|
brokenFiles = append(brokenFiles, f...)
|
||||||
@@ -350,3 +555,226 @@ func (r *Repair) getZurgBrokenFiles(media arr.Content) []arr.ContentFile {
|
|||||||
r.logger.Debug().Msgf("%d broken files found for %s", len(brokenFiles), media.Title)
|
r.logger.Debug().Msgf("%d broken files found for %s", len(brokenFiles), media.Title)
|
||||||
return brokenFiles
|
return brokenFiles
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (r *Repair) getWebdavBrokenFiles(media arr.Content) []arr.ContentFile {
|
||||||
|
// Use internal webdav setup to check file availability
|
||||||
|
|
||||||
|
caches := r.deb.Caches
|
||||||
|
if len(caches) == 0 {
|
||||||
|
r.logger.Info().Msg("No caches found. Can't use webdav")
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
clients := r.deb.Clients
|
||||||
|
if len(clients) == 0 {
|
||||||
|
r.logger.Info().Msg("No clients found. Can't use webdav")
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
brokenFiles := make([]arr.ContentFile, 0)
|
||||||
|
uniqueParents := collectFiles(media)
|
||||||
|
// Access zurg url + symlink folder + first file(encoded)
|
||||||
|
for torrentPath, f := range uniqueParents {
|
||||||
|
r.logger.Debug().Msgf("Checking %s", torrentPath)
|
||||||
|
// Get the debrid first
|
||||||
|
dir := filepath.Dir(torrentPath)
|
||||||
|
debridName := ""
|
||||||
|
for _, client := range clients {
|
||||||
|
mountPath := client.GetMountPath()
|
||||||
|
if mountPath == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if filepath.Clean(mountPath) == filepath.Clean(dir) {
|
||||||
|
debridName = client.GetName()
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if debridName == "" {
|
||||||
|
r.logger.Debug().Msgf("No debrid found for %s. Skipping", torrentPath)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
cache, ok := caches[debridName]
|
||||||
|
if !ok {
|
||||||
|
r.logger.Debug().Msgf("No cache found for %s. Skipping", debridName)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
// Check if torrent exists
|
||||||
|
torrentName := filepath.Clean(filepath.Base(torrentPath))
|
||||||
|
torrent := cache.GetTorrentByName(torrentName)
|
||||||
|
if torrent == nil {
|
||||||
|
r.logger.Debug().Msgf("No torrent found for %s. Skipping", torrentName)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
files := make([]string, 0)
|
||||||
|
for _, file := range f {
|
||||||
|
files = append(files, file.TargetPath)
|
||||||
|
}
|
||||||
|
|
||||||
|
if cache.IsTorrentBroken(torrent, files) {
|
||||||
|
r.logger.Debug().Msgf("[webdav] Broken symlink found: %s", torrentPath)
|
||||||
|
// Delete the torrent?
|
||||||
|
brokenFiles = append(brokenFiles, f...)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
if len(brokenFiles) == 0 {
|
||||||
|
r.logger.Debug().Msgf("No broken files found for %s", media.Title)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
r.logger.Debug().Msgf("%d broken files found for %s", len(brokenFiles), media.Title)
|
||||||
|
return brokenFiles
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *Repair) GetJob(id string) *Job {
|
||||||
|
for _, job := range r.Jobs {
|
||||||
|
if job.ID == id {
|
||||||
|
return job
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *Repair) GetJobs() []*Job {
|
||||||
|
jobs := make([]*Job, 0)
|
||||||
|
for _, job := range r.Jobs {
|
||||||
|
jobs = append(jobs, job)
|
||||||
|
}
|
||||||
|
sort.Slice(jobs, func(i, j int) bool {
|
||||||
|
return jobs[i].StartedAt.After(jobs[j].StartedAt)
|
||||||
|
})
|
||||||
|
|
||||||
|
return jobs
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *Repair) ProcessJob(id string) error {
|
||||||
|
job := r.GetJob(id)
|
||||||
|
if job == nil {
|
||||||
|
return fmt.Errorf("job %s not found", id)
|
||||||
|
}
|
||||||
|
// All validation checks remain the same
|
||||||
|
if job.Status != JobPending {
|
||||||
|
return fmt.Errorf("job %s not pending", id)
|
||||||
|
}
|
||||||
|
if job.StartedAt.IsZero() {
|
||||||
|
return fmt.Errorf("job %s not started", id)
|
||||||
|
}
|
||||||
|
if !job.CompletedAt.IsZero() {
|
||||||
|
return fmt.Errorf("job %s already completed", id)
|
||||||
|
}
|
||||||
|
if !job.FailedAt.IsZero() {
|
||||||
|
return fmt.Errorf("job %s already failed", id)
|
||||||
|
}
|
||||||
|
|
||||||
|
brokenItems := job.BrokenItems
|
||||||
|
if len(brokenItems) == 0 {
|
||||||
|
r.logger.Info().Msgf("No broken items found for job %s", id)
|
||||||
|
job.CompletedAt = time.Now()
|
||||||
|
job.Status = JobCompleted
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
g, ctx := errgroup.WithContext(r.ctx)
|
||||||
|
g.SetLimit(r.workers)
|
||||||
|
|
||||||
|
for arrName, items := range brokenItems {
|
||||||
|
items := items
|
||||||
|
arrName := arrName
|
||||||
|
g.Go(func() error {
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
return ctx.Err()
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
|
||||||
|
a := r.arrs.Get(arrName)
|
||||||
|
if a == nil {
|
||||||
|
r.logger.Error().Msgf("Arr %s not found", arrName)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := a.DeleteFiles(items); err != nil {
|
||||||
|
r.logger.Error().Err(err).Msgf("Failed to delete broken items for %s", arrName)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
// Search for missing items
|
||||||
|
if err := a.SearchMissing(items); err != nil {
|
||||||
|
r.logger.Error().Err(err).Msgf("Failed to search missing items for %s", arrName)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update job status to in-progress
|
||||||
|
job.Status = JobProcessing
|
||||||
|
r.saveToFile()
|
||||||
|
|
||||||
|
// Launch a goroutine to wait for completion and update the job
|
||||||
|
go func() {
|
||||||
|
if err := g.Wait(); err != nil {
|
||||||
|
job.FailedAt = time.Now()
|
||||||
|
job.Error = err.Error()
|
||||||
|
job.CompletedAt = time.Now()
|
||||||
|
job.Status = JobFailed
|
||||||
|
r.logger.Error().Err(err).Msgf("Job %s failed", id)
|
||||||
|
} else {
|
||||||
|
job.CompletedAt = time.Now()
|
||||||
|
job.Status = JobCompleted
|
||||||
|
r.logger.Info().Msgf("Job %s completed successfully", id)
|
||||||
|
}
|
||||||
|
|
||||||
|
r.saveToFile()
|
||||||
|
}()
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *Repair) saveToFile() {
|
||||||
|
// Save jobs to file
|
||||||
|
data, err := json.Marshal(r.Jobs)
|
||||||
|
if err != nil {
|
||||||
|
r.logger.Debug().Err(err).Msg("Failed to marshal jobs")
|
||||||
|
}
|
||||||
|
_ = os.WriteFile(r.filename, data, 0644)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *Repair) loadFromFile() {
|
||||||
|
data, err := os.ReadFile(r.filename)
|
||||||
|
if err != nil && os.IsNotExist(err) {
|
||||||
|
r.Jobs = make(map[string]*Job)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
_jobs := make(map[string]*Job)
|
||||||
|
err = json.Unmarshal(data, &_jobs)
|
||||||
|
if err != nil {
|
||||||
|
r.logger.Trace().Err(err).Msg("Failed to unmarshal jobs; resetting")
|
||||||
|
r.Jobs = make(map[string]*Job)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
jobs := make(map[string]*Job)
|
||||||
|
for k, v := range _jobs {
|
||||||
|
if v.Status != JobPending {
|
||||||
|
// Skip jobs that are not pending processing due to reboot
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
jobs[k] = v
|
||||||
|
}
|
||||||
|
r.Jobs = jobs
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *Repair) DeleteJobs(ids []string) {
|
||||||
|
for _, id := range ids {
|
||||||
|
if id == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
for k, job := range r.Jobs {
|
||||||
|
if job.ID == id {
|
||||||
|
delete(r.Jobs, k)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
go r.saveToFile()
|
||||||
|
}
|
||||||
|
|||||||
@@ -6,13 +6,15 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"github.com/go-chi/chi/v5"
|
"github.com/go-chi/chi/v5"
|
||||||
"github.com/go-chi/chi/v5/middleware"
|
"github.com/go-chi/chi/v5/middleware"
|
||||||
|
"github.com/goccy/go-json"
|
||||||
"github.com/rs/zerolog"
|
"github.com/rs/zerolog"
|
||||||
"github.com/sirrobot01/debrid-blackhole/internal/config"
|
"github.com/sirrobot01/decypharr/internal/config"
|
||||||
"github.com/sirrobot01/debrid-blackhole/internal/logger"
|
"github.com/sirrobot01/decypharr/internal/logger"
|
||||||
"io"
|
"io"
|
||||||
"net/http"
|
"net/http"
|
||||||
"os"
|
"os"
|
||||||
"os/signal"
|
"os/signal"
|
||||||
|
"runtime"
|
||||||
"syscall"
|
"syscall"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -22,8 +24,7 @@ type Server struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func New() *Server {
|
func New() *Server {
|
||||||
cfg := config.GetConfig()
|
l := logger.New("http")
|
||||||
l := logger.NewLogger("http", cfg.QBitTorrent.LogLevel, os.Stdout)
|
|
||||||
r := chi.NewRouter()
|
r := chi.NewRouter()
|
||||||
r.Use(middleware.Recoverer)
|
r.Use(middleware.Recoverer)
|
||||||
r.Handle("/static/*", http.StripPrefix("/static/", http.FileServer(http.Dir("static"))))
|
r.Handle("/static/*", http.StripPrefix("/static/", http.FileServer(http.Dir("static"))))
|
||||||
@@ -35,11 +36,16 @@ func New() *Server {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (s *Server) Start(ctx context.Context) error {
|
func (s *Server) Start(ctx context.Context) error {
|
||||||
cfg := config.GetConfig()
|
cfg := config.Get()
|
||||||
// Register routes
|
// Register routes
|
||||||
|
// Register webhooks
|
||||||
|
s.router.Post("/webhooks/tautulli", s.handleTautulli)
|
||||||
|
|
||||||
|
// Register logs
|
||||||
s.router.Get("/logs", s.getLogs)
|
s.router.Get("/logs", s.getLogs)
|
||||||
|
s.router.Get("/stats", s.getStats)
|
||||||
port := fmt.Sprintf(":%s", cfg.QBitTorrent.Port)
|
port := fmt.Sprintf(":%s", cfg.QBitTorrent.Port)
|
||||||
s.logger.Info().Msgf("Starting server on %s", port)
|
s.logger.Info().Msgf("Server started on %s", port)
|
||||||
srv := &http.Server{
|
srv := &http.Server{
|
||||||
Addr: port,
|
Addr: port,
|
||||||
Handler: s.router,
|
Handler: s.router,
|
||||||
@@ -99,3 +105,29 @@ func (s *Server) getLogs(w http.ResponseWriter, r *http.Request) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (s *Server) getStats(w http.ResponseWriter, r *http.Request) {
|
||||||
|
var memStats runtime.MemStats
|
||||||
|
runtime.ReadMemStats(&memStats)
|
||||||
|
|
||||||
|
stats := map[string]interface{}{
|
||||||
|
// Memory stats
|
||||||
|
"heap_alloc_mb": fmt.Sprintf("%.2fMB", float64(memStats.HeapAlloc)/1024/1024),
|
||||||
|
"total_alloc_mb": fmt.Sprintf("%.2fMB", float64(memStats.TotalAlloc)/1024/1024),
|
||||||
|
"sys_mb": fmt.Sprintf("%.2fMB", float64(memStats.Sys)/1024/1024),
|
||||||
|
|
||||||
|
// GC stats
|
||||||
|
"gc_cycles": memStats.NumGC,
|
||||||
|
// Goroutine stats
|
||||||
|
"goroutines": runtime.NumGoroutine(),
|
||||||
|
|
||||||
|
// System info
|
||||||
|
"num_cpu": runtime.NumCPU(),
|
||||||
|
}
|
||||||
|
|
||||||
|
w.Header().Set("Content-Type", "application/json")
|
||||||
|
w.WriteHeader(http.StatusOK)
|
||||||
|
if err := json.NewEncoder(w).Encode(stats); err != nil {
|
||||||
|
s.logger.Error().Err(err).Msg("Failed to encode stats")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
54
pkg/server/webhook.go
Normal file
54
pkg/server/webhook.go
Normal file
@@ -0,0 +1,54 @@
|
|||||||
|
package server
|
||||||
|
|
||||||
|
import (
|
||||||
|
"cmp"
|
||||||
|
"github.com/goccy/go-json"
|
||||||
|
"github.com/sirrobot01/decypharr/pkg/service"
|
||||||
|
"net/http"
|
||||||
|
)
|
||||||
|
|
||||||
|
func (s *Server) handleTautulli(w http.ResponseWriter, r *http.Request) {
|
||||||
|
// Verify it's a POST request
|
||||||
|
if r.Method != http.MethodPost {
|
||||||
|
http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parse the JSON body from Tautulli
|
||||||
|
var payload struct {
|
||||||
|
Type string `json:"type"`
|
||||||
|
TvdbID string `json:"tvdb_id"`
|
||||||
|
TmdbID string `json:"tmdb_id"`
|
||||||
|
Topic string `json:"topic"`
|
||||||
|
AutoProcess bool `json:"autoProcess"`
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := json.NewDecoder(r.Body).Decode(&payload); err != nil {
|
||||||
|
s.logger.Error().Err(err).Msg("Failed to parse webhook body")
|
||||||
|
http.Error(w, "Failed to parse webhook body: "+err.Error(), http.StatusBadRequest)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if payload.Topic != "tautulli" {
|
||||||
|
http.Error(w, "Invalid topic", http.StatusBadRequest)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if payload.TmdbID == "" && payload.TvdbID == "" {
|
||||||
|
http.Error(w, "Invalid ID", http.StatusBadRequest)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
svc := service.GetService()
|
||||||
|
repair := svc.Repair
|
||||||
|
|
||||||
|
mediaId := cmp.Or(payload.TmdbID, payload.TvdbID)
|
||||||
|
|
||||||
|
if repair == nil {
|
||||||
|
http.Error(w, "Repair service is not enabled", http.StatusInternalServerError)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if err := repair.AddJob([]string{}, []string{mediaId}, payload.AutoProcess, false); err != nil {
|
||||||
|
http.Error(w, "Failed to add job: "+err.Error(), http.StatusInternalServerError)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -1,17 +1,16 @@
|
|||||||
package service
|
package service
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"github.com/sirrobot01/debrid-blackhole/pkg/arr"
|
"github.com/sirrobot01/decypharr/pkg/arr"
|
||||||
"github.com/sirrobot01/debrid-blackhole/pkg/debrid"
|
"github.com/sirrobot01/decypharr/pkg/debrid/debrid"
|
||||||
"github.com/sirrobot01/debrid-blackhole/pkg/debrid/engine"
|
"github.com/sirrobot01/decypharr/pkg/repair"
|
||||||
"github.com/sirrobot01/debrid-blackhole/pkg/repair"
|
|
||||||
"sync"
|
"sync"
|
||||||
)
|
)
|
||||||
|
|
||||||
type Service struct {
|
type Service struct {
|
||||||
Repair *repair.Repair
|
Repair *repair.Repair
|
||||||
Arr *arr.Storage
|
Arr *arr.Storage
|
||||||
Debrid *engine.Engine
|
Debrid *debrid.Engine
|
||||||
}
|
}
|
||||||
|
|
||||||
var (
|
var (
|
||||||
@@ -22,9 +21,9 @@ var (
|
|||||||
func New() *Service {
|
func New() *Service {
|
||||||
once.Do(func() {
|
once.Do(func() {
|
||||||
arrs := arr.NewStorage()
|
arrs := arr.NewStorage()
|
||||||
deb := debrid.New()
|
deb := debrid.NewEngine()
|
||||||
instance = &Service{
|
instance = &Service{
|
||||||
Repair: repair.New(deb, arrs),
|
Repair: repair.New(arrs, deb),
|
||||||
Arr: arrs,
|
Arr: arrs,
|
||||||
Debrid: deb,
|
Debrid: deb,
|
||||||
}
|
}
|
||||||
@@ -42,15 +41,15 @@ func GetService() *Service {
|
|||||||
|
|
||||||
func Update() *Service {
|
func Update() *Service {
|
||||||
arrs := arr.NewStorage()
|
arrs := arr.NewStorage()
|
||||||
deb := debrid.New()
|
deb := debrid.NewEngine()
|
||||||
instance = &Service{
|
instance = &Service{
|
||||||
Repair: repair.New(deb, arrs),
|
Repair: repair.New(arrs, deb),
|
||||||
Arr: arrs,
|
Arr: arrs,
|
||||||
Debrid: deb,
|
Debrid: deb,
|
||||||
}
|
}
|
||||||
return instance
|
return instance
|
||||||
}
|
}
|
||||||
|
|
||||||
func GetDebrid() *engine.Engine {
|
func GetDebrid() *debrid.Engine {
|
||||||
return GetService().Debrid
|
return GetService().Debrid
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -23,6 +23,9 @@ func (ui *Handler) Routes() http.Handler {
|
|||||||
r.Get("/arrs", ui.handleGetArrs)
|
r.Get("/arrs", ui.handleGetArrs)
|
||||||
r.Post("/add", ui.handleAddContent)
|
r.Post("/add", ui.handleAddContent)
|
||||||
r.Post("/repair", ui.handleRepairMedia)
|
r.Post("/repair", ui.handleRepairMedia)
|
||||||
|
r.Get("/repair/jobs", ui.handleGetRepairJobs)
|
||||||
|
r.Post("/repair/jobs/{id}/process", ui.handleProcessRepairJob)
|
||||||
|
r.Delete("/repair/jobs", ui.handleDeleteRepairJob)
|
||||||
r.Get("/torrents", ui.handleGetTorrents)
|
r.Get("/torrents", ui.handleGetTorrents)
|
||||||
r.Delete("/torrents/{category}/{hash}", ui.handleDeleteTorrent)
|
r.Delete("/torrents/{category}/{hash}", ui.handleDeleteTorrent)
|
||||||
r.Delete("/torrents/", ui.handleDeleteTorrents)
|
r.Delete("/torrents/", ui.handleDeleteTorrents)
|
||||||
|
|||||||
@@ -2,25 +2,24 @@ package web
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"embed"
|
"embed"
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"github.com/goccy/go-json"
|
||||||
"github.com/gorilla/sessions"
|
"github.com/gorilla/sessions"
|
||||||
"github.com/sirrobot01/debrid-blackhole/internal/config"
|
"github.com/sirrobot01/decypharr/internal/config"
|
||||||
"github.com/sirrobot01/debrid-blackhole/internal/logger"
|
"github.com/sirrobot01/decypharr/internal/logger"
|
||||||
"github.com/sirrobot01/debrid-blackhole/internal/request"
|
"github.com/sirrobot01/decypharr/internal/request"
|
||||||
"github.com/sirrobot01/debrid-blackhole/internal/utils"
|
"github.com/sirrobot01/decypharr/internal/utils"
|
||||||
"github.com/sirrobot01/debrid-blackhole/pkg/qbit"
|
"github.com/sirrobot01/decypharr/pkg/qbit"
|
||||||
"github.com/sirrobot01/debrid-blackhole/pkg/service"
|
"github.com/sirrobot01/decypharr/pkg/service"
|
||||||
"golang.org/x/crypto/bcrypt"
|
"golang.org/x/crypto/bcrypt"
|
||||||
"html/template"
|
"html/template"
|
||||||
"net/http"
|
"net/http"
|
||||||
"os"
|
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/go-chi/chi/v5"
|
"github.com/go-chi/chi/v5"
|
||||||
"github.com/rs/zerolog"
|
"github.com/rs/zerolog"
|
||||||
"github.com/sirrobot01/debrid-blackhole/pkg/arr"
|
"github.com/sirrobot01/decypharr/pkg/arr"
|
||||||
"github.com/sirrobot01/debrid-blackhole/pkg/version"
|
"github.com/sirrobot01/decypharr/pkg/version"
|
||||||
)
|
)
|
||||||
|
|
||||||
type AddRequest struct {
|
type AddRequest struct {
|
||||||
@@ -49,6 +48,7 @@ type RepairRequest struct {
|
|||||||
ArrName string `json:"arr"`
|
ArrName string `json:"arr"`
|
||||||
MediaIds []string `json:"mediaIds"`
|
MediaIds []string `json:"mediaIds"`
|
||||||
Async bool `json:"async"`
|
Async bool `json:"async"`
|
||||||
|
AutoProcess bool `json:"autoProcess"`
|
||||||
}
|
}
|
||||||
|
|
||||||
//go:embed web/*
|
//go:embed web/*
|
||||||
@@ -60,10 +60,9 @@ type Handler struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func New(qbit *qbit.QBit) *Handler {
|
func New(qbit *qbit.QBit) *Handler {
|
||||||
cfg := config.GetConfig()
|
|
||||||
return &Handler{
|
return &Handler{
|
||||||
qbit: qbit,
|
qbit: qbit,
|
||||||
logger: logger.NewLogger("ui", cfg.LogLevel, os.Stdout),
|
logger: logger.New("ui"),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -94,7 +93,7 @@ func init() {
|
|||||||
func (ui *Handler) authMiddleware(next http.Handler) http.Handler {
|
func (ui *Handler) authMiddleware(next http.Handler) http.Handler {
|
||||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
// Check if setup is needed
|
// Check if setup is needed
|
||||||
cfg := config.GetConfig()
|
cfg := config.Get()
|
||||||
if cfg.NeedsSetup() && r.URL.Path != "/setup" {
|
if cfg.NeedsSetup() && r.URL.Path != "/setup" {
|
||||||
http.Redirect(w, r, "/setup", http.StatusSeeOther)
|
http.Redirect(w, r, "/setup", http.StatusSeeOther)
|
||||||
return
|
return
|
||||||
@@ -128,7 +127,7 @@ func (ui *Handler) verifyAuth(username, password string) bool {
|
|||||||
if username == "" {
|
if username == "" {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
auth := config.GetConfig().GetAuth()
|
auth := config.Get().GetAuth()
|
||||||
if auth == nil {
|
if auth == nil {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
@@ -188,7 +187,7 @@ func (ui *Handler) LogoutHandler(w http.ResponseWriter, r *http.Request) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (ui *Handler) SetupHandler(w http.ResponseWriter, r *http.Request) {
|
func (ui *Handler) SetupHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
cfg := config.GetConfig()
|
cfg := config.Get()
|
||||||
authCfg := cfg.GetAuth()
|
authCfg := cfg.GetAuth()
|
||||||
|
|
||||||
if !cfg.NeedsSetup() {
|
if !cfg.NeedsSetup() {
|
||||||
@@ -306,10 +305,11 @@ func (ui *Handler) handleAddContent(w http.ResponseWriter, r *http.Request) {
|
|||||||
|
|
||||||
arrName := r.FormValue("arr")
|
arrName := r.FormValue("arr")
|
||||||
notSymlink := r.FormValue("notSymlink") == "true"
|
notSymlink := r.FormValue("notSymlink") == "true"
|
||||||
|
downloadUncached := r.FormValue("downloadUncached") == "true"
|
||||||
|
|
||||||
_arr := svc.Arr.Get(arrName)
|
_arr := svc.Arr.Get(arrName)
|
||||||
if _arr == nil {
|
if _arr == nil {
|
||||||
_arr = arr.New(arrName, "", "", false)
|
_arr = arr.New(arrName, "", "", false, false, &downloadUncached)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Handle URLs
|
// Handle URLs
|
||||||
@@ -322,9 +322,13 @@ func (ui *Handler) handleAddContent(w http.ResponseWriter, r *http.Request) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, url := range urlList {
|
for _, url := range urlList {
|
||||||
importReq := qbit.NewImportRequest(url, _arr, !notSymlink)
|
magnet, err := utils.GetMagnetFromUrl(url)
|
||||||
err := importReq.Process(ui.qbit)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
errs = append(errs, fmt.Sprintf("Failed to parse URL %s: %v", url, err))
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
importReq := qbit.NewImportRequest(magnet, _arr, !notSymlink, downloadUncached)
|
||||||
|
if err := importReq.Process(ui.qbit); err != nil {
|
||||||
errs = append(errs, fmt.Sprintf("URL %s: %v", url, err))
|
errs = append(errs, fmt.Sprintf("URL %s: %v", url, err))
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
@@ -347,7 +351,7 @@ func (ui *Handler) handleAddContent(w http.ResponseWriter, r *http.Request) {
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
importReq := qbit.NewImportRequest(magnet.Link, _arr, !notSymlink)
|
importReq := qbit.NewImportRequest(magnet, _arr, !notSymlink, downloadUncached)
|
||||||
err = importReq.Process(ui.qbit)
|
err = importReq.Process(ui.qbit)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
errs = append(errs, fmt.Sprintf("File %s: %v", fileHeader.Filename, err))
|
errs = append(errs, fmt.Sprintf("File %s: %v", fileHeader.Filename, err))
|
||||||
@@ -375,15 +379,20 @@ func (ui *Handler) handleRepairMedia(w http.ResponseWriter, r *http.Request) {
|
|||||||
|
|
||||||
svc := service.GetService()
|
svc := service.GetService()
|
||||||
|
|
||||||
|
var arrs []string
|
||||||
|
|
||||||
|
if req.ArrName != "" {
|
||||||
_arr := svc.Arr.Get(req.ArrName)
|
_arr := svc.Arr.Get(req.ArrName)
|
||||||
if _arr == nil {
|
if _arr == nil {
|
||||||
http.Error(w, "No Arrs found to repair", http.StatusNotFound)
|
http.Error(w, "No Arrs found to repair", http.StatusNotFound)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
arrs = append(arrs, req.ArrName)
|
||||||
|
}
|
||||||
|
|
||||||
if req.Async {
|
if req.Async {
|
||||||
go func() {
|
go func() {
|
||||||
if err := svc.Repair.Repair([]*arr.Arr{_arr}, req.MediaIds); err != nil {
|
if err := svc.Repair.AddJob(arrs, req.MediaIds, req.AutoProcess, false); err != nil {
|
||||||
ui.logger.Error().Err(err).Msg("Failed to repair media")
|
ui.logger.Error().Err(err).Msg("Failed to repair media")
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
@@ -391,10 +400,9 @@ func (ui *Handler) handleRepairMedia(w http.ResponseWriter, r *http.Request) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := svc.Repair.Repair([]*arr.Arr{_arr}, req.MediaIds); err != nil {
|
if err := svc.Repair.AddJob([]string{req.ArrName}, req.MediaIds, req.AutoProcess, false); err != nil {
|
||||||
http.Error(w, fmt.Sprintf("Failed to repair: %v", err), http.StatusInternalServerError)
|
http.Error(w, fmt.Sprintf("Failed to repair: %v", err), http.StatusInternalServerError)
|
||||||
return
|
return
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
request.JSONResponse(w, "Repair completed", http.StatusOK)
|
request.JSONResponse(w, "Repair completed", http.StatusOK)
|
||||||
@@ -432,12 +440,56 @@ func (ui *Handler) handleDeleteTorrents(w http.ResponseWriter, r *http.Request)
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (ui *Handler) handleGetConfig(w http.ResponseWriter, r *http.Request) {
|
func (ui *Handler) handleGetConfig(w http.ResponseWriter, r *http.Request) {
|
||||||
cfg := config.GetConfig()
|
cfg := config.Get()
|
||||||
arrCfgs := make([]config.Arr, 0)
|
arrCfgs := make([]config.Arr, 0)
|
||||||
svc := service.GetService()
|
svc := service.GetService()
|
||||||
for _, a := range svc.Arr.GetAll() {
|
for _, a := range svc.Arr.GetAll() {
|
||||||
arrCfgs = append(arrCfgs, config.Arr{Host: a.Host, Name: a.Name, Token: a.Token})
|
arrCfgs = append(arrCfgs, config.Arr{
|
||||||
|
Host: a.Host,
|
||||||
|
Name: a.Name,
|
||||||
|
Token: a.Token,
|
||||||
|
Cleanup: a.Cleanup,
|
||||||
|
SkipRepair: a.SkipRepair,
|
||||||
|
DownloadUncached: a.DownloadUncached,
|
||||||
|
})
|
||||||
}
|
}
|
||||||
cfg.Arrs = arrCfgs
|
cfg.Arrs = arrCfgs
|
||||||
request.JSONResponse(w, cfg, http.StatusOK)
|
request.JSONResponse(w, cfg, http.StatusOK)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (ui *Handler) handleGetRepairJobs(w http.ResponseWriter, r *http.Request) {
|
||||||
|
svc := service.GetService()
|
||||||
|
request.JSONResponse(w, svc.Repair.GetJobs(), http.StatusOK)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ui *Handler) handleProcessRepairJob(w http.ResponseWriter, r *http.Request) {
|
||||||
|
id := chi.URLParam(r, "id")
|
||||||
|
if id == "" {
|
||||||
|
http.Error(w, "No job ID provided", http.StatusBadRequest)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
svc := service.GetService()
|
||||||
|
if err := svc.Repair.ProcessJob(id); err != nil {
|
||||||
|
ui.logger.Error().Err(err).Msg("Failed to process repair job")
|
||||||
|
}
|
||||||
|
w.WriteHeader(http.StatusOK)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ui *Handler) handleDeleteRepairJob(w http.ResponseWriter, r *http.Request) {
|
||||||
|
// Read ids from body
|
||||||
|
var req struct {
|
||||||
|
IDs []string `json:"ids"`
|
||||||
|
}
|
||||||
|
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
||||||
|
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if len(req.IDs) == 0 {
|
||||||
|
http.Error(w, "No job IDs provided", http.StatusBadRequest)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
svc := service.GetService()
|
||||||
|
svc.Repair.DeleteJobs(req.IDs)
|
||||||
|
w.WriteHeader(http.StatusOK)
|
||||||
|
}
|
||||||
@@ -11,8 +11,8 @@
|
|||||||
<div class="row">
|
<div class="row">
|
||||||
<div class="col-md-6">
|
<div class="col-md-6">
|
||||||
<div class="form-group">
|
<div class="form-group">
|
||||||
<label for="qbitDebug">Log Level</label>
|
<label for="log-level">Log Level</label>
|
||||||
<select class="form-select" name="qbit.log_level" id="log-level" disabled>
|
<select class="form-select" name="log_level" id="log-level" disabled>
|
||||||
<option value="info">Info</option>
|
<option value="info">Info</option>
|
||||||
<option value="debug">Debug</option>
|
<option value="debug">Debug</option>
|
||||||
<option value="warn">Warning</option>
|
<option value="warn">Warning</option>
|
||||||
@@ -27,14 +27,27 @@
|
|||||||
<!-- Empty label to keep the button aligned -->
|
<!-- Empty label to keep the button aligned -->
|
||||||
</label>
|
</label>
|
||||||
<div class="btn btn-primary w-100" onclick="registerMagnetLinkHandler()" id="registerMagnetLink">
|
<div class="btn btn-primary w-100" onclick="registerMagnetLinkHandler()" id="registerMagnetLink">
|
||||||
Open Magnet Links in DecyphArr
|
Open Magnet Links in Decypharr
|
||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
<div class="col-12 mt-3">
|
<div class="col-md-6 mt-3">
|
||||||
|
<div class="form-group">
|
||||||
|
<label for="discordWebhookUrl">Discord Webhook URL</label>
|
||||||
|
<div class="input-group">
|
||||||
|
<textarea type="text"
|
||||||
|
class="form-control"
|
||||||
|
id="discordWebhookUrl"
|
||||||
|
name="discord_webhook_url"
|
||||||
|
disabled
|
||||||
|
placeholder="https://discord..."></textarea>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
<div class="col-md-6 mt-3">
|
||||||
<div class="form-group">
|
<div class="form-group">
|
||||||
<label for="allowedExtensions">Allowed File Extensions</label>
|
<label for="allowedExtensions">Allowed File Extensions</label>
|
||||||
<div class="input-group">
|
<div class="input-group">
|
||||||
<textarea type="text"
|
<textarea
|
||||||
class="form-control"
|
class="form-control"
|
||||||
id="allowedExtensions"
|
id="allowedExtensions"
|
||||||
name="allowed_file_types"
|
name="allowed_file_types"
|
||||||
@@ -73,13 +86,13 @@
|
|||||||
</div>
|
</div>
|
||||||
<!-- Debrid Configuration -->
|
<!-- Debrid Configuration -->
|
||||||
<div class="section mb-5">
|
<div class="section mb-5">
|
||||||
<h5 class="border-bottom pb-2">Debrid Configuration</h5>
|
<h5 class="border-bottom pb-2">Debrids</h5>
|
||||||
<div id="debridConfigs"></div>
|
<div id="debridConfigs"></div>
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
<!-- QBitTorrent Configuration -->
|
<!-- QBitTorrent Configuration -->
|
||||||
<div class="section mb-5">
|
<div class="section mb-5">
|
||||||
<h5 class="border-bottom pb-2">QBitTorrent Configuration</h5>
|
<h5 class="border-bottom pb-2">QBitTorrent</h5>
|
||||||
<div class="row">
|
<div class="row">
|
||||||
<div class="col-md-6 mb-3">
|
<div class="col-md-6 mb-3">
|
||||||
<label class="form-label">Username</label>
|
<label class="form-label">Username</label>
|
||||||
@@ -101,24 +114,16 @@
|
|||||||
<label class="form-label">Refresh Interval (seconds)</label>
|
<label class="form-label">Refresh Interval (seconds)</label>
|
||||||
<input type="number" class="form-control" name="qbit.refresh_interval">
|
<input type="number" class="form-control" name="qbit.refresh_interval">
|
||||||
</div>
|
</div>
|
||||||
<div class="col-12 mb-3">
|
<div class="col-md-6 mb-3">
|
||||||
<div class="form-group">
|
<input type="checkbox" disabled class="form-check-input" name="qbit.skip_pre_cache">
|
||||||
<label for="qbitDebug">Log Level</label>
|
<label class="form-check-label">Skip Pre-Cache On Download(This caches a tiny part of your file to speed up import)</label>
|
||||||
<select class="form-select" name="qbit.log_level" id="qbitDebug" disabled>
|
|
||||||
<option value="info">Info</option>
|
|
||||||
<option value="debug">Debug</option>
|
|
||||||
<option value="warn">Warning</option>
|
|
||||||
<option value="error">Error</option>
|
|
||||||
<option value="trace">Trace</option>
|
|
||||||
</select>
|
|
||||||
</div>
|
|
||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
<!-- Arr Configurations -->
|
<!-- Arr Configurations -->
|
||||||
<div class="section mb-5">
|
<div class="section mb-5">
|
||||||
<h5 class="border-bottom pb-2">Arr Configurations</h5>
|
<h5 class="border-bottom pb-2">Arrs</h5>
|
||||||
<div id="arrConfigs"></div>
|
<div id="arrConfigs"></div>
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
@@ -126,19 +131,31 @@
|
|||||||
<div class="section mb-5">
|
<div class="section mb-5">
|
||||||
<h5 class="border-bottom pb-2">Repair Configuration</h5>
|
<h5 class="border-bottom pb-2">Repair Configuration</h5>
|
||||||
<div class="row">
|
<div class="row">
|
||||||
<div class="col-md-6 mb-3">
|
<div class="col-md-3 mb-3">
|
||||||
<label class="form-label">Interval</label>
|
<label class="form-label">Interval</label>
|
||||||
<input type="text" disabled class="form-control" name="repair.interval" placeholder="e.g., 24h">
|
<input type="text" disabled class="form-control" name="repair.interval" placeholder="e.g., 24h">
|
||||||
</div>
|
</div>
|
||||||
|
<div class="col-md-4 mb-3">
|
||||||
|
<label class="form-label">Zurg URL</label>
|
||||||
|
<input type="text" disabled class="form-control" name="repair.zurg_url" placeholder="http://zurg:9999">
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
<div class="col-12">
|
<div class="col-12">
|
||||||
<div class="form-check mb-2">
|
<div class="form-check me-3 d-inline-block">
|
||||||
<input type="checkbox" disabled class="form-check-input" name="repair.enabled" id="repairEnabled">
|
<input type="checkbox" disabled class="form-check-input" name="repair.enabled" id="repairEnabled">
|
||||||
<label class="form-check-label" for="repairEnabled">Enable Repair</label>
|
<label class="form-check-label" for="repairEnabled">Enable Repair</label>
|
||||||
</div>
|
</div>
|
||||||
<div class="form-check">
|
<div class="form-check me-3 d-inline-block">
|
||||||
|
<input type="checkbox" disabled class="form-check-input" name="repair.use_webdav" id="repairUseWebdav">
|
||||||
|
<label class="form-check-label" for="repairUseWebdav">Use Webdav</label>
|
||||||
|
</div>
|
||||||
|
<div class="form-check me-3 d-inline-block">
|
||||||
<input type="checkbox" disabled class="form-check-input" name="repair.run_on_start" id="repairOnStart">
|
<input type="checkbox" disabled class="form-check-input" name="repair.run_on_start" id="repairOnStart">
|
||||||
<label class="form-check-label" for="repairOnStart">Run on Start</label>
|
<label class="form-check-label" for="repairOnStart">Run on Start</label>
|
||||||
</div>
|
</div>
|
||||||
|
<div class="form-check d-inline-block">
|
||||||
|
<input type="checkbox" disabled class="form-check-input" name="repair.auto_process" id="autoProcess">
|
||||||
|
<label class="form-check-label" for="autoProcess">Auto Process(Scheduled jobs will be processed automatically)</label>
|
||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
@@ -150,7 +167,7 @@
|
|||||||
// Templates for dynamic elements
|
// Templates for dynamic elements
|
||||||
const debridTemplate = (index) => `
|
const debridTemplate = (index) => `
|
||||||
<div class="config-item position-relative mb-3 p-3 border rounded">
|
<div class="config-item position-relative mb-3 p-3 border rounded">
|
||||||
<div class="row">
|
<div class="row mb-2">
|
||||||
<div class="col-md-6 mb-3">
|
<div class="col-md-6 mb-3">
|
||||||
<label class="form-label">Name</label>
|
<label class="form-label">Name</label>
|
||||||
<input type="text" disabled class="form-control" name="debrid[${index}].name" required>
|
<input type="text" disabled class="form-control" name="debrid[${index}].name" required>
|
||||||
@@ -182,6 +199,47 @@
|
|||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
|
<div class="row mt-3 webdav-${index} d-none">
|
||||||
|
<h6 class="pb-2">Webdav</h6>
|
||||||
|
<div class="col-md-3 mb-3">
|
||||||
|
<label class="form-label">Torrents Refresh Interval</label>
|
||||||
|
<input type="text" disabled class="form-control" name="debrid[${index}].torrents_refresh_interval" placeholder="15s" required>
|
||||||
|
</div>
|
||||||
|
<div class="col-md-3 mb-3">
|
||||||
|
<label class="form-label">Download Links Refresh Interval</label>
|
||||||
|
<input type="text" disabled class="form-control" name="debrid[${index}].download_links_refresh_interval" placeholder="24h" required>
|
||||||
|
</div>
|
||||||
|
<div class="col-md-3 mb-3">
|
||||||
|
<label class="form-label">Expire Links After</label>
|
||||||
|
<input type="text" disabled class="form-control" name="debrid[${index}].auto_expire_links_after" placeholder="24h" required>
|
||||||
|
</div>
|
||||||
|
<div class="col-md-3 mb-3">
|
||||||
|
<label class="form-label">Folder Naming Structure</label>
|
||||||
|
<select class="form-select" name="debrid[${index}].folder_naming" disabled>
|
||||||
|
<option value="filename">File name</option>
|
||||||
|
<option value="filename_no_ext">File name with No Ext</option>
|
||||||
|
<option value="original">Original name</option>
|
||||||
|
<option value="original_no_ext">Original name with No Ext</option>
|
||||||
|
<option value="id">Use ID</option>
|
||||||
|
</select>
|
||||||
|
</div>
|
||||||
|
<div class="col-md-3 mb-3">
|
||||||
|
<label class="form-label">Number of Workers</label>
|
||||||
|
<input type="text" disabled class="form-control" name="debrid[${index}].workers" required placeholder="e.g., 20">
|
||||||
|
</div>
|
||||||
|
<div class="col-md-3 mb-3">
|
||||||
|
<label class="form-label">Rclone RC URL</label>
|
||||||
|
<input type="text" disabled class="form-control" name="debrid[${index}].rc_url" placeholder="e.g., http://localhost:9990">
|
||||||
|
</div>
|
||||||
|
<div class="col-md-3 mb-3">
|
||||||
|
<label class="form-label">Rclone RC User</label>
|
||||||
|
<input type="text" disabled class="form-control" name="debrid[${index}].rc_user">
|
||||||
|
</div>
|
||||||
|
<div class="col-md-3 mb-3">
|
||||||
|
<label class="form-label">Rclone RC Password</label>
|
||||||
|
<input type="password" disabled class="form-control" name="debrid[${index}].rc_pass">
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
</div>
|
</div>
|
||||||
`;
|
`;
|
||||||
|
|
||||||
@@ -201,6 +259,26 @@
|
|||||||
<input type="password" disabled class="form-control" name="arr[${index}].token" required>
|
<input type="password" disabled class="form-control" name="arr[${index}].token" required>
|
||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
|
<div class="row">
|
||||||
|
<div class="col-md-2 mb-3">
|
||||||
|
<div class="form-check">
|
||||||
|
<label class="form-check-label">Cleanup Queue</label>
|
||||||
|
<input type="checkbox" disabled class="form-check-input" name="arr[${index}].cleanup">
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
<div class="col-md-2 mb-3">
|
||||||
|
<div class="form-check">
|
||||||
|
<label class="form-check-label">Skip Repair</label>
|
||||||
|
<input type="checkbox" disabled class="form-check-input" name="arr[${index}].skip_repair">
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
<div class="col-md-2 mb-3">
|
||||||
|
<div class="form-check">
|
||||||
|
<label class="form-check-label">Download Uncached</label>
|
||||||
|
<input type="checkbox" disabled class="form-check-input" name="arr[${index}].download_uncached">
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
</div>
|
</div>
|
||||||
`;
|
`;
|
||||||
|
|
||||||
@@ -264,6 +342,9 @@
|
|||||||
if (config.max_file_size) {
|
if (config.max_file_size) {
|
||||||
document.querySelector('[name="max_file_size"]').value = config.max_file_size;
|
document.querySelector('[name="max_file_size"]').value = config.max_file_size;
|
||||||
}
|
}
|
||||||
|
if (config.discord_webhook_url) {
|
||||||
|
document.querySelector('[name="discord_webhook_url"]').value = config.discord_webhook_url;
|
||||||
|
}
|
||||||
|
|
||||||
});
|
});
|
||||||
|
|
||||||
@@ -328,8 +409,24 @@
|
|||||||
container.insertAdjacentHTML('beforeend', debridTemplate(debridCount));
|
container.insertAdjacentHTML('beforeend', debridTemplate(debridCount));
|
||||||
|
|
||||||
if (data) {
|
if (data) {
|
||||||
Object.entries(data).forEach(([key, value]) => {
|
|
||||||
const input = container.querySelector(`[name="debrid[${debridCount}].${key}"]`);
|
if (data.use_webdav) {
|
||||||
|
let _webCfg = container.querySelector(`.webdav-${debridCount}`);
|
||||||
|
if (_webCfg) {
|
||||||
|
_webCfg.classList.remove('d-none');
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
function setFieldValues(obj, prefix) {
|
||||||
|
Object.entries(obj).forEach(([key, value]) => {
|
||||||
|
const fieldName = prefix ? `${prefix}.${key}` : key;
|
||||||
|
|
||||||
|
// If value is an object and not null, recursively process nested fields
|
||||||
|
if (value !== null && typeof value === 'object' && !Array.isArray(value)) {
|
||||||
|
setFieldValues(value, fieldName);
|
||||||
|
} else {
|
||||||
|
// Handle leaf values (actual form fields)
|
||||||
|
const input = container.querySelector(`[name="debrid[${debridCount}].${fieldName}"]`);
|
||||||
if (input) {
|
if (input) {
|
||||||
if (input.type === 'checkbox') {
|
if (input.type === 'checkbox') {
|
||||||
input.checked = value;
|
input.checked = value;
|
||||||
@@ -337,9 +434,14 @@
|
|||||||
input.value = value;
|
input.value = value;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
}
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Start processing with the root object
|
||||||
|
setFieldValues(data, '');
|
||||||
|
}
|
||||||
|
|
||||||
debridCount++;
|
debridCount++;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -22,14 +22,22 @@
|
|||||||
<input type="text" class="form-control" id="category" name="arr" placeholder="Enter Category (e.g sonarr, radarr, radarr4k)">
|
<input type="text" class="form-control" id="category" name="arr" placeholder="Enter Category (e.g sonarr, radarr, radarr4k)">
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
<div class="mb-3">
|
<div class="row mb-3">
|
||||||
<div class="form-check">
|
<div class="col-md-2 mb-3">
|
||||||
<input class="form-check-input" type="checkbox" id="isSymlink" name="notSymlink">
|
<div class="form-check d-inline-block me-3">
|
||||||
<label class="form-check-label" for="isSymlink">
|
<input type="checkbox" class="form-check-input" id="isSymlink" name="notSymlink">
|
||||||
Download real files instead of symlinks
|
<label class="form-check-label" for="isSymlink">No Symlinks</label>
|
||||||
</label>
|
|
||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
|
<div class="col-md-2 mb-3">
|
||||||
|
<div class="form-check d-inline-block">
|
||||||
|
<input type="checkbox" class="form-check-input" name="downloadUncached" id="downloadUncached">
|
||||||
|
<label class="form-check-label" for="downloadUncached">Download Uncached</label>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
|
||||||
|
</div>
|
||||||
|
|
||||||
<button type="submit" class="btn btn-primary" id="submitDownload">
|
<button type="submit" class="btn btn-primary" id="submitDownload">
|
||||||
<i class="bi bi-cloud-upload me-2"></i>Add to Download Queue
|
<i class="bi bi-cloud-upload me-2"></i>Add to Download Queue
|
||||||
@@ -44,15 +52,19 @@
|
|||||||
const loadSavedDownloadOptions = () => {
|
const loadSavedDownloadOptions = () => {
|
||||||
const savedCategory = localStorage.getItem('downloadCategory');
|
const savedCategory = localStorage.getItem('downloadCategory');
|
||||||
const savedSymlink = localStorage.getItem('downloadSymlink');
|
const savedSymlink = localStorage.getItem('downloadSymlink');
|
||||||
|
const savedDownloadUncached = localStorage.getItem('downloadUncached');
|
||||||
document.getElementById('category').value = savedCategory || '';
|
document.getElementById('category').value = savedCategory || '';
|
||||||
document.getElementById('isSymlink').checked = savedSymlink === 'true'
|
document.getElementById('isSymlink').checked = savedSymlink === 'true';
|
||||||
|
document.getElementById('downloadUncached').checked = savedDownloadUncached === 'true';
|
||||||
};
|
};
|
||||||
|
|
||||||
const saveCurrentDownloadOptions = () => {
|
const saveCurrentDownloadOptions = () => {
|
||||||
const category = document.getElementById('category').value;
|
const category = document.getElementById('category').value;
|
||||||
const isSymlink = document.getElementById('isSymlink').checked;
|
const isSymlink = document.getElementById('isSymlink').checked;
|
||||||
|
const downloadUncached = document.getElementById('downloadUncached').checked;
|
||||||
localStorage.setItem('downloadCategory', category);
|
localStorage.setItem('downloadCategory', category);
|
||||||
localStorage.setItem('downloadSymlink', isSymlink.toString());
|
localStorage.setItem('downloadSymlink', isSymlink.toString());
|
||||||
|
localStorage.setItem('downloadUncached', downloadUncached.toString());
|
||||||
};
|
};
|
||||||
|
|
||||||
// Load the last used download options from local storage
|
// Load the last used download options from local storage
|
||||||
@@ -98,6 +110,7 @@
|
|||||||
|
|
||||||
formData.append('arr', document.getElementById('category').value);
|
formData.append('arr', document.getElementById('category').value);
|
||||||
formData.append('notSymlink', document.getElementById('isSymlink').checked);
|
formData.append('notSymlink', document.getElementById('isSymlink').checked);
|
||||||
|
formData.append('downloadUncached', document.getElementById('downloadUncached').checked);
|
||||||
|
|
||||||
const response = await fetch('/internal/add', {
|
const response = await fetch('/internal/add', {
|
||||||
method: 'POST',
|
method: 'POST',
|
||||||
@@ -114,10 +127,9 @@
|
|||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
createToast(`Successfully added ${result.results.length} torrents!`);
|
createToast(`Successfully added ${result.results.length} torrents!`);
|
||||||
|
//document.getElementById('magnetURI').value = '';
|
||||||
|
//document.getElementById('torrentFiles').value = '';
|
||||||
}
|
}
|
||||||
|
|
||||||
document.getElementById('magnetURI').value = '';
|
|
||||||
document.getElementById('torrentFiles').value = '';
|
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
createToast(`Error adding downloads: ${error.message}`, 'error');
|
createToast(`Error adding downloads: ${error.message}`, 'error');
|
||||||
} finally {
|
} finally {
|
||||||
|
|||||||
@@ -12,13 +12,23 @@
|
|||||||
</button>
|
</button>
|
||||||
<select class="form-select form-select-sm d-inline-block w-auto me-2" id="stateFilter" style="flex-shrink: 0;">
|
<select class="form-select form-select-sm d-inline-block w-auto me-2" id="stateFilter" style="flex-shrink: 0;">
|
||||||
<option value="">All States</option>
|
<option value="">All States</option>
|
||||||
|
<option value="pausedup">Completed</option>
|
||||||
<option value="downloading">Downloading</option>
|
<option value="downloading">Downloading</option>
|
||||||
<option value="pausedup">Paused</option>
|
|
||||||
<option value="error">Error</option>
|
<option value="error">Error</option>
|
||||||
</select>
|
</select>
|
||||||
<select class="form-select form-select-sm d-inline-block w-auto" id="categoryFilter">
|
<select class="form-select form-select-sm d-inline-block w-auto" id="categoryFilter">
|
||||||
<option value="">All Categories</option>
|
<option value="">All Categories</option>
|
||||||
</select>
|
</select>
|
||||||
|
<select class="form-select form-select-sm d-inline-block w-auto" id="sortSelector" style="flex-shrink: 0;">
|
||||||
|
<option value="added_on" selected>Date Added (Newest First)</option>
|
||||||
|
<option value="added_on_asc">Date Added (Oldest First)</option>
|
||||||
|
<option value="name_asc">Name (A-Z)</option>
|
||||||
|
<option value="name_desc">Name (Z-A)</option>
|
||||||
|
<option value="size_desc">Size (Largest First)</option>
|
||||||
|
<option value="size_asc">Size (Smallest First)</option>
|
||||||
|
<option value="progress_desc">Progress (Most First)</option>
|
||||||
|
<option value="progress_asc">Progress (Least First)</option>
|
||||||
|
</select>
|
||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
<div class="card-body p-0">
|
<div class="card-body p-0">
|
||||||
@@ -43,6 +53,14 @@
|
|||||||
</tbody>
|
</tbody>
|
||||||
</table>
|
</table>
|
||||||
</div>
|
</div>
|
||||||
|
<div class="d-flex justify-content-between align-items-center p-3 border-top">
|
||||||
|
<div class="pagination-info">
|
||||||
|
<span id="paginationInfo">Showing 0-0 of 0 torrents</span>
|
||||||
|
</div>
|
||||||
|
<nav aria-label="Torrents pagination">
|
||||||
|
<ul class="pagination pagination-sm m-0" id="paginationControls"></ul>
|
||||||
|
</nav>
|
||||||
|
</div>
|
||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
@@ -51,9 +69,12 @@
|
|||||||
torrentsList: document.getElementById('torrentsList'),
|
torrentsList: document.getElementById('torrentsList'),
|
||||||
categoryFilter: document.getElementById('categoryFilter'),
|
categoryFilter: document.getElementById('categoryFilter'),
|
||||||
stateFilter: document.getElementById('stateFilter'),
|
stateFilter: document.getElementById('stateFilter'),
|
||||||
|
sortSelector: document.getElementById('sortSelector'),
|
||||||
selectAll: document.getElementById('selectAll'),
|
selectAll: document.getElementById('selectAll'),
|
||||||
batchDeleteBtn: document.getElementById('batchDeleteBtn'),
|
batchDeleteBtn: document.getElementById('batchDeleteBtn'),
|
||||||
refreshBtn: document.getElementById('refreshBtn'),
|
refreshBtn: document.getElementById('refreshBtn'),
|
||||||
|
paginationControls: document.getElementById('paginationControls'),
|
||||||
|
paginationInfo: document.getElementById('paginationInfo')
|
||||||
};
|
};
|
||||||
let state = {
|
let state = {
|
||||||
torrents: [],
|
torrents: [],
|
||||||
@@ -62,6 +83,9 @@
|
|||||||
states: new Set('downloading', 'pausedup', 'error'),
|
states: new Set('downloading', 'pausedup', 'error'),
|
||||||
selectedCategory: refs.categoryFilter?.value || '',
|
selectedCategory: refs.categoryFilter?.value || '',
|
||||||
selectedState: refs.stateFilter?.value || '',
|
selectedState: refs.stateFilter?.value || '',
|
||||||
|
sortBy: refs.sortSelector?.value || 'added_on',
|
||||||
|
itemsPerPage: 20,
|
||||||
|
currentPage: 1
|
||||||
};
|
};
|
||||||
|
|
||||||
const torrentRowTemplate = (torrent) => `
|
const torrentRowTemplate = (torrent) => `
|
||||||
@@ -124,8 +148,19 @@
|
|||||||
filteredTorrents = filteredTorrents.filter(t => t.state === state.selectedState);
|
filteredTorrents = filteredTorrents.filter(t => t.state === state.selectedState);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Sort the filtered torrents
|
||||||
|
filteredTorrents = sortTorrents(filteredTorrents, state.sortBy);
|
||||||
|
|
||||||
|
const totalPages = Math.ceil(filteredTorrents.length / state.itemsPerPage);
|
||||||
|
if (state.currentPage > totalPages && totalPages > 0) {
|
||||||
|
state.currentPage = totalPages;
|
||||||
|
}
|
||||||
|
|
||||||
|
const paginatedTorrents = paginateTorrents(filteredTorrents);
|
||||||
|
|
||||||
// Update the torrents list table
|
// Update the torrents list table
|
||||||
refs.torrentsList.innerHTML = filteredTorrents.map(torrent => torrentRowTemplate(torrent)).join('');
|
refs.torrentsList.innerHTML = paginatedTorrents.map(torrent => torrentRowTemplate(torrent)).join('');
|
||||||
|
|
||||||
|
|
||||||
// Update the category filter dropdown
|
// Update the category filter dropdown
|
||||||
const currentCategories = Array.from(state.categories).sort();
|
const currentCategories = Array.from(state.categories).sort();
|
||||||
@@ -162,6 +197,56 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
function sortTorrents(torrents, sortBy) {
|
||||||
|
// Create a copy of the array to avoid mutating the original
|
||||||
|
const result = [...torrents];
|
||||||
|
|
||||||
|
// Parse the sort value to determine field and direction
|
||||||
|
const [field, direction] = sortBy.includes('_asc') || sortBy.includes('_desc')
|
||||||
|
? [sortBy.split('_').slice(0, -1).join('_'), sortBy.endsWith('_asc') ? 'asc' : 'desc']
|
||||||
|
: [sortBy, 'desc']; // Default to descending if not specified
|
||||||
|
|
||||||
|
result.sort((a, b) => {
|
||||||
|
let valueA, valueB;
|
||||||
|
|
||||||
|
// Get values based on field
|
||||||
|
switch (field) {
|
||||||
|
case 'name':
|
||||||
|
valueA = a.name?.toLowerCase() || '';
|
||||||
|
valueB = b.name?.toLowerCase() || '';
|
||||||
|
break;
|
||||||
|
case 'size':
|
||||||
|
valueA = a.size || 0;
|
||||||
|
valueB = b.size || 0;
|
||||||
|
break;
|
||||||
|
case 'progress':
|
||||||
|
valueA = a.progress || 0;
|
||||||
|
valueB = b.progress || 0;
|
||||||
|
break;
|
||||||
|
case 'added_on':
|
||||||
|
valueA = a.added_on || 0;
|
||||||
|
valueB = b.added_on || 0;
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
valueA = a[field] || 0;
|
||||||
|
valueB = b[field] || 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Compare based on type
|
||||||
|
if (typeof valueA === 'string') {
|
||||||
|
return direction === 'asc'
|
||||||
|
? valueA.localeCompare(valueB)
|
||||||
|
: valueB.localeCompare(valueA);
|
||||||
|
} else {
|
||||||
|
return direction === 'asc'
|
||||||
|
? valueA - valueB
|
||||||
|
: valueB - valueA;
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
async function deleteTorrent(hash, category) {
|
async function deleteTorrent(hash, category) {
|
||||||
if (!confirm('Are you sure you want to delete this torrent?')) return;
|
if (!confirm('Are you sure you want to delete this torrent?')) return;
|
||||||
|
|
||||||
@@ -194,6 +279,83 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
function paginateTorrents(torrents) {
|
||||||
|
const totalItems = torrents.length;
|
||||||
|
const totalPages = Math.ceil(totalItems / state.itemsPerPage);
|
||||||
|
const startIndex = (state.currentPage - 1) * state.itemsPerPage;
|
||||||
|
const endIndex = Math.min(startIndex + state.itemsPerPage, totalItems);
|
||||||
|
|
||||||
|
// Update pagination info text
|
||||||
|
refs.paginationInfo.textContent =
|
||||||
|
`Showing ${totalItems > 0 ? startIndex + 1 : 0}-${endIndex} of ${totalItems} torrents`;
|
||||||
|
|
||||||
|
// Generate pagination controls
|
||||||
|
refs.paginationControls.innerHTML = '';
|
||||||
|
|
||||||
|
if (totalPages <= 1) {
|
||||||
|
return torrents.slice(startIndex, endIndex);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Previous button
|
||||||
|
const prevLi = document.createElement('li');
|
||||||
|
prevLi.className = `page-item ${state.currentPage === 1 ? 'disabled' : ''}`;
|
||||||
|
prevLi.innerHTML = `
|
||||||
|
<a class="page-link" href="#" aria-label="Previous" ${state.currentPage === 1 ? 'tabindex="-1" aria-disabled="true"' : ''}>
|
||||||
|
<span aria-hidden="true">«</span>
|
||||||
|
</a>
|
||||||
|
`;
|
||||||
|
if (state.currentPage > 1) {
|
||||||
|
prevLi.querySelector('a').addEventListener('click', (e) => {
|
||||||
|
e.preventDefault();
|
||||||
|
state.currentPage--;
|
||||||
|
updateUI();
|
||||||
|
});
|
||||||
|
}
|
||||||
|
refs.paginationControls.appendChild(prevLi);
|
||||||
|
|
||||||
|
// Page numbers
|
||||||
|
const maxPageButtons = 5;
|
||||||
|
let startPage = Math.max(1, state.currentPage - Math.floor(maxPageButtons / 2));
|
||||||
|
let endPage = Math.min(totalPages, startPage + maxPageButtons - 1);
|
||||||
|
|
||||||
|
if (endPage - startPage + 1 < maxPageButtons) {
|
||||||
|
startPage = Math.max(1, endPage - maxPageButtons + 1);
|
||||||
|
}
|
||||||
|
|
||||||
|
for (let i = startPage; i <= endPage; i++) {
|
||||||
|
const pageLi = document.createElement('li');
|
||||||
|
pageLi.className = `page-item ${i === state.currentPage ? 'active' : ''}`;
|
||||||
|
pageLi.innerHTML = `<a class="page-link" href="#">${i}</a>`;
|
||||||
|
|
||||||
|
pageLi.querySelector('a').addEventListener('click', (e) => {
|
||||||
|
e.preventDefault();
|
||||||
|
state.currentPage = i;
|
||||||
|
updateUI();
|
||||||
|
});
|
||||||
|
|
||||||
|
refs.paginationControls.appendChild(pageLi);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Next button
|
||||||
|
const nextLi = document.createElement('li');
|
||||||
|
nextLi.className = `page-item ${state.currentPage === totalPages ? 'disabled' : ''}`;
|
||||||
|
nextLi.innerHTML = `
|
||||||
|
<a class="page-link" href="#" aria-label="Next" ${state.currentPage === totalPages ? 'tabindex="-1" aria-disabled="true"' : ''}>
|
||||||
|
<span aria-hidden="true">»</span>
|
||||||
|
</a>
|
||||||
|
`;
|
||||||
|
if (state.currentPage < totalPages) {
|
||||||
|
nextLi.querySelector('a').addEventListener('click', (e) => {
|
||||||
|
e.preventDefault();
|
||||||
|
state.currentPage++;
|
||||||
|
updateUI();
|
||||||
|
});
|
||||||
|
}
|
||||||
|
refs.paginationControls.appendChild(nextLi);
|
||||||
|
|
||||||
|
return torrents.slice(startIndex, endIndex);
|
||||||
|
}
|
||||||
|
|
||||||
document.addEventListener('DOMContentLoaded', () => {
|
document.addEventListener('DOMContentLoaded', () => {
|
||||||
loadTorrents();
|
loadTorrents();
|
||||||
const refreshInterval = setInterval(loadTorrents, 5000);
|
const refreshInterval = setInterval(loadTorrents, 5000);
|
||||||
@@ -230,11 +392,19 @@
|
|||||||
|
|
||||||
refs.categoryFilter.addEventListener('change', (e) => {
|
refs.categoryFilter.addEventListener('change', (e) => {
|
||||||
state.selectedCategory = e.target.value;
|
state.selectedCategory = e.target.value;
|
||||||
|
state.currentPage = 1; // Reset to first page
|
||||||
updateUI();
|
updateUI();
|
||||||
});
|
});
|
||||||
|
|
||||||
refs.stateFilter.addEventListener('change', (e) => {
|
refs.stateFilter.addEventListener('change', (e) => {
|
||||||
state.selectedState = e.target.value;
|
state.selectedState = e.target.value;
|
||||||
|
state.currentPage = 1; // Reset to first page
|
||||||
|
updateUI();
|
||||||
|
});
|
||||||
|
|
||||||
|
refs.sortSelector.addEventListener('change', (e) => {
|
||||||
|
state.sortBy = e.target.value;
|
||||||
|
state.currentPage = 1; // Reset to first page
|
||||||
updateUI();
|
updateUI();
|
||||||
});
|
});
|
||||||
|
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
{{ define "layout" }}
|
{{ define "layout" }}
|
||||||
<!DOCTYPE html>
|
<!DOCTYPE html>
|
||||||
<html lang="en">
|
<html lang="en" data-bs-theme="light">
|
||||||
<head>
|
<head>
|
||||||
<meta charset="UTF-8">
|
<meta charset="UTF-8">
|
||||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||||
@@ -13,16 +13,36 @@
|
|||||||
:root {
|
:root {
|
||||||
--primary-color: #2563eb;
|
--primary-color: #2563eb;
|
||||||
--secondary-color: #1e40af;
|
--secondary-color: #1e40af;
|
||||||
|
--bg-color: #f8fafc;
|
||||||
|
--card-bg: #ffffff;
|
||||||
|
--text-color: #333333;
|
||||||
|
--card-shadow: 0 4px 6px rgba(0, 0, 0, 0.1);
|
||||||
|
--nav-shadow: 0 2px 4px rgba(0, 0, 0, 0.1);
|
||||||
|
--border-color: #e5e7eb;
|
||||||
|
}
|
||||||
|
|
||||||
|
[data-bs-theme="dark"] {
|
||||||
|
--primary-color: #3b82f6;
|
||||||
|
--secondary-color: #60a5fa;
|
||||||
|
--bg-color: #1e293b;
|
||||||
|
--card-bg: #283548;
|
||||||
|
--text-color: #e5e7eb;
|
||||||
|
--card-shadow: 0 4px 6px rgba(0, 0, 0, 0.3);
|
||||||
|
--nav-shadow: 0 2px 4px rgba(0, 0, 0, 0.3);
|
||||||
|
--border-color: #4b5563;
|
||||||
}
|
}
|
||||||
|
|
||||||
body {
|
body {
|
||||||
background-color: #f8fafc;
|
background-color: var(--bg-color);
|
||||||
|
color: var(--text-color);
|
||||||
|
transition: background-color 0.3s ease, color 0.3s ease;
|
||||||
}
|
}
|
||||||
|
|
||||||
.navbar {
|
.navbar {
|
||||||
padding: 1rem 0;
|
padding: 1rem 0;
|
||||||
background: #fff !important;
|
background: var(--card-bg) !important;
|
||||||
box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1);
|
box-shadow: var(--nav-shadow);
|
||||||
|
border-bottom: 1px solid var(--border-color);
|
||||||
}
|
}
|
||||||
|
|
||||||
.navbar-brand {
|
.navbar-brand {
|
||||||
@@ -34,12 +54,13 @@
|
|||||||
.card {
|
.card {
|
||||||
border: none;
|
border: none;
|
||||||
border-radius: 10px;
|
border-radius: 10px;
|
||||||
box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1);
|
box-shadow: var(--card-shadow);
|
||||||
|
background-color: var(--card-bg);
|
||||||
}
|
}
|
||||||
|
|
||||||
.nav-link {
|
.nav-link {
|
||||||
padding: 0.5rem 1rem;
|
padding: 0.5rem 1rem;
|
||||||
color: #4b5563;
|
color: var(--text-color);
|
||||||
}
|
}
|
||||||
|
|
||||||
.nav-link.active {
|
.nav-link.active {
|
||||||
@@ -54,7 +75,60 @@
|
|||||||
.badge#channel-badge.beta {
|
.badge#channel-badge.beta {
|
||||||
background-color: #fd7e14;
|
background-color: #fd7e14;
|
||||||
}
|
}
|
||||||
|
.badge#channel-badge.nightly {
|
||||||
|
background-color: #6c757d;
|
||||||
|
}
|
||||||
|
|
||||||
|
.table {
|
||||||
|
color: var(--text-color);
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Dark mode specific overrides */
|
||||||
|
[data-bs-theme="dark"] .navbar-light .navbar-toggler-icon {
|
||||||
|
filter: invert(1);
|
||||||
|
}
|
||||||
|
|
||||||
|
[data-bs-theme="dark"] .form-control,
|
||||||
|
[data-bs-theme="dark"] .form-select {
|
||||||
|
background-color: #374151;
|
||||||
|
color: #e5e7eb;
|
||||||
|
border-color: #4b5563;
|
||||||
|
}
|
||||||
|
|
||||||
|
[data-bs-theme="dark"] .form-control:focus,
|
||||||
|
[data-bs-theme="dark"] .form-select:focus {
|
||||||
|
border-color: var(--primary-color);
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Theme toggle button styles */
|
||||||
|
.theme-toggle {
|
||||||
|
cursor: pointer;
|
||||||
|
padding: 0.5rem;
|
||||||
|
border-radius: 50%;
|
||||||
|
width: 38px;
|
||||||
|
height: 38px;
|
||||||
|
display: flex;
|
||||||
|
align-items: center;
|
||||||
|
justify-content: center;
|
||||||
|
transition: background-color 0.3s;
|
||||||
|
}
|
||||||
|
|
||||||
|
.theme-toggle:hover {
|
||||||
|
background-color: rgba(128, 128, 128, 0.2);
|
||||||
|
}
|
||||||
</style>
|
</style>
|
||||||
|
<script>
|
||||||
|
(function() {
|
||||||
|
const savedTheme = localStorage.getItem('theme');
|
||||||
|
if (savedTheme) {
|
||||||
|
document.documentElement.setAttribute('data-bs-theme', savedTheme);
|
||||||
|
} else if (window.matchMedia && window.matchMedia('(prefers-color-scheme: dark)').matches) {
|
||||||
|
document.documentElement.setAttribute('data-bs-theme', 'dark');
|
||||||
|
} else {
|
||||||
|
document.documentElement.setAttribute('data-bs-theme', 'light');
|
||||||
|
}
|
||||||
|
})();
|
||||||
|
</script>
|
||||||
</head>
|
</head>
|
||||||
<body>
|
<body>
|
||||||
<div class="toast-container position-fixed bottom-0 end-0 p-3">
|
<div class="toast-container position-fixed bottom-0 end-0 p-3">
|
||||||
@@ -87,7 +161,12 @@
|
|||||||
</li>
|
</li>
|
||||||
<li class="nav-item">
|
<li class="nav-item">
|
||||||
<a class="nav-link {{if eq .Page "config"}}active{{end}}" href="/config">
|
<a class="nav-link {{if eq .Page "config"}}active{{end}}" href="/config">
|
||||||
<i class="bi bi-gear me-1"></i>Config
|
<i class="bi bi-gear me-1"></i>Settings
|
||||||
|
</a>
|
||||||
|
</li>
|
||||||
|
<li class="nav-item">
|
||||||
|
<a class="nav-link" href="/webdav" target="_blank">
|
||||||
|
<i class="bi bi-cloud me-1"></i>WebDAV
|
||||||
</a>
|
</a>
|
||||||
</li>
|
</li>
|
||||||
<li class="nav-item">
|
<li class="nav-item">
|
||||||
@@ -97,6 +176,10 @@
|
|||||||
</li>
|
</li>
|
||||||
</ul>
|
</ul>
|
||||||
<div class="d-flex align-items-center">
|
<div class="d-flex align-items-center">
|
||||||
|
<div class="theme-toggle me-3" id="themeToggle" title="Toggle dark mode">
|
||||||
|
<i class="bi bi-sun-fill" id="lightIcon"></i>
|
||||||
|
<i class="bi bi-moon-fill d-none" id="darkIcon"></i>
|
||||||
|
</div>
|
||||||
<span class="badge me-2" id="channel-badge">Loading...</span>
|
<span class="badge me-2" id="channel-badge">Loading...</span>
|
||||||
<span class="badge bg-primary" id="version-badge">Loading...</span>
|
<span class="badge bg-primary" id="version-badge">Loading...</span>
|
||||||
</div>
|
</div>
|
||||||
@@ -135,7 +218,7 @@
|
|||||||
success: 5000,
|
success: 5000,
|
||||||
warning: 10000,
|
warning: 10000,
|
||||||
error: 15000
|
error: 15000
|
||||||
}
|
};
|
||||||
|
|
||||||
const toastContainer = document.querySelector('.toast-container');
|
const toastContainer = document.querySelector('.toast-container');
|
||||||
const toastId = `toast-${Date.now()}`;
|
const toastId = `toast-${Date.now()}`;
|
||||||
@@ -169,6 +252,55 @@
|
|||||||
});
|
});
|
||||||
};
|
};
|
||||||
|
|
||||||
|
// Theme management
|
||||||
|
const themeToggle = document.getElementById('themeToggle');
|
||||||
|
const lightIcon = document.getElementById('lightIcon');
|
||||||
|
const darkIcon = document.getElementById('darkIcon');
|
||||||
|
const htmlElement = document.documentElement;
|
||||||
|
|
||||||
|
// Function to set the theme
|
||||||
|
function setTheme(theme) {
|
||||||
|
htmlElement.setAttribute('data-bs-theme', theme);
|
||||||
|
localStorage.setItem('theme', theme);
|
||||||
|
|
||||||
|
if (theme === 'dark') {
|
||||||
|
lightIcon.classList.add('d-none');
|
||||||
|
darkIcon.classList.remove('d-none');
|
||||||
|
} else {
|
||||||
|
lightIcon.classList.remove('d-none');
|
||||||
|
darkIcon.classList.add('d-none');
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check for saved theme preference or use system preference
|
||||||
|
const savedTheme = localStorage.getItem('theme');
|
||||||
|
|
||||||
|
if (savedTheme) {
|
||||||
|
setTheme(savedTheme);
|
||||||
|
} else {
|
||||||
|
// Check for system preference
|
||||||
|
if (window.matchMedia && window.matchMedia('(prefers-color-scheme: dark)').matches) {
|
||||||
|
setTheme('dark');
|
||||||
|
} else {
|
||||||
|
setTheme('light');
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Toggle theme when button is clicked
|
||||||
|
themeToggle.addEventListener('click', () => {
|
||||||
|
const currentTheme = htmlElement.getAttribute('data-bs-theme');
|
||||||
|
setTheme(currentTheme === 'dark' ? 'light' : 'dark');
|
||||||
|
});
|
||||||
|
|
||||||
|
// Listen for system theme changes
|
||||||
|
if (window.matchMedia) {
|
||||||
|
window.matchMedia('(prefers-color-scheme: dark)').addEventListener('change', e => {
|
||||||
|
if (!localStorage.getItem('theme')) {
|
||||||
|
setTheme(e.matches ? 'dark' : 'light');
|
||||||
|
}
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
document.addEventListener('DOMContentLoaded', function() {
|
document.addEventListener('DOMContentLoaded', function() {
|
||||||
fetch('/internal/version')
|
fetch('/internal/version')
|
||||||
.then(response => response.json())
|
.then(response => response.json())
|
||||||
@@ -177,11 +309,13 @@
|
|||||||
const channelBadge = document.getElementById('channel-badge');
|
const channelBadge = document.getElementById('channel-badge');
|
||||||
|
|
||||||
// Add url to version badge
|
// Add url to version badge
|
||||||
versionBadge.innerHTML = `<a href="https://github.com/sirrobot01/debrid-blackhole/releases/tag/${data.version}" target="_blank" class="text-white">${data.version}</a>`;
|
versionBadge.innerHTML = `<a href="https://github.com/sirrobot01/decypharr/releases/tag/${data.version}" target="_blank" class="text-white">${data.version}</a>`;
|
||||||
channelBadge.textContent = data.channel.charAt(0).toUpperCase() + data.channel.slice(1);
|
channelBadge.textContent = data.channel.charAt(0).toUpperCase() + data.channel.slice(1);
|
||||||
|
|
||||||
if (data.channel === 'beta') {
|
if (data.channel === 'beta') {
|
||||||
channelBadge.classList.add('beta');
|
channelBadge.classList.add('beta');
|
||||||
|
} else if (data.channel === 'nightly') {
|
||||||
|
channelBadge.classList.add('nightly');
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
.catch(error => {
|
.catch(error => {
|
||||||
|
|||||||
@@ -8,7 +8,7 @@
|
|||||||
<form id="repairForm">
|
<form id="repairForm">
|
||||||
<div class="mb-3">
|
<div class="mb-3">
|
||||||
<label for="arrSelect" class="form-label">Select Arr Instance</label>
|
<label for="arrSelect" class="form-label">Select Arr Instance</label>
|
||||||
<select class="form-select" id="arrSelect" required>
|
<select class="form-select" id="arrSelect">
|
||||||
<option value="">Select an Arr instance</option>
|
<option value="">Select an Arr instance</option>
|
||||||
</select>
|
</select>
|
||||||
</div>
|
</div>
|
||||||
@@ -20,11 +20,20 @@
|
|||||||
<small class="text-muted">Enter TV DB ids for Sonarr, TM DB ids for Radarr</small>
|
<small class="text-muted">Enter TV DB ids for Sonarr, TM DB ids for Radarr</small>
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
<div class="mb-3">
|
<div class="mb-2">
|
||||||
<div class="form-check">
|
<div class="form-check">
|
||||||
<input class="form-check-input" type="checkbox" id="isAsync" checked>
|
<input class="form-check-input" type="checkbox" id="isAsync" checked>
|
||||||
<label class="form-check-label" for="isAsync">
|
<label class="form-check-label" for="isAsync">
|
||||||
Run repair in background
|
Run in background
|
||||||
|
</label>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div class="mb-3">
|
||||||
|
<div class="form-check">
|
||||||
|
<input class="form-check-input" type="checkbox" id="autoProcess">
|
||||||
|
<label class="form-check-label" for="autoProcess">
|
||||||
|
Auto Process(this will delete and re-search broken media)
|
||||||
</label>
|
</label>
|
||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
@@ -35,7 +44,111 @@
|
|||||||
</form>
|
</form>
|
||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
|
<!-- Jobs Table Section -->
|
||||||
|
<div class="card mt-4">
|
||||||
|
<div class="card-header d-flex justify-content-between align-items-center">
|
||||||
|
<h4 class="mb-0"><i class="bi bi-list-task me-2"></i>Repair Jobs</h4>
|
||||||
|
<div>
|
||||||
|
<button id="deleteSelectedJobs" class="btn btn-sm btn-danger me-2" disabled>
|
||||||
|
<i class="bi bi-trash me-1"></i>Delete Selected
|
||||||
|
</button>
|
||||||
|
<button id="refreshJobs" class="btn btn-sm btn-outline-secondary">
|
||||||
|
<i class="bi bi-arrow-clockwise me-1"></i>Refresh
|
||||||
|
</button>
|
||||||
</div>
|
</div>
|
||||||
|
</div>
|
||||||
|
<div class="card-body">
|
||||||
|
<div class="table-responsive">
|
||||||
|
<table class="table table-striped table-hover" id="jobsTable">
|
||||||
|
<thead>
|
||||||
|
<tr>
|
||||||
|
<th>
|
||||||
|
<div class="form-check">
|
||||||
|
<input class="form-check-input" type="checkbox" id="selectAllJobs">
|
||||||
|
</div>
|
||||||
|
</th>
|
||||||
|
<th>ID</th>
|
||||||
|
<th>Arr Instances</th>
|
||||||
|
<th>Started</th>
|
||||||
|
<th>Status</th>
|
||||||
|
<th>Broken Items</th>
|
||||||
|
<th>Actions</th>
|
||||||
|
</tr>
|
||||||
|
</thead>
|
||||||
|
<tbody id="jobsTableBody">
|
||||||
|
<!-- Jobs will be loaded here -->
|
||||||
|
</tbody>
|
||||||
|
</table>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<!-- Pagination -->
|
||||||
|
<nav aria-label="Jobs pagination" class="mt-3">
|
||||||
|
<ul class="pagination justify-content-center" id="jobsPagination">
|
||||||
|
<!-- Pagination will be generated here -->
|
||||||
|
</ul>
|
||||||
|
</nav>
|
||||||
|
|
||||||
|
<div id="noJobsMessage" class="text-center py-3 d-none">
|
||||||
|
<p class="text-muted">No repair jobs found</p>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<!-- Job Details Modal -->
|
||||||
|
<div class="modal fade" id="jobDetailsModal" tabindex="-1" aria-labelledby="jobDetailsModalLabel" aria-hidden="true">
|
||||||
|
<div class="modal-dialog modal-lg">
|
||||||
|
<div class="modal-content">
|
||||||
|
<div class="modal-header">
|
||||||
|
<h5 class="modal-title" id="jobDetailsModalLabel">Job Details</h5>
|
||||||
|
<button type="button" class="btn-close" data-bs-dismiss="modal" aria-label="Close"></button>
|
||||||
|
</div>
|
||||||
|
<div class="modal-body">
|
||||||
|
<div class="row mb-3">
|
||||||
|
<div class="col-md-6">
|
||||||
|
<p><strong>Job ID:</strong> <span id="modalJobId"></span></p>
|
||||||
|
<p><strong>Status:</strong> <span id="modalJobStatus"></span></p>
|
||||||
|
<p><strong>Started:</strong> <span id="modalJobStarted"></span></p>
|
||||||
|
<p><strong>Completed:</strong> <span id="modalJobCompleted"></span></p>
|
||||||
|
</div>
|
||||||
|
<div class="col-md-6">
|
||||||
|
<p><strong>Arrs:</strong> <span id="modalJobArrs"></span></p>
|
||||||
|
<p><strong>Media IDs:</strong> <span id="modalJobMediaIds"></span></p>
|
||||||
|
<p><strong>Auto Process:</strong> <span id="modalJobAutoProcess"></span></p>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div id="errorContainer" class="alert alert-danger mb-3 d-none">
|
||||||
|
<strong>Error:</strong> <span id="modalJobError"></span>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<h6>Broken Items</h6>
|
||||||
|
<div class="table-responsive">
|
||||||
|
<table class="table table-sm table-striped">
|
||||||
|
<thead>
|
||||||
|
<tr>
|
||||||
|
<th>Arr</th>
|
||||||
|
<th>Path</th>
|
||||||
|
</tr>
|
||||||
|
</thead>
|
||||||
|
<tbody id="brokenItemsTableBody">
|
||||||
|
<!-- Broken items will be loaded here -->
|
||||||
|
</tbody>
|
||||||
|
</table>
|
||||||
|
</div>
|
||||||
|
<div id="noBrokenItemsMessage" class="text-center py-2 d-none">
|
||||||
|
<p class="text-muted">No broken items found</p>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
<div class="modal-footer">
|
||||||
|
<button type="button" class="btn btn-secondary" data-bs-dismiss="modal">Close</button>
|
||||||
|
<button type="button" class="btn btn-primary" id="processJobBtn">Process Items</button>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
<script>
|
<script>
|
||||||
document.addEventListener('DOMContentLoaded', () => {
|
document.addEventListener('DOMContentLoaded', () => {
|
||||||
// Load Arr instances
|
// Load Arr instances
|
||||||
@@ -61,12 +174,6 @@
|
|||||||
submitBtn.innerHTML = '<span class="spinner-border spinner-border-sm me-2"></span>Repairing...';
|
submitBtn.innerHTML = '<span class="spinner-border spinner-border-sm me-2"></span>Repairing...';
|
||||||
let mediaIds = document.getElementById('mediaIds').value.split(',').map(id => id.trim());
|
let mediaIds = document.getElementById('mediaIds').value.split(',').map(id => id.trim());
|
||||||
let arr = document.getElementById('arrSelect').value;
|
let arr = document.getElementById('arrSelect').value;
|
||||||
if (!arr) {
|
|
||||||
createToast('Please select an Arr instance', 'warning');
|
|
||||||
submitBtn.disabled = false;
|
|
||||||
submitBtn.innerHTML = originalText;
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
try {
|
try {
|
||||||
const response = await fetch('/internal/repair', {
|
const response = await fetch('/internal/repair', {
|
||||||
method: 'POST',
|
method: 'POST',
|
||||||
@@ -74,14 +181,16 @@
|
|||||||
'Content-Type': 'application/json'
|
'Content-Type': 'application/json'
|
||||||
},
|
},
|
||||||
body: JSON.stringify({
|
body: JSON.stringify({
|
||||||
arr: document.getElementById('arrSelect').value,
|
arr: arr,
|
||||||
mediaIds: mediaIds,
|
mediaIds: mediaIds,
|
||||||
async: document.getElementById('isAsync').checked
|
async: document.getElementById('isAsync').checked,
|
||||||
|
autoProcess: document.getElementById('autoProcess').checked,
|
||||||
})
|
})
|
||||||
});
|
});
|
||||||
|
|
||||||
if (!response.ok) throw new Error(await response.text());
|
if (!response.ok) throw new Error(await response.text());
|
||||||
createToast('Repair process initiated successfully!');
|
createToast('Repair process initiated successfully!');
|
||||||
|
loadJobs(1); // Refresh jobs after submission
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
createToast(`Error starting repair: ${error.message}`, 'error');
|
createToast(`Error starting repair: ${error.message}`, 'error');
|
||||||
} finally {
|
} finally {
|
||||||
@@ -89,6 +198,378 @@
|
|||||||
submitBtn.innerHTML = originalText;
|
submitBtn.innerHTML = originalText;
|
||||||
}
|
}
|
||||||
});
|
});
|
||||||
|
|
||||||
|
// Jobs table pagination variables
|
||||||
|
let currentPage = 1;
|
||||||
|
const itemsPerPage = 10;
|
||||||
|
let allJobs = [];
|
||||||
|
|
||||||
|
// Load jobs function
|
||||||
|
async function loadJobs(page) {
|
||||||
|
try {
|
||||||
|
const response = await fetch('/internal/repair/jobs');
|
||||||
|
if (!response.ok) throw new Error('Failed to fetch jobs');
|
||||||
|
|
||||||
|
allJobs = await response.json();
|
||||||
|
renderJobsTable(page);
|
||||||
|
} catch (error) {
|
||||||
|
console.error('Error loading jobs:', error);
|
||||||
|
createToast(`Error loading jobs: ${error.message}`, 'error');
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Render jobs table with pagination
|
||||||
|
function renderJobsTable(page) {
|
||||||
|
const tableBody = document.getElementById('jobsTableBody');
|
||||||
|
const paginationElement = document.getElementById('jobsPagination');
|
||||||
|
const noJobsMessage = document.getElementById('noJobsMessage');
|
||||||
|
const deleteSelectedBtn = document.getElementById('deleteSelectedJobs');
|
||||||
|
|
||||||
|
// Clear previous content
|
||||||
|
tableBody.innerHTML = '';
|
||||||
|
paginationElement.innerHTML = '';
|
||||||
|
|
||||||
|
document.getElementById('selectAllJobs').checked = false;
|
||||||
|
deleteSelectedBtn.disabled = true;
|
||||||
|
|
||||||
|
if (allJobs.length === 0) {
|
||||||
|
noJobsMessage.classList.remove('d-none');
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
noJobsMessage.classList.add('d-none');
|
||||||
|
|
||||||
|
// Calculate pagination
|
||||||
|
const totalPages = Math.ceil(allJobs.length / itemsPerPage);
|
||||||
|
const startIndex = (page - 1) * itemsPerPage;
|
||||||
|
const endIndex = Math.min(startIndex + itemsPerPage, allJobs.length);
|
||||||
|
|
||||||
|
// Display jobs for current page
|
||||||
|
for (let i = startIndex; i < endIndex; i++) {
|
||||||
|
const job = allJobs[i];
|
||||||
|
const row = document.createElement('tr');
|
||||||
|
|
||||||
|
// Format date
|
||||||
|
const startedDate = new Date(job.created_at);
|
||||||
|
const formattedDate = startedDate.toLocaleString();
|
||||||
|
|
||||||
|
// Determine status
|
||||||
|
let status = 'In Progress';
|
||||||
|
let statusClass = 'text-primary';
|
||||||
|
let canDelete = job.status !== "started";
|
||||||
|
let totalItems = job.broken_items ? Object.values(job.broken_items).reduce((sum, arr) => sum + arr.length, 0) : 0;
|
||||||
|
|
||||||
|
if (job.status === 'failed') {
|
||||||
|
status = 'Failed';
|
||||||
|
statusClass = 'text-danger';
|
||||||
|
} else if (job.status === 'completed') {
|
||||||
|
status = 'Completed';
|
||||||
|
statusClass = 'text-success';
|
||||||
|
} else if (job.status === 'pending') {
|
||||||
|
status = 'Pending';
|
||||||
|
statusClass = 'text-warning';
|
||||||
|
} else if (job.status === "processing") {
|
||||||
|
status = 'Processing';
|
||||||
|
statusClass = 'text-info';
|
||||||
|
}
|
||||||
|
|
||||||
|
row.innerHTML = `
|
||||||
|
<td>
|
||||||
|
<div class="form-check">
|
||||||
|
<input class="form-check-input job-checkbox" type="checkbox" value="${job.id}"
|
||||||
|
${canDelete ? '' : 'disabled'} data-can-delete="${canDelete}">
|
||||||
|
</div>
|
||||||
|
</td>
|
||||||
|
<td><a href="#" class="text-link view-job" data-id="${job.id}"><small>${job.id.substring(0, 8)}</small></a></td>
|
||||||
|
<td>${job.arrs.join(', ')}</td>
|
||||||
|
<td><small>${formattedDate}</small></td>
|
||||||
|
<td><span class="${statusClass}">${status}</span></td>
|
||||||
|
<td>${totalItems}</td>
|
||||||
|
<td>
|
||||||
|
${job.status === "pending" ?
|
||||||
|
`<button class="btn btn-sm btn-primary process-job" data-id="${job.id}">
|
||||||
|
<i class="bi bi-play-fill"></i> Process
|
||||||
|
</button>` :
|
||||||
|
`<button class="btn btn-sm btn-primary" disabled>
|
||||||
|
<i class="bi bi-eye"></i> Process
|
||||||
|
</button>`
|
||||||
|
}
|
||||||
|
${canDelete ?
|
||||||
|
`<button class="btn btn-sm btn-danger delete-job" data-id="${job.id}">
|
||||||
|
<i class="bi bi-trash"></i>
|
||||||
|
</button>` :
|
||||||
|
`<button class="btn btn-sm btn-danger" disabled>
|
||||||
|
<i class="bi bi-trash"></i>
|
||||||
|
</button>`
|
||||||
|
}
|
||||||
|
</td>
|
||||||
|
`;
|
||||||
|
|
||||||
|
tableBody.appendChild(row);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create pagination
|
||||||
|
if (totalPages > 1) {
|
||||||
|
// Previous button
|
||||||
|
const prevLi = document.createElement('li');
|
||||||
|
prevLi.className = `page-item ${page === 1 ? 'disabled' : ''}`;
|
||||||
|
prevLi.innerHTML = `<a class="page-link" href="#" aria-label="Previous" ${page !== 1 ? `data-page="${page - 1}"` : ''}>
|
||||||
|
<span aria-hidden="true">«</span>
|
||||||
|
</a>`;
|
||||||
|
paginationElement.appendChild(prevLi);
|
||||||
|
|
||||||
|
// Page numbers
|
||||||
|
for (let i = 1; i <= totalPages; i++) {
|
||||||
|
const pageLi = document.createElement('li');
|
||||||
|
pageLi.className = `page-item ${i === page ? 'active' : ''}`;
|
||||||
|
pageLi.innerHTML = `<a class="page-link" href="#" data-page="${i}">${i}</a>`;
|
||||||
|
paginationElement.appendChild(pageLi);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Next button
|
||||||
|
const nextLi = document.createElement('li');
|
||||||
|
nextLi.className = `page-item ${page === totalPages ? 'disabled' : ''}`;
|
||||||
|
nextLi.innerHTML = `<a class="page-link" href="#" aria-label="Next" ${page !== totalPages ? `data-page="${page + 1}"` : ''}>
|
||||||
|
<span aria-hidden="true">»</span>
|
||||||
|
</a>`;
|
||||||
|
paginationElement.appendChild(nextLi);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add event listeners to pagination
|
||||||
|
document.querySelectorAll('#jobsPagination a[data-page]').forEach(link => {
|
||||||
|
link.addEventListener('click', (e) => {
|
||||||
|
e.preventDefault();
|
||||||
|
const newPage = parseInt(e.currentTarget.dataset.page);
|
||||||
|
currentPage = newPage;
|
||||||
|
renderJobsTable(newPage);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
document.querySelectorAll('.job-checkbox').forEach(checkbox => {
|
||||||
|
checkbox.addEventListener('change', updateDeleteButtonState);
|
||||||
|
});
|
||||||
|
|
||||||
|
document.querySelectorAll('.delete-job').forEach(button => {
|
||||||
|
button.addEventListener('click', (e) => {
|
||||||
|
const jobId = e.currentTarget.dataset.id;
|
||||||
|
deleteJob(jobId);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
// Add event listeners to action buttons
|
||||||
|
document.querySelectorAll('.process-job').forEach(button => {
|
||||||
|
button.addEventListener('click', (e) => {
|
||||||
|
const jobId = e.currentTarget.dataset.id;
|
||||||
|
processJob(jobId);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
document.querySelectorAll('.view-job').forEach(button => {
|
||||||
|
button.addEventListener('click', (e) => {
|
||||||
|
const jobId = e.currentTarget.dataset.id;
|
||||||
|
viewJobDetails(jobId);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
document.getElementById('selectAllJobs').addEventListener('change', function() {
|
||||||
|
const isChecked = this.checked;
|
||||||
|
document.querySelectorAll('.job-checkbox:not(:disabled)').forEach(checkbox => {
|
||||||
|
checkbox.checked = isChecked;
|
||||||
|
});
|
||||||
|
updateDeleteButtonState();
|
||||||
|
});
|
||||||
|
|
||||||
|
// Function to update delete button state
|
||||||
|
function updateDeleteButtonState() {
|
||||||
|
const deleteBtn = document.getElementById('deleteSelectedJobs');
|
||||||
|
const selectedCheckboxes = document.querySelectorAll('.job-checkbox:checked');
|
||||||
|
deleteBtn.disabled = selectedCheckboxes.length === 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Delete selected jobs
|
||||||
|
document.getElementById('deleteSelectedJobs').addEventListener('click', async () => {
|
||||||
|
const selectedIds = Array.from(
|
||||||
|
document.querySelectorAll('.job-checkbox:checked')
|
||||||
|
).map(checkbox => checkbox.value);
|
||||||
|
|
||||||
|
if (!selectedIds.length) return;
|
||||||
|
|
||||||
|
if (confirm(`Are you sure you want to delete ${selectedIds.length} job(s)?`)) {
|
||||||
|
await deleteMultipleJobs(selectedIds);
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
async function deleteJob(jobId) {
|
||||||
|
if (confirm('Are you sure you want to delete this job?')) {
|
||||||
|
try {
|
||||||
|
const response = await fetch(`/internal/repair/jobs`, {
|
||||||
|
method: 'DELETE',
|
||||||
|
headers: {
|
||||||
|
'Content-Type': 'application/json'
|
||||||
|
},
|
||||||
|
body: JSON.stringify({ ids: [jobId] })
|
||||||
|
});
|
||||||
|
|
||||||
|
if (!response.ok) throw new Error(await response.text());
|
||||||
|
createToast('Job deleted successfully');
|
||||||
|
await loadJobs(currentPage); // Refresh the jobs list
|
||||||
|
} catch (error) {
|
||||||
|
createToast(`Error deleting job: ${error.message}`, 'error');
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
async function deleteMultipleJobs(jobIds) {
|
||||||
|
try {
|
||||||
|
const response = await fetch(`/internal/repair/jobs`, {
|
||||||
|
method: 'DELETE',
|
||||||
|
headers: {
|
||||||
|
'Content-Type': 'application/json'
|
||||||
|
},
|
||||||
|
body: JSON.stringify({ ids: jobIds })
|
||||||
|
});
|
||||||
|
|
||||||
|
if (!response.ok) throw new Error(await response.text());
|
||||||
|
createToast(`${jobIds.length} job(s) deleted successfully`);
|
||||||
|
await loadJobs(currentPage); // Refresh the jobs list
|
||||||
|
} catch (error) {
|
||||||
|
createToast(`Error deleting jobs: ${error.message}`, 'error');
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Process job function
|
||||||
|
async function processJob(jobId) {
|
||||||
|
try {
|
||||||
|
const response = await fetch(`/internal/repair/jobs/${jobId}/process`, {
|
||||||
|
method: 'POST',
|
||||||
|
headers: {
|
||||||
|
'Content-Type': 'application/json'
|
||||||
|
},
|
||||||
|
});
|
||||||
|
|
||||||
|
if (!response.ok) throw new Error(await response.text());
|
||||||
|
createToast('Job processing started successfully');
|
||||||
|
await loadJobs(currentPage); // Refresh the jobs list
|
||||||
|
} catch (error) {
|
||||||
|
createToast(`Error processing job: ${error.message}`, 'error');
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// View job details function
/**
 * Populate and show the "job details" modal for one job.
 * Looks the job up in the module-level `allJobs` array; silently no-ops
 * if the id is unknown (e.g. the list was refreshed out from under us).
 *
 * @param {string} jobId - Full job id; only its first 8 chars are displayed.
 */
function viewJobDetails(jobId) {
    // Find the job in the already-loaded list — no extra fetch.
    const job = allJobs.find(j => j.id === jobId);
    if (!job) return;

    // Prepare modal data
    document.getElementById('modalJobId').textContent = job.id.substring(0, 8);

    // Format dates
    const startedDate = new Date(job.created_at);
    document.getElementById('modalJobStarted').textContent = startedDate.toLocaleString();

    if (job.finished_at) {
        const completedDate = new Date(job.finished_at);
        document.getElementById('modalJobCompleted').textContent = completedDate.toLocaleString();
    } else {
        document.getElementById('modalJobCompleted').textContent = 'N/A';
    }

    // Map each known job status to its display label and Bootstrap text
    // color class; anything unrecognized falls back to "In Progress".
    const STATUS_DISPLAY = {
        failed: { label: 'Failed', cls: 'text-danger' },
        completed: { label: 'Completed', cls: 'text-success' },
        pending: { label: 'Pending', cls: 'text-warning' },
        processing: { label: 'Processing', cls: 'text-info' },
    };
    const { label: status, cls: statusClass } =
        STATUS_DISPLAY[job.status] ?? { label: 'In Progress', cls: 'text-primary' };

    // Build the status badge with DOM APIs instead of an innerHTML template
    // so no markup can be injected even if status strings ever become dynamic.
    const statusEl = document.getElementById('modalJobStatus');
    statusEl.textContent = '';
    const statusSpan = document.createElement('span');
    statusSpan.className = statusClass;
    statusSpan.textContent = status;
    statusEl.appendChild(statusSpan);

    // Set other job details
    document.getElementById('modalJobArrs').textContent = job.arrs.join(', ');
    document.getElementById('modalJobMediaIds').textContent =
        job.media_ids && job.media_ids.length > 0 ? job.media_ids.join(', ') : 'All';
    document.getElementById('modalJobAutoProcess').textContent = job.auto_process ? 'Yes' : 'No';

    // Show/hide error message
    const errorContainer = document.getElementById('errorContainer');
    if (job.error) {
        document.getElementById('modalJobError').textContent = job.error;
        errorContainer.classList.remove('d-none');
    } else {
        errorContainer.classList.add('d-none');
    }

    // The "process" button is only meaningful for jobs that haven't started.
    const processBtn = document.getElementById('processJobBtn');
    if (job.status === 'pending') {
        processBtn.classList.remove('d-none');
        processBtn.onclick = () => {
            processJob(job.id);
            // Close the modal once processing has been kicked off.
            const modal = bootstrap.Modal.getInstance(document.getElementById('jobDetailsModal'));
            modal.hide();
        };
    } else {
        processBtn.classList.add('d-none');
    }

    // Populate broken items table.
    // `broken_items` maps each Arr name to an array of broken items.
    const brokenItemsTableBody = document.getElementById('brokenItemsTableBody');
    const noBrokenItemsMessage = document.getElementById('noBrokenItemsMessage');
    brokenItemsTableBody.innerHTML = '';

    if (job.broken_items) {
        for (const [arrName, items] of Object.entries(job.broken_items)) {
            if (!items || items.length === 0) continue;
            for (const item of items) {
                // SECURITY FIX: build cells via createElement/textContent
                // rather than interpolating arrName/item.path into innerHTML,
                // so a malicious file path cannot inject markup (XSS).
                const row = document.createElement('tr');
                const arrCell = document.createElement('td');
                arrCell.textContent = arrName;
                const pathCell = document.createElement('td');
                const pathSmall = document.createElement('small');
                pathSmall.className = 'text-muted';
                pathSmall.textContent = item.path;
                pathCell.appendChild(pathSmall);
                row.appendChild(arrCell);
                row.appendChild(pathCell);
                brokenItemsTableBody.appendChild(row);
            }
        }
    }

    // FIX: decide "has broken items" from the rows actually rendered, not
    // from whether broken_items merely has (possibly all-empty) entries —
    // otherwise the "no broken items" message is hidden over an empty table.
    const hasBrokenItems = brokenItemsTableBody.children.length > 0;
    if (hasBrokenItems) {
        noBrokenItemsMessage.classList.add('d-none');
    } else {
        noBrokenItemsMessage.classList.remove('d-none');
    }

    // Show the modal
    const modal = new bootstrap.Modal(document.getElementById('jobDetailsModal'));
    modal.show();
}
|
||||||
|
|
||||||
|
// Re-fetch the current page of jobs when the user clicks "refresh";
// currentPage is the module-level page tracker kept by loadJobs().
document.getElementById('refreshJobs').addEventListener('click', () => {
    loadJobs(currentPage);
});

// Initial fetch: populate the jobs table with page 1 on page load.
loadJobs(1);
||||||
});
|
});
|
||||||
</script>
{{ end }}
|
||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user