Compare commits
23 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
e9d3e120f3 | ||
|
|
104df3c33c | ||
|
|
810c9d705e | ||
|
|
4ff00859a3 | ||
|
|
b77dbcc4f4 | ||
|
|
58c0aafab1 | ||
|
|
357da54083 | ||
|
|
88a7196eaf | ||
|
|
abc86a0460 | ||
|
|
dd0b7efdff | ||
|
|
7359f280b0 | ||
|
|
4eb3539347 | ||
|
|
9fb1118475 | ||
|
|
07491b43fe | ||
|
|
8f7c9a19c5 | ||
|
|
a51364d150 | ||
|
|
df2aa4e361 | ||
|
|
b51cb954f8 | ||
|
|
8bdb2e3547 | ||
|
|
2c9a076cd2 | ||
|
|
d2a77620bc | ||
|
|
4b8f1ccfb6 | ||
|
|
f118c5b794 |
52
.air.toml
Normal file
52
.air.toml
Normal file
@@ -0,0 +1,52 @@
|
|||||||
|
root = "."
|
||||||
|
testdata_dir = "testdata"
|
||||||
|
tmp_dir = "tmp"
|
||||||
|
|
||||||
|
[build]
|
||||||
|
args_bin = []
|
||||||
|
bin = "./tmp/main"
|
||||||
|
cmd = "go build -o ./tmp/main ."
|
||||||
|
delay = 1000
|
||||||
|
exclude_dir = ["assets", "tmp", "vendor", "testdata", "data"]
|
||||||
|
exclude_file = []
|
||||||
|
exclude_regex = ["_test.go"]
|
||||||
|
exclude_unchanged = false
|
||||||
|
follow_symlink = false
|
||||||
|
full_bin = ""
|
||||||
|
include_dir = []
|
||||||
|
include_ext = ["go", "tpl", "tmpl", "html", ".json"]
|
||||||
|
include_file = []
|
||||||
|
kill_delay = "0s"
|
||||||
|
log = "build-errors.log"
|
||||||
|
poll = false
|
||||||
|
poll_interval = 0
|
||||||
|
post_cmd = []
|
||||||
|
pre_cmd = []
|
||||||
|
rerun = false
|
||||||
|
rerun_delay = 500
|
||||||
|
send_interrupt = false
|
||||||
|
stop_on_error = false
|
||||||
|
|
||||||
|
[color]
|
||||||
|
app = ""
|
||||||
|
build = "yellow"
|
||||||
|
main = "magenta"
|
||||||
|
runner = "green"
|
||||||
|
watcher = "cyan"
|
||||||
|
|
||||||
|
[log]
|
||||||
|
main_only = false
|
||||||
|
silent = false
|
||||||
|
time = false
|
||||||
|
|
||||||
|
[misc]
|
||||||
|
clean_on_exit = false
|
||||||
|
|
||||||
|
[proxy]
|
||||||
|
app_port = 0
|
||||||
|
enabled = false
|
||||||
|
proxy_port = 0
|
||||||
|
|
||||||
|
[screen]
|
||||||
|
clear_on_rebuild = false
|
||||||
|
keep_scroll = true
|
||||||
@@ -5,4 +5,5 @@ docker-compose.yml
|
|||||||
.DS_Store
|
.DS_Store
|
||||||
**/.idea/
|
**/.idea/
|
||||||
*.magnet
|
*.magnet
|
||||||
**.torrent
|
**.torrent
|
||||||
|
|
||||||
|
|||||||
54
.github/workflows/docker.yml
vendored
Normal file
54
.github/workflows/docker.yml
vendored
Normal file
@@ -0,0 +1,54 @@
|
|||||||
|
name: Docker Build and Push
|
||||||
|
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
branches:
|
||||||
|
- main
|
||||||
|
- beta
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
docker:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- name: Checkout repository
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
with:
|
||||||
|
fetch-depth: 0
|
||||||
|
|
||||||
|
- name: Get version
|
||||||
|
id: get_version
|
||||||
|
run: |
|
||||||
|
LATEST_TAG=$(git tag | sort -V | tail -n1)
|
||||||
|
echo "latest_tag=${LATEST_TAG}" >> $GITHUB_ENV
|
||||||
|
|
||||||
|
- name: Set up QEMU
|
||||||
|
uses: docker/setup-qemu-action@v3
|
||||||
|
|
||||||
|
- name: Set up Docker Buildx
|
||||||
|
uses: docker/setup-buildx-action@v3
|
||||||
|
|
||||||
|
- name: Login to Docker Hub
|
||||||
|
uses: docker/login-action@v3
|
||||||
|
with:
|
||||||
|
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||||
|
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||||
|
|
||||||
|
- name: Build and push for beta branch
|
||||||
|
if: github.ref == 'refs/heads/beta'
|
||||||
|
uses: docker/build-push-action@v5
|
||||||
|
with:
|
||||||
|
context: .
|
||||||
|
platforms: linux/amd64,linux/arm64,linux/arm/v7
|
||||||
|
push: true
|
||||||
|
tags: cy01/blackhole:beta
|
||||||
|
|
||||||
|
- name: Build and push for main branch
|
||||||
|
if: github.ref == 'refs/heads/main'
|
||||||
|
uses: docker/build-push-action@v5
|
||||||
|
with:
|
||||||
|
context: .
|
||||||
|
platforms: linux/amd64,linux/arm64,linux/arm/v7
|
||||||
|
push: true
|
||||||
|
tags: |
|
||||||
|
cy01/blackhole:latest
|
||||||
|
cy01/blackhole:${{ env.latest_tag }}
|
||||||
32
.github/workflows/release.yml
vendored
Normal file
32
.github/workflows/release.yml
vendored
Normal file
@@ -0,0 +1,32 @@
|
|||||||
|
name: Release
|
||||||
|
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
tags:
|
||||||
|
- '*'
|
||||||
|
|
||||||
|
permissions:
|
||||||
|
contents: write
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
goreleaser:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- name: Checkout
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
with:
|
||||||
|
fetch-depth: 0
|
||||||
|
|
||||||
|
- name: Set up Go
|
||||||
|
uses: actions/setup-go@v4
|
||||||
|
with:
|
||||||
|
go-version: '1.22'
|
||||||
|
|
||||||
|
- name: Run GoReleaser
|
||||||
|
uses: goreleaser/goreleaser-action@v5
|
||||||
|
with:
|
||||||
|
distribution: goreleaser
|
||||||
|
version: latest
|
||||||
|
args: release --clean
|
||||||
|
env:
|
||||||
|
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||||
1
.gitignore
vendored
1
.gitignore
vendored
@@ -9,3 +9,4 @@ docker-compose.yml
|
|||||||
*.log
|
*.log
|
||||||
*.log.*
|
*.log.*
|
||||||
dist/
|
dist/
|
||||||
|
tmp/**
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
version: 2
|
version: 1
|
||||||
|
|
||||||
before:
|
before:
|
||||||
hooks:
|
hooks:
|
||||||
|
|||||||
40
CHANGELOG.md
40
CHANGELOG.md
@@ -67,4 +67,42 @@
|
|||||||
- Add file download support(Sequential Download)
|
- Add file download support(Sequential Download)
|
||||||
- Fix http handler error
|
- Fix http handler error
|
||||||
- Fix *arrs map failing concurrently
|
- Fix *arrs map failing concurrently
|
||||||
- Fix cache not being updated
|
- Fix cache not being updated
|
||||||
|
|
||||||
|
#### 0.2.5
|
||||||
|
- Fix ContentPath not being set prior
|
||||||
|
- Rewrote Readme
|
||||||
|
- Cleaned up the code
|
||||||
|
|
||||||
|
#### 0.2.6
|
||||||
|
- Delete torrent for empty matched files
|
||||||
|
- Update Readme
|
||||||
|
|
||||||
|
#### 0.2.7
|
||||||
|
|
||||||
|
- Add support for multiple debrid providers
|
||||||
|
- Add Torbox support
|
||||||
|
- Add support for configurable debrid cache checks
|
||||||
|
- Add support for configurable debrid download uncached torrents
|
||||||
|
|
||||||
|
#### 0.3.0
|
||||||
|
|
||||||
|
- Add UI for adding torrents
|
||||||
|
- Refraction of the code
|
||||||
|
- -Fix Torbox bug
|
||||||
|
- Update CI/CD
|
||||||
|
- Update Readme
|
||||||
|
|
||||||
|
#### 0.3.1
|
||||||
|
|
||||||
|
- Add DebridLink Support
|
||||||
|
- Refactor error handling
|
||||||
|
|
||||||
|
#### 0.3.2
|
||||||
|
|
||||||
|
- Fix DebridLink not downloading
|
||||||
|
- Fix Torbox with uncached torrents
|
||||||
|
- Add new /internal/cached endpoint to check if an hash is cached
|
||||||
|
- implement per-debrid local cache
|
||||||
|
- Fix file check for torbox
|
||||||
|
- Other minor bug fixes
|
||||||
@@ -20,8 +20,11 @@ RUN CGO_ENABLED=0 GOOS=$(echo $TARGETPLATFORM | cut -d '/' -f1) GOARCH=$(echo $T
|
|||||||
FROM scratch
|
FROM scratch
|
||||||
COPY --from=builder /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
|
COPY --from=builder /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
|
||||||
COPY --from=builder /blackhole /blackhole
|
COPY --from=builder /blackhole /blackhole
|
||||||
|
COPY --from=builder /app/README.md /README.md
|
||||||
|
|
||||||
EXPOSE 8181
|
EXPOSE 8181
|
||||||
|
|
||||||
|
VOLUME ["/app"]
|
||||||
|
|
||||||
# Run
|
# Run
|
||||||
CMD ["/blackhole", "--config", "/app/config.json"]
|
CMD ["/blackhole", "--config", "/app/config.json"]
|
||||||
|
|||||||
84
README.md
84
README.md
@@ -1,13 +1,25 @@
|
|||||||
### GoBlackHole(with Debrid Proxy Support)
|
### GoBlackHole(with Debrid Proxy Support)
|
||||||
|
|
||||||
This is a Golang implementation go Torrent QbitTorrent with a **Real Debrid Proxy Support**.
|
This is a Golang implementation go Torrent QbitTorrent with a **Real Debrid & Torbox Support**.
|
||||||
|
|
||||||
|
|
||||||
|
### Features
|
||||||
|
|
||||||
#### Uses
|
|
||||||
- Mock Qbittorent API that supports the Arrs(Sonarr, Radarr, etc)
|
- Mock Qbittorent API that supports the Arrs(Sonarr, Radarr, etc)
|
||||||
- Proxy support for the Arrs
|
- Proxy support for the Arrs
|
||||||
|
- Real Debrid Support
|
||||||
|
- Torbox Support
|
||||||
|
- Debrid Link Support
|
||||||
|
- Multi-Debrid Providers support
|
||||||
|
- UI for adding torrents directly to *arrs
|
||||||
|
|
||||||
The proxy is useful in filtering out un-cached Real Debrid torrents
|
The proxy is useful in filtering out un-cached Real Debrid torrents
|
||||||
|
|
||||||
|
### Supported Debrid Providers
|
||||||
|
- Real Debrid
|
||||||
|
- Torbox
|
||||||
|
- Debrid Link
|
||||||
|
|
||||||
### Changelog
|
### Changelog
|
||||||
|
|
||||||
- View the [CHANGELOG.md](CHANGELOG.md) for the latest changes
|
- View the [CHANGELOG.md](CHANGELOG.md) for the latest changes
|
||||||
@@ -37,6 +49,8 @@ services:
|
|||||||
- QBIT_PORT=8282 # qBittorrent Port. This is optional. You can set this in the config file
|
- QBIT_PORT=8282 # qBittorrent Port. This is optional. You can set this in the config file
|
||||||
- PORT=8181 # Proxy Port. This is optional. You can set this in the config file
|
- PORT=8181 # Proxy Port. This is optional. You can set this in the config file
|
||||||
restart: unless-stopped
|
restart: unless-stopped
|
||||||
|
depends_on:
|
||||||
|
- rclone # If you are using rclone with docker
|
||||||
|
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -50,16 +64,38 @@ Download the binary from the releases page and run it with the config file.
|
|||||||
#### Config
|
#### Config
|
||||||
```json
|
```json
|
||||||
{
|
{
|
||||||
"debrid": {
|
"debrids": [
|
||||||
"name": "realdebrid",
|
{
|
||||||
"host": "https://api.real-debrid.com/rest/1.0",
|
"name": "torbox",
|
||||||
"api_key": "realdebrid_api_key",
|
"host": "https://api.torbox.app/v1",
|
||||||
"folder": "data/realdebrid/torrents/",
|
"api_key": "torbox_api_key",
|
||||||
"rate_limit": "250/minute"
|
"folder": "data/torbox/torrents/",
|
||||||
},
|
"rate_limit": "250/minute",
|
||||||
|
"download_uncached": false,
|
||||||
|
"check_cached": true
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "realdebrid",
|
||||||
|
"host": "https://api.real-debrid.com/rest/1.0",
|
||||||
|
"api_key": "realdebrid_key",
|
||||||
|
"folder": "data/realdebrid/torrents/",
|
||||||
|
"rate_limit": "250/minute",
|
||||||
|
"download_uncached": false,
|
||||||
|
"check_cached": false
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "debridlink",
|
||||||
|
"host": "https://debrid-link.com/api/v2",
|
||||||
|
"api_key": "debridlink_key",
|
||||||
|
"folder": "data/debridlink/torrents/",
|
||||||
|
"rate_limit": "250/minute",
|
||||||
|
"download_uncached": false,
|
||||||
|
"check_cached": false
|
||||||
|
}
|
||||||
|
],
|
||||||
"proxy": {
|
"proxy": {
|
||||||
"enabled": true,
|
"enabled": true,
|
||||||
"port": "8181",
|
"port": "8100",
|
||||||
"debug": false,
|
"debug": false,
|
||||||
"username": "username",
|
"username": "username",
|
||||||
"password": "password",
|
"password": "password",
|
||||||
@@ -68,18 +104,28 @@ Download the binary from the releases page and run it with the config file.
|
|||||||
"max_cache_size": 1000,
|
"max_cache_size": 1000,
|
||||||
"qbittorrent": {
|
"qbittorrent": {
|
||||||
"port": "8282",
|
"port": "8282",
|
||||||
"username": "admin", // deprecated
|
|
||||||
"password": "admin", // deprecated
|
|
||||||
"download_folder": "/media/symlinks/",
|
"download_folder": "/media/symlinks/",
|
||||||
"categories": ["sonarr", "radarr"],
|
"categories": ["sonarr", "radarr"],
|
||||||
"refresh_interval": 5 // in seconds
|
"refresh_interval": 5
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Config Notes
|
#### Config Notes
|
||||||
|
##### Max Cache Size
|
||||||
|
- The `max_cache_size` key is used to set the maximum number of infohashes that can be stored in the availability cache. This is used to prevent round trip to the debrid provider when using the proxy/Qbittorrent
|
||||||
|
- The default value is `1000`
|
||||||
|
- The cache is stored in memory and is not persisted on restart
|
||||||
|
|
||||||
##### Debrid Config
|
##### Debrid Config
|
||||||
- This config key is important as it's used for both Blackhole and Proxy
|
- The `debrids` key is an array of debrid providers
|
||||||
|
- The `name` key is the name of the debrid provider
|
||||||
|
- The `host` key is the API endpoint of the debrid provider
|
||||||
|
- The `api_key` key is the API key of the debrid provider
|
||||||
|
- The `folder` key is the folder where the torrents will be downloaded. e.g `data/realdebrid/torrents/`
|
||||||
|
- The `rate_limit` key is the rate limit of the debrid provider(null by default)
|
||||||
|
- The `download_uncached` bool key is used to download uncached torrents(disabled by default)
|
||||||
|
- The `check_cached` bool key is used to check if the torrent is cached(disabled by default)
|
||||||
|
|
||||||
##### Proxy Config
|
##### Proxy Config
|
||||||
- The `enabled` key is used to enable the proxy
|
- The `enabled` key is used to enable the proxy
|
||||||
@@ -93,6 +139,7 @@ Download the binary from the releases page and run it with the config file.
|
|||||||
- The `port` key is the port the qBittorrent will listen on
|
- The `port` key is the port the qBittorrent will listen on
|
||||||
- The `download_folder` is the folder where the torrents will be downloaded. e.g `/media/symlinks/`
|
- The `download_folder` is the folder where the torrents will be downloaded. e.g `/media/symlinks/`
|
||||||
- The `categories` key is used to filter out torrents based on the category. e.g `sonarr`, `radarr`
|
- The `categories` key is used to filter out torrents based on the category. e.g `sonarr`, `radarr`
|
||||||
|
- The `refresh_interval` key is used to set the interval in minutes to refresh the Arrs Monitored Downloads(it's in seconds). The default value is `5` seconds
|
||||||
|
|
||||||
### Proxy
|
### Proxy
|
||||||
|
|
||||||
@@ -128,14 +175,17 @@ Setting Up Qbittorrent in Arr
|
|||||||
- Test
|
- Test
|
||||||
- Save
|
- Save
|
||||||
|
|
||||||
|
### UI for adding torrents
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
The UI is a simple web interface that allows you to add torrents directly to the Arrs(Sonarr, Radarr, etc)
|
||||||
|
|
||||||
### TODO
|
### TODO
|
||||||
- [ ] A proper name!!!!
|
- [ ] A proper name!!!!
|
||||||
- [ ] Debrid
|
- [ ] Debrid
|
||||||
- [ ] Add more Debrid Providers
|
- [ ] Add more Debrid Providers
|
||||||
|
|
||||||
- [ ] Proxy
|
|
||||||
- [ ] Add more Proxy features
|
|
||||||
|
|
||||||
- [ ] Qbittorrent
|
- [ ] Qbittorrent
|
||||||
- [ ] Add more Qbittorrent features
|
- [ ] Add more Qbittorrent features
|
||||||
- [ ] Persist torrents on restart/server crash
|
- [ ] Persist torrents on restart/server crash
|
||||||
|
|||||||
30
cmd/main.go
30
cmd/main.go
@@ -2,6 +2,7 @@ package cmd
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"cmp"
|
"cmp"
|
||||||
|
"context"
|
||||||
"goBlack/common"
|
"goBlack/common"
|
||||||
"goBlack/pkg/debrid"
|
"goBlack/pkg/debrid"
|
||||||
"goBlack/pkg/proxy"
|
"goBlack/pkg/proxy"
|
||||||
@@ -9,32 +10,43 @@ import (
|
|||||||
"sync"
|
"sync"
|
||||||
)
|
)
|
||||||
|
|
||||||
func Start(config *common.Config) {
|
func Start(ctx context.Context, config *common.Config) error {
|
||||||
maxCacheSize := cmp.Or(config.MaxCacheSize, 1000)
|
maxCacheSize := cmp.Or(config.MaxCacheSize, 1000)
|
||||||
cache := common.NewCache(maxCacheSize)
|
|
||||||
|
|
||||||
deb := debrid.NewDebrid(config.Debrid, cache)
|
deb := debrid.NewDebrid(config.Debrids, maxCacheSize)
|
||||||
|
|
||||||
var wg sync.WaitGroup
|
var wg sync.WaitGroup
|
||||||
|
errChan := make(chan error, 2)
|
||||||
|
|
||||||
if config.Proxy.Enabled {
|
if config.Proxy.Enabled {
|
||||||
p := proxy.NewProxy(*config, deb, cache)
|
|
||||||
wg.Add(1)
|
wg.Add(1)
|
||||||
go func() {
|
go func() {
|
||||||
defer wg.Done()
|
defer wg.Done()
|
||||||
p.Start()
|
if err := proxy.NewProxy(*config, deb).Start(ctx); err != nil {
|
||||||
|
errChan <- err
|
||||||
|
}
|
||||||
}()
|
}()
|
||||||
}
|
}
|
||||||
if config.QBitTorrent.Port != "" {
|
if config.QBitTorrent.Port != "" {
|
||||||
qb := qbit.NewQBit(config, deb, cache)
|
|
||||||
wg.Add(1)
|
wg.Add(1)
|
||||||
go func() {
|
go func() {
|
||||||
defer wg.Done()
|
defer wg.Done()
|
||||||
qb.Start()
|
if err := qbit.Start(ctx, config, deb); err != nil {
|
||||||
|
errChan <- err
|
||||||
|
}
|
||||||
}()
|
}()
|
||||||
}
|
}
|
||||||
|
|
||||||
// Wait indefinitely
|
go func() {
|
||||||
wg.Wait()
|
wg.Wait()
|
||||||
|
close(errChan)
|
||||||
|
}()
|
||||||
|
|
||||||
|
// Wait for context cancellation or completion or error
|
||||||
|
select {
|
||||||
|
case err := <-errChan:
|
||||||
|
return err
|
||||||
|
case <-ctx.Done():
|
||||||
|
return ctx.Err()
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -40,14 +40,16 @@ func (c *Cache) AddMultiple(values map[string]bool) {
|
|||||||
c.mu.Lock()
|
c.mu.Lock()
|
||||||
defer c.mu.Unlock()
|
defer c.mu.Unlock()
|
||||||
|
|
||||||
for value := range values {
|
for value, exists := range values {
|
||||||
if _, exists := c.data[value]; !exists {
|
if !exists {
|
||||||
if len(c.order) >= c.maxItems {
|
if _, exists := c.data[value]; !exists {
|
||||||
delete(c.data, c.order[0])
|
if len(c.order) >= c.maxItems {
|
||||||
c.order = c.order[1:]
|
delete(c.data, c.order[0])
|
||||||
|
c.order = c.order[1:]
|
||||||
|
}
|
||||||
|
c.data[value] = struct{}{}
|
||||||
|
c.order = append(c.order, value)
|
||||||
}
|
}
|
||||||
c.data[value] = struct{}{}
|
|
||||||
c.order = append(c.order, value)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -2,8 +2,11 @@ package common
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
"log"
|
"log"
|
||||||
"os"
|
"os"
|
||||||
|
"sync"
|
||||||
)
|
)
|
||||||
|
|
||||||
type DebridConfig struct {
|
type DebridConfig struct {
|
||||||
@@ -12,6 +15,7 @@ type DebridConfig struct {
|
|||||||
APIKey string `json:"api_key"`
|
APIKey string `json:"api_key"`
|
||||||
Folder string `json:"folder"`
|
Folder string `json:"folder"`
|
||||||
DownloadUncached bool `json:"download_uncached"`
|
DownloadUncached bool `json:"download_uncached"`
|
||||||
|
CheckCached bool `json:"check_cached"`
|
||||||
RateLimit string `json:"rate_limit"` // 200/minute or 10/second
|
RateLimit string `json:"rate_limit"` // 200/minute or 10/second
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -36,11 +40,88 @@ type QBitTorrentConfig struct {
|
|||||||
|
|
||||||
type Config struct {
|
type Config struct {
|
||||||
Debrid DebridConfig `json:"debrid"`
|
Debrid DebridConfig `json:"debrid"`
|
||||||
|
Debrids []DebridConfig `json:"debrids"`
|
||||||
Proxy ProxyConfig `json:"proxy"`
|
Proxy ProxyConfig `json:"proxy"`
|
||||||
MaxCacheSize int `json:"max_cache_size"`
|
MaxCacheSize int `json:"max_cache_size"`
|
||||||
QBitTorrent QBitTorrentConfig `json:"qbittorrent"`
|
QBitTorrent QBitTorrentConfig `json:"qbittorrent"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func validateDebrids(debrids []DebridConfig) error {
|
||||||
|
if len(debrids) == 0 {
|
||||||
|
return errors.New("no debrids configured")
|
||||||
|
}
|
||||||
|
|
||||||
|
errChan := make(chan error, len(debrids))
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
|
||||||
|
for _, debrid := range debrids {
|
||||||
|
// Basic field validation
|
||||||
|
if debrid.Host == "" {
|
||||||
|
return errors.New("debrid host is required")
|
||||||
|
}
|
||||||
|
if debrid.APIKey == "" {
|
||||||
|
return errors.New("debrid api key is required")
|
||||||
|
}
|
||||||
|
if debrid.Folder == "" {
|
||||||
|
return errors.New("debrid folder is required")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check folder existence concurrently
|
||||||
|
wg.Add(1)
|
||||||
|
go func(folder string) {
|
||||||
|
defer wg.Done()
|
||||||
|
if _, err := os.Stat(folder); os.IsNotExist(err) {
|
||||||
|
errChan <- fmt.Errorf("debrid folder does not exist: %s", folder)
|
||||||
|
}
|
||||||
|
}(debrid.Folder)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Wait for all checks to complete
|
||||||
|
go func() {
|
||||||
|
wg.Wait()
|
||||||
|
close(errChan)
|
||||||
|
}()
|
||||||
|
|
||||||
|
// Return first error if any
|
||||||
|
if err := <-errChan; err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func validateQbitTorrent(config *QBitTorrentConfig) error {
|
||||||
|
if config.DownloadFolder == "" {
|
||||||
|
return errors.New("qbittorent download folder is required")
|
||||||
|
}
|
||||||
|
if _, err := os.Stat(config.DownloadFolder); os.IsNotExist(err) {
|
||||||
|
return errors.New("qbittorent download folder does not exist")
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func validateConfig(config *Config) error {
|
||||||
|
// Run validations concurrently
|
||||||
|
errChan := make(chan error, 2)
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
errChan <- validateDebrids(config.Debrids)
|
||||||
|
}()
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
errChan <- validateQbitTorrent(&config.QBitTorrent)
|
||||||
|
}()
|
||||||
|
|
||||||
|
// Check for errors
|
||||||
|
for i := 0; i < 2; i++ {
|
||||||
|
if err := <-errChan; err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
func LoadConfig(path string) (*Config, error) {
|
func LoadConfig(path string) (*Config, error) {
|
||||||
// Load the config file
|
// Load the config file
|
||||||
file, err := os.Open(path)
|
file, err := os.Open(path)
|
||||||
@@ -60,10 +141,15 @@ func LoadConfig(path string) (*Config, error) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
if config.Proxy.CachedOnly == nil {
|
|
||||||
config.Proxy.CachedOnly = new(bool)
|
if config.Debrid.Name != "" {
|
||||||
*config.Proxy.CachedOnly = true
|
config.Debrids = append(config.Debrids, config.Debrid)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Validate the config
|
||||||
|
//if err := validateConfig(config); err != nil {
|
||||||
|
// return nil, err
|
||||||
|
//}
|
||||||
|
|
||||||
return config, nil
|
return config, nil
|
||||||
}
|
}
|
||||||
|
|||||||
14
common/logger.go
Normal file
14
common/logger.go
Normal file
@@ -0,0 +1,14 @@
|
|||||||
|
package common
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"os"
|
||||||
|
)
|
||||||
|
|
||||||
|
func NewLogger(prefix string, output *os.File) *log.Logger {
|
||||||
|
f := fmt.Sprintf("[%s] ", prefix)
|
||||||
|
return log.New(output, f, log.LstdFlags)
|
||||||
|
}
|
||||||
|
|
||||||
|
var Logger = NewLogger("Main", os.Stdout)
|
||||||
@@ -2,6 +2,7 @@ package common
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"crypto/tls"
|
"crypto/tls"
|
||||||
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"golang.org/x/time/rate"
|
"golang.org/x/time/rate"
|
||||||
"io"
|
"io"
|
||||||
@@ -60,11 +61,7 @@ func (c *RLHTTPClient) Do(req *http.Request) (*http.Response, error) {
|
|||||||
return resp, fmt.Errorf("max retries exceeded")
|
return resp, fmt.Errorf("max retries exceeded")
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *RLHTTPClient) MakeRequest(method string, url string, body io.Reader) ([]byte, error) {
|
func (c *RLHTTPClient) MakeRequest(req *http.Request) ([]byte, error) {
|
||||||
req, err := http.NewRequest(method, url, body)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if c.Headers != nil {
|
if c.Headers != nil {
|
||||||
for key, value := range c.Headers {
|
for key, value := range c.Headers {
|
||||||
req.Header.Set(key, value)
|
req.Header.Set(key, value)
|
||||||
@@ -75,9 +72,12 @@ func (c *RLHTTPClient) MakeRequest(method string, url string, body io.Reader) ([
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
b, _ := io.ReadAll(res.Body)
|
||||||
statusOk := strconv.Itoa(res.StatusCode)[0] == '2'
|
statusOk := strconv.Itoa(res.StatusCode)[0] == '2'
|
||||||
if !statusOk {
|
if !statusOk {
|
||||||
return nil, fmt.Errorf("unexpected status code: %d", res.StatusCode)
|
// Add status code error to the body
|
||||||
|
b = append(b, []byte(fmt.Sprintf("\nstatus code: %d", res.StatusCode))...)
|
||||||
|
return nil, fmt.Errorf(string(b))
|
||||||
}
|
}
|
||||||
defer func(Body io.ReadCloser) {
|
defer func(Body io.ReadCloser) {
|
||||||
err := Body.Close()
|
err := Body.Close()
|
||||||
@@ -85,7 +85,7 @@ func (c *RLHTTPClient) MakeRequest(method string, url string, body io.Reader) ([
|
|||||||
log.Println(err)
|
log.Println(err)
|
||||||
}
|
}
|
||||||
}(res.Body)
|
}(res.Body)
|
||||||
return io.ReadAll(res.Body)
|
return b, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewRLHTTPClient(rl *rate.Limiter, headers map[string]string) *RLHTTPClient {
|
func NewRLHTTPClient(rl *rate.Limiter, headers map[string]string) *RLHTTPClient {
|
||||||
@@ -128,3 +128,9 @@ func ParseRateLimit(rateStr string) *rate.Limiter {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func JSONResponse(w http.ResponseWriter, data interface{}, code int) {
|
||||||
|
w.Header().Set("Content-Type", "application/json")
|
||||||
|
w.WriteHeader(code)
|
||||||
|
json.NewEncoder(w).Encode(data)
|
||||||
|
}
|
||||||
|
|||||||
@@ -209,11 +209,6 @@ func processInfoHash(input string) (string, error) {
|
|||||||
return "", fmt.Errorf("invalid infohash: %s", input)
|
return "", fmt.Errorf("invalid infohash: %s", input)
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewLogger(prefix string, output *os.File) *log.Logger {
|
|
||||||
f := fmt.Sprintf("[%s] ", prefix)
|
|
||||||
return log.New(output, f, log.LstdFlags)
|
|
||||||
}
|
|
||||||
|
|
||||||
func GetInfohashFromURL(url string) (string, error) {
|
func GetInfohashFromURL(url string) (string, error) {
|
||||||
// Download the torrent file
|
// Download the torrent file
|
||||||
var magnetLink string
|
var magnetLink string
|
||||||
|
|||||||
BIN
doc/ui.png
Normal file
BIN
doc/ui.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 124 KiB |
6
main.go
6
main.go
@@ -1,6 +1,7 @@
|
|||||||
package main
|
package main
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"flag"
|
"flag"
|
||||||
"goBlack/cmd"
|
"goBlack/cmd"
|
||||||
"goBlack/common"
|
"goBlack/common"
|
||||||
@@ -17,6 +18,9 @@ func main() {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatal(err)
|
log.Fatal(err)
|
||||||
}
|
}
|
||||||
cmd.Start(conf)
|
ctx := context.Background()
|
||||||
|
if err := cmd.Start(ctx, conf); err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|||||||
120
pkg/arr/arr.go
Normal file
120
pkg/arr/arr.go
Normal file
@@ -0,0 +1,120 @@
|
|||||||
|
package arr
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"goBlack/common"
|
||||||
|
"log"
|
||||||
|
"net/http"
|
||||||
|
"os"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Type is a type of arr
|
||||||
|
type Type string
|
||||||
|
|
||||||
|
const (
|
||||||
|
Sonarr Type = "sonarr"
|
||||||
|
Radarr Type = "radarr"
|
||||||
|
Lidarr Type = "lidarr"
|
||||||
|
Readarr Type = "readarr"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
client *common.RLHTTPClient = common.NewRLHTTPClient(nil, nil)
|
||||||
|
logger *log.Logger = common.NewLogger("QBit", os.Stdout)
|
||||||
|
)
|
||||||
|
|
||||||
|
type Arr struct {
|
||||||
|
Name string `json:"name"`
|
||||||
|
Host string `json:"host"`
|
||||||
|
Token string `json:"token"`
|
||||||
|
Type Type `json:"type"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewArr(name, host, token string, arrType Type) *Arr {
|
||||||
|
return &Arr{
|
||||||
|
Name: name,
|
||||||
|
Host: host,
|
||||||
|
Token: token,
|
||||||
|
Type: arrType,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *Arr) Request(method, endpoint string, payload interface{}) (*http.Response, error) {
|
||||||
|
if a.Token == "" || a.Host == "" {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
url, err := common.JoinURL(a.Host, endpoint)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
var jsonPayload []byte
|
||||||
|
|
||||||
|
if payload != nil {
|
||||||
|
jsonPayload, err = json.Marshal(payload)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
req, err := http.NewRequest(method, url, bytes.NewBuffer(jsonPayload))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
req.Header.Set("Content-Type", "application/json")
|
||||||
|
req.Header.Set("X-Api-Key", a.Token)
|
||||||
|
return client.Do(req)
|
||||||
|
}
|
||||||
|
|
||||||
|
type Storage struct {
|
||||||
|
Arrs map[string]*Arr // name -> arr
|
||||||
|
mu sync.RWMutex
|
||||||
|
}
|
||||||
|
|
||||||
|
func inferType(host, name string) Type {
|
||||||
|
switch {
|
||||||
|
case strings.Contains(host, "sonarr") || strings.Contains(name, "sonarr"):
|
||||||
|
return Sonarr
|
||||||
|
case strings.Contains(host, "radarr") || strings.Contains(name, "radarr"):
|
||||||
|
return Radarr
|
||||||
|
case strings.Contains(host, "lidarr") || strings.Contains(name, "lidarr"):
|
||||||
|
return Lidarr
|
||||||
|
case strings.Contains(host, "readarr") || strings.Contains(name, "readarr"):
|
||||||
|
return Readarr
|
||||||
|
default:
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewStorage() *Storage {
|
||||||
|
arrs := make(map[string]*Arr)
|
||||||
|
//for name, arrCfg := range cfg {
|
||||||
|
// arrs[name] = NewArr(name, arrCfg.Host, arrCfg.Token, inferType(arrCfg.Host, name))
|
||||||
|
//}
|
||||||
|
return &Storage{
|
||||||
|
Arrs: arrs,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (as *Storage) AddOrUpdate(arr *Arr) {
|
||||||
|
as.mu.Lock()
|
||||||
|
defer as.mu.Unlock()
|
||||||
|
as.Arrs[arr.Host] = arr
|
||||||
|
}
|
||||||
|
|
||||||
|
func (as *Storage) Get(name string) *Arr {
|
||||||
|
as.mu.RLock()
|
||||||
|
defer as.mu.RUnlock()
|
||||||
|
return as.Arrs[name]
|
||||||
|
}
|
||||||
|
|
||||||
|
func (as *Storage) GetAll() []*Arr {
|
||||||
|
as.mu.RLock()
|
||||||
|
defer as.mu.RUnlock()
|
||||||
|
arrs := make([]*Arr, 0, len(as.Arrs))
|
||||||
|
for _, arr := range as.Arrs {
|
||||||
|
arrs = append(arrs, arr)
|
||||||
|
}
|
||||||
|
return arrs
|
||||||
|
}
|
||||||
29
pkg/arr/content.go
Normal file
29
pkg/arr/content.go
Normal file
@@ -0,0 +1,29 @@
|
|||||||
|
package arr
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
)
|
||||||
|
|
||||||
|
type ContentRequest struct {
|
||||||
|
ID string `json:"id"`
|
||||||
|
Title string `json:"name"`
|
||||||
|
Arr string `json:"arr"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *Arr) GetContents() *ContentRequest {
|
||||||
|
resp, err := a.Request(http.MethodGet, "api/v3/series", nil)
|
||||||
|
if err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
var data *ContentRequest
|
||||||
|
if err = json.NewDecoder(resp.Body).Decode(&data); err != nil {
|
||||||
|
fmt.Printf("Error: %v\n", err)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
fmt.Printf("Data: %v\n", data)
|
||||||
|
//data.Arr = a.Name
|
||||||
|
return data
|
||||||
|
}
|
||||||
41
pkg/arr/history.go
Normal file
41
pkg/arr/history.go
Normal file
@@ -0,0 +1,41 @@
|
|||||||
|
package arr
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"net/http"
|
||||||
|
gourl "net/url"
|
||||||
|
)
|
||||||
|
|
||||||
|
type HistorySchema struct {
|
||||||
|
Page int `json:"page"`
|
||||||
|
PageSize int `json:"pageSize"`
|
||||||
|
SortKey string `json:"sortKey"`
|
||||||
|
SortDirection string `json:"sortDirection"`
|
||||||
|
TotalRecords int `json:"totalRecords"`
|
||||||
|
Records []struct {
|
||||||
|
ID int `json:"id"`
|
||||||
|
DownloadID string `json:"downloadId"`
|
||||||
|
} `json:"records"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *Arr) GetHistory(downloadId, eventType string) *HistorySchema {
|
||||||
|
query := gourl.Values{}
|
||||||
|
if downloadId != "" {
|
||||||
|
query.Add("downloadId", downloadId)
|
||||||
|
}
|
||||||
|
query.Add("eventType", eventType)
|
||||||
|
query.Add("pageSize", "100")
|
||||||
|
url := "history" + "?" + query.Encode()
|
||||||
|
resp, err := a.Request(http.MethodGet, url, nil)
|
||||||
|
if err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
var data *HistorySchema
|
||||||
|
|
||||||
|
if err = json.NewDecoder(resp.Body).Decode(&data); err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return data
|
||||||
|
|
||||||
|
}
|
||||||
209
pkg/arr/import.go
Normal file
209
pkg/arr/import.go
Normal file
@@ -0,0 +1,209 @@
|
|||||||
|
package arr
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
gourl "net/url"
|
||||||
|
"strconv"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
type ImportResponseSchema struct {
|
||||||
|
Path string `json:"path"`
|
||||||
|
RelativePath string `json:"relativePath"`
|
||||||
|
FolderName string `json:"folderName"`
|
||||||
|
Name string `json:"name"`
|
||||||
|
Size int `json:"size"`
|
||||||
|
Series struct {
|
||||||
|
Title string `json:"title"`
|
||||||
|
SortTitle string `json:"sortTitle"`
|
||||||
|
Status string `json:"status"`
|
||||||
|
Ended bool `json:"ended"`
|
||||||
|
Overview string `json:"overview"`
|
||||||
|
Network string `json:"network"`
|
||||||
|
AirTime string `json:"airTime"`
|
||||||
|
Images []struct {
|
||||||
|
CoverType string `json:"coverType"`
|
||||||
|
RemoteUrl string `json:"remoteUrl"`
|
||||||
|
} `json:"images"`
|
||||||
|
OriginalLanguage struct {
|
||||||
|
Id int `json:"id"`
|
||||||
|
Name string `json:"name"`
|
||||||
|
} `json:"originalLanguage"`
|
||||||
|
Seasons []struct {
|
||||||
|
SeasonNumber int `json:"seasonNumber"`
|
||||||
|
Monitored bool `json:"monitored"`
|
||||||
|
} `json:"seasons"`
|
||||||
|
Year int `json:"year"`
|
||||||
|
Path string `json:"path"`
|
||||||
|
QualityProfileId int `json:"qualityProfileId"`
|
||||||
|
SeasonFolder bool `json:"seasonFolder"`
|
||||||
|
Monitored bool `json:"monitored"`
|
||||||
|
MonitorNewItems string `json:"monitorNewItems"`
|
||||||
|
UseSceneNumbering bool `json:"useSceneNumbering"`
|
||||||
|
Runtime int `json:"runtime"`
|
||||||
|
TvdbId int `json:"tvdbId"`
|
||||||
|
TvRageId int `json:"tvRageId"`
|
||||||
|
TvMazeId int `json:"tvMazeId"`
|
||||||
|
TmdbId int `json:"tmdbId"`
|
||||||
|
FirstAired time.Time `json:"firstAired"`
|
||||||
|
LastAired time.Time `json:"lastAired"`
|
||||||
|
SeriesType string `json:"seriesType"`
|
||||||
|
CleanTitle string `json:"cleanTitle"`
|
||||||
|
ImdbId string `json:"imdbId"`
|
||||||
|
TitleSlug string `json:"titleSlug"`
|
||||||
|
Certification string `json:"certification"`
|
||||||
|
Genres []string `json:"genres"`
|
||||||
|
Tags []interface{} `json:"tags"`
|
||||||
|
Added time.Time `json:"added"`
|
||||||
|
Ratings struct {
|
||||||
|
Votes int `json:"votes"`
|
||||||
|
Value float64 `json:"value"`
|
||||||
|
} `json:"ratings"`
|
||||||
|
LanguageProfileId int `json:"languageProfileId"`
|
||||||
|
Id int `json:"id"`
|
||||||
|
} `json:"series"`
|
||||||
|
SeasonNumber int `json:"seasonNumber"`
|
||||||
|
Episodes []struct {
|
||||||
|
SeriesId int `json:"seriesId"`
|
||||||
|
TvdbId int `json:"tvdbId"`
|
||||||
|
EpisodeFileId int `json:"episodeFileId"`
|
||||||
|
SeasonNumber int `json:"seasonNumber"`
|
||||||
|
EpisodeNumber int `json:"episodeNumber"`
|
||||||
|
Title string `json:"title"`
|
||||||
|
AirDate string `json:"airDate"`
|
||||||
|
AirDateUtc time.Time `json:"airDateUtc"`
|
||||||
|
Runtime int `json:"runtime"`
|
||||||
|
Overview string `json:"overview"`
|
||||||
|
HasFile bool `json:"hasFile"`
|
||||||
|
Monitored bool `json:"monitored"`
|
||||||
|
AbsoluteEpisodeNumber int `json:"absoluteEpisodeNumber"`
|
||||||
|
UnverifiedSceneNumbering bool `json:"unverifiedSceneNumbering"`
|
||||||
|
Id int `json:"id"`
|
||||||
|
FinaleType string `json:"finaleType,omitempty"`
|
||||||
|
} `json:"episodes"`
|
||||||
|
ReleaseGroup string `json:"releaseGroup"`
|
||||||
|
Quality struct {
|
||||||
|
Quality struct {
|
||||||
|
Id int `json:"id"`
|
||||||
|
Name string `json:"name"`
|
||||||
|
Source string `json:"source"`
|
||||||
|
Resolution int `json:"resolution"`
|
||||||
|
} `json:"quality"`
|
||||||
|
Revision struct {
|
||||||
|
Version int `json:"version"`
|
||||||
|
Real int `json:"real"`
|
||||||
|
IsRepack bool `json:"isRepack"`
|
||||||
|
} `json:"revision"`
|
||||||
|
} `json:"quality"`
|
||||||
|
Languages []struct {
|
||||||
|
Id int `json:"id"`
|
||||||
|
Name string `json:"name"`
|
||||||
|
} `json:"languages"`
|
||||||
|
QualityWeight int `json:"qualityWeight"`
|
||||||
|
CustomFormats []interface{} `json:"customFormats"`
|
||||||
|
CustomFormatScore int `json:"customFormatScore"`
|
||||||
|
IndexerFlags int `json:"indexerFlags"`
|
||||||
|
ReleaseType string `json:"releaseType"`
|
||||||
|
Rejections []struct {
|
||||||
|
Reason string `json:"reason"`
|
||||||
|
Type string `json:"type"`
|
||||||
|
} `json:"rejections"`
|
||||||
|
Id int `json:"id"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type ManualImportRequestFile struct {
|
||||||
|
Path string `json:"path"`
|
||||||
|
SeriesId int `json:"seriesId"`
|
||||||
|
SeasonNumber int `json:"seasonNumber"`
|
||||||
|
EpisodeIds []int `json:"episodeIds"`
|
||||||
|
Quality struct {
|
||||||
|
Quality struct {
|
||||||
|
Id int `json:"id"`
|
||||||
|
Name string `json:"name"`
|
||||||
|
Source string `json:"source"`
|
||||||
|
Resolution int `json:"resolution"`
|
||||||
|
} `json:"quality"`
|
||||||
|
Revision struct {
|
||||||
|
Version int `json:"version"`
|
||||||
|
Real int `json:"real"`
|
||||||
|
IsRepack bool `json:"isRepack"`
|
||||||
|
} `json:"revision"`
|
||||||
|
} `json:"quality"`
|
||||||
|
Languages []struct {
|
||||||
|
Id int `json:"id"`
|
||||||
|
Name string `json:"name"`
|
||||||
|
} `json:"languages"`
|
||||||
|
ReleaseGroup string `json:"releaseGroup"`
|
||||||
|
CustomFormats []interface{} `json:"customFormats"`
|
||||||
|
CustomFormatScore int `json:"customFormatScore"`
|
||||||
|
IndexerFlags int `json:"indexerFlags"`
|
||||||
|
ReleaseType string `json:"releaseType"`
|
||||||
|
Rejections []struct {
|
||||||
|
Reason string `json:"reason"`
|
||||||
|
Type string `json:"type"`
|
||||||
|
} `json:"rejections"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type ManualImportRequestSchema struct {
|
||||||
|
Name string `json:"name"`
|
||||||
|
Files []ManualImportRequestFile `json:"files"`
|
||||||
|
ImportMode string `json:"importMode"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *Arr) Import(path string, seriesId int, seasons []int) (io.ReadCloser, error) {
|
||||||
|
query := gourl.Values{}
|
||||||
|
query.Add("folder", path)
|
||||||
|
if seriesId != 0 {
|
||||||
|
query.Add("seriesId", strconv.Itoa(seriesId))
|
||||||
|
}
|
||||||
|
url := "api/v3/manualimport" + "?" + query.Encode()
|
||||||
|
resp, err := a.Request(http.MethodGet, url, nil)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to import, invalid file: %w", err)
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
var data []ImportResponseSchema
|
||||||
|
if err = json.NewDecoder(resp.Body).Decode(&data); err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to decode response: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
var files []ManualImportRequestFile
|
||||||
|
for _, d := range data {
|
||||||
|
episodesIds := []int{}
|
||||||
|
for _, e := range d.Episodes {
|
||||||
|
episodesIds = append(episodesIds, e.Id)
|
||||||
|
}
|
||||||
|
file := ManualImportRequestFile{
|
||||||
|
Path: d.Path,
|
||||||
|
SeriesId: d.Series.Id,
|
||||||
|
SeasonNumber: d.SeasonNumber,
|
||||||
|
EpisodeIds: episodesIds,
|
||||||
|
Quality: d.Quality,
|
||||||
|
Languages: d.Languages,
|
||||||
|
ReleaseGroup: d.ReleaseGroup,
|
||||||
|
CustomFormats: d.CustomFormats,
|
||||||
|
CustomFormatScore: d.CustomFormatScore,
|
||||||
|
IndexerFlags: d.IndexerFlags,
|
||||||
|
ReleaseType: d.ReleaseType,
|
||||||
|
Rejections: d.Rejections,
|
||||||
|
}
|
||||||
|
files = append(files, file)
|
||||||
|
}
|
||||||
|
request := ManualImportRequestSchema{
|
||||||
|
Name: "ManualImport",
|
||||||
|
Files: files,
|
||||||
|
ImportMode: "copy",
|
||||||
|
}
|
||||||
|
|
||||||
|
url = "api/v3/command"
|
||||||
|
resp, err = a.Request(http.MethodPost, url, request)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to import: %w", err)
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
return resp.Body, nil
|
||||||
|
|
||||||
|
}
|
||||||
54
pkg/arr/refresh.go
Normal file
54
pkg/arr/refresh.go
Normal file
@@ -0,0 +1,54 @@
|
|||||||
|
package arr
|
||||||
|
|
||||||
|
import (
|
||||||
|
"cmp"
|
||||||
|
"fmt"
|
||||||
|
"goBlack/common"
|
||||||
|
"net/http"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
func (a *Arr) Refresh() error {
|
||||||
|
payload := map[string]string{"name": "RefreshMonitoredDownloads"}
|
||||||
|
|
||||||
|
resp, err := a.Request(http.MethodPost, "api/v3/command", payload)
|
||||||
|
if err == nil && resp != nil {
|
||||||
|
statusOk := strconv.Itoa(resp.StatusCode)[0] == '2'
|
||||||
|
if statusOk {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return fmt.Errorf("failed to refresh monitored downloads for %s", cmp.Or(a.Name, a.Host))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *Arr) MarkAsFailed(infoHash string) error {
|
||||||
|
downloadId := strings.ToUpper(infoHash)
|
||||||
|
history := a.GetHistory(downloadId, "grabbed")
|
||||||
|
if history == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
torrentId := 0
|
||||||
|
for _, record := range history.Records {
|
||||||
|
if strings.EqualFold(record.DownloadID, downloadId) {
|
||||||
|
torrentId = record.ID
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if torrentId != 0 {
|
||||||
|
url, err := common.JoinURL(a.Host, "history/failed/", strconv.Itoa(torrentId))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
req, err := http.NewRequest(http.MethodPost, url, nil)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
client := &http.Client{}
|
||||||
|
_, err = client.Do(req)
|
||||||
|
if err == nil {
|
||||||
|
return fmt.Errorf("failed to mark %s as failed: %v", cmp.Or(a.Name, a.Host), err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
31
pkg/arr/tmdb.go
Normal file
31
pkg/arr/tmdb.go
Normal file
@@ -0,0 +1,31 @@
|
|||||||
|
package arr
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"net/http"
|
||||||
|
url2 "net/url"
|
||||||
|
)
|
||||||
|
|
||||||
|
type TMDBResponse struct {
|
||||||
|
Page int `json:"page"`
|
||||||
|
Results []struct {
|
||||||
|
ID int `json:"id"`
|
||||||
|
Name string `json:"name"`
|
||||||
|
MediaType string `json:"media_type"`
|
||||||
|
PosterPath string `json:"poster_path"`
|
||||||
|
} `json:"results"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func SearchTMDB(term string) (*TMDBResponse, error) {
|
||||||
|
resp, err := http.Get("https://api.themoviedb.org/3/search/multi?api_key=key&query=" + url2.QueryEscape(term))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var data *TMDBResponse
|
||||||
|
if err = json.NewDecoder(resp.Body).Decode(&data); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return data, nil
|
||||||
|
}
|
||||||
@@ -4,10 +4,23 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"github.com/anacrolix/torrent/metainfo"
|
"github.com/anacrolix/torrent/metainfo"
|
||||||
"goBlack/common"
|
"goBlack/common"
|
||||||
|
"goBlack/pkg/arr"
|
||||||
"log"
|
"log"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
type BaseDebrid struct {
|
||||||
|
Name string
|
||||||
|
Host string `json:"host"`
|
||||||
|
APIKey string
|
||||||
|
DownloadUncached bool
|
||||||
|
client *common.RLHTTPClient
|
||||||
|
cache *common.Cache
|
||||||
|
MountPath string
|
||||||
|
logger *log.Logger
|
||||||
|
CheckCached bool
|
||||||
|
}
|
||||||
|
|
||||||
type Service interface {
|
type Service interface {
|
||||||
SubmitMagnet(torrent *Torrent) (*Torrent, error)
|
SubmitMagnet(torrent *Torrent) (*Torrent, error)
|
||||||
CheckStatus(torrent *Torrent, isSymlink bool) (*Torrent, error)
|
CheckStatus(torrent *Torrent, isSymlink bool) (*Torrent, error)
|
||||||
@@ -15,26 +28,34 @@ type Service interface {
|
|||||||
DeleteTorrent(torrent *Torrent)
|
DeleteTorrent(torrent *Torrent)
|
||||||
IsAvailable(infohashes []string) map[string]bool
|
IsAvailable(infohashes []string) map[string]bool
|
||||||
GetMountPath() string
|
GetMountPath() string
|
||||||
GetDownloadUncached() bool
|
GetCheckCached() bool
|
||||||
GetTorrent(id string) (*Torrent, error)
|
GetTorrent(id string) (*Torrent, error)
|
||||||
GetName() string
|
GetName() string
|
||||||
GetLogger() *log.Logger
|
GetLogger() *log.Logger
|
||||||
}
|
}
|
||||||
|
|
||||||
type Debrid struct {
|
func NewDebrid(debs []common.DebridConfig, maxCachedSize int) *DebridService {
|
||||||
Host string `json:"host"`
|
debrids := make([]Service, 0)
|
||||||
APIKey string
|
// Divide the cache size by the number of debrids
|
||||||
DownloadUncached bool
|
maxCacheSize := maxCachedSize / len(debs)
|
||||||
client *common.RLHTTPClient
|
|
||||||
cache *common.Cache
|
for _, dc := range debs {
|
||||||
MountPath string
|
d := createDebrid(dc, common.NewCache(maxCacheSize))
|
||||||
logger *log.Logger
|
d.GetLogger().Println("Debrid Service started")
|
||||||
|
debrids = append(debrids, d)
|
||||||
|
}
|
||||||
|
d := &DebridService{debrids: debrids, lastUsed: 0}
|
||||||
|
return d
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewDebrid(dc common.DebridConfig, cache *common.Cache) Service {
|
func createDebrid(dc common.DebridConfig, cache *common.Cache) Service {
|
||||||
switch dc.Name {
|
switch dc.Name {
|
||||||
case "realdebrid":
|
case "realdebrid":
|
||||||
return NewRealDebrid(dc, cache)
|
return NewRealDebrid(dc, cache)
|
||||||
|
case "torbox":
|
||||||
|
return NewTorbox(dc, cache)
|
||||||
|
case "debridlink":
|
||||||
|
return NewDebridLink(dc, cache)
|
||||||
default:
|
default:
|
||||||
return NewRealDebrid(dc, cache)
|
return NewRealDebrid(dc, cache)
|
||||||
}
|
}
|
||||||
@@ -77,16 +98,17 @@ func getTorrentInfo(filePath string) (*Torrent, error) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
infoLength := info.Length
|
||||||
magnet := &common.Magnet{
|
magnet := &common.Magnet{
|
||||||
InfoHash: infoHash,
|
InfoHash: infoHash,
|
||||||
Name: info.Name,
|
Name: info.Name,
|
||||||
Size: info.Length,
|
Size: infoLength,
|
||||||
Link: mi.Magnet(&hash, &info).String(),
|
Link: mi.Magnet(&hash, &info).String(),
|
||||||
}
|
}
|
||||||
torrent := &Torrent{
|
torrent := &Torrent{
|
||||||
InfoHash: infoHash,
|
InfoHash: infoHash,
|
||||||
Name: info.Name,
|
Name: info.Name,
|
||||||
Size: info.Length,
|
Size: infoLength,
|
||||||
Magnet: magnet,
|
Magnet: magnet,
|
||||||
Filename: filePath,
|
Filename: filePath,
|
||||||
}
|
}
|
||||||
@@ -95,7 +117,7 @@ func getTorrentInfo(filePath string) (*Torrent, error) {
|
|||||||
|
|
||||||
func GetLocalCache(infohashes []string, cache *common.Cache) ([]string, map[string]bool) {
|
func GetLocalCache(infohashes []string, cache *common.Cache) ([]string, map[string]bool) {
|
||||||
result := make(map[string]bool)
|
result := make(map[string]bool)
|
||||||
hashes := make([]string, len(infohashes))
|
hashes := make([]string, 0)
|
||||||
|
|
||||||
if len(infohashes) == 0 {
|
if len(infohashes) == 0 {
|
||||||
return hashes, result
|
return hashes, result
|
||||||
@@ -117,32 +139,50 @@ func GetLocalCache(infohashes []string, cache *common.Cache) ([]string, map[stri
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return hashes, result
|
return infohashes, result
|
||||||
}
|
}
|
||||||
|
|
||||||
func ProcessQBitTorrent(d Service, magnet *common.Magnet, arr *Arr, isSymlink bool) (*Torrent, error) {
|
func ProcessTorrent(d *DebridService, magnet *common.Magnet, a *arr.Arr, isSymlink bool) (*Torrent, error) {
|
||||||
debridTorrent := &Torrent{
|
debridTorrent := &Torrent{
|
||||||
InfoHash: magnet.InfoHash,
|
InfoHash: magnet.InfoHash,
|
||||||
Magnet: magnet,
|
Magnet: magnet,
|
||||||
Name: magnet.Name,
|
Name: magnet.Name,
|
||||||
Arr: arr,
|
Arr: a,
|
||||||
Size: magnet.Size,
|
Size: magnet.Size,
|
||||||
}
|
}
|
||||||
logger := d.GetLogger()
|
|
||||||
logger.Printf("Torrent Hash: %s", debridTorrent.InfoHash)
|
|
||||||
if !d.GetDownloadUncached() {
|
|
||||||
hash, exists := d.IsAvailable([]string{debridTorrent.InfoHash})[debridTorrent.InfoHash]
|
|
||||||
if !exists || !hash {
|
|
||||||
return debridTorrent, fmt.Errorf("torrent: %s is not cached", debridTorrent.Name)
|
|
||||||
} else {
|
|
||||||
logger.Printf("Torrent: %s is cached(or downloading)", debridTorrent.Name)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
debridTorrent, err := d.SubmitMagnet(debridTorrent)
|
errs := make([]error, 0)
|
||||||
if err != nil || debridTorrent.Id == "" {
|
|
||||||
logger.Printf("Error submitting magnet: %s", err)
|
for index, db := range d.debrids {
|
||||||
return nil, err
|
log.Println("Processing debrid: ", db.GetName())
|
||||||
|
logger := db.GetLogger()
|
||||||
|
logger.Printf("Torrent Hash: %s", debridTorrent.InfoHash)
|
||||||
|
if db.GetCheckCached() {
|
||||||
|
hash, exists := db.IsAvailable([]string{debridTorrent.InfoHash})[debridTorrent.InfoHash]
|
||||||
|
if !exists || !hash {
|
||||||
|
logger.Printf("Torrent: %s is not cached", debridTorrent.Name)
|
||||||
|
continue
|
||||||
|
} else {
|
||||||
|
logger.Printf("Torrent: %s is cached(or downloading)", debridTorrent.Name)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
dbt, err := db.SubmitMagnet(debridTorrent)
|
||||||
|
if dbt != nil {
|
||||||
|
dbt.Debrid = db
|
||||||
|
dbt.Arr = a
|
||||||
|
}
|
||||||
|
if err != nil || dbt == nil || dbt.Id == "" {
|
||||||
|
errs = append(errs, err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
logger.Printf("Torrent: %s submitted to %s", dbt.Name, db.GetName())
|
||||||
|
d.lastUsed = index
|
||||||
|
return db.CheckStatus(dbt, isSymlink)
|
||||||
}
|
}
|
||||||
return d.CheckStatus(debridTorrent, isSymlink)
|
err := fmt.Errorf("failed to process torrent")
|
||||||
|
for _, e := range errs {
|
||||||
|
err = fmt.Errorf("%w\n%w", err, e)
|
||||||
|
}
|
||||||
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|||||||
271
pkg/debrid/debrid_link.go
Normal file
271
pkg/debrid/debrid_link.go
Normal file
@@ -0,0 +1,271 @@
|
|||||||
|
package debrid
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"goBlack/common"
|
||||||
|
"goBlack/pkg/debrid/structs"
|
||||||
|
"log"
|
||||||
|
"net/http"
|
||||||
|
"os"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
type DebridLink struct {
|
||||||
|
BaseDebrid
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *DebridLink) GetMountPath() string {
|
||||||
|
return r.MountPath
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *DebridLink) GetName() string {
|
||||||
|
return r.Name
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *DebridLink) GetLogger() *log.Logger {
|
||||||
|
return r.logger
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *DebridLink) IsAvailable(infohashes []string) map[string]bool {
|
||||||
|
// Check if the infohashes are available in the local cache
|
||||||
|
hashes, result := GetLocalCache(infohashes, r.cache)
|
||||||
|
|
||||||
|
if len(hashes) == 0 {
|
||||||
|
// Either all the infohashes are locally cached or none are
|
||||||
|
r.cache.AddMultiple(result)
|
||||||
|
return result
|
||||||
|
}
|
||||||
|
|
||||||
|
// Divide hashes into groups of 100
|
||||||
|
for i := 0; i < len(hashes); i += 100 {
|
||||||
|
end := i + 100
|
||||||
|
if end > len(hashes) {
|
||||||
|
end = len(hashes)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Filter out empty strings
|
||||||
|
validHashes := make([]string, 0, end-i)
|
||||||
|
for _, hash := range hashes[i:end] {
|
||||||
|
if hash != "" {
|
||||||
|
validHashes = append(validHashes, hash)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// If no valid hashes in this batch, continue to the next batch
|
||||||
|
if len(validHashes) == 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
hashStr := strings.Join(validHashes, ",")
|
||||||
|
url := fmt.Sprintf("%s/seedbox/cached/%s", r.Host, hashStr)
|
||||||
|
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
||||||
|
resp, err := r.client.MakeRequest(req)
|
||||||
|
if err != nil {
|
||||||
|
log.Println("Error checking availability:", err)
|
||||||
|
return result
|
||||||
|
}
|
||||||
|
var data structs.DebridLinkAvailableResponse
|
||||||
|
err = json.Unmarshal(resp, &data)
|
||||||
|
if err != nil {
|
||||||
|
log.Println("Error marshalling availability:", err)
|
||||||
|
return result
|
||||||
|
}
|
||||||
|
if data.Value == nil {
|
||||||
|
return result
|
||||||
|
}
|
||||||
|
value := *data.Value
|
||||||
|
for _, h := range hashes[i:end] {
|
||||||
|
_, exists := value[h]
|
||||||
|
if exists {
|
||||||
|
result[h] = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
r.cache.AddMultiple(result) // Add the results to the cache
|
||||||
|
return result
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *DebridLink) GetTorrent(id string) (*Torrent, error) {
|
||||||
|
torrent := &Torrent{}
|
||||||
|
url := fmt.Sprintf("%s/seedbox/list?ids=%s", r.Host, id)
|
||||||
|
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
||||||
|
resp, err := r.client.MakeRequest(req)
|
||||||
|
if err != nil {
|
||||||
|
return torrent, err
|
||||||
|
}
|
||||||
|
var res structs.DebridLinkTorrentInfo
|
||||||
|
err = json.Unmarshal(resp, &res)
|
||||||
|
if err != nil {
|
||||||
|
return torrent, err
|
||||||
|
}
|
||||||
|
if res.Success == false {
|
||||||
|
return torrent, fmt.Errorf("error getting torrent")
|
||||||
|
}
|
||||||
|
if res.Value == nil {
|
||||||
|
return torrent, fmt.Errorf("torrent not found")
|
||||||
|
}
|
||||||
|
dt := *res.Value
|
||||||
|
|
||||||
|
if len(dt) == 0 {
|
||||||
|
return torrent, fmt.Errorf("torrent not found")
|
||||||
|
}
|
||||||
|
data := dt[0]
|
||||||
|
status := "downloading"
|
||||||
|
if data.Status == 100 {
|
||||||
|
status = "downloaded"
|
||||||
|
}
|
||||||
|
name := common.RemoveInvalidChars(data.Name)
|
||||||
|
torrent.Id = data.ID
|
||||||
|
torrent.Name = name
|
||||||
|
torrent.Bytes = data.TotalSize
|
||||||
|
torrent.Folder = name
|
||||||
|
torrent.Progress = data.DownloadPercent
|
||||||
|
torrent.Status = status
|
||||||
|
torrent.Speed = data.DownloadSpeed
|
||||||
|
torrent.Seeders = data.PeersConnected
|
||||||
|
torrent.Filename = name
|
||||||
|
torrent.OriginalFilename = name
|
||||||
|
files := make([]TorrentFile, len(data.Files))
|
||||||
|
for i, f := range data.Files {
|
||||||
|
files[i] = TorrentFile{
|
||||||
|
Id: f.ID,
|
||||||
|
Name: f.Name,
|
||||||
|
Size: f.Size,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
torrent.Files = files
|
||||||
|
torrent.Debrid = r
|
||||||
|
return torrent, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *DebridLink) SubmitMagnet(torrent *Torrent) (*Torrent, error) {
|
||||||
|
url := fmt.Sprintf("%s/seedbox/add", r.Host)
|
||||||
|
payload := map[string]string{"url": torrent.Magnet.Link}
|
||||||
|
jsonPayload, _ := json.Marshal(payload)
|
||||||
|
req, _ := http.NewRequest(http.MethodPost, url, bytes.NewBuffer(jsonPayload))
|
||||||
|
resp, err := r.client.MakeRequest(req)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
var res structs.DebridLinkSubmitTorrentInfo
|
||||||
|
err = json.Unmarshal(resp, &res)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if res.Success == false || res.Value == nil {
|
||||||
|
return nil, fmt.Errorf("error adding torrent")
|
||||||
|
}
|
||||||
|
data := *res.Value
|
||||||
|
status := "downloading"
|
||||||
|
log.Printf("Torrent: %s added with id: %s\n", torrent.Name, data.ID)
|
||||||
|
name := common.RemoveInvalidChars(data.Name)
|
||||||
|
torrent.Id = data.ID
|
||||||
|
torrent.Name = name
|
||||||
|
torrent.Bytes = data.TotalSize
|
||||||
|
torrent.Folder = name
|
||||||
|
torrent.Progress = data.DownloadPercent
|
||||||
|
torrent.Status = status
|
||||||
|
torrent.Speed = data.DownloadSpeed
|
||||||
|
torrent.Seeders = data.PeersConnected
|
||||||
|
torrent.Filename = name
|
||||||
|
torrent.OriginalFilename = name
|
||||||
|
files := make([]TorrentFile, len(data.Files))
|
||||||
|
for i, f := range data.Files {
|
||||||
|
files[i] = TorrentFile{
|
||||||
|
Id: f.ID,
|
||||||
|
Name: f.Name,
|
||||||
|
Size: f.Size,
|
||||||
|
Link: f.DownloadURL,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
torrent.Files = files
|
||||||
|
torrent.Debrid = r
|
||||||
|
|
||||||
|
return torrent, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *DebridLink) CheckStatus(torrent *Torrent, isSymlink bool) (*Torrent, error) {
|
||||||
|
for {
|
||||||
|
t, err := r.GetTorrent(torrent.Id)
|
||||||
|
torrent = t
|
||||||
|
if err != nil || torrent == nil {
|
||||||
|
return torrent, err
|
||||||
|
}
|
||||||
|
status := torrent.Status
|
||||||
|
if status == "error" || status == "dead" || status == "magnet_error" {
|
||||||
|
return torrent, fmt.Errorf("torrent: %s has error", torrent.Name)
|
||||||
|
} else if status == "downloaded" {
|
||||||
|
r.logger.Printf("Torrent: %s downloaded\n", torrent.Name)
|
||||||
|
if !isSymlink {
|
||||||
|
err = r.GetDownloadLinks(torrent)
|
||||||
|
if err != nil {
|
||||||
|
return torrent, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
break
|
||||||
|
} else if status == "downloading" {
|
||||||
|
if !r.DownloadUncached {
|
||||||
|
go torrent.Delete()
|
||||||
|
return torrent, fmt.Errorf("torrent: %s not cached", torrent.Name)
|
||||||
|
}
|
||||||
|
// Break out of the loop if the torrent is downloading.
|
||||||
|
// This is necessary to prevent infinite loop since we moved to sync downloading and async processing
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
return torrent, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *DebridLink) DeleteTorrent(torrent *Torrent) {
|
||||||
|
url := fmt.Sprintf("%s/seedbox/%s/remove", r.Host, torrent.Id)
|
||||||
|
req, _ := http.NewRequest(http.MethodDelete, url, nil)
|
||||||
|
_, err := r.client.MakeRequest(req)
|
||||||
|
if err == nil {
|
||||||
|
r.logger.Printf("Torrent: %s deleted\n", torrent.Name)
|
||||||
|
} else {
|
||||||
|
r.logger.Printf("Error deleting torrent: %s", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *DebridLink) GetDownloadLinks(torrent *Torrent) error {
|
||||||
|
downloadLinks := make([]TorrentDownloadLinks, 0)
|
||||||
|
for _, f := range torrent.Files {
|
||||||
|
dl := TorrentDownloadLinks{
|
||||||
|
Link: f.Link,
|
||||||
|
Filename: f.Name,
|
||||||
|
}
|
||||||
|
downloadLinks = append(downloadLinks, dl)
|
||||||
|
}
|
||||||
|
torrent.DownloadLinks = downloadLinks
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *DebridLink) GetCheckCached() bool {
|
||||||
|
return r.CheckCached
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewDebridLink(dc common.DebridConfig, cache *common.Cache) *DebridLink {
|
||||||
|
rl := common.ParseRateLimit(dc.RateLimit)
|
||||||
|
headers := map[string]string{
|
||||||
|
"Authorization": fmt.Sprintf("Bearer %s", dc.APIKey),
|
||||||
|
"Content-Type": "application/json",
|
||||||
|
}
|
||||||
|
client := common.NewRLHTTPClient(rl, headers)
|
||||||
|
logger := common.NewLogger(dc.Name, os.Stdout)
|
||||||
|
return &DebridLink{
|
||||||
|
BaseDebrid: BaseDebrid{
|
||||||
|
Name: "debridlink",
|
||||||
|
Host: dc.Host,
|
||||||
|
APIKey: dc.APIKey,
|
||||||
|
DownloadUncached: dc.DownloadUncached,
|
||||||
|
client: client,
|
||||||
|
cache: cache,
|
||||||
|
MountPath: dc.Folder,
|
||||||
|
logger: logger,
|
||||||
|
CheckCached: dc.CheckCached,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -10,18 +10,13 @@ import (
|
|||||||
gourl "net/url"
|
gourl "net/url"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
|
"slices"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
)
|
)
|
||||||
|
|
||||||
type RealDebrid struct {
|
type RealDebrid struct {
|
||||||
Host string `json:"host"`
|
BaseDebrid
|
||||||
APIKey string
|
|
||||||
DownloadUncached bool
|
|
||||||
client *common.RLHTTPClient
|
|
||||||
cache *common.Cache
|
|
||||||
MountPath string
|
|
||||||
logger *log.Logger
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *RealDebrid) GetMountPath() string {
|
func (r *RealDebrid) GetMountPath() string {
|
||||||
@@ -29,7 +24,7 @@ func (r *RealDebrid) GetMountPath() string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (r *RealDebrid) GetName() string {
|
func (r *RealDebrid) GetName() string {
|
||||||
return "realdebrid"
|
return r.Name
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *RealDebrid) GetLogger() *log.Logger {
|
func (r *RealDebrid) GetLogger() *log.Logger {
|
||||||
@@ -46,13 +41,13 @@ func GetTorrentFiles(data structs.RealDebridTorrentInfo) []TorrentFile {
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
fileId := f.ID
|
fileId := f.ID
|
||||||
file := &TorrentFile{
|
file := TorrentFile{
|
||||||
Name: name,
|
Name: name,
|
||||||
Path: name,
|
Path: name,
|
||||||
Size: int64(f.Bytes),
|
Size: f.Bytes,
|
||||||
Id: strconv.Itoa(fileId),
|
Id: strconv.Itoa(fileId),
|
||||||
}
|
}
|
||||||
files = append(files, *file)
|
files = append(files, file)
|
||||||
}
|
}
|
||||||
return files
|
return files
|
||||||
}
|
}
|
||||||
@@ -89,7 +84,8 @@ func (r *RealDebrid) IsAvailable(infohashes []string) map[string]bool {
|
|||||||
|
|
||||||
hashStr := strings.Join(validHashes, "/")
|
hashStr := strings.Join(validHashes, "/")
|
||||||
url := fmt.Sprintf("%s/torrents/instantAvailability/%s", r.Host, hashStr)
|
url := fmt.Sprintf("%s/torrents/instantAvailability/%s", r.Host, hashStr)
|
||||||
resp, err := r.client.MakeRequest(http.MethodGet, url, nil)
|
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
||||||
|
resp, err := r.client.MakeRequest(req)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Println("Error checking availability:", err)
|
log.Println("Error checking availability:", err)
|
||||||
return result
|
return result
|
||||||
@@ -117,7 +113,8 @@ func (r *RealDebrid) SubmitMagnet(torrent *Torrent) (*Torrent, error) {
|
|||||||
"magnet": {torrent.Magnet.Link},
|
"magnet": {torrent.Magnet.Link},
|
||||||
}
|
}
|
||||||
var data structs.RealDebridAddMagnetSchema
|
var data structs.RealDebridAddMagnetSchema
|
||||||
resp, err := r.client.MakeRequest(http.MethodPost, url, strings.NewReader(payload.Encode()))
|
req, _ := http.NewRequest(http.MethodPost, url, strings.NewReader(payload.Encode()))
|
||||||
|
resp, err := r.client.MakeRequest(req)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -131,7 +128,8 @@ func (r *RealDebrid) SubmitMagnet(torrent *Torrent) (*Torrent, error) {
|
|||||||
func (r *RealDebrid) GetTorrent(id string) (*Torrent, error) {
|
func (r *RealDebrid) GetTorrent(id string) (*Torrent, error) {
|
||||||
torrent := &Torrent{}
|
torrent := &Torrent{}
|
||||||
url := fmt.Sprintf("%s/torrents/info/%s", r.Host, id)
|
url := fmt.Sprintf("%s/torrents/info/%s", r.Host, id)
|
||||||
resp, err := r.client.MakeRequest(http.MethodGet, url, nil)
|
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
||||||
|
resp, err := r.client.MakeRequest(req)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return torrent, err
|
return torrent, err
|
||||||
}
|
}
|
||||||
@@ -152,6 +150,7 @@ func (r *RealDebrid) GetTorrent(id string) (*Torrent, error) {
|
|||||||
torrent.Filename = data.Filename
|
torrent.Filename = data.Filename
|
||||||
torrent.OriginalFilename = data.OriginalFilename
|
torrent.OriginalFilename = data.OriginalFilename
|
||||||
torrent.Links = data.Links
|
torrent.Links = data.Links
|
||||||
|
torrent.Debrid = r
|
||||||
files := GetTorrentFiles(data)
|
files := GetTorrentFiles(data)
|
||||||
torrent.Files = files
|
torrent.Files = files
|
||||||
return torrent, nil
|
return torrent, nil
|
||||||
@@ -159,8 +158,9 @@ func (r *RealDebrid) GetTorrent(id string) (*Torrent, error) {
|
|||||||
|
|
||||||
func (r *RealDebrid) CheckStatus(torrent *Torrent, isSymlink bool) (*Torrent, error) {
|
func (r *RealDebrid) CheckStatus(torrent *Torrent, isSymlink bool) (*Torrent, error) {
|
||||||
url := fmt.Sprintf("%s/torrents/info/%s", r.Host, torrent.Id)
|
url := fmt.Sprintf("%s/torrents/info/%s", r.Host, torrent.Id)
|
||||||
|
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
||||||
for {
|
for {
|
||||||
resp, err := r.client.MakeRequest(http.MethodGet, url, nil)
|
resp, err := r.client.MakeRequest(req)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Println("ERROR Checking file: ", err)
|
log.Println("ERROR Checking file: ", err)
|
||||||
return torrent, err
|
return torrent, err
|
||||||
@@ -179,8 +179,10 @@ func (r *RealDebrid) CheckStatus(torrent *Torrent, isSymlink bool) (*Torrent, er
|
|||||||
torrent.Seeders = data.Seeders
|
torrent.Seeders = data.Seeders
|
||||||
torrent.Links = data.Links
|
torrent.Links = data.Links
|
||||||
torrent.Status = status
|
torrent.Status = status
|
||||||
|
torrent.Debrid = r
|
||||||
|
downloadingStatus := []string{"downloading", "magnet_conversion", "queued", "compressing", "uploading"}
|
||||||
if status == "error" || status == "dead" || status == "magnet_error" {
|
if status == "error" || status == "dead" || status == "magnet_error" {
|
||||||
return torrent, fmt.Errorf("torrent: %s has error", torrent.Name)
|
return torrent, fmt.Errorf("torrent: %s has error: %s", torrent.Name, status)
|
||||||
} else if status == "waiting_files_selection" {
|
} else if status == "waiting_files_selection" {
|
||||||
files := GetTorrentFiles(data)
|
files := GetTorrentFiles(data)
|
||||||
torrent.Files = files
|
torrent.Files = files
|
||||||
@@ -195,7 +197,8 @@ func (r *RealDebrid) CheckStatus(torrent *Torrent, isSymlink bool) (*Torrent, er
|
|||||||
"files": {strings.Join(filesId, ",")},
|
"files": {strings.Join(filesId, ",")},
|
||||||
}
|
}
|
||||||
payload := strings.NewReader(p.Encode())
|
payload := strings.NewReader(p.Encode())
|
||||||
_, err = r.client.MakeRequest(http.MethodPost, fmt.Sprintf("%s/torrents/selectFiles/%s", r.Host, torrent.Id), payload)
|
req, _ := http.NewRequest(http.MethodPost, fmt.Sprintf("%s/torrents/selectFiles/%s", r.Host, torrent.Id), payload)
|
||||||
|
_, err = r.client.MakeRequest(req)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return torrent, err
|
return torrent, err
|
||||||
}
|
}
|
||||||
@@ -209,11 +212,9 @@ func (r *RealDebrid) CheckStatus(torrent *Torrent, isSymlink bool) (*Torrent, er
|
|||||||
return torrent, err
|
return torrent, err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
break
|
break
|
||||||
} else if status == "downloading" {
|
} else if slices.Contains(downloadingStatus, status) {
|
||||||
if !r.DownloadUncached {
|
if !r.DownloadUncached {
|
||||||
go r.DeleteTorrent(torrent)
|
|
||||||
return torrent, fmt.Errorf("torrent: %s not cached", torrent.Name)
|
return torrent, fmt.Errorf("torrent: %s not cached", torrent.Name)
|
||||||
}
|
}
|
||||||
// Break out of the loop if the torrent is downloading.
|
// Break out of the loop if the torrent is downloading.
|
||||||
@@ -227,7 +228,8 @@ func (r *RealDebrid) CheckStatus(torrent *Torrent, isSymlink bool) (*Torrent, er
|
|||||||
|
|
||||||
func (r *RealDebrid) DeleteTorrent(torrent *Torrent) {
|
func (r *RealDebrid) DeleteTorrent(torrent *Torrent) {
|
||||||
url := fmt.Sprintf("%s/torrents/delete/%s", r.Host, torrent.Id)
|
url := fmt.Sprintf("%s/torrents/delete/%s", r.Host, torrent.Id)
|
||||||
_, err := r.client.MakeRequest(http.MethodDelete, url, nil)
|
req, _ := http.NewRequest(http.MethodDelete, url, nil)
|
||||||
|
_, err := r.client.MakeRequest(req)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
r.logger.Printf("Torrent: %s deleted\n", torrent.Name)
|
r.logger.Printf("Torrent: %s deleted\n", torrent.Name)
|
||||||
} else {
|
} else {
|
||||||
@@ -245,7 +247,8 @@ func (r *RealDebrid) GetDownloadLinks(torrent *Torrent) error {
|
|||||||
payload := gourl.Values{
|
payload := gourl.Values{
|
||||||
"link": {link},
|
"link": {link},
|
||||||
}
|
}
|
||||||
resp, err := r.client.MakeRequest(http.MethodPost, url, strings.NewReader(payload.Encode()))
|
req, _ := http.NewRequest(http.MethodPost, url, strings.NewReader(payload.Encode()))
|
||||||
|
resp, err := r.client.MakeRequest(req)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -264,8 +267,8 @@ func (r *RealDebrid) GetDownloadLinks(torrent *Torrent) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *RealDebrid) GetDownloadUncached() bool {
|
func (r *RealDebrid) GetCheckCached() bool {
|
||||||
return r.DownloadUncached
|
return r.CheckCached
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewRealDebrid(dc common.DebridConfig, cache *common.Cache) *RealDebrid {
|
func NewRealDebrid(dc common.DebridConfig, cache *common.Cache) *RealDebrid {
|
||||||
@@ -276,12 +279,16 @@ func NewRealDebrid(dc common.DebridConfig, cache *common.Cache) *RealDebrid {
|
|||||||
client := common.NewRLHTTPClient(rl, headers)
|
client := common.NewRLHTTPClient(rl, headers)
|
||||||
logger := common.NewLogger(dc.Name, os.Stdout)
|
logger := common.NewLogger(dc.Name, os.Stdout)
|
||||||
return &RealDebrid{
|
return &RealDebrid{
|
||||||
Host: dc.Host,
|
BaseDebrid: BaseDebrid{
|
||||||
APIKey: dc.APIKey,
|
Name: "realdebrid",
|
||||||
DownloadUncached: dc.DownloadUncached,
|
Host: dc.Host,
|
||||||
client: client,
|
APIKey: dc.APIKey,
|
||||||
cache: cache,
|
DownloadUncached: dc.DownloadUncached,
|
||||||
MountPath: dc.Folder,
|
client: client,
|
||||||
logger: logger,
|
cache: cache,
|
||||||
|
MountPath: dc.Folder,
|
||||||
|
logger: logger,
|
||||||
|
CheckCached: dc.CheckCached,
|
||||||
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
22
pkg/debrid/service.go
Normal file
22
pkg/debrid/service.go
Normal file
@@ -0,0 +1,22 @@
|
|||||||
|
package debrid
|
||||||
|
|
||||||
|
type DebridService struct {
|
||||||
|
debrids []Service
|
||||||
|
lastUsed int
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *DebridService) Get() Service {
|
||||||
|
if d.lastUsed == 0 {
|
||||||
|
return d.debrids[0]
|
||||||
|
}
|
||||||
|
return d.debrids[d.lastUsed]
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *DebridService) GetByName(name string) Service {
|
||||||
|
for _, deb := range d.debrids {
|
||||||
|
if deb.GetName() == name {
|
||||||
|
return deb
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
45
pkg/debrid/structs/debrid_link.go
Normal file
45
pkg/debrid/structs/debrid_link.go
Normal file
@@ -0,0 +1,45 @@
|
|||||||
|
package structs
|
||||||
|
|
||||||
|
type DebridLinkAPIResponse[T any] struct {
|
||||||
|
Success bool `json:"success"`
|
||||||
|
Value *T `json:"value"` // Use pointer to allow nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type DebridLinkAvailableResponse DebridLinkAPIResponse[map[string]map[string]struct {
|
||||||
|
Name string `json:"name"`
|
||||||
|
HashString string `json:"hashString"`
|
||||||
|
Files []struct {
|
||||||
|
Name string `json:"name"`
|
||||||
|
Size int `json:"size"`
|
||||||
|
} `json:"files"`
|
||||||
|
}]
|
||||||
|
|
||||||
|
type debridLinkTorrentInfo struct {
|
||||||
|
ID string `json:"id"`
|
||||||
|
Name string `json:"name"`
|
||||||
|
HashString string `json:"hashString"`
|
||||||
|
UploadRatio float64 `json:"uploadRatio"`
|
||||||
|
ServerID string `json:"serverId"`
|
||||||
|
Wait bool `json:"wait"`
|
||||||
|
PeersConnected int `json:"peersConnected"`
|
||||||
|
Status int `json:"status"`
|
||||||
|
TotalSize int64 `json:"totalSize"`
|
||||||
|
Files []struct {
|
||||||
|
ID string `json:"id"`
|
||||||
|
Name string `json:"name"`
|
||||||
|
DownloadURL string `json:"downloadUrl"`
|
||||||
|
Size int64 `json:"size"`
|
||||||
|
DownloadPercent int `json:"downloadPercent"`
|
||||||
|
} `json:"files"`
|
||||||
|
Trackers []struct {
|
||||||
|
Announce string `json:"announce"`
|
||||||
|
} `json:"trackers"`
|
||||||
|
Created int64 `json:"created"`
|
||||||
|
DownloadPercent float64 `json:"downloadPercent"`
|
||||||
|
DownloadSpeed int `json:"downloadSpeed"`
|
||||||
|
UploadSpeed int `json:"uploadSpeed"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type DebridLinkTorrentInfo DebridLinkAPIResponse[[]debridLinkTorrentInfo]
|
||||||
|
|
||||||
|
type DebridLinkSubmitTorrentInfo DebridLinkAPIResponse[debridLinkTorrentInfo]
|
||||||
@@ -75,7 +75,7 @@ type RealDebridTorrentInfo struct {
|
|||||||
OriginalFilename string `json:"original_filename"`
|
OriginalFilename string `json:"original_filename"`
|
||||||
Hash string `json:"hash"`
|
Hash string `json:"hash"`
|
||||||
Bytes int64 `json:"bytes"`
|
Bytes int64 `json:"bytes"`
|
||||||
OriginalBytes int `json:"original_bytes"`
|
OriginalBytes int64 `json:"original_bytes"`
|
||||||
Host string `json:"host"`
|
Host string `json:"host"`
|
||||||
Split int `json:"split"`
|
Split int `json:"split"`
|
||||||
Progress float64 `json:"progress"`
|
Progress float64 `json:"progress"`
|
||||||
@@ -84,12 +84,12 @@ type RealDebridTorrentInfo struct {
|
|||||||
Files []struct {
|
Files []struct {
|
||||||
ID int `json:"id"`
|
ID int `json:"id"`
|
||||||
Path string `json:"path"`
|
Path string `json:"path"`
|
||||||
Bytes int `json:"bytes"`
|
Bytes int64 `json:"bytes"`
|
||||||
Selected int `json:"selected"`
|
Selected int `json:"selected"`
|
||||||
} `json:"files"`
|
} `json:"files"`
|
||||||
Links []string `json:"links"`
|
Links []string `json:"links"`
|
||||||
Ended string `json:"ended,omitempty"`
|
Ended string `json:"ended,omitempty"`
|
||||||
Speed int64 `json:"speed,omitempty"`
|
Speed int `json:"speed,omitempty"`
|
||||||
Seeders int `json:"seeders,omitempty"`
|
Seeders int `json:"seeders,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -97,11 +97,11 @@ type RealDebridUnrestrictResponse struct {
|
|||||||
Id string `json:"id"`
|
Id string `json:"id"`
|
||||||
Filename string `json:"filename"`
|
Filename string `json:"filename"`
|
||||||
MimeType string `json:"mimeType"`
|
MimeType string `json:"mimeType"`
|
||||||
Filesize int64 `json:"filesize"`
|
Filesize int `json:"filesize"`
|
||||||
Link string `json:"link"`
|
Link string `json:"link"`
|
||||||
Host string `json:"host"`
|
Host string `json:"host"`
|
||||||
Chunks int64 `json:"chunks"`
|
Chunks int `json:"chunks"`
|
||||||
Crc int64 `json:"crc"`
|
Crc int `json:"crc"`
|
||||||
Download string `json:"download"`
|
Download string `json:"download"`
|
||||||
Streamable int `json:"streamable"`
|
Streamable int `json:"streamable"`
|
||||||
}
|
}
|
||||||
|
|||||||
75
pkg/debrid/structs/torbox.go
Normal file
75
pkg/debrid/structs/torbox.go
Normal file
@@ -0,0 +1,75 @@
|
|||||||
|
package structs
|
||||||
|
|
||||||
|
import "time"
|
||||||
|
|
||||||
|
type TorboxAPIResponse[T any] struct {
|
||||||
|
Success bool `json:"success"`
|
||||||
|
Error any `json:"error"`
|
||||||
|
Detail string `json:"detail"`
|
||||||
|
Data *T `json:"data"` // Use pointer to allow nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type TorBoxAvailableResponse TorboxAPIResponse[map[string]struct {
|
||||||
|
Name string `json:"name"`
|
||||||
|
Size int `json:"size"`
|
||||||
|
Hash string `json:"hash"`
|
||||||
|
}]
|
||||||
|
|
||||||
|
type TorBoxAddMagnetResponse TorboxAPIResponse[struct {
|
||||||
|
Id int `json:"torrent_id"`
|
||||||
|
Hash string `json:"hash"`
|
||||||
|
}]
|
||||||
|
|
||||||
|
type torboxInfo struct {
|
||||||
|
Id int `json:"id"`
|
||||||
|
AuthId string `json:"auth_id"`
|
||||||
|
Server int `json:"server"`
|
||||||
|
Hash string `json:"hash"`
|
||||||
|
Name string `json:"name"`
|
||||||
|
Magnet interface{} `json:"magnet"`
|
||||||
|
Size int64 `json:"size"`
|
||||||
|
Active bool `json:"active"`
|
||||||
|
CreatedAt time.Time `json:"created_at"`
|
||||||
|
UpdatedAt time.Time `json:"updated_at"`
|
||||||
|
DownloadState string `json:"download_state"`
|
||||||
|
Seeds int `json:"seeds"`
|
||||||
|
Peers int `json:"peers"`
|
||||||
|
Ratio float64 `json:"ratio"`
|
||||||
|
Progress float64 `json:"progress"`
|
||||||
|
DownloadSpeed int `json:"download_speed"`
|
||||||
|
UploadSpeed int `json:"upload_speed"`
|
||||||
|
Eta int `json:"eta"`
|
||||||
|
TorrentFile bool `json:"torrent_file"`
|
||||||
|
ExpiresAt interface{} `json:"expires_at"`
|
||||||
|
DownloadPresent bool `json:"download_present"`
|
||||||
|
Files []struct {
|
||||||
|
Id int `json:"id"`
|
||||||
|
Md5 interface{} `json:"md5"`
|
||||||
|
Hash string `json:"hash"`
|
||||||
|
Name string `json:"name"`
|
||||||
|
Size int64 `json:"size"`
|
||||||
|
Zipped bool `json:"zipped"`
|
||||||
|
S3Path string `json:"s3_path"`
|
||||||
|
Infected bool `json:"infected"`
|
||||||
|
Mimetype string `json:"mimetype"`
|
||||||
|
ShortName string `json:"short_name"`
|
||||||
|
AbsolutePath string `json:"absolute_path"`
|
||||||
|
} `json:"files"`
|
||||||
|
DownloadPath string `json:"download_path"`
|
||||||
|
InactiveCheck int `json:"inactive_check"`
|
||||||
|
Availability int `json:"availability"`
|
||||||
|
DownloadFinished bool `json:"download_finished"`
|
||||||
|
Tracker interface{} `json:"tracker"`
|
||||||
|
TotalUploaded int `json:"total_uploaded"`
|
||||||
|
TotalDownloaded int `json:"total_downloaded"`
|
||||||
|
Cached bool `json:"cached"`
|
||||||
|
Owner string `json:"owner"`
|
||||||
|
SeedTorrent bool `json:"seed_torrent"`
|
||||||
|
AllowZipped bool `json:"allow_zipped"`
|
||||||
|
LongTermSeeding bool `json:"long_term_seeding"`
|
||||||
|
TrackerMessage interface{} `json:"tracker_message"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type TorboxInfoResponse TorboxAPIResponse[torboxInfo]
|
||||||
|
|
||||||
|
type TorBoxDownloadLinksResponse TorboxAPIResponse[string]
|
||||||
303
pkg/debrid/torbox.go
Normal file
303
pkg/debrid/torbox.go
Normal file
@@ -0,0 +1,303 @@
|
|||||||
|
package debrid
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"goBlack/common"
|
||||||
|
"goBlack/pkg/debrid/structs"
|
||||||
|
"log"
|
||||||
|
"mime/multipart"
|
||||||
|
"net/http"
|
||||||
|
gourl "net/url"
|
||||||
|
"os"
|
||||||
|
"path"
|
||||||
|
"path/filepath"
|
||||||
|
"slices"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
type Torbox struct {
|
||||||
|
BaseDebrid
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *Torbox) GetMountPath() string {
|
||||||
|
return r.MountPath
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *Torbox) GetName() string {
|
||||||
|
return r.Name
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *Torbox) GetLogger() *log.Logger {
|
||||||
|
return r.logger
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *Torbox) IsAvailable(infohashes []string) map[string]bool {
|
||||||
|
// Check if the infohashes are available in the local cache
|
||||||
|
hashes, result := GetLocalCache(infohashes, r.cache)
|
||||||
|
|
||||||
|
if len(hashes) == 0 {
|
||||||
|
// Either all the infohashes are locally cached or none are
|
||||||
|
r.cache.AddMultiple(result)
|
||||||
|
return result
|
||||||
|
}
|
||||||
|
|
||||||
|
// Divide hashes into groups of 100
|
||||||
|
for i := 0; i < len(hashes); i += 100 {
|
||||||
|
end := i + 100
|
||||||
|
if end > len(hashes) {
|
||||||
|
end = len(hashes)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Filter out empty strings
|
||||||
|
validHashes := make([]string, 0, end-i)
|
||||||
|
for _, hash := range hashes[i:end] {
|
||||||
|
if hash != "" {
|
||||||
|
validHashes = append(validHashes, hash)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// If no valid hashes in this batch, continue to the next batch
|
||||||
|
if len(validHashes) == 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
hashStr := strings.Join(validHashes, ",")
|
||||||
|
url := fmt.Sprintf("%s/api/torrents/checkcached?hash=%s", r.Host, hashStr)
|
||||||
|
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
||||||
|
resp, err := r.client.MakeRequest(req)
|
||||||
|
if err != nil {
|
||||||
|
log.Println("Error checking availability:", err)
|
||||||
|
return result
|
||||||
|
}
|
||||||
|
var res structs.TorBoxAvailableResponse
|
||||||
|
err = json.Unmarshal(resp, &res)
|
||||||
|
if err != nil {
|
||||||
|
log.Println("Error marshalling availability:", err)
|
||||||
|
return result
|
||||||
|
}
|
||||||
|
if res.Data == nil {
|
||||||
|
return result
|
||||||
|
}
|
||||||
|
|
||||||
|
for h, cache := range *res.Data {
|
||||||
|
if cache.Size > 0 {
|
||||||
|
result[strings.ToUpper(h)] = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
r.cache.AddMultiple(result) // Add the results to the cache
|
||||||
|
return result
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *Torbox) SubmitMagnet(torrent *Torrent) (*Torrent, error) {
|
||||||
|
url := fmt.Sprintf("%s/api/torrents/createtorrent", r.Host)
|
||||||
|
payload := &bytes.Buffer{}
|
||||||
|
writer := multipart.NewWriter(payload)
|
||||||
|
_ = writer.WriteField("magnet", torrent.Magnet.Link)
|
||||||
|
err := writer.Close()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
req, _ := http.NewRequest(http.MethodPost, url, payload)
|
||||||
|
req.Header.Set("Content-Type", writer.FormDataContentType())
|
||||||
|
resp, err := r.client.MakeRequest(req)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
var data structs.TorBoxAddMagnetResponse
|
||||||
|
err = json.Unmarshal(resp, &data)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if data.Data == nil {
|
||||||
|
return nil, fmt.Errorf("error adding torrent")
|
||||||
|
}
|
||||||
|
dt := *data.Data
|
||||||
|
torrentId := strconv.Itoa(dt.Id)
|
||||||
|
log.Printf("Torrent: %s added with id: %s\n", torrent.Name, torrentId)
|
||||||
|
torrent.Id = torrentId
|
||||||
|
|
||||||
|
return torrent, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func getStatus(status string, finished bool) string {
|
||||||
|
if finished {
|
||||||
|
return "downloaded"
|
||||||
|
}
|
||||||
|
downloading := []string{"completed", "cached", "paused", "downloading", "uploading",
|
||||||
|
"checkingResumeData", "metaDL", "pausedUP", "queuedUP", "checkingUP",
|
||||||
|
"forcedUP", "allocating", "downloading", "metaDL", "pausedDL",
|
||||||
|
"queuedDL", "checkingDL", "forcedDL", "checkingResumeData", "moving"}
|
||||||
|
switch {
|
||||||
|
case slices.Contains(downloading, status):
|
||||||
|
return "downloading"
|
||||||
|
default:
|
||||||
|
return "error"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *Torbox) GetTorrent(id string) (*Torrent, error) {
|
||||||
|
torrent := &Torrent{}
|
||||||
|
url := fmt.Sprintf("%s/api/torrents/mylist/?id=%s", r.Host, id)
|
||||||
|
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
||||||
|
resp, err := r.client.MakeRequest(req)
|
||||||
|
if err != nil {
|
||||||
|
return torrent, err
|
||||||
|
}
|
||||||
|
var res structs.TorboxInfoResponse
|
||||||
|
err = json.Unmarshal(resp, &res)
|
||||||
|
if err != nil {
|
||||||
|
return torrent, err
|
||||||
|
}
|
||||||
|
data := res.Data
|
||||||
|
name := data.Name
|
||||||
|
torrent.Id = id
|
||||||
|
torrent.Name = name
|
||||||
|
torrent.Bytes = data.Size
|
||||||
|
torrent.Folder = name
|
||||||
|
torrent.Progress = data.Progress * 100
|
||||||
|
torrent.Status = getStatus(data.DownloadState, data.DownloadFinished)
|
||||||
|
torrent.Speed = data.DownloadSpeed
|
||||||
|
torrent.Seeders = data.Seeds
|
||||||
|
torrent.Filename = name
|
||||||
|
torrent.OriginalFilename = name
|
||||||
|
files := make([]TorrentFile, 0)
|
||||||
|
for _, f := range data.Files {
|
||||||
|
fileName := filepath.Base(f.Name)
|
||||||
|
if (!common.RegexMatch(common.VIDEOMATCH, fileName) &&
|
||||||
|
!common.RegexMatch(common.SUBMATCH, fileName) &&
|
||||||
|
!common.RegexMatch(common.MUSICMATCH, fileName)) || common.RegexMatch(common.SAMPLEMATCH, fileName) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
file := TorrentFile{
|
||||||
|
Id: strconv.Itoa(f.Id),
|
||||||
|
Name: fileName,
|
||||||
|
Size: f.Size,
|
||||||
|
Path: fileName,
|
||||||
|
}
|
||||||
|
files = append(files, file)
|
||||||
|
}
|
||||||
|
var cleanPath string
|
||||||
|
if len(files) > 0 {
|
||||||
|
cleanPath = path.Clean(data.Files[0].Name)
|
||||||
|
} else {
|
||||||
|
cleanPath = path.Clean(data.Name)
|
||||||
|
}
|
||||||
|
|
||||||
|
torrent.OriginalFilename = strings.Split(cleanPath, "/")[0]
|
||||||
|
torrent.Files = files
|
||||||
|
torrent.Debrid = r
|
||||||
|
return torrent, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *Torbox) CheckStatus(torrent *Torrent, isSymlink bool) (*Torrent, error) {
|
||||||
|
for {
|
||||||
|
tb, err := r.GetTorrent(torrent.Id)
|
||||||
|
|
||||||
|
torrent = tb
|
||||||
|
|
||||||
|
if err != nil || tb == nil {
|
||||||
|
return tb, err
|
||||||
|
}
|
||||||
|
status := torrent.Status
|
||||||
|
if status == "error" || status == "dead" || status == "magnet_error" {
|
||||||
|
return torrent, fmt.Errorf("torrent: %s has error", torrent.Name)
|
||||||
|
} else if status == "downloaded" {
|
||||||
|
r.logger.Printf("Torrent: %s downloaded\n", torrent.Name)
|
||||||
|
if !isSymlink {
|
||||||
|
err = r.GetDownloadLinks(torrent)
|
||||||
|
if err != nil {
|
||||||
|
return torrent, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
break
|
||||||
|
} else if status == "downloading" {
|
||||||
|
if !r.DownloadUncached {
|
||||||
|
go torrent.Delete()
|
||||||
|
return torrent, fmt.Errorf("torrent: %s not cached", torrent.Name)
|
||||||
|
}
|
||||||
|
// Break out of the loop if the torrent is downloading.
|
||||||
|
// This is necessary to prevent infinite loop since we moved to sync downloading and async processing
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
return torrent, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *Torbox) DeleteTorrent(torrent *Torrent) {
|
||||||
|
url := fmt.Sprintf("%s/api/torrents/controltorrent/%s", r.Host, torrent.Id)
|
||||||
|
payload := map[string]string{"torrent_id": torrent.Id, "action": "Delete"}
|
||||||
|
jsonPayload, _ := json.Marshal(payload)
|
||||||
|
req, _ := http.NewRequest(http.MethodDelete, url, bytes.NewBuffer(jsonPayload))
|
||||||
|
_, err := r.client.MakeRequest(req)
|
||||||
|
if err == nil {
|
||||||
|
r.logger.Printf("Torrent: %s deleted\n", torrent.Name)
|
||||||
|
} else {
|
||||||
|
r.logger.Printf("Error deleting torrent: %s", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *Torbox) GetDownloadLinks(torrent *Torrent) error {
|
||||||
|
downloadLinks := make([]TorrentDownloadLinks, 0)
|
||||||
|
for _, file := range torrent.Files {
|
||||||
|
url := fmt.Sprintf("%s/api/torrents/requestdl/", r.Host)
|
||||||
|
query := gourl.Values{}
|
||||||
|
query.Add("torrent_id", torrent.Id)
|
||||||
|
query.Add("token", r.APIKey)
|
||||||
|
query.Add("file_id", file.Id)
|
||||||
|
url += "?" + query.Encode()
|
||||||
|
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
||||||
|
resp, err := r.client.MakeRequest(req)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
var data structs.TorBoxDownloadLinksResponse
|
||||||
|
if err = json.Unmarshal(resp, &data); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if data.Data == nil {
|
||||||
|
return fmt.Errorf("error getting download links")
|
||||||
|
}
|
||||||
|
idx := 0
|
||||||
|
link := *data.Data
|
||||||
|
|
||||||
|
dl := TorrentDownloadLinks{
|
||||||
|
Link: link,
|
||||||
|
Filename: torrent.Files[idx].Name,
|
||||||
|
DownloadLink: link,
|
||||||
|
}
|
||||||
|
downloadLinks = append(downloadLinks, dl)
|
||||||
|
}
|
||||||
|
torrent.DownloadLinks = downloadLinks
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *Torbox) GetCheckCached() bool {
|
||||||
|
return r.CheckCached
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewTorbox(dc common.DebridConfig, cache *common.Cache) *Torbox {
|
||||||
|
rl := common.ParseRateLimit(dc.RateLimit)
|
||||||
|
headers := map[string]string{
|
||||||
|
"Authorization": fmt.Sprintf("Bearer %s", dc.APIKey),
|
||||||
|
}
|
||||||
|
client := common.NewRLHTTPClient(rl, headers)
|
||||||
|
logger := common.NewLogger(dc.Name, os.Stdout)
|
||||||
|
return &Torbox{
|
||||||
|
BaseDebrid: BaseDebrid{
|
||||||
|
Name: "torbox",
|
||||||
|
Host: dc.Host,
|
||||||
|
APIKey: dc.APIKey,
|
||||||
|
DownloadUncached: dc.DownloadUncached,
|
||||||
|
client: client,
|
||||||
|
cache: cache,
|
||||||
|
MountPath: dc.Folder,
|
||||||
|
logger: logger,
|
||||||
|
CheckCached: dc.CheckCached,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -2,13 +2,14 @@ package debrid
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"goBlack/common"
|
"goBlack/common"
|
||||||
|
"goBlack/pkg/arr"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
)
|
)
|
||||||
|
|
||||||
type Arr struct {
|
type Arr struct {
|
||||||
Name string `json:"name"`
|
Name string `json:"name"`
|
||||||
Token string `json:"token"`
|
Token string `json:"-"`
|
||||||
Host string `json:"host"`
|
Host string `json:"host"`
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -36,14 +37,15 @@ type Torrent struct {
|
|||||||
Magnet *common.Magnet `json:"magnet"`
|
Magnet *common.Magnet `json:"magnet"`
|
||||||
Files []TorrentFile `json:"files"`
|
Files []TorrentFile `json:"files"`
|
||||||
Status string `json:"status"`
|
Status string `json:"status"`
|
||||||
|
Added string `json:"added"`
|
||||||
Progress float64 `json:"progress"`
|
Progress float64 `json:"progress"`
|
||||||
Speed int64 `json:"speed"`
|
Speed int `json:"speed"`
|
||||||
Seeders int `json:"seeders"`
|
Seeders int `json:"seeders"`
|
||||||
Links []string `json:"links"`
|
Links []string `json:"links"`
|
||||||
DownloadLinks []TorrentDownloadLinks `json:"download_links"`
|
DownloadLinks []TorrentDownloadLinks `json:"download_links"`
|
||||||
|
|
||||||
Debrid *Debrid
|
Debrid Service
|
||||||
Arr *Arr
|
Arr *arr.Arr
|
||||||
}
|
}
|
||||||
|
|
||||||
type TorrentDownloadLinks struct {
|
type TorrentDownloadLinks struct {
|
||||||
@@ -57,16 +59,26 @@ func (t *Torrent) GetSymlinkFolder(parent string) string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (t *Torrent) GetMountFolder(rClonePath string) string {
|
func (t *Torrent) GetMountFolder(rClonePath string) string {
|
||||||
pathWithNoExt := common.RemoveExtension(t.OriginalFilename)
|
possiblePaths := []string{
|
||||||
if common.FileReady(filepath.Join(rClonePath, t.OriginalFilename)) {
|
t.OriginalFilename,
|
||||||
return t.OriginalFilename
|
t.Filename,
|
||||||
} else if common.FileReady(filepath.Join(rClonePath, t.Filename)) {
|
common.RemoveExtension(t.OriginalFilename),
|
||||||
return t.Filename
|
|
||||||
} else if common.FileReady(filepath.Join(rClonePath, pathWithNoExt)) {
|
|
||||||
return pathWithNoExt
|
|
||||||
} else {
|
|
||||||
return ""
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
for _, path := range possiblePaths {
|
||||||
|
if path != "" && common.FileReady(filepath.Join(rClonePath, path)) {
|
||||||
|
return path
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *Torrent) Delete() {
|
||||||
|
if t.Debrid == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
t.Debrid.DeleteTorrent(t)
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
type TorrentFile struct {
|
type TorrentFile struct {
|
||||||
@@ -74,6 +86,7 @@ type TorrentFile struct {
|
|||||||
Name string `json:"name"`
|
Name string `json:"name"`
|
||||||
Size int64 `json:"size"`
|
Size int64 `json:"size"`
|
||||||
Path string `json:"path"`
|
Path string `json:"path"`
|
||||||
|
Link string `json:"link"`
|
||||||
}
|
}
|
||||||
|
|
||||||
func getEventId(eventType string) int {
|
func getEventId(eventType string) int {
|
||||||
|
|||||||
@@ -40,7 +40,7 @@ Loop:
|
|||||||
fmt.Printf(" %s: transferred %d / %d bytes (%.2f%%)\n",
|
fmt.Printf(" %s: transferred %d / %d bytes (%.2f%%)\n",
|
||||||
resp.Filename,
|
resp.Filename,
|
||||||
resp.BytesComplete(),
|
resp.BytesComplete(),
|
||||||
resp.Size,
|
resp.Size(),
|
||||||
100*resp.Progress())
|
100*resp.Progress())
|
||||||
|
|
||||||
case <-resp.Done:
|
case <-resp.Done:
|
||||||
@@ -3,7 +3,9 @@ package proxy
|
|||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"cmp"
|
"cmp"
|
||||||
|
"context"
|
||||||
"encoding/xml"
|
"encoding/xml"
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"github.com/elazarl/goproxy"
|
"github.com/elazarl/goproxy"
|
||||||
"github.com/elazarl/goproxy/ext/auth"
|
"github.com/elazarl/goproxy/ext/auth"
|
||||||
@@ -73,11 +75,10 @@ type Proxy struct {
|
|||||||
password string
|
password string
|
||||||
cachedOnly bool
|
cachedOnly bool
|
||||||
debrid debrid.Service
|
debrid debrid.Service
|
||||||
cache *common.Cache
|
|
||||||
logger *log.Logger
|
logger *log.Logger
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewProxy(config common.Config, deb debrid.Service, cache *common.Cache) *Proxy {
|
func NewProxy(config common.Config, deb *debrid.DebridService) *Proxy {
|
||||||
cfg := config.Proxy
|
cfg := config.Proxy
|
||||||
port := cmp.Or(os.Getenv("PORT"), cfg.Port, "8181")
|
port := cmp.Or(os.Getenv("PORT"), cfg.Port, "8181")
|
||||||
return &Proxy{
|
return &Proxy{
|
||||||
@@ -87,8 +88,7 @@ func NewProxy(config common.Config, deb debrid.Service, cache *common.Cache) *Pr
|
|||||||
username: cfg.Username,
|
username: cfg.Username,
|
||||||
password: cfg.Password,
|
password: cfg.Password,
|
||||||
cachedOnly: *cfg.CachedOnly,
|
cachedOnly: *cfg.CachedOnly,
|
||||||
debrid: deb,
|
debrid: deb.Get(),
|
||||||
cache: cache,
|
|
||||||
logger: common.NewLogger("Proxy", os.Stdout),
|
logger: common.NewLogger("Proxy", os.Stdout),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -308,7 +308,7 @@ func UrlMatches(re *regexp.Regexp) goproxy.ReqConditionFunc {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *Proxy) Start() {
|
func (p *Proxy) Start(ctx context.Context) error {
|
||||||
username, password := p.username, p.password
|
username, password := p.username, p.password
|
||||||
proxy := goproxy.NewProxyHttpServer()
|
proxy := goproxy.NewProxyHttpServer()
|
||||||
if username != "" || password != "" {
|
if username != "" || password != "" {
|
||||||
@@ -328,6 +328,17 @@ func (p *Proxy) Start() {
|
|||||||
|
|
||||||
proxy.Verbose = p.debug
|
proxy.Verbose = p.debug
|
||||||
portFmt := fmt.Sprintf(":%s", p.port)
|
portFmt := fmt.Sprintf(":%s", p.port)
|
||||||
|
srv := &http.Server{
|
||||||
|
Addr: portFmt,
|
||||||
|
Handler: proxy,
|
||||||
|
}
|
||||||
p.logger.Printf("[*] Starting proxy server on %s\n", portFmt)
|
p.logger.Printf("[*] Starting proxy server on %s\n", portFmt)
|
||||||
p.logger.Fatal(http.ListenAndServe(fmt.Sprintf("%s", portFmt), proxy))
|
go func() {
|
||||||
|
if err := srv.ListenAndServe(); err != nil && !errors.Is(err, http.ErrServerClosed) {
|
||||||
|
p.logger.Printf("Error starting proxy server: %v\n", err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
<-ctx.Done()
|
||||||
|
p.logger.Println("Shutting down gracefully...")
|
||||||
|
return srv.Shutdown(context.Background())
|
||||||
}
|
}
|
||||||
|
|||||||
103
pkg/qbit/arr.go
103
pkg/qbit/arr.go
@@ -1,103 +0,0 @@
|
|||||||
package qbit
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"cmp"
|
|
||||||
"encoding/json"
|
|
||||||
"goBlack/common"
|
|
||||||
"goBlack/pkg/debrid"
|
|
||||||
"net/http"
|
|
||||||
gourl "net/url"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
func (q *QBit) RefreshArr(arr *debrid.Arr) {
|
|
||||||
if arr.Token == "" || arr.Host == "" {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
url, err := common.JoinURL(arr.Host, "api/v3/command")
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
payload := map[string]string{"name": "RefreshMonitoredDownloads"}
|
|
||||||
jsonPayload, err := json.Marshal(payload)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
client := &http.Client{}
|
|
||||||
req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonPayload))
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
req.Header.Set("Content-Type", "application/json")
|
|
||||||
req.Header.Set("X-Api-Key", arr.Token)
|
|
||||||
|
|
||||||
resp, reqErr := client.Do(req)
|
|
||||||
if reqErr == nil {
|
|
||||||
statusOk := strconv.Itoa(resp.StatusCode)[0] == '2'
|
|
||||||
if statusOk {
|
|
||||||
if q.debug {
|
|
||||||
q.logger.Printf("Refreshed monitored downloads for %s", cmp.Or(arr.Name, arr.Host))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if reqErr != nil {
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (q *QBit) GetArrHistory(arr *debrid.Arr, downloadId, eventType string) *debrid.ArrHistorySchema {
|
|
||||||
query := gourl.Values{}
|
|
||||||
if downloadId != "" {
|
|
||||||
query.Add("downloadId", downloadId)
|
|
||||||
}
|
|
||||||
query.Add("eventType", eventType)
|
|
||||||
query.Add("pageSize", "100")
|
|
||||||
url, _ := common.JoinURL(arr.Host, "history")
|
|
||||||
url += "?" + query.Encode()
|
|
||||||
resp, err := http.Get(url)
|
|
||||||
if err != nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
var data *debrid.ArrHistorySchema
|
|
||||||
|
|
||||||
if err = json.NewDecoder(resp.Body).Decode(&data); err != nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return data
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
func (q *QBit) MarkArrAsFailed(torrent *Torrent, arr *debrid.Arr) error {
|
|
||||||
downloadId := strings.ToUpper(torrent.Hash)
|
|
||||||
history := q.GetArrHistory(arr, downloadId, "grabbed")
|
|
||||||
if history == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
torrentId := 0
|
|
||||||
for _, record := range history.Records {
|
|
||||||
if strings.EqualFold(record.DownloadID, downloadId) {
|
|
||||||
torrentId = record.ID
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if torrentId != 0 {
|
|
||||||
url, err := common.JoinURL(arr.Host, "history/failed/", strconv.Itoa(torrentId))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
req, err := http.NewRequest(http.MethodPost, url, nil)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
client := &http.Client{}
|
|
||||||
_, err = client.Do(req)
|
|
||||||
if err == nil {
|
|
||||||
q.logger.Printf("Marked torrent: %s as failed", torrent.Name)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
@@ -1,40 +0,0 @@
|
|||||||
package qbit
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/go-chi/chi/v5"
|
|
||||||
"net/http"
|
|
||||||
)
|
|
||||||
|
|
||||||
func (q *QBit) AddRoutes(r chi.Router) http.Handler {
|
|
||||||
r.Route("/api/v2", func(r chi.Router) {
|
|
||||||
r.Post("/auth/login", q.handleLogin)
|
|
||||||
|
|
||||||
r.Group(func(r chi.Router) {
|
|
||||||
//r.Use(q.authMiddleware)
|
|
||||||
r.Use(q.authContext)
|
|
||||||
r.Route("/torrents", func(r chi.Router) {
|
|
||||||
r.Use(HashesCtx)
|
|
||||||
r.Get("/info", q.handleTorrentsInfo)
|
|
||||||
r.Post("/add", q.handleTorrentsAdd)
|
|
||||||
r.Post("/delete", q.handleTorrentsDelete)
|
|
||||||
r.Get("/categories", q.handleCategories)
|
|
||||||
r.Post("/createCategory", q.handleCreateCategory)
|
|
||||||
|
|
||||||
r.Get("/pause", q.handleTorrentsPause)
|
|
||||||
r.Get("/resume", q.handleTorrentsResume)
|
|
||||||
r.Get("/recheck", q.handleTorrentRecheck)
|
|
||||||
r.Get("/properties", q.handleTorrentProperties)
|
|
||||||
})
|
|
||||||
|
|
||||||
r.Route("/app", func(r chi.Router) {
|
|
||||||
r.Get("/version", q.handleVersion)
|
|
||||||
r.Get("/webapiVersion", q.handleWebAPIVersion)
|
|
||||||
r.Get("/preferences", q.handlePreferences)
|
|
||||||
r.Get("/buildInfo", q.handleBuildInfo)
|
|
||||||
r.Get("/shutdown", q.shutdown)
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
})
|
|
||||||
return r
|
|
||||||
}
|
|
||||||
@@ -1,40 +0,0 @@
|
|||||||
package qbit
|
|
||||||
|
|
||||||
import (
|
|
||||||
"net/http"
|
|
||||||
"path/filepath"
|
|
||||||
)
|
|
||||||
|
|
||||||
func (q *QBit) handleVersion(w http.ResponseWriter, r *http.Request) {
|
|
||||||
_, _ = w.Write([]byte("v4.3.2"))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (q *QBit) handleWebAPIVersion(w http.ResponseWriter, r *http.Request) {
|
|
||||||
_, _ = w.Write([]byte("2.7"))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (q *QBit) handlePreferences(w http.ResponseWriter, r *http.Request) {
|
|
||||||
preferences := NewAppPreferences()
|
|
||||||
|
|
||||||
preferences.WebUiUsername = q.Username
|
|
||||||
preferences.SavePath = q.DownloadFolder
|
|
||||||
preferences.TempPath = filepath.Join(q.DownloadFolder, "temp")
|
|
||||||
|
|
||||||
JSONResponse(w, preferences, http.StatusOK)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (q *QBit) handleBuildInfo(w http.ResponseWriter, r *http.Request) {
|
|
||||||
res := BuildInfo{
|
|
||||||
Bitness: 64,
|
|
||||||
Boost: "1.75.0",
|
|
||||||
Libtorrent: "1.2.11.0",
|
|
||||||
Openssl: "1.1.1i",
|
|
||||||
Qt: "5.15.2",
|
|
||||||
Zlib: "1.2.11",
|
|
||||||
}
|
|
||||||
JSONResponse(w, res, http.StatusOK)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (q *QBit) shutdown(w http.ResponseWriter, r *http.Request) {
|
|
||||||
w.WriteHeader(http.StatusOK)
|
|
||||||
}
|
|
||||||
@@ -1,10 +0,0 @@
|
|||||||
package qbit
|
|
||||||
|
|
||||||
import (
|
|
||||||
"net/http"
|
|
||||||
)
|
|
||||||
|
|
||||||
func (q *QBit) handleLogin(w http.ResponseWriter, r *http.Request) {
|
|
||||||
w.WriteHeader(http.StatusOK)
|
|
||||||
w.Write([]byte("Ok."))
|
|
||||||
}
|
|
||||||
@@ -1,165 +0,0 @@
|
|||||||
package qbit
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"net/http"
|
|
||||||
"path/filepath"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
func (q *QBit) handleTorrentsInfo(w http.ResponseWriter, r *http.Request) {
|
|
||||||
//log all url params
|
|
||||||
ctx := r.Context()
|
|
||||||
category := strings.Trim(r.URL.Query().Get("category"), "")
|
|
||||||
filter := strings.Trim(r.URL.Query().Get("filter"), "")
|
|
||||||
hashes, _ := ctx.Value("hashes").([]string)
|
|
||||||
torrents := q.storage.GetAll(category, filter, hashes)
|
|
||||||
JSONResponse(w, torrents, http.StatusOK)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (q *QBit) handleTorrentsAdd(w http.ResponseWriter, r *http.Request) {
|
|
||||||
ctx := r.Context()
|
|
||||||
contentType := strings.Split(r.Header.Get("Content-Type"), ";")[0]
|
|
||||||
switch contentType {
|
|
||||||
case "multipart/form-data":
|
|
||||||
err := r.ParseMultipartForm(32 << 20) // 32MB max memory
|
|
||||||
if err != nil {
|
|
||||||
q.logger.Printf("Error parsing form: %v\n", err)
|
|
||||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
case "application/x-www-form-urlencoded":
|
|
||||||
err := r.ParseForm()
|
|
||||||
if err != nil {
|
|
||||||
q.logger.Printf("Error parsing form: %v\n", err)
|
|
||||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
isSymlink := strings.ToLower(r.FormValue("sequentialDownload")) != "true"
|
|
||||||
q.logger.Printf("isSymlink: %v\n", isSymlink)
|
|
||||||
urls := r.FormValue("urls")
|
|
||||||
category := r.FormValue("category")
|
|
||||||
|
|
||||||
var urlList []string
|
|
||||||
if urls != "" {
|
|
||||||
urlList = strings.Split(urls, "\n")
|
|
||||||
}
|
|
||||||
|
|
||||||
ctx = context.WithValue(ctx, "isSymlink", isSymlink)
|
|
||||||
|
|
||||||
for _, url := range urlList {
|
|
||||||
if err := q.AddMagnet(ctx, url, category); err != nil {
|
|
||||||
q.logger.Printf("Error adding magnet: %v\n", err)
|
|
||||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if contentType == "multipart/form-data" {
|
|
||||||
files := r.MultipartForm.File["torrents"]
|
|
||||||
for _, fileHeader := range files {
|
|
||||||
if err := q.AddTorrent(ctx, fileHeader, category); err != nil {
|
|
||||||
q.logger.Printf("Error adding torrent: %v\n", err)
|
|
||||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
w.WriteHeader(http.StatusOK)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (q *QBit) handleTorrentsDelete(w http.ResponseWriter, r *http.Request) {
|
|
||||||
ctx := r.Context()
|
|
||||||
hashes, _ := ctx.Value("hashes").([]string)
|
|
||||||
if len(hashes) == 0 {
|
|
||||||
http.Error(w, "No hashes provided", http.StatusBadRequest)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
for _, hash := range hashes {
|
|
||||||
q.storage.Delete(hash)
|
|
||||||
}
|
|
||||||
|
|
||||||
w.WriteHeader(http.StatusOK)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (q *QBit) handleTorrentsPause(w http.ResponseWriter, r *http.Request) {
|
|
||||||
ctx := r.Context()
|
|
||||||
hashes, _ := ctx.Value("hashes").([]string)
|
|
||||||
for _, hash := range hashes {
|
|
||||||
torrent := q.storage.Get(hash)
|
|
||||||
if torrent == nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
go q.PauseTorrent(torrent)
|
|
||||||
}
|
|
||||||
|
|
||||||
w.WriteHeader(http.StatusOK)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (q *QBit) handleTorrentsResume(w http.ResponseWriter, r *http.Request) {
|
|
||||||
ctx := r.Context()
|
|
||||||
hashes, _ := ctx.Value("hashes").([]string)
|
|
||||||
for _, hash := range hashes {
|
|
||||||
torrent := q.storage.Get(hash)
|
|
||||||
if torrent == nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
go q.ResumeTorrent(torrent)
|
|
||||||
}
|
|
||||||
|
|
||||||
w.WriteHeader(http.StatusOK)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (q *QBit) handleTorrentRecheck(w http.ResponseWriter, r *http.Request) {
|
|
||||||
ctx := r.Context()
|
|
||||||
hashes, _ := ctx.Value("hashes").([]string)
|
|
||||||
for _, hash := range hashes {
|
|
||||||
torrent := q.storage.Get(hash)
|
|
||||||
if torrent == nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
go q.RefreshTorrent(torrent)
|
|
||||||
}
|
|
||||||
|
|
||||||
w.WriteHeader(http.StatusOK)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (q *QBit) handleCategories(w http.ResponseWriter, r *http.Request) {
|
|
||||||
var categories = map[string]TorrentCategory{}
|
|
||||||
for _, cat := range q.Categories {
|
|
||||||
path := filepath.Join(q.DownloadFolder, cat)
|
|
||||||
categories[cat] = TorrentCategory{
|
|
||||||
Name: cat,
|
|
||||||
SavePath: path,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
JSONResponse(w, categories, http.StatusOK)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (q *QBit) handleCreateCategory(w http.ResponseWriter, r *http.Request) {
|
|
||||||
err := r.ParseForm()
|
|
||||||
if err != nil {
|
|
||||||
http.Error(w, "Failed to parse form data", http.StatusBadRequest)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
name := r.Form.Get("category")
|
|
||||||
if name == "" {
|
|
||||||
http.Error(w, "No name provided", http.StatusBadRequest)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
q.Categories = append(q.Categories, name)
|
|
||||||
|
|
||||||
JSONResponse(w, nil, http.StatusOK)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (q *QBit) handleTorrentProperties(w http.ResponseWriter, r *http.Request) {
|
|
||||||
hash := r.URL.Query().Get("hash")
|
|
||||||
torrent := q.storage.Get(hash)
|
|
||||||
properties := q.GetTorrentProperties(torrent)
|
|
||||||
JSONResponse(w, properties, http.StatusOK)
|
|
||||||
}
|
|
||||||
@@ -1,80 +1,17 @@
|
|||||||
package qbit
|
package qbit
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"cmp"
|
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"github.com/go-chi/chi/v5"
|
|
||||||
"github.com/go-chi/chi/v5/middleware"
|
|
||||||
"goBlack/common"
|
"goBlack/common"
|
||||||
"goBlack/pkg/debrid"
|
"goBlack/pkg/debrid"
|
||||||
"log"
|
"goBlack/pkg/qbit/server"
|
||||||
"net/http"
|
|
||||||
"os"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
type WorkerType struct {
|
func Start(ctx context.Context, config *common.Config, deb *debrid.DebridService) error {
|
||||||
ticker *time.Ticker
|
srv := server.NewServer(config, deb)
|
||||||
ctx context.Context
|
if err := srv.Start(ctx); err != nil {
|
||||||
}
|
return fmt.Errorf("failed to start qbit server: %w", err)
|
||||||
|
|
||||||
type Worker struct {
|
|
||||||
types map[string]WorkerType
|
|
||||||
}
|
|
||||||
|
|
||||||
type QBit struct {
|
|
||||||
Username string `json:"username"`
|
|
||||||
Password string `json:"password"`
|
|
||||||
Port string `json:"port"`
|
|
||||||
DownloadFolder string `json:"download_folder"`
|
|
||||||
Categories []string `json:"categories"`
|
|
||||||
debrid debrid.Service
|
|
||||||
cache *common.Cache
|
|
||||||
storage *TorrentStorage
|
|
||||||
debug bool
|
|
||||||
logger *log.Logger
|
|
||||||
arrs sync.Map // host:token (Used for refreshing in worker)
|
|
||||||
RefreshInterval int
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewQBit(config *common.Config, deb debrid.Service, cache *common.Cache) *QBit {
|
|
||||||
cfg := config.QBitTorrent
|
|
||||||
storage := NewTorrentStorage("torrents.json")
|
|
||||||
port := cmp.Or(cfg.Port, os.Getenv("QBIT_PORT"), "8182")
|
|
||||||
refreshInterval := cmp.Or(cfg.RefreshInterval, 10)
|
|
||||||
return &QBit{
|
|
||||||
Username: cfg.Username,
|
|
||||||
Password: cfg.Password,
|
|
||||||
Port: port,
|
|
||||||
DownloadFolder: cfg.DownloadFolder,
|
|
||||||
Categories: cfg.Categories,
|
|
||||||
debrid: deb,
|
|
||||||
cache: cache,
|
|
||||||
debug: cfg.Debug,
|
|
||||||
storage: storage,
|
|
||||||
logger: common.NewLogger("QBit", os.Stdout),
|
|
||||||
arrs: sync.Map{},
|
|
||||||
RefreshInterval: refreshInterval,
|
|
||||||
}
|
}
|
||||||
}
|
return nil
|
||||||
|
|
||||||
func (q *QBit) Start() {
|
|
||||||
|
|
||||||
r := chi.NewRouter()
|
|
||||||
if q.debug {
|
|
||||||
r.Use(middleware.Logger)
|
|
||||||
}
|
|
||||||
r.Use(middleware.Recoverer)
|
|
||||||
|
|
||||||
q.AddRoutes(r)
|
|
||||||
|
|
||||||
ctx := context.Background()
|
|
||||||
|
|
||||||
go q.StartWorker(ctx)
|
|
||||||
|
|
||||||
q.logger.Printf("Starting QBit server on :%s", q.Port)
|
|
||||||
port := fmt.Sprintf(":%s", q.Port)
|
|
||||||
q.logger.Fatal(http.ListenAndServe(port, r))
|
|
||||||
}
|
}
|
||||||
|
|||||||
116
pkg/qbit/qbit.go
116
pkg/qbit/qbit.go
@@ -1,116 +0,0 @@
|
|||||||
package qbit
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"github.com/google/uuid"
|
|
||||||
"goBlack/common"
|
|
||||||
"goBlack/pkg/debrid"
|
|
||||||
"io"
|
|
||||||
"mime/multipart"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
func (q *QBit) AddMagnet(ctx context.Context, url, category string) error {
|
|
||||||
magnet, err := common.GetMagnetFromUrl(url)
|
|
||||||
if err != nil {
|
|
||||||
q.logger.Printf("Error parsing magnet link: %v\n", err)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
err = q.Process(ctx, magnet, category)
|
|
||||||
if err != nil {
|
|
||||||
q.logger.Println("Failed to process magnet:", err)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (q *QBit) AddTorrent(ctx context.Context, fileHeader *multipart.FileHeader, category string) error {
|
|
||||||
file, _ := fileHeader.Open()
|
|
||||||
defer file.Close()
|
|
||||||
var reader io.Reader = file
|
|
||||||
magnet, err := common.GetMagnetFromFile(reader, fileHeader.Filename)
|
|
||||||
if err != nil {
|
|
||||||
q.logger.Printf("Error reading file: %s", fileHeader.Filename)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
err = q.Process(ctx, magnet, category)
|
|
||||||
if err != nil {
|
|
||||||
q.logger.Println("Failed to process torrent:", err)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (q *QBit) Process(ctx context.Context, magnet *common.Magnet, category string) error {
|
|
||||||
torrent := q.CreateTorrentFromMagnet(magnet, category)
|
|
||||||
arr := &debrid.Arr{
|
|
||||||
Name: category,
|
|
||||||
Token: ctx.Value("token").(string),
|
|
||||||
Host: ctx.Value("host").(string),
|
|
||||||
}
|
|
||||||
isSymlink := ctx.Value("isSymlink").(bool)
|
|
||||||
debridTorrent, err := debrid.ProcessQBitTorrent(q.debrid, magnet, arr, isSymlink)
|
|
||||||
if err != nil || debridTorrent == nil {
|
|
||||||
if err == nil {
|
|
||||||
err = fmt.Errorf("failed to process torrent")
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
torrent.ID = debridTorrent.Id
|
|
||||||
torrent.DebridTorrent = debridTorrent
|
|
||||||
torrent.Name = debridTorrent.Name
|
|
||||||
q.storage.AddOrUpdate(torrent)
|
|
||||||
go q.processFiles(torrent, debridTorrent, arr, isSymlink) // We can send async for file processing not to delay the response
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (q *QBit) CreateTorrentFromMagnet(magnet *common.Magnet, category string) *Torrent {
|
|
||||||
torrent := &Torrent{
|
|
||||||
ID: uuid.NewString(),
|
|
||||||
Hash: strings.ToLower(magnet.InfoHash),
|
|
||||||
Name: magnet.Name,
|
|
||||||
Size: magnet.Size,
|
|
||||||
Category: category,
|
|
||||||
State: "downloading",
|
|
||||||
AddedOn: time.Now().Unix(),
|
|
||||||
MagnetUri: magnet.Link,
|
|
||||||
|
|
||||||
Tracker: "udp://tracker.opentrackr.org:1337",
|
|
||||||
UpLimit: -1,
|
|
||||||
DlLimit: -1,
|
|
||||||
FlPiecePrio: false,
|
|
||||||
ForceStart: false,
|
|
||||||
AutoTmm: false,
|
|
||||||
Availability: 2,
|
|
||||||
MaxRatio: -1,
|
|
||||||
MaxSeedingTime: -1,
|
|
||||||
NumComplete: 10,
|
|
||||||
NumIncomplete: 0,
|
|
||||||
NumLeechs: 1,
|
|
||||||
Ratio: 1,
|
|
||||||
RatioLimit: 1,
|
|
||||||
}
|
|
||||||
return torrent
|
|
||||||
}
|
|
||||||
|
|
||||||
func (q *QBit) processFiles(torrent *Torrent, debridTorrent *debrid.Torrent, arr *debrid.Arr, isSymlink bool) {
|
|
||||||
for debridTorrent.Status != "downloaded" {
|
|
||||||
progress := debridTorrent.Progress
|
|
||||||
q.logger.Printf("Progress: %.2f%%", progress)
|
|
||||||
time.Sleep(5 * time.Second)
|
|
||||||
dbT, err := q.debrid.CheckStatus(debridTorrent, isSymlink)
|
|
||||||
if err != nil {
|
|
||||||
q.logger.Printf("Error checking status: %v", err)
|
|
||||||
q.MarkAsFailed(torrent)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
debridTorrent = dbT
|
|
||||||
}
|
|
||||||
if isSymlink {
|
|
||||||
q.processSymlink(torrent, debridTorrent, arr)
|
|
||||||
} else {
|
|
||||||
q.processManualFiles(torrent, debridTorrent, arr)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
42
pkg/qbit/server/app_handlers.go
Normal file
42
pkg/qbit/server/app_handlers.go
Normal file
@@ -0,0 +1,42 @@
|
|||||||
|
package server
|
||||||
|
|
||||||
|
import (
|
||||||
|
"goBlack/common"
|
||||||
|
"goBlack/pkg/qbit/shared"
|
||||||
|
"net/http"
|
||||||
|
"path/filepath"
|
||||||
|
)
|
||||||
|
|
||||||
|
func (s *Server) handleVersion(w http.ResponseWriter, r *http.Request) {
|
||||||
|
_, _ = w.Write([]byte("v4.3.2"))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Server) handleWebAPIVersion(w http.ResponseWriter, r *http.Request) {
|
||||||
|
_, _ = w.Write([]byte("2.7"))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Server) handlePreferences(w http.ResponseWriter, r *http.Request) {
|
||||||
|
preferences := shared.NewAppPreferences()
|
||||||
|
|
||||||
|
preferences.WebUiUsername = s.qbit.Username
|
||||||
|
preferences.SavePath = s.qbit.DownloadFolder
|
||||||
|
preferences.TempPath = filepath.Join(s.qbit.DownloadFolder, "temp")
|
||||||
|
|
||||||
|
common.JSONResponse(w, preferences, http.StatusOK)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Server) handleBuildInfo(w http.ResponseWriter, r *http.Request) {
|
||||||
|
res := shared.BuildInfo{
|
||||||
|
Bitness: 64,
|
||||||
|
Boost: "1.75.0",
|
||||||
|
Libtorrent: "1.2.11.0",
|
||||||
|
Openssl: "1.1.1i",
|
||||||
|
Qt: "5.15.2",
|
||||||
|
Zlib: "1.2.11",
|
||||||
|
}
|
||||||
|
common.JSONResponse(w, res, http.StatusOK)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Server) shutdown(w http.ResponseWriter, r *http.Request) {
|
||||||
|
w.WriteHeader(http.StatusOK)
|
||||||
|
}
|
||||||
7
pkg/qbit/server/auth_handlers.go
Normal file
7
pkg/qbit/server/auth_handlers.go
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
package server
|
||||||
|
|
||||||
|
import "net/http"
|
||||||
|
|
||||||
|
func (s *Server) handleLogin(w http.ResponseWriter, r *http.Request) {
|
||||||
|
_, _ = w.Write([]byte("Ok."))
|
||||||
|
}
|
||||||
178
pkg/qbit/server/import.go
Normal file
178
pkg/qbit/server/import.go
Normal file
@@ -0,0 +1,178 @@
|
|||||||
|
package server
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"github.com/google/uuid"
|
||||||
|
"goBlack/common"
|
||||||
|
"goBlack/pkg/arr"
|
||||||
|
"goBlack/pkg/debrid"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
type ImportRequest struct {
|
||||||
|
ID string `json:"id"`
|
||||||
|
Path string `json:"path"`
|
||||||
|
URI string `json:"uri"`
|
||||||
|
Arr *arr.Arr `json:"arr"`
|
||||||
|
IsSymlink bool `json:"isSymlink"`
|
||||||
|
SeriesId int `json:"series"`
|
||||||
|
Seasons []int `json:"seasons"`
|
||||||
|
Episodes []string `json:"episodes"`
|
||||||
|
|
||||||
|
Failed bool `json:"failed"`
|
||||||
|
FailedAt time.Time `json:"failedAt"`
|
||||||
|
Reason string `json:"reason"`
|
||||||
|
Completed bool `json:"completed"`
|
||||||
|
CompletedAt time.Time `json:"completedAt"`
|
||||||
|
Async bool `json:"async"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type ManualImportResponseSchema struct {
|
||||||
|
Priority string `json:"priority"`
|
||||||
|
Status string `json:"status"`
|
||||||
|
Result string `json:"result"`
|
||||||
|
Queued time.Time `json:"queued"`
|
||||||
|
Trigger string `json:"trigger"`
|
||||||
|
SendUpdatesToClient bool `json:"sendUpdatesToClient"`
|
||||||
|
UpdateScheduledTask bool `json:"updateScheduledTask"`
|
||||||
|
Id int `json:"id"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewImportRequest(uri string, arr *arr.Arr, isSymlink bool) *ImportRequest {
|
||||||
|
return &ImportRequest{
|
||||||
|
ID: uuid.NewString(),
|
||||||
|
URI: uri,
|
||||||
|
Arr: arr,
|
||||||
|
Failed: false,
|
||||||
|
Completed: false,
|
||||||
|
Async: false,
|
||||||
|
IsSymlink: isSymlink,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (i *ImportRequest) Fail(reason string) {
|
||||||
|
i.Failed = true
|
||||||
|
i.FailedAt = time.Now()
|
||||||
|
i.Reason = reason
|
||||||
|
}
|
||||||
|
|
||||||
|
func (i *ImportRequest) Complete() {
|
||||||
|
i.Completed = true
|
||||||
|
i.CompletedAt = time.Now()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (i *ImportRequest) Process(s *Server) (err error) {
|
||||||
|
// Use this for now.
|
||||||
|
// This sends the torrent to the arr
|
||||||
|
q := s.qbit
|
||||||
|
magnet, err := common.GetMagnetFromUrl(i.URI)
|
||||||
|
torrent := q.CreateTorrentFromMagnet(magnet, i.Arr.Name)
|
||||||
|
debridTorrent, err := debrid.ProcessTorrent(q.Debrid, magnet, i.Arr, i.IsSymlink)
|
||||||
|
if err != nil || debridTorrent == nil {
|
||||||
|
if debridTorrent != nil {
|
||||||
|
go debridTorrent.Delete()
|
||||||
|
}
|
||||||
|
if err == nil {
|
||||||
|
err = fmt.Errorf("failed to process torrent")
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
torrent = q.UpdateTorrentMin(torrent, debridTorrent)
|
||||||
|
q.Storage.AddOrUpdate(torrent)
|
||||||
|
go q.ProcessFiles(torrent, debridTorrent, i.Arr, i.IsSymlink)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (i *ImportRequest) BetaProcess(s *Server) (err error) {
|
||||||
|
// THis actually imports the torrent into the arr. Needs more work
|
||||||
|
if i.Arr == nil {
|
||||||
|
return errors.New("invalid arr")
|
||||||
|
}
|
||||||
|
q := s.qbit
|
||||||
|
magnet, err := common.GetMagnetFromUrl(i.URI)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("error parsing magnet link: %w", err)
|
||||||
|
}
|
||||||
|
debridTorrent, err := debrid.ProcessTorrent(q.Debrid, magnet, i.Arr, true)
|
||||||
|
if err != nil || debridTorrent == nil {
|
||||||
|
if debridTorrent != nil {
|
||||||
|
go debridTorrent.Delete()
|
||||||
|
}
|
||||||
|
if err == nil {
|
||||||
|
err = fmt.Errorf("failed to process torrent")
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
debridTorrent.Arr = i.Arr
|
||||||
|
|
||||||
|
torrentPath, err := q.ProcessSymlink(debridTorrent)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to process symlink: %w", err)
|
||||||
|
}
|
||||||
|
i.Path = torrentPath
|
||||||
|
body, err := i.Arr.Import(torrentPath, i.SeriesId, i.Seasons)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to import: %w", err)
|
||||||
|
}
|
||||||
|
defer body.Close()
|
||||||
|
|
||||||
|
var resp ManualImportResponseSchema
|
||||||
|
if err := json.NewDecoder(body).Decode(&resp); err != nil {
|
||||||
|
return fmt.Errorf("failed to decode response: %w", err)
|
||||||
|
}
|
||||||
|
if resp.Status != "success" {
|
||||||
|
return fmt.Errorf("failed to import: %s", resp.Result)
|
||||||
|
}
|
||||||
|
i.Complete()
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
type ImportStore struct {
|
||||||
|
Imports map[string]*ImportRequest
|
||||||
|
mu sync.RWMutex
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewImportStore() *ImportStore {
|
||||||
|
return &ImportStore{
|
||||||
|
Imports: make(map[string]*ImportRequest),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *ImportStore) AddImport(i *ImportRequest) {
|
||||||
|
s.mu.Lock()
|
||||||
|
defer s.mu.Unlock()
|
||||||
|
s.Imports[i.ID] = i
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *ImportStore) GetImport(id string) *ImportRequest {
|
||||||
|
s.mu.RLock()
|
||||||
|
defer s.mu.RUnlock()
|
||||||
|
return s.Imports[id]
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *ImportStore) GetAllImports() []*ImportRequest {
|
||||||
|
s.mu.RLock()
|
||||||
|
defer s.mu.RUnlock()
|
||||||
|
var imports []*ImportRequest
|
||||||
|
for _, i := range s.Imports {
|
||||||
|
imports = append(imports, i)
|
||||||
|
}
|
||||||
|
return imports
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *ImportStore) DeleteImport(id string) {
|
||||||
|
s.mu.Lock()
|
||||||
|
defer s.mu.Unlock()
|
||||||
|
delete(s.Imports, id)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *ImportStore) UpdateImport(i *ImportRequest) {
|
||||||
|
s.mu.Lock()
|
||||||
|
defer s.mu.Unlock()
|
||||||
|
s.Imports[i.ID] = i
|
||||||
|
}
|
||||||
@@ -1,29 +1,14 @@
|
|||||||
package qbit
|
package server
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"crypto/subtle"
|
|
||||||
"encoding/base64"
|
"encoding/base64"
|
||||||
"github.com/go-chi/chi/v5"
|
"github.com/go-chi/chi/v5"
|
||||||
|
"goBlack/pkg/arr"
|
||||||
"net/http"
|
"net/http"
|
||||||
"strings"
|
"strings"
|
||||||
)
|
)
|
||||||
|
|
||||||
func (q *QBit) authMiddleware(next http.Handler) http.Handler {
|
|
||||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
|
||||||
user, pass, ok := r.BasicAuth()
|
|
||||||
if !ok {
|
|
||||||
http.Error(w, "Unauthorized", http.StatusUnauthorized)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if subtle.ConstantTimeCompare([]byte(user), []byte(q.Username)) != 1 || subtle.ConstantTimeCompare([]byte(pass), []byte(q.Password)) != 1 {
|
|
||||||
http.Error(w, "Unauthorized", http.StatusUnauthorized)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
next.ServeHTTP(w, r)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func DecodeAuthHeader(header string) (string, string, error) {
|
func DecodeAuthHeader(header string) (string, string, error) {
|
||||||
encodedTokens := strings.Split(header, " ")
|
encodedTokens := strings.Split(header, " ")
|
||||||
if len(encodedTokens) != 2 {
|
if len(encodedTokens) != 2 {
|
||||||
@@ -45,17 +30,38 @@ func DecodeAuthHeader(header string) (string, string, error) {
|
|||||||
return host, token, nil
|
return host, token, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (q *QBit) authContext(next http.Handler) http.Handler {
|
func (s *Server) CategoryContext(next http.Handler) http.Handler {
|
||||||
|
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
category := strings.Trim(r.URL.Query().Get("category"), "")
|
||||||
|
if category == "" {
|
||||||
|
// Get from form
|
||||||
|
_ = r.ParseForm()
|
||||||
|
category = r.Form.Get("category")
|
||||||
|
if category == "" {
|
||||||
|
// Get from multipart form
|
||||||
|
_ = r.ParseMultipartForm(0)
|
||||||
|
category = r.FormValue("category")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
ctx := r.Context()
|
||||||
|
ctx = context.WithValue(r.Context(), "category", category)
|
||||||
|
next.ServeHTTP(w, r.WithContext(ctx))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Server) authContext(next http.Handler) http.Handler {
|
||||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
host, token, err := DecodeAuthHeader(r.Header.Get("Authorization"))
|
host, token, err := DecodeAuthHeader(r.Header.Get("Authorization"))
|
||||||
ctx := r.Context()
|
category := r.Context().Value("category").(string)
|
||||||
if err == nil {
|
a := &arr.Arr{
|
||||||
ctx = context.WithValue(r.Context(), "host", host)
|
Name: category,
|
||||||
ctx = context.WithValue(ctx, "token", token)
|
|
||||||
q.arrs.Store(host, token)
|
|
||||||
next.ServeHTTP(w, r.WithContext(ctx))
|
|
||||||
return
|
|
||||||
}
|
}
|
||||||
|
if err == nil {
|
||||||
|
a.Host = host
|
||||||
|
a.Token = token
|
||||||
|
}
|
||||||
|
s.qbit.Arrs.AddOrUpdate(a)
|
||||||
|
ctx := context.WithValue(r.Context(), "arr", a)
|
||||||
next.ServeHTTP(w, r.WithContext(ctx))
|
next.ServeHTTP(w, r.WithContext(ctx))
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
51
pkg/qbit/server/routes.go
Normal file
51
pkg/qbit/server/routes.go
Normal file
@@ -0,0 +1,51 @@
|
|||||||
|
package server
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/go-chi/chi/v5"
|
||||||
|
"net/http"
|
||||||
|
)
|
||||||
|
|
||||||
|
func (s *Server) Routes(r chi.Router) http.Handler {
|
||||||
|
r.Route("/api/v2", func(r chi.Router) {
|
||||||
|
r.Use(s.CategoryContext)
|
||||||
|
r.Post("/auth/login", s.handleLogin)
|
||||||
|
|
||||||
|
r.Group(func(r chi.Router) {
|
||||||
|
r.Use(s.authContext)
|
||||||
|
r.Route("/torrents", func(r chi.Router) {
|
||||||
|
r.Use(HashesCtx)
|
||||||
|
r.Get("/info", s.handleTorrentsInfo)
|
||||||
|
r.Post("/add", s.handleTorrentsAdd)
|
||||||
|
r.Post("/delete", s.handleTorrentsDelete)
|
||||||
|
r.Get("/categories", s.handleCategories)
|
||||||
|
r.Post("/createCategory", s.handleCreateCategory)
|
||||||
|
|
||||||
|
r.Get("/pause", s.handleTorrentsPause)
|
||||||
|
r.Get("/resume", s.handleTorrentsResume)
|
||||||
|
r.Get("/recheck", s.handleTorrentRecheck)
|
||||||
|
r.Get("/properties", s.handleTorrentProperties)
|
||||||
|
r.Get("/files", s.handleTorrentFiles)
|
||||||
|
})
|
||||||
|
|
||||||
|
r.Route("/app", func(r chi.Router) {
|
||||||
|
r.Get("/version", s.handleVersion)
|
||||||
|
r.Get("/webapiVersion", s.handleWebAPIVersion)
|
||||||
|
r.Get("/preferences", s.handlePreferences)
|
||||||
|
r.Get("/buildInfo", s.handleBuildInfo)
|
||||||
|
r.Get("/shutdown", s.shutdown)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
})
|
||||||
|
r.Get("/", s.handleHome)
|
||||||
|
r.Route("/internal", func(r chi.Router) {
|
||||||
|
r.Get("/arrs", s.handleGetArrs)
|
||||||
|
r.Get("/content", s.handleContent)
|
||||||
|
r.Get("/seasons/{contentId}", s.handleSeasons)
|
||||||
|
r.Get("/episodes/{contentId}", s.handleEpisodes)
|
||||||
|
r.Post("/add", s.handleAddContent)
|
||||||
|
r.Get("/search", s.handleSearch)
|
||||||
|
r.Get("/cached", s.handleCheckCached)
|
||||||
|
})
|
||||||
|
return r
|
||||||
|
}
|
||||||
66
pkg/qbit/server/server.go
Normal file
66
pkg/qbit/server/server.go
Normal file
@@ -0,0 +1,66 @@
|
|||||||
|
package server
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"github.com/go-chi/chi/v5"
|
||||||
|
"github.com/go-chi/chi/v5/middleware"
|
||||||
|
"goBlack/common"
|
||||||
|
"goBlack/pkg/debrid"
|
||||||
|
"goBlack/pkg/qbit/shared"
|
||||||
|
"log"
|
||||||
|
"net/http"
|
||||||
|
"os"
|
||||||
|
"os/signal"
|
||||||
|
"syscall"
|
||||||
|
)
|
||||||
|
|
||||||
|
type Server struct {
|
||||||
|
qbit *shared.QBit
|
||||||
|
logger *log.Logger
|
||||||
|
debug bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewServer(config *common.Config, deb *debrid.DebridService) *Server {
|
||||||
|
logger := common.NewLogger("QBit", os.Stdout)
|
||||||
|
q := shared.NewQBit(config, deb, logger)
|
||||||
|
return &Server{
|
||||||
|
qbit: q,
|
||||||
|
logger: logger,
|
||||||
|
debug: config.QBitTorrent.Debug,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Server) Start(ctx context.Context) error {
|
||||||
|
r := chi.NewRouter()
|
||||||
|
if s.debug {
|
||||||
|
r.Use(middleware.Logger)
|
||||||
|
}
|
||||||
|
r.Use(middleware.Recoverer)
|
||||||
|
r.Handle("/static/*", http.StripPrefix("/static/", http.FileServer(http.Dir("static"))))
|
||||||
|
s.Routes(r)
|
||||||
|
|
||||||
|
go s.qbit.StartWorker(context.Background())
|
||||||
|
|
||||||
|
s.logger.Printf("Starting QBit server on :%s", s.qbit.Port)
|
||||||
|
port := fmt.Sprintf(":%s", s.qbit.Port)
|
||||||
|
srv := &http.Server{
|
||||||
|
Addr: port,
|
||||||
|
Handler: r,
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx, stop := signal.NotifyContext(ctx, os.Interrupt, syscall.SIGTERM)
|
||||||
|
defer stop()
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
if err := srv.ListenAndServe(); err != nil && !errors.Is(err, http.ErrServerClosed) {
|
||||||
|
fmt.Printf("Error starting server: %v\n", err)
|
||||||
|
stop()
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
<-ctx.Done()
|
||||||
|
fmt.Println("Shutting down gracefully...")
|
||||||
|
return srv.Shutdown(context.Background())
|
||||||
|
}
|
||||||
334
pkg/qbit/server/static/index.html
Normal file
334
pkg/qbit/server/static/index.html
Normal file
@@ -0,0 +1,334 @@
|
|||||||
|
<!DOCTYPE html>
|
||||||
|
<html lang="en">
|
||||||
|
<head>
|
||||||
|
<meta charset="UTF-8">
|
||||||
|
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||||
|
<title>Debrid Manager</title>
|
||||||
|
<!-- Bootstrap CSS -->
|
||||||
|
<link href="https://cdn.jsdelivr.net/npm/bootstrap@5.3.0-alpha1/dist/css/bootstrap.min.css" rel="stylesheet">
|
||||||
|
<!-- Bootstrap Icons -->
|
||||||
|
<link href="https://cdn.jsdelivr.net/npm/bootstrap-icons@1.7.2/font/bootstrap-icons.css" rel="stylesheet">
|
||||||
|
<link href="https://cdn.jsdelivr.net/npm/select2@4.1.0-rc.0/dist/css/select2.min.css" rel="stylesheet"/>
|
||||||
|
<!-- Select2 Bootstrap 5 Theme CSS -->
|
||||||
|
<link rel="stylesheet"
|
||||||
|
href="https://cdn.jsdelivr.net/npm/select2-bootstrap-5-theme@1.3.0/dist/select2-bootstrap-5-theme.min.css"/>
|
||||||
|
<style>
|
||||||
|
.select2-container--bootstrap-5 .select2-results__option {
|
||||||
|
padding: 0.5rem;
|
||||||
|
}
|
||||||
|
|
||||||
|
.select2-result img {
|
||||||
|
border-radius: 4px;
|
||||||
|
}
|
||||||
|
|
||||||
|
.select2-container--bootstrap-5 .select2-results__option--highlighted {
|
||||||
|
background-color: #f8f9fa !important;
|
||||||
|
color: #000 !important;
|
||||||
|
}
|
||||||
|
|
||||||
|
.select2-container--bootstrap-5 .select2-results__option--selected {
|
||||||
|
background-color: #e9ecef !important;
|
||||||
|
}
|
||||||
|
</style>
|
||||||
|
</head>
|
||||||
|
<body>
|
||||||
|
<nav class="navbar navbar-expand-lg navbar-dark bg-dark">
|
||||||
|
<div class="container-fluid">
|
||||||
|
<span class="navbar-brand">Debrid Manager</span>
|
||||||
|
</div>
|
||||||
|
</nav>
|
||||||
|
|
||||||
|
<div class="container mt-4">
|
||||||
|
<div class="row">
|
||||||
|
<div class="col-md-8">
|
||||||
|
<div class="mb-3">
|
||||||
|
<label for="magnetURI" class="form-label">Magnet Link</label>
|
||||||
|
<textarea class="form-control" id="magnetURI" rows="3"></textarea>
|
||||||
|
</div>
|
||||||
|
<div class="mb-3">
|
||||||
|
<label for="selectArr" class="form-label">Enter Category</label>
|
||||||
|
<input type="email" class="form-control" id="selectArr" placeholder="Enter Category(e.g sonarr, radarr, radarr4k)">
|
||||||
|
</div>
|
||||||
|
<div class="form-check">
|
||||||
|
<input class="form-check-input" type="checkbox" value="" id="isSymlink">
|
||||||
|
<label class="form-check-label" for="isSymlink">
|
||||||
|
Not Symlink(Download real files instead of symlinks from Debrid)
|
||||||
|
</label>
|
||||||
|
</div>
|
||||||
|
<div class="mt-3">
|
||||||
|
<button class="btn btn-primary" id="addToArr">
|
||||||
|
Add to Arr
|
||||||
|
</button>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
<!-- <div class="col-md-6">-->
|
||||||
|
|
||||||
|
<!-- <div class="mb-3 d-none">-->
|
||||||
|
<!-- <select class="form-select mb-3 select2-ajax" id="selectContent">-->
|
||||||
|
<!-- <option></option>-->
|
||||||
|
<!-- </select>-->
|
||||||
|
<!-- </div>-->
|
||||||
|
<!-- <div class="mb-3 d-none">-->
|
||||||
|
<!-- <select class="form-select mb-3 select2-multi" id="selectSeason" multiple-->
|
||||||
|
<!-- style="width: 100%; display: none;">-->
|
||||||
|
<!-- <option value="all">Select All</option>-->
|
||||||
|
<!-- </select>-->
|
||||||
|
<!-- </div>-->
|
||||||
|
<!-- <div class="mb-4 d-none">-->
|
||||||
|
<!-- <select class="form-select mb-3 select2-multi" id="selectEpisode" multiple-->
|
||||||
|
<!-- style="width: 100%; display: none;">-->
|
||||||
|
<!-- <option value="all">Select All</option>-->
|
||||||
|
<!-- </select>-->
|
||||||
|
<!-- </div>-->
|
||||||
|
|
||||||
|
<!-- </div>-->
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<!-- Bootstrap JS and Popper.js -->
|
||||||
|
<script src="https://cdn.jsdelivr.net/npm/bootstrap@5.3.0-alpha1/dist/js/bootstrap.bundle.min.js"></script>
|
||||||
|
<!-- jQuery -->
|
||||||
|
<script src="https://code.jquery.com/jquery-3.6.0.min.js"></script>
|
||||||
|
<script src="https://cdn.jsdelivr.net/npm/select2@4.1.0-rc.0/dist/js/select2.min.js"></script>
|
||||||
|
<script>
|
||||||
|
$(document).ready(function () {
|
||||||
|
let $selectArr = $('#selectArr');
|
||||||
|
let $selectContent = $('#selectContent');
|
||||||
|
let $selectSeason = $('#selectSeason');
|
||||||
|
let $selectEpisode = $('#selectEpisode');
|
||||||
|
let $addBtn = $('#addToArr');
|
||||||
|
const $contentSearch = $('#contentSearch');
|
||||||
|
const $searchResults = $('#searchResults');
|
||||||
|
let isSonarr = true;
|
||||||
|
let searchTimeout;
|
||||||
|
let selectedArr, selectedContent, selectedSeasons, selectedEpisodes;
|
||||||
|
|
||||||
|
// Initially show only selectArr, hide others
|
||||||
|
$selectSeason.hide().closest('.mb-3').hide();
|
||||||
|
$selectEpisode.hide().closest('.mb-3').hide();
|
||||||
|
|
||||||
|
// Initialize Select2
|
||||||
|
$('.select2-multi').select2({
|
||||||
|
theme: 'bootstrap-5',
|
||||||
|
width: '100%',
|
||||||
|
placeholder: 'Select options',
|
||||||
|
allowClear: true
|
||||||
|
});
|
||||||
|
|
||||||
|
// Also hide the Select2 containers
|
||||||
|
$('.select2-container--bootstrap-5').hide();
|
||||||
|
|
||||||
|
$selectContent.select2({
|
||||||
|
theme: 'bootstrap-5',
|
||||||
|
width: '100%',
|
||||||
|
placeholder: 'Search shows/movies...',
|
||||||
|
allowClear: true,
|
||||||
|
minimumInputLength: 2,
|
||||||
|
ajax: {
|
||||||
|
url: '/internal/search',
|
||||||
|
dataType: 'json',
|
||||||
|
delay: 250,
|
||||||
|
data: function (params) {
|
||||||
|
return {
|
||||||
|
term: params.term
|
||||||
|
};
|
||||||
|
},
|
||||||
|
processResults: function (data) {
|
||||||
|
return {
|
||||||
|
results: data.map(function (item) {
|
||||||
|
return {
|
||||||
|
id: item.id,
|
||||||
|
text: item.media_type === 'movie' ? item.title : item.name,
|
||||||
|
media_type: item.media_type,
|
||||||
|
poster: item.poster_path ?
|
||||||
|
'https://image.tmdb.org/t/p/w92' + item.poster_path : null,
|
||||||
|
year: item.media_type === 'movie' ?
|
||||||
|
(item.release_date ? item.release_date.substring(0, 4) : '') :
|
||||||
|
(item.first_air_date ? item.first_air_date.substring(0, 4) : '')
|
||||||
|
};
|
||||||
|
})
|
||||||
|
};
|
||||||
|
},
|
||||||
|
cache: true
|
||||||
|
},
|
||||||
|
templateResult: formatResult,
|
||||||
|
templateSelection: formatSelection
|
||||||
|
});
|
||||||
|
|
||||||
|
function formatResult(item) {
|
||||||
|
if (!item.id) return item.text;
|
||||||
|
|
||||||
|
return $(`
|
||||||
|
<div class="select2-result d-flex align-items-center gap-2">
|
||||||
|
${item.poster ?
|
||||||
|
`<img src="${item.poster}" style="width: 45px; height: 68px; object-fit: cover;">` :
|
||||||
|
'<div style="width: 45px; height: 68px; background: #eee;"></div>'
|
||||||
|
}
|
||||||
|
<div>
|
||||||
|
<div class="fw-bold">${item.text}</div>
|
||||||
|
<small class="text-muted">
|
||||||
|
${item.year} • ${item.media_type === 'movie' ? 'Movie' : 'TV Series'}
|
||||||
|
</small>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
`);
|
||||||
|
}
|
||||||
|
|
||||||
|
function formatSelection(item) {
|
||||||
|
if (!item.id) return item.text;
|
||||||
|
return item.text + (item.year ? ` (${item.year})` : '');
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle selection
|
||||||
|
$selectContent.on('select2:select', function (e) {
|
||||||
|
selectedContent = e.params.data.id;
|
||||||
|
const mediaType = e.params.data.media_type;
|
||||||
|
|
||||||
|
if (mediaType === 'tv') {
|
||||||
|
$selectSeason.show().closest('.mb-3').show();
|
||||||
|
$selectSeason.next('.select2-container--bootstrap-5').show();
|
||||||
|
// Fetch seasons (your existing seasons fetch code)
|
||||||
|
fetch(`/internal/seasons/${selectedContent}`)
|
||||||
|
.then(response => response.json())
|
||||||
|
.then(seasons => {
|
||||||
|
$selectSeason.empty().append('<option value="all">Select All</option>');
|
||||||
|
seasons.forEach(season => {
|
||||||
|
$selectSeason.append(`<option value="${season}">Season ${season}</option>`);
|
||||||
|
});
|
||||||
|
$selectSeason.trigger('change.select2');
|
||||||
|
})
|
||||||
|
.catch(error => console.error('Error fetching seasons:', error));
|
||||||
|
} else {
|
||||||
|
// For movies, show the Add to Arr button directly
|
||||||
|
$selectSeason.hide().closest('.mb-3').hide();
|
||||||
|
$selectSeason.next('.select2-container--bootstrap-5').hide();
|
||||||
|
$selectEpisode.hide().closest('.mb-3').hide();
|
||||||
|
$selectEpisode.next('.select2-container--bootstrap-5').hide();
|
||||||
|
$addBtn.show();
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
// Fetch Arrs
|
||||||
|
function fetchArrs() {
|
||||||
|
fetch('/internal/arrs')
|
||||||
|
.then(response => response.json())
|
||||||
|
.then(arrs => {
|
||||||
|
$selectArr.empty().append('<option value="">Select Arr</option>');
|
||||||
|
arrs.forEach(arr => {
|
||||||
|
$selectArr.append(`<option value="${arr.name}">${arr.name}</option>`);
|
||||||
|
});
|
||||||
|
})
|
||||||
|
.catch(error => console.error('Error fetching arrs:', error));
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle content selection
|
||||||
|
$selectContent.change(function () {
|
||||||
|
selectedContent = $(this).val();
|
||||||
|
selectedArr = $selectArr.val();
|
||||||
|
|
||||||
|
if (!selectedContent) {
|
||||||
|
$selectSeason.hide().closest('.mb-3').hide();
|
||||||
|
$selectSeason.next('.select2-container--bootstrap-5').hide();
|
||||||
|
$selectEpisode.hide().closest('.mb-3').hide();
|
||||||
|
$selectEpisode.next('.select2-container--bootstrap-5').hide();
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (isSonarr) {
|
||||||
|
$selectSeason.show().closest('.mb-3').show();
|
||||||
|
$selectSeason.next('.select2-container--bootstrap-5').show();
|
||||||
|
// Fetch seasons
|
||||||
|
fetch(`/internal/seasons/${selectedContent}`)
|
||||||
|
.then(response => response.json())
|
||||||
|
.then(seasons => {
|
||||||
|
$selectSeason.empty().append('<option value="all">Select All</option>');
|
||||||
|
seasons.forEach(season => {
|
||||||
|
$selectSeason.append(`<option value="${season}">Season ${season}</option>`);
|
||||||
|
});
|
||||||
|
$selectSeason.trigger('change.select2');
|
||||||
|
})
|
||||||
|
.catch(error => console.error('Error fetching seasons:', error));
|
||||||
|
} else {
|
||||||
|
// For Radarr, show the Add to Arr button directly
|
||||||
|
$selectSeason.hide().closest('.mb-3').hide();
|
||||||
|
$selectSeason.next('.select2-container--bootstrap-5').hide();
|
||||||
|
$selectEpisode.hide().closest('.mb-3').hide();
|
||||||
|
$selectEpisode.next('.select2-container--bootstrap-5').hide();
|
||||||
|
$addBtn.show();
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
// Handle season selection
|
||||||
|
$selectSeason.change(function () {
|
||||||
|
selectedSeasons = $(this).val();
|
||||||
|
console.log('Selected seasons:', selectedSeasons);
|
||||||
|
|
||||||
|
if (!selectedSeasons || selectedSeasons.includes('all')) {
|
||||||
|
$selectEpisode.hide().closest('.mb-3').hide();
|
||||||
|
$selectEpisode.next('.select2-container--bootstrap-5').hide();
|
||||||
|
$addBtn.show();
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
$selectEpisode.show().closest('.mb-3').show();
|
||||||
|
$selectEpisode.next('.select2-container--bootstrap-5').show();
|
||||||
|
fetch(`/internal/episodes/${selectedContent}?seasons=${selectedSeasons.join(',')}`)
|
||||||
|
.then(response => response.json())
|
||||||
|
.then(episodes => {
|
||||||
|
$selectEpisode.empty().append('<option value="all">Select All</option>');
|
||||||
|
episodes.forEach(episode => {
|
||||||
|
$selectEpisode.append(`<option value="${episode}">Episode ${episode}</option>`);
|
||||||
|
});
|
||||||
|
$selectEpisode.trigger('change.select2');
|
||||||
|
})
|
||||||
|
.catch(error => console.error('Error fetching episodes:', error));
|
||||||
|
$addBtn.show();
|
||||||
|
});
|
||||||
|
|
||||||
|
$addBtn.click(function () {
|
||||||
|
let oldText = $(this).text();
|
||||||
|
$(this).prop('disabled', true).prepend('<span class="spinner-border spinner-border-sm me-2" role="status" aria-hidden="true"></span>');
|
||||||
|
let magnet = $('#magnetURI').val();
|
||||||
|
if (!magnet) {
|
||||||
|
$(this).prop('disabled', false).text(oldText);
|
||||||
|
alert('Please provide a magnet link or upload a torrent file!');
|
||||||
|
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
let data = {
|
||||||
|
arr: $selectArr.val(),
|
||||||
|
url: magnet,
|
||||||
|
notSymlink: $('#isSymlink').is(':checked'),
|
||||||
|
};
|
||||||
|
console.log('Adding to Arr:', data);
|
||||||
|
fetch('/internal/add', {
|
||||||
|
method: 'POST',
|
||||||
|
headers: {
|
||||||
|
'Content-Type': 'application/json'
|
||||||
|
},
|
||||||
|
body: JSON.stringify(data)
|
||||||
|
})
|
||||||
|
.then(async response => {
|
||||||
|
if (!response.ok) {
|
||||||
|
const errorText = await response.text();
|
||||||
|
throw new Error(errorText);
|
||||||
|
}
|
||||||
|
return response.json();
|
||||||
|
})
|
||||||
|
.then(result => {
|
||||||
|
console.log('Added to Arr:', result);
|
||||||
|
$(this).prop('disabled', false).text(oldText);
|
||||||
|
alert('Added to Arr successfully!');
|
||||||
|
})
|
||||||
|
.catch(error => {
|
||||||
|
$(this).prop('disabled', false).text(oldText);
|
||||||
|
alert(`Error adding to Arr: ${error.message || error}`);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
// Initial fetch of Arrs
|
||||||
|
//fetchArrs();
|
||||||
|
});
|
||||||
|
</script>
|
||||||
|
</body>
|
||||||
|
</html>
|
||||||
176
pkg/qbit/server/torrent_handlers.go
Normal file
176
pkg/qbit/server/torrent_handlers.go
Normal file
@@ -0,0 +1,176 @@
|
|||||||
|
package server
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"goBlack/common"
|
||||||
|
"goBlack/pkg/qbit/shared"
|
||||||
|
"net/http"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
func (s *Server) handleTorrentsInfo(w http.ResponseWriter, r *http.Request) {
|
||||||
|
//log all url params
|
||||||
|
ctx := r.Context()
|
||||||
|
category := ctx.Value("category").(string)
|
||||||
|
filter := strings.Trim(r.URL.Query().Get("filter"), "")
|
||||||
|
hashes, _ := ctx.Value("hashes").([]string)
|
||||||
|
torrents := s.qbit.Storage.GetAll(category, filter, hashes)
|
||||||
|
common.JSONResponse(w, torrents, http.StatusOK)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Server) handleTorrentsAdd(w http.ResponseWriter, r *http.Request) {
|
||||||
|
ctx := r.Context()
|
||||||
|
contentType := strings.Split(r.Header.Get("Content-Type"), ";")[0]
|
||||||
|
switch contentType {
|
||||||
|
case "multipart/form-data":
|
||||||
|
err := r.ParseMultipartForm(32 << 20) // 32MB max memory
|
||||||
|
if err != nil {
|
||||||
|
s.logger.Printf("Error parsing form: %v\n", err)
|
||||||
|
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
case "application/x-www-form-urlencoded":
|
||||||
|
err := r.ParseForm()
|
||||||
|
if err != nil {
|
||||||
|
s.logger.Printf("Error parsing form: %v\n", err)
|
||||||
|
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
isSymlink := strings.ToLower(r.FormValue("sequentialDownload")) != "true"
|
||||||
|
s.logger.Printf("isSymlink: %v\n", isSymlink)
|
||||||
|
urls := r.FormValue("urls")
|
||||||
|
category := r.FormValue("category")
|
||||||
|
|
||||||
|
var urlList []string
|
||||||
|
if urls != "" {
|
||||||
|
urlList = strings.Split(urls, "\n")
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx = context.WithValue(ctx, "isSymlink", isSymlink)
|
||||||
|
for _, url := range urlList {
|
||||||
|
if err := s.qbit.AddMagnet(ctx, url, category); err != nil {
|
||||||
|
s.logger.Printf("Error adding magnet: %v\n", err)
|
||||||
|
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if contentType == "multipart/form-data" && len(r.MultipartForm.File["torrents"]) > 0 {
|
||||||
|
files := r.MultipartForm.File["torrents"]
|
||||||
|
for _, fileHeader := range files {
|
||||||
|
if err := s.qbit.AddTorrent(ctx, fileHeader, category); err != nil {
|
||||||
|
s.logger.Printf("Error adding torrent: %v\n", err)
|
||||||
|
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
w.WriteHeader(http.StatusOK)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Server) handleTorrentsDelete(w http.ResponseWriter, r *http.Request) {
|
||||||
|
ctx := r.Context()
|
||||||
|
hashes, _ := ctx.Value("hashes").([]string)
|
||||||
|
if len(hashes) == 0 {
|
||||||
|
http.Error(w, "No hashes provided", http.StatusBadRequest)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
for _, hash := range hashes {
|
||||||
|
s.qbit.Storage.Delete(hash)
|
||||||
|
}
|
||||||
|
|
||||||
|
w.WriteHeader(http.StatusOK)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Server) handleTorrentsPause(w http.ResponseWriter, r *http.Request) {
|
||||||
|
ctx := r.Context()
|
||||||
|
hashes, _ := ctx.Value("hashes").([]string)
|
||||||
|
for _, hash := range hashes {
|
||||||
|
torrent := s.qbit.Storage.Get(hash)
|
||||||
|
if torrent == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
go s.qbit.PauseTorrent(torrent)
|
||||||
|
}
|
||||||
|
|
||||||
|
w.WriteHeader(http.StatusOK)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Server) handleTorrentsResume(w http.ResponseWriter, r *http.Request) {
|
||||||
|
ctx := r.Context()
|
||||||
|
hashes, _ := ctx.Value("hashes").([]string)
|
||||||
|
for _, hash := range hashes {
|
||||||
|
torrent := s.qbit.Storage.Get(hash)
|
||||||
|
if torrent == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
go s.qbit.ResumeTorrent(torrent)
|
||||||
|
}
|
||||||
|
|
||||||
|
w.WriteHeader(http.StatusOK)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Server) handleTorrentRecheck(w http.ResponseWriter, r *http.Request) {
|
||||||
|
ctx := r.Context()
|
||||||
|
hashes, _ := ctx.Value("hashes").([]string)
|
||||||
|
for _, hash := range hashes {
|
||||||
|
torrent := s.qbit.Storage.Get(hash)
|
||||||
|
if torrent == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
go s.qbit.RefreshTorrent(torrent)
|
||||||
|
}
|
||||||
|
|
||||||
|
w.WriteHeader(http.StatusOK)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Server) handleCategories(w http.ResponseWriter, r *http.Request) {
|
||||||
|
var categories = map[string]shared.TorrentCategory{}
|
||||||
|
for _, cat := range s.qbit.Categories {
|
||||||
|
path := filepath.Join(s.qbit.DownloadFolder, cat)
|
||||||
|
categories[cat] = shared.TorrentCategory{
|
||||||
|
Name: cat,
|
||||||
|
SavePath: path,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
common.JSONResponse(w, categories, http.StatusOK)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Server) handleCreateCategory(w http.ResponseWriter, r *http.Request) {
|
||||||
|
err := r.ParseForm()
|
||||||
|
if err != nil {
|
||||||
|
http.Error(w, "Failed to parse form data", http.StatusBadRequest)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
name := r.Form.Get("category")
|
||||||
|
if name == "" {
|
||||||
|
http.Error(w, "No name provided", http.StatusBadRequest)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
s.qbit.Categories = append(s.qbit.Categories, name)
|
||||||
|
|
||||||
|
common.JSONResponse(w, nil, http.StatusOK)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Server) handleTorrentProperties(w http.ResponseWriter, r *http.Request) {
|
||||||
|
hash := r.URL.Query().Get("hash")
|
||||||
|
torrent := s.qbit.Storage.Get(hash)
|
||||||
|
properties := s.qbit.GetTorrentProperties(torrent)
|
||||||
|
common.JSONResponse(w, properties, http.StatusOK)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Server) handleTorrentFiles(w http.ResponseWriter, r *http.Request) {
|
||||||
|
hash := r.URL.Query().Get("hash")
|
||||||
|
torrent := s.qbit.Storage.Get(hash)
|
||||||
|
if torrent == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
files := s.qbit.GetTorrentFiles(torrent)
|
||||||
|
common.JSONResponse(w, files, http.StatusOK)
|
||||||
|
}
|
||||||
149
pkg/qbit/server/ui_handlers.go
Normal file
149
pkg/qbit/server/ui_handlers.go
Normal file
@@ -0,0 +1,149 @@
|
|||||||
|
package server
|
||||||
|
|
||||||
|
import (
|
||||||
|
"embed"
|
||||||
|
"encoding/json"
|
||||||
|
"goBlack/common"
|
||||||
|
"goBlack/pkg/arr"
|
||||||
|
"goBlack/pkg/debrid"
|
||||||
|
"html/template"
|
||||||
|
"net/http"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
type AddRequest struct {
|
||||||
|
Url string `json:"url"`
|
||||||
|
Arr string `json:"arr"`
|
||||||
|
File string `json:"file"`
|
||||||
|
NotSymlink bool `json:"notSymlink"`
|
||||||
|
Content string `json:"content"`
|
||||||
|
Seasons []string `json:"seasons"`
|
||||||
|
Episodes []string `json:"episodes"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type ArrResponse struct {
|
||||||
|
Name string `json:"name"`
|
||||||
|
Url string `json:"url"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type ContentResponse struct {
|
||||||
|
ID string `json:"id"`
|
||||||
|
Title string `json:"title"`
|
||||||
|
Type string `json:"type"`
|
||||||
|
ArrID string `json:"arr"`
|
||||||
|
}
|
||||||
|
|
||||||
|
//go:embed static/index.html
|
||||||
|
var content embed.FS
|
||||||
|
|
||||||
|
func (s *Server) handleHome(w http.ResponseWriter, r *http.Request) {
|
||||||
|
tmpl, err := template.ParseFS(content, "static/index.html")
|
||||||
|
if err != nil {
|
||||||
|
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
err = tmpl.Execute(w, nil)
|
||||||
|
if err != nil {
|
||||||
|
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Server) handleGetArrs(w http.ResponseWriter, r *http.Request) {
|
||||||
|
w.Header().Set("Content-Type", "application/json")
|
||||||
|
common.JSONResponse(w, s.qbit.Arrs.GetAll(), http.StatusOK)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Server) handleContent(w http.ResponseWriter, r *http.Request) {
|
||||||
|
arrName := r.URL.Query().Get("arr")
|
||||||
|
_arr := s.qbit.Arrs.Get(arrName)
|
||||||
|
if _arr == nil {
|
||||||
|
http.Error(w, "Invalid arr", http.StatusBadRequest)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
contents := _arr.GetContents()
|
||||||
|
w.Header().Set("Content-Type", "application/json")
|
||||||
|
common.JSONResponse(w, contents, http.StatusOK)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Server) handleSearch(w http.ResponseWriter, r *http.Request) {
|
||||||
|
// arrName := r.URL.Query().Get("arr")
|
||||||
|
term := r.URL.Query().Get("term")
|
||||||
|
results, err := arr.SearchTMDB(term)
|
||||||
|
if err != nil {
|
||||||
|
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
w.Header().Set("Content-Type", "application/json")
|
||||||
|
common.JSONResponse(w, results.Results, http.StatusOK)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Server) handleSeasons(w http.ResponseWriter, r *http.Request) {
|
||||||
|
// arrId := r.URL.Query().Get("arrId")
|
||||||
|
// contentId := chi.URLParam(r, "contentId")
|
||||||
|
seasons := []string{"Season 1", "Season 2", "Season 3", "Season 4", "Season 5"}
|
||||||
|
w.Header().Set("Content-Type", "application/json")
|
||||||
|
common.JSONResponse(w, seasons, http.StatusOK)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Server) handleEpisodes(w http.ResponseWriter, r *http.Request) {
|
||||||
|
// arrId := r.URL.Query().Get("arrId")
|
||||||
|
// contentId := chi.URLParam(r, "contentId")
|
||||||
|
// seasonIds := strings.Split(r.URL.Query().Get("seasons"), ",")
|
||||||
|
episodes := []string{"Episode 1", "Episode 2", "Episode 3", "Episode 4", "Episode 5"}
|
||||||
|
w.Header().Set("Content-Type", "application/json")
|
||||||
|
common.JSONResponse(w, episodes, http.StatusOK)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Server) handleAddContent(w http.ResponseWriter, r *http.Request) {
|
||||||
|
var req AddRequest
|
||||||
|
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
||||||
|
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
_arr := s.qbit.Arrs.Get(req.Arr)
|
||||||
|
if _arr == nil {
|
||||||
|
_arr = arr.NewArr(req.Arr, "", "", arr.Sonarr)
|
||||||
|
}
|
||||||
|
importReq := NewImportRequest(req.Url, _arr, !req.NotSymlink)
|
||||||
|
err := importReq.Process(s)
|
||||||
|
if err != nil {
|
||||||
|
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
common.JSONResponse(w, importReq, http.StatusOK)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Server) handleCheckCached(w http.ResponseWriter, r *http.Request) {
|
||||||
|
w.Header().Set("Content-Type", "application/json")
|
||||||
|
_hashes := r.URL.Query().Get("hash")
|
||||||
|
if _hashes == "" {
|
||||||
|
http.Error(w, "No hashes provided", http.StatusBadRequest)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
hashes := strings.Split(_hashes, ",")
|
||||||
|
if len(hashes) == 0 {
|
||||||
|
http.Error(w, "No hashes provided", http.StatusBadRequest)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
db := r.URL.Query().Get("debrid")
|
||||||
|
var deb debrid.Service
|
||||||
|
if db == "" {
|
||||||
|
// use the first debrid
|
||||||
|
deb = s.qbit.Debrid.Get()
|
||||||
|
} else {
|
||||||
|
deb = s.qbit.Debrid.GetByName(db)
|
||||||
|
}
|
||||||
|
if deb == nil {
|
||||||
|
http.Error(w, "Invalid debrid", http.StatusBadRequest)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
res := deb.IsAvailable(hashes)
|
||||||
|
result := make(map[string]bool)
|
||||||
|
for _, h := range hashes {
|
||||||
|
_, exists := res[h]
|
||||||
|
result[h] = exists
|
||||||
|
}
|
||||||
|
common.JSONResponse(w, result, http.StatusOK)
|
||||||
|
}
|
||||||
@@ -1,29 +1,27 @@
|
|||||||
package qbit
|
package shared
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"fmt"
|
||||||
"goBlack/common"
|
"goBlack/common"
|
||||||
"goBlack/pkg/debrid"
|
"goBlack/pkg/debrid"
|
||||||
"goBlack/pkg/qbit/downloaders"
|
"goBlack/pkg/downloaders"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
func (q *QBit) processManualFiles(torrent *Torrent, debridTorrent *debrid.Torrent, arr *debrid.Arr) {
|
func (q *QBit) processManualFiles(debridTorrent *debrid.Torrent) (string, error) {
|
||||||
q.logger.Printf("Downloading %d files...", len(debridTorrent.DownloadLinks))
|
q.logger.Printf("Downloading %d files...", len(debridTorrent.DownloadLinks))
|
||||||
torrentPath := common.RemoveExtension(debridTorrent.OriginalFilename)
|
torrentPath := common.RemoveExtension(debridTorrent.OriginalFilename)
|
||||||
parent := common.RemoveInvalidChars(filepath.Join(q.DownloadFolder, debridTorrent.Arr.Name, torrentPath))
|
parent := common.RemoveInvalidChars(filepath.Join(q.DownloadFolder, debridTorrent.Arr.Name, torrentPath))
|
||||||
err := os.MkdirAll(parent, os.ModePerm)
|
err := os.MkdirAll(parent, os.ModePerm)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
q.logger.Printf("Failed to create directory: %s\n", parent)
|
// add previous error to the error and return
|
||||||
q.MarkAsFailed(torrent)
|
return "", fmt.Errorf("failed to create directory: %s: %v", parent, err)
|
||||||
return
|
|
||||||
}
|
}
|
||||||
torrent.TorrentPath = torrentPath
|
|
||||||
q.downloadFiles(debridTorrent, parent)
|
q.downloadFiles(debridTorrent, parent)
|
||||||
q.UpdateTorrent(torrent, debridTorrent)
|
return torrentPath, nil
|
||||||
q.RefreshArr(arr)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (q *QBit) downloadFiles(debridTorrent *debrid.Torrent, parent string) {
|
func (q *QBit) downloadFiles(debridTorrent *debrid.Torrent, parent string) {
|
||||||
@@ -52,26 +50,21 @@ func (q *QBit) downloadFiles(debridTorrent *debrid.Torrent, parent string) {
|
|||||||
q.logger.Printf("Downloaded all files for %s\n", debridTorrent.Name)
|
q.logger.Printf("Downloaded all files for %s\n", debridTorrent.Name)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (q *QBit) processSymlink(torrent *Torrent, debridTorrent *debrid.Torrent, arr *debrid.Arr) {
|
func (q *QBit) ProcessSymlink(debridTorrent *debrid.Torrent) (string, error) {
|
||||||
var wg sync.WaitGroup
|
var wg sync.WaitGroup
|
||||||
files := debridTorrent.Files
|
files := debridTorrent.Files
|
||||||
ready := make(chan debrid.TorrentFile, len(files))
|
ready := make(chan debrid.TorrentFile, len(files))
|
||||||
|
|
||||||
q.logger.Printf("Checking %d files...", len(files))
|
q.logger.Printf("Checking %d files...", len(files))
|
||||||
rCloneBase := q.debrid.GetMountPath()
|
rCloneBase := debridTorrent.Debrid.GetMountPath()
|
||||||
torrentPath, err := q.getTorrentPath(rCloneBase, debridTorrent) // /MyTVShow/
|
torrentPath, err := q.getTorrentPath(rCloneBase, debridTorrent) // /MyTVShow/
|
||||||
if err != nil {
|
if err != nil {
|
||||||
q.MarkAsFailed(torrent)
|
return "", fmt.Errorf("failed to get torrent path: %v", err)
|
||||||
q.logger.Printf("Error: %v", err)
|
|
||||||
return
|
|
||||||
}
|
}
|
||||||
|
|
||||||
torrentSymlinkPath := filepath.Join(q.DownloadFolder, debridTorrent.Arr.Name, torrentPath) // /mnt/symlinks/{category}/MyTVShow/
|
torrentSymlinkPath := filepath.Join(q.DownloadFolder, debridTorrent.Arr.Name, torrentPath) // /mnt/symlinks/{category}/MyTVShow/
|
||||||
err = os.MkdirAll(torrentSymlinkPath, os.ModePerm)
|
err = os.MkdirAll(torrentSymlinkPath, os.ModePerm)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
q.logger.Printf("Failed to create directory: %s\n", torrentSymlinkPath)
|
return "", fmt.Errorf("failed to create directory: %s: %v", torrentSymlinkPath, err)
|
||||||
q.MarkAsFailed(torrent)
|
|
||||||
return
|
|
||||||
}
|
}
|
||||||
torrentRclonePath := filepath.Join(rCloneBase, torrentPath)
|
torrentRclonePath := filepath.Join(rCloneBase, torrentPath)
|
||||||
for _, file := range files {
|
for _, file := range files {
|
||||||
@@ -88,32 +81,16 @@ func (q *QBit) processSymlink(torrent *Torrent, debridTorrent *debrid.Torrent, a
|
|||||||
q.logger.Println("File is ready:", f.Path)
|
q.logger.Println("File is ready:", f.Path)
|
||||||
q.createSymLink(torrentSymlinkPath, torrentRclonePath, f)
|
q.createSymLink(torrentSymlinkPath, torrentRclonePath, f)
|
||||||
}
|
}
|
||||||
// Update the torrent when all files are ready
|
return torrentPath, nil
|
||||||
torrent.TorrentPath = filepath.Base(torrentPath) // Quite important
|
|
||||||
q.UpdateTorrent(torrent, debridTorrent)
|
|
||||||
q.RefreshArr(arr)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (q *QBit) getTorrentPath(rclonePath string, debridTorrent *debrid.Torrent) (string, error) {
|
func (q *QBit) getTorrentPath(rclonePath string, debridTorrent *debrid.Torrent) (string, error) {
|
||||||
pathChan := make(chan string)
|
for {
|
||||||
errChan := make(chan error)
|
torrentPath := debridTorrent.GetMountFolder(rclonePath)
|
||||||
|
if torrentPath != "" {
|
||||||
go func() {
|
return torrentPath, nil
|
||||||
for {
|
|
||||||
torrentPath := debridTorrent.GetMountFolder(rclonePath)
|
|
||||||
if torrentPath != "" {
|
|
||||||
pathChan <- torrentPath
|
|
||||||
return
|
|
||||||
}
|
|
||||||
time.Sleep(time.Second)
|
|
||||||
}
|
}
|
||||||
}()
|
time.Sleep(time.Second)
|
||||||
|
|
||||||
select {
|
|
||||||
case path := <-pathChan:
|
|
||||||
return path, nil
|
|
||||||
case err := <-errChan:
|
|
||||||
return "", err
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -125,7 +102,7 @@ func (q *QBit) createSymLink(path string, torrentMountPath string, file debrid.T
|
|||||||
torrentFilePath := filepath.Join(torrentMountPath, file.Name) // debridFolder/MyTVShow/MyTVShow.S01E01.720p.mkv
|
torrentFilePath := filepath.Join(torrentMountPath, file.Name) // debridFolder/MyTVShow/MyTVShow.S01E01.720p.mkv
|
||||||
err := os.Symlink(torrentFilePath, fullPath)
|
err := os.Symlink(torrentFilePath, fullPath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
q.logger.Printf("Failed to create symlink: %s\n", fullPath)
|
q.logger.Printf("Failed to create symlink: %s: %v\n", fullPath, err)
|
||||||
}
|
}
|
||||||
// Check if the file exists
|
// Check if the file exists
|
||||||
if !common.FileReady(fullPath) {
|
if !common.FileReady(fullPath) {
|
||||||
44
pkg/qbit/shared/qbit.go
Normal file
44
pkg/qbit/shared/qbit.go
Normal file
@@ -0,0 +1,44 @@
|
|||||||
|
package shared
|
||||||
|
|
||||||
|
import (
|
||||||
|
"cmp"
|
||||||
|
"goBlack/common"
|
||||||
|
"goBlack/pkg/arr"
|
||||||
|
"goBlack/pkg/debrid"
|
||||||
|
"log"
|
||||||
|
"os"
|
||||||
|
)
|
||||||
|
|
||||||
|
type QBit struct {
|
||||||
|
Username string `json:"username"`
|
||||||
|
Password string `json:"password"`
|
||||||
|
Port string `json:"port"`
|
||||||
|
DownloadFolder string `json:"download_folder"`
|
||||||
|
Categories []string `json:"categories"`
|
||||||
|
Debrid *debrid.DebridService
|
||||||
|
Storage *TorrentStorage
|
||||||
|
debug bool
|
||||||
|
logger *log.Logger
|
||||||
|
Arrs *arr.Storage
|
||||||
|
RefreshInterval int
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewQBit(config *common.Config, deb *debrid.DebridService, logger *log.Logger) *QBit {
|
||||||
|
cfg := config.QBitTorrent
|
||||||
|
port := cmp.Or(cfg.Port, os.Getenv("QBIT_PORT"), "8182")
|
||||||
|
refreshInterval := cmp.Or(cfg.RefreshInterval, 10)
|
||||||
|
arrs := arr.NewStorage()
|
||||||
|
return &QBit{
|
||||||
|
Username: cfg.Username,
|
||||||
|
Password: cfg.Password,
|
||||||
|
Port: port,
|
||||||
|
DownloadFolder: cfg.DownloadFolder,
|
||||||
|
Categories: cfg.Categories,
|
||||||
|
Debrid: deb,
|
||||||
|
debug: cfg.Debug,
|
||||||
|
Storage: NewTorrentStorage("torrents.json"),
|
||||||
|
logger: logger,
|
||||||
|
Arrs: arrs,
|
||||||
|
RefreshInterval: refreshInterval,
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -1,4 +1,4 @@
|
|||||||
package qbit
|
package shared
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
@@ -10,6 +10,7 @@ type TorrentStorage struct {
|
|||||||
torrents map[string]*Torrent
|
torrents map[string]*Torrent
|
||||||
mu sync.RWMutex
|
mu sync.RWMutex
|
||||||
order []string
|
order []string
|
||||||
|
filename string // Added to store the filename for persistence
|
||||||
}
|
}
|
||||||
|
|
||||||
func loadTorrentsFromJSON(filename string) (map[string]*Torrent, error) {
|
func loadTorrentsFromJSON(filename string) (map[string]*Torrent, error) {
|
||||||
@@ -25,7 +26,7 @@ func loadTorrentsFromJSON(filename string) (map[string]*Torrent, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func NewTorrentStorage(filename string) *TorrentStorage {
|
func NewTorrentStorage(filename string) *TorrentStorage {
|
||||||
// Open the json file and read the data
|
// Open the JSON file and read the data
|
||||||
torrents, err := loadTorrentsFromJSON(filename)
|
torrents, err := loadTorrentsFromJSON(filename)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
torrents = make(map[string]*Torrent)
|
torrents = make(map[string]*Torrent)
|
||||||
@@ -38,6 +39,7 @@ func NewTorrentStorage(filename string) *TorrentStorage {
|
|||||||
return &TorrentStorage{
|
return &TorrentStorage{
|
||||||
torrents: torrents,
|
torrents: torrents,
|
||||||
order: order,
|
order: order,
|
||||||
|
filename: filename,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -46,6 +48,7 @@ func (ts *TorrentStorage) Add(torrent *Torrent) {
|
|||||||
defer ts.mu.Unlock()
|
defer ts.mu.Unlock()
|
||||||
ts.torrents[torrent.Hash] = torrent
|
ts.torrents[torrent.Hash] = torrent
|
||||||
ts.order = append(ts.order, torrent.Hash)
|
ts.order = append(ts.order, torrent.Hash)
|
||||||
|
_ = ts.saveToFile()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ts *TorrentStorage) AddOrUpdate(torrent *Torrent) {
|
func (ts *TorrentStorage) AddOrUpdate(torrent *Torrent) {
|
||||||
@@ -55,6 +58,7 @@ func (ts *TorrentStorage) AddOrUpdate(torrent *Torrent) {
|
|||||||
ts.order = append(ts.order, torrent.Hash)
|
ts.order = append(ts.order, torrent.Hash)
|
||||||
}
|
}
|
||||||
ts.torrents[torrent.Hash] = torrent
|
ts.torrents[torrent.Hash] = torrent
|
||||||
|
_ = ts.saveToFile()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ts *TorrentStorage) GetByID(id string) *Torrent {
|
func (ts *TorrentStorage) GetByID(id string) *Torrent {
|
||||||
@@ -104,6 +108,7 @@ func (ts *TorrentStorage) Update(torrent *Torrent) {
|
|||||||
ts.mu.Lock()
|
ts.mu.Lock()
|
||||||
defer ts.mu.Unlock()
|
defer ts.mu.Unlock()
|
||||||
ts.torrents[torrent.Hash] = torrent
|
ts.torrents[torrent.Hash] = torrent
|
||||||
|
_ = ts.saveToFile()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ts *TorrentStorage) Delete(hash string) {
|
func (ts *TorrentStorage) Delete(hash string) {
|
||||||
@@ -122,16 +127,25 @@ func (ts *TorrentStorage) Delete(hash string) {
|
|||||||
}
|
}
|
||||||
// Delete the torrent folder
|
// Delete the torrent folder
|
||||||
if torrent.ContentPath != "" {
|
if torrent.ContentPath != "" {
|
||||||
os.RemoveAll(torrent.ContentPath)
|
err := os.RemoveAll(torrent.ContentPath)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
_ = ts.saveToFile()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ts *TorrentStorage) Save(filename string) error {
|
func (ts *TorrentStorage) Save() error {
|
||||||
ts.mu.RLock()
|
ts.mu.RLock()
|
||||||
defer ts.mu.RUnlock()
|
defer ts.mu.RUnlock()
|
||||||
data, err := json.Marshal(ts.torrents)
|
return ts.saveToFile()
|
||||||
|
}
|
||||||
|
|
||||||
|
// saveToFile is a helper function to write the current state to the JSON file
|
||||||
|
func (ts *TorrentStorage) saveToFile() error {
|
||||||
|
data, err := json.MarshalIndent(ts.torrents, "", " ")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
return os.WriteFile(filename, data, 0644)
|
return os.WriteFile(ts.filename, data, 0644)
|
||||||
}
|
}
|
||||||
@@ -1,4 +1,4 @@
|
|||||||
package qbit
|
package shared
|
||||||
|
|
||||||
import "goBlack/pkg/debrid"
|
import "goBlack/pkg/debrid"
|
||||||
|
|
||||||
@@ -14,40 +14,40 @@ type BuildInfo struct {
|
|||||||
type AppPreferences struct {
|
type AppPreferences struct {
|
||||||
AddTrackers string `json:"add_trackers"`
|
AddTrackers string `json:"add_trackers"`
|
||||||
AddTrackersEnabled bool `json:"add_trackers_enabled"`
|
AddTrackersEnabled bool `json:"add_trackers_enabled"`
|
||||||
AltDlLimit int64 `json:"alt_dl_limit"`
|
AltDlLimit int `json:"alt_dl_limit"`
|
||||||
AltUpLimit int64 `json:"alt_up_limit"`
|
AltUpLimit int `json:"alt_up_limit"`
|
||||||
AlternativeWebuiEnabled bool `json:"alternative_webui_enabled"`
|
AlternativeWebuiEnabled bool `json:"alternative_webui_enabled"`
|
||||||
AlternativeWebuiPath string `json:"alternative_webui_path"`
|
AlternativeWebuiPath string `json:"alternative_webui_path"`
|
||||||
AnnounceIp string `json:"announce_ip"`
|
AnnounceIp string `json:"announce_ip"`
|
||||||
AnnounceToAllTiers bool `json:"announce_to_all_tiers"`
|
AnnounceToAllTiers bool `json:"announce_to_all_tiers"`
|
||||||
AnnounceToAllTrackers bool `json:"announce_to_all_trackers"`
|
AnnounceToAllTrackers bool `json:"announce_to_all_trackers"`
|
||||||
AnonymousMode bool `json:"anonymous_mode"`
|
AnonymousMode bool `json:"anonymous_mode"`
|
||||||
AsyncIoThreads int64 `json:"async_io_threads"`
|
AsyncIoThreads int `json:"async_io_threads"`
|
||||||
AutoDeleteMode int64 `json:"auto_delete_mode"`
|
AutoDeleteMode int `json:"auto_delete_mode"`
|
||||||
AutoTmmEnabled bool `json:"auto_tmm_enabled"`
|
AutoTmmEnabled bool `json:"auto_tmm_enabled"`
|
||||||
AutorunEnabled bool `json:"autorun_enabled"`
|
AutorunEnabled bool `json:"autorun_enabled"`
|
||||||
AutorunProgram string `json:"autorun_program"`
|
AutorunProgram string `json:"autorun_program"`
|
||||||
BannedIPs string `json:"banned_IPs"`
|
BannedIPs string `json:"banned_IPs"`
|
||||||
BittorrentProtocol int64 `json:"bittorrent_protocol"`
|
BittorrentProtocol int `json:"bittorrent_protocol"`
|
||||||
BypassAuthSubnetWhitelist string `json:"bypass_auth_subnet_whitelist"`
|
BypassAuthSubnetWhitelist string `json:"bypass_auth_subnet_whitelist"`
|
||||||
BypassAuthSubnetWhitelistEnabled bool `json:"bypass_auth_subnet_whitelist_enabled"`
|
BypassAuthSubnetWhitelistEnabled bool `json:"bypass_auth_subnet_whitelist_enabled"`
|
||||||
BypassLocalAuth bool `json:"bypass_local_auth"`
|
BypassLocalAuth bool `json:"bypass_local_auth"`
|
||||||
CategoryChangedTmmEnabled bool `json:"category_changed_tmm_enabled"`
|
CategoryChangedTmmEnabled bool `json:"category_changed_tmm_enabled"`
|
||||||
CheckingMemoryUse int64 `json:"checking_memory_use"`
|
CheckingMemoryUse int `json:"checking_memory_use"`
|
||||||
CreateSubfolderEnabled bool `json:"create_subfolder_enabled"`
|
CreateSubfolderEnabled bool `json:"create_subfolder_enabled"`
|
||||||
CurrentInterfaceAddress string `json:"current_interface_address"`
|
CurrentInterfaceAddress string `json:"current_interface_address"`
|
||||||
CurrentNetworkInterface string `json:"current_network_interface"`
|
CurrentNetworkInterface string `json:"current_network_interface"`
|
||||||
Dht bool `json:"dht"`
|
Dht bool `json:"dht"`
|
||||||
DiskCache int64 `json:"disk_cache"`
|
DiskCache int `json:"disk_cache"`
|
||||||
DiskCacheTtl int64 `json:"disk_cache_ttl"`
|
DiskCacheTtl int `json:"disk_cache_ttl"`
|
||||||
DlLimit int64 `json:"dl_limit"`
|
DlLimit int `json:"dl_limit"`
|
||||||
DontCountSlowTorrents bool `json:"dont_count_slow_torrents"`
|
DontCountSlowTorrents bool `json:"dont_count_slow_torrents"`
|
||||||
DyndnsDomain string `json:"dyndns_domain"`
|
DyndnsDomain string `json:"dyndns_domain"`
|
||||||
DyndnsEnabled bool `json:"dyndns_enabled"`
|
DyndnsEnabled bool `json:"dyndns_enabled"`
|
||||||
DyndnsPassword string `json:"dyndns_password"`
|
DyndnsPassword string `json:"dyndns_password"`
|
||||||
DyndnsService int64 `json:"dyndns_service"`
|
DyndnsService int `json:"dyndns_service"`
|
||||||
DyndnsUsername string `json:"dyndns_username"`
|
DyndnsUsername string `json:"dyndns_username"`
|
||||||
EmbeddedTrackerPort int64 `json:"embedded_tracker_port"`
|
EmbeddedTrackerPort int `json:"embedded_tracker_port"`
|
||||||
EnableCoalesceReadWrite bool `json:"enable_coalesce_read_write"`
|
EnableCoalesceReadWrite bool `json:"enable_coalesce_read_write"`
|
||||||
EnableEmbeddedTracker bool `json:"enable_embedded_tracker"`
|
EnableEmbeddedTracker bool `json:"enable_embedded_tracker"`
|
||||||
EnableMultiConnectionsFromSameIp bool `json:"enable_multi_connections_from_same_ip"`
|
EnableMultiConnectionsFromSameIp bool `json:"enable_multi_connections_from_same_ip"`
|
||||||
@@ -55,10 +55,10 @@ type AppPreferences struct {
|
|||||||
EnablePieceExtentAffinity bool `json:"enable_piece_extent_affinity"`
|
EnablePieceExtentAffinity bool `json:"enable_piece_extent_affinity"`
|
||||||
EnableSuperSeeding bool `json:"enable_super_seeding"`
|
EnableSuperSeeding bool `json:"enable_super_seeding"`
|
||||||
EnableUploadSuggestions bool `json:"enable_upload_suggestions"`
|
EnableUploadSuggestions bool `json:"enable_upload_suggestions"`
|
||||||
Encryption int64 `json:"encryption"`
|
Encryption int `json:"encryption"`
|
||||||
ExportDir string `json:"export_dir"`
|
ExportDir string `json:"export_dir"`
|
||||||
ExportDirFin string `json:"export_dir_fin"`
|
ExportDirFin string `json:"export_dir_fin"`
|
||||||
FilePoolSize int64 `json:"file_pool_size"`
|
FilePoolSize int `json:"file_pool_size"`
|
||||||
IncompleteFilesExt bool `json:"incomplete_files_ext"`
|
IncompleteFilesExt bool `json:"incomplete_files_ext"`
|
||||||
IpFilterEnabled bool `json:"ip_filter_enabled"`
|
IpFilterEnabled bool `json:"ip_filter_enabled"`
|
||||||
IpFilterPath string `json:"ip_filter_path"`
|
IpFilterPath string `json:"ip_filter_path"`
|
||||||
@@ -66,7 +66,7 @@ type AppPreferences struct {
|
|||||||
LimitLanPeers bool `json:"limit_lan_peers"`
|
LimitLanPeers bool `json:"limit_lan_peers"`
|
||||||
LimitTcpOverhead bool `json:"limit_tcp_overhead"`
|
LimitTcpOverhead bool `json:"limit_tcp_overhead"`
|
||||||
LimitUtpRate bool `json:"limit_utp_rate"`
|
LimitUtpRate bool `json:"limit_utp_rate"`
|
||||||
ListenPort int64 `json:"listen_port"`
|
ListenPort int `json:"listen_port"`
|
||||||
Locale string `json:"locale"`
|
Locale string `json:"locale"`
|
||||||
Lsd bool `json:"lsd"`
|
Lsd bool `json:"lsd"`
|
||||||
MailNotificationAuthEnabled bool `json:"mail_notification_auth_enabled"`
|
MailNotificationAuthEnabled bool `json:"mail_notification_auth_enabled"`
|
||||||
@@ -77,79 +77,79 @@ type AppPreferences struct {
|
|||||||
MailNotificationSmtp string `json:"mail_notification_smtp"`
|
MailNotificationSmtp string `json:"mail_notification_smtp"`
|
||||||
MailNotificationSslEnabled bool `json:"mail_notification_ssl_enabled"`
|
MailNotificationSslEnabled bool `json:"mail_notification_ssl_enabled"`
|
||||||
MailNotificationUsername string `json:"mail_notification_username"`
|
MailNotificationUsername string `json:"mail_notification_username"`
|
||||||
MaxActiveDownloads int64 `json:"max_active_downloads"`
|
MaxActiveDownloads int `json:"max_active_downloads"`
|
||||||
MaxActiveTorrents int64 `json:"max_active_torrents"`
|
MaxActiveTorrents int `json:"max_active_torrents"`
|
||||||
MaxActiveUploads int64 `json:"max_active_uploads"`
|
MaxActiveUploads int `json:"max_active_uploads"`
|
||||||
MaxConnec int64 `json:"max_connec"`
|
MaxConnec int `json:"max_connec"`
|
||||||
MaxConnecPerTorrent int64 `json:"max_connec_per_torrent"`
|
MaxConnecPerTorrent int `json:"max_connec_per_torrent"`
|
||||||
MaxRatio int64 `json:"max_ratio"`
|
MaxRatio int `json:"max_ratio"`
|
||||||
MaxRatioAct int64 `json:"max_ratio_act"`
|
MaxRatioAct int `json:"max_ratio_act"`
|
||||||
MaxRatioEnabled bool `json:"max_ratio_enabled"`
|
MaxRatioEnabled bool `json:"max_ratio_enabled"`
|
||||||
MaxSeedingTime int64 `json:"max_seeding_time"`
|
MaxSeedingTime int `json:"max_seeding_time"`
|
||||||
MaxSeedingTimeEnabled bool `json:"max_seeding_time_enabled"`
|
MaxSeedingTimeEnabled bool `json:"max_seeding_time_enabled"`
|
||||||
MaxUploads int64 `json:"max_uploads"`
|
MaxUploads int `json:"max_uploads"`
|
||||||
MaxUploadsPerTorrent int64 `json:"max_uploads_per_torrent"`
|
MaxUploadsPerTorrent int `json:"max_uploads_per_torrent"`
|
||||||
OutgoingPortsMax int64 `json:"outgoing_ports_max"`
|
OutgoingPortsMax int `json:"outgoing_ports_max"`
|
||||||
OutgoingPortsMin int64 `json:"outgoing_ports_min"`
|
OutgoingPortsMin int `json:"outgoing_ports_min"`
|
||||||
Pex bool `json:"pex"`
|
Pex bool `json:"pex"`
|
||||||
PreallocateAll bool `json:"preallocate_all"`
|
PreallocateAll bool `json:"preallocate_all"`
|
||||||
ProxyAuthEnabled bool `json:"proxy_auth_enabled"`
|
ProxyAuthEnabled bool `json:"proxy_auth_enabled"`
|
||||||
ProxyIp string `json:"proxy_ip"`
|
ProxyIp string `json:"proxy_ip"`
|
||||||
ProxyPassword string `json:"proxy_password"`
|
ProxyPassword string `json:"proxy_password"`
|
||||||
ProxyPeerConnections bool `json:"proxy_peer_connections"`
|
ProxyPeerConnections bool `json:"proxy_peer_connections"`
|
||||||
ProxyPort int64 `json:"proxy_port"`
|
ProxyPort int `json:"proxy_port"`
|
||||||
ProxyTorrentsOnly bool `json:"proxy_torrents_only"`
|
ProxyTorrentsOnly bool `json:"proxy_torrents_only"`
|
||||||
ProxyType int64 `json:"proxy_type"`
|
ProxyType int `json:"proxy_type"`
|
||||||
ProxyUsername string `json:"proxy_username"`
|
ProxyUsername string `json:"proxy_username"`
|
||||||
QueueingEnabled bool `json:"queueing_enabled"`
|
QueueingEnabled bool `json:"queueing_enabled"`
|
||||||
RandomPort bool `json:"random_port"`
|
RandomPort bool `json:"random_port"`
|
||||||
RecheckCompletedTorrents bool `json:"recheck_completed_torrents"`
|
RecheckCompletedTorrents bool `json:"recheck_completed_torrents"`
|
||||||
ResolvePeerCountries bool `json:"resolve_peer_countries"`
|
ResolvePeerCountries bool `json:"resolve_peer_countries"`
|
||||||
RssAutoDownloadingEnabled bool `json:"rss_auto_downloading_enabled"`
|
RssAutoDownloadingEnabled bool `json:"rss_auto_downloading_enabled"`
|
||||||
RssMaxArticlesPerFeed int64 `json:"rss_max_articles_per_feed"`
|
RssMaxArticlesPerFeed int `json:"rss_max_articles_per_feed"`
|
||||||
RssProcessingEnabled bool `json:"rss_processing_enabled"`
|
RssProcessingEnabled bool `json:"rss_processing_enabled"`
|
||||||
RssRefreshInterval int64 `json:"rss_refresh_interval"`
|
RssRefreshInterval int `json:"rss_refresh_interval"`
|
||||||
SavePath string `json:"save_path"`
|
SavePath string `json:"save_path"`
|
||||||
SavePathChangedTmmEnabled bool `json:"save_path_changed_tmm_enabled"`
|
SavePathChangedTmmEnabled bool `json:"save_path_changed_tmm_enabled"`
|
||||||
SaveResumeDataInterval int64 `json:"save_resume_data_interval"`
|
SaveResumeDataInterval int `json:"save_resume_data_interval"`
|
||||||
ScanDirs ScanDirs `json:"scan_dirs"`
|
ScanDirs ScanDirs `json:"scan_dirs"`
|
||||||
ScheduleFromHour int64 `json:"schedule_from_hour"`
|
ScheduleFromHour int `json:"schedule_from_hour"`
|
||||||
ScheduleFromMin int64 `json:"schedule_from_min"`
|
ScheduleFromMin int `json:"schedule_from_min"`
|
||||||
ScheduleToHour int64 `json:"schedule_to_hour"`
|
ScheduleToHour int `json:"schedule_to_hour"`
|
||||||
ScheduleToMin int64 `json:"schedule_to_min"`
|
ScheduleToMin int `json:"schedule_to_min"`
|
||||||
SchedulerDays int64 `json:"scheduler_days"`
|
SchedulerDays int `json:"scheduler_days"`
|
||||||
SchedulerEnabled bool `json:"scheduler_enabled"`
|
SchedulerEnabled bool `json:"scheduler_enabled"`
|
||||||
SendBufferLowWatermark int64 `json:"send_buffer_low_watermark"`
|
SendBufferLowWatermark int `json:"send_buffer_low_watermark"`
|
||||||
SendBufferWatermark int64 `json:"send_buffer_watermark"`
|
SendBufferWatermark int `json:"send_buffer_watermark"`
|
||||||
SendBufferWatermarkFactor int64 `json:"send_buffer_watermark_factor"`
|
SendBufferWatermarkFactor int `json:"send_buffer_watermark_factor"`
|
||||||
SlowTorrentDlRateThreshold int64 `json:"slow_torrent_dl_rate_threshold"`
|
SlowTorrentDlRateThreshold int `json:"slow_torrent_dl_rate_threshold"`
|
||||||
SlowTorrentInactiveTimer int64 `json:"slow_torrent_inactive_timer"`
|
SlowTorrentInactiveTimer int `json:"slow_torrent_inactive_timer"`
|
||||||
SlowTorrentUlRateThreshold int64 `json:"slow_torrent_ul_rate_threshold"`
|
SlowTorrentUlRateThreshold int `json:"slow_torrent_ul_rate_threshold"`
|
||||||
SocketBacklogSize int64 `json:"socket_backlog_size"`
|
SocketBacklogSize int `json:"socket_backlog_size"`
|
||||||
StartPausedEnabled bool `json:"start_paused_enabled"`
|
StartPausedEnabled bool `json:"start_paused_enabled"`
|
||||||
StopTrackerTimeout int64 `json:"stop_tracker_timeout"`
|
StopTrackerTimeout int `json:"stop_tracker_timeout"`
|
||||||
TempPath string `json:"temp_path"`
|
TempPath string `json:"temp_path"`
|
||||||
TempPathEnabled bool `json:"temp_path_enabled"`
|
TempPathEnabled bool `json:"temp_path_enabled"`
|
||||||
TorrentChangedTmmEnabled bool `json:"torrent_changed_tmm_enabled"`
|
TorrentChangedTmmEnabled bool `json:"torrent_changed_tmm_enabled"`
|
||||||
UpLimit int64 `json:"up_limit"`
|
UpLimit int `json:"up_limit"`
|
||||||
UploadChokingAlgorithm int64 `json:"upload_choking_algorithm"`
|
UploadChokingAlgorithm int `json:"upload_choking_algorithm"`
|
||||||
UploadSlotsBehavior int64 `json:"upload_slots_behavior"`
|
UploadSlotsBehavior int `json:"upload_slots_behavior"`
|
||||||
Upnp bool `json:"upnp"`
|
Upnp bool `json:"upnp"`
|
||||||
UpnpLeaseDuration int64 `json:"upnp_lease_duration"`
|
UpnpLeaseDuration int `json:"upnp_lease_duration"`
|
||||||
UseHttps bool `json:"use_https"`
|
UseHttps bool `json:"use_https"`
|
||||||
UtpTcpMixedMode int64 `json:"utp_tcp_mixed_mode"`
|
UtpTcpMixedMode int `json:"utp_tcp_mixed_mode"`
|
||||||
WebUiAddress string `json:"web_ui_address"`
|
WebUiAddress string `json:"web_ui_address"`
|
||||||
WebUiBanDuration int64 `json:"web_ui_ban_duration"`
|
WebUiBanDuration int `json:"web_ui_ban_duration"`
|
||||||
WebUiClickjackingProtectionEnabled bool `json:"web_ui_clickjacking_protection_enabled"`
|
WebUiClickjackingProtectionEnabled bool `json:"web_ui_clickjacking_protection_enabled"`
|
||||||
WebUiCsrfProtectionEnabled bool `json:"web_ui_csrf_protection_enabled"`
|
WebUiCsrfProtectionEnabled bool `json:"web_ui_csrf_protection_enabled"`
|
||||||
WebUiDomainList string `json:"web_ui_domain_list"`
|
WebUiDomainList string `json:"web_ui_domain_list"`
|
||||||
WebUiHostHeaderValidationEnabled bool `json:"web_ui_host_header_validation_enabled"`
|
WebUiHostHeaderValidationEnabled bool `json:"web_ui_host_header_validation_enabled"`
|
||||||
WebUiHttpsCertPath string `json:"web_ui_https_cert_path"`
|
WebUiHttpsCertPath string `json:"web_ui_https_cert_path"`
|
||||||
WebUiHttpsKeyPath string `json:"web_ui_https_key_path"`
|
WebUiHttpsKeyPath string `json:"web_ui_https_key_path"`
|
||||||
WebUiMaxAuthFailCount int64 `json:"web_ui_max_auth_fail_count"`
|
WebUiMaxAuthFailCount int `json:"web_ui_max_auth_fail_count"`
|
||||||
WebUiPort int64 `json:"web_ui_port"`
|
WebUiPort int `json:"web_ui_port"`
|
||||||
WebUiSecureCookieEnabled bool `json:"web_ui_secure_cookie_enabled"`
|
WebUiSecureCookieEnabled bool `json:"web_ui_secure_cookie_enabled"`
|
||||||
WebUiSessionTimeout int64 `json:"web_ui_session_timeout"`
|
WebUiSessionTimeout int `json:"web_ui_session_timeout"`
|
||||||
WebUiUpnp bool `json:"web_ui_upnp"`
|
WebUiUpnp bool `json:"web_ui_upnp"`
|
||||||
WebUiUsername string `json:"web_ui_username"`
|
WebUiUsername string `json:"web_ui_username"`
|
||||||
WebUiPassword string `json:"web_ui_password"`
|
WebUiPassword string `json:"web_ui_password"`
|
||||||
@@ -174,49 +174,49 @@ type Torrent struct {
|
|||||||
TorrentPath string `json:"-"`
|
TorrentPath string `json:"-"`
|
||||||
|
|
||||||
AddedOn int64 `json:"added_on,omitempty"`
|
AddedOn int64 `json:"added_on,omitempty"`
|
||||||
AmountLeft int64 `json:"amount_left,omitempty"`
|
AmountLeft int64 `json:"amount_left"`
|
||||||
AutoTmm bool `json:"auto_tmm"`
|
AutoTmm bool `json:"auto_tmm"`
|
||||||
Availability float64 `json:"availability"`
|
Availability float64 `json:"availability,omitempty"`
|
||||||
Category string `json:"category,omitempty"`
|
Category string `json:"category,omitempty"`
|
||||||
Completed int64 `json:"completed,omitempty"`
|
Completed int64 `json:"completed"`
|
||||||
CompletionOn int64 `json:"completion_on,omitempty"`
|
CompletionOn int `json:"completion_on,omitempty"`
|
||||||
ContentPath string `json:"content_path,omitempty"`
|
ContentPath string `json:"content_path"`
|
||||||
DlLimit int64 `json:"dl_limit,omitempty"`
|
DlLimit int `json:"dl_limit"`
|
||||||
Dlspeed int64 `json:"dlspeed,omitempty"`
|
Dlspeed int `json:"dlspeed"`
|
||||||
Downloaded int64 `json:"downloaded,omitempty"`
|
Downloaded int64 `json:"downloaded"`
|
||||||
DownloadedSession int64 `json:"downloaded_session,omitempty"`
|
DownloadedSession int64 `json:"downloaded_session"`
|
||||||
Eta int64 `json:"eta,omitempty"`
|
Eta int `json:"eta"`
|
||||||
FlPiecePrio bool `json:"f_l_piece_prio"`
|
FlPiecePrio bool `json:"f_l_piece_prio,omitempty"`
|
||||||
ForceStart bool `json:"force_start"`
|
ForceStart bool `json:"force_start,omitempty"`
|
||||||
Hash string `json:"hash"`
|
Hash string `json:"hash"`
|
||||||
LastActivity int64 `json:"last_activity,omitempty"`
|
LastActivity int64 `json:"last_activity,omitempty"`
|
||||||
MagnetUri string `json:"magnet_uri,omitempty"`
|
MagnetUri string `json:"magnet_uri,omitempty"`
|
||||||
MaxRatio int64 `json:"max_ratio,omitempty"`
|
MaxRatio int `json:"max_ratio,omitempty"`
|
||||||
MaxSeedingTime int64 `json:"max_seeding_time,omitempty"`
|
MaxSeedingTime int `json:"max_seeding_time,omitempty"`
|
||||||
Name string `json:"name,omitempty"`
|
Name string `json:"name,omitempty"`
|
||||||
NumComplete int64 `json:"num_complete,omitempty"`
|
NumComplete int `json:"num_complete,omitempty"`
|
||||||
NumIncomplete int64 `json:"num_incomplete,omitempty"`
|
NumIncomplete int `json:"num_incomplete,omitempty"`
|
||||||
NumLeechs int64 `json:"num_leechs,omitempty"`
|
NumLeechs int `json:"num_leechs,omitempty"`
|
||||||
NumSeeds int64 `json:"num_seeds,omitempty"`
|
NumSeeds int `json:"num_seeds,omitempty"`
|
||||||
Priority int64 `json:"priority,omitempty"`
|
Priority int `json:"priority,omitempty"`
|
||||||
Progress float32 `json:"progress"`
|
Progress float64 `json:"progress"`
|
||||||
Ratio int64 `json:"ratio,omitempty"`
|
Ratio int `json:"ratio,omitempty"`
|
||||||
RatioLimit int64 `json:"ratio_limit,omitempty"`
|
RatioLimit int `json:"ratio_limit,omitempty"`
|
||||||
SavePath string `json:"save_path,omitempty"`
|
SavePath string `json:"save_path"`
|
||||||
SeedingTimeLimit int64 `json:"seeding_time_limit,omitempty"`
|
SeedingTimeLimit int `json:"seeding_time_limit,omitempty"`
|
||||||
SeenComplete int64 `json:"seen_complete,omitempty"`
|
SeenComplete int64 `json:"seen_complete,omitempty"`
|
||||||
SeqDl bool `json:"seq_dl"`
|
SeqDl bool `json:"seq_dl"`
|
||||||
Size int64 `json:"size,omitempty"`
|
Size int64 `json:"size,omitempty"`
|
||||||
State string `json:"state,omitempty"`
|
State string `json:"state,omitempty"`
|
||||||
SuperSeeding bool `json:"super_seeding"`
|
SuperSeeding bool `json:"super_seeding"`
|
||||||
Tags string `json:"tags,omitempty"`
|
Tags string `json:"tags,omitempty"`
|
||||||
TimeActive int64 `json:"time_active,omitempty"`
|
TimeActive int `json:"time_active,omitempty"`
|
||||||
TotalSize int64 `json:"total_size,omitempty"`
|
TotalSize int64 `json:"total_size,omitempty"`
|
||||||
Tracker string `json:"tracker,omitempty"`
|
Tracker string `json:"tracker,omitempty"`
|
||||||
UpLimit int64 `json:"up_limit,omitempty"`
|
UpLimit int64 `json:"up_limit,omitempty"`
|
||||||
Uploaded int64 `json:"uploaded,omitempty"`
|
Uploaded int64 `json:"uploaded,omitempty"`
|
||||||
UploadedSession int64 `json:"uploaded_session,omitempty"`
|
UploadedSession int64 `json:"uploaded_session,omitempty"`
|
||||||
Upspeed int64 `json:"upspeed,omitempty"`
|
Upspeed int `json:"upspeed,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
func (t *Torrent) IsReady() bool {
|
func (t *Torrent) IsReady() bool {
|
||||||
@@ -229,24 +229,24 @@ type TorrentProperties struct {
|
|||||||
CompletionDate int64 `json:"completion_date,omitempty"`
|
CompletionDate int64 `json:"completion_date,omitempty"`
|
||||||
CreatedBy string `json:"created_by,omitempty"`
|
CreatedBy string `json:"created_by,omitempty"`
|
||||||
CreationDate int64 `json:"creation_date,omitempty"`
|
CreationDate int64 `json:"creation_date,omitempty"`
|
||||||
DlLimit int64 `json:"dl_limit,omitempty"`
|
DlLimit int `json:"dl_limit,omitempty"`
|
||||||
DlSpeed int64 `json:"dl_speed,omitempty"`
|
DlSpeed int `json:"dl_speed,omitempty"`
|
||||||
DlSpeedAvg int64 `json:"dl_speed_avg,omitempty"`
|
DlSpeedAvg int `json:"dl_speed_avg,omitempty"`
|
||||||
Eta int64 `json:"eta,omitempty"`
|
Eta int `json:"eta,omitempty"`
|
||||||
LastSeen int64 `json:"last_seen,omitempty"`
|
LastSeen int64 `json:"last_seen,omitempty"`
|
||||||
NbConnections int64 `json:"nb_connections,omitempty"`
|
NbConnections int `json:"nb_connections,omitempty"`
|
||||||
NbConnectionsLimit int64 `json:"nb_connections_limit,omitempty"`
|
NbConnectionsLimit int `json:"nb_connections_limit,omitempty"`
|
||||||
Peers int64 `json:"peers,omitempty"`
|
Peers int `json:"peers,omitempty"`
|
||||||
PeersTotal int64 `json:"peers_total,omitempty"`
|
PeersTotal int `json:"peers_total,omitempty"`
|
||||||
PieceSize int64 `json:"piece_size,omitempty"`
|
PieceSize int64 `json:"piece_size,omitempty"`
|
||||||
PiecesHave int64 `json:"pieces_have,omitempty"`
|
PiecesHave int64 `json:"pieces_have,omitempty"`
|
||||||
PiecesNum int64 `json:"pieces_num,omitempty"`
|
PiecesNum int64 `json:"pieces_num,omitempty"`
|
||||||
Reannounce int64 `json:"reannounce,omitempty"`
|
Reannounce int `json:"reannounce,omitempty"`
|
||||||
SavePath string `json:"save_path,omitempty"`
|
SavePath string `json:"save_path,omitempty"`
|
||||||
SeedingTime int64 `json:"seeding_time,omitempty"`
|
SeedingTime int `json:"seeding_time,omitempty"`
|
||||||
Seeds int64 `json:"seeds,omitempty"`
|
Seeds int `json:"seeds,omitempty"`
|
||||||
SeedsTotal int64 `json:"seeds_total,omitempty"`
|
SeedsTotal int `json:"seeds_total,omitempty"`
|
||||||
ShareRatio int64 `json:"share_ratio,omitempty"`
|
ShareRatio int `json:"share_ratio,omitempty"`
|
||||||
TimeElapsed int64 `json:"time_elapsed,omitempty"`
|
TimeElapsed int64 `json:"time_elapsed,omitempty"`
|
||||||
TotalDownloaded int64 `json:"total_downloaded,omitempty"`
|
TotalDownloaded int64 `json:"total_downloaded,omitempty"`
|
||||||
TotalDownloadedSession int64 `json:"total_downloaded_session,omitempty"`
|
TotalDownloadedSession int64 `json:"total_downloaded_session,omitempty"`
|
||||||
@@ -254,9 +254,20 @@ type TorrentProperties struct {
|
|||||||
TotalUploaded int64 `json:"total_uploaded,omitempty"`
|
TotalUploaded int64 `json:"total_uploaded,omitempty"`
|
||||||
TotalUploadedSession int64 `json:"total_uploaded_session,omitempty"`
|
TotalUploadedSession int64 `json:"total_uploaded_session,omitempty"`
|
||||||
TotalWasted int64 `json:"total_wasted,omitempty"`
|
TotalWasted int64 `json:"total_wasted,omitempty"`
|
||||||
UpLimit int64 `json:"up_limit,omitempty"`
|
UpLimit int `json:"up_limit,omitempty"`
|
||||||
UpSpeed int64 `json:"up_speed,omitempty"`
|
UpSpeed int `json:"up_speed,omitempty"`
|
||||||
UpSpeedAvg int64 `json:"up_speed_avg,omitempty"`
|
UpSpeedAvg int `json:"up_speed_avg,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type TorrentFile struct {
|
||||||
|
Index int `json:"index,omitempty"`
|
||||||
|
Name string `json:"name,omitempty"`
|
||||||
|
Size int64 `json:"size,omitempty"`
|
||||||
|
Progress int `json:"progress,omitempty"`
|
||||||
|
Priority int `json:"priority,omitempty"`
|
||||||
|
IsSeed bool `json:"is_seed,omitempty"`
|
||||||
|
PieceRange []int `json:"piece_range,omitempty"`
|
||||||
|
Availability float64 `json:"availability,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewAppPreferences() *AppPreferences {
|
func NewAppPreferences() *AppPreferences {
|
||||||
274
pkg/qbit/shared/torrent.go
Normal file
274
pkg/qbit/shared/torrent.go
Normal file
@@ -0,0 +1,274 @@
|
|||||||
|
package shared
|
||||||
|
|
||||||
|
import (
|
||||||
|
"cmp"
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"github.com/google/uuid"
|
||||||
|
"goBlack/common"
|
||||||
|
"goBlack/pkg/arr"
|
||||||
|
"goBlack/pkg/debrid"
|
||||||
|
"io"
|
||||||
|
"mime/multipart"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// All torrent related helpers goes here
|
||||||
|
|
||||||
|
func (q *QBit) AddMagnet(ctx context.Context, url, category string) error {
|
||||||
|
magnet, err := common.GetMagnetFromUrl(url)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("error parsing magnet link: %w", err)
|
||||||
|
}
|
||||||
|
err = q.Process(ctx, magnet, category)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to process torrent: %w", err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (q *QBit) AddTorrent(ctx context.Context, fileHeader *multipart.FileHeader, category string) error {
|
||||||
|
file, _ := fileHeader.Open()
|
||||||
|
defer file.Close()
|
||||||
|
var reader io.Reader = file
|
||||||
|
magnet, err := common.GetMagnetFromFile(reader, fileHeader.Filename)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("error reading file: %s \n %w", fileHeader.Filename, err)
|
||||||
|
}
|
||||||
|
err = q.Process(ctx, magnet, category)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to process torrent: %w", err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (q *QBit) Process(ctx context.Context, magnet *common.Magnet, category string) error {
|
||||||
|
torrent := q.CreateTorrentFromMagnet(magnet, category)
|
||||||
|
a, ok := ctx.Value("arr").(*arr.Arr)
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("arr not found in context")
|
||||||
|
}
|
||||||
|
isSymlink := ctx.Value("isSymlink").(bool)
|
||||||
|
debridTorrent, err := debrid.ProcessTorrent(q.Debrid, magnet, a, isSymlink)
|
||||||
|
if err != nil || debridTorrent == nil {
|
||||||
|
if debridTorrent != nil {
|
||||||
|
go debridTorrent.Delete()
|
||||||
|
}
|
||||||
|
if err == nil {
|
||||||
|
err = fmt.Errorf("failed to process torrent")
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
torrent = q.UpdateTorrentMin(torrent, debridTorrent)
|
||||||
|
q.Storage.AddOrUpdate(torrent)
|
||||||
|
go q.ProcessFiles(torrent, debridTorrent, a, isSymlink) // We can send async for file processing not to delay the response
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (q *QBit) CreateTorrentFromMagnet(magnet *common.Magnet, category string) *Torrent {
|
||||||
|
torrent := &Torrent{
|
||||||
|
ID: uuid.NewString(),
|
||||||
|
Hash: strings.ToLower(magnet.InfoHash),
|
||||||
|
Name: magnet.Name,
|
||||||
|
Size: magnet.Size,
|
||||||
|
Category: category,
|
||||||
|
State: "downloading",
|
||||||
|
MagnetUri: magnet.Link,
|
||||||
|
|
||||||
|
Tracker: "udp://tracker.opentrackr.org:1337",
|
||||||
|
UpLimit: -1,
|
||||||
|
DlLimit: -1,
|
||||||
|
AutoTmm: false,
|
||||||
|
Ratio: 1,
|
||||||
|
RatioLimit: 1,
|
||||||
|
}
|
||||||
|
return torrent
|
||||||
|
}
|
||||||
|
|
||||||
|
func (q *QBit) ProcessFiles(torrent *Torrent, debridTorrent *debrid.Torrent, arr *arr.Arr, isSymlink bool) {
|
||||||
|
for debridTorrent.Status != "downloaded" {
|
||||||
|
progress := debridTorrent.Progress
|
||||||
|
q.logger.Printf("%s Download Progress: %.2f%%", debridTorrent.Debrid.GetName(), progress)
|
||||||
|
time.Sleep(4 * time.Second)
|
||||||
|
dbT, err := debridTorrent.Debrid.CheckStatus(debridTorrent, isSymlink)
|
||||||
|
if err != nil {
|
||||||
|
q.logger.Printf("Error checking status: %v", err)
|
||||||
|
go debridTorrent.Delete()
|
||||||
|
q.MarkAsFailed(torrent)
|
||||||
|
_ = arr.Refresh()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
debridTorrent = dbT
|
||||||
|
torrent = q.UpdateTorrentMin(torrent, debridTorrent)
|
||||||
|
}
|
||||||
|
var (
|
||||||
|
torrentPath string
|
||||||
|
err error
|
||||||
|
)
|
||||||
|
debridTorrent.Arr = arr
|
||||||
|
if isSymlink {
|
||||||
|
torrentPath, err = q.ProcessSymlink(debridTorrent)
|
||||||
|
} else {
|
||||||
|
torrentPath, err = q.processManualFiles(debridTorrent)
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
q.MarkAsFailed(torrent)
|
||||||
|
go debridTorrent.Delete()
|
||||||
|
q.logger.Printf("Error: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
torrent.TorrentPath = filepath.Base(torrentPath)
|
||||||
|
q.UpdateTorrent(torrent, debridTorrent)
|
||||||
|
_ = arr.Refresh()
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarkAsFailed sets the torrent's state to "error", persists the change in
// storage, and returns the same torrent for chaining.
func (q *QBit) MarkAsFailed(t *Torrent) *Torrent {
	t.State = "error"
	q.Storage.AddOrUpdate(t)
	return t
}
|
||||||
|
|
||||||
|
func (q *QBit) UpdateTorrentMin(t *Torrent, debridTorrent *debrid.Torrent) *Torrent {
|
||||||
|
if debridTorrent == nil {
|
||||||
|
return t
|
||||||
|
}
|
||||||
|
|
||||||
|
addedOn, err := time.Parse(time.RFC3339, debridTorrent.Added)
|
||||||
|
if err != nil {
|
||||||
|
addedOn = time.Now()
|
||||||
|
}
|
||||||
|
totalSize := debridTorrent.Bytes
|
||||||
|
progress := cmp.Or(debridTorrent.Progress, 100)
|
||||||
|
progress = progress / 100.0
|
||||||
|
sizeCompleted := int64(float64(totalSize) * progress)
|
||||||
|
|
||||||
|
var speed int
|
||||||
|
if debridTorrent.Speed != 0 {
|
||||||
|
speed = debridTorrent.Speed
|
||||||
|
}
|
||||||
|
var eta int
|
||||||
|
if speed != 0 {
|
||||||
|
eta = int(totalSize-sizeCompleted) / speed
|
||||||
|
}
|
||||||
|
t.ID = debridTorrent.Id
|
||||||
|
t.Name = debridTorrent.Name
|
||||||
|
t.AddedOn = addedOn.Unix()
|
||||||
|
t.DebridTorrent = debridTorrent
|
||||||
|
t.Size = totalSize
|
||||||
|
t.Completed = sizeCompleted
|
||||||
|
t.Downloaded = sizeCompleted
|
||||||
|
t.DownloadedSession = sizeCompleted
|
||||||
|
t.Uploaded = sizeCompleted
|
||||||
|
t.UploadedSession = sizeCompleted
|
||||||
|
t.AmountLeft = totalSize - sizeCompleted
|
||||||
|
t.Progress = progress
|
||||||
|
t.Eta = eta
|
||||||
|
t.Dlspeed = speed
|
||||||
|
t.Upspeed = speed
|
||||||
|
t.SavePath = filepath.Join(q.DownloadFolder, t.Category) + string(os.PathSeparator)
|
||||||
|
t.ContentPath = filepath.Join(t.SavePath, t.Name) + string(os.PathSeparator)
|
||||||
|
return t
|
||||||
|
}
|
||||||
|
|
||||||
|
func (q *QBit) UpdateTorrent(t *Torrent, debridTorrent *debrid.Torrent) *Torrent {
|
||||||
|
db := debridTorrent.Debrid
|
||||||
|
rcLoneMount := db.GetMountPath()
|
||||||
|
if debridTorrent == nil && t.ID != "" {
|
||||||
|
debridTorrent, _ = db.GetTorrent(t.ID)
|
||||||
|
}
|
||||||
|
if debridTorrent == nil {
|
||||||
|
q.logger.Printf("Torrent with ID %s not found in %s", t.ID, db.GetName())
|
||||||
|
return t
|
||||||
|
}
|
||||||
|
if debridTorrent.Status != "downloaded" {
|
||||||
|
debridTorrent, _ = db.GetTorrent(t.ID)
|
||||||
|
}
|
||||||
|
|
||||||
|
if t.TorrentPath == "" {
|
||||||
|
t.TorrentPath = filepath.Base(debridTorrent.GetMountFolder(rcLoneMount))
|
||||||
|
}
|
||||||
|
savePath := filepath.Join(q.DownloadFolder, t.Category) + string(os.PathSeparator)
|
||||||
|
torrentPath := filepath.Join(savePath, t.TorrentPath) + string(os.PathSeparator)
|
||||||
|
t = q.UpdateTorrentMin(t, debridTorrent)
|
||||||
|
t.ContentPath = torrentPath
|
||||||
|
|
||||||
|
if t.IsReady() {
|
||||||
|
t.State = "pausedUP"
|
||||||
|
q.Storage.Update(t)
|
||||||
|
return t
|
||||||
|
}
|
||||||
|
|
||||||
|
ticker := time.NewTicker(2 * time.Second)
|
||||||
|
defer ticker.Stop()
|
||||||
|
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-ticker.C:
|
||||||
|
if t.IsReady() {
|
||||||
|
t.State = "pausedUP"
|
||||||
|
q.Storage.Update(t)
|
||||||
|
return t
|
||||||
|
}
|
||||||
|
updatedT := q.UpdateTorrent(t, debridTorrent)
|
||||||
|
t = updatedT
|
||||||
|
|
||||||
|
case <-time.After(10 * time.Minute): // Add a timeout
|
||||||
|
return t
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ResumeTorrent is a stub that always reports success; resuming is a no-op
// in this implementation.
func (q *QBit) ResumeTorrent(t *Torrent) bool {
	return true
}
|
||||||
|
|
||||||
|
// PauseTorrent is a stub that always reports success; pausing is a no-op
// in this implementation.
func (q *QBit) PauseTorrent(t *Torrent) bool {
	return true
}
|
||||||
|
|
||||||
|
// RefreshTorrent is a stub that always reports success; refreshing is a
// no-op in this implementation.
func (q *QBit) RefreshTorrent(t *Torrent) bool {
	return true
}
|
||||||
|
|
||||||
|
func (q *QBit) GetTorrentProperties(t *Torrent) *TorrentProperties {
|
||||||
|
return &TorrentProperties{
|
||||||
|
AdditionDate: t.AddedOn,
|
||||||
|
Comment: "Debrid Blackhole <https://github.com/sirrobot01/debrid-blackhole>",
|
||||||
|
CreatedBy: "Debrid Blackhole <https://github.com/sirrobot01/debrid-blackhole>",
|
||||||
|
CreationDate: t.AddedOn,
|
||||||
|
DlLimit: -1,
|
||||||
|
UpLimit: -1,
|
||||||
|
DlSpeed: t.Dlspeed,
|
||||||
|
UpSpeed: t.Upspeed,
|
||||||
|
TotalSize: t.Size,
|
||||||
|
TotalUploaded: t.Uploaded,
|
||||||
|
TotalDownloaded: t.Downloaded,
|
||||||
|
TotalUploadedSession: t.UploadedSession,
|
||||||
|
TotalDownloadedSession: t.DownloadedSession,
|
||||||
|
LastSeen: time.Now().Unix(),
|
||||||
|
NbConnectionsLimit: 100,
|
||||||
|
Peers: 0,
|
||||||
|
PeersTotal: 2,
|
||||||
|
SeedingTime: 1,
|
||||||
|
Seeds: 100,
|
||||||
|
ShareRatio: 100,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (q *QBit) GetTorrentFiles(t *Torrent) []*TorrentFile {
|
||||||
|
files := make([]*TorrentFile, 0)
|
||||||
|
if t.DebridTorrent == nil {
|
||||||
|
return files
|
||||||
|
}
|
||||||
|
for index, file := range t.DebridTorrent.Files {
|
||||||
|
files = append(files, &TorrentFile{
|
||||||
|
Index: index,
|
||||||
|
Name: file.Path,
|
||||||
|
Size: file.Size,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
return files
|
||||||
|
}
|
||||||
@@ -1,29 +1,13 @@
|
|||||||
package qbit
|
package shared
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/json"
|
|
||||||
"goBlack/common"
|
"goBlack/common"
|
||||||
"goBlack/pkg/debrid"
|
"goBlack/pkg/debrid"
|
||||||
"net/http"
|
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
//func generateSID() (string, error) {
|
|
||||||
// bytes := make([]byte, sidLength)
|
|
||||||
// if _, err := rand.Read(bytes); err != nil {
|
|
||||||
// return "", err
|
|
||||||
// }
|
|
||||||
// return hex.EncodeToString(bytes), nil
|
|
||||||
//}
|
|
||||||
|
|
||||||
func JSONResponse(w http.ResponseWriter, data interface{}, code int) {
|
|
||||||
w.Header().Set("Content-Type", "application/json")
|
|
||||||
w.WriteHeader(code)
|
|
||||||
json.NewEncoder(w).Encode(data)
|
|
||||||
}
|
|
||||||
|
|
||||||
func checkFileLoop(wg *sync.WaitGroup, dir string, file debrid.TorrentFile, ready chan<- debrid.TorrentFile) {
|
func checkFileLoop(wg *sync.WaitGroup, dir string, file debrid.TorrentFile, ready chan<- debrid.TorrentFile) {
|
||||||
defer wg.Done()
|
defer wg.Done()
|
||||||
ticker := time.NewTicker(1 * time.Second) // Check every second
|
ticker := time.NewTicker(1 * time.Second) // Check every second
|
||||||
@@ -1,8 +1,7 @@
|
|||||||
package qbit
|
package shared
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"goBlack/pkg/debrid"
|
|
||||||
"time"
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -20,7 +19,7 @@ func (q *QBit) StartRefreshWorker(ctx context.Context) {
|
|||||||
q.logger.Println("Qbit Refresh Worker stopped")
|
q.logger.Println("Qbit Refresh Worker stopped")
|
||||||
return
|
return
|
||||||
case <-refreshTicker.C:
|
case <-refreshTicker.C:
|
||||||
torrents := q.storage.GetAll("", "", nil)
|
torrents := q.Storage.GetAll("", "", nil)
|
||||||
if len(torrents) > 0 {
|
if len(torrents) > 0 {
|
||||||
q.RefreshArrs()
|
q.RefreshArrs()
|
||||||
}
|
}
|
||||||
@@ -29,18 +28,10 @@ func (q *QBit) StartRefreshWorker(ctx context.Context) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (q *QBit) RefreshArrs() {
|
func (q *QBit) RefreshArrs() {
|
||||||
q.arrs.Range(func(key, value interface{}) bool {
|
for _, arr := range q.Arrs.GetAll() {
|
||||||
host, ok := key.(string)
|
err := arr.Refresh()
|
||||||
token, ok2 := value.(string)
|
if err != nil {
|
||||||
if !ok || !ok2 {
|
return
|
||||||
return true
|
|
||||||
}
|
}
|
||||||
arr := &debrid.Arr{
|
}
|
||||||
Name: "",
|
|
||||||
Token: token,
|
|
||||||
Host: host,
|
|
||||||
}
|
|
||||||
q.RefreshArr(arr)
|
|
||||||
return true
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
@@ -1,125 +0,0 @@
|
|||||||
package qbit
|
|
||||||
|
|
||||||
import (
|
|
||||||
"cmp"
|
|
||||||
"goBlack/pkg/debrid"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// All torrent related helpers goes here
|
|
||||||
|
|
||||||
func (q *QBit) MarkAsFailed(t *Torrent) *Torrent {
|
|
||||||
t.State = "error"
|
|
||||||
q.storage.AddOrUpdate(t)
|
|
||||||
return t
|
|
||||||
}
|
|
||||||
|
|
||||||
func (q *QBit) UpdateTorrent(t *Torrent, debridTorrent *debrid.Torrent) *Torrent {
|
|
||||||
rcLoneMount := q.debrid.GetMountPath()
|
|
||||||
if debridTorrent == nil && t.ID != "" {
|
|
||||||
debridTorrent, _ = q.debrid.GetTorrent(t.ID)
|
|
||||||
}
|
|
||||||
if debridTorrent == nil {
|
|
||||||
q.logger.Printf("Torrent with ID %s not found in %s", t.ID, q.debrid.GetName())
|
|
||||||
return t
|
|
||||||
}
|
|
||||||
if debridTorrent.Status != "downloaded" {
|
|
||||||
debridTorrent, _ = q.debrid.GetTorrent(t.ID)
|
|
||||||
}
|
|
||||||
|
|
||||||
if t.TorrentPath == "" {
|
|
||||||
t.TorrentPath = filepath.Base(debridTorrent.GetMountFolder(rcLoneMount))
|
|
||||||
}
|
|
||||||
|
|
||||||
totalSize := float64(cmp.Or(debridTorrent.Bytes, 1.0))
|
|
||||||
progress := cmp.Or(debridTorrent.Progress, 100.0)
|
|
||||||
progress = progress / 100.0
|
|
||||||
var sizeCompleted int64
|
|
||||||
|
|
||||||
sizeCompleted = int64(totalSize * progress)
|
|
||||||
savePath := filepath.Join(q.DownloadFolder, t.Category) + string(os.PathSeparator)
|
|
||||||
torrentPath := filepath.Join(savePath, t.TorrentPath) + string(os.PathSeparator)
|
|
||||||
|
|
||||||
var speed int64
|
|
||||||
if debridTorrent.Speed != 0 {
|
|
||||||
speed = debridTorrent.Speed
|
|
||||||
}
|
|
||||||
var eta int64
|
|
||||||
if speed != 0 {
|
|
||||||
eta = int64((totalSize - float64(sizeCompleted)) / float64(speed))
|
|
||||||
}
|
|
||||||
|
|
||||||
t.Size = debridTorrent.Bytes
|
|
||||||
t.DebridTorrent = debridTorrent
|
|
||||||
t.Completed = sizeCompleted
|
|
||||||
t.Downloaded = sizeCompleted
|
|
||||||
t.DownloadedSession = sizeCompleted
|
|
||||||
t.Uploaded = sizeCompleted
|
|
||||||
t.UploadedSession = sizeCompleted
|
|
||||||
t.AmountLeft = int64(totalSize) - sizeCompleted
|
|
||||||
t.Progress = float32(progress)
|
|
||||||
t.SavePath = savePath
|
|
||||||
t.ContentPath = torrentPath
|
|
||||||
t.Eta = eta
|
|
||||||
t.Dlspeed = speed
|
|
||||||
t.Upspeed = speed
|
|
||||||
|
|
||||||
if t.IsReady() {
|
|
||||||
t.State = "pausedUP"
|
|
||||||
q.storage.AddOrUpdate(t)
|
|
||||||
return t
|
|
||||||
}
|
|
||||||
ticker := time.NewTicker(3 * time.Second)
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case <-ticker.C:
|
|
||||||
if t.IsReady() {
|
|
||||||
t.State = "pausedUP"
|
|
||||||
q.storage.AddOrUpdate(t)
|
|
||||||
ticker.Stop()
|
|
||||||
return t
|
|
||||||
} else {
|
|
||||||
return q.UpdateTorrent(t, debridTorrent)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (q *QBit) ResumeTorrent(t *Torrent) bool {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
func (q *QBit) PauseTorrent(t *Torrent) bool {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
func (q *QBit) RefreshTorrent(t *Torrent) bool {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
func (q *QBit) GetTorrentProperties(t *Torrent) *TorrentProperties {
|
|
||||||
return &TorrentProperties{
|
|
||||||
AdditionDate: t.AddedOn,
|
|
||||||
Comment: "Debrid Blackhole <https://github.com/sirrobot01/debrid-blackhole>",
|
|
||||||
CreatedBy: "Debrid Blackhole <https://github.com/sirrobot01/debrid-blackhole>",
|
|
||||||
CreationDate: t.AddedOn,
|
|
||||||
DlLimit: -1,
|
|
||||||
UpLimit: -1,
|
|
||||||
DlSpeed: t.Dlspeed,
|
|
||||||
UpSpeed: t.Upspeed,
|
|
||||||
TotalSize: t.Size,
|
|
||||||
TotalUploaded: t.Uploaded,
|
|
||||||
TotalDownloaded: t.Downloaded,
|
|
||||||
TotalUploadedSession: t.UploadedSession,
|
|
||||||
TotalDownloadedSession: t.DownloadedSession,
|
|
||||||
LastSeen: time.Now().Unix(),
|
|
||||||
NbConnectionsLimit: 100,
|
|
||||||
Peers: 0,
|
|
||||||
PeersTotal: 2,
|
|
||||||
SeedingTime: 1,
|
|
||||||
Seeds: 100,
|
|
||||||
ShareRatio: 100,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Reference in New Issue
Block a user