18 Commits

Author SHA1 Message Date
Mukhtar Akere
b77dbcc4f4 Fix magnet conversion
Some checks failed
Release / goreleaser (push) Failing after 2m4s
2024-12-19 00:12:29 +01:00
Mukhtar Akere
58c0aafab1 Fix docker.yml 2024-12-18 17:24:40 +01:00
Mukhtar Akere
357da54083 Fix docker.yml 2024-12-18 17:20:59 +01:00
Mukhtar Akere
88a7196eaf Hotfix 2024-12-18 17:01:26 +01:00
Mukhtar Akere
abc86a0460 Changelog 0.3.1 2024-12-18 16:51:00 +01:00
robertRogerPresident
dd0b7efdff fix toboxInfo struct type (#6)
Co-authored-by: Tangui <tanguidaoudal@yahoo.fr>
2024-12-16 11:56:09 -08:00
Mukhtar Akere
7359f280b0 Make sure torrents get deleted on failed 2024-12-12 17:38:53 +01:00
Mukhtar Akere
4eb3539347 Fix docker.yml 2024-11-30 16:06:28 +01:00
Mukhtar Akere
9fb1118475 Fix docker.yml 2024-11-30 16:03:33 +01:00
Mukhtar Akere
07491b43fe Fix docker.yml 2024-11-30 16:01:07 +01:00
Mukhtar Akere
8f7c9a19c5 Fix goreleaser
Some checks failed
Release / goreleaser (push) Failing after 2m44s
2024-11-30 15:50:42 +01:00
Mukhtar Akere
a51364d150 Changelog 0.3.0 2024-11-30 15:46:58 +01:00
Mukhtar Akere
df2aa4e361 0.2.7:
- Add support for multiple debrid providers
- Add Torbox support
- Add support for configurable debrid cache checks
- Add support for configurable debrid download uncached torrents
2024-11-25 16:48:23 +01:00
Mukhtar Akere
b51cb954f8 Merge branch 'beta' 2024-11-25 16:39:47 +01:00
Mukhtar Akere
8bdb2e3547 Hotfix & Updated Readme 2024-11-23 23:41:49 +01:00
Mukhtar Akere
2c9a076cd2 Hotfix 2024-11-23 21:10:42 +01:00
Mukhtar Akere
d2a77620bc Features:
- Add Torbox(Tested)
- Fix RD cache check
- Minor fixes
2024-11-23 19:52:15 +01:00
Mukhtar Akere
4b8f1ccfb6 Changelog 0.2.6 2024-10-08 15:43:38 +01:00
58 changed files with 3047 additions and 906 deletions

52
.air.toml Normal file
View File

@@ -0,0 +1,52 @@
root = "."
testdata_dir = "testdata"
tmp_dir = "tmp"
[build]
args_bin = []
bin = "./tmp/main"
cmd = "go build -o ./tmp/main ."
delay = 1000
exclude_dir = ["assets", "tmp", "vendor", "testdata", "data"]
exclude_file = []
exclude_regex = ["_test.go"]
exclude_unchanged = false
follow_symlink = false
full_bin = ""
include_dir = []
include_ext = ["go", "tpl", "tmpl", "html", ".json"]
include_file = []
kill_delay = "0s"
log = "build-errors.log"
poll = false
poll_interval = 0
post_cmd = []
pre_cmd = []
rerun = false
rerun_delay = 500
send_interrupt = false
stop_on_error = false
[color]
app = ""
build = "yellow"
main = "magenta"
runner = "green"
watcher = "cyan"
[log]
main_only = false
silent = false
time = false
[misc]
clean_on_exit = false
[proxy]
app_port = 0
enabled = false
proxy_port = 0
[screen]
clear_on_rebuild = false
keep_scroll = true

View File

@@ -5,4 +5,5 @@ docker-compose.yml
.DS_Store
**/.idea/
*.magnet
**.torrent
**.torrent

54
.github/workflows/docker.yml vendored Normal file
View File

@@ -0,0 +1,54 @@
name: Docker Build and Push
on:
push:
branches:
- main
- beta
jobs:
docker:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Get version
id: get_version
run: |
LATEST_TAG=$(git tag | sort -V | tail -n1)
echo "latest_tag=${LATEST_TAG}" >> $GITHUB_ENV
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Login to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and push for beta branch
if: github.ref == 'refs/heads/beta'
uses: docker/build-push-action@v5
with:
context: .
platforms: linux/amd64,linux/arm64,linux/arm/v7
push: true
tags: cy01/blackhole:beta
- name: Build and push for main branch
if: github.ref == 'refs/heads/main'
uses: docker/build-push-action@v5
with:
context: .
platforms: linux/amd64,linux/arm64,linux/arm/v7
push: true
tags: |
cy01/blackhole:latest
cy01/blackhole:${{ env.latest_tag }}

32
.github/workflows/release.yml vendored Normal file
View File

@@ -0,0 +1,32 @@
name: Release
on:
push:
tags:
- '*'
permissions:
contents: write
jobs:
goreleaser:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Set up Go
uses: actions/setup-go@v4
with:
go-version: '1.22'
- name: Run GoReleaser
uses: goreleaser/goreleaser-action@v5
with:
distribution: goreleaser
version: latest
args: release --clean
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

1
.gitignore vendored
View File

@@ -9,3 +9,4 @@ docker-compose.yml
*.log
*.log.*
dist/
tmp/**

View File

@@ -1,4 +1,4 @@
version: 2
version: 1
before:
hooks:

View File

@@ -72,4 +72,28 @@
#### 0.2.5
- Fix ContentPath not being set prior
- Rewrote Readme
- Cleaned up the code
- Cleaned up the code
#### 0.2.6
- Delete torrent for empty matched files
- Update Readme
#### 0.2.7
- Add support for multiple debrid providers
- Add Torbox support
- Add support for configurable debrid cache checks
- Add support for configurable debrid download uncached torrents
#### 0.3.0
- Add UI for adding torrents
- Refactoring of the code
- Fix Torbox bug
- Update CI/CD
- Update Readme
#### 0.3.1
- Add DebridLink Support
- Refactor error handling

View File

@@ -20,6 +20,7 @@ RUN CGO_ENABLED=0 GOOS=$(echo $TARGETPLATFORM | cut -d '/' -f1) GOARCH=$(echo $T
FROM scratch
COPY --from=builder /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
COPY --from=builder /blackhole /blackhole
COPY --from=builder /app/README.md /README.md
EXPOSE 8181

View File

@@ -1,13 +1,25 @@
### GoBlackHole(with Debrid Proxy Support)
This is a Golang implementation go Torrent QbitTorrent with a **Real Debrid Proxy Support**.
This is a Golang implementation of a torrent blackhole that mocks qBittorrent, with **Real Debrid & Torbox support**.
### Features
#### Uses
- Mock Qbittorent API that supports the Arrs(Sonarr, Radarr, etc)
- Proxy support for the Arrs
- Real Debrid Support
- Torbox Support
- Debrid Link Support
- Multi-Debrid Providers support
- UI for adding torrents directly to *arrs
The proxy is useful in filtering out un-cached Real Debrid torrents
### Supported Debrid Providers
- Real Debrid
- Torbox
- Debrid Link
### Changelog
- View the [CHANGELOG.md](CHANGELOG.md) for the latest changes
@@ -37,6 +49,8 @@ services:
- QBIT_PORT=8282 # qBittorrent Port. This is optional. You can set this in the config file
- PORT=8181 # Proxy Port. This is optional. You can set this in the config file
restart: unless-stopped
depends_on:
- rclone # If you are using rclone with docker
```
@@ -50,16 +64,38 @@ Download the binary from the releases page and run it with the config file.
#### Config
```json
{
"debrid": {
"name": "realdebrid",
"host": "https://api.real-debrid.com/rest/1.0",
"api_key": "realdebrid_api_key",
"folder": "data/realdebrid/torrents/",
"rate_limit": "250/minute"
},
"debrids": [
{
"name": "torbox",
"host": "https://api.torbox.app/v1",
"api_key": "torbox_api_key",
"folder": "data/torbox/torrents/",
"rate_limit": "250/minute",
"download_uncached": false,
"check_cached": true
},
{
"name": "realdebrid",
"host": "https://api.real-debrid.com/rest/1.0",
"api_key": "realdebrid_key",
"folder": "data/realdebrid/torrents/",
"rate_limit": "250/minute",
"download_uncached": false,
"check_cached": false
},
{
"name": "debridlink",
"host": "https://debrid-link.com/api/v2",
"api_key": "debridlink_key",
"folder": "data/debridlink/torrents/",
"rate_limit": "250/minute",
"download_uncached": false,
"check_cached": false
}
],
"proxy": {
"enabled": true,
"port": "8181",
"port": "8100",
"debug": false,
"username": "username",
"password": "password",
@@ -82,7 +118,14 @@ Download the binary from the releases page and run it with the config file.
- The cache is stored in memory and is not persisted on restart
##### Debrid Config
- This config key is important as it's used for both Blackhole and Proxy
- The `debrids` key is an array of debrid providers
- The `name` key is the name of the debrid provider
- The `host` key is the API endpoint of the debrid provider
- The `api_key` key is the API key of the debrid provider
- The `folder` key is the folder where the torrents will be downloaded. e.g `data/realdebrid/torrents/`
- The `rate_limit` key is the rate limit of the debrid provider(null by default)
- The `download_uncached` bool key is used to download uncached torrents(disabled by default)
- The `check_cached` bool key is used to check if the torrent is cached(disabled by default)
##### Proxy Config
- The `enabled` key is used to enable the proxy
@@ -132,14 +175,17 @@ Setting Up Qbittorrent in Arr
- Test
- Save
### UI for adding torrents
![UI](./doc/ui.png)
The UI is a simple web interface that allows you to add torrents directly to the Arrs(Sonarr, Radarr, etc)
### TODO
- [ ] A proper name!!!!
- [ ] Debrid
- [ ] Add more Debrid Providers
- [ ] Proxy
- [ ] Add more Proxy features
- [ ] Qbittorrent
- [ ] Add more Qbittorrent features
- [ ] Persist torrents on restart/server crash

View File

@@ -2,6 +2,7 @@ package cmd
import (
"cmp"
"context"
"goBlack/common"
"goBlack/pkg/debrid"
"goBlack/pkg/proxy"
@@ -9,32 +10,44 @@ import (
"sync"
)
func Start(config *common.Config) {
func Start(ctx context.Context, config *common.Config) error {
maxCacheSize := cmp.Or(config.MaxCacheSize, 1000)
cache := common.NewCache(maxCacheSize)
deb := debrid.NewDebrid(config.Debrid, cache)
deb := debrid.NewDebrid(config.Debrids, cache)
var wg sync.WaitGroup
errChan := make(chan error, 2)
if config.Proxy.Enabled {
p := proxy.NewProxy(*config, deb, cache)
wg.Add(1)
go func() {
defer wg.Done()
p.Start()
if err := proxy.NewProxy(*config, deb, cache).Start(ctx); err != nil {
errChan <- err
}
}()
}
if config.QBitTorrent.Port != "" {
qb := qbit.NewQBit(config, deb, cache)
wg.Add(1)
go func() {
defer wg.Done()
qb.Start()
if err := qbit.Start(ctx, config, deb, cache); err != nil {
errChan <- err
}
}()
}
// Wait indefinitely
wg.Wait()
go func() {
wg.Wait()
close(errChan)
}()
// Wait for context cancellation or completion or error
select {
case err := <-errChan:
return err
case <-ctx.Done():
return ctx.Err()
}
}

View File

@@ -2,8 +2,11 @@ package common
import (
"encoding/json"
"errors"
"fmt"
"log"
"os"
"sync"
)
type DebridConfig struct {
@@ -12,6 +15,7 @@ type DebridConfig struct {
APIKey string `json:"api_key"`
Folder string `json:"folder"`
DownloadUncached bool `json:"download_uncached"`
CheckCached bool `json:"check_cached"`
RateLimit string `json:"rate_limit"` // 200/minute or 10/second
}
@@ -36,11 +40,88 @@ type QBitTorrentConfig struct {
type Config struct {
Debrid DebridConfig `json:"debrid"`
Debrids []DebridConfig `json:"debrids"`
Proxy ProxyConfig `json:"proxy"`
MaxCacheSize int `json:"max_cache_size"`
QBitTorrent QBitTorrentConfig `json:"qbittorrent"`
}
// validateDebrids checks that every configured debrid provider has the
// required fields (host, api key, folder) and that its download folder
// exists on disk. It returns the first problem found, or nil when all
// entries are valid.
//
// The original version checked folders in goroutines feeding an error
// channel; os.Stat is cheap, so a sequential loop is simpler, leaks no
// goroutines on early return, and reports errors in deterministic order.
func validateDebrids(debrids []DebridConfig) error {
	if len(debrids) == 0 {
		return errors.New("no debrids configured")
	}
	for _, debrid := range debrids {
		if debrid.Host == "" {
			return errors.New("debrid host is required")
		}
		if debrid.APIKey == "" {
			return errors.New("debrid api key is required")
		}
		if debrid.Folder == "" {
			return errors.New("debrid folder is required")
		}
		if _, err := os.Stat(debrid.Folder); os.IsNotExist(err) {
			return fmt.Errorf("debrid folder does not exist: %s", debrid.Folder)
		}
	}
	return nil
}
// validateQbitTorrent ensures the qBittorrent download folder is
// configured and exists on disk.
//
// Fixes the "qbittorent" misspelling in both user-facing error strings.
func validateQbitTorrent(config *QBitTorrentConfig) error {
	if config.DownloadFolder == "" {
		return errors.New("qbittorrent download folder is required")
	}
	if _, err := os.Stat(config.DownloadFolder); os.IsNotExist(err) {
		return errors.New("qbittorrent download folder does not exist")
	}
	return nil
}
// validateConfig validates the debrid and qBittorrent sections of the
// config, returning the first validation error encountered.
//
// Both validations are cheap (string checks plus a couple of os.Stat
// calls), so they run sequentially: this keeps the reported error
// deterministic and drops the goroutine/channel bookkeeping the
// original used.
func validateConfig(config *Config) error {
	if err := validateDebrids(config.Debrids); err != nil {
		return err
	}
	return validateQbitTorrent(&config.QBitTorrent)
}
func LoadConfig(path string) (*Config, error) {
// Load the config file
file, err := os.Open(path)
@@ -60,10 +141,15 @@ func LoadConfig(path string) (*Config, error) {
if err != nil {
return nil, err
}
if config.Proxy.CachedOnly == nil {
config.Proxy.CachedOnly = new(bool)
*config.Proxy.CachedOnly = true
if config.Debrid.Name != "" {
config.Debrids = append(config.Debrids, config.Debrid)
}
// Validate the config
//if err := validateConfig(config); err != nil {
// return nil, err
//}
return config, nil
}

14
common/logger.go Normal file
View File

@@ -0,0 +1,14 @@
package common
import (
"fmt"
"log"
"os"
)
// NewLogger returns a *log.Logger writing to output, with the given
// prefix wrapped in square brackets (e.g. "[Main] ") and standard
// date/time flags.
func NewLogger(prefix string, output *os.File) *log.Logger {
	return log.New(output, "["+prefix+"] ", log.LstdFlags)
}

// Logger is the default application-wide logger, writing to stdout.
var Logger = NewLogger("Main", os.Stdout)

View File

@@ -2,6 +2,7 @@ package common
import (
"crypto/tls"
"encoding/json"
"fmt"
"golang.org/x/time/rate"
"io"
@@ -60,11 +61,7 @@ func (c *RLHTTPClient) Do(req *http.Request) (*http.Response, error) {
return resp, fmt.Errorf("max retries exceeded")
}
func (c *RLHTTPClient) MakeRequest(method string, url string, body io.Reader) ([]byte, error) {
req, err := http.NewRequest(method, url, body)
if err != nil {
return nil, err
}
func (c *RLHTTPClient) MakeRequest(req *http.Request) ([]byte, error) {
if c.Headers != nil {
for key, value := range c.Headers {
req.Header.Set(key, value)
@@ -75,9 +72,12 @@ func (c *RLHTTPClient) MakeRequest(method string, url string, body io.Reader) ([
if err != nil {
return nil, err
}
b, _ := io.ReadAll(res.Body)
statusOk := strconv.Itoa(res.StatusCode)[0] == '2'
if !statusOk {
return nil, fmt.Errorf("unexpected status code: %d", res.StatusCode)
// Add status code error to the body
b = append(b, []byte(fmt.Sprintf("\nstatus code: %d", res.StatusCode))...)
return nil, fmt.Errorf(string(b))
}
defer func(Body io.ReadCloser) {
err := Body.Close()
@@ -85,7 +85,7 @@ func (c *RLHTTPClient) MakeRequest(method string, url string, body io.Reader) ([
log.Println(err)
}
}(res.Body)
return io.ReadAll(res.Body)
return b, nil
}
func NewRLHTTPClient(rl *rate.Limiter, headers map[string]string) *RLHTTPClient {
@@ -128,3 +128,9 @@ func ParseRateLimit(rateStr string) *rate.Limiter {
return nil
}
}
func JSONResponse(w http.ResponseWriter, data interface{}, code int) {
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(code)
json.NewEncoder(w).Encode(data)
}

View File

@@ -209,11 +209,6 @@ func processInfoHash(input string) (string, error) {
return "", fmt.Errorf("invalid infohash: %s", input)
}
func NewLogger(prefix string, output *os.File) *log.Logger {
f := fmt.Sprintf("[%s] ", prefix)
return log.New(output, f, log.LstdFlags)
}
func GetInfohashFromURL(url string) (string, error) {
// Download the torrent file
var magnetLink string

BIN
doc/ui.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 124 KiB

View File

@@ -1,6 +1,7 @@
package main
import (
"context"
"flag"
"goBlack/cmd"
"goBlack/common"
@@ -17,6 +18,9 @@ func main() {
if err != nil {
log.Fatal(err)
}
cmd.Start(conf)
ctx := context.Background()
if err := cmd.Start(ctx, conf); err != nil {
log.Fatal(err)
}
}

120
pkg/arr/arr.go Normal file
View File

@@ -0,0 +1,120 @@
package arr
import (
"bytes"
"encoding/json"
"goBlack/common"
"log"
"net/http"
"os"
"strings"
"sync"
)
// Type is a type of arr
type Type string
const (
Sonarr Type = "sonarr"
Radarr Type = "radarr"
Lidarr Type = "lidarr"
Readarr Type = "readarr"
)
var (
client *common.RLHTTPClient = common.NewRLHTTPClient(nil, nil)
logger *log.Logger = common.NewLogger("QBit", os.Stdout)
)
type Arr struct {
Name string `json:"name"`
Host string `json:"host"`
Token string `json:"token"`
Type Type `json:"type"`
}
// NewArr builds an Arr handle for a single *arr instance with the
// given display name, base URL, API token and flavor.
func NewArr(name, host, token string, arrType Type) *Arr {
	a := &Arr{Type: arrType}
	a.Name = name
	a.Host = host
	a.Token = token
	return a
}
// Request performs an authenticated HTTP request against the arr
// instance. endpoint is joined onto a.Host; payload, when non-nil, is
// JSON-encoded as the request body; the API key is sent via X-Api-Key.
//
// NOTE(review): when the instance is unconfigured (empty token or
// host) this returns (nil, nil). Callers MUST check resp for nil
// before touching resp.Body, or they will dereference a nil
// *http.Response.
func (a *Arr) Request(method, endpoint string, payload interface{}) (*http.Response, error) {
	if a.Token == "" || a.Host == "" {
		// Deliberate best-effort no-op for unconfigured instances.
		return nil, nil
	}
	url, err := common.JoinURL(a.Host, endpoint)
	if err != nil {
		return nil, err
	}
	var jsonPayload []byte
	if payload != nil {
		jsonPayload, err = json.Marshal(payload)
		if err != nil {
			return nil, err
		}
	}
	req, err := http.NewRequest(method, url, bytes.NewBuffer(jsonPayload))
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("X-Api-Key", a.Token)
	// client is the package-level rate-limited HTTP client.
	return client.Do(req)
}
type Storage struct {
Arrs map[string]*Arr // name -> arr
mu sync.RWMutex
}
// inferType guesses the *arr flavor from substrings of the host URL or
// instance name. It returns the empty Type when nothing matches.
func inferType(host, name string) Type {
	matches := func(kind string) bool {
		return strings.Contains(host, kind) || strings.Contains(name, kind)
	}
	switch {
	case matches("sonarr"):
		return Sonarr
	case matches("radarr"):
		return Radarr
	case matches("lidarr"):
		return Lidarr
	case matches("readarr"):
		return Readarr
	}
	return ""
}
// NewStorage returns an empty arr registry.
func NewStorage() *Storage {
	return &Storage{
		Arrs: map[string]*Arr{},
	}
}
// AddOrUpdate inserts or replaces an arr entry under its name.
//
// Bug fix: Storage.Arrs is documented as "name -> arr" and Get() looks
// entries up by name, but the original keyed the map by arr.Host, so
// Get(name) could never find an entry added here. Key by Name instead.
func (as *Storage) AddOrUpdate(arr *Arr) {
	as.mu.Lock()
	defer as.mu.Unlock()
	as.Arrs[arr.Name] = arr
}
// Get returns the arr registered under name, or nil when absent.
func (as *Storage) Get(name string) *Arr {
	as.mu.RLock()
	a := as.Arrs[name]
	as.mu.RUnlock()
	return a
}

// GetAll returns a snapshot slice of every registered arr.
// Iteration order is unspecified (Go map order is random).
func (as *Storage) GetAll() []*Arr {
	as.mu.RLock()
	defer as.mu.RUnlock()
	out := make([]*Arr, 0, len(as.Arrs))
	for _, a := range as.Arrs {
		out = append(out, a)
	}
	return out
}

29
pkg/arr/content.go Normal file
View File

@@ -0,0 +1,29 @@
package arr
import (
"encoding/json"
"fmt"
"net/http"
)
type ContentRequest struct {
ID string `json:"id"`
Title string `json:"name"`
Arr string `json:"arr"`
}
// GetContents fetches the series list from the arr and decodes it,
// returning nil on any failure.
//
// Bug fix: Request returns (nil, nil) for unconfigured instances; the
// original then deferred resp.Body.Close() on a nil response and
// panicked. Guard against nil resp.
//
// NOTE(review): decoding api/v3/series (normally a JSON array) into a
// single ContentRequest looks suspect — confirm the intended schema.
func (a *Arr) GetContents() *ContentRequest {
	resp, err := a.Request(http.MethodGet, "api/v3/series", nil)
	if err != nil || resp == nil {
		return nil
	}
	defer resp.Body.Close()
	var data *ContentRequest
	if err = json.NewDecoder(resp.Body).Decode(&data); err != nil {
		fmt.Printf("Error: %v\n", err)
		return nil
	}
	fmt.Printf("Data: %v\n", data)
	return data
}

41
pkg/arr/history.go Normal file
View File

@@ -0,0 +1,41 @@
package arr
import (
"encoding/json"
"net/http"
gourl "net/url"
)
type HistorySchema struct {
Page int `json:"page"`
PageSize int `json:"pageSize"`
SortKey string `json:"sortKey"`
SortDirection string `json:"sortDirection"`
TotalRecords int `json:"totalRecords"`
Records []struct {
ID int `json:"id"`
DownloadID string `json:"downloadId"`
} `json:"records"`
}
// GetHistory fetches the arr's grab/import history, optionally
// filtered by downloadId, returning nil on any failure.
//
// Bug fix: Request returns (nil, nil) for unconfigured instances; the
// original then dereferenced resp.Body on a nil response and panicked.
//
// NOTE(review): the endpoint here is "history" without the "api/v3/"
// prefix used by the other calls — confirm this is intentional.
func (a *Arr) GetHistory(downloadId, eventType string) *HistorySchema {
	query := gourl.Values{}
	if downloadId != "" {
		query.Add("downloadId", downloadId)
	}
	query.Add("eventType", eventType)
	query.Add("pageSize", "100")
	url := "history" + "?" + query.Encode()
	resp, err := a.Request(http.MethodGet, url, nil)
	if err != nil || resp == nil {
		return nil
	}
	defer resp.Body.Close()
	var data *HistorySchema
	if err = json.NewDecoder(resp.Body).Decode(&data); err != nil {
		return nil
	}
	return data
}

209
pkg/arr/import.go Normal file
View File

@@ -0,0 +1,209 @@
package arr
import (
"encoding/json"
"fmt"
"io"
"net/http"
gourl "net/url"
"strconv"
"time"
)
type ImportResponseSchema struct {
Path string `json:"path"`
RelativePath string `json:"relativePath"`
FolderName string `json:"folderName"`
Name string `json:"name"`
Size int `json:"size"`
Series struct {
Title string `json:"title"`
SortTitle string `json:"sortTitle"`
Status string `json:"status"`
Ended bool `json:"ended"`
Overview string `json:"overview"`
Network string `json:"network"`
AirTime string `json:"airTime"`
Images []struct {
CoverType string `json:"coverType"`
RemoteUrl string `json:"remoteUrl"`
} `json:"images"`
OriginalLanguage struct {
Id int `json:"id"`
Name string `json:"name"`
} `json:"originalLanguage"`
Seasons []struct {
SeasonNumber int `json:"seasonNumber"`
Monitored bool `json:"monitored"`
} `json:"seasons"`
Year int `json:"year"`
Path string `json:"path"`
QualityProfileId int `json:"qualityProfileId"`
SeasonFolder bool `json:"seasonFolder"`
Monitored bool `json:"monitored"`
MonitorNewItems string `json:"monitorNewItems"`
UseSceneNumbering bool `json:"useSceneNumbering"`
Runtime int `json:"runtime"`
TvdbId int `json:"tvdbId"`
TvRageId int `json:"tvRageId"`
TvMazeId int `json:"tvMazeId"`
TmdbId int `json:"tmdbId"`
FirstAired time.Time `json:"firstAired"`
LastAired time.Time `json:"lastAired"`
SeriesType string `json:"seriesType"`
CleanTitle string `json:"cleanTitle"`
ImdbId string `json:"imdbId"`
TitleSlug string `json:"titleSlug"`
Certification string `json:"certification"`
Genres []string `json:"genres"`
Tags []interface{} `json:"tags"`
Added time.Time `json:"added"`
Ratings struct {
Votes int `json:"votes"`
Value float64 `json:"value"`
} `json:"ratings"`
LanguageProfileId int `json:"languageProfileId"`
Id int `json:"id"`
} `json:"series"`
SeasonNumber int `json:"seasonNumber"`
Episodes []struct {
SeriesId int `json:"seriesId"`
TvdbId int `json:"tvdbId"`
EpisodeFileId int `json:"episodeFileId"`
SeasonNumber int `json:"seasonNumber"`
EpisodeNumber int `json:"episodeNumber"`
Title string `json:"title"`
AirDate string `json:"airDate"`
AirDateUtc time.Time `json:"airDateUtc"`
Runtime int `json:"runtime"`
Overview string `json:"overview"`
HasFile bool `json:"hasFile"`
Monitored bool `json:"monitored"`
AbsoluteEpisodeNumber int `json:"absoluteEpisodeNumber"`
UnverifiedSceneNumbering bool `json:"unverifiedSceneNumbering"`
Id int `json:"id"`
FinaleType string `json:"finaleType,omitempty"`
} `json:"episodes"`
ReleaseGroup string `json:"releaseGroup"`
Quality struct {
Quality struct {
Id int `json:"id"`
Name string `json:"name"`
Source string `json:"source"`
Resolution int `json:"resolution"`
} `json:"quality"`
Revision struct {
Version int `json:"version"`
Real int `json:"real"`
IsRepack bool `json:"isRepack"`
} `json:"revision"`
} `json:"quality"`
Languages []struct {
Id int `json:"id"`
Name string `json:"name"`
} `json:"languages"`
QualityWeight int `json:"qualityWeight"`
CustomFormats []interface{} `json:"customFormats"`
CustomFormatScore int `json:"customFormatScore"`
IndexerFlags int `json:"indexerFlags"`
ReleaseType string `json:"releaseType"`
Rejections []struct {
Reason string `json:"reason"`
Type string `json:"type"`
} `json:"rejections"`
Id int `json:"id"`
}
type ManualImportRequestFile struct {
Path string `json:"path"`
SeriesId int `json:"seriesId"`
SeasonNumber int `json:"seasonNumber"`
EpisodeIds []int `json:"episodeIds"`
Quality struct {
Quality struct {
Id int `json:"id"`
Name string `json:"name"`
Source string `json:"source"`
Resolution int `json:"resolution"`
} `json:"quality"`
Revision struct {
Version int `json:"version"`
Real int `json:"real"`
IsRepack bool `json:"isRepack"`
} `json:"revision"`
} `json:"quality"`
Languages []struct {
Id int `json:"id"`
Name string `json:"name"`
} `json:"languages"`
ReleaseGroup string `json:"releaseGroup"`
CustomFormats []interface{} `json:"customFormats"`
CustomFormatScore int `json:"customFormatScore"`
IndexerFlags int `json:"indexerFlags"`
ReleaseType string `json:"releaseType"`
Rejections []struct {
Reason string `json:"reason"`
Type string `json:"type"`
} `json:"rejections"`
}
type ManualImportRequestSchema struct {
Name string `json:"name"`
Files []ManualImportRequestFile `json:"files"`
ImportMode string `json:"importMode"`
}
// Import triggers a manual import in the arr for the files under path.
// It first queries api/v3/manualimport to discover importable files,
// then posts a ManualImport command for all of them. The returned
// ReadCloser is the command response body; the CALLER must close it.
//
// seasons is currently unused; it is kept for interface compatibility.
//
// Bug fixes:
//   - the original deferred resp.Body.Close() on the command response
//     and then returned resp.Body, handing callers an already-closed
//     reader; the second body is now left open for the caller.
//   - Request returns (nil, nil) for unconfigured instances; nil
//     responses are now rejected instead of dereferenced.
func (a *Arr) Import(path string, seriesId int, seasons []int) (io.ReadCloser, error) {
	query := gourl.Values{}
	query.Add("folder", path)
	if seriesId != 0 {
		query.Add("seriesId", strconv.Itoa(seriesId))
	}
	url := "api/v3/manualimport" + "?" + query.Encode()
	resp, err := a.Request(http.MethodGet, url, nil)
	if err != nil {
		return nil, fmt.Errorf("failed to import, invalid file: %w", err)
	}
	if resp == nil {
		return nil, fmt.Errorf("failed to import: arr is not configured")
	}
	defer resp.Body.Close()
	var data []ImportResponseSchema
	if err = json.NewDecoder(resp.Body).Decode(&data); err != nil {
		return nil, fmt.Errorf("failed to decode response: %w", err)
	}
	files := make([]ManualImportRequestFile, 0, len(data))
	for _, d := range data {
		episodeIds := make([]int, 0, len(d.Episodes))
		for _, e := range d.Episodes {
			episodeIds = append(episodeIds, e.Id)
		}
		files = append(files, ManualImportRequestFile{
			Path:              d.Path,
			SeriesId:          d.Series.Id,
			SeasonNumber:      d.SeasonNumber,
			EpisodeIds:        episodeIds,
			Quality:           d.Quality,
			Languages:         d.Languages,
			ReleaseGroup:      d.ReleaseGroup,
			CustomFormats:     d.CustomFormats,
			CustomFormatScore: d.CustomFormatScore,
			IndexerFlags:      d.IndexerFlags,
			ReleaseType:       d.ReleaseType,
			Rejections:        d.Rejections,
		})
	}
	request := ManualImportRequestSchema{
		Name:       "ManualImport",
		Files:      files,
		ImportMode: "copy",
	}
	url = "api/v3/command"
	resp, err = a.Request(http.MethodPost, url, request)
	if err != nil {
		return nil, fmt.Errorf("failed to import: %w", err)
	}
	if resp == nil {
		return nil, fmt.Errorf("failed to import: arr is not configured")
	}
	// Intentionally NOT closed here: ownership of the body transfers
	// to the caller.
	return resp.Body, nil
}

54
pkg/arr/refresh.go Normal file
View File

@@ -0,0 +1,54 @@
package arr
import (
"cmp"
"fmt"
"goBlack/common"
"net/http"
"strconv"
"strings"
)
// Refresh asks the arr to refresh its monitored downloads.
// It returns nil on any 2xx response and an error otherwise.
//
// Fixes: the original leaked the response body, and tested the status
// class via strconv.Itoa(code)[0] == '2'; an integer range check is
// clearer and allocation-free.
func (a *Arr) Refresh() error {
	payload := map[string]string{"name": "RefreshMonitoredDownloads"}
	resp, err := a.Request(http.MethodPost, "api/v3/command", payload)
	if err == nil && resp != nil {
		defer resp.Body.Close()
		if resp.StatusCode >= 200 && resp.StatusCode < 300 {
			return nil
		}
	}
	return fmt.Errorf("failed to refresh monitored downloads for %s", cmp.Or(a.Name, a.Host))
}
// MarkAsFailed looks up the grab-history record matching infoHash and
// marks it failed in the arr, so the arr can blocklist and re-grab.
// Missing history or no matching record is treated as success (no-op).
//
// Bug fix: the original returned the "failed to mark" error when
// err == nil (inverted condition), i.e. it reported failure exactly
// when the request succeeded. It also leaked the response body.
//
// NOTE(review): this request goes through a bare http.Client without
// the X-Api-Key header the other calls set — confirm the arr accepts
// it unauthenticated.
func (a *Arr) MarkAsFailed(infoHash string) error {
	downloadId := strings.ToUpper(infoHash)
	history := a.GetHistory(downloadId, "grabbed")
	if history == nil {
		return nil
	}
	torrentId := 0
	for _, record := range history.Records {
		if strings.EqualFold(record.DownloadID, downloadId) {
			torrentId = record.ID
			break
		}
	}
	if torrentId == 0 {
		return nil
	}
	url, err := common.JoinURL(a.Host, "history/failed/", strconv.Itoa(torrentId))
	if err != nil {
		return err
	}
	req, err := http.NewRequest(http.MethodPost, url, nil)
	if err != nil {
		return err
	}
	client := &http.Client{}
	resp, err := client.Do(req)
	if err != nil {
		return fmt.Errorf("failed to mark %s as failed: %v", cmp.Or(a.Name, a.Host), err)
	}
	resp.Body.Close()
	return nil
}

31
pkg/arr/tmdb.go Normal file
View File

@@ -0,0 +1,31 @@
package arr
import (
"encoding/json"
"net/http"
url2 "net/url"
)
type TMDBResponse struct {
Page int `json:"page"`
Results []struct {
ID int `json:"id"`
Name string `json:"name"`
MediaType string `json:"media_type"`
PosterPath string `json:"poster_path"`
} `json:"results"`
}
// SearchTMDB queries TMDB's multi-search endpoint for term and decodes
// the result.
//
// Fixes: the original never closed the response body, leaking the
// connection.
//
// NOTE(review): "api_key=key" is a hard-coded placeholder — this call
// cannot succeed until a real TMDB API key is wired in from config.
func SearchTMDB(term string) (*TMDBResponse, error) {
	resp, err := http.Get("https://api.themoviedb.org/3/search/multi?api_key=key&query=" + url2.QueryEscape(term))
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	var data *TMDBResponse
	if err = json.NewDecoder(resp.Body).Decode(&data); err != nil {
		return nil, err
	}
	return data, nil
}

View File

@@ -4,10 +4,23 @@ import (
"fmt"
"github.com/anacrolix/torrent/metainfo"
"goBlack/common"
"goBlack/pkg/arr"
"log"
"path/filepath"
)
type BaseDebrid struct {
Name string
Host string `json:"host"`
APIKey string
DownloadUncached bool
client *common.RLHTTPClient
cache *common.Cache
MountPath string
logger *log.Logger
CheckCached bool
}
type Service interface {
SubmitMagnet(torrent *Torrent) (*Torrent, error)
CheckStatus(torrent *Torrent, isSymlink bool) (*Torrent, error)
@@ -15,26 +28,31 @@ type Service interface {
DeleteTorrent(torrent *Torrent)
IsAvailable(infohashes []string) map[string]bool
GetMountPath() string
GetDownloadUncached() bool
GetCheckCached() bool
GetTorrent(id string) (*Torrent, error)
GetName() string
GetLogger() *log.Logger
}
type Debrid struct {
Host string `json:"host"`
APIKey string
DownloadUncached bool
client *common.RLHTTPClient
cache *common.Cache
MountPath string
logger *log.Logger
func NewDebrid(debs []common.DebridConfig, cache *common.Cache) *DebridService {
debrids := make([]Service, 0)
for _, dc := range debs {
d := createDebrid(dc, cache)
d.GetLogger().Println("Debrid Service started")
debrids = append(debrids, d)
}
d := &DebridService{debrids: debrids, lastUsed: 0}
return d
}
func NewDebrid(dc common.DebridConfig, cache *common.Cache) Service {
func createDebrid(dc common.DebridConfig, cache *common.Cache) Service {
switch dc.Name {
case "realdebrid":
return NewRealDebrid(dc, cache)
case "torbox":
return NewTorbox(dc, cache)
case "debridlink":
return NewDebridLink(dc, cache)
default:
return NewRealDebrid(dc, cache)
}
@@ -77,16 +95,17 @@ func getTorrentInfo(filePath string) (*Torrent, error) {
if err != nil {
return nil, err
}
infoLength := info.Length
magnet := &common.Magnet{
InfoHash: infoHash,
Name: info.Name,
Size: info.Length,
Size: infoLength,
Link: mi.Magnet(&hash, &info).String(),
}
torrent := &Torrent{
InfoHash: infoHash,
Name: info.Name,
Size: info.Length,
Size: infoLength,
Magnet: magnet,
Filename: filePath,
}
@@ -95,54 +114,71 @@ func getTorrentInfo(filePath string) (*Torrent, error) {
func GetLocalCache(infohashes []string, cache *common.Cache) ([]string, map[string]bool) {
result := make(map[string]bool)
hashes := make([]string, len(infohashes))
if len(infohashes) == 0 {
return hashes, result
}
if len(infohashes) == 1 {
if cache.Exists(infohashes[0]) {
return hashes, map[string]bool{infohashes[0]: true}
}
return infohashes, result
}
//if len(infohashes) == 0 {
// return hashes, result
//}
//if len(infohashes) == 1 {
// if cache.Exists(infohashes[0]) {
// return hashes, map[string]bool{infohashes[0]: true}
// }
// return infohashes, result
//}
//
//cachedHashes := cache.GetMultiple(infohashes)
//for _, h := range infohashes {
// _, exists := cachedHashes[h]
// if !exists {
// hashes = append(hashes, h)
// } else {
// result[h] = true
// }
//}
cachedHashes := cache.GetMultiple(infohashes)
for _, h := range infohashes {
_, exists := cachedHashes[h]
if !exists {
hashes = append(hashes, h)
} else {
result[h] = true
}
}
return hashes, result
return infohashes, result
}
func ProcessQBitTorrent(d Service, magnet *common.Magnet, arr *Arr, isSymlink bool) (*Torrent, error) {
func ProcessTorrent(d *DebridService, magnet *common.Magnet, a *arr.Arr, isSymlink bool) (*Torrent, error) {
debridTorrent := &Torrent{
InfoHash: magnet.InfoHash,
Magnet: magnet,
Name: magnet.Name,
Arr: arr,
Arr: a,
Size: magnet.Size,
}
logger := d.GetLogger()
logger.Printf("Torrent Hash: %s", debridTorrent.InfoHash)
if !d.GetDownloadUncached() {
hash, exists := d.IsAvailable([]string{debridTorrent.InfoHash})[debridTorrent.InfoHash]
if !exists || !hash {
return debridTorrent, fmt.Errorf("torrent: %s is not cached", debridTorrent.Name)
} else {
logger.Printf("Torrent: %s is cached(or downloading)", debridTorrent.Name)
}
}
debridTorrent, err := d.SubmitMagnet(debridTorrent)
if err != nil || debridTorrent.Id == "" {
logger.Printf("Error submitting magnet: %s", err)
return nil, err
errs := make([]error, 0)
for index, db := range d.debrids {
log.Println("Processing debrid: ", db.GetName())
logger := db.GetLogger()
logger.Printf("Torrent Hash: %s", debridTorrent.InfoHash)
if db.GetCheckCached() {
hash, exists := db.IsAvailable([]string{debridTorrent.InfoHash})[debridTorrent.InfoHash]
if !exists || !hash {
logger.Printf("Torrent: %s is not cached", debridTorrent.Name)
continue
} else {
logger.Printf("Torrent: %s is cached(or downloading)", debridTorrent.Name)
}
}
dbt, err := db.SubmitMagnet(debridTorrent)
if dbt != nil {
dbt.Debrid = db
dbt.Arr = a
}
if err != nil || dbt == nil || dbt.Id == "" {
errs = append(errs, err)
continue
}
logger.Printf("Torrent: %s submitted to %s", dbt.Name, db.GetName())
d.lastUsed = index
return db.CheckStatus(dbt, isSymlink)
}
return d.CheckStatus(debridTorrent, isSymlink)
err := fmt.Errorf("failed to process torrent")
for _, e := range errs {
err = fmt.Errorf("%w\n%w", err, e)
}
return nil, err
}

271
pkg/debrid/debrid_link.go Normal file
View File

@@ -0,0 +1,271 @@
package debrid
import (
"bytes"
"encoding/json"
"fmt"
"goBlack/common"
"goBlack/pkg/debrid/structs"
"log"
"net/http"
"os"
"strings"
)
type DebridLink struct {
BaseDebrid
}
func (r *DebridLink) GetMountPath() string {
return r.MountPath
}
// GetName returns the provider's configured name (set to "debridlink" in NewDebridLink).
func (r *DebridLink) GetName() string {
	return r.Name
}
// GetLogger returns the per-provider logger.
func (r *DebridLink) GetLogger() *log.Logger {
	return r.logger
}
// IsAvailable reports, for each infohash, whether debrid-link already has the
// torrent cached. Answers already in the local cache are reused; remaining
// hashes are queried against the /seedbox/cached endpoint in batches, and the
// combined results are written back to the local cache.
func (r *DebridLink) IsAvailable(infohashes []string) map[string]bool {
	// Check if the infohashes are available in the local cache
	hashes, result := GetLocalCache(infohashes, r.cache)
	if len(hashes) == 0 {
		// Either all the infohashes are locally cached or none are
		r.cache.AddMultiple(result)
		return result
	}
	// Divide the remaining hashes into batches of 200
	for i := 0; i < len(hashes); i += 200 {
		end := i + 200
		if end > len(hashes) {
			end = len(hashes)
		}
		// Filter out empty strings
		validHashes := make([]string, 0, end-i)
		for _, hash := range hashes[i:end] {
			if hash != "" {
				validHashes = append(validHashes, hash)
			}
		}
		// If no valid hashes in this batch, continue to the next batch
		if len(validHashes) == 0 {
			continue
		}
		hashStr := strings.Join(validHashes, ",")
		url := fmt.Sprintf("%s/seedbox/cached/%s", r.Host, hashStr)
		req, _ := http.NewRequest(http.MethodGet, url, nil)
		resp, err := r.client.MakeRequest(req)
		if err != nil {
			log.Println("Error checking availability:", err)
			return result
		}
		var data structs.DebridLinkAvailableResponse
		err = json.Unmarshal(resp, &data)
		if err != nil {
			log.Println("Error unmarshalling availability:", err)
			return result
		}
		if data.Value == nil {
			return result
		}
		value := *data.Value
		// A hash present in the response value means it is cached.
		for _, h := range hashes[i:end] {
			_, exists := value[h]
			if exists {
				result[h] = true
			}
		}
	}
	r.cache.AddMultiple(result) // Add the results to the cache
	return result
}
// GetTorrent fetches a single torrent by id from /seedbox/list and maps it to
// the internal Torrent representation. A Status of 100 from the API means the
// torrent is fully downloaded; any other value is reported as "downloading".
//
// Each file now carries its DownloadURL as Link, matching SubmitMagnet, so
// that GetDownloadLinks still works on torrents refreshed via GetTorrent.
func (r *DebridLink) GetTorrent(id string) (*Torrent, error) {
	torrent := &Torrent{}
	url := fmt.Sprintf("%s/seedbox/list?ids=%s", r.Host, id)
	req, _ := http.NewRequest(http.MethodGet, url, nil)
	resp, err := r.client.MakeRequest(req)
	if err != nil {
		return torrent, err
	}
	var res structs.DebridLinkTorrentInfo
	if err = json.Unmarshal(resp, &res); err != nil {
		return torrent, err
	}
	if !res.Success {
		return torrent, fmt.Errorf("error getting torrent")
	}
	if res.Value == nil {
		return torrent, fmt.Errorf("torrent not found")
	}
	dt := *res.Value
	if len(dt) == 0 {
		return torrent, fmt.Errorf("torrent not found")
	}
	data := dt[0]
	status := "downloading"
	if data.Status == 100 {
		status = "downloaded"
	}
	name := common.RemoveInvalidChars(data.Name)
	torrent.Id = data.ID
	torrent.Name = name
	torrent.Bytes = data.TotalSize
	torrent.Folder = name
	torrent.Progress = data.DownloadPercent
	torrent.Status = status
	torrent.Speed = data.DownloadSpeed
	torrent.Seeders = data.PeersConnected
	torrent.Filename = name
	torrent.OriginalFilename = name
	files := make([]TorrentFile, len(data.Files))
	for i, f := range data.Files {
		files[i] = TorrentFile{
			Id:   f.ID,
			Name: f.Name,
			Size: f.Size,
			// Keep the download link so GetDownloadLinks can resolve files
			// after a refresh (SubmitMagnet already populated this field).
			Link: f.DownloadURL,
		}
	}
	torrent.Files = files
	torrent.Debrid = r
	return torrent, nil
}
// SubmitMagnet adds the torrent's magnet link to the user's debrid-link
// seedbox via /seedbox/add, then fills the Torrent from the API response
// (id, sanitized name, size, progress, per-file download links).
func (r *DebridLink) SubmitMagnet(torrent *Torrent) (*Torrent, error) {
	url := fmt.Sprintf("%s/seedbox/add", r.Host)
	payload := map[string]string{"url": torrent.Magnet.Link}
	jsonPayload, _ := json.Marshal(payload)
	req, _ := http.NewRequest(http.MethodPost, url, bytes.NewBuffer(jsonPayload))
	resp, err := r.client.MakeRequest(req)
	if err != nil {
		return nil, err
	}
	var res structs.DebridLinkSubmitTorrentInfo
	if err = json.Unmarshal(resp, &res); err != nil {
		return nil, err
	}
	if !res.Success || res.Value == nil {
		return nil, fmt.Errorf("error adding torrent")
	}
	data := *res.Value
	// A freshly submitted torrent is always reported as downloading;
	// CheckStatus decides what to do from here.
	status := "downloading"
	log.Printf("Torrent: %s added with id: %s\n", torrent.Name, data.ID)
	name := common.RemoveInvalidChars(data.Name)
	torrent.Id = data.ID
	torrent.Name = name
	torrent.Bytes = data.TotalSize
	torrent.Folder = name
	torrent.Progress = data.DownloadPercent
	torrent.Status = status
	torrent.Speed = data.DownloadSpeed
	torrent.Seeders = data.PeersConnected
	torrent.Filename = name
	torrent.OriginalFilename = name
	files := make([]TorrentFile, len(data.Files))
	for i, f := range data.Files {
		files[i] = TorrentFile{
			Id:   f.ID,
			Name: f.Name,
			Size: f.Size,
			Link: f.DownloadURL,
		}
	}
	torrent.Files = files
	torrent.Debrid = r
	return torrent, nil
}
// CheckStatus refreshes the torrent from the API and decides how to proceed:
// downloaded torrents get their download links resolved (unless isSymlink is
// true, where the mounted files are used instead); torrents that are still
// downloading either break out (uncached downloads allowed) or are deleted in
// the background and reported as an error.
func (r *DebridLink) CheckStatus(torrent *Torrent, isSymlink bool) (*Torrent, error) {
	for {
		t, err := r.GetTorrent(torrent.Id)
		// Keep the freshest snapshot even when the fetch failed.
		torrent = t
		if err != nil || torrent == nil {
			return torrent, err
		}
		status := torrent.Status
		if status == "error" || status == "dead" || status == "magnet_error" {
			return torrent, fmt.Errorf("torrent: %s has error", torrent.Name)
		} else if status == "downloaded" {
			r.logger.Printf("Torrent: %s downloaded\n", torrent.Name)
			if !isSymlink {
				err = r.GetDownloadLinks(torrent)
				if err != nil {
					return torrent, err
				}
			}
			break
		} else if status == "downloading" {
			if !r.DownloadUncached {
				// Uncached downloads are disabled: drop the torrent
				// asynchronously and report it as not cached.
				go torrent.Delete()
				return torrent, fmt.Errorf("torrent: %s not cached", torrent.Name)
			}
			// Break out of the loop if the torrent is downloading.
			// This is necessary to prevent infinite loop since we moved to sync downloading and async processing
			break
		}
	}
	return torrent, nil
}
// DeleteTorrent removes the torrent from the user's debrid-link seedbox and
// logs whether the removal succeeded.
func (r *DebridLink) DeleteTorrent(torrent *Torrent) {
	endpoint := fmt.Sprintf("%s/seedbox/%s/remove", r.Host, torrent.Id)
	req, _ := http.NewRequest(http.MethodDelete, endpoint, nil)
	if _, err := r.client.MakeRequest(req); err != nil {
		r.logger.Printf("Error deleting torrent: %s", err)
		return
	}
	r.logger.Printf("Torrent: %s deleted\n", torrent.Name)
}
// GetDownloadLinks copies each file's already-known download link (populated
// by SubmitMagnet) into the torrent's DownloadLinks slice. No API calls are
// made; it never returns a non-nil error, but keeps the error return to
// satisfy the Service interface.
func (r *DebridLink) GetDownloadLinks(torrent *Torrent) error {
	// Pre-size: one download link per file.
	downloadLinks := make([]TorrentDownloadLinks, 0, len(torrent.Files))
	for _, f := range torrent.Files {
		downloadLinks = append(downloadLinks, TorrentDownloadLinks{
			Link:     f.Link,
			Filename: f.Name,
		})
	}
	torrent.DownloadLinks = downloadLinks
	return nil
}
// GetCheckCached reports whether availability should be checked before
// submitting torrents to this provider.
func (r *DebridLink) GetCheckCached() bool {
	return r.CheckCached
}
// NewDebridLink builds a DebridLink provider from the given config, wiring up
// a rate-limited HTTP client with bearer auth, a per-provider logger, and the
// shared availability cache.
func NewDebridLink(dc common.DebridConfig, cache *common.Cache) *DebridLink {
	headers := map[string]string{
		"Authorization": fmt.Sprintf("Bearer %s", dc.APIKey),
		"Content-Type":  "application/json",
	}
	base := BaseDebrid{
		Name:             "debridlink",
		Host:             dc.Host,
		APIKey:           dc.APIKey,
		DownloadUncached: dc.DownloadUncached,
		client:           common.NewRLHTTPClient(common.ParseRateLimit(dc.RateLimit), headers),
		cache:            cache,
		MountPath:        dc.Folder,
		logger:           common.NewLogger(dc.Name, os.Stdout),
		CheckCached:      dc.CheckCached,
	}
	return &DebridLink{BaseDebrid: base}
}

View File

@@ -10,18 +10,13 @@ import (
gourl "net/url"
"os"
"path/filepath"
"slices"
"strconv"
"strings"
)
type RealDebrid struct {
Host string `json:"host"`
APIKey string
DownloadUncached bool
client *common.RLHTTPClient
cache *common.Cache
MountPath string
logger *log.Logger
BaseDebrid
}
func (r *RealDebrid) GetMountPath() string {
@@ -29,7 +24,7 @@ func (r *RealDebrid) GetMountPath() string {
}
func (r *RealDebrid) GetName() string {
return "realdebrid"
return r.Name
}
func (r *RealDebrid) GetLogger() *log.Logger {
@@ -46,13 +41,13 @@ func GetTorrentFiles(data structs.RealDebridTorrentInfo) []TorrentFile {
continue
}
fileId := f.ID
file := &TorrentFile{
file := TorrentFile{
Name: name,
Path: name,
Size: int64(f.Bytes),
Size: f.Bytes,
Id: strconv.Itoa(fileId),
}
files = append(files, *file)
files = append(files, file)
}
return files
}
@@ -89,7 +84,8 @@ func (r *RealDebrid) IsAvailable(infohashes []string) map[string]bool {
hashStr := strings.Join(validHashes, "/")
url := fmt.Sprintf("%s/torrents/instantAvailability/%s", r.Host, hashStr)
resp, err := r.client.MakeRequest(http.MethodGet, url, nil)
req, _ := http.NewRequest(http.MethodGet, url, nil)
resp, err := r.client.MakeRequest(req)
if err != nil {
log.Println("Error checking availability:", err)
return result
@@ -117,7 +113,8 @@ func (r *RealDebrid) SubmitMagnet(torrent *Torrent) (*Torrent, error) {
"magnet": {torrent.Magnet.Link},
}
var data structs.RealDebridAddMagnetSchema
resp, err := r.client.MakeRequest(http.MethodPost, url, strings.NewReader(payload.Encode()))
req, _ := http.NewRequest(http.MethodPost, url, strings.NewReader(payload.Encode()))
resp, err := r.client.MakeRequest(req)
if err != nil {
return nil, err
}
@@ -131,7 +128,8 @@ func (r *RealDebrid) SubmitMagnet(torrent *Torrent) (*Torrent, error) {
func (r *RealDebrid) GetTorrent(id string) (*Torrent, error) {
torrent := &Torrent{}
url := fmt.Sprintf("%s/torrents/info/%s", r.Host, id)
resp, err := r.client.MakeRequest(http.MethodGet, url, nil)
req, _ := http.NewRequest(http.MethodGet, url, nil)
resp, err := r.client.MakeRequest(req)
if err != nil {
return torrent, err
}
@@ -152,6 +150,7 @@ func (r *RealDebrid) GetTorrent(id string) (*Torrent, error) {
torrent.Filename = data.Filename
torrent.OriginalFilename = data.OriginalFilename
torrent.Links = data.Links
torrent.Debrid = r
files := GetTorrentFiles(data)
torrent.Files = files
return torrent, nil
@@ -159,8 +158,9 @@ func (r *RealDebrid) GetTorrent(id string) (*Torrent, error) {
func (r *RealDebrid) CheckStatus(torrent *Torrent, isSymlink bool) (*Torrent, error) {
url := fmt.Sprintf("%s/torrents/info/%s", r.Host, torrent.Id)
req, _ := http.NewRequest(http.MethodGet, url, nil)
for {
resp, err := r.client.MakeRequest(http.MethodGet, url, nil)
resp, err := r.client.MakeRequest(req)
if err != nil {
log.Println("ERROR Checking file: ", err)
return torrent, err
@@ -168,6 +168,7 @@ func (r *RealDebrid) CheckStatus(torrent *Torrent, isSymlink bool) (*Torrent, er
var data structs.RealDebridTorrentInfo
err = json.Unmarshal(resp, &data)
status := data.Status
fmt.Println("RD STATUS: ", status)
name := common.RemoveInvalidChars(data.OriginalFilename)
torrent.Name = name // Important because some magnet changes the name
torrent.Folder = name
@@ -179,8 +180,10 @@ func (r *RealDebrid) CheckStatus(torrent *Torrent, isSymlink bool) (*Torrent, er
torrent.Seeders = data.Seeders
torrent.Links = data.Links
torrent.Status = status
torrent.Debrid = r
downloading_status := []string{"downloading", "magnet_conversion", "queued", "compressing", "uploading"}
if status == "error" || status == "dead" || status == "magnet_error" {
return torrent, fmt.Errorf("torrent: %s has error", torrent.Name)
return torrent, fmt.Errorf("torrent: %s has error: %s", torrent.Name, status)
} else if status == "waiting_files_selection" {
files := GetTorrentFiles(data)
torrent.Files = files
@@ -195,7 +198,8 @@ func (r *RealDebrid) CheckStatus(torrent *Torrent, isSymlink bool) (*Torrent, er
"files": {strings.Join(filesId, ",")},
}
payload := strings.NewReader(p.Encode())
_, err = r.client.MakeRequest(http.MethodPost, fmt.Sprintf("%s/torrents/selectFiles/%s", r.Host, torrent.Id), payload)
req, _ := http.NewRequest(http.MethodPost, fmt.Sprintf("%s/torrents/selectFiles/%s", r.Host, torrent.Id), payload)
_, err = r.client.MakeRequest(req)
if err != nil {
return torrent, err
}
@@ -209,11 +213,9 @@ func (r *RealDebrid) CheckStatus(torrent *Torrent, isSymlink bool) (*Torrent, er
return torrent, err
}
}
break
} else if status == "downloading" {
} else if slices.Contains(downloading_status, status) {
if !r.DownloadUncached {
go r.DeleteTorrent(torrent)
return torrent, fmt.Errorf("torrent: %s not cached", torrent.Name)
}
// Break out of the loop if the torrent is downloading.
@@ -227,7 +229,8 @@ func (r *RealDebrid) CheckStatus(torrent *Torrent, isSymlink bool) (*Torrent, er
func (r *RealDebrid) DeleteTorrent(torrent *Torrent) {
url := fmt.Sprintf("%s/torrents/delete/%s", r.Host, torrent.Id)
_, err := r.client.MakeRequest(http.MethodDelete, url, nil)
req, _ := http.NewRequest(http.MethodDelete, url, nil)
_, err := r.client.MakeRequest(req)
if err == nil {
r.logger.Printf("Torrent: %s deleted\n", torrent.Name)
} else {
@@ -245,7 +248,8 @@ func (r *RealDebrid) GetDownloadLinks(torrent *Torrent) error {
payload := gourl.Values{
"link": {link},
}
resp, err := r.client.MakeRequest(http.MethodPost, url, strings.NewReader(payload.Encode()))
req, _ := http.NewRequest(http.MethodPost, url, strings.NewReader(payload.Encode()))
resp, err := r.client.MakeRequest(req)
if err != nil {
return err
}
@@ -264,8 +268,8 @@ func (r *RealDebrid) GetDownloadLinks(torrent *Torrent) error {
return nil
}
func (r *RealDebrid) GetDownloadUncached() bool {
return r.DownloadUncached
func (r *RealDebrid) GetCheckCached() bool {
return r.CheckCached
}
func NewRealDebrid(dc common.DebridConfig, cache *common.Cache) *RealDebrid {
@@ -276,12 +280,16 @@ func NewRealDebrid(dc common.DebridConfig, cache *common.Cache) *RealDebrid {
client := common.NewRLHTTPClient(rl, headers)
logger := common.NewLogger(dc.Name, os.Stdout)
return &RealDebrid{
Host: dc.Host,
APIKey: dc.APIKey,
DownloadUncached: dc.DownloadUncached,
client: client,
cache: cache,
MountPath: dc.Folder,
logger: logger,
BaseDebrid: BaseDebrid{
Name: "realdebrid",
Host: dc.Host,
APIKey: dc.APIKey,
DownloadUncached: dc.DownloadUncached,
client: client,
cache: cache,
MountPath: dc.Folder,
logger: logger,
CheckCached: dc.CheckCached,
},
}
}

13
pkg/debrid/service.go Normal file
View File

@@ -0,0 +1,13 @@
package debrid
// DebridService aggregates the configured debrid providers and remembers
// which one last accepted a torrent (lastUsed is an index into debrids).
type DebridService struct {
	debrids []Service
	lastUsed int
}
// Get returns the most recently used provider. A lastUsed of zero — either
// "never used" or "first provider used" — resolves to debrids[0] either way,
// so a single index expression covers both cases.
func (d *DebridService) Get() Service {
	return d.debrids[d.lastUsed]
}

View File

@@ -0,0 +1,45 @@
package structs
// DebridLinkAPIResponse is the generic envelope returned by debrid-link.com
// API endpoints: a success flag plus an endpoint-specific payload.
type DebridLinkAPIResponse[T any] struct {
	Success bool `json:"success"`
	Value *T `json:"value"` // Use pointer to allow nil
}

// DebridLinkAvailableResponse is the /seedbox/cached payload. The outer map
// appears to be keyed by infohash — TODO confirm against the API docs.
type DebridLinkAvailableResponse DebridLinkAPIResponse[map[string]map[string]struct {
	Name string `json:"name"`
	HashString string `json:"hashString"`
	Files []struct {
		Name string `json:"name"`
		Size int `json:"size"`
	} `json:"files"`
}]

// debridLinkTorrentInfo is a single seedbox torrent entry as returned by
// /seedbox/list and /seedbox/add. Status == 100 is treated as fully
// downloaded by the callers in this package.
type debridLinkTorrentInfo struct {
	ID string `json:"id"`
	Name string `json:"name"`
	HashString string `json:"hashString"`
	UploadRatio float64 `json:"uploadRatio"`
	ServerID string `json:"serverId"`
	Wait bool `json:"wait"`
	PeersConnected int `json:"peersConnected"`
	Status int `json:"status"`
	TotalSize int64 `json:"totalSize"`
	Files []struct {
		ID string `json:"id"`
		Name string `json:"name"`
		DownloadURL string `json:"downloadUrl"`
		Size int64 `json:"size"`
		DownloadPercent int `json:"downloadPercent"`
	} `json:"files"`
	Trackers []struct {
		Announce string `json:"announce"`
	} `json:"trackers"`
	Created int64 `json:"created"`
	DownloadPercent float64 `json:"downloadPercent"`
	DownloadSpeed int `json:"downloadSpeed"`
	UploadSpeed int `json:"uploadSpeed"`
}

// DebridLinkTorrentInfo is the /seedbox/list response (a list of torrents).
type DebridLinkTorrentInfo DebridLinkAPIResponse[[]debridLinkTorrentInfo]

// DebridLinkSubmitTorrentInfo is the /seedbox/add response (one torrent).
type DebridLinkSubmitTorrentInfo DebridLinkAPIResponse[debridLinkTorrentInfo]

View File

@@ -75,7 +75,7 @@ type RealDebridTorrentInfo struct {
OriginalFilename string `json:"original_filename"`
Hash string `json:"hash"`
Bytes int64 `json:"bytes"`
OriginalBytes int `json:"original_bytes"`
OriginalBytes int64 `json:"original_bytes"`
Host string `json:"host"`
Split int `json:"split"`
Progress float64 `json:"progress"`
@@ -84,12 +84,12 @@ type RealDebridTorrentInfo struct {
Files []struct {
ID int `json:"id"`
Path string `json:"path"`
Bytes int `json:"bytes"`
Bytes int64 `json:"bytes"`
Selected int `json:"selected"`
} `json:"files"`
Links []string `json:"links"`
Ended string `json:"ended,omitempty"`
Speed int64 `json:"speed,omitempty"`
Speed int `json:"speed,omitempty"`
Seeders int `json:"seeders,omitempty"`
}
@@ -97,11 +97,11 @@ type RealDebridUnrestrictResponse struct {
Id string `json:"id"`
Filename string `json:"filename"`
MimeType string `json:"mimeType"`
Filesize int64 `json:"filesize"`
Filesize int `json:"filesize"`
Link string `json:"link"`
Host string `json:"host"`
Chunks int64 `json:"chunks"`
Crc int64 `json:"crc"`
Chunks int `json:"chunks"`
Crc int `json:"crc"`
Download string `json:"download"`
Streamable int `json:"streamable"`
}

View File

@@ -0,0 +1,75 @@
package structs
import "time"
// TorboxAPIResponse is the generic envelope returned by Torbox API
// endpoints: success flag, optional error/detail, and a typed payload.
type TorboxAPIResponse[T any] struct {
	Success bool `json:"success"`
	Error any `json:"error"`
	Detail string `json:"detail"`
	Data *T `json:"data"` // Use pointer to allow nil
}

// TorBoxAvailableResponse is the /api/torrents/checkcached payload, keyed by
// infohash. Callers treat Size > 0 as "cached".
type TorBoxAvailableResponse TorboxAPIResponse[map[string]struct {
	Name string `json:"name"`
	Size int `json:"size"`
	Hash string `json:"hash"`
}]

// TorBoxAddMagnetResponse is the /api/torrents/createtorrent payload.
type TorBoxAddMagnetResponse TorboxAPIResponse[struct {
	Id int `json:"torrent_id"`
	Hash string `json:"hash"`
}]

// torboxInfo is a single torrent entry from /api/torrents/mylist.
type torboxInfo struct {
	Id int `json:"id"`
	AuthId string `json:"auth_id"`
	Server int `json:"server"`
	Hash string `json:"hash"`
	Name string `json:"name"`
	Magnet interface{} `json:"magnet"`
	Size int64 `json:"size"`
	Active bool `json:"active"`
	CreatedAt time.Time `json:"created_at"`
	UpdatedAt time.Time `json:"updated_at"`
	DownloadState string `json:"download_state"`
	Seeds int `json:"seeds"`
	Peers int `json:"peers"`
	Ratio float64 `json:"ratio"`
	Progress float64 `json:"progress"`
	DownloadSpeed int `json:"download_speed"`
	UploadSpeed int `json:"upload_speed"`
	Eta int `json:"eta"`
	TorrentFile bool `json:"torrent_file"`
	ExpiresAt interface{} `json:"expires_at"`
	DownloadPresent bool `json:"download_present"`
	Files []struct {
		Id int `json:"id"`
		Md5 interface{} `json:"md5"`
		Hash string `json:"hash"`
		Name string `json:"name"`
		Size int64 `json:"size"`
		Zipped bool `json:"zipped"`
		S3Path string `json:"s3_path"`
		Infected bool `json:"infected"`
		Mimetype string `json:"mimetype"`
		ShortName string `json:"short_name"`
		AbsolutePath string `json:"absolute_path"`
	} `json:"files"`
	DownloadPath string `json:"download_path"`
	InactiveCheck int `json:"inactive_check"`
	Availability int `json:"availability"`
	DownloadFinished bool `json:"download_finished"`
	Tracker interface{} `json:"tracker"`
	TotalUploaded int `json:"total_uploaded"`
	TotalDownloaded int `json:"total_downloaded"`
	Cached bool `json:"cached"`
	Owner string `json:"owner"`
	SeedTorrent bool `json:"seed_torrent"`
	AllowZipped bool `json:"allow_zipped"`
	LongTermSeeding bool `json:"long_term_seeding"`
	TrackerMessage interface{} `json:"tracker_message"`
}

// TorboxInfoResponse is the /api/torrents/mylist response for a single id.
type TorboxInfoResponse TorboxAPIResponse[torboxInfo]

// TorBoxDownloadLinksResponse is the /api/torrents/requestdl response; the
// payload is the download URL itself.
type TorBoxDownloadLinksResponse TorboxAPIResponse[string]

303
pkg/debrid/torbox.go Normal file
View File

@@ -0,0 +1,303 @@
package debrid
import (
"bytes"
"encoding/json"
"fmt"
"goBlack/common"
"goBlack/pkg/debrid/structs"
"log"
"mime/multipart"
"net/http"
gourl "net/url"
"os"
"path"
"path/filepath"
"slices"
"strconv"
"strings"
)
// Torbox is the torbox.app provider implementation. Shared configuration
// (host, API key, rate-limited client, cache, logger) lives in BaseDebrid.
type Torbox struct {
	BaseDebrid
}
// GetMountPath returns the local folder where this provider's files are mounted.
func (r *Torbox) GetMountPath() string {
	return r.MountPath
}
// GetName returns the provider's configured name (set to "torbox" in NewTorbox).
func (r *Torbox) GetName() string {
	return r.Name
}
// GetLogger returns the per-provider logger.
func (r *Torbox) GetLogger() *log.Logger {
	return r.logger
}
// IsAvailable reports, for each infohash, whether Torbox already has the
// torrent cached. Locally cached answers are reused; remaining hashes are
// queried against /api/torrents/checkcached in batches, and the combined
// results are written back to the local cache.
func (r *Torbox) IsAvailable(infohashes []string) map[string]bool {
	// Check if the infohashes are available in the local cache
	hashes, result := GetLocalCache(infohashes, r.cache)
	if len(hashes) == 0 {
		// Either all the infohashes are locally cached or none are
		r.cache.AddMultiple(result)
		return result
	}
	// Divide the remaining hashes into batches of 200
	for i := 0; i < len(hashes); i += 200 {
		end := i + 200
		if end > len(hashes) {
			end = len(hashes)
		}
		// Filter out empty strings
		validHashes := make([]string, 0, end-i)
		for _, hash := range hashes[i:end] {
			if hash != "" {
				validHashes = append(validHashes, hash)
			}
		}
		// If no valid hashes in this batch, continue to the next batch
		if len(validHashes) == 0 {
			continue
		}
		hashStr := strings.Join(validHashes, ",")
		url := fmt.Sprintf("%s/api/torrents/checkcached?hash=%s", r.Host, hashStr)
		req, _ := http.NewRequest(http.MethodGet, url, nil)
		resp, err := r.client.MakeRequest(req)
		if err != nil {
			log.Println("Error checking availability:", err)
			return result
		}
		var res structs.TorBoxAvailableResponse
		err = json.Unmarshal(resp, &res)
		if err != nil {
			log.Println("Error unmarshalling availability:", err)
			return result
		}
		if res.Data == nil {
			return result
		}
		// A non-zero size means Torbox has the torrent cached. Keys are
		// upper-cased — presumably to match the hash case used elsewhere
		// in the app; verify against callers.
		for h, cache := range *res.Data {
			if cache.Size > 0 {
				result[strings.ToUpper(h)] = true
			}
		}
	}
	r.cache.AddMultiple(result) // Add the results to the cache
	return result
}
// SubmitMagnet adds the torrent's magnet link to Torbox via the
// multipart /api/torrents/createtorrent endpoint and records the returned
// torrent id on the Torrent. Unlike the other providers, only the id is
// filled in here; GetTorrent fetches the rest.
func (r *Torbox) SubmitMagnet(torrent *Torrent) (*Torrent, error) {
	url := fmt.Sprintf("%s/api/torrents/createtorrent", r.Host)
	payload := &bytes.Buffer{}
	writer := multipart.NewWriter(payload)
	_ = writer.WriteField("magnet", torrent.Magnet.Link)
	err := writer.Close()
	if err != nil {
		return nil, err
	}
	req, _ := http.NewRequest(http.MethodPost, url, payload)
	req.Header.Set("Content-Type", writer.FormDataContentType())
	resp, err := r.client.MakeRequest(req)
	if err != nil {
		return nil, err
	}
	var data structs.TorBoxAddMagnetResponse
	err = json.Unmarshal(resp, &data)
	if err != nil {
		return nil, err
	}
	// Guard on the API's success flag as well as the payload pointer.
	if !data.Success || data.Data == nil {
		return nil, fmt.Errorf("error adding torrent")
	}
	dt := *data.Data
	torrentId := strconv.Itoa(dt.Id)
	log.Printf("Torrent: %s added with id: %s\n", torrent.Name, torrentId)
	torrent.Id = torrentId
	return torrent, nil
}
// getStatus maps a Torbox download_state to the internal status vocabulary:
// "downloaded" when the download has finished (regardless of state),
// "downloading" for any known in-progress state, "error" otherwise.
func getStatus(status string, finished bool) string {
	if finished {
		return "downloaded"
	}
	// All states that represent normal progress (deduplicated; the original
	// list repeated "downloading", "metaDL" and "checkingResumeData").
	downloading := []string{
		"completed", "cached", "paused", "downloading", "uploading",
		"checkingResumeData", "metaDL", "pausedUP", "queuedUP", "checkingUP",
		"forcedUP", "allocating", "pausedDL", "queuedDL", "checkingDL",
		"forcedDL", "moving",
	}
	if slices.Contains(downloading, status) {
		return "downloading"
	}
	return "error"
}
// GetTorrent fetches a single torrent by id from /api/torrents/mylist and
// maps it into the internal Torrent representation, keeping only video,
// subtitle and music files (samples excluded).
//
// Fix: res.Data is now nil-checked before use — the API returns a nil Data
// on failure and the previous code dereferenced it unconditionally.
func (r *Torbox) GetTorrent(id string) (*Torrent, error) {
	torrent := &Torrent{}
	url := fmt.Sprintf("%s/api/torrents/mylist/?id=%s", r.Host, id)
	req, _ := http.NewRequest(http.MethodGet, url, nil)
	resp, err := r.client.MakeRequest(req)
	if err != nil {
		return torrent, err
	}
	var res structs.TorboxInfoResponse
	err = json.Unmarshal(resp, &res)
	if err != nil {
		return torrent, err
	}
	// Guard against a failed response; data is a pointer and may be nil.
	if !res.Success || res.Data == nil {
		return torrent, fmt.Errorf("torrent not found")
	}
	data := res.Data
	name := data.Name
	torrent.Id = id
	torrent.Name = name
	torrent.Bytes = data.Size
	torrent.Folder = name
	torrent.Progress = data.Progress * 100
	torrent.Status = getStatus(data.DownloadState, data.DownloadFinished)
	torrent.Speed = data.DownloadSpeed
	torrent.Seeders = data.Seeds
	torrent.Filename = name
	torrent.OriginalFilename = name
	files := make([]TorrentFile, 0)
	if len(data.Files) == 0 {
		return torrent, fmt.Errorf("no files found for torrent: %s", name)
	}
	for _, f := range data.Files {
		fileName := filepath.Base(f.Name)
		// Keep only media files; drop samples.
		if (!common.RegexMatch(common.VIDEOMATCH, fileName) &&
			!common.RegexMatch(common.SUBMATCH, fileName) &&
			!common.RegexMatch(common.MUSICMATCH, fileName)) || common.RegexMatch(common.SAMPLEMATCH, fileName) {
			continue
		}
		file := TorrentFile{
			Id:   strconv.Itoa(f.Id),
			Name: fileName,
			Size: f.Size,
			Path: fileName,
		}
		files = append(files, file)
	}
	if len(files) == 0 {
		return torrent, fmt.Errorf("no video files found")
	}
	// The first path segment of the first file is the torrent's folder name.
	cleanPath := path.Clean(data.Files[0].Name)
	torrent.OriginalFilename = strings.Split(cleanPath, "/")[0]
	torrent.Files = files
	torrent.Debrid = r
	return torrent, nil
}
// CheckStatus refreshes the torrent from Torbox and decides how to proceed:
// downloaded torrents get their download links resolved (unless isSymlink is
// true); torrents still downloading either break out (uncached downloads
// allowed) or are deleted in the background and reported as an error.
func (r *Torbox) CheckStatus(torrent *Torrent, isSymlink bool) (*Torrent, error) {
	for {
		tb, err := r.GetTorrent(torrent.Id)
		// Keep the freshest snapshot even when the fetch failed.
		torrent = tb
		if err != nil || tb == nil {
			return tb, err
		}
		status := torrent.Status
		if status == "error" || status == "dead" || status == "magnet_error" {
			return torrent, fmt.Errorf("torrent: %s has error", torrent.Name)
		} else if status == "downloaded" {
			r.logger.Printf("Torrent: %s downloaded\n", torrent.Name)
			if !isSymlink {
				err = r.GetDownloadLinks(torrent)
				if err != nil {
					return torrent, err
				}
			}
			break
		} else if status == "downloading" {
			if !r.DownloadUncached {
				// Uncached downloads are disabled: drop the torrent
				// asynchronously and report it as not cached.
				go torrent.Delete()
				return torrent, fmt.Errorf("torrent: %s not cached", torrent.Name)
			}
			// Break out of the loop if the torrent is downloading.
			// This is necessary to prevent infinite loop since we moved to sync downloading and async processing
			break
		}
	}
	return torrent, nil
}
// DeleteTorrent removes the torrent from Torbox via the controltorrent
// endpoint and logs the outcome. The torrent id travels both in the path and
// in the JSON body, mirroring the original call shape.
//
// Fix: the URL previously contained a double slash ("/api//torrents/...").
func (r *Torbox) DeleteTorrent(torrent *Torrent) {
	url := fmt.Sprintf("%s/api/torrents/controltorrent/%s", r.Host, torrent.Id)
	payload := map[string]string{"torrent_id": torrent.Id, "action": "Delete"}
	jsonPayload, _ := json.Marshal(payload)
	req, _ := http.NewRequest(http.MethodDelete, url, bytes.NewBuffer(jsonPayload))
	_, err := r.client.MakeRequest(req)
	if err == nil {
		r.logger.Printf("Torrent: %s deleted\n", torrent.Name)
	} else {
		r.logger.Printf("Error deleting torrent: %s", err)
	}
}
// GetDownloadLinks requests a download URL from /api/torrents/requestdl for
// every file of the torrent and stores the results on the Torrent.
//
// Fix: the original used a hard-coded idx of 0, so every download link was
// labeled with the first file's name; each link now uses its own file's name.
func (r *Torbox) GetDownloadLinks(torrent *Torrent) error {
	downloadLinks := make([]TorrentDownloadLinks, 0, len(torrent.Files))
	for _, file := range torrent.Files {
		url := fmt.Sprintf("%s/api/torrents/requestdl/", r.Host)
		query := gourl.Values{}
		query.Add("torrent_id", torrent.Id)
		query.Add("token", r.APIKey)
		query.Add("file_id", file.Id)
		url += "?" + query.Encode()
		req, _ := http.NewRequest(http.MethodGet, url, nil)
		resp, err := r.client.MakeRequest(req)
		if err != nil {
			return err
		}
		var data structs.TorBoxDownloadLinksResponse
		if err = json.Unmarshal(resp, &data); err != nil {
			return err
		}
		if data.Data == nil {
			return fmt.Errorf("error getting download links")
		}
		link := *data.Data
		downloadLinks = append(downloadLinks, TorrentDownloadLinks{
			Link:         link,
			Filename:     file.Name, // the file this link belongs to
			DownloadLink: link,
		})
	}
	torrent.DownloadLinks = downloadLinks
	return nil
}
// GetCheckCached reports whether availability should be checked before
// submitting torrents to this provider.
func (r *Torbox) GetCheckCached() bool {
	return r.CheckCached
}
// NewTorbox builds a Torbox provider from the given config, wiring up a
// rate-limited HTTP client with bearer auth, a per-provider logger, and the
// shared availability cache.
func NewTorbox(dc common.DebridConfig, cache *common.Cache) *Torbox {
	headers := map[string]string{
		"Authorization": fmt.Sprintf("Bearer %s", dc.APIKey),
	}
	base := BaseDebrid{
		Name:             "torbox",
		Host:             dc.Host,
		APIKey:           dc.APIKey,
		DownloadUncached: dc.DownloadUncached,
		client:           common.NewRLHTTPClient(common.ParseRateLimit(dc.RateLimit), headers),
		cache:            cache,
		MountPath:        dc.Folder,
		logger:           common.NewLogger(dc.Name, os.Stdout),
		CheckCached:      dc.CheckCached,
	}
	return &Torbox{BaseDebrid: base}
}

View File

@@ -2,13 +2,14 @@ package debrid
import (
"goBlack/common"
"goBlack/pkg/arr"
"os"
"path/filepath"
)
type Arr struct {
Name string `json:"name"`
Token string `json:"token"`
Token string `json:"-"`
Host string `json:"host"`
}
@@ -38,13 +39,13 @@ type Torrent struct {
Status string `json:"status"`
Added string `json:"added"`
Progress float64 `json:"progress"`
Speed int64 `json:"speed"`
Speed int `json:"speed"`
Seeders int `json:"seeders"`
Links []string `json:"links"`
DownloadLinks []TorrentDownloadLinks `json:"download_links"`
Debrid *Debrid
Arr *Arr
Debrid Service
Arr *arr.Arr
}
type TorrentDownloadLinks struct {
@@ -58,16 +59,26 @@ func (t *Torrent) GetSymlinkFolder(parent string) string {
}
func (t *Torrent) GetMountFolder(rClonePath string) string {
pathWithNoExt := common.RemoveExtension(t.OriginalFilename)
if common.FileReady(filepath.Join(rClonePath, t.OriginalFilename)) {
return t.OriginalFilename
} else if common.FileReady(filepath.Join(rClonePath, t.Filename)) {
return t.Filename
} else if common.FileReady(filepath.Join(rClonePath, pathWithNoExt)) {
return pathWithNoExt
} else {
return ""
possiblePaths := []string{
t.OriginalFilename,
t.Filename,
common.RemoveExtension(t.OriginalFilename),
}
for _, path := range possiblePaths {
if path != "" && common.FileReady(filepath.Join(rClonePath, path)) {
return path
}
}
return ""
}
func (t *Torrent) Delete() {
if t.Debrid == nil {
return
}
t.Debrid.DeleteTorrent(t)
}
type TorrentFile struct {
@@ -75,6 +86,7 @@ type TorrentFile struct {
Name string `json:"name"`
Size int64 `json:"size"`
Path string `json:"path"`
Link string `json:"link"`
}
func getEventId(eventType string) int {

View File

@@ -40,7 +40,7 @@ Loop:
fmt.Printf(" %s: transferred %d / %d bytes (%.2f%%)\n",
resp.Filename,
resp.BytesComplete(),
resp.Size,
resp.Size(),
100*resp.Progress())
case <-resp.Done:

View File

@@ -3,7 +3,9 @@ package proxy
import (
"bytes"
"cmp"
"context"
"encoding/xml"
"errors"
"fmt"
"github.com/elazarl/goproxy"
"github.com/elazarl/goproxy/ext/auth"
@@ -77,7 +79,7 @@ type Proxy struct {
logger *log.Logger
}
func NewProxy(config common.Config, deb debrid.Service, cache *common.Cache) *Proxy {
func NewProxy(config common.Config, deb *debrid.DebridService, cache *common.Cache) *Proxy {
cfg := config.Proxy
port := cmp.Or(os.Getenv("PORT"), cfg.Port, "8181")
return &Proxy{
@@ -87,7 +89,7 @@ func NewProxy(config common.Config, deb debrid.Service, cache *common.Cache) *Pr
username: cfg.Username,
password: cfg.Password,
cachedOnly: *cfg.CachedOnly,
debrid: deb,
debrid: deb.Get(),
cache: cache,
logger: common.NewLogger("Proxy", os.Stdout),
}
@@ -308,7 +310,7 @@ func UrlMatches(re *regexp.Regexp) goproxy.ReqConditionFunc {
}
}
func (p *Proxy) Start() {
func (p *Proxy) Start(ctx context.Context) error {
username, password := p.username, p.password
proxy := goproxy.NewProxyHttpServer()
if username != "" || password != "" {
@@ -328,6 +330,17 @@ func (p *Proxy) Start() {
proxy.Verbose = p.debug
portFmt := fmt.Sprintf(":%s", p.port)
srv := &http.Server{
Addr: portFmt,
Handler: proxy,
}
p.logger.Printf("[*] Starting proxy server on %s\n", portFmt)
p.logger.Fatal(http.ListenAndServe(fmt.Sprintf("%s", portFmt), proxy))
go func() {
if err := srv.ListenAndServe(); err != nil && !errors.Is(err, http.ErrServerClosed) {
p.logger.Printf("Error starting proxy server: %v\n", err)
}
}()
<-ctx.Done()
p.logger.Println("Shutting down gracefully...")
return srv.Shutdown(context.Background())
}

View File

@@ -1,103 +0,0 @@
package qbit
import (
"bytes"
"cmp"
"encoding/json"
"goBlack/common"
"goBlack/pkg/debrid"
"net/http"
gourl "net/url"
"strconv"
"strings"
)
func (q *QBit) RefreshArr(arr *debrid.Arr) {
if arr.Token == "" || arr.Host == "" {
return
}
url, err := common.JoinURL(arr.Host, "api/v3/command")
if err != nil {
return
}
payload := map[string]string{"name": "RefreshMonitoredDownloads"}
jsonPayload, err := json.Marshal(payload)
if err != nil {
return
}
client := &http.Client{}
req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonPayload))
if err != nil {
return
}
req.Header.Set("Content-Type", "application/json")
req.Header.Set("X-Api-Key", arr.Token)
resp, reqErr := client.Do(req)
if reqErr == nil {
statusOk := strconv.Itoa(resp.StatusCode)[0] == '2'
if statusOk {
if q.debug {
q.logger.Printf("Refreshed monitored downloads for %s", cmp.Or(arr.Name, arr.Host))
}
}
}
if reqErr != nil {
}
}
func (q *QBit) GetArrHistory(arr *debrid.Arr, downloadId, eventType string) *debrid.ArrHistorySchema {
query := gourl.Values{}
if downloadId != "" {
query.Add("downloadId", downloadId)
}
query.Add("eventType", eventType)
query.Add("pageSize", "100")
url, _ := common.JoinURL(arr.Host, "history")
url += "?" + query.Encode()
resp, err := http.Get(url)
if err != nil {
return nil
}
var data *debrid.ArrHistorySchema
if err = json.NewDecoder(resp.Body).Decode(&data); err != nil {
return nil
}
return data
}
func (q *QBit) MarkArrAsFailed(torrent *Torrent, arr *debrid.Arr) error {
downloadId := strings.ToUpper(torrent.Hash)
history := q.GetArrHistory(arr, downloadId, "grabbed")
if history == nil {
return nil
}
torrentId := 0
for _, record := range history.Records {
if strings.EqualFold(record.DownloadID, downloadId) {
torrentId = record.ID
break
}
}
if torrentId != 0 {
url, err := common.JoinURL(arr.Host, "history/failed/", strconv.Itoa(torrentId))
if err != nil {
return err
}
req, err := http.NewRequest(http.MethodPost, url, nil)
if err != nil {
return err
}
client := &http.Client{}
_, err = client.Do(req)
if err == nil {
q.logger.Printf("Marked torrent: %s as failed", torrent.Name)
}
}
return nil
}

View File

@@ -1,41 +0,0 @@
package qbit
import (
"github.com/go-chi/chi/v5"
"net/http"
)
func (q *QBit) AddRoutes(r chi.Router) http.Handler {
r.Route("/api/v2", func(r chi.Router) {
r.Post("/auth/login", q.handleLogin)
r.Group(func(r chi.Router) {
//r.Use(q.authMiddleware)
r.Use(q.authContext)
r.Route("/torrents", func(r chi.Router) {
r.Use(HashesCtx)
r.Get("/info", q.handleTorrentsInfo)
r.Post("/add", q.handleTorrentsAdd)
r.Post("/delete", q.handleTorrentsDelete)
r.Get("/categories", q.handleCategories)
r.Post("/createCategory", q.handleCreateCategory)
r.Get("/pause", q.handleTorrentsPause)
r.Get("/resume", q.handleTorrentsResume)
r.Get("/recheck", q.handleTorrentRecheck)
r.Get("/properties", q.handleTorrentProperties)
r.Get("/files", q.handleTorrentFiles)
})
r.Route("/app", func(r chi.Router) {
r.Get("/version", q.handleVersion)
r.Get("/webapiVersion", q.handleWebAPIVersion)
r.Get("/preferences", q.handlePreferences)
r.Get("/buildInfo", q.handleBuildInfo)
r.Get("/shutdown", q.shutdown)
})
})
})
return r
}

View File

@@ -1,40 +0,0 @@
package qbit
import (
"net/http"
"path/filepath"
)
// handleVersion reports the qBittorrent application version this API
// impersonates.
func (q *QBit) handleVersion(w http.ResponseWriter, r *http.Request) {
	_, _ = w.Write([]byte("v4.3.2"))
}

// handleWebAPIVersion reports the qBittorrent Web API version this API
// impersonates.
func (q *QBit) handleWebAPIVersion(w http.ResponseWriter, r *http.Request) {
	_, _ = w.Write([]byte("2.7"))
}

// handlePreferences returns app preferences with the username and the
// save/temp paths overridden by this instance's configuration.
func (q *QBit) handlePreferences(w http.ResponseWriter, r *http.Request) {
	preferences := NewAppPreferences()
	preferences.WebUiUsername = q.Username
	preferences.SavePath = q.DownloadFolder
	preferences.TempPath = filepath.Join(q.DownloadFolder, "temp")
	JSONResponse(w, preferences, http.StatusOK)
}

// handleBuildInfo returns static build metadata matching the impersonated
// qBittorrent build.
func (q *QBit) handleBuildInfo(w http.ResponseWriter, r *http.Request) {
	res := BuildInfo{
		Bitness:    64,
		Boost:      "1.75.0",
		Libtorrent: "1.2.11.0",
		Openssl:    "1.1.1i",
		Qt:         "5.15.2",
		Zlib:       "1.2.11",
	}
	JSONResponse(w, res, http.StatusOK)
}

// shutdown acknowledges qBittorrent's shutdown endpoint with 200 OK without
// actually stopping the process.
func (q *QBit) shutdown(w http.ResponseWriter, r *http.Request) {
	w.WriteHeader(http.StatusOK)
}

View File

@@ -1,9 +0,0 @@
package qbit
import (
"net/http"
)
// handleLogin is a stub for qBittorrent's login endpoint: it always replies
// "Ok." (the literal body real qBittorrent sends on success) without
// validating any credentials.
func (q *QBit) handleLogin(w http.ResponseWriter, r *http.Request) {
	_, _ = w.Write([]byte("Ok."))
}

View File

@@ -1,80 +1,17 @@
package qbit
import (
"cmp"
"context"
"fmt"
"github.com/go-chi/chi/v5"
"github.com/go-chi/chi/v5/middleware"
"goBlack/common"
"goBlack/pkg/debrid"
"log"
"net/http"
"os"
"sync"
"time"
"goBlack/pkg/qbit/server"
)
// WorkerType pairs a periodic ticker with the context that bounds one
// background worker's lifetime.
type WorkerType struct {
	ticker *time.Ticker
	ctx    context.Context
}

// Worker holds the set of named background workers.
type Worker struct {
	types map[string]WorkerType
}

// QBit is the qBittorrent-compatible facade: it serves the qBit Web API on
// Port while delegating actual downloads to the configured debrid service.
type QBit struct {
	Username       string   `json:"username"`
	Password       string   `json:"password"`
	Port           string   `json:"port"`
	DownloadFolder string   `json:"download_folder"`
	Categories     []string `json:"categories"`
	// Unexported collaborators, wired in NewQBit.
	debrid  debrid.Service
	cache   *common.Cache
	storage *TorrentStorage
	debug   bool
	logger  *log.Logger
	arrs    sync.Map // host -> token, collected from auth headers (used for refreshing in worker)
	// RefreshInterval is the worker refresh period; presumably minutes — TODO confirm.
	RefreshInterval int
}
func NewQBit(config *common.Config, deb debrid.Service, cache *common.Cache) *QBit {
cfg := config.QBitTorrent
storage := NewTorrentStorage("torrents.json")
port := cmp.Or(cfg.Port, os.Getenv("QBIT_PORT"), "8182")
refreshInterval := cmp.Or(cfg.RefreshInterval, 10)
return &QBit{
Username: cfg.Username,
Password: cfg.Password,
Port: port,
DownloadFolder: cfg.DownloadFolder,
Categories: cfg.Categories,
debrid: deb,
cache: cache,
debug: cfg.Debug,
storage: storage,
logger: common.NewLogger("QBit", os.Stdout),
arrs: sync.Map{},
RefreshInterval: refreshInterval,
func Start(ctx context.Context, config *common.Config, deb *debrid.DebridService, cache *common.Cache) error {
srv := server.NewServer(config, deb, cache)
if err := srv.Start(ctx); err != nil {
return fmt.Errorf("failed to start qbit server: %w", err)
}
}
func (q *QBit) Start() {
r := chi.NewRouter()
if q.debug {
r.Use(middleware.Logger)
}
r.Use(middleware.Recoverer)
q.AddRoutes(r)
ctx := context.Background()
go q.StartWorker(ctx)
q.logger.Printf("Starting QBit server on :%s", q.Port)
port := fmt.Sprintf(":%s", q.Port)
q.logger.Fatal(http.ListenAndServe(port, r))
return nil
}

View File

@@ -1,107 +0,0 @@
package qbit
import (
"context"
"fmt"
"github.com/google/uuid"
"goBlack/common"
"goBlack/pkg/debrid"
"io"
"mime/multipart"
"strings"
"time"
)
// AddMagnet parses a magnet URL and hands the result to Process under the
// given category. Failures are logged here and returned to the caller
// unchanged.
func (q *QBit) AddMagnet(ctx context.Context, url, category string) error {
	magnet, err := common.GetMagnetFromUrl(url)
	if err != nil {
		q.logger.Printf("Error parsing magnet link: %v\n", err)
		return err
	}
	if err := q.Process(ctx, magnet, category); err != nil {
		q.logger.Println("Failed to process magnet:", err)
		return err
	}
	return nil
}
// AddTorrent reads an uploaded .torrent file, extracts its magnet, and hands
// it to Process under the given category. Failures are logged and returned.
func (q *QBit) AddTorrent(ctx context.Context, fileHeader *multipart.FileHeader, category string) error {
	// Open can fail (e.g. temp file removed); an unchecked error here would
	// leave file nil and panic on the deferred Close / first read.
	file, err := fileHeader.Open()
	if err != nil {
		q.logger.Printf("Error opening file: %s", fileHeader.Filename)
		return err
	}
	defer file.Close()
	var reader io.Reader = file
	magnet, err := common.GetMagnetFromFile(reader, fileHeader.Filename)
	if err != nil {
		q.logger.Printf("Error reading file: %s", fileHeader.Filename)
		return err
	}
	err = q.Process(ctx, magnet, category)
	if err != nil {
		q.logger.Println("Failed to process torrent:", err)
		return err
	}
	return nil
}
// Process registers a torrent locally and submits it to the debrid service,
// then kicks off asynchronous file processing.
//
// The arr identity (token/host) and the symlink flag are read from ctx,
// where the auth middleware stored them. Comma-ok assertions are used so a
// request that skipped the middleware yields an error instead of a panic.
func (q *QBit) Process(ctx context.Context, magnet *common.Magnet, category string) error {
	torrent := q.CreateTorrentFromMagnet(magnet, category)
	token, ok := ctx.Value("token").(string)
	if !ok {
		return fmt.Errorf("missing token in request context")
	}
	host, ok := ctx.Value("host").(string)
	if !ok {
		return fmt.Errorf("missing host in request context")
	}
	arr := &debrid.Arr{
		Name:  category,
		Token: token,
		Host:  host,
	}
	isSymlink, ok := ctx.Value("isSymlink").(bool)
	if !ok {
		return fmt.Errorf("missing isSymlink in request context")
	}
	debridTorrent, err := debrid.ProcessQBitTorrent(q.debrid, magnet, arr, isSymlink)
	if err != nil || debridTorrent == nil {
		if err == nil {
			err = fmt.Errorf("failed to process torrent")
		}
		return err
	}
	torrent = q.UpdateTorrentMin(torrent, debridTorrent)
	q.storage.AddOrUpdate(torrent)
	go q.processFiles(torrent, debridTorrent, arr, isSymlink) // We can send async for file processing not to delay the response
	return nil
}
// CreateTorrentFromMagnet builds the qBittorrent-style record that mirrors a
// freshly added magnet: a new UUID, the lower-cased info hash, state
// "downloading", a placeholder tracker, and unlimited up/down rate limits.
func (q *QBit) CreateTorrentFromMagnet(magnet *common.Magnet, category string) *Torrent {
	return &Torrent{
		ID:         uuid.NewString(),
		Hash:       strings.ToLower(magnet.InfoHash),
		Name:       magnet.Name,
		Size:       magnet.Size,
		Category:   category,
		State:      "downloading",
		MagnetUri:  magnet.Link,
		Tracker:    "udp://tracker.opentrackr.org:1337",
		UpLimit:    -1,
		DlLimit:    -1,
		AutoTmm:    false,
		Ratio:      1,
		RatioLimit: 1,
	}
}
// processFiles polls the debrid service until the torrent reaches the
// "downloaded" state, then hands it off to symlink or manual-file handling.
// It runs in its own goroutine (launched from Process). On a status-check
// error the local torrent is marked failed and the arr is refreshed so it
// can retry with another release.
func (q *QBit) processFiles(torrent *Torrent, debridTorrent *debrid.Torrent, arr *debrid.Arr, isSymlink bool) {
	// NOTE(review): no upper bound here — a torrent that never reaches
	// "downloaded" keeps this goroutine polling forever.
	for debridTorrent.Status != "downloaded" {
		progress := debridTorrent.Progress
		q.logger.Printf("RD Download Progress: %.2f%%", progress)
		// Fixed 5s poll interval between status checks.
		time.Sleep(5 * time.Second)
		dbT, err := q.debrid.CheckStatus(debridTorrent, isSymlink)
		if err != nil {
			q.logger.Printf("Error checking status: %v", err)
			q.MarkAsFailed(torrent)
			q.RefreshArr(arr)
			return
		}
		// Keep both the debrid view and the local qbit record in sync.
		debridTorrent = dbT
		torrent = q.UpdateTorrentMin(torrent, debridTorrent)
	}
	if isSymlink {
		q.processSymlink(torrent, debridTorrent, arr)
	} else {
		q.processManualFiles(torrent, debridTorrent, arr)
	}
}

View File

@@ -0,0 +1,42 @@
package server
import (
"goBlack/common"
"goBlack/pkg/qbit/shared"
"net/http"
"path/filepath"
)
// handleVersion reports the qBittorrent application version this API
// impersonates.
func (s *Server) handleVersion(w http.ResponseWriter, r *http.Request) {
	_, _ = w.Write([]byte("v4.3.2"))
}

// handleWebAPIVersion reports the qBittorrent Web API version this API
// impersonates.
func (s *Server) handleWebAPIVersion(w http.ResponseWriter, r *http.Request) {
	_, _ = w.Write([]byte("2.7"))
}

// handlePreferences returns app preferences with the username and the
// save/temp paths overridden by this instance's configuration.
func (s *Server) handlePreferences(w http.ResponseWriter, r *http.Request) {
	preferences := shared.NewAppPreferences()
	preferences.WebUiUsername = s.qbit.Username
	preferences.SavePath = s.qbit.DownloadFolder
	preferences.TempPath = filepath.Join(s.qbit.DownloadFolder, "temp")
	common.JSONResponse(w, preferences, http.StatusOK)
}

// handleBuildInfo returns static build metadata matching the impersonated
// qBittorrent build.
func (s *Server) handleBuildInfo(w http.ResponseWriter, r *http.Request) {
	res := shared.BuildInfo{
		Bitness:    64,
		Boost:      "1.75.0",
		Libtorrent: "1.2.11.0",
		Openssl:    "1.1.1i",
		Qt:         "5.15.2",
		Zlib:       "1.2.11",
	}
	common.JSONResponse(w, res, http.StatusOK)
}

// shutdown acknowledges qBittorrent's shutdown endpoint with 200 OK without
// actually stopping the process.
func (s *Server) shutdown(w http.ResponseWriter, r *http.Request) {
	w.WriteHeader(http.StatusOK)
}

View File

@@ -0,0 +1,7 @@
package server
import "net/http"
// handleLogin is a stub for qBittorrent's login endpoint: it always replies
// "Ok." (the literal body real qBittorrent sends on success) without
// validating any credentials.
func (s *Server) handleLogin(w http.ResponseWriter, r *http.Request) {
	_, _ = w.Write([]byte("Ok."))
}

178
pkg/qbit/server/import.go Normal file
View File

@@ -0,0 +1,178 @@
package server
import (
"encoding/json"
"errors"
"fmt"
"github.com/google/uuid"
"goBlack/common"
"goBlack/pkg/arr"
"goBlack/pkg/debrid"
"sync"
"time"
)
// ImportRequest describes one torrent import job: where the torrent comes
// from (URI), which arr it targets, optional series/season/episode scoping,
// and the job's failure/completion bookkeeping.
type ImportRequest struct {
	ID        string   `json:"id"`
	Path      string   `json:"path"` // filesystem path, set once the torrent has been symlinked
	URI       string   `json:"uri"`
	Arr       *arr.Arr `json:"arr"`
	IsSymlink bool     `json:"isSymlink"`

	SeriesId int      `json:"series"`
	Seasons  []int    `json:"seasons"`
	Episodes []string `json:"episodes"`

	Failed      bool      `json:"failed"`
	FailedAt    time.Time `json:"failedAt"`
	Reason      string    `json:"reason"` // human-readable failure reason, set by Fail
	Completed   bool      `json:"completed"`
	CompletedAt time.Time `json:"completedAt"`
	Async       bool      `json:"async"`
}

// ManualImportResponseSchema mirrors the JSON body an arr returns from its
// manual-import command endpoint.
type ManualImportResponseSchema struct {
	Priority            string    `json:"priority"`
	Status              string    `json:"status"`
	Result              string    `json:"result"`
	Queued              time.Time `json:"queued"`
	Trigger             string    `json:"trigger"`
	SendUpdatesToClient bool      `json:"sendUpdatesToClient"`
	UpdateScheduledTask bool      `json:"updateScheduledTask"`
	Id                  int       `json:"id"`
}
// NewImportRequest returns a fresh, not-yet-processed import for the given
// torrent URI targeting the given arr, with a newly generated ID.
func NewImportRequest(uri string, a *arr.Arr, isSymlink bool) *ImportRequest {
	req := &ImportRequest{
		ID:        uuid.NewString(),
		URI:       uri,
		Arr:       a,
		IsSymlink: isSymlink,
		Failed:    false,
		Completed: false,
		Async:     false,
	}
	return req
}
// Fail marks the import as failed at the current time, recording why.
func (i *ImportRequest) Fail(reason string) {
	i.Failed = true
	i.FailedAt = time.Now()
	i.Reason = reason
}

// Complete marks the import as successfully completed at the current time.
func (i *ImportRequest) Complete() {
	i.Completed = true
	i.CompletedAt = time.Now()
}
// Process sends the import's torrent to the debrid service and registers the
// result with the qbit state; file processing continues in the background.
// This path hands delivery to the arr off to the qbit storage rather than
// importing directly (see BetaProcess for the direct-import variant).
func (i *ImportRequest) Process(s *Server) (err error) {
	// Use this for now.
	// This sends the torrent to the arr
	q := s.qbit
	magnet, err := common.GetMagnetFromUrl(i.URI)
	if err != nil {
		// A bad URI previously slipped through here with a nil magnet.
		return fmt.Errorf("error parsing magnet link: %w", err)
	}
	torrent := q.CreateTorrentFromMagnet(magnet, i.Arr.Name)
	debridTorrent, err := debrid.ProcessTorrent(q.Debrid, magnet, i.Arr, i.IsSymlink)
	if err != nil || debridTorrent == nil {
		if debridTorrent != nil {
			// Partial state exists on the debrid side; clean it up async.
			go debridTorrent.Delete()
		}
		if err == nil {
			err = fmt.Errorf("failed to process torrent")
		}
		return err
	}
	torrent = q.UpdateTorrentMin(torrent, debridTorrent)
	q.Storage.AddOrUpdate(torrent)
	// File processing can be slow; don't block the caller on it.
	go q.ProcessFiles(torrent, debridTorrent, i.Arr, i.IsSymlink)
	return nil
}
// BetaProcess imports the torrent directly into the arr via its
// manual-import endpoint: the torrent is run through the debrid service,
// symlinked into place, and the resulting path is submitted to the arr.
// Experimental — needs more work before replacing Process.
func (i *ImportRequest) BetaProcess(s *Server) (err error) {
	// THis actually imports the torrent into the arr. Needs more work
	if i.Arr == nil {
		return errors.New("invalid arr")
	}
	q := s.qbit
	magnet, err := common.GetMagnetFromUrl(i.URI)
	if err != nil {
		return fmt.Errorf("error parsing magnet link: %w", err)
	}
	// Symlink mode is forced on this path regardless of i.IsSymlink.
	debridTorrent, err := debrid.ProcessTorrent(q.Debrid, magnet, i.Arr, true)
	if err != nil || debridTorrent == nil {
		if debridTorrent != nil {
			// Partial state exists on the debrid side; clean it up async.
			go debridTorrent.Delete()
		}
		if err == nil {
			err = fmt.Errorf("failed to process torrent")
		}
		return err
	}
	debridTorrent.Arr = i.Arr
	torrentPath, err := q.ProcessSymlink(debridTorrent)
	if err != nil {
		return fmt.Errorf("failed to process symlink: %w", err)
	}
	i.Path = torrentPath
	body, err := i.Arr.Import(torrentPath, i.SeriesId, i.Seasons)
	if err != nil {
		return fmt.Errorf("failed to import: %w", err)
	}
	defer body.Close()
	var resp ManualImportResponseSchema
	if err := json.NewDecoder(body).Decode(&resp); err != nil {
		return fmt.Errorf("failed to decode response: %w", err)
	}
	if resp.Status != "success" {
		return fmt.Errorf("failed to import: %s", resp.Result)
	}
	i.Complete()
	return
}
// ImportStore is an in-memory, mutex-guarded registry of import requests
// keyed by their ID.
type ImportStore struct {
	Imports map[string]*ImportRequest
	mu      sync.RWMutex
}

// NewImportStore returns an empty, ready-to-use store.
func NewImportStore() *ImportStore {
	return &ImportStore{
		Imports: make(map[string]*ImportRequest),
	}
}

// AddImport registers (or replaces) an import under its ID.
func (s *ImportStore) AddImport(i *ImportRequest) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.Imports[i.ID] = i
}

// GetImport returns the import with the given ID, or nil if absent.
func (s *ImportStore) GetImport(id string) *ImportRequest {
	s.mu.RLock()
	defer s.mu.RUnlock()
	return s.Imports[id]
}

// GetAllImports returns a snapshot of every stored import, in unspecified
// (map-iteration) order. The slice is pre-sized and non-nil even when the
// store is empty, so JSON encodes it as [] rather than null.
func (s *ImportStore) GetAllImports() []*ImportRequest {
	s.mu.RLock()
	defer s.mu.RUnlock()
	imports := make([]*ImportRequest, 0, len(s.Imports))
	for _, i := range s.Imports {
		imports = append(imports, i)
	}
	return imports
}

// DeleteImport removes the import with the given ID, if present.
func (s *ImportStore) DeleteImport(id string) {
	s.mu.Lock()
	defer s.mu.Unlock()
	delete(s.Imports, id)
}

// UpdateImport stores i under its ID, replacing any previous entry.
// Functionally identical to AddImport; kept for call-site readability.
func (s *ImportStore) UpdateImport(i *ImportRequest) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.Imports[i.ID] = i
}

View File

@@ -1,29 +1,14 @@
package qbit
package server
import (
"context"
"crypto/subtle"
"encoding/base64"
"github.com/go-chi/chi/v5"
"goBlack/pkg/arr"
"net/http"
"strings"
)
func (q *QBit) authMiddleware(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
user, pass, ok := r.BasicAuth()
if !ok {
http.Error(w, "Unauthorized", http.StatusUnauthorized)
return
}
if subtle.ConstantTimeCompare([]byte(user), []byte(q.Username)) != 1 || subtle.ConstantTimeCompare([]byte(pass), []byte(q.Password)) != 1 {
http.Error(w, "Unauthorized", http.StatusUnauthorized)
return
}
next.ServeHTTP(w, r)
})
}
func DecodeAuthHeader(header string) (string, string, error) {
encodedTokens := strings.Split(header, " ")
if len(encodedTokens) != 2 {
@@ -45,17 +30,38 @@ func DecodeAuthHeader(header string) (string, string, error) {
return host, token, nil
}
func (q *QBit) authContext(next http.Handler) http.Handler {
// CategoryContext extracts the torrent category from, in order: the query
// string, the URL-encoded form body, or the multipart form body, and stores
// it in the request context under "category" for downstream handlers.
func (s *Server) CategoryContext(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// TrimSpace, not Trim(s, ""): an empty cutset makes Trim a no-op.
		category := strings.TrimSpace(r.URL.Query().Get("category"))
		if category == "" {
			// Get from form
			_ = r.ParseForm()
			category = r.Form.Get("category")
			if category == "" {
				// Get from multipart form
				_ = r.ParseMultipartForm(0)
				category = r.FormValue("category")
			}
		}
		ctx := context.WithValue(r.Context(), "category", category)
		next.ServeHTTP(w, r.WithContext(ctx))
	})
}
func (s *Server) authContext(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
host, token, err := DecodeAuthHeader(r.Header.Get("Authorization"))
ctx := r.Context()
if err == nil {
ctx = context.WithValue(r.Context(), "host", host)
ctx = context.WithValue(ctx, "token", token)
q.arrs.Store(host, token)
next.ServeHTTP(w, r.WithContext(ctx))
return
category := r.Context().Value("category").(string)
a := &arr.Arr{
Name: category,
}
if err == nil {
a.Host = host
a.Token = token
}
s.qbit.Arrs.AddOrUpdate(a)
ctx := context.WithValue(r.Context(), "arr", a)
next.ServeHTTP(w, r.WithContext(ctx))
})
}

50
pkg/qbit/server/routes.go Normal file
View File

@@ -0,0 +1,50 @@
package server
import (
"github.com/go-chi/chi/v5"
"net/http"
)
// Routes mounts every HTTP route on r and returns it as an http.Handler:
// the qBittorrent-compatible API under /api/v2, the web UI at /, and the
// UI's JSON endpoints under /internal.
func (s *Server) Routes(r chi.Router) http.Handler {
	r.Route("/api/v2", func(r chi.Router) {
		// Resolve the category early so both auth and handlers can use it.
		r.Use(s.CategoryContext)
		r.Post("/auth/login", s.handleLogin)
		r.Group(func(r chi.Router) {
			// authContext extracts the arr host/token from the auth header.
			r.Use(s.authContext)
			r.Route("/torrents", func(r chi.Router) {
				// HashesCtx parses the "hashes" parameter into the context.
				r.Use(HashesCtx)
				r.Get("/info", s.handleTorrentsInfo)
				r.Post("/add", s.handleTorrentsAdd)
				r.Post("/delete", s.handleTorrentsDelete)
				r.Get("/categories", s.handleCategories)
				r.Post("/createCategory", s.handleCreateCategory)
				r.Get("/pause", s.handleTorrentsPause)
				r.Get("/resume", s.handleTorrentsResume)
				r.Get("/recheck", s.handleTorrentRecheck)
				r.Get("/properties", s.handleTorrentProperties)
				r.Get("/files", s.handleTorrentFiles)
			})
			r.Route("/app", func(r chi.Router) {
				r.Get("/version", s.handleVersion)
				r.Get("/webapiVersion", s.handleWebAPIVersion)
				r.Get("/preferences", s.handlePreferences)
				r.Get("/buildInfo", s.handleBuildInfo)
				r.Get("/shutdown", s.shutdown)
			})
		})
	})
	r.Get("/", s.handleHome)
	// Unauthenticated JSON endpoints backing the embedded web UI.
	r.Route("/internal", func(r chi.Router) {
		r.Get("/arrs", s.handleGetArrs)
		r.Get("/content", s.handleContent)
		r.Get("/seasons/{contentId}", s.handleSeasons)
		r.Get("/episodes/{contentId}", s.handleEpisodes)
		r.Post("/add", s.handleAddContent)
		r.Get("/search", s.handleSearch)
	})
	return r
}

66
pkg/qbit/server/server.go Normal file
View File

@@ -0,0 +1,66 @@
package server
import (
	"context"
	"errors"
	"fmt"
	"log"
	"net/http"
	"os"
	"os/signal"
	"syscall"
	"time"

	"github.com/go-chi/chi/v5"
	"github.com/go-chi/chi/v5/middleware"

	"goBlack/common"
	"goBlack/pkg/debrid"
	"goBlack/pkg/qbit/shared"
)
// Server hosts the qBittorrent-compatible HTTP API plus the internal web UI,
// delegating all torrent state to the shared QBit instance.
type Server struct {
	qbit   *shared.QBit
	logger *log.Logger
	debug  bool // enables chi request logging when true
}

// NewServer wires up a Server from the application config, the debrid
// service, and the shared cache.
func NewServer(config *common.Config, deb *debrid.DebridService, cache *common.Cache) *Server {
	logger := common.NewLogger("QBit", os.Stdout)
	q := shared.NewQBit(config, deb, cache, logger)
	return &Server{
		qbit:   q,
		logger: logger,
		debug:  config.QBitTorrent.Debug,
	}
}
// Start runs the HTTP server until ctx is cancelled or an interrupt/SIGTERM
// arrives, then shuts it down gracefully. The background worker shares the
// server's lifetime, so it stops when the server does.
func (s *Server) Start(ctx context.Context) error {
	r := chi.NewRouter()
	if s.debug {
		r.Use(middleware.Logger)
	}
	r.Use(middleware.Recoverer)
	r.Handle("/static/*", http.StripPrefix("/static/", http.FileServer(http.Dir("static"))))
	s.Routes(r)

	// Derive the lifetime context first so the worker is bound to it rather
	// than to context.Background() (which would leak it past shutdown).
	ctx, stop := signal.NotifyContext(ctx, os.Interrupt, syscall.SIGTERM)
	defer stop()
	go s.qbit.StartWorker(ctx)

	s.logger.Printf("Starting QBit server on :%s", s.qbit.Port)
	port := fmt.Sprintf(":%s", s.qbit.Port)
	srv := &http.Server{
		Addr:    port,
		Handler: r,
	}
	go func() {
		if err := srv.ListenAndServe(); err != nil && !errors.Is(err, http.ErrServerClosed) {
			// Use the structured logger, consistent with the rest of Server.
			s.logger.Printf("Error starting server: %v\n", err)
			stop()
		}
	}()
	<-ctx.Done()
	s.logger.Println("Shutting down gracefully...")
	// Give in-flight requests a bounded window to finish.
	shutdownCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	return srv.Shutdown(shutdownCtx)
}

View File

@@ -0,0 +1,334 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Debrid Manager</title>
<!-- Bootstrap CSS -->
<link href="https://cdn.jsdelivr.net/npm/bootstrap@5.3.0-alpha1/dist/css/bootstrap.min.css" rel="stylesheet">
<!-- Bootstrap Icons -->
<link href="https://cdn.jsdelivr.net/npm/bootstrap-icons@1.7.2/font/bootstrap-icons.css" rel="stylesheet">
<link href="https://cdn.jsdelivr.net/npm/select2@4.1.0-rc.0/dist/css/select2.min.css" rel="stylesheet"/>
<!-- Select2 Bootstrap 5 Theme CSS -->
<link rel="stylesheet"
href="https://cdn.jsdelivr.net/npm/select2-bootstrap-5-theme@1.3.0/dist/select2-bootstrap-5-theme.min.css"/>
<style>
.select2-container--bootstrap-5 .select2-results__option {
padding: 0.5rem;
}
.select2-result img {
border-radius: 4px;
}
.select2-container--bootstrap-5 .select2-results__option--highlighted {
background-color: #f8f9fa !important;
color: #000 !important;
}
.select2-container--bootstrap-5 .select2-results__option--selected {
background-color: #e9ecef !important;
}
</style>
</head>
<body>
<nav class="navbar navbar-expand-lg navbar-dark bg-dark">
<div class="container-fluid">
<span class="navbar-brand">Debrid Manager</span>
</div>
</nav>
<div class="container mt-4">
<div class="row">
<div class="col-md-8">
<div class="mb-3">
<label for="magnetURI" class="form-label">Magnet Link</label>
<textarea class="form-control" id="magnetURI" rows="3"></textarea>
</div>
<div class="mb-3">
<label for="selectArr" class="form-label">Enter Category</label>
<input type="email" class="form-control" id="selectArr" placeholder="Enter Category(e.g sonarr, radarr, radarr4k)">
</div>
<div class="form-check">
<input class="form-check-input" type="checkbox" value="" id="isSymlink">
<label class="form-check-label" for="isSymlink">
                            Not Symlink (download real files from Debrid instead of creating symlinks)
</label>
</div>
<div class="mt-3">
<button class="btn btn-primary" id="addToArr">
Add to Arr
</button>
</div>
</div>
<!-- <div class="col-md-6">-->
<!-- <div class="mb-3 d-none">-->
<!-- <select class="form-select mb-3 select2-ajax" id="selectContent">-->
<!-- <option></option>-->
<!-- </select>-->
<!-- </div>-->
<!-- <div class="mb-3 d-none">-->
<!-- <select class="form-select mb-3 select2-multi" id="selectSeason" multiple-->
<!-- style="width: 100%; display: none;">-->
<!-- <option value="all">Select All</option>-->
<!-- </select>-->
<!-- </div>-->
<!-- <div class="mb-4 d-none">-->
<!-- <select class="form-select mb-3 select2-multi" id="selectEpisode" multiple-->
<!-- style="width: 100%; display: none;">-->
<!-- <option value="all">Select All</option>-->
<!-- </select>-->
<!-- </div>-->
<!-- </div>-->
</div>
</div>
<!-- Bootstrap JS and Popper.js -->
<script src="https://cdn.jsdelivr.net/npm/bootstrap@5.3.0-alpha1/dist/js/bootstrap.bundle.min.js"></script>
<!-- jQuery -->
<script src="https://code.jquery.com/jquery-3.6.0.min.js"></script>
<script src="https://cdn.jsdelivr.net/npm/select2@4.1.0-rc.0/dist/js/select2.min.js"></script>
<script>
$(document).ready(function () {
let $selectArr = $('#selectArr');
let $selectContent = $('#selectContent');
let $selectSeason = $('#selectSeason');
let $selectEpisode = $('#selectEpisode');
let $addBtn = $('#addToArr');
const $contentSearch = $('#contentSearch');
const $searchResults = $('#searchResults');
let isSonarr = true;
let searchTimeout;
let selectedArr, selectedContent, selectedSeasons, selectedEpisodes;
// Initially show only selectArr, hide others
$selectSeason.hide().closest('.mb-3').hide();
$selectEpisode.hide().closest('.mb-3').hide();
// Initialize Select2
$('.select2-multi').select2({
theme: 'bootstrap-5',
width: '100%',
placeholder: 'Select options',
allowClear: true
});
// Also hide the Select2 containers
$('.select2-container--bootstrap-5').hide();
$selectContent.select2({
theme: 'bootstrap-5',
width: '100%',
placeholder: 'Search shows/movies...',
allowClear: true,
minimumInputLength: 2,
ajax: {
url: '/internal/search',
dataType: 'json',
delay: 250,
data: function (params) {
return {
term: params.term
};
},
processResults: function (data) {
return {
results: data.map(function (item) {
return {
id: item.id,
text: item.media_type === 'movie' ? item.title : item.name,
media_type: item.media_type,
poster: item.poster_path ?
'https://image.tmdb.org/t/p/w92' + item.poster_path : null,
year: item.media_type === 'movie' ?
(item.release_date ? item.release_date.substring(0, 4) : '') :
(item.first_air_date ? item.first_air_date.substring(0, 4) : '')
};
})
};
},
cache: true
},
templateResult: formatResult,
templateSelection: formatSelection
});
function formatResult(item) {
if (!item.id) return item.text;
return $(`
<div class="select2-result d-flex align-items-center gap-2">
${item.poster ?
`<img src="${item.poster}" style="width: 45px; height: 68px; object-fit: cover;">` :
'<div style="width: 45px; height: 68px; background: #eee;"></div>'
}
<div>
<div class="fw-bold">${item.text}</div>
<small class="text-muted">
${item.year}${item.media_type === 'movie' ? 'Movie' : 'TV Series'}
</small>
</div>
</div>
`);
}
function formatSelection(item) {
if (!item.id) return item.text;
return item.text + (item.year ? ` (${item.year})` : '');
}
// Handle selection
$selectContent.on('select2:select', function (e) {
selectedContent = e.params.data.id;
const mediaType = e.params.data.media_type;
if (mediaType === 'tv') {
$selectSeason.show().closest('.mb-3').show();
$selectSeason.next('.select2-container--bootstrap-5').show();
// Fetch seasons (your existing seasons fetch code)
fetch(`/internal/seasons/${selectedContent}`)
.then(response => response.json())
.then(seasons => {
$selectSeason.empty().append('<option value="all">Select All</option>');
seasons.forEach(season => {
$selectSeason.append(`<option value="${season}">Season ${season}</option>`);
});
$selectSeason.trigger('change.select2');
})
.catch(error => console.error('Error fetching seasons:', error));
} else {
// For movies, show the Add to Arr button directly
$selectSeason.hide().closest('.mb-3').hide();
$selectSeason.next('.select2-container--bootstrap-5').hide();
$selectEpisode.hide().closest('.mb-3').hide();
$selectEpisode.next('.select2-container--bootstrap-5').hide();
$addBtn.show();
}
});
// Fetch Arrs
function fetchArrs() {
fetch('/internal/arrs')
.then(response => response.json())
.then(arrs => {
$selectArr.empty().append('<option value="">Select Arr</option>');
arrs.forEach(arr => {
$selectArr.append(`<option value="${arr.name}">${arr.name}</option>`);
});
})
.catch(error => console.error('Error fetching arrs:', error));
}
// Handle content selection
$selectContent.change(function () {
selectedContent = $(this).val();
selectedArr = $selectArr.val();
if (!selectedContent) {
$selectSeason.hide().closest('.mb-3').hide();
$selectSeason.next('.select2-container--bootstrap-5').hide();
$selectEpisode.hide().closest('.mb-3').hide();
$selectEpisode.next('.select2-container--bootstrap-5').hide();
return;
}
if (isSonarr) {
$selectSeason.show().closest('.mb-3').show();
$selectSeason.next('.select2-container--bootstrap-5').show();
// Fetch seasons
fetch(`/internal/seasons/${selectedContent}`)
.then(response => response.json())
.then(seasons => {
$selectSeason.empty().append('<option value="all">Select All</option>');
seasons.forEach(season => {
$selectSeason.append(`<option value="${season}">Season ${season}</option>`);
});
$selectSeason.trigger('change.select2');
})
.catch(error => console.error('Error fetching seasons:', error));
} else {
// For Radarr, show the Add to Arr button directly
$selectSeason.hide().closest('.mb-3').hide();
$selectSeason.next('.select2-container--bootstrap-5').hide();
$selectEpisode.hide().closest('.mb-3').hide();
$selectEpisode.next('.select2-container--bootstrap-5').hide();
$addBtn.show();
}
});
// Handle season selection
$selectSeason.change(function () {
selectedSeasons = $(this).val();
console.log('Selected seasons:', selectedSeasons);
if (!selectedSeasons || selectedSeasons.includes('all')) {
$selectEpisode.hide().closest('.mb-3').hide();
$selectEpisode.next('.select2-container--bootstrap-5').hide();
$addBtn.show();
return;
}
$selectEpisode.show().closest('.mb-3').show();
$selectEpisode.next('.select2-container--bootstrap-5').show();
fetch(`/internal/episodes/${selectedContent}?seasons=${selectedSeasons.join(',')}`)
.then(response => response.json())
.then(episodes => {
$selectEpisode.empty().append('<option value="all">Select All</option>');
episodes.forEach(episode => {
$selectEpisode.append(`<option value="${episode}">Episode ${episode}</option>`);
});
$selectEpisode.trigger('change.select2');
})
.catch(error => console.error('Error fetching episodes:', error));
$addBtn.show();
});
$addBtn.click(function () {
let oldText = $(this).text();
$(this).prop('disabled', true).prepend('<span class="spinner-border spinner-border-sm me-2" role="status" aria-hidden="true"></span>');
let magnet = $('#magnetURI').val();
if (!magnet) {
$(this).prop('disabled', false).text(oldText);
alert('Please provide a magnet link or upload a torrent file!');
return;
}
let data = {
arr: $selectArr.val(),
url: magnet,
notSymlink: $('#isSymlink').is(':checked'),
};
console.log('Adding to Arr:', data);
fetch('/internal/add', {
method: 'POST',
headers: {
'Content-Type': 'application/json'
},
body: JSON.stringify(data)
})
.then(async response => {
if (!response.ok) {
const errorText = await response.text();
throw new Error(errorText);
}
return response.json();
})
.then(result => {
console.log('Added to Arr:', result);
$(this).prop('disabled', false).text(oldText);
alert('Added to Arr successfully!');
})
.catch(error => {
$(this).prop('disabled', false).text(oldText);
alert(`Error adding to Arr: ${error.message || error}`);
});
});
// Initial fetch of Arrs
//fetchArrs();
});
</script>
</body>
</html>

View File

@@ -1,44 +1,46 @@
package qbit
package server
import (
"context"
"goBlack/common"
"goBlack/pkg/qbit/shared"
"net/http"
"path/filepath"
"strings"
)
func (q *QBit) handleTorrentsInfo(w http.ResponseWriter, r *http.Request) {
func (s *Server) handleTorrentsInfo(w http.ResponseWriter, r *http.Request) {
//log all url params
ctx := r.Context()
category := strings.Trim(r.URL.Query().Get("category"), "")
category := ctx.Value("category").(string)
filter := strings.Trim(r.URL.Query().Get("filter"), "")
hashes, _ := ctx.Value("hashes").([]string)
torrents := q.storage.GetAll(category, filter, hashes)
JSONResponse(w, torrents, http.StatusOK)
torrents := s.qbit.Storage.GetAll(category, filter, hashes)
common.JSONResponse(w, torrents, http.StatusOK)
}
func (q *QBit) handleTorrentsAdd(w http.ResponseWriter, r *http.Request) {
func (s *Server) handleTorrentsAdd(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
contentType := strings.Split(r.Header.Get("Content-Type"), ";")[0]
switch contentType {
case "multipart/form-data":
err := r.ParseMultipartForm(32 << 20) // 32MB max memory
if err != nil {
q.logger.Printf("Error parsing form: %v\n", err)
s.logger.Printf("Error parsing form: %v\n", err)
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
case "application/x-www-form-urlencoded":
err := r.ParseForm()
if err != nil {
q.logger.Printf("Error parsing form: %v\n", err)
s.logger.Printf("Error parsing form: %v\n", err)
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
}
isSymlink := strings.ToLower(r.FormValue("sequentialDownload")) != "true"
q.logger.Printf("isSymlink: %v\n", isSymlink)
s.logger.Printf("isSymlink: %v\n", isSymlink)
urls := r.FormValue("urls")
category := r.FormValue("category")
@@ -48,20 +50,19 @@ func (q *QBit) handleTorrentsAdd(w http.ResponseWriter, r *http.Request) {
}
ctx = context.WithValue(ctx, "isSymlink", isSymlink)
for _, url := range urlList {
if err := q.AddMagnet(ctx, url, category); err != nil {
q.logger.Printf("Error adding magnet: %v\n", err)
if err := s.qbit.AddMagnet(ctx, url, category); err != nil {
s.logger.Printf("Error adding magnet: %v\n", err)
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
}
if contentType == "multipart/form-data" {
if contentType == "multipart/form-data" && len(r.MultipartForm.File["torrents"]) > 0 {
files := r.MultipartForm.File["torrents"]
for _, fileHeader := range files {
if err := q.AddTorrent(ctx, fileHeader, category); err != nil {
q.logger.Printf("Error adding torrent: %v\n", err)
if err := s.qbit.AddTorrent(ctx, fileHeader, category); err != nil {
s.logger.Printf("Error adding torrent: %v\n", err)
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
@@ -71,7 +72,7 @@ func (q *QBit) handleTorrentsAdd(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
}
func (q *QBit) handleTorrentsDelete(w http.ResponseWriter, r *http.Request) {
func (s *Server) handleTorrentsDelete(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
hashes, _ := ctx.Value("hashes").([]string)
if len(hashes) == 0 {
@@ -79,67 +80,67 @@ func (q *QBit) handleTorrentsDelete(w http.ResponseWriter, r *http.Request) {
return
}
for _, hash := range hashes {
q.storage.Delete(hash)
s.qbit.Storage.Delete(hash)
}
w.WriteHeader(http.StatusOK)
}
func (q *QBit) handleTorrentsPause(w http.ResponseWriter, r *http.Request) {
func (s *Server) handleTorrentsPause(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
hashes, _ := ctx.Value("hashes").([]string)
for _, hash := range hashes {
torrent := q.storage.Get(hash)
torrent := s.qbit.Storage.Get(hash)
if torrent == nil {
continue
}
go q.PauseTorrent(torrent)
go s.qbit.PauseTorrent(torrent)
}
w.WriteHeader(http.StatusOK)
}
func (q *QBit) handleTorrentsResume(w http.ResponseWriter, r *http.Request) {
func (s *Server) handleTorrentsResume(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
hashes, _ := ctx.Value("hashes").([]string)
for _, hash := range hashes {
torrent := q.storage.Get(hash)
torrent := s.qbit.Storage.Get(hash)
if torrent == nil {
continue
}
go q.ResumeTorrent(torrent)
go s.qbit.ResumeTorrent(torrent)
}
w.WriteHeader(http.StatusOK)
}
func (q *QBit) handleTorrentRecheck(w http.ResponseWriter, r *http.Request) {
func (s *Server) handleTorrentRecheck(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
hashes, _ := ctx.Value("hashes").([]string)
for _, hash := range hashes {
torrent := q.storage.Get(hash)
torrent := s.qbit.Storage.Get(hash)
if torrent == nil {
continue
}
go q.RefreshTorrent(torrent)
go s.qbit.RefreshTorrent(torrent)
}
w.WriteHeader(http.StatusOK)
}
func (q *QBit) handleCategories(w http.ResponseWriter, r *http.Request) {
var categories = map[string]TorrentCategory{}
for _, cat := range q.Categories {
path := filepath.Join(q.DownloadFolder, cat)
categories[cat] = TorrentCategory{
func (s *Server) handleCategories(w http.ResponseWriter, r *http.Request) {
var categories = map[string]shared.TorrentCategory{}
for _, cat := range s.qbit.Categories {
path := filepath.Join(s.qbit.DownloadFolder, cat)
categories[cat] = shared.TorrentCategory{
Name: cat,
SavePath: path,
}
}
JSONResponse(w, categories, http.StatusOK)
common.JSONResponse(w, categories, http.StatusOK)
}
func (q *QBit) handleCreateCategory(w http.ResponseWriter, r *http.Request) {
func (s *Server) handleCreateCategory(w http.ResponseWriter, r *http.Request) {
err := r.ParseForm()
if err != nil {
http.Error(w, "Failed to parse form data", http.StatusBadRequest)
@@ -152,24 +153,24 @@ func (q *QBit) handleCreateCategory(w http.ResponseWriter, r *http.Request) {
return
}
q.Categories = append(q.Categories, name)
s.qbit.Categories = append(s.qbit.Categories, name)
JSONResponse(w, nil, http.StatusOK)
common.JSONResponse(w, nil, http.StatusOK)
}
func (q *QBit) handleTorrentProperties(w http.ResponseWriter, r *http.Request) {
func (s *Server) handleTorrentProperties(w http.ResponseWriter, r *http.Request) {
hash := r.URL.Query().Get("hash")
torrent := q.storage.Get(hash)
properties := q.GetTorrentProperties(torrent)
JSONResponse(w, properties, http.StatusOK)
torrent := s.qbit.Storage.Get(hash)
properties := s.qbit.GetTorrentProperties(torrent)
common.JSONResponse(w, properties, http.StatusOK)
}
func (q *QBit) handleTorrentFiles(w http.ResponseWriter, r *http.Request) {
func (s *Server) handleTorrentFiles(w http.ResponseWriter, r *http.Request) {
hash := r.URL.Query().Get("hash")
torrent := q.storage.Get(hash)
torrent := s.qbit.Storage.Get(hash)
if torrent == nil {
return
}
files := q.GetTorrentFiles(torrent)
JSONResponse(w, files, http.StatusOK)
files := s.qbit.GetTorrentFiles(torrent)
common.JSONResponse(w, files, http.StatusOK)
}

View File

@@ -0,0 +1,114 @@
package server
import (
"embed"
"encoding/json"
"goBlack/common"
"goBlack/pkg/arr"
"html/template"
"net/http"
)
// AddRequest is the JSON payload accepted by the add-content endpoint.
// NotSymlink selects the real-download flow; by default the import is
// performed via symlinks (see handleAddContent, which passes !NotSymlink).
type AddRequest struct {
	Url        string   `json:"url"`        // magnet link or torrent URL
	Arr        string   `json:"arr"`        // name of the target arr instance
	File       string   `json:"file"`       // uploaded torrent file name, if any
	NotSymlink bool     `json:"notSymlink"` // true => download files instead of symlinking
	Content    string   `json:"content"`    // selected content identifier — presumably from handleContent; verify against the UI
	Seasons    []string `json:"seasons"`    // selected season labels (see handleSeasons stub)
	Episodes   []string `json:"episodes"`   // selected episode labels (see handleEpisodes stub)
}

// ArrResponse describes a single arr instance as returned to the UI.
type ArrResponse struct {
	Name string `json:"name"`
	Url  string `json:"url"`
}

// ContentResponse is a single content item (movie/show) tied to an arr.
type ContentResponse struct {
	ID    string `json:"id"`
	Title string `json:"title"`
	Type  string `json:"type"`
	ArrID string `json:"arr"`
}
//go:embed static/index.html
var content embed.FS
// handleHome serves the embedded single-page UI (static/index.html),
// parsed and executed as an html/template on every request.
func (s *Server) handleHome(w http.ResponseWriter, r *http.Request) {
	tmpl, parseErr := template.ParseFS(content, "static/index.html")
	if parseErr != nil {
		http.Error(w, parseErr.Error(), http.StatusInternalServerError)
		return
	}
	if execErr := tmpl.Execute(w, nil); execErr != nil {
		http.Error(w, execErr.Error(), http.StatusInternalServerError)
	}
}
// handleGetArrs writes all registered arr instances as a JSON array.
func (s *Server) handleGetArrs(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")
	all := s.qbit.Arrs.GetAll()
	common.JSONResponse(w, all, http.StatusOK)
}
// handleContent returns the content items known to the arr instance named
// by the "arr" query parameter, or 400 when the name is not registered.
func (s *Server) handleContent(w http.ResponseWriter, r *http.Request) {
	name := r.URL.Query().Get("arr")
	instance := s.qbit.Arrs.Get(name)
	if instance == nil {
		http.Error(w, "Invalid arr", http.StatusBadRequest)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	common.JSONResponse(w, instance.GetContents(), http.StatusOK)
}
// handleSearch proxies a free-text search ("term" query parameter) to TMDB
// and returns the raw result list as JSON. The arr parameter is currently
// unused (see the commented-out line in the original).
func (s *Server) handleSearch(w http.ResponseWriter, r *http.Request) {
	// arrName := r.URL.Query().Get("arr")
	query := r.URL.Query().Get("term")
	found, searchErr := arr.SearchTMDB(query)
	if searchErr != nil {
		http.Error(w, searchErr.Error(), http.StatusBadRequest)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	common.JSONResponse(w, found.Results, http.StatusOK)
}
// handleSeasons returns a hard-coded placeholder list of seasons.
// TODO(review): the arrId/contentId parameters are parsed out in the
// commented lines but unused — presumably real season lookup is pending.
func (s *Server) handleSeasons(w http.ResponseWriter, r *http.Request) {
	// arrId := r.URL.Query().Get("arrId")
	// contentId := chi.URLParam(r, "contentId")
	stub := []string{
		"Season 1",
		"Season 2",
		"Season 3",
		"Season 4",
		"Season 5",
	}
	w.Header().Set("Content-Type", "application/json")
	common.JSONResponse(w, stub, http.StatusOK)
}
// handleEpisodes returns a hard-coded placeholder list of episodes.
// TODO(review): arrId/contentId/seasons are parsed out in the commented
// lines but unused — presumably real episode lookup is pending.
func (s *Server) handleEpisodes(w http.ResponseWriter, r *http.Request) {
	// arrId := r.URL.Query().Get("arrId")
	// contentId := chi.URLParam(r, "contentId")
	// seasonIds := strings.Split(r.URL.Query().Get("seasons"), ",")
	stub := []string{
		"Episode 1",
		"Episode 2",
		"Episode 3",
		"Episode 4",
		"Episode 5",
	}
	w.Header().Set("Content-Type", "application/json")
	common.JSONResponse(w, stub, http.StatusOK)
}
// handleAddContent decodes an AddRequest, resolves the target arr instance
// (falling back to a transient Sonarr-typed one when the name is unknown),
// runs the import synchronously, and echoes the import request as JSON.
func (s *Server) handleAddContent(w http.ResponseWriter, r *http.Request) {
	var req AddRequest
	if decodeErr := json.NewDecoder(r.Body).Decode(&req); decodeErr != nil {
		http.Error(w, decodeErr.Error(), http.StatusBadRequest)
		return
	}
	target := s.qbit.Arrs.Get(req.Arr)
	if target == nil {
		// Unknown arr name: build a throwaway instance with empty host/token.
		target = arr.NewArr(req.Arr, "", "", arr.Sonarr)
	}
	importReq := NewImportRequest(req.Url, target, !req.NotSymlink)
	if processErr := importReq.Process(s); processErr != nil {
		http.Error(w, processErr.Error(), http.StatusBadRequest)
		return
	}
	common.JSONResponse(w, importReq, http.StatusOK)
}

View File

@@ -1,29 +1,27 @@
package qbit
package shared
import (
"fmt"
"goBlack/common"
"goBlack/pkg/debrid"
"goBlack/pkg/qbit/downloaders"
"goBlack/pkg/downloaders"
"os"
"path/filepath"
"sync"
"time"
)
func (q *QBit) processManualFiles(torrent *Torrent, debridTorrent *debrid.Torrent, arr *debrid.Arr) {
func (q *QBit) processManualFiles(debridTorrent *debrid.Torrent) (string, error) {
q.logger.Printf("Downloading %d files...", len(debridTorrent.DownloadLinks))
torrentPath := common.RemoveExtension(debridTorrent.OriginalFilename)
parent := common.RemoveInvalidChars(filepath.Join(q.DownloadFolder, debridTorrent.Arr.Name, torrentPath))
err := os.MkdirAll(parent, os.ModePerm)
if err != nil {
q.logger.Printf("Failed to create directory: %s\n", parent)
q.MarkAsFailed(torrent)
return
// add previous error to the error and return
return "", fmt.Errorf("failed to create directory: %s: %v", parent, err)
}
torrent.TorrentPath = torrentPath
q.downloadFiles(debridTorrent, parent)
q.UpdateTorrent(torrent, debridTorrent)
q.RefreshArr(arr)
return torrentPath, nil
}
func (q *QBit) downloadFiles(debridTorrent *debrid.Torrent, parent string) {
@@ -52,26 +50,21 @@ func (q *QBit) downloadFiles(debridTorrent *debrid.Torrent, parent string) {
q.logger.Printf("Downloaded all files for %s\n", debridTorrent.Name)
}
func (q *QBit) processSymlink(torrent *Torrent, debridTorrent *debrid.Torrent, arr *debrid.Arr) {
func (q *QBit) ProcessSymlink(debridTorrent *debrid.Torrent) (string, error) {
var wg sync.WaitGroup
files := debridTorrent.Files
ready := make(chan debrid.TorrentFile, len(files))
q.logger.Printf("Checking %d files...", len(files))
rCloneBase := q.debrid.GetMountPath()
rCloneBase := debridTorrent.Debrid.GetMountPath()
torrentPath, err := q.getTorrentPath(rCloneBase, debridTorrent) // /MyTVShow/
if err != nil {
q.MarkAsFailed(torrent)
q.logger.Printf("Error: %v", err)
return
return "", fmt.Errorf("failed to get torrent path: %v", err)
}
torrentSymlinkPath := filepath.Join(q.DownloadFolder, debridTorrent.Arr.Name, torrentPath) // /mnt/symlinks/{category}/MyTVShow/
err = os.MkdirAll(torrentSymlinkPath, os.ModePerm)
if err != nil {
q.logger.Printf("Failed to create directory: %s\n", torrentSymlinkPath)
q.MarkAsFailed(torrent)
return
return "", fmt.Errorf("failed to create directory: %s: %v", torrentSymlinkPath, err)
}
torrentRclonePath := filepath.Join(rCloneBase, torrentPath)
for _, file := range files {
@@ -88,32 +81,16 @@ func (q *QBit) processSymlink(torrent *Torrent, debridTorrent *debrid.Torrent, a
q.logger.Println("File is ready:", f.Path)
q.createSymLink(torrentSymlinkPath, torrentRclonePath, f)
}
// Update the torrent when all files are ready
torrent.TorrentPath = filepath.Base(torrentPath) // Quite important
q.UpdateTorrent(torrent, debridTorrent)
q.RefreshArr(arr)
return torrentPath, nil
}
func (q *QBit) getTorrentPath(rclonePath string, debridTorrent *debrid.Torrent) (string, error) {
pathChan := make(chan string)
errChan := make(chan error)
go func() {
for {
torrentPath := debridTorrent.GetMountFolder(rclonePath)
if torrentPath != "" {
pathChan <- torrentPath
return
}
time.Sleep(time.Second)
for {
torrentPath := debridTorrent.GetMountFolder(rclonePath)
if torrentPath != "" {
return torrentPath, nil
}
}()
select {
case path := <-pathChan:
return path, nil
case err := <-errChan:
return "", err
time.Sleep(time.Second)
}
}

46
pkg/qbit/shared/qbit.go Normal file
View File

@@ -0,0 +1,46 @@
package shared
import (
"cmp"
"goBlack/common"
"goBlack/pkg/arr"
"goBlack/pkg/debrid"
"log"
"os"
)
// QBit emulates a qBittorrent-like client backed by a debrid service:
// torrents are tracked in Storage and materialized under DownloadFolder.
type QBit struct {
	Username       string   `json:"username"`
	Password       string   `json:"password"`
	Port           string   `json:"port"` // HTTP listen port (see NewQBit for fallbacks)
	DownloadFolder string   `json:"download_folder"`
	Categories     []string `json:"categories"`
	Debrid         *debrid.DebridService
	cache          *common.Cache
	Storage        *TorrentStorage // torrent state, persisted to torrents.json
	debug          bool
	logger         *log.Logger
	Arrs           *arr.Storage // registered arr (sonarr/radarr-style) instances
	RefreshInterval int // refresh worker period — units set by config; confirm in StartRefreshWorker
}
// NewQBit builds a QBit from configuration. The port falls back to the
// QBIT_PORT environment variable and then "8182"; the refresh interval
// defaults to 10 when unset. Torrent state is persisted in torrents.json.
func NewQBit(config *common.Config, deb *debrid.DebridService, cache *common.Cache, logger *log.Logger) *QBit {
	cfg := config.QBitTorrent
	return &QBit{
		Username:        cfg.Username,
		Password:        cfg.Password,
		Port:            cmp.Or(cfg.Port, os.Getenv("QBIT_PORT"), "8182"),
		DownloadFolder:  cfg.DownloadFolder,
		Categories:      cfg.Categories,
		Debrid:          deb,
		cache:           cache,
		debug:           cfg.Debug,
		Storage:         NewTorrentStorage("torrents.json"),
		logger:          logger,
		Arrs:            arr.NewStorage(),
		RefreshInterval: cmp.Or(cfg.RefreshInterval, 10),
	}
}

View File

@@ -1,4 +1,4 @@
package qbit
package shared
import (
"encoding/json"
@@ -122,7 +122,10 @@ func (ts *TorrentStorage) Delete(hash string) {
}
// Delete the torrent folder
if torrent.ContentPath != "" {
os.RemoveAll(torrent.ContentPath)
err := os.RemoveAll(torrent.ContentPath)
if err != nil {
return
}
}
}

View File

@@ -1,4 +1,4 @@
package qbit
package shared
import "goBlack/pkg/debrid"
@@ -14,40 +14,40 @@ type BuildInfo struct {
type AppPreferences struct {
AddTrackers string `json:"add_trackers"`
AddTrackersEnabled bool `json:"add_trackers_enabled"`
AltDlLimit int64 `json:"alt_dl_limit"`
AltUpLimit int64 `json:"alt_up_limit"`
AltDlLimit int `json:"alt_dl_limit"`
AltUpLimit int `json:"alt_up_limit"`
AlternativeWebuiEnabled bool `json:"alternative_webui_enabled"`
AlternativeWebuiPath string `json:"alternative_webui_path"`
AnnounceIp string `json:"announce_ip"`
AnnounceToAllTiers bool `json:"announce_to_all_tiers"`
AnnounceToAllTrackers bool `json:"announce_to_all_trackers"`
AnonymousMode bool `json:"anonymous_mode"`
AsyncIoThreads int64 `json:"async_io_threads"`
AutoDeleteMode int64 `json:"auto_delete_mode"`
AsyncIoThreads int `json:"async_io_threads"`
AutoDeleteMode int `json:"auto_delete_mode"`
AutoTmmEnabled bool `json:"auto_tmm_enabled"`
AutorunEnabled bool `json:"autorun_enabled"`
AutorunProgram string `json:"autorun_program"`
BannedIPs string `json:"banned_IPs"`
BittorrentProtocol int64 `json:"bittorrent_protocol"`
BittorrentProtocol int `json:"bittorrent_protocol"`
BypassAuthSubnetWhitelist string `json:"bypass_auth_subnet_whitelist"`
BypassAuthSubnetWhitelistEnabled bool `json:"bypass_auth_subnet_whitelist_enabled"`
BypassLocalAuth bool `json:"bypass_local_auth"`
CategoryChangedTmmEnabled bool `json:"category_changed_tmm_enabled"`
CheckingMemoryUse int64 `json:"checking_memory_use"`
CheckingMemoryUse int `json:"checking_memory_use"`
CreateSubfolderEnabled bool `json:"create_subfolder_enabled"`
CurrentInterfaceAddress string `json:"current_interface_address"`
CurrentNetworkInterface string `json:"current_network_interface"`
Dht bool `json:"dht"`
DiskCache int64 `json:"disk_cache"`
DiskCacheTtl int64 `json:"disk_cache_ttl"`
DlLimit int64 `json:"dl_limit"`
DiskCache int `json:"disk_cache"`
DiskCacheTtl int `json:"disk_cache_ttl"`
DlLimit int `json:"dl_limit"`
DontCountSlowTorrents bool `json:"dont_count_slow_torrents"`
DyndnsDomain string `json:"dyndns_domain"`
DyndnsEnabled bool `json:"dyndns_enabled"`
DyndnsPassword string `json:"dyndns_password"`
DyndnsService int64 `json:"dyndns_service"`
DyndnsService int `json:"dyndns_service"`
DyndnsUsername string `json:"dyndns_username"`
EmbeddedTrackerPort int64 `json:"embedded_tracker_port"`
EmbeddedTrackerPort int `json:"embedded_tracker_port"`
EnableCoalesceReadWrite bool `json:"enable_coalesce_read_write"`
EnableEmbeddedTracker bool `json:"enable_embedded_tracker"`
EnableMultiConnectionsFromSameIp bool `json:"enable_multi_connections_from_same_ip"`
@@ -55,10 +55,10 @@ type AppPreferences struct {
EnablePieceExtentAffinity bool `json:"enable_piece_extent_affinity"`
EnableSuperSeeding bool `json:"enable_super_seeding"`
EnableUploadSuggestions bool `json:"enable_upload_suggestions"`
Encryption int64 `json:"encryption"`
Encryption int `json:"encryption"`
ExportDir string `json:"export_dir"`
ExportDirFin string `json:"export_dir_fin"`
FilePoolSize int64 `json:"file_pool_size"`
FilePoolSize int `json:"file_pool_size"`
IncompleteFilesExt bool `json:"incomplete_files_ext"`
IpFilterEnabled bool `json:"ip_filter_enabled"`
IpFilterPath string `json:"ip_filter_path"`
@@ -66,7 +66,7 @@ type AppPreferences struct {
LimitLanPeers bool `json:"limit_lan_peers"`
LimitTcpOverhead bool `json:"limit_tcp_overhead"`
LimitUtpRate bool `json:"limit_utp_rate"`
ListenPort int64 `json:"listen_port"`
ListenPort int `json:"listen_port"`
Locale string `json:"locale"`
Lsd bool `json:"lsd"`
MailNotificationAuthEnabled bool `json:"mail_notification_auth_enabled"`
@@ -77,79 +77,79 @@ type AppPreferences struct {
MailNotificationSmtp string `json:"mail_notification_smtp"`
MailNotificationSslEnabled bool `json:"mail_notification_ssl_enabled"`
MailNotificationUsername string `json:"mail_notification_username"`
MaxActiveDownloads int64 `json:"max_active_downloads"`
MaxActiveTorrents int64 `json:"max_active_torrents"`
MaxActiveUploads int64 `json:"max_active_uploads"`
MaxConnec int64 `json:"max_connec"`
MaxConnecPerTorrent int64 `json:"max_connec_per_torrent"`
MaxRatio int64 `json:"max_ratio"`
MaxRatioAct int64 `json:"max_ratio_act"`
MaxActiveDownloads int `json:"max_active_downloads"`
MaxActiveTorrents int `json:"max_active_torrents"`
MaxActiveUploads int `json:"max_active_uploads"`
MaxConnec int `json:"max_connec"`
MaxConnecPerTorrent int `json:"max_connec_per_torrent"`
MaxRatio int `json:"max_ratio"`
MaxRatioAct int `json:"max_ratio_act"`
MaxRatioEnabled bool `json:"max_ratio_enabled"`
MaxSeedingTime int64 `json:"max_seeding_time"`
MaxSeedingTime int `json:"max_seeding_time"`
MaxSeedingTimeEnabled bool `json:"max_seeding_time_enabled"`
MaxUploads int64 `json:"max_uploads"`
MaxUploadsPerTorrent int64 `json:"max_uploads_per_torrent"`
OutgoingPortsMax int64 `json:"outgoing_ports_max"`
OutgoingPortsMin int64 `json:"outgoing_ports_min"`
MaxUploads int `json:"max_uploads"`
MaxUploadsPerTorrent int `json:"max_uploads_per_torrent"`
OutgoingPortsMax int `json:"outgoing_ports_max"`
OutgoingPortsMin int `json:"outgoing_ports_min"`
Pex bool `json:"pex"`
PreallocateAll bool `json:"preallocate_all"`
ProxyAuthEnabled bool `json:"proxy_auth_enabled"`
ProxyIp string `json:"proxy_ip"`
ProxyPassword string `json:"proxy_password"`
ProxyPeerConnections bool `json:"proxy_peer_connections"`
ProxyPort int64 `json:"proxy_port"`
ProxyPort int `json:"proxy_port"`
ProxyTorrentsOnly bool `json:"proxy_torrents_only"`
ProxyType int64 `json:"proxy_type"`
ProxyType int `json:"proxy_type"`
ProxyUsername string `json:"proxy_username"`
QueueingEnabled bool `json:"queueing_enabled"`
RandomPort bool `json:"random_port"`
RecheckCompletedTorrents bool `json:"recheck_completed_torrents"`
ResolvePeerCountries bool `json:"resolve_peer_countries"`
RssAutoDownloadingEnabled bool `json:"rss_auto_downloading_enabled"`
RssMaxArticlesPerFeed int64 `json:"rss_max_articles_per_feed"`
RssMaxArticlesPerFeed int `json:"rss_max_articles_per_feed"`
RssProcessingEnabled bool `json:"rss_processing_enabled"`
RssRefreshInterval int64 `json:"rss_refresh_interval"`
RssRefreshInterval int `json:"rss_refresh_interval"`
SavePath string `json:"save_path"`
SavePathChangedTmmEnabled bool `json:"save_path_changed_tmm_enabled"`
SaveResumeDataInterval int64 `json:"save_resume_data_interval"`
SaveResumeDataInterval int `json:"save_resume_data_interval"`
ScanDirs ScanDirs `json:"scan_dirs"`
ScheduleFromHour int64 `json:"schedule_from_hour"`
ScheduleFromMin int64 `json:"schedule_from_min"`
ScheduleToHour int64 `json:"schedule_to_hour"`
ScheduleToMin int64 `json:"schedule_to_min"`
SchedulerDays int64 `json:"scheduler_days"`
ScheduleFromHour int `json:"schedule_from_hour"`
ScheduleFromMin int `json:"schedule_from_min"`
ScheduleToHour int `json:"schedule_to_hour"`
ScheduleToMin int `json:"schedule_to_min"`
SchedulerDays int `json:"scheduler_days"`
SchedulerEnabled bool `json:"scheduler_enabled"`
SendBufferLowWatermark int64 `json:"send_buffer_low_watermark"`
SendBufferWatermark int64 `json:"send_buffer_watermark"`
SendBufferWatermarkFactor int64 `json:"send_buffer_watermark_factor"`
SlowTorrentDlRateThreshold int64 `json:"slow_torrent_dl_rate_threshold"`
SlowTorrentInactiveTimer int64 `json:"slow_torrent_inactive_timer"`
SlowTorrentUlRateThreshold int64 `json:"slow_torrent_ul_rate_threshold"`
SocketBacklogSize int64 `json:"socket_backlog_size"`
SendBufferLowWatermark int `json:"send_buffer_low_watermark"`
SendBufferWatermark int `json:"send_buffer_watermark"`
SendBufferWatermarkFactor int `json:"send_buffer_watermark_factor"`
SlowTorrentDlRateThreshold int `json:"slow_torrent_dl_rate_threshold"`
SlowTorrentInactiveTimer int `json:"slow_torrent_inactive_timer"`
SlowTorrentUlRateThreshold int `json:"slow_torrent_ul_rate_threshold"`
SocketBacklogSize int `json:"socket_backlog_size"`
StartPausedEnabled bool `json:"start_paused_enabled"`
StopTrackerTimeout int64 `json:"stop_tracker_timeout"`
StopTrackerTimeout int `json:"stop_tracker_timeout"`
TempPath string `json:"temp_path"`
TempPathEnabled bool `json:"temp_path_enabled"`
TorrentChangedTmmEnabled bool `json:"torrent_changed_tmm_enabled"`
UpLimit int64 `json:"up_limit"`
UploadChokingAlgorithm int64 `json:"upload_choking_algorithm"`
UploadSlotsBehavior int64 `json:"upload_slots_behavior"`
UpLimit int `json:"up_limit"`
UploadChokingAlgorithm int `json:"upload_choking_algorithm"`
UploadSlotsBehavior int `json:"upload_slots_behavior"`
Upnp bool `json:"upnp"`
UpnpLeaseDuration int64 `json:"upnp_lease_duration"`
UpnpLeaseDuration int `json:"upnp_lease_duration"`
UseHttps bool `json:"use_https"`
UtpTcpMixedMode int64 `json:"utp_tcp_mixed_mode"`
UtpTcpMixedMode int `json:"utp_tcp_mixed_mode"`
WebUiAddress string `json:"web_ui_address"`
WebUiBanDuration int64 `json:"web_ui_ban_duration"`
WebUiBanDuration int `json:"web_ui_ban_duration"`
WebUiClickjackingProtectionEnabled bool `json:"web_ui_clickjacking_protection_enabled"`
WebUiCsrfProtectionEnabled bool `json:"web_ui_csrf_protection_enabled"`
WebUiDomainList string `json:"web_ui_domain_list"`
WebUiHostHeaderValidationEnabled bool `json:"web_ui_host_header_validation_enabled"`
WebUiHttpsCertPath string `json:"web_ui_https_cert_path"`
WebUiHttpsKeyPath string `json:"web_ui_https_key_path"`
WebUiMaxAuthFailCount int64 `json:"web_ui_max_auth_fail_count"`
WebUiPort int64 `json:"web_ui_port"`
WebUiMaxAuthFailCount int `json:"web_ui_max_auth_fail_count"`
WebUiPort int `json:"web_ui_port"`
WebUiSecureCookieEnabled bool `json:"web_ui_secure_cookie_enabled"`
WebUiSessionTimeout int64 `json:"web_ui_session_timeout"`
WebUiSessionTimeout int `json:"web_ui_session_timeout"`
WebUiUpnp bool `json:"web_ui_upnp"`
WebUiUsername string `json:"web_ui_username"`
WebUiPassword string `json:"web_ui_password"`
@@ -179,44 +179,44 @@ type Torrent struct {
Availability float64 `json:"availability,omitempty"`
Category string `json:"category,omitempty"`
Completed int64 `json:"completed"`
CompletionOn int64 `json:"completion_on,omitempty"`
CompletionOn int `json:"completion_on,omitempty"`
ContentPath string `json:"content_path"`
DlLimit int64 `json:"dl_limit"`
Dlspeed int64 `json:"dlspeed"`
DlLimit int `json:"dl_limit"`
Dlspeed int `json:"dlspeed"`
Downloaded int64 `json:"downloaded"`
DownloadedSession int64 `json:"downloaded_session"`
Eta int64 `json:"eta"`
Eta int `json:"eta"`
FlPiecePrio bool `json:"f_l_piece_prio,omitempty"`
ForceStart bool `json:"force_start,omitempty"`
Hash string `json:"hash"`
LastActivity int64 `json:"last_activity,omitempty"`
MagnetUri string `json:"magnet_uri,omitempty"`
MaxRatio int64 `json:"max_ratio,omitempty"`
MaxSeedingTime int64 `json:"max_seeding_time,omitempty"`
MaxRatio int `json:"max_ratio,omitempty"`
MaxSeedingTime int `json:"max_seeding_time,omitempty"`
Name string `json:"name,omitempty"`
NumComplete int64 `json:"num_complete,omitempty"`
NumIncomplete int64 `json:"num_incomplete,omitempty"`
NumLeechs int64 `json:"num_leechs,omitempty"`
NumSeeds int64 `json:"num_seeds,omitempty"`
Priority int64 `json:"priority,omitempty"`
Progress float32 `json:"progress"`
Ratio int64 `json:"ratio,omitempty"`
RatioLimit int64 `json:"ratio_limit,omitempty"`
NumComplete int `json:"num_complete,omitempty"`
NumIncomplete int `json:"num_incomplete,omitempty"`
NumLeechs int `json:"num_leechs,omitempty"`
NumSeeds int `json:"num_seeds,omitempty"`
Priority int `json:"priority,omitempty"`
Progress float64 `json:"progress"`
Ratio int `json:"ratio,omitempty"`
RatioLimit int `json:"ratio_limit,omitempty"`
SavePath string `json:"save_path"`
SeedingTimeLimit int64 `json:"seeding_time_limit,omitempty"`
SeedingTimeLimit int `json:"seeding_time_limit,omitempty"`
SeenComplete int64 `json:"seen_complete,omitempty"`
SeqDl bool `json:"seq_dl"`
Size int64 `json:"size,omitempty"`
State string `json:"state,omitempty"`
SuperSeeding bool `json:"super_seeding"`
Tags string `json:"tags,omitempty"`
TimeActive int64 `json:"time_active,omitempty"`
TimeActive int `json:"time_active,omitempty"`
TotalSize int64 `json:"total_size,omitempty"`
Tracker string `json:"tracker,omitempty"`
UpLimit int64 `json:"up_limit,omitempty"`
Uploaded int64 `json:"uploaded,omitempty"`
UploadedSession int64 `json:"uploaded_session,omitempty"`
Upspeed int64 `json:"upspeed,omitempty"`
Upspeed int `json:"upspeed,omitempty"`
}
func (t *Torrent) IsReady() bool {
@@ -229,24 +229,24 @@ type TorrentProperties struct {
CompletionDate int64 `json:"completion_date,omitempty"`
CreatedBy string `json:"created_by,omitempty"`
CreationDate int64 `json:"creation_date,omitempty"`
DlLimit int64 `json:"dl_limit,omitempty"`
DlSpeed int64 `json:"dl_speed,omitempty"`
DlSpeedAvg int64 `json:"dl_speed_avg,omitempty"`
Eta int64 `json:"eta,omitempty"`
DlLimit int `json:"dl_limit,omitempty"`
DlSpeed int `json:"dl_speed,omitempty"`
DlSpeedAvg int `json:"dl_speed_avg,omitempty"`
Eta int `json:"eta,omitempty"`
LastSeen int64 `json:"last_seen,omitempty"`
NbConnections int64 `json:"nb_connections,omitempty"`
NbConnectionsLimit int64 `json:"nb_connections_limit,omitempty"`
Peers int64 `json:"peers,omitempty"`
PeersTotal int64 `json:"peers_total,omitempty"`
NbConnections int `json:"nb_connections,omitempty"`
NbConnectionsLimit int `json:"nb_connections_limit,omitempty"`
Peers int `json:"peers,omitempty"`
PeersTotal int `json:"peers_total,omitempty"`
PieceSize int64 `json:"piece_size,omitempty"`
PiecesHave int64 `json:"pieces_have,omitempty"`
PiecesNum int64 `json:"pieces_num,omitempty"`
Reannounce int64 `json:"reannounce,omitempty"`
Reannounce int `json:"reannounce,omitempty"`
SavePath string `json:"save_path,omitempty"`
SeedingTime int64 `json:"seeding_time,omitempty"`
Seeds int64 `json:"seeds,omitempty"`
SeedsTotal int64 `json:"seeds_total,omitempty"`
ShareRatio int64 `json:"share_ratio,omitempty"`
SeedingTime int `json:"seeding_time,omitempty"`
Seeds int `json:"seeds,omitempty"`
SeedsTotal int `json:"seeds_total,omitempty"`
ShareRatio int `json:"share_ratio,omitempty"`
TimeElapsed int64 `json:"time_elapsed,omitempty"`
TotalDownloaded int64 `json:"total_downloaded,omitempty"`
TotalDownloadedSession int64 `json:"total_downloaded_session,omitempty"`
@@ -254,19 +254,19 @@ type TorrentProperties struct {
TotalUploaded int64 `json:"total_uploaded,omitempty"`
TotalUploadedSession int64 `json:"total_uploaded_session,omitempty"`
TotalWasted int64 `json:"total_wasted,omitempty"`
UpLimit int64 `json:"up_limit,omitempty"`
UpSpeed int64 `json:"up_speed,omitempty"`
UpSpeedAvg int64 `json:"up_speed_avg,omitempty"`
UpLimit int `json:"up_limit,omitempty"`
UpSpeed int `json:"up_speed,omitempty"`
UpSpeedAvg int `json:"up_speed_avg,omitempty"`
}
type TorrentFile struct {
Index int `json:"index,omitempty"`
Name string `json:"name,omitempty"`
Size int64 `json:"size,omitempty"`
Progress int64 `json:"progress,omitempty"`
Priority int64 `json:"priority,omitempty"`
Progress int `json:"progress,omitempty"`
Priority int `json:"priority,omitempty"`
IsSeed bool `json:"is_seed,omitempty"`
PieceRange []int64 `json:"piece_range,omitempty"`
PieceRange []int `json:"piece_range,omitempty"`
Availability float64 `json:"availability,omitempty"`
}

274
pkg/qbit/shared/torrent.go Normal file
View File

@@ -0,0 +1,274 @@
package shared
import (
"cmp"
"context"
"fmt"
"github.com/google/uuid"
"goBlack/common"
"goBlack/pkg/arr"
"goBlack/pkg/debrid"
"io"
"mime/multipart"
"os"
"path/filepath"
"strings"
"time"
)
// All torrent related helpers goes here
// AddMagnet parses a magnet URL and feeds the result into Process.
func (q *QBit) AddMagnet(ctx context.Context, url, category string) error {
	magnet, parseErr := common.GetMagnetFromUrl(url)
	if parseErr != nil {
		return fmt.Errorf("error parsing magnet link: %w", parseErr)
	}
	if processErr := q.Process(ctx, magnet, category); processErr != nil {
		return fmt.Errorf("failed to process torrent: %w", processErr)
	}
	return nil
}
// AddTorrent parses an uploaded .torrent file and feeds the resulting
// magnet into Process.
// (Fix: fileHeader.Open's error was discarded with `file, _ :=`; a failed
// open then nil-panicked on the deferred Close and the subsequent read.
// The redundant `var reader io.Reader = file` was also dropped —
// multipart.File already satisfies io.Reader.)
func (q *QBit) AddTorrent(ctx context.Context, fileHeader *multipart.FileHeader, category string) error {
	file, err := fileHeader.Open()
	if err != nil {
		return fmt.Errorf("error opening file: %s \n %w", fileHeader.Filename, err)
	}
	defer file.Close()
	magnet, err := common.GetMagnetFromFile(file, fileHeader.Filename)
	if err != nil {
		return fmt.Errorf("error reading file: %s \n %w", fileHeader.Filename, err)
	}
	if err := q.Process(ctx, magnet, category); err != nil {
		return fmt.Errorf("failed to process torrent: %w", err)
	}
	return nil
}
// Process registers a torrent locally, submits it to the debrid service,
// and kicks off asynchronous file processing. The context must carry the
// target *arr.Arr under "arr"; "isSymlink" is optional and defaults false.
// (Fix: `ctx.Value("isSymlink").(bool)` was an unchecked type assertion
// that panicked whenever the value was absent or not a bool; it now uses
// the comma-ok form.)
func (q *QBit) Process(ctx context.Context, magnet *common.Magnet, category string) error {
	torrent := q.CreateTorrentFromMagnet(magnet, category)
	a, ok := ctx.Value("arr").(*arr.Arr)
	if !ok {
		return fmt.Errorf("arr not found in context")
	}
	isSymlink, _ := ctx.Value("isSymlink").(bool)
	debridTorrent, err := debrid.ProcessTorrent(q.Debrid, magnet, a, isSymlink)
	if err != nil || debridTorrent == nil {
		if debridTorrent != nil {
			// Remote add partially succeeded; clean it up in the background.
			go debridTorrent.Delete()
		}
		if err == nil {
			err = fmt.Errorf("failed to process torrent")
		}
		return err
	}
	torrent = q.UpdateTorrentMin(torrent, debridTorrent)
	q.Storage.AddOrUpdate(torrent)
	go q.ProcessFiles(torrent, debridTorrent, a, isSymlink) // async so the caller's response isn't delayed
	return nil
}
// CreateTorrentFromMagnet seeds a fresh qBittorrent-style Torrent record
// from a parsed magnet: random ID, lowercased info-hash, "downloading"
// state, and placeholder tracker/limit values.
func (q *QBit) CreateTorrentFromMagnet(magnet *common.Magnet, category string) *Torrent {
	return &Torrent{
		ID:         uuid.NewString(),
		Hash:       strings.ToLower(magnet.InfoHash),
		Name:       magnet.Name,
		Size:       magnet.Size,
		Category:   category,
		State:      "downloading",
		MagnetUri:  magnet.Link,
		Tracker:    "udp://tracker.opentrackr.org:1337",
		UpLimit:    -1,
		DlLimit:    -1,
		AutoTmm:    false,
		Ratio:      1,
		RatioLimit: 1,
	}
}
// ProcessFiles blocks until the debrid torrent reports "downloaded", then
// materializes its files — as symlinks into the rclone mount or as real
// downloads — updates local state, and triggers an arr refresh. Intended
// to run in a goroutine (see Process). On any failure the remote torrent
// is deleted and the local one is marked failed so the arr can re-search.
func (q *QBit) ProcessFiles(torrent *Torrent, debridTorrent *debrid.Torrent, arr *arr.Arr, isSymlink bool) {
	// Poll the provider every 5s until the remote download completes.
	for debridTorrent.Status != "downloaded" {
		progress := debridTorrent.Progress
		q.logger.Printf("%s Download Progress: %.2f%%", debridTorrent.Debrid.GetName(), progress)
		time.Sleep(5 * time.Second)
		dbT, err := debridTorrent.Debrid.CheckStatus(debridTorrent, isSymlink)
		if err != nil {
			// Give up: remove the remote torrent, flag ours as failed,
			// and nudge the arr (refresh error deliberately ignored).
			q.logger.Printf("Error checking status: %v", err)
			go debridTorrent.Delete()
			q.MarkAsFailed(torrent)
			_ = arr.Refresh()
			return
		}
		// Replace our snapshot with the freshly fetched state.
		debridTorrent = dbT
		torrent = q.UpdateTorrentMin(torrent, debridTorrent)
	}
	var (
		torrentPath string
		err         error
	)
	debridTorrent.Arr = arr
	if isSymlink {
		torrentPath, err = q.ProcessSymlink(debridTorrent)
	} else {
		torrentPath, err = q.processManualFiles(debridTorrent)
	}
	if err != nil {
		q.MarkAsFailed(torrent)
		go debridTorrent.Delete()
		q.logger.Printf("Error: %v", err)
		return
	}
	torrent.TorrentPath = filepath.Base(torrentPath)
	q.UpdateTorrent(torrent, debridTorrent)
	_ = arr.Refresh()
}
// MarkAsFailed flags the torrent as errored, persists the change,
// and returns the same (mutated) torrent for chaining.
func (q *QBit) MarkAsFailed(t *Torrent) *Torrent {
	t.State = "error"
	q.Storage.AddOrUpdate(t)
	return t
}
// UpdateTorrentMin copies the cheap-to-derive fields (identity, sizes,
// progress, speed, ETA, paths) from a debrid torrent onto the local record
// and returns it. A nil debridTorrent leaves the record untouched.
func (q *QBit) UpdateTorrentMin(t *Torrent, debridTorrent *debrid.Torrent) *Torrent {
	if debridTorrent == nil {
		return t
	}
	addedOn, parseErr := time.Parse(time.RFC3339, debridTorrent.Added)
	if parseErr != nil {
		// Provider timestamp was unparsable; use "now" as a best effort.
		addedOn = time.Now()
	}
	totalSize := debridTorrent.Bytes
	// A zero progress is treated as complete (provider omitted the field).
	fraction := cmp.Or(debridTorrent.Progress, 100) / 100.0
	sizeCompleted := int64(float64(totalSize) * fraction)
	speed := debridTorrent.Speed
	eta := 0
	if speed != 0 {
		eta = int(totalSize-sizeCompleted) / speed
	}
	t.ID = debridTorrent.Id
	t.Name = debridTorrent.Name
	t.AddedOn = addedOn.Unix()
	t.DebridTorrent = debridTorrent
	t.Size = totalSize
	t.Completed = sizeCompleted
	t.Downloaded = sizeCompleted
	t.DownloadedSession = sizeCompleted
	t.Uploaded = sizeCompleted
	t.UploadedSession = sizeCompleted
	t.AmountLeft = totalSize - sizeCompleted
	t.Progress = fraction
	t.Eta = eta
	t.Dlspeed = speed
	t.Upspeed = speed
	t.SavePath = filepath.Join(q.DownloadFolder, t.Category) + string(os.PathSeparator)
	t.ContentPath = filepath.Join(t.SavePath, t.Name) + string(os.PathSeparator)
	return t
}
// UpdateTorrent fully refreshes a local torrent from its debrid provider,
// fills in the mount-derived paths, and — when the torrent is not yet
// ready — re-polls every 2 seconds until ready, giving up after 10 minutes.
//
// Fixes over the previous version:
//   - `db := debridTorrent.Debrid` ran BEFORE the nil check on
//     debridTorrent, so the nil-recovery path always panicked. The nil
//     case now falls back to the configured debrid service.
//   - the 10-minute `time.After` lived inside the select, so it was
//     recreated on every 2s tick and could never fire; it is now created
//     once before the loop.
//   - a nil result from the re-fetch for non-downloaded torrents is now
//     guarded before GetMountFolder dereferences it.
func (q *QBit) UpdateTorrent(t *Torrent, debridTorrent *debrid.Torrent) *Torrent {
	if debridTorrent == nil && t.ID != "" {
		debridTorrent, _ = q.Debrid.GetTorrent(t.ID)
	}
	if debridTorrent == nil {
		q.logger.Printf("Torrent with ID %s not found", t.ID)
		return t
	}
	db := debridTorrent.Debrid
	if debridTorrent.Status != "downloaded" {
		refreshed, _ := db.GetTorrent(t.ID)
		if refreshed != nil {
			debridTorrent = refreshed
		}
	}
	rcloneMount := db.GetMountPath()
	if t.TorrentPath == "" {
		t.TorrentPath = filepath.Base(debridTorrent.GetMountFolder(rcloneMount))
	}
	savePath := filepath.Join(q.DownloadFolder, t.Category) + string(os.PathSeparator)
	torrentPath := filepath.Join(savePath, t.TorrentPath) + string(os.PathSeparator)
	t = q.UpdateTorrentMin(t, debridTorrent)
	t.ContentPath = torrentPath
	if t.IsReady() {
		t.State = "pausedUP"
		q.Storage.Update(t)
		return t
	}
	ticker := time.NewTicker(2 * time.Second)
	defer ticker.Stop()
	timeout := time.After(10 * time.Minute)
	for {
		select {
		case <-ticker.C:
			if t.IsReady() {
				t.State = "pausedUP"
				q.Storage.Update(t)
				return t
			}
			t = q.UpdateTorrent(t, debridTorrent)
		case <-timeout:
			return t
		}
	}
}
// ResumeTorrent is a stub: it performs no work and always reports success.
func (q *QBit) ResumeTorrent(t *Torrent) bool {
	return true
}

// PauseTorrent is a stub: it performs no work and always reports success.
func (q *QBit) PauseTorrent(t *Torrent) bool {
	return true
}

// RefreshTorrent is a stub: it performs no work and always reports success.
// NOTE(review): the recheck handler launches this in a goroutine expecting
// a real refresh — confirm whether an implementation is pending.
func (q *QBit) RefreshTorrent(t *Torrent) bool {
	return true
}
// GetTorrentProperties maps a local torrent onto qBittorrent's torrent
// properties payload. Peer/seed/connection figures are hard-coded
// plausible values, since there is no real swarm behind a debrid torrent.
func (q *QBit) GetTorrentProperties(t *Torrent) *TorrentProperties {
	return &TorrentProperties{
		AdditionDate:           t.AddedOn,
		Comment:                "Debrid Blackhole <https://github.com/sirrobot01/debrid-blackhole>",
		CreatedBy:              "Debrid Blackhole <https://github.com/sirrobot01/debrid-blackhole>",
		CreationDate:           t.AddedOn,
		DlLimit:                -1, // -1 == unlimited in the qBittorrent API
		UpLimit:                -1,
		DlSpeed:                t.Dlspeed,
		UpSpeed:                t.Upspeed,
		TotalSize:              t.Size,
		TotalUploaded:          t.Uploaded,
		TotalDownloaded:        t.Downloaded,
		TotalUploadedSession:   t.UploadedSession,
		TotalDownloadedSession: t.DownloadedSession,
		LastSeen:               time.Now().Unix(),
		NbConnectionsLimit:     100,
		Peers:                  0,
		PeersTotal:             2,
		SeedingTime:            1,
		Seeds:                  100,
		ShareRatio:             100,
	}
}
// GetTorrentFiles converts the attached debrid torrent's file list into
// qBittorrent file entries. When no debrid torrent is attached it returns
// an empty, non-nil slice so callers can always serialize a JSON array.
// (Improvement: the slice is now pre-sized to the file count, avoiding
// repeated growth copies in append.)
func (q *QBit) GetTorrentFiles(t *Torrent) []*TorrentFile {
	if t.DebridTorrent == nil {
		return []*TorrentFile{}
	}
	files := make([]*TorrentFile, 0, len(t.DebridTorrent.Files))
	for index, file := range t.DebridTorrent.Files {
		files = append(files, &TorrentFile{
			Index: index,
			Name:  file.Path,
			Size:  file.Size,
		})
	}
	return files
}

View File

@@ -1,29 +1,13 @@
package qbit
package shared
import (
"encoding/json"
"goBlack/common"
"goBlack/pkg/debrid"
"net/http"
"path/filepath"
"sync"
"time"
)
//func generateSID() (string, error) {
// bytes := make([]byte, sidLength)
// if _, err := rand.Read(bytes); err != nil {
// return "", err
// }
// return hex.EncodeToString(bytes), nil
//}
func JSONResponse(w http.ResponseWriter, data interface{}, code int) {
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(code)
json.NewEncoder(w).Encode(data)
}
func checkFileLoop(wg *sync.WaitGroup, dir string, file debrid.TorrentFile, ready chan<- debrid.TorrentFile) {
defer wg.Done()
ticker := time.NewTicker(1 * time.Second) // Check every second

View File

@@ -1,8 +1,7 @@
package qbit
package shared
import (
"context"
"goBlack/pkg/debrid"
"time"
)
@@ -20,7 +19,7 @@ func (q *QBit) StartRefreshWorker(ctx context.Context) {
q.logger.Println("Qbit Refresh Worker stopped")
return
case <-refreshTicker.C:
torrents := q.storage.GetAll("", "", nil)
torrents := q.Storage.GetAll("", "", nil)
if len(torrents) > 0 {
q.RefreshArrs()
}
@@ -29,18 +28,10 @@ func (q *QBit) StartRefreshWorker(ctx context.Context) {
}
func (q *QBit) RefreshArrs() {
q.arrs.Range(func(key, value interface{}) bool {
host, ok := key.(string)
token, ok2 := value.(string)
if !ok || !ok2 {
return true
for _, arr := range q.Arrs.GetAll() {
err := arr.Refresh()
if err != nil {
return
}
arr := &debrid.Arr{
Name: "",
Token: token,
Host: host,
}
q.RefreshArr(arr)
return true
})
}
}

View File

@@ -1,158 +0,0 @@
package qbit
import (
"cmp"
"goBlack/pkg/debrid"
"os"
"path/filepath"
"time"
)
// All torrent related helpers goes here
func (q *QBit) MarkAsFailed(t *Torrent) *Torrent {
t.State = "error"
q.storage.AddOrUpdate(t)
return t
}
// UpdateTorrentMin copies the current state of a debrid torrent onto the
// qBittorrent view t: identity, timestamps, size/progress counters, transfer
// speeds, ETA, and the save/content paths derived from q.DownloadFolder and
// the torrent's category. It returns t unchanged when debridTorrent is nil.
func (q *QBit) UpdateTorrentMin(t *Torrent, debridTorrent *debrid.Torrent) *Torrent {
	if debridTorrent == nil {
		return t
	}
	addedOn, err := time.Parse(time.RFC3339, debridTorrent.Added)
	if err != nil {
		// Fall back to "now" when the debrid API returns an unparsable date.
		addedOn = time.Now()
	}
	totalSize := float64(debridTorrent.Bytes)
	// A zero progress value from the API is treated as fully downloaded.
	progress := cmp.Or(debridTorrent.Progress, 100.0) / 100.0
	sizeCompleted := int64(totalSize * progress)

	// The original guarded this assignment with `if Speed != 0`, which was a
	// no-op (assigning zero when zero); a plain copy is equivalent.
	speed := debridTorrent.Speed
	var eta int64
	if speed != 0 {
		eta = int64((totalSize - float64(sizeCompleted)) / float64(speed))
	}

	t.ID = debridTorrent.Id
	t.Name = debridTorrent.Name
	t.AddedOn = addedOn.Unix()
	t.DebridTorrent = debridTorrent
	t.Size = int64(totalSize)
	t.Completed = sizeCompleted
	t.Downloaded = sizeCompleted
	t.DownloadedSession = sizeCompleted
	t.Uploaded = sizeCompleted
	t.UploadedSession = sizeCompleted
	t.AmountLeft = int64(totalSize) - sizeCompleted
	t.Progress = float32(progress)
	t.Eta = eta
	t.Dlspeed = speed
	t.Upspeed = speed
	// Paths carry a trailing separator to match qBittorrent's API output.
	t.SavePath = filepath.Join(q.DownloadFolder, t.Category) + string(os.PathSeparator)
	t.ContentPath = filepath.Join(t.SavePath, t.Name) + string(os.PathSeparator)
	return t
}
// UpdateTorrent synchronizes t with its debrid-side counterpart, then blocks
// (polling every 2 seconds) until the torrent reports ready or a 10-minute
// deadline elapses. A ready torrent is marked "pausedUP" and persisted.
func (q *QBit) UpdateTorrent(t *Torrent, debridTorrent *debrid.Torrent) *Torrent {
	rcLoneMount := q.debrid.GetMountPath()
	if debridTorrent == nil && t.ID != "" {
		debridTorrent, _ = q.debrid.GetTorrent(t.ID)
	}
	if debridTorrent == nil {
		q.logger.Printf("Torrent with ID %s not found in %s", t.ID, q.debrid.GetName())
		return t
	}
	if debridTorrent.Status != "downloaded" {
		// Still in-flight on the debrid side; re-fetch for fresh progress.
		debridTorrent, _ = q.debrid.GetTorrent(t.ID)
	}
	if t.TorrentPath == "" {
		t.TorrentPath = filepath.Base(debridTorrent.GetMountFolder(rcLoneMount))
	}
	savePath := filepath.Join(q.DownloadFolder, t.Category) + string(os.PathSeparator)
	torrentPath := filepath.Join(savePath, t.TorrentPath) + string(os.PathSeparator)
	t = q.UpdateTorrentMin(t, debridTorrent)
	t.ContentPath = torrentPath

	if t.IsReady() {
		t.State = "pausedUP"
		q.storage.Update(t)
		return t
	}

	ticker := time.NewTicker(2 * time.Second)
	defer ticker.Stop()
	// BUG FIX: the deadline must be a single timer created outside the loop.
	// The original used `case <-time.After(10 * time.Minute)` inside the
	// select, which allocated a fresh timer on every 2-second ticker fire,
	// so the 10-minute timeout could never expire.
	timeout := time.NewTimer(10 * time.Minute)
	defer timeout.Stop()
	for {
		select {
		case <-ticker.C:
			if t.IsReady() {
				t.State = "pausedUP"
				q.storage.Update(t)
				return t
			}
			// NOTE(review): this recursive call re-enters the whole wait
			// loop; presumably intended as a state refresh — confirm before
			// flattening it into an in-loop update.
			t = q.UpdateTorrent(t, debridTorrent)
		case <-timeout.C:
			return t
		}
	}
}
// ResumeTorrent is a stub that always reports success; there is no local
// transfer to resume for a debrid-backed torrent.
func (q *QBit) ResumeTorrent(t *Torrent) bool {
	return true
}
// PauseTorrent is a stub that always reports success; there is no local
// transfer to pause for a debrid-backed torrent.
func (q *QBit) PauseTorrent(t *Torrent) bool {
	return true
}
// RefreshTorrent is a stub that always reports success; no per-torrent
// refresh is performed here.
func (q *QBit) RefreshTorrent(t *Torrent) bool {
	return true
}
// GetTorrentProperties builds the qBittorrent "properties" payload for t,
// populated from the torrent's cached transfer statistics plus fixed
// placeholder peer/seed values expected by API clients.
func (q *QBit) GetTorrentProperties(t *Torrent) *TorrentProperties {
	props := &TorrentProperties{
		// Identity and provenance.
		AdditionDate: t.AddedOn,
		CreationDate: t.AddedOn,
		Comment:      "Debrid Blackhole <https://github.com/sirrobot01/debrid-blackhole>",
		CreatedBy:    "Debrid Blackhole <https://github.com/sirrobot01/debrid-blackhole>",
		// Transfer statistics mirrored from the torrent record.
		TotalSize:              t.Size,
		TotalDownloaded:        t.Downloaded,
		TotalDownloadedSession: t.DownloadedSession,
		TotalUploaded:          t.Uploaded,
		TotalUploadedSession:   t.UploadedSession,
		DlSpeed:                t.Dlspeed,
		UpSpeed:                t.Upspeed,
		DlLimit:                -1,
		UpLimit:                -1,
		LastSeen:               time.Now().Unix(),
		// Fixed placeholder swarm values.
		NbConnectionsLimit: 100,
		Peers:              0,
		PeersTotal:         2,
		SeedingTime:        1,
		Seeds:              100,
		ShareRatio:         100,
	}
	return props
}
// GetTorrentFiles returns the qBittorrent file listing for t, mapped from the
// attached debrid torrent's files. It returns an empty (non-nil) slice when
// no debrid torrent is attached, matching the original behavior.
func (q *QBit) GetTorrentFiles(t *Torrent) []*TorrentFile {
	if t.DebridTorrent == nil {
		return make([]*TorrentFile, 0)
	}
	// Pre-size to the known file count to avoid repeated slice growth.
	files := make([]*TorrentFile, 0, len(t.DebridTorrent.Files))
	for index, file := range t.DebridTorrent.Files {
		files = append(files, &TorrentFile{
			Index: index,
			Name:  file.Path,
			Size:  file.Size,
		})
	}
	return files
}