Compare commits
12 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
f6c6144601 | ||
|
|
ff74e279d9 | ||
|
|
ba147ac56c | ||
|
|
01981114cb | ||
|
|
2ec0354881 | ||
|
|
329e4c60f5 | ||
|
|
d5e07dc961 | ||
|
|
f622cbfe63 | ||
|
|
9511f3e99e | ||
|
|
60c6cb32d3 | ||
|
|
d405e0d8e0 | ||
|
|
74791d6e62 |
0
.github/workflows/docker-publish.yml
vendored
0
.github/workflows/docker-publish.yml
vendored
70
CHANGELOG.md
Normal file
70
CHANGELOG.md
Normal file
@@ -0,0 +1,70 @@
|
|||||||
|
#### 0.1.0
|
||||||
|
- Initial Release
|
||||||
|
- Added Real Debrid Support
|
||||||
|
- Added Arrs Support
|
||||||
|
- Added Proxy Support
|
||||||
|
- Added Basic Authentication for Proxy
|
||||||
|
- Added Rate Limiting for Debrid Providers
|
||||||
|
|
||||||
|
#### 0.1.1
|
||||||
|
- Added support for "No Blackhole" for Arrs
|
||||||
|
- Added support for "Cached Only" for Proxy
|
||||||
|
- Bug Fixes
|
||||||
|
|
||||||
|
#### 0.1.2
|
||||||
|
- Bug fixes
|
||||||
|
- Code cleanup
|
||||||
|
- Get available hashes at once
|
||||||
|
|
||||||
|
#### 0.1.3
|
||||||
|
|
||||||
|
- Searching for infohashes in the xml description/summary/comments
|
||||||
|
- Added local cache support
|
||||||
|
- Added max cache size
|
||||||
|
- Rewrite blackhole.go
|
||||||
|
- Bug fixes
|
||||||
|
- Fixed indexer getting disabled
|
||||||
|
- Fixed blackhole not working
|
||||||
|
|
||||||
|
#### 0.1.4
|
||||||
|
|
||||||
|
- Rewrote Report log
|
||||||
|
- Fix YTS, 1337x not grabbing infohash
|
||||||
|
- Fix Torrent symlink bug
|
||||||
|
|
||||||
|
|
||||||
|
#### 0.2.0-beta
|
||||||
|
|
||||||
|
- Switch to QbitTorrent API instead of Blackhole
|
||||||
|
- Rewrote the whole codebase
|
||||||
|
|
||||||
|
|
||||||
|
#### 0.2.0
|
||||||
|
- Implement 0.2.0-beta changes
|
||||||
|
- Removed Blackhole
|
||||||
|
- Added QbitTorrent API
|
||||||
|
- Cleaned up the code
|
||||||
|
|
||||||
|
#### 0.2.1
|
||||||
|
|
||||||
|
- Fix Uncached torrents not being downloaded/downloaded
|
||||||
|
- Minor bug fixed
|
||||||
|
- Fix Race condition in the cache and file system
|
||||||
|
|
||||||
|
#### 0.2.2
|
||||||
|
- Fix name mismatch in the cache
|
||||||
|
- Fix directory mapping with mounts
|
||||||
|
- Add Support for refreshing the *arrs
|
||||||
|
|
||||||
|
#### 0.2.3
|
||||||
|
|
||||||
|
- Delete uncached items from RD
|
||||||
|
- Fail if the torrent is not cached(optional)
|
||||||
|
- Fix cache not being updated
|
||||||
|
|
||||||
|
#### 0.2.4
|
||||||
|
|
||||||
|
- Add file download support(Sequential Download)
|
||||||
|
- Fix http handler error
|
||||||
|
- Fix *arrs map failing concurrently
|
||||||
|
- Fix cache not being updated
|
||||||
@@ -18,6 +18,7 @@ ADD . .
|
|||||||
RUN CGO_ENABLED=0 GOOS=$(echo $TARGETPLATFORM | cut -d '/' -f1) GOARCH=$(echo $TARGETPLATFORM | cut -d '/' -f2) go build -o /blackhole
|
RUN CGO_ENABLED=0 GOOS=$(echo $TARGETPLATFORM | cut -d '/' -f1) GOARCH=$(echo $TARGETPLATFORM | cut -d '/' -f2) go build -o /blackhole
|
||||||
|
|
||||||
FROM scratch
|
FROM scratch
|
||||||
|
COPY --from=builder /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
|
||||||
COPY --from=builder /blackhole /blackhole
|
COPY --from=builder /blackhole /blackhole
|
||||||
|
|
||||||
EXPOSE 8181
|
EXPOSE 8181
|
||||||
|
|||||||
96
README.md
96
README.md
@@ -1,13 +1,17 @@
|
|||||||
### GoBlackHole(with Debrid Proxy Support)
|
### GoBlackHole(with Debrid Proxy Support)
|
||||||
|
|
||||||
This is a Golang implementation go Torrent Blackhole with a **Real Debrid Proxy Support**.
|
This is a Golang implementation go Torrent QbitTorrent with a **Real Debrid Proxy Support**.
|
||||||
|
|
||||||
#### Uses
|
#### Uses
|
||||||
- Torrent Blackhole that supports the Arrs.
|
- Mock Qbittorent API that supports the Arrs(Sonarr, Radarr, etc)
|
||||||
- Proxy support for the Arrs
|
- Proxy support for the Arrs
|
||||||
|
|
||||||
The proxy is useful in filtering out un-cached Real Debrid torrents
|
The proxy is useful in filtering out un-cached Real Debrid torrents
|
||||||
|
|
||||||
|
### Changelog
|
||||||
|
|
||||||
|
- View the [CHANGELOG.md](CHANGELOG.md) for the latest changes
|
||||||
|
|
||||||
|
|
||||||
#### Installation
|
#### Installation
|
||||||
##### Docker Compose
|
##### Docker Compose
|
||||||
@@ -15,8 +19,11 @@ The proxy is useful in filtering out un-cached Real Debrid torrents
|
|||||||
version: '3.7'
|
version: '3.7'
|
||||||
services:
|
services:
|
||||||
blackhole:
|
blackhole:
|
||||||
image: cy01/blackhole:latest
|
image: cy01/blackhole:latest # or cy01/blackhole:beta
|
||||||
container_name: blackhole
|
container_name: blackhole
|
||||||
|
ports:
|
||||||
|
- "8282:8282" # qBittorrent
|
||||||
|
- "8181:8181" # Proxy
|
||||||
user: "1000:1000"
|
user: "1000:1000"
|
||||||
volumes:
|
volumes:
|
||||||
- ./logs:/app/logs
|
- ./logs:/app/logs
|
||||||
@@ -27,6 +34,8 @@ services:
|
|||||||
- PUID=1000
|
- PUID=1000
|
||||||
- PGID=1000
|
- PGID=1000
|
||||||
- UMASK=002
|
- UMASK=002
|
||||||
|
- QBIT_PORT=8282 # qBittorrent Port. This is optional. You can set this in the config file
|
||||||
|
- PORT=8181 # Proxy Port. This is optional. You can set this in the config file
|
||||||
restart: unless-stopped
|
restart: unless-stopped
|
||||||
|
|
||||||
```
|
```
|
||||||
@@ -48,37 +57,44 @@ Download the binary from the releases page and run it with the config file.
|
|||||||
"folder": "data/realdebrid/torrents/",
|
"folder": "data/realdebrid/torrents/",
|
||||||
"rate_limit": "250/minute"
|
"rate_limit": "250/minute"
|
||||||
},
|
},
|
||||||
"arrs": [
|
|
||||||
{
|
|
||||||
"watch_folder": "data/sonarr/",
|
|
||||||
"completed_folder": "data/sonarr/completed/",
|
|
||||||
"token": "sonarr_api_key",
|
|
||||||
"url": "http://localhost:8787"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"watch_folder": "data/radarr/",
|
|
||||||
"completed_folder": "data/radarr/completed/",
|
|
||||||
"token": "radarr_api_key",
|
|
||||||
"url": "http://localhost:7878"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"watch_folder": "data/radarr4k/",
|
|
||||||
"completed_folder": "data/radarr4k/completed/",
|
|
||||||
"token": "radarr4k_api_key",
|
|
||||||
"url": "http://localhost:7878"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"proxy": {
|
"proxy": {
|
||||||
"enabled": true,
|
"enabled": true,
|
||||||
"port": "8181",
|
"port": "8181",
|
||||||
"debug": false,
|
"debug": false,
|
||||||
"username": "username",
|
"username": "username",
|
||||||
"password": "password"
|
"password": "password",
|
||||||
|
"cached_only": true
|
||||||
|
},
|
||||||
|
"max_cache_size": 1000,
|
||||||
|
"qbittorrent": {
|
||||||
|
"port": "8282",
|
||||||
|
"username": "admin", // deprecated
|
||||||
|
"password": "admin", // deprecated
|
||||||
|
"download_folder": "/media/symlinks/",
|
||||||
|
"categories": ["sonarr", "radarr"],
|
||||||
|
"refresh_interval": 5 // in seconds
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Proxy
|
#### Config Notes
|
||||||
|
##### Debrid Config
|
||||||
|
- This config key is important as it's used for both Blackhole and Proxy
|
||||||
|
|
||||||
|
##### Proxy Config
|
||||||
|
- The `enabled` key is used to enable the proxy
|
||||||
|
- The `port` key is the port the proxy will listen on
|
||||||
|
- The `debug` key is used to enable debug logs
|
||||||
|
- The `username` and `password` keys are used for basic authentication
|
||||||
|
- The `cached_only` means only cached torrents will be returned
|
||||||
|
|
||||||
|
|
||||||
|
##### Qbittorrent Config
|
||||||
|
- The `port` key is the port the qBittorrent will listen on
|
||||||
|
- The `download_folder` is the folder where the torrents will be downloaded. e.g `/media/symlinks/`
|
||||||
|
- The `categories` key is used to filter out torrents based on the category. e.g `sonarr`, `radarr`
|
||||||
|
|
||||||
|
### Proxy
|
||||||
|
|
||||||
The proxy is useful in filtering out un-cached Real Debrid torrents.
|
The proxy is useful in filtering out un-cached Real Debrid torrents.
|
||||||
The proxy is a simple HTTP proxy that requires basic authentication. The proxy can be enabled by setting the `proxy.enabled` to `true` in the config file.
|
The proxy is a simple HTTP proxy that requires basic authentication. The proxy can be enabled by setting the `proxy.enabled` to `true` in the config file.
|
||||||
@@ -94,7 +110,33 @@ Setting Up Proxy in Arr
|
|||||||
- Password: `password` # or the password set in the config file
|
- Password: `password` # or the password set in the config file
|
||||||
- Bypass Proxy for Local Addresses -> `No`
|
- Bypass Proxy for Local Addresses -> `No`
|
||||||
|
|
||||||
|
### Qbittorrent
|
||||||
|
|
||||||
|
The qBittorrent is a mock qBittorrent API that supports the Arrs(Sonarr, Radarr, etc).
|
||||||
|
|
||||||
|
Setting Up Qbittorrent in Arr
|
||||||
|
|
||||||
|
- Sonarr/Radarr
|
||||||
|
- Settings -> Download Client -> Add Client -> qBittorrent
|
||||||
|
- Host: `localhost` # or the IP of the server
|
||||||
|
- Port: `8282` # or the port set in the config file/ docker-compose env
|
||||||
|
- Username: `http://sonarr:8989` # Your arr host with http/https
|
||||||
|
- Password: `sonarr_token` # Your arr token
|
||||||
|
- Category: e.g `sonarr`, `radarr`
|
||||||
|
- Use SSL -> `No`
|
||||||
|
- Sequential Download -> `No`|`Yes` (If you want to download the torrents locally instead of symlink)
|
||||||
|
- Test
|
||||||
|
- Save
|
||||||
|
|
||||||
### TODO
|
### TODO
|
||||||
- [ ] Add more Debrid Providers
|
- [ ] A proper name!!!!
|
||||||
|
- [ ] Debrid
|
||||||
|
- [ ] Add more Debrid Providers
|
||||||
|
|
||||||
|
- [ ] Proxy
|
||||||
- [ ] Add more Proxy features
|
- [ ] Add more Proxy features
|
||||||
- [ ] Add more tests
|
|
||||||
|
- [ ] Qbittorrent
|
||||||
|
- [ ] Add more Qbittorrent features
|
||||||
|
- [ ] Persist torrents on restart/server crash
|
||||||
|
- [ ] Add tests
|
||||||
169
cmd/blackhole.go
169
cmd/blackhole.go
@@ -1,169 +0,0 @@
|
|||||||
package cmd
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"github.com/fsnotify/fsnotify"
|
|
||||||
"goBlack/common"
|
|
||||||
"goBlack/debrid"
|
|
||||||
"log"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
func fileReady(path string) bool {
|
|
||||||
_, err := os.Stat(path)
|
|
||||||
return !os.IsNotExist(err) // Returns true if the file exists
|
|
||||||
}
|
|
||||||
|
|
||||||
func checkFileLoop(wg *sync.WaitGroup, dir string, file debrid.TorrentFile, ready chan<- debrid.TorrentFile) {
|
|
||||||
defer wg.Done()
|
|
||||||
ticker := time.NewTicker(1 * time.Second) // Check every second
|
|
||||||
defer ticker.Stop()
|
|
||||||
path := filepath.Join(dir, file.Path)
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case <-ticker.C:
|
|
||||||
if fileReady(path) {
|
|
||||||
ready <- file
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func ProcessFiles(arr *debrid.Arr, torrent *debrid.Torrent) {
|
|
||||||
var wg sync.WaitGroup
|
|
||||||
files := torrent.Files
|
|
||||||
ready := make(chan debrid.TorrentFile, len(files))
|
|
||||||
|
|
||||||
log.Println("Checking files...")
|
|
||||||
|
|
||||||
for _, file := range files {
|
|
||||||
wg.Add(1)
|
|
||||||
go checkFileLoop(&wg, arr.Debrid.Folder, file, ready)
|
|
||||||
}
|
|
||||||
|
|
||||||
go func() {
|
|
||||||
wg.Wait()
|
|
||||||
close(ready)
|
|
||||||
}()
|
|
||||||
|
|
||||||
for r := range ready {
|
|
||||||
log.Println("File is ready:", r.Name)
|
|
||||||
CreateSymLink(arr, torrent)
|
|
||||||
|
|
||||||
}
|
|
||||||
go torrent.Cleanup(true)
|
|
||||||
fmt.Printf("%s downloaded", torrent.Name)
|
|
||||||
}
|
|
||||||
|
|
||||||
func CreateSymLink(config *debrid.Arr, torrent *debrid.Torrent) {
|
|
||||||
path := filepath.Join(config.CompletedFolder, torrent.Folder)
|
|
||||||
err := os.MkdirAll(path, os.ModePerm)
|
|
||||||
if err != nil {
|
|
||||||
log.Printf("Failed to create directory: %s\n", path)
|
|
||||||
}
|
|
||||||
for _, file := range torrent.Files {
|
|
||||||
// Combine the directory and filename to form a full path
|
|
||||||
fullPath := filepath.Join(config.CompletedFolder, file.Path)
|
|
||||||
|
|
||||||
// Create a symbolic link if file doesn't exist
|
|
||||||
_ = os.Symlink(filepath.Join(config.Debrid.Folder, file.Path), fullPath)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func watchFiles(watcher *fsnotify.Watcher, events map[string]time.Time) {
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case event, ok := <-watcher.Events:
|
|
||||||
if !ok {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if event.Op&fsnotify.Write == fsnotify.Write {
|
|
||||||
if filepath.Ext(event.Name) == ".torrent" || filepath.Ext(event.Name) == ".magnet" {
|
|
||||||
events[event.Name] = time.Now()
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
case err, ok := <-watcher.Errors:
|
|
||||||
if !ok {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
log.Println("ERROR:", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func processFilesDebounced(arr *debrid.Arr, db debrid.Service, events map[string]time.Time, debouncePeriod time.Duration) {
|
|
||||||
ticker := time.NewTicker(1 * time.Second) // Check every second
|
|
||||||
defer ticker.Stop()
|
|
||||||
|
|
||||||
for range ticker.C {
|
|
||||||
for file, lastEventTime := range events {
|
|
||||||
if time.Since(lastEventTime) >= debouncePeriod {
|
|
||||||
log.Printf("Torrent file detected: %s", file)
|
|
||||||
// Process the torrent file
|
|
||||||
torrent, err := db.Process(arr, file)
|
|
||||||
if err != nil && torrent != nil {
|
|
||||||
// remove torrent file
|
|
||||||
torrent.Cleanup(true)
|
|
||||||
_ = torrent.MarkAsFailed()
|
|
||||||
log.Printf("Error processing torrent file: %s", err)
|
|
||||||
}
|
|
||||||
if err == nil && torrent != nil && len(torrent.Files) > 0 {
|
|
||||||
go ProcessFiles(arr, torrent)
|
|
||||||
}
|
|
||||||
delete(events, file) // remove file from channel
|
|
||||||
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func StartArr(conf *debrid.Arr, db debrid.Service) {
|
|
||||||
log.Printf("Watching: %s", conf.WatchFolder)
|
|
||||||
w, err := fsnotify.NewWatcher()
|
|
||||||
if err != nil {
|
|
||||||
log.Println(err)
|
|
||||||
}
|
|
||||||
defer func(w *fsnotify.Watcher) {
|
|
||||||
err := w.Close()
|
|
||||||
if err != nil {
|
|
||||||
log.Println(err)
|
|
||||||
}
|
|
||||||
}(w)
|
|
||||||
events := make(map[string]time.Time)
|
|
||||||
|
|
||||||
go watchFiles(w, events)
|
|
||||||
if err = w.Add(conf.WatchFolder); err != nil {
|
|
||||||
log.Println("Error Watching folder:", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
processFilesDebounced(conf, db, events, 1*time.Second)
|
|
||||||
}
|
|
||||||
|
|
||||||
func StartBlackhole(config *common.Config, deb debrid.Service) {
|
|
||||||
var wg sync.WaitGroup
|
|
||||||
for _, conf := range config.Arrs {
|
|
||||||
wg.Add(1)
|
|
||||||
defer wg.Done()
|
|
||||||
headers := map[string]string{
|
|
||||||
"X-Api-Key": conf.Token,
|
|
||||||
}
|
|
||||||
client := common.NewRLHTTPClient(nil, headers)
|
|
||||||
|
|
||||||
arr := &debrid.Arr{
|
|
||||||
Debrid: config.Debrid,
|
|
||||||
WatchFolder: conf.WatchFolder,
|
|
||||||
CompletedFolder: conf.CompletedFolder,
|
|
||||||
Token: conf.Token,
|
|
||||||
URL: conf.URL,
|
|
||||||
Client: client,
|
|
||||||
}
|
|
||||||
go StartArr(arr, deb)
|
|
||||||
}
|
|
||||||
wg.Wait()
|
|
||||||
}
|
|
||||||
34
cmd/main.go
34
cmd/main.go
@@ -1,18 +1,40 @@
|
|||||||
package cmd
|
package cmd
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"cmp"
|
||||||
"goBlack/common"
|
"goBlack/common"
|
||||||
"goBlack/debrid"
|
"goBlack/pkg/debrid"
|
||||||
"log"
|
"goBlack/pkg/proxy"
|
||||||
|
"goBlack/pkg/qbit"
|
||||||
|
"sync"
|
||||||
)
|
)
|
||||||
|
|
||||||
func Start(config *common.Config) {
|
func Start(config *common.Config) {
|
||||||
|
maxCacheSize := cmp.Or(config.MaxCacheSize, 1000)
|
||||||
|
cache := common.NewCache(maxCacheSize)
|
||||||
|
|
||||||
|
deb := debrid.NewDebrid(config.Debrid, cache)
|
||||||
|
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
|
||||||
log.Print("[*] BlackHole running")
|
|
||||||
deb := debrid.NewDebrid(config.Debrid)
|
|
||||||
if config.Proxy.Enabled {
|
if config.Proxy.Enabled {
|
||||||
go StartProxy(config, deb)
|
p := proxy.NewProxy(*config, deb, cache)
|
||||||
|
wg.Add(1)
|
||||||
|
go func() {
|
||||||
|
defer wg.Done()
|
||||||
|
p.Start()
|
||||||
|
}()
|
||||||
}
|
}
|
||||||
StartBlackhole(config, deb)
|
if config.QBitTorrent.Port != "" {
|
||||||
|
qb := qbit.NewQBit(config, deb, cache)
|
||||||
|
wg.Add(1)
|
||||||
|
go func() {
|
||||||
|
defer wg.Done()
|
||||||
|
qb.Start()
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Wait indefinitely
|
||||||
|
wg.Wait()
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|||||||
272
cmd/proxy.go
272
cmd/proxy.go
@@ -1,272 +0,0 @@
|
|||||||
package cmd
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"cmp"
|
|
||||||
"encoding/xml"
|
|
||||||
"fmt"
|
|
||||||
"github.com/elazarl/goproxy"
|
|
||||||
"github.com/elazarl/goproxy/ext/auth"
|
|
||||||
"github.com/valyala/fastjson"
|
|
||||||
"goBlack/common"
|
|
||||||
"goBlack/debrid"
|
|
||||||
"io"
|
|
||||||
"log"
|
|
||||||
"net/http"
|
|
||||||
"os"
|
|
||||||
"regexp"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
)
|
|
||||||
|
|
||||||
type RSS struct {
|
|
||||||
XMLName xml.Name `xml:"rss"`
|
|
||||||
Version string `xml:"version,attr"`
|
|
||||||
Channel Channel `xml:"channel"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type Channel struct {
|
|
||||||
XMLName xml.Name `xml:"channel"`
|
|
||||||
Title string `xml:"title"`
|
|
||||||
AtomLink AtomLink `xml:"link"`
|
|
||||||
Items []Item `xml:"item"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type AtomLink struct {
|
|
||||||
XMLName xml.Name `xml:"link"`
|
|
||||||
Rel string `xml:"rel,attr"`
|
|
||||||
Type string `xml:"type,attr"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type Item struct {
|
|
||||||
XMLName xml.Name `xml:"item"`
|
|
||||||
Title string `xml:"title"`
|
|
||||||
Description string `xml:"description"`
|
|
||||||
GUID string `xml:"guid"`
|
|
||||||
ProwlarrIndexer ProwlarrIndexer `xml:"prowlarrindexer"`
|
|
||||||
Comments string `xml:"comments"`
|
|
||||||
PubDate string `xml:"pubDate"`
|
|
||||||
Size int64 `xml:"size"`
|
|
||||||
Link string `xml:"link"`
|
|
||||||
Categories []string `xml:"category"`
|
|
||||||
Enclosure Enclosure `xml:"enclosure"`
|
|
||||||
TorznabAttrs []TorznabAttr `xml:"torznab:attr"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type ProwlarrIndexer struct {
|
|
||||||
ID string `xml:"id,attr"`
|
|
||||||
Type string `xml:"type,attr"`
|
|
||||||
Value string `xml:",chardata"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type Enclosure struct {
|
|
||||||
URL string `xml:"url,attr"`
|
|
||||||
Length int64 `xml:"length,attr"`
|
|
||||||
Type string `xml:"type,attr"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type TorznabAttr struct {
|
|
||||||
Name string `xml:"name,attr"`
|
|
||||||
Value string `xml:"value,attr"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type SafeItems struct {
|
|
||||||
mu sync.Mutex
|
|
||||||
Items []Item
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *SafeItems) Add(item Item) {
|
|
||||||
s.mu.Lock()
|
|
||||||
defer s.mu.Unlock()
|
|
||||||
s.Items = append(s.Items, item)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *SafeItems) Get() []Item {
|
|
||||||
s.mu.Lock()
|
|
||||||
defer s.mu.Unlock()
|
|
||||||
return s.Items
|
|
||||||
}
|
|
||||||
|
|
||||||
func ProcessJSONResponse(resp *http.Response, deb debrid.Service) *http.Response {
|
|
||||||
if resp == nil || resp.Body == nil {
|
|
||||||
return resp
|
|
||||||
}
|
|
||||||
|
|
||||||
body, err := io.ReadAll(resp.Body)
|
|
||||||
if err != nil {
|
|
||||||
log.Println("Error reading response body:", err)
|
|
||||||
return resp
|
|
||||||
}
|
|
||||||
err = resp.Body.Close()
|
|
||||||
if err != nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
var p fastjson.Parser
|
|
||||||
v, err := p.ParseBytes(body)
|
|
||||||
if err != nil {
|
|
||||||
// If it's not JSON, return the original response
|
|
||||||
resp.Body = io.NopCloser(bytes.NewReader(body))
|
|
||||||
return resp
|
|
||||||
}
|
|
||||||
|
|
||||||
// Modify the JSON
|
|
||||||
|
|
||||||
// Serialize the modified JSON back to bytes
|
|
||||||
modifiedBody := v.MarshalTo(nil)
|
|
||||||
|
|
||||||
// Set the modified body back to the response
|
|
||||||
resp.Body = io.NopCloser(bytes.NewReader(modifiedBody))
|
|
||||||
resp.ContentLength = int64(len(modifiedBody))
|
|
||||||
resp.Header.Set("Content-Length", string(rune(len(modifiedBody))))
|
|
||||||
|
|
||||||
return resp
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
func ProcessResponse(resp *http.Response, deb debrid.Service) *http.Response {
|
|
||||||
if resp == nil || resp.Body == nil {
|
|
||||||
return resp
|
|
||||||
}
|
|
||||||
contentType := resp.Header.Get("Content-Type")
|
|
||||||
switch contentType {
|
|
||||||
case "application/json":
|
|
||||||
return ProcessJSONResponse(resp, deb)
|
|
||||||
case "application/xml":
|
|
||||||
return ProcessXMLResponse(resp, deb)
|
|
||||||
case "application/rss+xml":
|
|
||||||
return ProcessXMLResponse(resp, deb)
|
|
||||||
default:
|
|
||||||
return resp
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func XMLItemIsCached(item Item, deb debrid.Service) bool {
|
|
||||||
magnetLink := ""
|
|
||||||
infohash := ""
|
|
||||||
|
|
||||||
// Extract magnet link from the link or comments
|
|
||||||
if strings.Contains(item.Link, "magnet:?") {
|
|
||||||
magnetLink = item.Link
|
|
||||||
} else if strings.Contains(item.GUID, "magnet:?") {
|
|
||||||
magnetLink = item.GUID
|
|
||||||
}
|
|
||||||
|
|
||||||
// Extract infohash from <torznab:attr> elements
|
|
||||||
for _, attr := range item.TorznabAttrs {
|
|
||||||
if attr.Name == "infohash" {
|
|
||||||
infohash = attr.Value
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if magnetLink == "" && infohash == "" {
|
|
||||||
// We can't check the availability of the torrent without a magnet link or infohash
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
var magnet *common.Magnet
|
|
||||||
var err error
|
|
||||||
|
|
||||||
if infohash == "" {
|
|
||||||
magnet, err = common.GetMagnetInfo(magnetLink)
|
|
||||||
if err != nil {
|
|
||||||
log.Println("Error getting magnet info:", err)
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
magnet = &common.Magnet{
|
|
||||||
InfoHash: infohash,
|
|
||||||
Name: item.Title,
|
|
||||||
Link: magnetLink,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if magnet == nil {
|
|
||||||
log.Println("Error getting magnet info")
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return deb.IsAvailable(magnet)
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
func ProcessXMLResponse(resp *http.Response, deb debrid.Service) *http.Response {
|
|
||||||
if resp == nil || resp.Body == nil {
|
|
||||||
return resp
|
|
||||||
}
|
|
||||||
|
|
||||||
body, err := io.ReadAll(resp.Body)
|
|
||||||
if err != nil {
|
|
||||||
log.Println("Error reading response body:", err)
|
|
||||||
return resp
|
|
||||||
}
|
|
||||||
err = resp.Body.Close()
|
|
||||||
if err != nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
var rss RSS
|
|
||||||
err = xml.Unmarshal(body, &rss)
|
|
||||||
if err != nil {
|
|
||||||
log.Printf("Error unmarshalling XML: %v", err)
|
|
||||||
return resp
|
|
||||||
}
|
|
||||||
newItems := &SafeItems{}
|
|
||||||
var wg sync.WaitGroup
|
|
||||||
|
|
||||||
// Step 4: Extract infohash or magnet URI, manipulate data
|
|
||||||
for _, item := range rss.Channel.Items {
|
|
||||||
wg.Add(1)
|
|
||||||
go func(item Item) {
|
|
||||||
defer wg.Done()
|
|
||||||
if XMLItemIsCached(item, deb) {
|
|
||||||
newItems.Add(item)
|
|
||||||
}
|
|
||||||
}(item)
|
|
||||||
}
|
|
||||||
wg.Wait()
|
|
||||||
items := newItems.Get()
|
|
||||||
log.Printf("Report: %d/%d items are cached", len(items), len(rss.Channel.Items))
|
|
||||||
rss.Channel.Items = items
|
|
||||||
|
|
||||||
// rss.Channel.Items = newItems
|
|
||||||
modifiedBody, err := xml.MarshalIndent(rss, "", " ")
|
|
||||||
if err != nil {
|
|
||||||
log.Printf("Error marshalling XML: %v", err)
|
|
||||||
return resp
|
|
||||||
}
|
|
||||||
modifiedBody = append([]byte(xml.Header), modifiedBody...)
|
|
||||||
|
|
||||||
// Set the modified body back to the response
|
|
||||||
resp.Body = io.NopCloser(bytes.NewReader(modifiedBody))
|
|
||||||
resp.ContentLength = int64(len(modifiedBody))
|
|
||||||
resp.Header.Set("Content-Length", string(rune(len(modifiedBody))))
|
|
||||||
|
|
||||||
return resp
|
|
||||||
}
|
|
||||||
|
|
||||||
func UrlMatches(re *regexp.Regexp) goproxy.ReqConditionFunc {
|
|
||||||
return func(req *http.Request, ctx *goproxy.ProxyCtx) bool {
|
|
||||||
return re.MatchString(req.URL.String())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func StartProxy(config *common.Config, deb debrid.Service) {
|
|
||||||
username, password := config.Proxy.Username, config.Proxy.Password
|
|
||||||
cfg := config.Proxy
|
|
||||||
proxy := goproxy.NewProxyHttpServer()
|
|
||||||
if username != "" || password != "" {
|
|
||||||
// Set up basic auth for proxy
|
|
||||||
auth.ProxyBasic(proxy, "my_realm", func(user, pwd string) bool {
|
|
||||||
return user == username && password == pwd
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
proxy.OnRequest(goproxy.ReqHostMatches(regexp.MustCompile("^.443$"))).HandleConnect(goproxy.AlwaysMitm)
|
|
||||||
proxy.OnResponse(UrlMatches(regexp.MustCompile("^.*/api\\?t=(search|tvsearch|movie)(&.*)?$"))).DoFunc(
|
|
||||||
func(resp *http.Response, ctx *goproxy.ProxyCtx) *http.Response {
|
|
||||||
return ProcessResponse(resp, deb)
|
|
||||||
})
|
|
||||||
|
|
||||||
port := cmp.Or(os.Getenv("PORT"), cfg.Port, "8181")
|
|
||||||
proxy.Verbose = cfg.Debug
|
|
||||||
port = fmt.Sprintf(":%s", port)
|
|
||||||
log.Printf("Starting proxy server on %s\n", port)
|
|
||||||
log.Fatal(http.ListenAndServe(fmt.Sprintf("%s", port), proxy))
|
|
||||||
}
|
|
||||||
88
common/cache.go
Normal file
88
common/cache.go
Normal file
@@ -0,0 +1,88 @@
|
|||||||
|
package common
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
type Cache struct {
|
||||||
|
data map[string]struct{}
|
||||||
|
order []string
|
||||||
|
maxItems int
|
||||||
|
mu sync.RWMutex
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewCache(maxItems int) *Cache {
|
||||||
|
if maxItems <= 0 {
|
||||||
|
maxItems = 1000
|
||||||
|
}
|
||||||
|
return &Cache{
|
||||||
|
data: make(map[string]struct{}, maxItems),
|
||||||
|
order: make([]string, 0, maxItems),
|
||||||
|
maxItems: maxItems,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Cache) Add(value string) {
|
||||||
|
c.mu.Lock()
|
||||||
|
defer c.mu.Unlock()
|
||||||
|
|
||||||
|
if _, exists := c.data[value]; !exists {
|
||||||
|
if len(c.order) >= c.maxItems {
|
||||||
|
delete(c.data, c.order[0])
|
||||||
|
c.order = c.order[1:]
|
||||||
|
}
|
||||||
|
c.data[value] = struct{}{}
|
||||||
|
c.order = append(c.order, value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Cache) AddMultiple(values map[string]bool) {
|
||||||
|
c.mu.Lock()
|
||||||
|
defer c.mu.Unlock()
|
||||||
|
|
||||||
|
for value := range values {
|
||||||
|
if _, exists := c.data[value]; !exists {
|
||||||
|
if len(c.order) >= c.maxItems {
|
||||||
|
delete(c.data, c.order[0])
|
||||||
|
c.order = c.order[1:]
|
||||||
|
}
|
||||||
|
c.data[value] = struct{}{}
|
||||||
|
c.order = append(c.order, value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Cache) Get(index int) (string, bool) {
|
||||||
|
c.mu.RLock()
|
||||||
|
defer c.mu.RUnlock()
|
||||||
|
if index < 0 || index >= len(c.order) {
|
||||||
|
return "", false
|
||||||
|
}
|
||||||
|
return c.order[index], true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Cache) GetMultiple(values []string) map[string]bool {
|
||||||
|
c.mu.RLock()
|
||||||
|
defer c.mu.RUnlock()
|
||||||
|
|
||||||
|
result := make(map[string]bool, len(values))
|
||||||
|
for _, value := range values {
|
||||||
|
if _, exists := c.data[value]; exists {
|
||||||
|
result[value] = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return result
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Cache) Exists(value string) bool {
|
||||||
|
c.mu.RLock()
|
||||||
|
defer c.mu.RUnlock()
|
||||||
|
_, exists := c.data[value]
|
||||||
|
return exists
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Cache) Len() int {
|
||||||
|
c.mu.RLock()
|
||||||
|
defer c.mu.RUnlock()
|
||||||
|
return len(c.order)
|
||||||
|
}
|
||||||
@@ -15,22 +15,30 @@ type DebridConfig struct {
|
|||||||
RateLimit string `json:"rate_limit"` // 200/minute or 10/second
|
RateLimit string `json:"rate_limit"` // 200/minute or 10/second
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type ProxyConfig struct {
|
||||||
|
Port string `json:"port"`
|
||||||
|
Enabled bool `json:"enabled"`
|
||||||
|
Debug bool `json:"debug"`
|
||||||
|
Username string `json:"username"`
|
||||||
|
Password string `json:"password"`
|
||||||
|
CachedOnly *bool `json:"cached_only"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type QBitTorrentConfig struct {
|
||||||
|
Username string `json:"username"`
|
||||||
|
Password string `json:"password"`
|
||||||
|
Port string `json:"port"`
|
||||||
|
Debug bool `json:"debug"`
|
||||||
|
DownloadFolder string `json:"download_folder"`
|
||||||
|
Categories []string `json:"categories"`
|
||||||
|
RefreshInterval int `json:"refresh_interval"`
|
||||||
|
}
|
||||||
|
|
||||||
type Config struct {
|
type Config struct {
|
||||||
DbDSN string `json:"db_dsn"`
|
Debrid DebridConfig `json:"debrid"`
|
||||||
Debrid DebridConfig `json:"debrid"`
|
Proxy ProxyConfig `json:"proxy"`
|
||||||
Arrs []struct {
|
MaxCacheSize int `json:"max_cache_size"`
|
||||||
WatchFolder string `json:"watch_folder"`
|
QBitTorrent QBitTorrentConfig `json:"qbittorrent"`
|
||||||
CompletedFolder string `json:"completed_folder"`
|
|
||||||
Token string `json:"token"`
|
|
||||||
URL string `json:"url"`
|
|
||||||
} `json:"arrs"`
|
|
||||||
Proxy struct {
|
|
||||||
Port string `json:"port"`
|
|
||||||
Enabled bool `json:"enabled"`
|
|
||||||
Debug bool `json:"debug"`
|
|
||||||
Username string `json:"username"`
|
|
||||||
Password string `json:"password"`
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func LoadConfig(path string) (*Config, error) {
|
func LoadConfig(path string) (*Config, error) {
|
||||||
@@ -52,6 +60,10 @@ func LoadConfig(path string) (*Config, error) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
if config.Proxy.CachedOnly == nil {
|
||||||
|
config.Proxy.CachedOnly = new(bool)
|
||||||
|
*config.Proxy.CachedOnly = true
|
||||||
|
}
|
||||||
|
|
||||||
return config, nil
|
return config, nil
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,12 +1,16 @@
|
|||||||
package common
|
package common
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"path/filepath"
|
||||||
"regexp"
|
"regexp"
|
||||||
|
"strings"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
VIDEOMATCH = "(?i)(\\.)(YUV|WMV|WEBM|VOB|VIV|SVI|ROQ|RMVB|RM|OGV|OGG|NSV|MXF|MTS|M2TS|TS|MPG|MPEG|M2V|MP2|MPE|MPV|MP4|M4P|M4V|MOV|QT|MNG|MKV|FLV|DRC|AVI|ASF|AMV)$"
|
VIDEOMATCH = "(?i)(\\.)(YUV|WMV|WEBM|VOB|VIV|SVI|ROQ|RMVB|RM|OGV|OGG|NSV|MXF|MPG|MPEG|M2V|MP2|MPE|MPV|MP4|M4P|M4V|MOV|QT|MNG|MKV|FLV|DRC|AVI|ASF|AMV|MKA|F4V|3GP|3G2|DIVX|X264|X265)$"
|
||||||
SUBMATCH = "(?i)(\\.)(SRT|SUB|SBV|ASS|VTT|TTML|DFXP|STL|SCC|CAP|SMI|TTXT|TDS|USF|JSS|SSA|PSB|RT|LRC|SSB)$"
|
MUSICMATCH = "(?i)(\\.)(?:MP3|WAV|FLAC|AAC|OGG|WMA|AIFF|ALAC|M4A|APE|AC3|DTS|M4P|MID|MIDI|MKA|MP2|MPA|RA|VOC|WV|AMR)$"
|
||||||
|
SUBMATCH = "(?i)(\\.)(SRT|SUB|SBV|ASS|VTT|TTML|DFXP|STL|SCC|CAP|SMI|TTXT|TDS|USF|JSS|SSA|PSB|RT|LRC|SSB)$"
|
||||||
|
SAMPLEMATCH = `(?i)(^|[\\/]|[._-])(sample|trailer|thumb)s?([._-]|$)`
|
||||||
)
|
)
|
||||||
|
|
||||||
func RegexMatch(regex string, value string) bool {
|
func RegexMatch(regex string, value string) bool {
|
||||||
@@ -14,8 +18,26 @@ func RegexMatch(regex string, value string) bool {
|
|||||||
return re.MatchString(value)
|
return re.MatchString(value)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func RemoveInvalidChars(value string) string {
|
||||||
|
return strings.Map(func(r rune) rune {
|
||||||
|
if r == filepath.Separator || r == ':' {
|
||||||
|
return r
|
||||||
|
}
|
||||||
|
if filepath.IsAbs(string(r)) {
|
||||||
|
return r
|
||||||
|
}
|
||||||
|
if strings.ContainsRune(filepath.VolumeName("C:"+string(r)), r) {
|
||||||
|
return r
|
||||||
|
}
|
||||||
|
if r < 32 || strings.ContainsRune(`<>:"/\|?*`, r) {
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
return r
|
||||||
|
}, value)
|
||||||
|
}
|
||||||
|
|
||||||
func RemoveExtension(value string) string {
|
func RemoveExtension(value string) string {
|
||||||
re := regexp.MustCompile(VIDEOMATCH)
|
re := regexp.MustCompile(VIDEOMATCH + "|" + SUBMATCH + "|" + SAMPLEMATCH + "|" + MUSICMATCH)
|
||||||
|
|
||||||
// Find the last index of the matched extension
|
// Find the last index of the matched extension
|
||||||
loc := re.FindStringIndex(value)
|
loc := re.FindStringIndex(value)
|
||||||
@@ -25,3 +47,13 @@ func RemoveExtension(value string) string {
|
|||||||
return value
|
return value
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func RegexFind(regex string, value string) string {
|
||||||
|
re := regexp.MustCompile(regex)
|
||||||
|
match := re.FindStringSubmatch(value)
|
||||||
|
if len(match) > 0 {
|
||||||
|
return match[0]
|
||||||
|
} else {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
200
common/utils.go
200
common/utils.go
@@ -2,12 +2,22 @@ package common
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"bufio"
|
"bufio"
|
||||||
|
"context"
|
||||||
|
"encoding/base32"
|
||||||
|
"encoding/hex"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"github.com/anacrolix/torrent/metainfo"
|
||||||
|
"io"
|
||||||
"log"
|
"log"
|
||||||
"math/rand"
|
"math/rand"
|
||||||
|
"net/http"
|
||||||
"net/url"
|
"net/url"
|
||||||
"os"
|
"os"
|
||||||
|
"path"
|
||||||
|
"path/filepath"
|
||||||
|
"regexp"
|
||||||
"strings"
|
"strings"
|
||||||
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
type Magnet struct {
|
type Magnet struct {
|
||||||
@@ -17,6 +27,41 @@ type Magnet struct {
|
|||||||
Link string
|
Link string
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func GetMagnetFromFile(file io.Reader, filePath string) (*Magnet, error) {
|
||||||
|
if filepath.Ext(filePath) == ".torrent" {
|
||||||
|
mi, err := metainfo.Load(file)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
hash := mi.HashInfoBytes()
|
||||||
|
infoHash := hash.HexString()
|
||||||
|
info, err := mi.UnmarshalInfo()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
magnet := &Magnet{
|
||||||
|
InfoHash: infoHash,
|
||||||
|
Name: info.Name,
|
||||||
|
Size: info.Length,
|
||||||
|
Link: mi.Magnet(&hash, &info).String(),
|
||||||
|
}
|
||||||
|
return magnet, nil
|
||||||
|
} else {
|
||||||
|
// .magnet file
|
||||||
|
magnetLink := ReadMagnetFile(file)
|
||||||
|
return GetMagnetInfo(magnetLink)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func GetMagnetFromUrl(url string) (*Magnet, error) {
|
||||||
|
if strings.HasPrefix(url, "magnet:") {
|
||||||
|
return GetMagnetInfo(url)
|
||||||
|
} else if strings.HasPrefix(url, "http") {
|
||||||
|
return OpenMagnetHttpURL(url)
|
||||||
|
}
|
||||||
|
return nil, fmt.Errorf("invalid url")
|
||||||
|
}
|
||||||
|
|
||||||
func OpenMagnetFile(filePath string) string {
|
func OpenMagnetFile(filePath string) string {
|
||||||
file, err := os.Open(filePath)
|
file, err := os.Open(filePath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -29,13 +74,15 @@ func OpenMagnetFile(filePath string) string {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
}(file) // Ensure the file is closed after the function ends
|
}(file) // Ensure the file is closed after the function ends
|
||||||
|
return ReadMagnetFile(file)
|
||||||
|
}
|
||||||
|
|
||||||
// Create a scanner to read the file line by line
|
func ReadMagnetFile(file io.Reader) string {
|
||||||
scanner := bufio.NewScanner(file)
|
scanner := bufio.NewScanner(file)
|
||||||
for scanner.Scan() {
|
for scanner.Scan() {
|
||||||
magnetLink := scanner.Text()
|
content := scanner.Text()
|
||||||
if magnetLink != "" {
|
if content != "" {
|
||||||
return magnetLink
|
return content
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -46,14 +93,50 @@ func OpenMagnetFile(filePath string) string {
|
|||||||
return ""
|
return ""
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func OpenMagnetHttpURL(magnetLink string) (*Magnet, error) {
|
||||||
|
resp, err := http.Get(magnetLink)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("error making GET request: %v", err)
|
||||||
|
}
|
||||||
|
defer func(resp *http.Response) {
|
||||||
|
err := resp.Body.Close()
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}(resp) // Ensure the response is closed after the function ends
|
||||||
|
|
||||||
|
// Create a scanner to read the file line by line
|
||||||
|
|
||||||
|
mi, err := metainfo.Load(resp.Body)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
hash := mi.HashInfoBytes()
|
||||||
|
infoHash := hash.HexString()
|
||||||
|
info, err := mi.UnmarshalInfo()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
log.Println("InfoHash: ", infoHash)
|
||||||
|
magnet := &Magnet{
|
||||||
|
InfoHash: infoHash,
|
||||||
|
Name: info.Name,
|
||||||
|
Size: info.Length,
|
||||||
|
Link: mi.Magnet(&hash, &info).String(),
|
||||||
|
}
|
||||||
|
return magnet, nil
|
||||||
|
}
|
||||||
|
|
||||||
func GetMagnetInfo(magnetLink string) (*Magnet, error) {
|
func GetMagnetInfo(magnetLink string) (*Magnet, error) {
|
||||||
if magnetLink == "" {
|
if magnetLink == "" {
|
||||||
return nil, fmt.Errorf("error getting magnet from file")
|
return nil, fmt.Errorf("error getting magnet from file")
|
||||||
}
|
}
|
||||||
|
|
||||||
magnetURI, err := url.Parse(magnetLink)
|
magnetURI, err := url.Parse(magnetLink)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("error parsing magnet link")
|
return nil, fmt.Errorf("error parsing magnet link")
|
||||||
}
|
}
|
||||||
|
|
||||||
query := magnetURI.Query()
|
query := magnetURI.Query()
|
||||||
xt := query.Get("xt")
|
xt := query.Get("xt")
|
||||||
dn := query.Get("dn")
|
dn := query.Get("dn")
|
||||||
@@ -81,3 +164,112 @@ func RandomString(length int) string {
|
|||||||
}
|
}
|
||||||
return string(b)
|
return string(b)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func ExtractInfoHash(magnetDesc string) string {
|
||||||
|
const prefix = "xt=urn:btih:"
|
||||||
|
start := strings.Index(magnetDesc, prefix)
|
||||||
|
if start == -1 {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
hash := ""
|
||||||
|
start += len(prefix)
|
||||||
|
end := strings.IndexAny(magnetDesc[start:], "&#")
|
||||||
|
if end == -1 {
|
||||||
|
hash = magnetDesc[start:]
|
||||||
|
} else {
|
||||||
|
hash = magnetDesc[start : start+end]
|
||||||
|
}
|
||||||
|
hash, _ = processInfoHash(hash) // Convert to hex if needed
|
||||||
|
return hash
|
||||||
|
}
|
||||||
|
|
||||||
|
func processInfoHash(input string) (string, error) {
|
||||||
|
// Regular expression for a valid 40-character hex infohash
|
||||||
|
hexRegex := regexp.MustCompile("^[0-9a-fA-F]{40}$")
|
||||||
|
|
||||||
|
// If it's already a valid hex infohash, return it as is
|
||||||
|
if hexRegex.MatchString(input) {
|
||||||
|
return strings.ToLower(input), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// If it's 32 characters long, it might be Base32 encoded
|
||||||
|
if len(input) == 32 {
|
||||||
|
// Ensure the input is uppercase and remove any padding
|
||||||
|
input = strings.ToUpper(strings.TrimRight(input, "="))
|
||||||
|
|
||||||
|
// Try to decode from Base32
|
||||||
|
decoded, err := base32.StdEncoding.DecodeString(input)
|
||||||
|
if err == nil && len(decoded) == 20 {
|
||||||
|
// If successful and the result is 20 bytes, encode to hex
|
||||||
|
return hex.EncodeToString(decoded), nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// If we get here, it's not a valid infohash and we couldn't convert it
|
||||||
|
return "", fmt.Errorf("invalid infohash: %s", input)
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewLogger(prefix string, output *os.File) *log.Logger {
|
||||||
|
f := fmt.Sprintf("[%s] ", prefix)
|
||||||
|
return log.New(output, f, log.LstdFlags)
|
||||||
|
}
|
||||||
|
|
||||||
|
func GetInfohashFromURL(url string) (string, error) {
|
||||||
|
// Download the torrent file
|
||||||
|
var magnetLink string
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
client := &http.Client{
|
||||||
|
Timeout: 30 * time.Second,
|
||||||
|
CheckRedirect: func(req *http.Request, via []*http.Request) error {
|
||||||
|
if len(via) >= 3 {
|
||||||
|
return fmt.Errorf("stopped after 3 redirects")
|
||||||
|
}
|
||||||
|
if strings.HasPrefix(req.URL.String(), "magnet:") {
|
||||||
|
// Stop the redirect chain
|
||||||
|
magnetLink = req.URL.String()
|
||||||
|
return http.ErrUseLastResponse
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
},
|
||||||
|
}
|
||||||
|
req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
resp, err := client.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
if magnetLink != "" {
|
||||||
|
return ExtractInfoHash(magnetLink), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
mi, err := metainfo.Load(resp.Body)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
hash := mi.HashInfoBytes()
|
||||||
|
infoHash := hash.HexString()
|
||||||
|
return infoHash, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func JoinURL(base string, paths ...string) (string, error) {
|
||||||
|
// Parse the base URL
|
||||||
|
u, err := url.Parse(base)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Join the path components
|
||||||
|
u.Path = path.Join(u.Path, path.Join(paths...))
|
||||||
|
|
||||||
|
// Return the resulting URL as a string
|
||||||
|
return u.String(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func FileReady(path string) bool {
|
||||||
|
_, err := os.Stat(path)
|
||||||
|
return !os.IsNotExist(err) // Returns true if the file exists
|
||||||
|
}
|
||||||
|
|||||||
@@ -1,83 +0,0 @@
|
|||||||
package debrid
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/anacrolix/torrent/metainfo"
|
|
||||||
"goBlack/common"
|
|
||||||
"path/filepath"
|
|
||||||
)
|
|
||||||
|
|
||||||
type Service interface {
|
|
||||||
SubmitMagnet(torrent *Torrent) (*Torrent, error)
|
|
||||||
CheckStatus(torrent *Torrent) (*Torrent, error)
|
|
||||||
DownloadLink(torrent *Torrent) error
|
|
||||||
Process(arr *Arr, magnet string) (*Torrent, error)
|
|
||||||
IsAvailable(magnet *common.Magnet) bool
|
|
||||||
}
|
|
||||||
|
|
||||||
type Debrid struct {
|
|
||||||
Host string `json:"host"`
|
|
||||||
APIKey string
|
|
||||||
DownloadUncached bool
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewDebrid(dc common.DebridConfig) Service {
|
|
||||||
switch dc.Name {
|
|
||||||
case "realdebrid":
|
|
||||||
return NewRealDebrid(dc)
|
|
||||||
default:
|
|
||||||
return NewRealDebrid(dc)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func GetTorrentInfo(filePath string) (*Torrent, error) {
|
|
||||||
// Open and read the .torrent file
|
|
||||||
if filepath.Ext(filePath) == ".torrent" {
|
|
||||||
return getTorrentInfo(filePath)
|
|
||||||
} else {
|
|
||||||
return torrentFromMagnetFile(filePath)
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
func torrentFromMagnetFile(filePath string) (*Torrent, error) {
|
|
||||||
magnetLink := common.OpenMagnetFile(filePath)
|
|
||||||
magnet, err := common.GetMagnetInfo(magnetLink)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
torrent := &Torrent{
|
|
||||||
InfoHash: magnet.InfoHash,
|
|
||||||
Name: magnet.Name,
|
|
||||||
Size: magnet.Size,
|
|
||||||
Magnet: magnet,
|
|
||||||
Filename: filePath,
|
|
||||||
}
|
|
||||||
return torrent, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func getTorrentInfo(filePath string) (*Torrent, error) {
|
|
||||||
mi, err := metainfo.LoadFromFile(filePath)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
hash := mi.HashInfoBytes()
|
|
||||||
infoHash := hash.HexString()
|
|
||||||
info, err := mi.UnmarshalInfo()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
magnet := &common.Magnet{
|
|
||||||
InfoHash: infoHash,
|
|
||||||
Name: info.Name,
|
|
||||||
Size: info.Length,
|
|
||||||
Link: mi.Magnet(&hash, &info).String(),
|
|
||||||
}
|
|
||||||
torrent := &Torrent{
|
|
||||||
InfoHash: infoHash,
|
|
||||||
Name: info.Name,
|
|
||||||
Size: info.Length,
|
|
||||||
Magnet: magnet,
|
|
||||||
Filename: filePath,
|
|
||||||
}
|
|
||||||
return torrent, nil
|
|
||||||
}
|
|
||||||
@@ -1,155 +0,0 @@
|
|||||||
package debrid
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"goBlack/common"
|
|
||||||
"goBlack/debrid/structs"
|
|
||||||
"log"
|
|
||||||
"net/http"
|
|
||||||
gourl "net/url"
|
|
||||||
"path/filepath"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
type RealDebrid struct {
|
|
||||||
Host string `json:"host"`
|
|
||||||
APIKey string
|
|
||||||
DownloadUncached bool
|
|
||||||
client *common.RLHTTPClient
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *RealDebrid) Process(arr *Arr, magnet string) (*Torrent, error) {
|
|
||||||
torrent, err := GetTorrentInfo(magnet)
|
|
||||||
torrent.Arr = arr
|
|
||||||
if err != nil {
|
|
||||||
return torrent, err
|
|
||||||
}
|
|
||||||
log.Printf("Torrent Name: %s", torrent.Name)
|
|
||||||
if !r.DownloadUncached {
|
|
||||||
if !r.IsAvailable(torrent.Magnet) {
|
|
||||||
return torrent, fmt.Errorf("torrent is not cached")
|
|
||||||
}
|
|
||||||
log.Printf("Torrent: %s is cached", torrent.Name)
|
|
||||||
}
|
|
||||||
|
|
||||||
torrent, err = r.SubmitMagnet(torrent)
|
|
||||||
if err != nil || torrent.Id == "" {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return r.CheckStatus(torrent)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *RealDebrid) IsAvailable(magnet *common.Magnet) bool {
|
|
||||||
url := fmt.Sprintf("%s/torrents/instantAvailability/%s", r.Host, magnet.InfoHash)
|
|
||||||
resp, err := r.client.MakeRequest(http.MethodGet, url, nil)
|
|
||||||
if err != nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
var data structs.RealDebridAvailabilityResponse
|
|
||||||
err = json.Unmarshal(resp, &data)
|
|
||||||
if err != nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
hosters, exists := data[strings.ToLower(magnet.InfoHash)]
|
|
||||||
if !exists || len(hosters) < 1 {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *RealDebrid) SubmitMagnet(torrent *Torrent) (*Torrent, error) {
|
|
||||||
url := fmt.Sprintf("%s/torrents/addMagnet", r.Host)
|
|
||||||
payload := gourl.Values{
|
|
||||||
"magnet": {torrent.Magnet.Link},
|
|
||||||
}
|
|
||||||
var data structs.RealDebridAddMagnetSchema
|
|
||||||
resp, err := r.client.MakeRequest(http.MethodPost, url, strings.NewReader(payload.Encode()))
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
err = json.Unmarshal(resp, &data)
|
|
||||||
log.Printf("Torrent: %s added with id: %s\n", torrent.Name, data.Id)
|
|
||||||
torrent.Id = data.Id
|
|
||||||
|
|
||||||
return torrent, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *RealDebrid) CheckStatus(torrent *Torrent) (*Torrent, error) {
|
|
||||||
url := fmt.Sprintf("%s/torrents/info/%s", r.Host, torrent.Id)
|
|
||||||
for {
|
|
||||||
resp, err := r.client.MakeRequest(http.MethodGet, url, nil)
|
|
||||||
if err != nil {
|
|
||||||
return torrent, err
|
|
||||||
}
|
|
||||||
var data structs.RealDebridTorrentInfo
|
|
||||||
err = json.Unmarshal(resp, &data)
|
|
||||||
status := data.Status
|
|
||||||
torrent.Folder = common.RemoveExtension(data.OriginalFilename)
|
|
||||||
if status == "error" || status == "dead" || status == "magnet_error" {
|
|
||||||
return torrent, fmt.Errorf("torrent: %s has error", torrent.Name)
|
|
||||||
} else if status == "waiting_files_selection" {
|
|
||||||
files := make([]TorrentFile, 0)
|
|
||||||
for _, f := range data.Files {
|
|
||||||
name := f.Path
|
|
||||||
if !common.RegexMatch(common.VIDEOMATCH, name) && !common.RegexMatch(common.SUBMATCH, name) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
fileId := f.ID
|
|
||||||
file := &TorrentFile{
|
|
||||||
Name: name,
|
|
||||||
Path: filepath.Join(torrent.Folder, name),
|
|
||||||
Size: int64(f.Bytes),
|
|
||||||
Id: strconv.Itoa(fileId),
|
|
||||||
}
|
|
||||||
files = append(files, *file)
|
|
||||||
}
|
|
||||||
torrent.Files = files
|
|
||||||
if len(files) == 0 {
|
|
||||||
return torrent, fmt.Errorf("no video files found")
|
|
||||||
}
|
|
||||||
filesId := make([]string, 0)
|
|
||||||
for _, f := range files {
|
|
||||||
filesId = append(filesId, f.Id)
|
|
||||||
}
|
|
||||||
p := gourl.Values{
|
|
||||||
"files": {strings.Join(filesId, ",")},
|
|
||||||
}
|
|
||||||
payload := strings.NewReader(p.Encode())
|
|
||||||
_, err = r.client.MakeRequest(http.MethodPost, fmt.Sprintf("%s/torrents/selectFiles/%s", r.Host, torrent.Id), payload)
|
|
||||||
if err != nil {
|
|
||||||
return torrent, err
|
|
||||||
}
|
|
||||||
} else if status == "downloaded" {
|
|
||||||
log.Printf("Torrent: %s downloaded\n", torrent.Name)
|
|
||||||
err = r.DownloadLink(torrent)
|
|
||||||
if err != nil {
|
|
||||||
return torrent, err
|
|
||||||
}
|
|
||||||
break
|
|
||||||
} else if status == "downloading" {
|
|
||||||
return torrent, fmt.Errorf("torrent is uncached")
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
return torrent, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *RealDebrid) DownloadLink(torrent *Torrent) error {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewRealDebrid(dc common.DebridConfig) *RealDebrid {
|
|
||||||
rl := common.ParseRateLimit(dc.RateLimit)
|
|
||||||
headers := map[string]string{
|
|
||||||
"Authorization": fmt.Sprintf("Bearer %s", dc.APIKey),
|
|
||||||
}
|
|
||||||
client := common.NewRLHTTPClient(rl, headers)
|
|
||||||
return &RealDebrid{
|
|
||||||
Host: dc.Host,
|
|
||||||
APIKey: dc.APIKey,
|
|
||||||
DownloadUncached: dc.DownloadUncached,
|
|
||||||
client: client,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,41 +0,0 @@
|
|||||||
package structs
|
|
||||||
|
|
||||||
type RealDebridAvailabilityResponse map[string]Hosters
|
|
||||||
|
|
||||||
type Hosters map[string][]FileIDs
|
|
||||||
|
|
||||||
type FileIDs map[string]FileVariant
|
|
||||||
|
|
||||||
type FileVariant struct {
|
|
||||||
Filename string `json:"filename"`
|
|
||||||
Filesize int `json:"filesize"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type RealDebridAddMagnetSchema struct {
|
|
||||||
Id string `json:"id"`
|
|
||||||
Uri string `json:"uri"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type RealDebridTorrentInfo struct {
|
|
||||||
ID string `json:"id"`
|
|
||||||
Filename string `json:"filename"`
|
|
||||||
OriginalFilename string `json:"original_filename"`
|
|
||||||
Hash string `json:"hash"`
|
|
||||||
Bytes int `json:"bytes"`
|
|
||||||
OriginalBytes int `json:"original_bytes"`
|
|
||||||
Host string `json:"host"`
|
|
||||||
Split int `json:"split"`
|
|
||||||
Progress int `json:"progress"`
|
|
||||||
Status string `json:"status"`
|
|
||||||
Added string `json:"added"`
|
|
||||||
Files []struct {
|
|
||||||
ID int `json:"id"`
|
|
||||||
Path string `json:"path"`
|
|
||||||
Bytes int `json:"bytes"`
|
|
||||||
Selected int `json:"selected"`
|
|
||||||
} `json:"files"`
|
|
||||||
Links []string `json:"links"`
|
|
||||||
Ended string `json:"ended,omitempty"`
|
|
||||||
Speed int `json:"speed,omitempty"`
|
|
||||||
Seeders int `json:"seeders,omitempty"`
|
|
||||||
}
|
|
||||||
@@ -1,142 +0,0 @@
|
|||||||
package debrid
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"goBlack/common"
|
|
||||||
"log"
|
|
||||||
"net/http"
|
|
||||||
gourl "net/url"
|
|
||||||
"os"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
type Arr struct {
|
|
||||||
WatchFolder string `json:"watch_folder"`
|
|
||||||
CompletedFolder string `json:"completed_folder"`
|
|
||||||
Debrid common.DebridConfig `json:"debrid"`
|
|
||||||
Token string `json:"token"`
|
|
||||||
URL string `json:"url"`
|
|
||||||
Client *common.RLHTTPClient
|
|
||||||
}
|
|
||||||
|
|
||||||
type ArrHistorySchema struct {
|
|
||||||
Page int `json:"page"`
|
|
||||||
PageSize int `json:"pageSize"`
|
|
||||||
SortKey string `json:"sortKey"`
|
|
||||||
SortDirection string `json:"sortDirection"`
|
|
||||||
TotalRecords int `json:"totalRecords"`
|
|
||||||
Records []struct {
|
|
||||||
ID int `json:"id"`
|
|
||||||
DownloadID string `json:"downloadId"`
|
|
||||||
} `json:"records"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type Torrent struct {
|
|
||||||
Id string `json:"id"`
|
|
||||||
InfoHash string `json:"info_hash"`
|
|
||||||
Name string `json:"name"`
|
|
||||||
Folder string `json:"folder"`
|
|
||||||
Filename string `json:"filename"`
|
|
||||||
Size int64 `json:"size"`
|
|
||||||
Magnet *common.Magnet `json:"magnet"`
|
|
||||||
Files []TorrentFile `json:"files"`
|
|
||||||
Status string `json:"status"`
|
|
||||||
|
|
||||||
Arr *Arr
|
|
||||||
}
|
|
||||||
|
|
||||||
type TorrentFile struct {
|
|
||||||
Id string `json:"id"`
|
|
||||||
Name string `json:"name"`
|
|
||||||
Size int64 `json:"size"`
|
|
||||||
Path string `json:"path"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (arr *Arr) GetHeaders() map[string]string {
|
|
||||||
return map[string]string{
|
|
||||||
"X-Api-Key": arr.Token,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (arr *Arr) GetURL() string {
|
|
||||||
url, _ := gourl.JoinPath(arr.URL, "api/v3/")
|
|
||||||
return url
|
|
||||||
}
|
|
||||||
|
|
||||||
func getEventId(eventType string) int {
|
|
||||||
switch eventType {
|
|
||||||
case "grabbed":
|
|
||||||
return 1
|
|
||||||
case "seriesFolderDownloaded":
|
|
||||||
return 2
|
|
||||||
case "DownloadFolderImported":
|
|
||||||
return 3
|
|
||||||
case "DownloadFailed":
|
|
||||||
return 4
|
|
||||||
case "DownloadIgnored":
|
|
||||||
return 7
|
|
||||||
default:
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (arr *Arr) GetHistory(downloadId, eventType string) *ArrHistorySchema {
|
|
||||||
eventId := getEventId(eventType)
|
|
||||||
query := gourl.Values{}
|
|
||||||
if downloadId != "" {
|
|
||||||
query.Add("downloadId", downloadId)
|
|
||||||
}
|
|
||||||
if eventId != 0 {
|
|
||||||
query.Add("eventId", strconv.Itoa(eventId))
|
|
||||||
|
|
||||||
}
|
|
||||||
query.Add("pageSize", "100")
|
|
||||||
url := arr.GetURL() + "history/" + "?" + query.Encode()
|
|
||||||
resp, err := arr.Client.MakeRequest(http.MethodGet, url, nil)
|
|
||||||
if err != nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
var data *ArrHistorySchema
|
|
||||||
err = json.Unmarshal(resp, &data)
|
|
||||||
if err != nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return data
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *Torrent) Cleanup(remove bool) {
|
|
||||||
if remove {
|
|
||||||
err := os.Remove(t.Filename)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *Torrent) MarkAsFailed() error {
|
|
||||||
downloadId := strings.ToUpper(t.Magnet.InfoHash)
|
|
||||||
history := t.Arr.GetHistory(downloadId, "grabbed")
|
|
||||||
if history == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
torrentId := 0
|
|
||||||
for _, record := range history.Records {
|
|
||||||
if strings.EqualFold(record.DownloadID, downloadId) {
|
|
||||||
torrentId = record.ID
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if torrentId != 0 {
|
|
||||||
url, err := gourl.JoinPath(t.Arr.GetURL(), "history/failed/", strconv.Itoa(torrentId))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
_, err = t.Arr.Client.MakeRequest(http.MethodPost, url, nil)
|
|
||||||
if err == nil {
|
|
||||||
log.Printf("Marked torrent: %s as failed", t.Name)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
9
go.mod
9
go.mod
@@ -4,9 +4,12 @@ go 1.22
|
|||||||
|
|
||||||
require (
|
require (
|
||||||
github.com/anacrolix/torrent v1.55.0
|
github.com/anacrolix/torrent v1.55.0
|
||||||
|
github.com/cavaliergopher/grab/v3 v3.0.1
|
||||||
github.com/elazarl/goproxy v0.0.0-20240726154733-8b0c20506380
|
github.com/elazarl/goproxy v0.0.0-20240726154733-8b0c20506380
|
||||||
github.com/elazarl/goproxy/ext v0.0.0-20190711103511-473e67f1d7d2
|
github.com/elazarl/goproxy/ext v0.0.0-20190711103511-473e67f1d7d2
|
||||||
github.com/fsnotify/fsnotify v1.7.0
|
github.com/go-chi/chi/v5 v5.1.0
|
||||||
|
github.com/google/uuid v1.6.0
|
||||||
|
github.com/valyala/fasthttp v1.55.0
|
||||||
github.com/valyala/fastjson v1.6.4
|
github.com/valyala/fastjson v1.6.4
|
||||||
golang.org/x/time v0.6.0
|
golang.org/x/time v0.6.0
|
||||||
)
|
)
|
||||||
@@ -14,10 +17,12 @@ require (
|
|||||||
require (
|
require (
|
||||||
github.com/anacrolix/missinggo v1.3.0 // indirect
|
github.com/anacrolix/missinggo v1.3.0 // indirect
|
||||||
github.com/anacrolix/missinggo/v2 v2.7.3 // indirect
|
github.com/anacrolix/missinggo/v2 v2.7.3 // indirect
|
||||||
|
github.com/andybalholm/brotli v1.1.0 // indirect
|
||||||
github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8 // indirect
|
github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8 // indirect
|
||||||
github.com/google/go-cmp v0.6.0 // indirect
|
github.com/google/go-cmp v0.6.0 // indirect
|
||||||
github.com/huandu/xstrings v1.3.2 // indirect
|
github.com/huandu/xstrings v1.3.2 // indirect
|
||||||
|
github.com/klauspost/compress v1.17.9 // indirect
|
||||||
|
github.com/valyala/bytebufferpool v1.0.0 // indirect
|
||||||
golang.org/x/net v0.27.0 // indirect
|
golang.org/x/net v0.27.0 // indirect
|
||||||
golang.org/x/sys v0.22.0 // indirect
|
|
||||||
golang.org/x/text v0.16.0 // indirect
|
golang.org/x/text v0.16.0 // indirect
|
||||||
)
|
)
|
||||||
|
|||||||
18
go.sum
18
go.sum
@@ -35,6 +35,8 @@ github.com/anacrolix/tagflag v1.0.0/go.mod h1:1m2U/K6ZT+JZG0+bdMK6qauP49QT4wE5pm
|
|||||||
github.com/anacrolix/tagflag v1.1.0/go.mod h1:Scxs9CV10NQatSmbyjqmqmeQNwGzlNe0CMUMIxqHIG8=
|
github.com/anacrolix/tagflag v1.1.0/go.mod h1:Scxs9CV10NQatSmbyjqmqmeQNwGzlNe0CMUMIxqHIG8=
|
||||||
github.com/anacrolix/torrent v1.55.0 h1:s9yh/YGdPmbN9dTa+0Inh2dLdrLQRvEAj1jdFW/Hdd8=
|
github.com/anacrolix/torrent v1.55.0 h1:s9yh/YGdPmbN9dTa+0Inh2dLdrLQRvEAj1jdFW/Hdd8=
|
||||||
github.com/anacrolix/torrent v1.55.0/go.mod h1:sBdZHBSZNj4de0m+EbYg7vvs/G/STubxu/GzzNbojsE=
|
github.com/anacrolix/torrent v1.55.0/go.mod h1:sBdZHBSZNj4de0m+EbYg7vvs/G/STubxu/GzzNbojsE=
|
||||||
|
github.com/andybalholm/brotli v1.1.0 h1:eLKJA0d02Lf0mVpIDgYnqXcUn0GqVmEFny3VuID1U3M=
|
||||||
|
github.com/andybalholm/brotli v1.1.0/go.mod h1:sms7XGricyQI9K10gOSf56VKKWS4oLer58Q+mhRPtnY=
|
||||||
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
|
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
|
||||||
github.com/benbjohnson/immutable v0.2.0/go.mod h1:uc6OHo6PN2++n98KHLxW8ef4W42ylHiQSENghE1ezxI=
|
github.com/benbjohnson/immutable v0.2.0/go.mod h1:uc6OHo6PN2++n98KHLxW8ef4W42ylHiQSENghE1ezxI=
|
||||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||||
@@ -44,6 +46,8 @@ github.com/bradfitz/iter v0.0.0-20140124041915-454541ec3da2/go.mod h1:PyRFw1Lt2w
|
|||||||
github.com/bradfitz/iter v0.0.0-20190303215204-33e6a9893b0c/go.mod h1:PyRFw1Lt2wKX4ZVSQ2mk+PeDa1rxyObEDlApuIsUKuo=
|
github.com/bradfitz/iter v0.0.0-20190303215204-33e6a9893b0c/go.mod h1:PyRFw1Lt2wKX4ZVSQ2mk+PeDa1rxyObEDlApuIsUKuo=
|
||||||
github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8 h1:GKTyiRCL6zVf5wWaqKnf+7Qs6GbEPfd4iMOitWzXJx8=
|
github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8 h1:GKTyiRCL6zVf5wWaqKnf+7Qs6GbEPfd4iMOitWzXJx8=
|
||||||
github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8/go.mod h1:spo1JLcs67NmW1aVLEgtA8Yy1elc+X8y5SRW1sFW4Og=
|
github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8/go.mod h1:spo1JLcs67NmW1aVLEgtA8Yy1elc+X8y5SRW1sFW4Og=
|
||||||
|
github.com/cavaliergopher/grab/v3 v3.0.1 h1:4z7TkBfmPjmLAAmkkAZNX/6QJ1nNFdv3SdIHXju0Fr4=
|
||||||
|
github.com/cavaliergopher/grab/v3 v3.0.1/go.mod h1:1U/KNnD+Ft6JJiYoYBAimKH2XrYptb8Kl3DFGmsjpq4=
|
||||||
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
@@ -62,14 +66,14 @@ github.com/elazarl/goproxy/ext v0.0.0-20190711103511-473e67f1d7d2/go.mod h1:gNh8
|
|||||||
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
|
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
|
||||||
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
|
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
|
||||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||||
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
|
|
||||||
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
|
|
||||||
github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE=
|
github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE=
|
||||||
github.com/glycerine/go-unsnap-stream v0.0.0-20181221182339-f9677308dec2/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE=
|
github.com/glycerine/go-unsnap-stream v0.0.0-20181221182339-f9677308dec2/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE=
|
||||||
github.com/glycerine/go-unsnap-stream v0.0.0-20190901134440-81cf024a9e0a/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE=
|
github.com/glycerine/go-unsnap-stream v0.0.0-20190901134440-81cf024a9e0a/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE=
|
||||||
github.com/glycerine/goconvey v0.0.0-20180728074245-46e3a41ad493/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24=
|
github.com/glycerine/goconvey v0.0.0-20180728074245-46e3a41ad493/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24=
|
||||||
github.com/glycerine/goconvey v0.0.0-20190315024820-982ee783a72e/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24=
|
github.com/glycerine/goconvey v0.0.0-20190315024820-982ee783a72e/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24=
|
||||||
github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24=
|
github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24=
|
||||||
|
github.com/go-chi/chi/v5 v5.1.0 h1:acVI1TYaD+hhedDJ3r54HyA6sExp3HfXq7QWEEY/xMw=
|
||||||
|
github.com/go-chi/chi/v5 v5.1.0/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8=
|
||||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||||
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||||
@@ -100,6 +104,8 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
|
|||||||
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
|
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
|
||||||
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||||
|
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||||
|
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||||
github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||||
github.com/gopherjs/gopherjs v0.0.0-20190309154008-847fc94819f9/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
github.com/gopherjs/gopherjs v0.0.0-20190309154008-847fc94819f9/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||||
@@ -119,6 +125,8 @@ github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVY
|
|||||||
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
|
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
|
||||||
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||||
|
github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
|
||||||
|
github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
|
||||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||||
@@ -185,6 +193,10 @@ github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o
|
|||||||
github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
|
github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
|
||||||
github.com/tinylib/msgp v1.1.0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
|
github.com/tinylib/msgp v1.1.0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
|
||||||
github.com/tinylib/msgp v1.1.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
|
github.com/tinylib/msgp v1.1.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
|
||||||
|
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
|
||||||
|
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
|
||||||
|
github.com/valyala/fasthttp v1.55.0 h1:Zkefzgt6a7+bVKHnu/YaYSOPfNYNisSVBo/unVCf8k8=
|
||||||
|
github.com/valyala/fasthttp v1.55.0/go.mod h1:NkY9JtkrpPKmgwV3HTaS2HWaJss9RSIsRVfcxxoHiOM=
|
||||||
github.com/valyala/fastjson v1.6.4 h1:uAUNq9Z6ymTgGhcm0UynUAB6tlbakBrz6CQFax3BXVQ=
|
github.com/valyala/fastjson v1.6.4 h1:uAUNq9Z6ymTgGhcm0UynUAB6tlbakBrz6CQFax3BXVQ=
|
||||||
github.com/valyala/fastjson v1.6.4/go.mod h1:CLCAqky6SMuOcxStkYQvblddUtoRxhYMGLrsQns1aXY=
|
github.com/valyala/fastjson v1.6.4/go.mod h1:CLCAqky6SMuOcxStkYQvblddUtoRxhYMGLrsQns1aXY=
|
||||||
github.com/willf/bitset v1.1.9/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
|
github.com/willf/bitset v1.1.9/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
|
||||||
@@ -229,8 +241,6 @@ golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7w
|
|||||||
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20200413165638-669c56c373c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20200413165638-669c56c373c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI=
|
|
||||||
golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
|
||||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||||
golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
|
golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
|
||||||
|
|||||||
148
pkg/debrid/debrid.go
Normal file
148
pkg/debrid/debrid.go
Normal file
@@ -0,0 +1,148 @@
|
|||||||
|
package debrid
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"github.com/anacrolix/torrent/metainfo"
|
||||||
|
"goBlack/common"
|
||||||
|
"log"
|
||||||
|
"path/filepath"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Service is the provider-agnostic contract implemented by each debrid
// backend (currently only Real-Debrid; see NewDebrid).
type Service interface {
	// SubmitMagnet adds the torrent's magnet link to the provider and
	// records the provider-side id on the torrent.
	SubmitMagnet(torrent *Torrent) (*Torrent, error)
	// CheckStatus polls the provider until the torrent reaches a terminal
	// state; isSymlink controls whether direct download links are resolved.
	CheckStatus(torrent *Torrent, isSymlink bool) (*Torrent, error)
	// GetDownloadLinks unrestricts the torrent's host links into direct
	// download URLs, stored on the torrent.
	GetDownloadLinks(torrent *Torrent) error
	// DeleteTorrent removes the torrent from the provider (best effort).
	DeleteTorrent(torrent *Torrent)
	// IsAvailable reports, per infohash, whether the torrent is cached
	// on the provider.
	IsAvailable(infohashes []string) map[string]bool
	// GetMountPath returns the local folder where the provider storage is
	// mounted (e.g. via rclone).
	GetMountPath() string
	// GetDownloadUncached reports whether uncached torrents may be added.
	GetDownloadUncached() bool
	// GetTorrent fetches a torrent's current state from the provider by id.
	GetTorrent(id string) (*Torrent, error)
	// GetName returns the provider identifier used in configuration.
	GetName() string
	// GetLogger exposes the provider-scoped logger.
	GetLogger() *log.Logger
}
|
||||||
|
|
||||||
|
// Debrid holds the configuration and shared plumbing common to debrid
// provider implementations (mirrors the fields of RealDebrid).
type Debrid struct {
	// Host is the base URL of the provider's API.
	Host string `json:"host"`
	// APIKey authenticates requests to the provider.
	APIKey string
	// DownloadUncached allows submitting torrents that are not yet cached.
	DownloadUncached bool
	// client is a rate-limited HTTP client for provider API calls.
	client *common.RLHTTPClient
	// cache stores infohash availability results locally.
	cache *common.Cache
	// MountPath is the local folder where provider storage is mounted.
	MountPath string
	logger    *log.Logger
}
|
||||||
|
|
||||||
|
func NewDebrid(dc common.DebridConfig, cache *common.Cache) Service {
|
||||||
|
switch dc.Name {
|
||||||
|
case "realdebrid":
|
||||||
|
return NewRealDebrid(dc, cache)
|
||||||
|
default:
|
||||||
|
return NewRealDebrid(dc, cache)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func GetTorrentInfo(filePath string) (*Torrent, error) {
|
||||||
|
// Open and read the .torrent file
|
||||||
|
if filepath.Ext(filePath) == ".torrent" {
|
||||||
|
return getTorrentInfo(filePath)
|
||||||
|
} else {
|
||||||
|
return torrentFromMagnetFile(filePath)
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func torrentFromMagnetFile(filePath string) (*Torrent, error) {
|
||||||
|
magnetLink := common.OpenMagnetFile(filePath)
|
||||||
|
magnet, err := common.GetMagnetInfo(magnetLink)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
torrent := &Torrent{
|
||||||
|
InfoHash: magnet.InfoHash,
|
||||||
|
Name: magnet.Name,
|
||||||
|
Size: magnet.Size,
|
||||||
|
Magnet: magnet,
|
||||||
|
Filename: filePath,
|
||||||
|
}
|
||||||
|
return torrent, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func getTorrentInfo(filePath string) (*Torrent, error) {
|
||||||
|
mi, err := metainfo.LoadFromFile(filePath)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
hash := mi.HashInfoBytes()
|
||||||
|
infoHash := hash.HexString()
|
||||||
|
info, err := mi.UnmarshalInfo()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
magnet := &common.Magnet{
|
||||||
|
InfoHash: infoHash,
|
||||||
|
Name: info.Name,
|
||||||
|
Size: info.Length,
|
||||||
|
Link: mi.Magnet(&hash, &info).String(),
|
||||||
|
}
|
||||||
|
torrent := &Torrent{
|
||||||
|
InfoHash: infoHash,
|
||||||
|
Name: info.Name,
|
||||||
|
Size: info.Length,
|
||||||
|
Magnet: magnet,
|
||||||
|
Filename: filePath,
|
||||||
|
}
|
||||||
|
return torrent, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func GetLocalCache(infohashes []string, cache *common.Cache) ([]string, map[string]bool) {
|
||||||
|
result := make(map[string]bool)
|
||||||
|
hashes := make([]string, len(infohashes))
|
||||||
|
|
||||||
|
if len(infohashes) == 0 {
|
||||||
|
return hashes, result
|
||||||
|
}
|
||||||
|
if len(infohashes) == 1 {
|
||||||
|
if cache.Exists(infohashes[0]) {
|
||||||
|
return hashes, map[string]bool{infohashes[0]: true}
|
||||||
|
}
|
||||||
|
return infohashes, result
|
||||||
|
}
|
||||||
|
|
||||||
|
cachedHashes := cache.GetMultiple(infohashes)
|
||||||
|
for _, h := range infohashes {
|
||||||
|
_, exists := cachedHashes[h]
|
||||||
|
if !exists {
|
||||||
|
hashes = append(hashes, h)
|
||||||
|
} else {
|
||||||
|
result[h] = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return hashes, result
|
||||||
|
}
|
||||||
|
|
||||||
|
func ProcessQBitTorrent(d Service, magnet *common.Magnet, arr *Arr, isSymlink bool) (*Torrent, error) {
|
||||||
|
debridTorrent := &Torrent{
|
||||||
|
InfoHash: magnet.InfoHash,
|
||||||
|
Magnet: magnet,
|
||||||
|
Name: magnet.Name,
|
||||||
|
Arr: arr,
|
||||||
|
Size: magnet.Size,
|
||||||
|
}
|
||||||
|
logger := d.GetLogger()
|
||||||
|
logger.Printf("Torrent Hash: %s", debridTorrent.InfoHash)
|
||||||
|
if !d.GetDownloadUncached() {
|
||||||
|
hash, exists := d.IsAvailable([]string{debridTorrent.InfoHash})[debridTorrent.InfoHash]
|
||||||
|
if !exists || !hash {
|
||||||
|
return debridTorrent, fmt.Errorf("torrent: %s is not cached", debridTorrent.Name)
|
||||||
|
} else {
|
||||||
|
logger.Printf("Torrent: %s is cached(or downloading)", debridTorrent.Name)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
debridTorrent, err := d.SubmitMagnet(debridTorrent)
|
||||||
|
if err != nil || debridTorrent.Id == "" {
|
||||||
|
logger.Printf("Error submitting magnet: %s", err)
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return d.CheckStatus(debridTorrent, isSymlink)
|
||||||
|
}
|
||||||
287
pkg/debrid/realdebrid.go
Normal file
287
pkg/debrid/realdebrid.go
Normal file
@@ -0,0 +1,287 @@
|
|||||||
|
package debrid
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"goBlack/common"
|
||||||
|
"goBlack/pkg/debrid/structs"
|
||||||
|
"log"
|
||||||
|
"net/http"
|
||||||
|
gourl "net/url"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// RealDebrid implements the Service interface against the Real-Debrid
// REST API.
type RealDebrid struct {
	// Host is the base URL of the Real-Debrid API.
	Host string `json:"host"`
	// APIKey is the bearer token sent on every request.
	APIKey string
	// DownloadUncached allows submitting torrents that are not yet cached.
	DownloadUncached bool
	// client is a rate-limited HTTP client with auth headers pre-set.
	client *common.RLHTTPClient
	// cache stores infohash availability results locally.
	cache *common.Cache
	// MountPath is the local folder where RD storage is mounted.
	MountPath string
	logger    *log.Logger
}
|
||||||
|
|
||||||
|
// GetMountPath returns the local folder where Real-Debrid storage is
// mounted (e.g. via rclone).
func (r *RealDebrid) GetMountPath() string {
	return r.MountPath
}

// GetName returns the provider identifier used in configuration.
func (r *RealDebrid) GetName() string {
	return "realdebrid"
}

// GetLogger exposes the provider-scoped logger.
func (r *RealDebrid) GetLogger() *log.Logger {
	return r.logger
}
|
||||||
|
|
||||||
|
func GetTorrentFiles(data structs.RealDebridTorrentInfo) []TorrentFile {
|
||||||
|
files := make([]TorrentFile, 0)
|
||||||
|
for _, f := range data.Files {
|
||||||
|
name := filepath.Base(f.Path)
|
||||||
|
if (!common.RegexMatch(common.VIDEOMATCH, name) &&
|
||||||
|
!common.RegexMatch(common.SUBMATCH, name) &&
|
||||||
|
!common.RegexMatch(common.MUSICMATCH, name)) || common.RegexMatch(common.SAMPLEMATCH, name) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
fileId := f.ID
|
||||||
|
file := &TorrentFile{
|
||||||
|
Name: name,
|
||||||
|
Path: name,
|
||||||
|
Size: int64(f.Bytes),
|
||||||
|
Id: strconv.Itoa(fileId),
|
||||||
|
}
|
||||||
|
files = append(files, *file)
|
||||||
|
}
|
||||||
|
return files
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *RealDebrid) IsAvailable(infohashes []string) map[string]bool {
|
||||||
|
// Check if the infohashes are available in the local cache
|
||||||
|
hashes, result := GetLocalCache(infohashes, r.cache)
|
||||||
|
|
||||||
|
if len(hashes) == 0 {
|
||||||
|
// Either all the infohashes are locally cached or none are
|
||||||
|
r.cache.AddMultiple(result)
|
||||||
|
return result
|
||||||
|
}
|
||||||
|
|
||||||
|
// Divide hashes into groups of 100
|
||||||
|
for i := 0; i < len(hashes); i += 200 {
|
||||||
|
end := i + 200
|
||||||
|
if end > len(hashes) {
|
||||||
|
end = len(hashes)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Filter out empty strings
|
||||||
|
validHashes := make([]string, 0, end-i)
|
||||||
|
for _, hash := range hashes[i:end] {
|
||||||
|
if hash != "" {
|
||||||
|
validHashes = append(validHashes, hash)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// If no valid hashes in this batch, continue to the next batch
|
||||||
|
if len(validHashes) == 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
hashStr := strings.Join(validHashes, "/")
|
||||||
|
url := fmt.Sprintf("%s/torrents/instantAvailability/%s", r.Host, hashStr)
|
||||||
|
resp, err := r.client.MakeRequest(http.MethodGet, url, nil)
|
||||||
|
if err != nil {
|
||||||
|
log.Println("Error checking availability:", err)
|
||||||
|
return result
|
||||||
|
}
|
||||||
|
var data structs.RealDebridAvailabilityResponse
|
||||||
|
err = json.Unmarshal(resp, &data)
|
||||||
|
if err != nil {
|
||||||
|
log.Println("Error marshalling availability:", err)
|
||||||
|
return result
|
||||||
|
}
|
||||||
|
for _, h := range hashes[i:end] {
|
||||||
|
hosters, exists := data[strings.ToLower(h)]
|
||||||
|
if exists && len(hosters.Rd) > 0 {
|
||||||
|
result[h] = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
r.cache.AddMultiple(result) // Add the results to the cache
|
||||||
|
return result
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *RealDebrid) SubmitMagnet(torrent *Torrent) (*Torrent, error) {
|
||||||
|
url := fmt.Sprintf("%s/torrents/addMagnet", r.Host)
|
||||||
|
payload := gourl.Values{
|
||||||
|
"magnet": {torrent.Magnet.Link},
|
||||||
|
}
|
||||||
|
var data structs.RealDebridAddMagnetSchema
|
||||||
|
resp, err := r.client.MakeRequest(http.MethodPost, url, strings.NewReader(payload.Encode()))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
err = json.Unmarshal(resp, &data)
|
||||||
|
log.Printf("Torrent: %s added with id: %s\n", torrent.Name, data.Id)
|
||||||
|
torrent.Id = data.Id
|
||||||
|
|
||||||
|
return torrent, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *RealDebrid) GetTorrent(id string) (*Torrent, error) {
|
||||||
|
torrent := &Torrent{}
|
||||||
|
url := fmt.Sprintf("%s/torrents/info/%s", r.Host, id)
|
||||||
|
resp, err := r.client.MakeRequest(http.MethodGet, url, nil)
|
||||||
|
if err != nil {
|
||||||
|
return torrent, err
|
||||||
|
}
|
||||||
|
var data structs.RealDebridTorrentInfo
|
||||||
|
err = json.Unmarshal(resp, &data)
|
||||||
|
if err != nil {
|
||||||
|
return torrent, err
|
||||||
|
}
|
||||||
|
name := common.RemoveInvalidChars(data.OriginalFilename)
|
||||||
|
torrent.Id = id
|
||||||
|
torrent.Name = name
|
||||||
|
torrent.Bytes = data.Bytes
|
||||||
|
torrent.Folder = name
|
||||||
|
torrent.Progress = data.Progress
|
||||||
|
torrent.Status = data.Status
|
||||||
|
torrent.Speed = data.Speed
|
||||||
|
torrent.Seeders = data.Seeders
|
||||||
|
torrent.Filename = data.Filename
|
||||||
|
torrent.OriginalFilename = data.OriginalFilename
|
||||||
|
torrent.Links = data.Links
|
||||||
|
files := GetTorrentFiles(data)
|
||||||
|
torrent.Files = files
|
||||||
|
return torrent, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *RealDebrid) CheckStatus(torrent *Torrent, isSymlink bool) (*Torrent, error) {
|
||||||
|
url := fmt.Sprintf("%s/torrents/info/%s", r.Host, torrent.Id)
|
||||||
|
for {
|
||||||
|
resp, err := r.client.MakeRequest(http.MethodGet, url, nil)
|
||||||
|
if err != nil {
|
||||||
|
log.Println("ERROR Checking file: ", err)
|
||||||
|
return torrent, err
|
||||||
|
}
|
||||||
|
var data structs.RealDebridTorrentInfo
|
||||||
|
err = json.Unmarshal(resp, &data)
|
||||||
|
status := data.Status
|
||||||
|
name := common.RemoveInvalidChars(data.OriginalFilename)
|
||||||
|
torrent.Name = name // Important because some magnet changes the name
|
||||||
|
torrent.Folder = name
|
||||||
|
torrent.Filename = data.Filename
|
||||||
|
torrent.OriginalFilename = data.OriginalFilename
|
||||||
|
torrent.Bytes = data.Bytes
|
||||||
|
torrent.Progress = data.Progress
|
||||||
|
torrent.Speed = data.Speed
|
||||||
|
torrent.Seeders = data.Seeders
|
||||||
|
torrent.Links = data.Links
|
||||||
|
torrent.Status = status
|
||||||
|
if status == "error" || status == "dead" || status == "magnet_error" {
|
||||||
|
return torrent, fmt.Errorf("torrent: %s has error", torrent.Name)
|
||||||
|
} else if status == "waiting_files_selection" {
|
||||||
|
files := GetTorrentFiles(data)
|
||||||
|
torrent.Files = files
|
||||||
|
if len(files) == 0 {
|
||||||
|
return torrent, fmt.Errorf("no video files found")
|
||||||
|
}
|
||||||
|
filesId := make([]string, 0)
|
||||||
|
for _, f := range files {
|
||||||
|
filesId = append(filesId, f.Id)
|
||||||
|
}
|
||||||
|
p := gourl.Values{
|
||||||
|
"files": {strings.Join(filesId, ",")},
|
||||||
|
}
|
||||||
|
payload := strings.NewReader(p.Encode())
|
||||||
|
_, err = r.client.MakeRequest(http.MethodPost, fmt.Sprintf("%s/torrents/selectFiles/%s", r.Host, torrent.Id), payload)
|
||||||
|
if err != nil {
|
||||||
|
return torrent, err
|
||||||
|
}
|
||||||
|
} else if status == "downloaded" {
|
||||||
|
files := GetTorrentFiles(data)
|
||||||
|
torrent.Files = files
|
||||||
|
log.Printf("Torrent: %s downloaded to RD\n", torrent.Name)
|
||||||
|
if !isSymlink {
|
||||||
|
err = r.GetDownloadLinks(torrent)
|
||||||
|
if err != nil {
|
||||||
|
return torrent, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
break
|
||||||
|
} else if status == "downloading" {
|
||||||
|
if !r.DownloadUncached {
|
||||||
|
go r.DeleteTorrent(torrent)
|
||||||
|
return torrent, fmt.Errorf("torrent: %s not cached", torrent.Name)
|
||||||
|
}
|
||||||
|
// Break out of the loop if the torrent is downloading.
|
||||||
|
// This is necessary to prevent infinite loop since we moved to sync downloading and async processing
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
return torrent, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *RealDebrid) DeleteTorrent(torrent *Torrent) {
|
||||||
|
url := fmt.Sprintf("%s/torrents/delete/%s", r.Host, torrent.Id)
|
||||||
|
_, err := r.client.MakeRequest(http.MethodDelete, url, nil)
|
||||||
|
if err == nil {
|
||||||
|
r.logger.Printf("Torrent: %s deleted\n", torrent.Name)
|
||||||
|
} else {
|
||||||
|
r.logger.Printf("Error deleting torrent: %s", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *RealDebrid) GetDownloadLinks(torrent *Torrent) error {
|
||||||
|
url := fmt.Sprintf("%s/unrestrict/link/", r.Host)
|
||||||
|
downloadLinks := make([]TorrentDownloadLinks, 0)
|
||||||
|
for _, link := range torrent.Links {
|
||||||
|
if link == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
payload := gourl.Values{
|
||||||
|
"link": {link},
|
||||||
|
}
|
||||||
|
resp, err := r.client.MakeRequest(http.MethodPost, url, strings.NewReader(payload.Encode()))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
var data structs.RealDebridUnrestrictResponse
|
||||||
|
if err = json.Unmarshal(resp, &data); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
download := TorrentDownloadLinks{
|
||||||
|
Link: data.Link,
|
||||||
|
Filename: data.Filename,
|
||||||
|
DownloadLink: data.Download,
|
||||||
|
}
|
||||||
|
downloadLinks = append(downloadLinks, download)
|
||||||
|
}
|
||||||
|
torrent.DownloadLinks = downloadLinks
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetDownloadUncached reports whether uncached torrents may be submitted
// and downloaded on the provider side.
func (r *RealDebrid) GetDownloadUncached() bool {
	return r.DownloadUncached
}
|
||||||
|
|
||||||
|
// NewRealDebrid wires up a Real-Debrid client from configuration: a
// rate-limited HTTP client with bearer-token auth, the shared
// availability cache, and a provider-scoped logger.
func NewRealDebrid(dc common.DebridConfig, cache *common.Cache) *RealDebrid {
	rl := common.ParseRateLimit(dc.RateLimit)
	headers := map[string]string{
		"Authorization": fmt.Sprintf("Bearer %s", dc.APIKey),
	}
	client := common.NewRLHTTPClient(rl, headers)
	logger := common.NewLogger(dc.Name, os.Stdout)
	return &RealDebrid{
		Host:             dc.Host,
		APIKey:           dc.APIKey,
		DownloadUncached: dc.DownloadUncached,
		client:           client,
		cache:            cache,
		MountPath:        dc.Folder,
		logger:           logger,
	}
}
|
||||||
107
pkg/debrid/structs/realdebrid.go
Normal file
107
pkg/debrid/structs/realdebrid.go
Normal file
@@ -0,0 +1,107 @@
|
|||||||
|
package structs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
)
|
||||||
|
|
||||||
|
type RealDebridAvailabilityResponse map[string]Hoster
|
||||||
|
|
||||||
|
func (r *RealDebridAvailabilityResponse) UnmarshalJSON(data []byte) error {
|
||||||
|
// First, try to unmarshal as an object
|
||||||
|
var objectData map[string]Hoster
|
||||||
|
err := json.Unmarshal(data, &objectData)
|
||||||
|
if err == nil {
|
||||||
|
*r = objectData
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// If that fails, try to unmarshal as an array
|
||||||
|
var arrayData []map[string]Hoster
|
||||||
|
err = json.Unmarshal(data, &arrayData)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to unmarshal as both object and array: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// If it's an array, use the first element
|
||||||
|
if len(arrayData) > 0 {
|
||||||
|
*r = arrayData[0]
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// If it's an empty array, initialize as an empty map
|
||||||
|
*r = make(map[string]Hoster)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type Hoster struct {
|
||||||
|
Rd []map[string]FileVariant `json:"rd"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *Hoster) UnmarshalJSON(data []byte) error {
|
||||||
|
// Attempt to unmarshal into the expected structure (an object with an "rd" key)
|
||||||
|
type Alias Hoster
|
||||||
|
var obj Alias
|
||||||
|
if err := json.Unmarshal(data, &obj); err == nil {
|
||||||
|
*h = Hoster(obj)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// If unmarshalling into an object fails, check if it's an empty array
|
||||||
|
var arr []interface{}
|
||||||
|
if err := json.Unmarshal(data, &arr); err == nil && len(arr) == 0 {
|
||||||
|
// It's an empty array; initialize with no entries
|
||||||
|
*h = Hoster{Rd: nil}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// If both attempts fail, return an error
|
||||||
|
return fmt.Errorf("hoster: cannot unmarshal JSON data: %s", string(data))
|
||||||
|
}
|
||||||
|
|
||||||
|
type FileVariant struct {
|
||||||
|
Filename string `json:"filename"`
|
||||||
|
Filesize int `json:"filesize"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// RealDebridAddMagnetSchema is the response of POST /torrents/addMagnet.
type RealDebridAddMagnetSchema struct {
	Id  string `json:"id"`
	Uri string `json:"uri"`
}

// RealDebridTorrentInfo is the response of GET /torrents/info/{id}.
type RealDebridTorrentInfo struct {
	ID               string  `json:"id"`
	Filename         string  `json:"filename"`
	OriginalFilename string  `json:"original_filename"`
	Hash             string  `json:"hash"`
	Bytes            int64   `json:"bytes"`
	OriginalBytes    int     `json:"original_bytes"`
	Host             string  `json:"host"`
	Split            int     `json:"split"`
	Progress         float64 `json:"progress"`
	// Status is one of the RD torrent states, e.g. "downloaded",
	// "downloading", "waiting_files_selection", "error".
	Status string `json:"status"`
	Added  string `json:"added"`
	Files  []struct {
		ID       int    `json:"id"`
		Path     string `json:"path"`
		Bytes    int    `json:"bytes"`
		Selected int    `json:"selected"`
	} `json:"files"`
	Links   []string `json:"links"`
	Ended   string   `json:"ended,omitempty"`
	Speed   int64    `json:"speed,omitempty"`
	Seeders int      `json:"seeders,omitempty"`
}

// RealDebridUnrestrictResponse is the response of POST /unrestrict/link.
type RealDebridUnrestrictResponse struct {
	Id       string `json:"id"`
	Filename string `json:"filename"`
	MimeType string `json:"mimeType"`
	Filesize int64  `json:"filesize"`
	// Link is the original hoster link that was unrestricted.
	Link string `json:"link"`
	Host string `json:"host"`
	Chunks int64 `json:"chunks"`
	Crc    int64 `json:"crc"`
	// Download is the generated direct-download URL.
	Download   string `json:"download"`
	Streamable int    `json:"streamable"`
}
|
||||||
103
pkg/debrid/torrent.go
Normal file
103
pkg/debrid/torrent.go
Normal file
@@ -0,0 +1,103 @@
|
|||||||
|
package debrid
|
||||||
|
|
||||||
|
import (
|
||||||
|
"goBlack/common"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Arr identifies a Sonarr/Radarr-style client that submitted a torrent.
type Arr struct {
	// Name is the configured display name of the *arr instance.
	Name string `json:"name"`
	// Token is the *arr API key.
	Token string `json:"token"`
	// Host is the base URL of the *arr instance.
	Host string `json:"host"`
}

// ArrHistorySchema mirrors the paginated /history response of an *arr.
type ArrHistorySchema struct {
	Page          int    `json:"page"`
	PageSize      int    `json:"pageSize"`
	SortKey       string `json:"sortKey"`
	SortDirection string `json:"sortDirection"`
	TotalRecords  int    `json:"totalRecords"`
	Records       []struct {
		ID         int    `json:"id"`
		DownloadID string `json:"downloadId"`
	} `json:"records"`
}
|
||||||
|
|
||||||
|
// Torrent is the internal representation of a torrent tracked through a
// debrid provider, combining magnet metadata, provider state, and the
// resolved download links.
type Torrent struct {
	// Id is the provider-side torrent id.
	Id       string `json:"id"`
	InfoHash string `json:"info_hash"`
	Name     string `json:"name"`
	// Folder is the sanitized name used for the on-disk/symlink folder.
	Folder           string `json:"folder"`
	Filename         string `json:"filename"`
	OriginalFilename string `json:"original_filename"`
	Size             int64  `json:"size"`
	Bytes            int64  `json:"bytes"` // Size of only the files that are downloaded
	Magnet           *common.Magnet `json:"magnet"`
	Files            []TorrentFile  `json:"files"`
	// Status is the provider-reported state (e.g. "downloaded").
	Status   string  `json:"status"`
	Progress float64 `json:"progress"`
	Speed    int64   `json:"speed"`
	Seeders  int     `json:"seeders"`
	// Links are the provider host links; DownloadLinks are their
	// unrestricted direct-download counterparts.
	Links         []string               `json:"links"`
	DownloadLinks []TorrentDownloadLinks `json:"download_links"`

	Debrid *Debrid
	// Arr is the *arr client that requested this torrent, if any.
	Arr *Arr
}
|
||||||
|
|
||||||
|
// TorrentDownloadLinks pairs a provider host link with its unrestricted
// direct-download URL.
type TorrentDownloadLinks struct {
	Filename string `json:"filename"`
	// Link is the original host link.
	Link string `json:"link"`
	// DownloadLink is the unrestricted direct-download URL.
	DownloadLink string `json:"download_link"`
}

// GetSymlinkFolder returns parent/<arr name>/<torrent folder>, the
// destination folder for symlinked content.
func (t *Torrent) GetSymlinkFolder(parent string) string {
	return filepath.Join(parent, t.Arr.Name, t.Folder)
}
|
||||||
|
|
||||||
|
func (t *Torrent) GetMountFolder(rClonePath string) string {
|
||||||
|
pathWithNoExt := common.RemoveExtension(t.OriginalFilename)
|
||||||
|
if common.FileReady(filepath.Join(rClonePath, t.OriginalFilename)) {
|
||||||
|
return t.OriginalFilename
|
||||||
|
} else if common.FileReady(filepath.Join(rClonePath, t.Filename)) {
|
||||||
|
return t.Filename
|
||||||
|
} else if common.FileReady(filepath.Join(rClonePath, pathWithNoExt)) {
|
||||||
|
return pathWithNoExt
|
||||||
|
} else {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TorrentFile is a single file inside a torrent, as tracked locally.
type TorrentFile struct {
	// Id is the provider-side file id (stringified).
	Id   string `json:"id"`
	Name string `json:"name"`
	Size int64  `json:"size"`
	Path string `json:"path"`
}
|
||||||
|
|
||||||
|
// getEventId maps an *arr history event type to its numeric event id.
// Unknown event types map to 0.
func getEventId(eventType string) int {
	ids := map[string]int{
		"grabbed":                1,
		"seriesFolderDownloaded": 2,
		"DownloadFolderImported": 3,
		"DownloadFailed":         4,
		"DownloadIgnored":        7,
	}
	// The map's zero value reproduces the original default branch.
	return ids[eventType]
}
|
||||||
|
|
||||||
|
func (t *Torrent) Cleanup(remove bool) {
|
||||||
|
if remove {
|
||||||
|
err := os.Remove(t.Filename)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
333
pkg/proxy/proxy.go
Normal file
333
pkg/proxy/proxy.go
Normal file
@@ -0,0 +1,333 @@
|
|||||||
|
package proxy
|
||||||
|
|
||||||
|
import (
	"bytes"
	"cmp"
	"encoding/xml"
	"fmt"
	"io"
	"log"
	"net/http"
	"os"
	"regexp"
	"strconv"
	"strings"
	"sync"

	"github.com/elazarl/goproxy"
	"github.com/elazarl/goproxy/ext/auth"
	"github.com/valyala/fastjson"

	"goBlack/common"
	"goBlack/pkg/debrid"
)
|
||||||
|
|
||||||
|
// RSS models a torznab/newznab RSS feed document returned by an indexer.
type RSS struct {
	XMLName xml.Name `xml:"rss"`
	Text    string   `xml:",chardata"`
	Version string   `xml:"version,attr"`
	// Atom and Torznab carry the xmlns attributes of the feed.
	Atom    string `xml:"atom,attr"`
	Torznab string `xml:"torznab,attr"`
	Channel struct {
		Text string `xml:",chardata"`
		Link struct {
			Text string `xml:",chardata"`
			Rel  string `xml:"rel,attr"`
			Type string `xml:"type,attr"`
		} `xml:"link"`
		Title string `xml:"title"`
		// Items are the feed's release entries.
		Items []Item `xml:"item"`
	} `xml:"channel"`
}
|
||||||
|
|
||||||
|
// Item is one release entry inside a torznab/newznab feed.
type Item struct {
	Text        string `xml:",chardata"`
	Title       string `xml:"title"`
	Description string `xml:"description"`
	GUID        string `xml:"guid"`
	// ProwlarrIndexer identifies the upstream indexer when the feed is
	// proxied through Prowlarr.
	ProwlarrIndexer struct {
		Text string `xml:",chardata"`
		ID   string `xml:"id,attr"`
		Type string `xml:"type,attr"`
	} `xml:"prowlarrindexer"`
	Comments string   `xml:"comments"`
	PubDate  string   `xml:"pubDate"`
	Size     string   `xml:"size"`
	Link     string   `xml:"link"`
	Category []string `xml:"category"`
	// Enclosure holds the download URL for the release.
	Enclosure struct {
		Text   string `xml:",chardata"`
		URL    string `xml:"url,attr"`
		Length string `xml:"length,attr"`
		Type   string `xml:"type,attr"`
	} `xml:"enclosure"`
	// TorznabAttrs are the torznab name/value attribute pairs
	// (e.g. infohash, seeders).
	TorznabAttrs []struct {
		Text  string `xml:",chardata"`
		Name  string `xml:"name,attr"`
		Value string `xml:"value,attr"`
	} `xml:"attr"`
}
|
||||||
|
|
||||||
|
// Proxy filters indexer responses through the debrid availability check
// before they reach the *arrs.
type Proxy struct {
	// port is the TCP port the proxy listens on.
	port    string
	enabled bool
	debug   bool
	// username/password enable basic auth when non-empty.
	username string
	password string
	// cachedOnly drops releases that are not cached on the provider.
	cachedOnly bool
	debrid     debrid.Service
	cache      *common.Cache
	logger     *log.Logger
}
|
||||||
|
|
||||||
|
func NewProxy(config common.Config, deb debrid.Service, cache *common.Cache) *Proxy {
|
||||||
|
cfg := config.Proxy
|
||||||
|
port := cmp.Or(os.Getenv("PORT"), cfg.Port, "8181")
|
||||||
|
return &Proxy{
|
||||||
|
port: port,
|
||||||
|
enabled: cfg.Enabled,
|
||||||
|
debug: cfg.Debug,
|
||||||
|
username: cfg.Username,
|
||||||
|
password: cfg.Password,
|
||||||
|
cachedOnly: *cfg.CachedOnly,
|
||||||
|
debrid: deb,
|
||||||
|
cache: cache,
|
||||||
|
logger: common.NewLogger("Proxy", os.Stdout),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *Proxy) ProcessJSONResponse(resp *http.Response) *http.Response {
|
||||||
|
if resp == nil || resp.Body == nil {
|
||||||
|
return resp
|
||||||
|
}
|
||||||
|
|
||||||
|
body, err := io.ReadAll(resp.Body)
|
||||||
|
if err != nil {
|
||||||
|
return resp
|
||||||
|
}
|
||||||
|
err = resp.Body.Close()
|
||||||
|
if err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var par fastjson.Parser
|
||||||
|
v, err := par.ParseBytes(body)
|
||||||
|
if err != nil {
|
||||||
|
// If it's not JSON, return the original response
|
||||||
|
resp.Body = io.NopCloser(bytes.NewReader(body))
|
||||||
|
return resp
|
||||||
|
}
|
||||||
|
|
||||||
|
// Modify the JSON
|
||||||
|
|
||||||
|
// Serialize the modified JSON back to bytes
|
||||||
|
modifiedBody := v.MarshalTo(nil)
|
||||||
|
|
||||||
|
// Set the modified body back to the response
|
||||||
|
resp.Body = io.NopCloser(bytes.NewReader(modifiedBody))
|
||||||
|
resp.ContentLength = int64(len(modifiedBody))
|
||||||
|
resp.Header.Set("Content-Length", string(rune(len(modifiedBody))))
|
||||||
|
|
||||||
|
return resp
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *Proxy) ProcessResponse(resp *http.Response) *http.Response {
|
||||||
|
if resp == nil || resp.Body == nil {
|
||||||
|
return resp
|
||||||
|
}
|
||||||
|
contentType := resp.Header.Get("Content-Type")
|
||||||
|
switch contentType {
|
||||||
|
case "application/json":
|
||||||
|
return resp // p.ProcessJSONResponse(resp)
|
||||||
|
case "application/xml":
|
||||||
|
return p.ProcessXMLResponse(resp)
|
||||||
|
case "application/rss+xml":
|
||||||
|
return p.ProcessXMLResponse(resp)
|
||||||
|
default:
|
||||||
|
return resp
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func getItemsHash(items []Item) map[string]string {
|
||||||
|
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
idHashMap := sync.Map{} // Use sync.Map for concurrent access
|
||||||
|
|
||||||
|
for _, item := range items {
|
||||||
|
wg.Add(1)
|
||||||
|
go func(item Item) {
|
||||||
|
defer wg.Done()
|
||||||
|
hash := strings.ToLower(item.getHash())
|
||||||
|
if hash != "" {
|
||||||
|
idHashMap.Store(item.GUID, hash) // Store directly into sync.Map
|
||||||
|
}
|
||||||
|
}(item)
|
||||||
|
}
|
||||||
|
wg.Wait()
|
||||||
|
|
||||||
|
// Convert sync.Map to regular map
|
||||||
|
finalMap := make(map[string]string)
|
||||||
|
idHashMap.Range(func(key, value interface{}) bool {
|
||||||
|
finalMap[key.(string)] = value.(string)
|
||||||
|
return true
|
||||||
|
})
|
||||||
|
|
||||||
|
return finalMap
|
||||||
|
}
|
||||||
|
|
||||||
|
func (item Item) getHash() string {
|
||||||
|
infohash := ""
|
||||||
|
|
||||||
|
for _, attr := range item.TorznabAttrs {
|
||||||
|
if attr.Name == "infohash" {
|
||||||
|
return attr.Value
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if strings.Contains(item.GUID, "magnet:?") {
|
||||||
|
magnet, err := common.GetMagnetInfo(item.GUID)
|
||||||
|
if err == nil && magnet != nil && magnet.InfoHash != "" {
|
||||||
|
return magnet.InfoHash
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
magnetLink := item.Link
|
||||||
|
|
||||||
|
if magnetLink == "" {
|
||||||
|
// We can't check the availability of the torrent without a magnet link or infohash
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
if strings.Contains(magnetLink, "magnet:?") {
|
||||||
|
magnet, err := common.GetMagnetInfo(magnetLink)
|
||||||
|
if err == nil && magnet != nil && magnet.InfoHash != "" {
|
||||||
|
return magnet.InfoHash
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
//Check Description for infohash
|
||||||
|
hash := common.ExtractInfoHash(item.Description)
|
||||||
|
if hash == "" {
|
||||||
|
// Check Title for infohash
|
||||||
|
hash = common.ExtractInfoHash(item.Comments)
|
||||||
|
}
|
||||||
|
infohash = hash
|
||||||
|
if infohash == "" {
|
||||||
|
if strings.Contains(magnetLink, "http") {
|
||||||
|
h, _ := common.GetInfohashFromURL(magnetLink)
|
||||||
|
if h != "" {
|
||||||
|
infohash = h
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return infohash
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *Proxy) ProcessXMLResponse(resp *http.Response) *http.Response {
|
||||||
|
if resp == nil || resp.Body == nil {
|
||||||
|
return resp
|
||||||
|
}
|
||||||
|
|
||||||
|
body, err := io.ReadAll(resp.Body)
|
||||||
|
if err != nil {
|
||||||
|
p.logger.Println("Error reading response body:", err)
|
||||||
|
resp.Body = io.NopCloser(bytes.NewReader(body))
|
||||||
|
return resp
|
||||||
|
}
|
||||||
|
err = resp.Body.Close()
|
||||||
|
if err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var rss RSS
|
||||||
|
err = xml.Unmarshal(body, &rss)
|
||||||
|
if err != nil {
|
||||||
|
p.logger.Printf("Error unmarshalling XML: %v", err)
|
||||||
|
resp.Body = io.NopCloser(bytes.NewReader(body))
|
||||||
|
return resp
|
||||||
|
}
|
||||||
|
indexer := ""
|
||||||
|
if len(rss.Channel.Items) > 0 {
|
||||||
|
indexer = rss.Channel.Items[0].ProwlarrIndexer.Text
|
||||||
|
} else {
|
||||||
|
resp.Body = io.NopCloser(bytes.NewReader(body))
|
||||||
|
return resp
|
||||||
|
}
|
||||||
|
|
||||||
|
// Step 4: Extract infohash or magnet URI, manipulate data
|
||||||
|
IdsHashMap := getItemsHash(rss.Channel.Items)
|
||||||
|
hashes := make([]string, 0)
|
||||||
|
for _, hash := range IdsHashMap {
|
||||||
|
if hash != "" {
|
||||||
|
hashes = append(hashes, hash)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
availableHashesMap := p.debrid.IsAvailable(hashes)
|
||||||
|
newItems := make([]Item, 0, len(rss.Channel.Items))
|
||||||
|
|
||||||
|
if len(hashes) > 0 {
|
||||||
|
for _, item := range rss.Channel.Items {
|
||||||
|
hash := IdsHashMap[item.GUID]
|
||||||
|
if hash == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
isCached, exists := availableHashesMap[hash]
|
||||||
|
if !exists || !isCached {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
newItems = append(newItems, item)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(newItems) > 0 {
|
||||||
|
p.logger.Printf("[%s Report]: %d/%d items are cached || Found %d infohash", indexer, len(newItems), len(rss.Channel.Items), len(hashes))
|
||||||
|
} else {
|
||||||
|
// This will prevent the indexer from being disabled by the arr
|
||||||
|
p.logger.Printf("[%s Report]: No Items are cached; Return only first item with [UnCached]", indexer)
|
||||||
|
item := rss.Channel.Items[0]
|
||||||
|
item.Title = fmt.Sprintf("%s [UnCached]", item.Title)
|
||||||
|
newItems = append(newItems, item)
|
||||||
|
}
|
||||||
|
|
||||||
|
rss.Channel.Items = newItems
|
||||||
|
modifiedBody, err := xml.MarshalIndent(rss, "", " ")
|
||||||
|
if err != nil {
|
||||||
|
p.logger.Printf("Error marshalling XML: %v", err)
|
||||||
|
resp.Body = io.NopCloser(bytes.NewReader(body))
|
||||||
|
return resp
|
||||||
|
}
|
||||||
|
modifiedBody = append([]byte(xml.Header), modifiedBody...)
|
||||||
|
|
||||||
|
// Set the modified body back to the response
|
||||||
|
resp.Body = io.NopCloser(bytes.NewReader(modifiedBody))
|
||||||
|
return resp
|
||||||
|
}
|
||||||
|
|
||||||
|
func UrlMatches(re *regexp.Regexp) goproxy.ReqConditionFunc {
|
||||||
|
return func(req *http.Request, ctx *goproxy.ProxyCtx) bool {
|
||||||
|
return re.MatchString(req.URL.String())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *Proxy) Start() {
|
||||||
|
username, password := p.username, p.password
|
||||||
|
proxy := goproxy.NewProxyHttpServer()
|
||||||
|
if username != "" || password != "" {
|
||||||
|
// Set up basic auth for proxy
|
||||||
|
auth.ProxyBasic(proxy, "my_realm", func(user, pwd string) bool {
|
||||||
|
return user == username && password == pwd
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
proxy.OnRequest(goproxy.ReqHostMatches(regexp.MustCompile("^.443$"))).HandleConnect(goproxy.AlwaysMitm)
|
||||||
|
proxy.OnResponse(
|
||||||
|
UrlMatches(regexp.MustCompile("^.*/api\\?t=(search|tvsearch|movie)(&.*)?$")),
|
||||||
|
goproxy.StatusCodeIs(http.StatusOK, http.StatusAccepted)).DoFunc(
|
||||||
|
func(resp *http.Response, ctx *goproxy.ProxyCtx) *http.Response {
|
||||||
|
return p.ProcessResponse(resp)
|
||||||
|
})
|
||||||
|
|
||||||
|
proxy.Verbose = p.debug
|
||||||
|
portFmt := fmt.Sprintf(":%s", p.port)
|
||||||
|
p.logger.Printf("[*] Starting proxy server on %s\n", portFmt)
|
||||||
|
p.logger.Fatal(http.ListenAndServe(fmt.Sprintf("%s", portFmt), proxy))
|
||||||
|
}
|
||||||
103
pkg/qbit/arr.go
Normal file
103
pkg/qbit/arr.go
Normal file
@@ -0,0 +1,103 @@
|
|||||||
|
package qbit
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"cmp"
|
||||||
|
"encoding/json"
|
||||||
|
"goBlack/common"
|
||||||
|
"goBlack/pkg/debrid"
|
||||||
|
"net/http"
|
||||||
|
gourl "net/url"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
func (q *QBit) RefreshArr(arr *debrid.Arr) {
|
||||||
|
if arr.Token == "" || arr.Host == "" {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
url, err := common.JoinURL(arr.Host, "api/v3/command")
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
payload := map[string]string{"name": "RefreshMonitoredDownloads"}
|
||||||
|
jsonPayload, err := json.Marshal(payload)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
client := &http.Client{}
|
||||||
|
req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonPayload))
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
req.Header.Set("Content-Type", "application/json")
|
||||||
|
req.Header.Set("X-Api-Key", arr.Token)
|
||||||
|
|
||||||
|
resp, reqErr := client.Do(req)
|
||||||
|
if reqErr == nil {
|
||||||
|
statusOk := strconv.Itoa(resp.StatusCode)[0] == '2'
|
||||||
|
if statusOk {
|
||||||
|
if q.debug {
|
||||||
|
q.logger.Printf("Refreshed monitored downloads for %s", cmp.Or(arr.Name, arr.Host))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if reqErr != nil {
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (q *QBit) GetArrHistory(arr *debrid.Arr, downloadId, eventType string) *debrid.ArrHistorySchema {
|
||||||
|
query := gourl.Values{}
|
||||||
|
if downloadId != "" {
|
||||||
|
query.Add("downloadId", downloadId)
|
||||||
|
}
|
||||||
|
query.Add("eventType", eventType)
|
||||||
|
query.Add("pageSize", "100")
|
||||||
|
url, _ := common.JoinURL(arr.Host, "history")
|
||||||
|
url += "?" + query.Encode()
|
||||||
|
resp, err := http.Get(url)
|
||||||
|
if err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
var data *debrid.ArrHistorySchema
|
||||||
|
|
||||||
|
if err = json.NewDecoder(resp.Body).Decode(&data); err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return data
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func (q *QBit) MarkArrAsFailed(torrent *Torrent, arr *debrid.Arr) error {
|
||||||
|
downloadId := strings.ToUpper(torrent.Hash)
|
||||||
|
history := q.GetArrHistory(arr, downloadId, "grabbed")
|
||||||
|
if history == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
torrentId := 0
|
||||||
|
for _, record := range history.Records {
|
||||||
|
if strings.EqualFold(record.DownloadID, downloadId) {
|
||||||
|
torrentId = record.ID
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if torrentId != 0 {
|
||||||
|
url, err := common.JoinURL(arr.Host, "history/failed/", strconv.Itoa(torrentId))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
req, err := http.NewRequest(http.MethodPost, url, nil)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
client := &http.Client{}
|
||||||
|
_, err = client.Do(req)
|
||||||
|
if err == nil {
|
||||||
|
q.logger.Printf("Marked torrent: %s as failed", torrent.Name)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
134
pkg/qbit/downloader.go
Normal file
134
pkg/qbit/downloader.go
Normal file
@@ -0,0 +1,134 @@
|
|||||||
|
package qbit
|
||||||
|
|
||||||
|
import (
|
||||||
|
"goBlack/common"
|
||||||
|
"goBlack/pkg/debrid"
|
||||||
|
"goBlack/pkg/qbit/downloaders"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
func (q *QBit) processManualFiles(torrent *Torrent, debridTorrent *debrid.Torrent, arr *debrid.Arr) {
|
||||||
|
q.logger.Printf("Downloading %d files...", len(debridTorrent.DownloadLinks))
|
||||||
|
torrentPath := common.RemoveExtension(debridTorrent.OriginalFilename)
|
||||||
|
parent := common.RemoveInvalidChars(filepath.Join(q.DownloadFolder, debridTorrent.Arr.Name, torrentPath))
|
||||||
|
err := os.MkdirAll(parent, os.ModePerm)
|
||||||
|
if err != nil {
|
||||||
|
q.logger.Printf("Failed to create directory: %s\n", parent)
|
||||||
|
q.MarkAsFailed(torrent)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
torrent.TorrentPath = torrentPath
|
||||||
|
q.downloadFiles(debridTorrent, parent)
|
||||||
|
q.UpdateTorrent(torrent, debridTorrent)
|
||||||
|
q.RefreshArr(arr)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (q *QBit) downloadFiles(debridTorrent *debrid.Torrent, parent string) {
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
semaphore := make(chan struct{}, 5)
|
||||||
|
client := downloaders.GetHTTPClient()
|
||||||
|
for _, link := range debridTorrent.DownloadLinks {
|
||||||
|
if link.DownloadLink == "" {
|
||||||
|
q.logger.Printf("No download link found for %s\n", link.Filename)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
wg.Add(1)
|
||||||
|
semaphore <- struct{}{}
|
||||||
|
go func(link debrid.TorrentDownloadLinks) {
|
||||||
|
defer wg.Done()
|
||||||
|
defer func() { <-semaphore }()
|
||||||
|
err := downloaders.NormalHTTP(client, link.DownloadLink, filepath.Join(parent, link.Filename))
|
||||||
|
if err != nil {
|
||||||
|
q.logger.Printf("Error downloading %s: %v\n", link.DownloadLink, err)
|
||||||
|
} else {
|
||||||
|
q.logger.Printf("Downloaded %s successfully\n", link.DownloadLink)
|
||||||
|
}
|
||||||
|
}(link)
|
||||||
|
}
|
||||||
|
wg.Wait()
|
||||||
|
q.logger.Printf("Downloaded all files for %s\n", debridTorrent.Name)
|
||||||
|
}
|
||||||
|
|
||||||
|
// processSymlink waits for the torrent's files to appear on the rclone
// mount, symlinks each one into the download folder as it becomes ready,
// then updates the torrent record and refreshes the arr. Marks the torrent
// failed if the mount folder can't be resolved or the target directory
// can't be created.
func (q *QBit) processSymlink(torrent *Torrent, debridTorrent *debrid.Torrent, arr *debrid.Arr) {
	var wg sync.WaitGroup
	files := debridTorrent.Files
	// Buffered so checker goroutines never block on a slow consumer.
	ready := make(chan debrid.TorrentFile, len(files))

	q.logger.Printf("Checking %d files...", len(files))
	rCloneBase := q.debrid.GetMountPath()
	torrentPath, err := q.getTorrentPath(rCloneBase, debridTorrent) // /MyTVShow/
	if err != nil {
		q.MarkAsFailed(torrent)
		q.logger.Printf("Error: %v", err)
		return
	}

	torrentSymlinkPath := filepath.Join(q.DownloadFolder, debridTorrent.Arr.Name, torrentPath) // /mnt/symlinks/{category}/MyTVShow/
	err = os.MkdirAll(torrentSymlinkPath, os.ModePerm)
	if err != nil {
		q.logger.Printf("Failed to create directory: %s\n", torrentSymlinkPath)
		q.MarkAsFailed(torrent)
		return
	}
	torrentRclonePath := filepath.Join(rCloneBase, torrentPath)
	// One watcher goroutine per file; each sends on `ready` when its file
	// shows up on the mount.
	for _, file := range files {
		wg.Add(1)
		go checkFileLoop(&wg, torrentRclonePath, file, ready)
	}

	// Close `ready` once every watcher has finished so the range below ends.
	go func() {
		wg.Wait()
		close(ready)
	}()

	// Symlink files in arrival order; this loop ends when `ready` closes.
	for f := range ready {
		q.logger.Println("File is ready:", f.Path)
		q.createSymLink(torrentSymlinkPath, torrentRclonePath, f)
	}
	// Update the torrent when all files are ready
	torrent.TorrentPath = filepath.Base(torrentPath) // Quite important
	q.UpdateTorrent(torrent, debridTorrent)
	q.RefreshArr(arr)
}
|
||||||
|
|
||||||
|
// getTorrentPath polls the rclone mount once per second until the torrent's
// folder appears, then returns the folder path.
// NOTE(review): errChan is never written to, so the error case below is
// dead code, and there is no timeout — this blocks forever if the folder
// never appears. Confirm whether a deadline should be added.
func (q *QBit) getTorrentPath(rclonePath string, debridTorrent *debrid.Torrent) (string, error) {
	pathChan := make(chan string)
	errChan := make(chan error)

	go func() {
		for {
			torrentPath := debridTorrent.GetMountFolder(rclonePath)
			if torrentPath != "" {
				pathChan <- torrentPath
				return
			}
			time.Sleep(time.Second)
		}
	}()

	select {
	case path := <-pathChan:
		return path, nil
	case err := <-errChan:
		return "", err
	}
}
|
||||||
|
|
||||||
|
func (q *QBit) createSymLink(path string, torrentMountPath string, file debrid.TorrentFile) {
|
||||||
|
|
||||||
|
// Combine the directory and filename to form a full path
|
||||||
|
fullPath := filepath.Join(path, file.Name) // /mnt/symlinks/{category}/MyTVShow/MyTVShow.S01E01.720p.mkv
|
||||||
|
// Create a symbolic link if file doesn't exist
|
||||||
|
torrentFilePath := filepath.Join(torrentMountPath, file.Name) // debridFolder/MyTVShow/MyTVShow.S01E01.720p.mkv
|
||||||
|
err := os.Symlink(torrentFilePath, fullPath)
|
||||||
|
if err != nil {
|
||||||
|
q.logger.Printf("Failed to create symlink: %s\n", fullPath)
|
||||||
|
}
|
||||||
|
// Check if the file exists
|
||||||
|
if !common.FileReady(fullPath) {
|
||||||
|
q.logger.Printf("Symlink not ready: %s\n", fullPath)
|
||||||
|
}
|
||||||
|
}
|
||||||
59
pkg/qbit/downloaders/fasthttp.go
Normal file
59
pkg/qbit/downloaders/fasthttp.go
Normal file
@@ -0,0 +1,59 @@
|
|||||||
|
package downloaders
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/tls"
|
||||||
|
"fmt"
|
||||||
|
"github.com/valyala/fasthttp"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
)
|
||||||
|
|
||||||
|
// GetFastHTTPClient returns a fasthttp client configured to stream response
// bodies instead of buffering them in memory.
// NOTE(review): InsecureSkipVerify disables TLS certificate validation for
// every download — confirm this is intentional.
func GetFastHTTPClient() *fasthttp.Client {
	return &fasthttp.Client{
		TLSConfig:          &tls.Config{InsecureSkipVerify: true},
		StreamResponseBody: true,
	}
}
|
||||||
|
|
||||||
|
func NormalFastHTTP(client *fasthttp.Client, url, filename string) error {
|
||||||
|
req := fasthttp.AcquireRequest()
|
||||||
|
resp := fasthttp.AcquireResponse()
|
||||||
|
defer fasthttp.ReleaseRequest(req)
|
||||||
|
defer fasthttp.ReleaseResponse(resp)
|
||||||
|
|
||||||
|
req.SetRequestURI(url)
|
||||||
|
req.Header.SetMethod(fasthttp.MethodGet)
|
||||||
|
|
||||||
|
if err := client.Do(req, resp); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check the response status code
|
||||||
|
if resp.StatusCode() != fasthttp.StatusOK {
|
||||||
|
return fmt.Errorf("unexpected status code: %d", resp.StatusCode())
|
||||||
|
}
|
||||||
|
file, err := os.Create(filename)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer func(file *os.File) {
|
||||||
|
err := file.Close()
|
||||||
|
if err != nil {
|
||||||
|
fmt.Println("Error closing file:", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}(file)
|
||||||
|
bodyStream := resp.BodyStream()
|
||||||
|
if bodyStream == nil {
|
||||||
|
// Write to memory and then to file
|
||||||
|
_, err := file.Write(resp.Body())
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if _, err := io.Copy(file, bodyStream); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
55
pkg/qbit/downloaders/grab.go
Normal file
55
pkg/qbit/downloaders/grab.go
Normal file
@@ -0,0 +1,55 @@
|
|||||||
|
package downloaders
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/tls"
|
||||||
|
"fmt"
|
||||||
|
"github.com/cavaliergopher/grab/v3"
|
||||||
|
"net/http"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// GetGrabClient returns a grab download client advertising a qBittorrent
// user agent and honoring proxy environment variables.
// NOTE(review): InsecureSkipVerify disables TLS certificate validation for
// every download — confirm this is intentional.
func GetGrabClient() *grab.Client {
	tr := &http.Transport{
		TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		Proxy:           http.ProxyFromEnvironment,
	}
	return &grab.Client{
		UserAgent: "qBitTorrent",
		HTTPClient: &http.Client{
			Transport: tr,
		},
	}
}
|
||||||
|
|
||||||
|
func NormalGrab(client *grab.Client, url, filename string) error {
|
||||||
|
req, err := grab.NewRequest(filename, url)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
resp := client.Do(req)
|
||||||
|
if err := resp.Err(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
t := time.NewTicker(2 * time.Second)
|
||||||
|
defer t.Stop()
|
||||||
|
Loop:
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-t.C:
|
||||||
|
fmt.Printf(" %s: transferred %d / %d bytes (%.2f%%)\n",
|
||||||
|
resp.Filename,
|
||||||
|
resp.BytesComplete(),
|
||||||
|
resp.Size,
|
||||||
|
100*resp.Progress())
|
||||||
|
|
||||||
|
case <-resp.Done:
|
||||||
|
// download is complete
|
||||||
|
break Loop
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err := resp.Err(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
44
pkg/qbit/downloaders/http.go
Normal file
44
pkg/qbit/downloaders/http.go
Normal file
@@ -0,0 +1,44 @@
|
|||||||
|
package downloaders
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/tls"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"os"
|
||||||
|
)
|
||||||
|
|
||||||
|
// GetHTTPClient returns the plain HTTP client used for manual downloads.
// NOTE(review): certificate validation is disabled and no timeout is set —
// a stalled debrid link would block a download goroutine indefinitely.
// Confirm whether a Timeout should be added.
func GetHTTPClient() *http.Client {
	tr := &http.Transport{
		TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
	}
	return &http.Client{Transport: tr}
}
|
||||||
|
|
||||||
|
func NormalHTTP(client *http.Client, url, filename string) error {
|
||||||
|
file, err := os.Create(filename)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer file.Close()
|
||||||
|
|
||||||
|
// Send the HTTP GET request
|
||||||
|
resp, err := client.Get(url)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Println("Error downloading file:", err)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
// Check server response
|
||||||
|
if resp.StatusCode != http.StatusOK {
|
||||||
|
return fmt.Errorf("server returned non-200 status: %d %s", resp.StatusCode, resp.Status)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Write the response body to file
|
||||||
|
_, err = io.Copy(file, resp.Body)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
40
pkg/qbit/handlers.go
Normal file
40
pkg/qbit/handlers.go
Normal file
@@ -0,0 +1,40 @@
|
|||||||
|
package qbit
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/go-chi/chi/v5"
|
||||||
|
"net/http"
|
||||||
|
)
|
||||||
|
|
||||||
|
// AddRoutes mounts the qBittorrent-compatible API (a subset of /api/v2)
// onto r and returns it as the handler. All torrent/app routes run behind
// the authContext middleware; torrent routes additionally get the request's
// hash list parsed into the context by HashesCtx.
func (q *QBit) AddRoutes(r chi.Router) http.Handler {
	r.Route("/api/v2", func(r chi.Router) {
		// Login sits outside the authenticated group on purpose.
		r.Post("/auth/login", q.handleLogin)

		r.Group(func(r chi.Router) {
			//r.Use(q.authMiddleware)
			r.Use(q.authContext)
			r.Route("/torrents", func(r chi.Router) {
				r.Use(HashesCtx)
				r.Get("/info", q.handleTorrentsInfo)
				r.Post("/add", q.handleTorrentsAdd)
				r.Post("/delete", q.handleTorrentsDelete)
				r.Get("/categories", q.handleCategories)
				r.Post("/createCategory", q.handleCreateCategory)

				r.Get("/pause", q.handleTorrentsPause)
				r.Get("/resume", q.handleTorrentsResume)
				r.Get("/recheck", q.handleTorrentRecheck)
				r.Get("/properties", q.handleTorrentProperties)
			})

			r.Route("/app", func(r chi.Router) {
				r.Get("/version", q.handleVersion)
				r.Get("/webapiVersion", q.handleWebAPIVersion)
				r.Get("/preferences", q.handlePreferences)
				r.Get("/buildInfo", q.handleBuildInfo)
				r.Get("/shutdown", q.shutdown)
			})
		})

	})
	return r
}
|
||||||
40
pkg/qbit/handlers_app.go
Normal file
40
pkg/qbit/handlers_app.go
Normal file
@@ -0,0 +1,40 @@
|
|||||||
|
package qbit
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net/http"
|
||||||
|
"path/filepath"
|
||||||
|
)
|
||||||
|
|
||||||
|
func (q *QBit) handleVersion(w http.ResponseWriter, r *http.Request) {
|
||||||
|
_, _ = w.Write([]byte("v4.3.2"))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (q *QBit) handleWebAPIVersion(w http.ResponseWriter, r *http.Request) {
|
||||||
|
_, _ = w.Write([]byte("2.7"))
|
||||||
|
}
|
||||||
|
|
||||||
|
// handlePreferences serves qBittorrent-style application preferences, with
// the web UI username and save/temp paths filled in from this instance.
func (q *QBit) handlePreferences(w http.ResponseWriter, r *http.Request) {
	preferences := NewAppPreferences()

	preferences.WebUiUsername = q.Username
	preferences.SavePath = q.DownloadFolder
	// Temp downloads live in a "temp" subfolder of the download folder.
	preferences.TempPath = filepath.Join(q.DownloadFolder, "temp")

	JSONResponse(w, preferences, http.StatusOK)
}
|
||||||
|
|
||||||
|
func (q *QBit) handleBuildInfo(w http.ResponseWriter, r *http.Request) {
|
||||||
|
res := BuildInfo{
|
||||||
|
Bitness: 64,
|
||||||
|
Boost: "1.75.0",
|
||||||
|
Libtorrent: "1.2.11.0",
|
||||||
|
Openssl: "1.1.1i",
|
||||||
|
Qt: "5.15.2",
|
||||||
|
Zlib: "1.2.11",
|
||||||
|
}
|
||||||
|
JSONResponse(w, res, http.StatusOK)
|
||||||
|
}
|
||||||
|
|
||||||
|
// shutdown implements /api/v2/app/shutdown. It only acknowledges the
// request with 200; the server itself is not stopped.
func (q *QBit) shutdown(w http.ResponseWriter, r *http.Request) {
	w.WriteHeader(http.StatusOK)
}
|
||||||
10
pkg/qbit/handlers_auth.go
Normal file
10
pkg/qbit/handlers_auth.go
Normal file
@@ -0,0 +1,10 @@
|
|||||||
|
package qbit
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net/http"
|
||||||
|
)
|
||||||
|
|
||||||
|
// handleLogin implements /api/v2/auth/login. Credentials are not validated
// here — every request is answered with 200 and body "Ok.".
// NOTE(review): real qBittorrent also sets an SID cookie on login; confirm
// the clients in use don't require it.
func (q *QBit) handleLogin(w http.ResponseWriter, r *http.Request) {
	w.WriteHeader(http.StatusOK)
	w.Write([]byte("Ok."))
}
|
||||||
165
pkg/qbit/handlers_torrent.go
Normal file
165
pkg/qbit/handlers_torrent.go
Normal file
@@ -0,0 +1,165 @@
|
|||||||
|
package qbit
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"net/http"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
func (q *QBit) handleTorrentsInfo(w http.ResponseWriter, r *http.Request) {
|
||||||
|
//log all url params
|
||||||
|
ctx := r.Context()
|
||||||
|
category := strings.Trim(r.URL.Query().Get("category"), "")
|
||||||
|
filter := strings.Trim(r.URL.Query().Get("filter"), "")
|
||||||
|
hashes, _ := ctx.Value("hashes").([]string)
|
||||||
|
torrents := q.storage.GetAll(category, filter, hashes)
|
||||||
|
JSONResponse(w, torrents, http.StatusOK)
|
||||||
|
}
|
||||||
|
|
||||||
|
// handleTorrentsAdd implements /torrents/add: accepts magnet URLs (form
// field "urls", newline-separated) and/or uploaded .torrent files (field
// "torrents"), adds each to the debrid service under the given category.
// Responds 400 on the first parse/add failure, 200 otherwise.
func (q *QBit) handleTorrentsAdd(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	// Strip any parameters (e.g. "; boundary=...") from the content type.
	contentType := strings.Split(r.Header.Get("Content-Type"), ";")[0]
	switch contentType {
	case "multipart/form-data":
		err := r.ParseMultipartForm(32 << 20) // 32MB max memory
		if err != nil {
			q.logger.Printf("Error parsing form: %v\n", err)
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
	case "application/x-www-form-urlencoded":
		err := r.ParseForm()
		if err != nil {
			q.logger.Printf("Error parsing form: %v\n", err)
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
	}

	// Symlink mode is the default; presumably clients send
	// sequentialDownload=true to request a real (manual) download instead —
	// TODO confirm against the arr download-client settings.
	isSymlink := strings.ToLower(r.FormValue("sequentialDownload")) != "true"
	q.logger.Printf("isSymlink: %v\n", isSymlink)
	urls := r.FormValue("urls")
	category := r.FormValue("category")

	var urlList []string
	if urls != "" {
		urlList = strings.Split(urls, "\n")
	}

	// NOTE(review): a plain string context key; a typed key would avoid
	// collisions (go vet flags this).
	ctx = context.WithValue(ctx, "isSymlink", isSymlink)

	for _, url := range urlList {
		if err := q.AddMagnet(ctx, url, category); err != nil {
			q.logger.Printf("Error adding magnet: %v\n", err)
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
	}

	// Uploaded .torrent files only exist for multipart submissions.
	if contentType == "multipart/form-data" {
		files := r.MultipartForm.File["torrents"]
		for _, fileHeader := range files {
			if err := q.AddTorrent(ctx, fileHeader, category); err != nil {
				q.logger.Printf("Error adding torrent: %v\n", err)
				http.Error(w, err.Error(), http.StatusBadRequest)
				return
			}
		}
	}

	w.WriteHeader(http.StatusOK)
}
|
||||||
|
|
||||||
|
func (q *QBit) handleTorrentsDelete(w http.ResponseWriter, r *http.Request) {
|
||||||
|
ctx := r.Context()
|
||||||
|
hashes, _ := ctx.Value("hashes").([]string)
|
||||||
|
if len(hashes) == 0 {
|
||||||
|
http.Error(w, "No hashes provided", http.StatusBadRequest)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
for _, hash := range hashes {
|
||||||
|
q.storage.Delete(hash)
|
||||||
|
}
|
||||||
|
|
||||||
|
w.WriteHeader(http.StatusOK)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (q *QBit) handleTorrentsPause(w http.ResponseWriter, r *http.Request) {
|
||||||
|
ctx := r.Context()
|
||||||
|
hashes, _ := ctx.Value("hashes").([]string)
|
||||||
|
for _, hash := range hashes {
|
||||||
|
torrent := q.storage.Get(hash)
|
||||||
|
if torrent == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
go q.PauseTorrent(torrent)
|
||||||
|
}
|
||||||
|
|
||||||
|
w.WriteHeader(http.StatusOK)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (q *QBit) handleTorrentsResume(w http.ResponseWriter, r *http.Request) {
|
||||||
|
ctx := r.Context()
|
||||||
|
hashes, _ := ctx.Value("hashes").([]string)
|
||||||
|
for _, hash := range hashes {
|
||||||
|
torrent := q.storage.Get(hash)
|
||||||
|
if torrent == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
go q.ResumeTorrent(torrent)
|
||||||
|
}
|
||||||
|
|
||||||
|
w.WriteHeader(http.StatusOK)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (q *QBit) handleTorrentRecheck(w http.ResponseWriter, r *http.Request) {
|
||||||
|
ctx := r.Context()
|
||||||
|
hashes, _ := ctx.Value("hashes").([]string)
|
||||||
|
for _, hash := range hashes {
|
||||||
|
torrent := q.storage.Get(hash)
|
||||||
|
if torrent == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
go q.RefreshTorrent(torrent)
|
||||||
|
}
|
||||||
|
|
||||||
|
w.WriteHeader(http.StatusOK)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (q *QBit) handleCategories(w http.ResponseWriter, r *http.Request) {
|
||||||
|
var categories = map[string]TorrentCategory{}
|
||||||
|
for _, cat := range q.Categories {
|
||||||
|
path := filepath.Join(q.DownloadFolder, cat)
|
||||||
|
categories[cat] = TorrentCategory{
|
||||||
|
Name: cat,
|
||||||
|
SavePath: path,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
JSONResponse(w, categories, http.StatusOK)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (q *QBit) handleCreateCategory(w http.ResponseWriter, r *http.Request) {
|
||||||
|
err := r.ParseForm()
|
||||||
|
if err != nil {
|
||||||
|
http.Error(w, "Failed to parse form data", http.StatusBadRequest)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
name := r.Form.Get("category")
|
||||||
|
if name == "" {
|
||||||
|
http.Error(w, "No name provided", http.StatusBadRequest)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
q.Categories = append(q.Categories, name)
|
||||||
|
|
||||||
|
JSONResponse(w, nil, http.StatusOK)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (q *QBit) handleTorrentProperties(w http.ResponseWriter, r *http.Request) {
|
||||||
|
hash := r.URL.Query().Get("hash")
|
||||||
|
torrent := q.storage.Get(hash)
|
||||||
|
properties := q.GetTorrentProperties(torrent)
|
||||||
|
JSONResponse(w, properties, http.StatusOK)
|
||||||
|
}
|
||||||
80
pkg/qbit/main.go
Normal file
80
pkg/qbit/main.go
Normal file
@@ -0,0 +1,80 @@
|
|||||||
|
package qbit
|
||||||
|
|
||||||
|
import (
|
||||||
|
"cmp"
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"github.com/go-chi/chi/v5"
|
||||||
|
"github.com/go-chi/chi/v5/middleware"
|
||||||
|
"goBlack/common"
|
||||||
|
"goBlack/pkg/debrid"
|
||||||
|
"log"
|
||||||
|
"net/http"
|
||||||
|
"os"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// WorkerType describes one background job: a periodic ticker plus the
// context that bounds the job's lifetime.
type WorkerType struct {
	ticker *time.Ticker    // fires once per refresh interval
	ctx    context.Context // presumably cancellation stops the job — confirm in StartWorker
}
|
||||||
|
|
||||||
|
// Worker groups the named background jobs that QBit runs periodically,
// keyed by job name.
type Worker struct {
	types map[string]WorkerType
}
|
||||||
|
|
||||||
|
// QBit emulates a qBittorrent WebUI server whose downloads are actually
// fulfilled by a debrid service rather than a real BitTorrent client.
type QBit struct {
	Username        string   `json:"username"`        // Basic-auth username for the API
	Password        string   `json:"password"`        // Basic-auth password for the API
	Port            string   `json:"port"`            // HTTP listen port
	DownloadFolder  string   `json:"download_folder"` // root under which category save-paths live
	Categories      []string `json:"categories"`      // known category names
	debrid          debrid.Service                    // backing debrid provider
	cache           *common.Cache
	storage         *TorrentStorage // in-memory torrent state, persisted to torrents.json
	debug           bool            // enables HTTP request logging
	logger          *log.Logger
	arrs            sync.Map // host:token (Used for refreshing in worker)
	RefreshInterval int      // worker refresh cadence; defaults to 10 — units set by StartWorker, TODO confirm
}
|
||||||
|
|
||||||
|
func NewQBit(config *common.Config, deb debrid.Service, cache *common.Cache) *QBit {
|
||||||
|
cfg := config.QBitTorrent
|
||||||
|
storage := NewTorrentStorage("torrents.json")
|
||||||
|
port := cmp.Or(cfg.Port, os.Getenv("QBIT_PORT"), "8182")
|
||||||
|
refreshInterval := cmp.Or(cfg.RefreshInterval, 10)
|
||||||
|
return &QBit{
|
||||||
|
Username: cfg.Username,
|
||||||
|
Password: cfg.Password,
|
||||||
|
Port: port,
|
||||||
|
DownloadFolder: cfg.DownloadFolder,
|
||||||
|
Categories: cfg.Categories,
|
||||||
|
debrid: deb,
|
||||||
|
cache: cache,
|
||||||
|
debug: cfg.Debug,
|
||||||
|
storage: storage,
|
||||||
|
logger: common.NewLogger("QBit", os.Stdout),
|
||||||
|
arrs: sync.Map{},
|
||||||
|
RefreshInterval: refreshInterval,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (q *QBit) Start() {
|
||||||
|
|
||||||
|
r := chi.NewRouter()
|
||||||
|
if q.debug {
|
||||||
|
r.Use(middleware.Logger)
|
||||||
|
}
|
||||||
|
r.Use(middleware.Recoverer)
|
||||||
|
|
||||||
|
q.AddRoutes(r)
|
||||||
|
|
||||||
|
ctx := context.Background()
|
||||||
|
|
||||||
|
go q.StartWorker(ctx)
|
||||||
|
|
||||||
|
q.logger.Printf("Starting QBit server on :%s", q.Port)
|
||||||
|
port := fmt.Sprintf(":%s", q.Port)
|
||||||
|
q.logger.Fatal(http.ListenAndServe(port, r))
|
||||||
|
}
|
||||||
78
pkg/qbit/middleware.go
Normal file
78
pkg/qbit/middleware.go
Normal file
@@ -0,0 +1,78 @@
|
|||||||
|
package qbit
|
||||||
|
|
||||||
|
import (
	"context"
	"crypto/subtle"
	"encoding/base64"
	"errors"
	"net/http"
	"strings"

	"github.com/go-chi/chi/v5"
)
|
||||||
|
|
||||||
|
func (q *QBit) authMiddleware(next http.Handler) http.Handler {
|
||||||
|
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
user, pass, ok := r.BasicAuth()
|
||||||
|
if !ok {
|
||||||
|
http.Error(w, "Unauthorized", http.StatusUnauthorized)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if subtle.ConstantTimeCompare([]byte(user), []byte(q.Username)) != 1 || subtle.ConstantTimeCompare([]byte(pass), []byte(q.Password)) != 1 {
|
||||||
|
http.Error(w, "Unauthorized", http.StatusUnauthorized)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
next.ServeHTTP(w, r)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// DecodeAuthHeader parses an "Authorization: Basic <base64(host:token)>"
// header and returns the host and token parts.
//
// Fixes over the original:
//   - a header without exactly two space-separated parts returned
//     ("", "", nil), so callers treating err == nil as success proceeded
//     with empty values; it now returns an explicit error.
//   - a decoded credential containing no ":" made strings.LastIndex return
//     -1 and the subsequent slice expression panic; it now returns an
//     error instead.
func DecodeAuthHeader(header string) (string, string, error) {
	parts := strings.Split(header, " ")
	if len(parts) != 2 {
		return "", "", errors.New("malformed authorization header")
	}

	decoded, err := base64.StdEncoding.DecodeString(parts[1])
	if err != nil {
		return "", "", err
	}

	credentials := string(decoded)
	colonIndex := strings.LastIndex(credentials, ":")
	if colonIndex < 0 {
		return "", "", errors.New("authorization credentials missing ':' separator")
	}
	return credentials[:colonIndex], credentials[colonIndex+1:], nil
}
|
||||||
|
|
||||||
|
func (q *QBit) authContext(next http.Handler) http.Handler {
|
||||||
|
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
host, token, err := DecodeAuthHeader(r.Header.Get("Authorization"))
|
||||||
|
ctx := r.Context()
|
||||||
|
if err == nil {
|
||||||
|
ctx = context.WithValue(r.Context(), "host", host)
|
||||||
|
ctx = context.WithValue(ctx, "token", token)
|
||||||
|
q.arrs.Store(host, token)
|
||||||
|
next.ServeHTTP(w, r.WithContext(ctx))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
next.ServeHTTP(w, r.WithContext(ctx))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func HashesCtx(next http.Handler) http.Handler {
|
||||||
|
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
_hashes := chi.URLParam(r, "hashes")
|
||||||
|
var hashes []string
|
||||||
|
if _hashes != "" {
|
||||||
|
hashes = strings.Split(_hashes, "|")
|
||||||
|
}
|
||||||
|
if hashes == nil {
|
||||||
|
// Get hashes from form
|
||||||
|
_ = r.ParseForm()
|
||||||
|
hashes = r.Form["hashes"]
|
||||||
|
}
|
||||||
|
ctx := context.WithValue(r.Context(), "hashes", hashes)
|
||||||
|
next.ServeHTTP(w, r.WithContext(ctx))
|
||||||
|
})
|
||||||
|
}
|
||||||
116
pkg/qbit/qbit.go
Normal file
116
pkg/qbit/qbit.go
Normal file
@@ -0,0 +1,116 @@
|
|||||||
|
package qbit
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"github.com/google/uuid"
|
||||||
|
"goBlack/common"
|
||||||
|
"goBlack/pkg/debrid"
|
||||||
|
"io"
|
||||||
|
"mime/multipart"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
func (q *QBit) AddMagnet(ctx context.Context, url, category string) error {
|
||||||
|
magnet, err := common.GetMagnetFromUrl(url)
|
||||||
|
if err != nil {
|
||||||
|
q.logger.Printf("Error parsing magnet link: %v\n", err)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
err = q.Process(ctx, magnet, category)
|
||||||
|
if err != nil {
|
||||||
|
q.logger.Println("Failed to process magnet:", err)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (q *QBit) AddTorrent(ctx context.Context, fileHeader *multipart.FileHeader, category string) error {
|
||||||
|
file, _ := fileHeader.Open()
|
||||||
|
defer file.Close()
|
||||||
|
var reader io.Reader = file
|
||||||
|
magnet, err := common.GetMagnetFromFile(reader, fileHeader.Filename)
|
||||||
|
if err != nil {
|
||||||
|
q.logger.Printf("Error reading file: %s", fileHeader.Filename)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
err = q.Process(ctx, magnet, category)
|
||||||
|
if err != nil {
|
||||||
|
q.logger.Println("Failed to process torrent:", err)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (q *QBit) Process(ctx context.Context, magnet *common.Magnet, category string) error {
|
||||||
|
torrent := q.CreateTorrentFromMagnet(magnet, category)
|
||||||
|
arr := &debrid.Arr{
|
||||||
|
Name: category,
|
||||||
|
Token: ctx.Value("token").(string),
|
||||||
|
Host: ctx.Value("host").(string),
|
||||||
|
}
|
||||||
|
isSymlink := ctx.Value("isSymlink").(bool)
|
||||||
|
debridTorrent, err := debrid.ProcessQBitTorrent(q.debrid, magnet, arr, isSymlink)
|
||||||
|
if err != nil || debridTorrent == nil {
|
||||||
|
if err == nil {
|
||||||
|
err = fmt.Errorf("failed to process torrent")
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
torrent.ID = debridTorrent.Id
|
||||||
|
torrent.DebridTorrent = debridTorrent
|
||||||
|
torrent.Name = debridTorrent.Name
|
||||||
|
q.storage.AddOrUpdate(torrent)
|
||||||
|
go q.processFiles(torrent, debridTorrent, arr, isSymlink) // We can send async for file processing not to delay the response
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreateTorrentFromMagnet builds the qBittorrent-API representation of a
// freshly added magnet, starting in the "downloading" state. Fields a real
// qBittorrent would track (tracker, peers, ratios) are filled with static
// placeholder values so Arr clients accept the torrent.
func (q *QBit) CreateTorrentFromMagnet(magnet *common.Magnet, category string) *Torrent {
	torrent := &Torrent{
		ID:        uuid.NewString(),
		Hash:      strings.ToLower(magnet.InfoHash), // qBittorrent reports lowercase hashes
		Name:      magnet.Name,
		Size:      magnet.Size,
		Category:  category,
		State:     "downloading",
		AddedOn:   time.Now().Unix(),
		MagnetUri: magnet.Link,

		// Static placeholder values — not tracked for debrid downloads.
		Tracker:        "udp://tracker.opentrackr.org:1337",
		UpLimit:        -1,
		DlLimit:        -1,
		FlPiecePrio:    false,
		ForceStart:     false,
		AutoTmm:        false,
		Availability:   2,
		MaxRatio:       -1,
		MaxSeedingTime: -1,
		NumComplete:    10,
		NumIncomplete:  0,
		NumLeechs:      1,
		Ratio:          1,
		RatioLimit:     1,
	}
	return torrent
}
|
||||||
|
|
||||||
|
// processFiles polls the debrid service every 5 seconds until the torrent
// reaches the "downloaded" state, then finalizes it either by symlinking
// into the mount or by downloading the files manually.
//
// A failed status check marks the torrent as errored and aborts.
// NOTE(review): if the status never becomes "downloaded" while CheckStatus
// keeps succeeding, this loop polls forever — confirm an upper bound is
// enforced inside CheckStatus.
func (q *QBit) processFiles(torrent *Torrent, debridTorrent *debrid.Torrent, arr *debrid.Arr, isSymlink bool) {
	for debridTorrent.Status != "downloaded" {
		progress := debridTorrent.Progress
		q.logger.Printf("Progress: %.2f%%", progress)
		time.Sleep(5 * time.Second)
		// Re-fetch the debrid state; replaces our local snapshot each round.
		dbT, err := q.debrid.CheckStatus(debridTorrent, isSymlink)
		if err != nil {
			q.logger.Printf("Error checking status: %v", err)
			q.MarkAsFailed(torrent)
			return
		}
		debridTorrent = dbT
	}
	if isSymlink {
		q.processSymlink(torrent, debridTorrent, arr)
	} else {
		q.processManualFiles(torrent, debridTorrent, arr)
	}
}
|
||||||
137
pkg/qbit/storage.go
Normal file
137
pkg/qbit/storage.go
Normal file
@@ -0,0 +1,137 @@
|
|||||||
|
package qbit
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"os"
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TorrentStorage is an in-memory, mutex-guarded collection of torrents
// keyed by info hash, with insertion order preserved for listings.
type TorrentStorage struct {
	torrents map[string]*Torrent // info hash -> torrent
	mu       sync.RWMutex        // guards torrents and order
	order    []string            // hashes in insertion order
}
|
||||||
|
|
||||||
|
func loadTorrentsFromJSON(filename string) (map[string]*Torrent, error) {
|
||||||
|
data, err := os.ReadFile(filename)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
torrents := make(map[string]*Torrent)
|
||||||
|
if err := json.Unmarshal(data, &torrents); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return torrents, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewTorrentStorage(filename string) *TorrentStorage {
|
||||||
|
// Open the json file and read the data
|
||||||
|
torrents, err := loadTorrentsFromJSON(filename)
|
||||||
|
if err != nil {
|
||||||
|
torrents = make(map[string]*Torrent)
|
||||||
|
}
|
||||||
|
order := make([]string, 0, len(torrents))
|
||||||
|
for id := range torrents {
|
||||||
|
order = append(order, id)
|
||||||
|
}
|
||||||
|
// Create a new TorrentStorage
|
||||||
|
return &TorrentStorage{
|
||||||
|
torrents: torrents,
|
||||||
|
order: order,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add inserts a torrent keyed by its info hash.
// NOTE(review): unlike AddOrUpdate, this always appends to the order
// slice, so re-adding an existing hash duplicates its listing entry —
// prefer AddOrUpdate unless the hash is known to be new.
func (ts *TorrentStorage) Add(torrent *Torrent) {
	ts.mu.Lock()
	defer ts.mu.Unlock()
	ts.torrents[torrent.Hash] = torrent
	ts.order = append(ts.order, torrent.Hash)
}
|
||||||
|
|
||||||
|
func (ts *TorrentStorage) AddOrUpdate(torrent *Torrent) {
|
||||||
|
ts.mu.Lock()
|
||||||
|
defer ts.mu.Unlock()
|
||||||
|
if _, exists := ts.torrents[torrent.Hash]; !exists {
|
||||||
|
ts.order = append(ts.order, torrent.Hash)
|
||||||
|
}
|
||||||
|
ts.torrents[torrent.Hash] = torrent
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ts *TorrentStorage) GetByID(id string) *Torrent {
|
||||||
|
ts.mu.RLock()
|
||||||
|
defer ts.mu.RUnlock()
|
||||||
|
for _, torrent := range ts.torrents {
|
||||||
|
if torrent.ID == id {
|
||||||
|
return torrent
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ts *TorrentStorage) Get(hash string) *Torrent {
|
||||||
|
ts.mu.RLock()
|
||||||
|
defer ts.mu.RUnlock()
|
||||||
|
return ts.torrents[hash]
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ts *TorrentStorage) GetAll(category string, filter string, hashes []string) []*Torrent {
|
||||||
|
ts.mu.RLock()
|
||||||
|
defer ts.mu.RUnlock()
|
||||||
|
torrents := make([]*Torrent, 0)
|
||||||
|
for _, id := range ts.order {
|
||||||
|
torrent := ts.torrents[id]
|
||||||
|
if category != "" && torrent.Category != category {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if filter != "" && torrent.State != filter {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
torrents = append(torrents, torrent)
|
||||||
|
}
|
||||||
|
if len(hashes) > 0 {
|
||||||
|
filtered := make([]*Torrent, 0, len(torrents))
|
||||||
|
for _, hash := range hashes {
|
||||||
|
if torrent := ts.torrents[hash]; torrent != nil {
|
||||||
|
filtered = append(filtered, torrent)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
torrents = filtered
|
||||||
|
}
|
||||||
|
return torrents
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ts *TorrentStorage) Update(torrent *Torrent) {
|
||||||
|
ts.mu.Lock()
|
||||||
|
defer ts.mu.Unlock()
|
||||||
|
ts.torrents[torrent.Hash] = torrent
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ts *TorrentStorage) Delete(hash string) {
|
||||||
|
ts.mu.Lock()
|
||||||
|
defer ts.mu.Unlock()
|
||||||
|
torrent, exists := ts.torrents[hash]
|
||||||
|
if !exists {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
delete(ts.torrents, hash)
|
||||||
|
for i, id := range ts.order {
|
||||||
|
if id == hash {
|
||||||
|
ts.order = append(ts.order[:i], ts.order[i+1:]...)
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Delete the torrent folder
|
||||||
|
if torrent.ContentPath != "" {
|
||||||
|
os.RemoveAll(torrent.ContentPath)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ts *TorrentStorage) Save(filename string) error {
|
||||||
|
ts.mu.RLock()
|
||||||
|
defer ts.mu.RUnlock()
|
||||||
|
data, err := json.Marshal(ts.torrents)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return os.WriteFile(filename, data, 0644)
|
||||||
|
}
|
||||||
412
pkg/qbit/structs.go
Normal file
412
pkg/qbit/structs.go
Normal file
@@ -0,0 +1,412 @@
|
|||||||
|
package qbit
|
||||||
|
|
||||||
|
import "goBlack/pkg/debrid"
|
||||||
|
|
||||||
|
// BuildInfo mirrors qBittorrent's /app/buildInfo payload: the versions of
// the libraries a real client would be linked against.
type BuildInfo struct {
	Libtorrent string `json:"libtorrent"`
	Bitness    int    `json:"bitness"`
	Boost      string `json:"boost"`
	Openssl    string `json:"openssl"`
	Qt         string `json:"qt"`
	Zlib       string `json:"zlib"`
}
|
||||||
|
|
||||||
|
// AppPreferences mirrors qBittorrent's /app/preferences payload. The field
// set and JSON tags follow the WebUI API; most values are static here since
// no real BitTorrent engine backs them.
type AppPreferences struct {
	AddTrackers string `json:"add_trackers"`
	AddTrackersEnabled bool `json:"add_trackers_enabled"`
	AltDlLimit int64 `json:"alt_dl_limit"`
	AltUpLimit int64 `json:"alt_up_limit"`
	AlternativeWebuiEnabled bool `json:"alternative_webui_enabled"`
	AlternativeWebuiPath string `json:"alternative_webui_path"`
	AnnounceIp string `json:"announce_ip"`
	AnnounceToAllTiers bool `json:"announce_to_all_tiers"`
	AnnounceToAllTrackers bool `json:"announce_to_all_trackers"`
	AnonymousMode bool `json:"anonymous_mode"`
	AsyncIoThreads int64 `json:"async_io_threads"`
	AutoDeleteMode int64 `json:"auto_delete_mode"`
	AutoTmmEnabled bool `json:"auto_tmm_enabled"`
	AutorunEnabled bool `json:"autorun_enabled"`
	AutorunProgram string `json:"autorun_program"`
	BannedIPs string `json:"banned_IPs"` // note the capitalized "IPs" in the wire name
	BittorrentProtocol int64 `json:"bittorrent_protocol"`
	BypassAuthSubnetWhitelist string `json:"bypass_auth_subnet_whitelist"`
	BypassAuthSubnetWhitelistEnabled bool `json:"bypass_auth_subnet_whitelist_enabled"`
	BypassLocalAuth bool `json:"bypass_local_auth"`
	CategoryChangedTmmEnabled bool `json:"category_changed_tmm_enabled"`
	CheckingMemoryUse int64 `json:"checking_memory_use"`
	CreateSubfolderEnabled bool `json:"create_subfolder_enabled"`
	CurrentInterfaceAddress string `json:"current_interface_address"`
	CurrentNetworkInterface string `json:"current_network_interface"`
	Dht bool `json:"dht"`
	DiskCache int64 `json:"disk_cache"`
	DiskCacheTtl int64 `json:"disk_cache_ttl"`
	DlLimit int64 `json:"dl_limit"`
	DontCountSlowTorrents bool `json:"dont_count_slow_torrents"`
	DyndnsDomain string `json:"dyndns_domain"`
	DyndnsEnabled bool `json:"dyndns_enabled"`
	DyndnsPassword string `json:"dyndns_password"`
	DyndnsService int64 `json:"dyndns_service"`
	DyndnsUsername string `json:"dyndns_username"`
	EmbeddedTrackerPort int64 `json:"embedded_tracker_port"`
	EnableCoalesceReadWrite bool `json:"enable_coalesce_read_write"`
	EnableEmbeddedTracker bool `json:"enable_embedded_tracker"`
	EnableMultiConnectionsFromSameIp bool `json:"enable_multi_connections_from_same_ip"`
	EnableOsCache bool `json:"enable_os_cache"`
	EnablePieceExtentAffinity bool `json:"enable_piece_extent_affinity"`
	EnableSuperSeeding bool `json:"enable_super_seeding"`
	EnableUploadSuggestions bool `json:"enable_upload_suggestions"`
	Encryption int64 `json:"encryption"`
	ExportDir string `json:"export_dir"`
	ExportDirFin string `json:"export_dir_fin"`
	FilePoolSize int64 `json:"file_pool_size"`
	IncompleteFilesExt bool `json:"incomplete_files_ext"`
	IpFilterEnabled bool `json:"ip_filter_enabled"`
	IpFilterPath string `json:"ip_filter_path"`
	IpFilterTrackers bool `json:"ip_filter_trackers"`
	LimitLanPeers bool `json:"limit_lan_peers"`
	LimitTcpOverhead bool `json:"limit_tcp_overhead"`
	LimitUtpRate bool `json:"limit_utp_rate"`
	ListenPort int64 `json:"listen_port"`
	Locale string `json:"locale"`
	Lsd bool `json:"lsd"`
	MailNotificationAuthEnabled bool `json:"mail_notification_auth_enabled"`
	MailNotificationEmail string `json:"mail_notification_email"`
	MailNotificationEnabled bool `json:"mail_notification_enabled"`
	MailNotificationPassword string `json:"mail_notification_password"`
	MailNotificationSender string `json:"mail_notification_sender"`
	MailNotificationSmtp string `json:"mail_notification_smtp"`
	MailNotificationSslEnabled bool `json:"mail_notification_ssl_enabled"`
	MailNotificationUsername string `json:"mail_notification_username"`
	MaxActiveDownloads int64 `json:"max_active_downloads"`
	MaxActiveTorrents int64 `json:"max_active_torrents"`
	MaxActiveUploads int64 `json:"max_active_uploads"`
	MaxConnec int64 `json:"max_connec"`
	MaxConnecPerTorrent int64 `json:"max_connec_per_torrent"`
	MaxRatio int64 `json:"max_ratio"`
	MaxRatioAct int64 `json:"max_ratio_act"`
	MaxRatioEnabled bool `json:"max_ratio_enabled"`
	MaxSeedingTime int64 `json:"max_seeding_time"`
	MaxSeedingTimeEnabled bool `json:"max_seeding_time_enabled"`
	MaxUploads int64 `json:"max_uploads"`
	MaxUploadsPerTorrent int64 `json:"max_uploads_per_torrent"`
	OutgoingPortsMax int64 `json:"outgoing_ports_max"`
	OutgoingPortsMin int64 `json:"outgoing_ports_min"`
	Pex bool `json:"pex"`
	PreallocateAll bool `json:"preallocate_all"`
	ProxyAuthEnabled bool `json:"proxy_auth_enabled"`
	ProxyIp string `json:"proxy_ip"`
	ProxyPassword string `json:"proxy_password"`
	ProxyPeerConnections bool `json:"proxy_peer_connections"`
	ProxyPort int64 `json:"proxy_port"`
	ProxyTorrentsOnly bool `json:"proxy_torrents_only"`
	ProxyType int64 `json:"proxy_type"`
	ProxyUsername string `json:"proxy_username"`
	QueueingEnabled bool `json:"queueing_enabled"`
	RandomPort bool `json:"random_port"`
	RecheckCompletedTorrents bool `json:"recheck_completed_torrents"`
	ResolvePeerCountries bool `json:"resolve_peer_countries"`
	RssAutoDownloadingEnabled bool `json:"rss_auto_downloading_enabled"`
	RssMaxArticlesPerFeed int64 `json:"rss_max_articles_per_feed"`
	RssProcessingEnabled bool `json:"rss_processing_enabled"`
	RssRefreshInterval int64 `json:"rss_refresh_interval"`
	SavePath string `json:"save_path"`
	SavePathChangedTmmEnabled bool `json:"save_path_changed_tmm_enabled"`
	SaveResumeDataInterval int64 `json:"save_resume_data_interval"`
	ScanDirs ScanDirs `json:"scan_dirs"`
	ScheduleFromHour int64 `json:"schedule_from_hour"`
	ScheduleFromMin int64 `json:"schedule_from_min"`
	ScheduleToHour int64 `json:"schedule_to_hour"`
	ScheduleToMin int64 `json:"schedule_to_min"`
	SchedulerDays int64 `json:"scheduler_days"`
	SchedulerEnabled bool `json:"scheduler_enabled"`
	SendBufferLowWatermark int64 `json:"send_buffer_low_watermark"`
	SendBufferWatermark int64 `json:"send_buffer_watermark"`
	SendBufferWatermarkFactor int64 `json:"send_buffer_watermark_factor"`
	SlowTorrentDlRateThreshold int64 `json:"slow_torrent_dl_rate_threshold"`
	SlowTorrentInactiveTimer int64 `json:"slow_torrent_inactive_timer"`
	SlowTorrentUlRateThreshold int64 `json:"slow_torrent_ul_rate_threshold"`
	SocketBacklogSize int64 `json:"socket_backlog_size"`
	StartPausedEnabled bool `json:"start_paused_enabled"`
	StopTrackerTimeout int64 `json:"stop_tracker_timeout"`
	TempPath string `json:"temp_path"`
	TempPathEnabled bool `json:"temp_path_enabled"`
	TorrentChangedTmmEnabled bool `json:"torrent_changed_tmm_enabled"`
	UpLimit int64 `json:"up_limit"`
	UploadChokingAlgorithm int64 `json:"upload_choking_algorithm"`
	UploadSlotsBehavior int64 `json:"upload_slots_behavior"`
	Upnp bool `json:"upnp"`
	UpnpLeaseDuration int64 `json:"upnp_lease_duration"`
	UseHttps bool `json:"use_https"`
	UtpTcpMixedMode int64 `json:"utp_tcp_mixed_mode"`
	WebUiAddress string `json:"web_ui_address"`
	WebUiBanDuration int64 `json:"web_ui_ban_duration"`
	WebUiClickjackingProtectionEnabled bool `json:"web_ui_clickjacking_protection_enabled"`
	WebUiCsrfProtectionEnabled bool `json:"web_ui_csrf_protection_enabled"`
	WebUiDomainList string `json:"web_ui_domain_list"`
	WebUiHostHeaderValidationEnabled bool `json:"web_ui_host_header_validation_enabled"`
	WebUiHttpsCertPath string `json:"web_ui_https_cert_path"`
	WebUiHttpsKeyPath string `json:"web_ui_https_key_path"`
	WebUiMaxAuthFailCount int64 `json:"web_ui_max_auth_fail_count"`
	WebUiPort int64 `json:"web_ui_port"`
	WebUiSecureCookieEnabled bool `json:"web_ui_secure_cookie_enabled"`
	WebUiSessionTimeout int64 `json:"web_ui_session_timeout"`
	WebUiUpnp bool `json:"web_ui_upnp"`
	WebUiUsername string `json:"web_ui_username"`
	WebUiPassword string `json:"web_ui_password"`
	SSLKey string `json:"ssl_key"`
	SSLCert string `json:"ssl_cert"`
	RSSDownloadRepack string `json:"rss_download_repack_proper_episodes"`
	RSSSmartEpisodeFilters string `json:"rss_smart_episode_filters"`
	WebUiUseCustomHttpHeaders bool `json:"web_ui_use_custom_http_headers"`
	WebUiUseCustomHttpHeadersEnabled bool `json:"web_ui_use_custom_http_headers_enabled"`
}
|
||||||
|
|
||||||
|
// ScanDirs corresponds to qBittorrent's "scan_dirs" preference; watched
// folders are not supported here, so the object is intentionally empty.
type ScanDirs struct{}
|
||||||
|
|
||||||
|
// TorrentCategory is one entry of qBittorrent's category listing: a name
// and the folder torrents of that category are saved to.
type TorrentCategory struct {
	Name     string `json:"name"`
	SavePath string `json:"savePath"`
}
|
||||||
|
|
||||||
|
// Torrent is the qBittorrent-API view of one managed download.
// Fields tagged "-" are internal bookkeeping and are never serialized.
type Torrent struct {
	ID            string          `json:"-"` // internal id (debrid torrent id once processed)
	DebridTorrent *debrid.Torrent `json:"-"` // backing debrid record
	TorrentPath   string          `json:"-"` // folder name inside the mount — TODO confirm against UpdateTorrent

	AddedOn int64 `json:"added_on,omitempty"`
	AmountLeft int64 `json:"amount_left,omitempty"`
	AutoTmm bool `json:"auto_tmm"`
	Availability float64 `json:"availability"`
	Category string `json:"category,omitempty"`
	Completed int64 `json:"completed,omitempty"`
	CompletionOn int64 `json:"completion_on,omitempty"`
	ContentPath string `json:"content_path,omitempty"`
	DlLimit int64 `json:"dl_limit,omitempty"`
	Dlspeed int64 `json:"dlspeed,omitempty"`
	Downloaded int64 `json:"downloaded,omitempty"`
	DownloadedSession int64 `json:"downloaded_session,omitempty"`
	Eta int64 `json:"eta,omitempty"`
	FlPiecePrio bool `json:"f_l_piece_prio"`
	ForceStart bool `json:"force_start"`
	Hash string `json:"hash"`
	LastActivity int64 `json:"last_activity,omitempty"`
	MagnetUri string `json:"magnet_uri,omitempty"`
	MaxRatio int64 `json:"max_ratio,omitempty"`
	MaxSeedingTime int64 `json:"max_seeding_time,omitempty"`
	Name string `json:"name,omitempty"`
	NumComplete int64 `json:"num_complete,omitempty"`
	NumIncomplete int64 `json:"num_incomplete,omitempty"`
	NumLeechs int64 `json:"num_leechs,omitempty"`
	NumSeeds int64 `json:"num_seeds,omitempty"`
	Priority int64 `json:"priority,omitempty"`
	Progress float32 `json:"progress"`
	Ratio int64 `json:"ratio,omitempty"`
	RatioLimit int64 `json:"ratio_limit,omitempty"`
	SavePath string `json:"save_path,omitempty"`
	SeedingTimeLimit int64 `json:"seeding_time_limit,omitempty"`
	SeenComplete int64 `json:"seen_complete,omitempty"`
	SeqDl bool `json:"seq_dl"`
	Size int64 `json:"size,omitempty"`
	State string `json:"state,omitempty"`
	SuperSeeding bool `json:"super_seeding"`
	Tags string `json:"tags,omitempty"`
	TimeActive int64 `json:"time_active,omitempty"`
	TotalSize int64 `json:"total_size,omitempty"`
	Tracker string `json:"tracker,omitempty"`
	UpLimit int64 `json:"up_limit,omitempty"`
	Uploaded int64 `json:"uploaded,omitempty"`
	UploadedSession int64 `json:"uploaded_session,omitempty"`
	Upspeed int64 `json:"upspeed,omitempty"`
}
|
||||||
|
|
||||||
|
func (t *Torrent) IsReady() bool {
|
||||||
|
return t.AmountLeft <= 0 && t.TorrentPath != ""
|
||||||
|
}
|
||||||
|
|
||||||
|
// TorrentProperties mirrors qBittorrent's /torrents/properties payload for
// a single torrent.
type TorrentProperties struct {
	AdditionDate int64 `json:"addition_date,omitempty"`
	Comment string `json:"comment,omitempty"`
	CompletionDate int64 `json:"completion_date,omitempty"`
	CreatedBy string `json:"created_by,omitempty"`
	CreationDate int64 `json:"creation_date,omitempty"`
	DlLimit int64 `json:"dl_limit,omitempty"`
	DlSpeed int64 `json:"dl_speed,omitempty"`
	DlSpeedAvg int64 `json:"dl_speed_avg,omitempty"`
	Eta int64 `json:"eta,omitempty"`
	LastSeen int64 `json:"last_seen,omitempty"`
	NbConnections int64 `json:"nb_connections,omitempty"`
	NbConnectionsLimit int64 `json:"nb_connections_limit,omitempty"`
	Peers int64 `json:"peers,omitempty"`
	PeersTotal int64 `json:"peers_total,omitempty"`
	PieceSize int64 `json:"piece_size,omitempty"`
	PiecesHave int64 `json:"pieces_have,omitempty"`
	PiecesNum int64 `json:"pieces_num,omitempty"`
	Reannounce int64 `json:"reannounce,omitempty"`
	SavePath string `json:"save_path,omitempty"`
	SeedingTime int64 `json:"seeding_time,omitempty"`
	Seeds int64 `json:"seeds,omitempty"`
	SeedsTotal int64 `json:"seeds_total,omitempty"`
	ShareRatio int64 `json:"share_ratio,omitempty"`
	TimeElapsed int64 `json:"time_elapsed,omitempty"`
	TotalDownloaded int64 `json:"total_downloaded,omitempty"`
	TotalDownloadedSession int64 `json:"total_downloaded_session,omitempty"`
	TotalSize int64 `json:"total_size,omitempty"`
	TotalUploaded int64 `json:"total_uploaded,omitempty"`
	TotalUploadedSession int64 `json:"total_uploaded_session,omitempty"`
	TotalWasted int64 `json:"total_wasted,omitempty"`
	UpLimit int64 `json:"up_limit,omitempty"`
	UpSpeed int64 `json:"up_speed,omitempty"`
	UpSpeedAvg int64 `json:"up_speed_avg,omitempty"`
}
|
||||||
|
|
||||||
|
// NewAppPreferences returns the static preference set served to clients,
// modeled on a plausible default qBittorrent configuration. Fields not
// listed (SavePath, TempPath, WebUiUsername) stay at their zero values.
func NewAppPreferences() *AppPreferences {
	preferences := &AppPreferences{
		AddTrackers: "",
		AddTrackersEnabled: false,
		AltDlLimit: 10240,
		AltUpLimit: 10240,
		AlternativeWebuiEnabled: false,
		AlternativeWebuiPath: "",
		AnnounceIp: "",
		AnnounceToAllTiers: true,
		AnnounceToAllTrackers: false,
		AnonymousMode: false,
		AsyncIoThreads: 4,
		AutoDeleteMode: 0,
		AutoTmmEnabled: false,
		AutorunEnabled: false,
		AutorunProgram: "",
		BannedIPs: "",
		BittorrentProtocol: 0,
		BypassAuthSubnetWhitelist: "",
		BypassAuthSubnetWhitelistEnabled: false,
		BypassLocalAuth: false,
		CategoryChangedTmmEnabled: false,
		CheckingMemoryUse: 32,
		CreateSubfolderEnabled: true,
		CurrentInterfaceAddress: "",
		CurrentNetworkInterface: "",
		Dht: true,
		DiskCache: -1,
		DiskCacheTtl: 60,
		DlLimit: 0,
		DontCountSlowTorrents: false,
		DyndnsDomain: "changeme.dyndns.org",
		DyndnsEnabled: false,
		DyndnsPassword: "",
		DyndnsService: 0,
		DyndnsUsername: "",
		EmbeddedTrackerPort: 9000,
		EnableCoalesceReadWrite: true,
		EnableEmbeddedTracker: false,
		EnableMultiConnectionsFromSameIp: false,
		EnableOsCache: true,
		EnablePieceExtentAffinity: false,
		EnableSuperSeeding: false,
		EnableUploadSuggestions: false,
		Encryption: 0,
		ExportDir: "",
		ExportDirFin: "",
		FilePoolSize: 40,
		IncompleteFilesExt: false,
		IpFilterEnabled: false,
		IpFilterPath: "",
		IpFilterTrackers: false,
		LimitLanPeers: true,
		LimitTcpOverhead: false,
		LimitUtpRate: true,
		ListenPort: 31193,
		Locale: "en",
		Lsd: true,
		MailNotificationAuthEnabled: false,
		MailNotificationEmail: "",
		MailNotificationEnabled: false,
		MailNotificationPassword: "",
		MailNotificationSender: "qBittorrentNotification@example.com",
		MailNotificationSmtp: "smtp.changeme.com",
		MailNotificationSslEnabled: false,
		MailNotificationUsername: "",
		MaxActiveDownloads: 3,
		MaxActiveTorrents: 5,
		MaxActiveUploads: 3,
		MaxConnec: 500,
		MaxConnecPerTorrent: 100,
		MaxRatio: -1,
		MaxRatioAct: 0,
		MaxRatioEnabled: false,
		MaxSeedingTime: -1,
		MaxSeedingTimeEnabled: false,
		MaxUploads: -1,
		MaxUploadsPerTorrent: -1,
		OutgoingPortsMax: 0,
		OutgoingPortsMin: 0,
		Pex: true,
		PreallocateAll: false,
		ProxyAuthEnabled: false,
		ProxyIp: "0.0.0.0",
		ProxyPassword: "",
		ProxyPeerConnections: false,
		ProxyPort: 8080,
		ProxyTorrentsOnly: false,
		ProxyType: 0,
		ProxyUsername: "",
		QueueingEnabled: false,
		RandomPort: false,
		RecheckCompletedTorrents: false,
		ResolvePeerCountries: true,
		RssAutoDownloadingEnabled: false,
		RssMaxArticlesPerFeed: 50,
		RssProcessingEnabled: false,
		RssRefreshInterval: 30,
		SavePathChangedTmmEnabled: false,
		SaveResumeDataInterval: 60,
		ScanDirs: ScanDirs{},
		ScheduleFromHour: 8,
		ScheduleFromMin: 0,
		ScheduleToHour: 20,
		ScheduleToMin: 0,
		SchedulerDays: 0,
		SchedulerEnabled: false,
		SendBufferLowWatermark: 10,
		SendBufferWatermark: 500,
		SendBufferWatermarkFactor: 50,
		SlowTorrentDlRateThreshold: 2,
		SlowTorrentInactiveTimer: 60,
		SlowTorrentUlRateThreshold: 2,
		SocketBacklogSize: 30,
		StartPausedEnabled: false,
		StopTrackerTimeout: 1,
		TempPathEnabled: false,
		TorrentChangedTmmEnabled: true,
		UpLimit: 0,
		UploadChokingAlgorithm: 1,
		UploadSlotsBehavior: 0,
		Upnp: true,
		UpnpLeaseDuration: 0,
		UseHttps: false,
		UtpTcpMixedMode: 0,
		WebUiAddress: "*",
		WebUiBanDuration: 3600,
		WebUiClickjackingProtectionEnabled: true,
		WebUiCsrfProtectionEnabled: true,
		WebUiDomainList: "*",
		WebUiHostHeaderValidationEnabled: true,
		WebUiHttpsCertPath: "",
		WebUiHttpsKeyPath: "",
		WebUiMaxAuthFailCount: 5,
		WebUiPort: 8080,
		WebUiSecureCookieEnabled: true,
		WebUiSessionTimeout: 3600,
		WebUiUpnp: false,

		// Fields in the struct but not in the JSON (set to zero values):
		WebUiPassword: "",
		SSLKey: "",
		SSLCert: "",
		RSSDownloadRepack: "",
		RSSSmartEpisodeFilters: "",
		WebUiUseCustomHttpHeaders: false,
		WebUiUseCustomHttpHeadersEnabled: false,
	}
	return preferences
}
|
||||||
125
pkg/qbit/torrent.go
Normal file
125
pkg/qbit/torrent.go
Normal file
@@ -0,0 +1,125 @@
|
|||||||
|
package qbit
|
||||||
|
|
||||||
|
import (
|
||||||
|
"cmp"
|
||||||
|
"goBlack/pkg/debrid"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// All torrent-related helpers go here.
|
||||||
|
|
||||||
|
func (q *QBit) MarkAsFailed(t *Torrent) *Torrent {
|
||||||
|
t.State = "error"
|
||||||
|
q.storage.AddOrUpdate(t)
|
||||||
|
return t
|
||||||
|
}
|
||||||
|
|
||||||
|
func (q *QBit) UpdateTorrent(t *Torrent, debridTorrent *debrid.Torrent) *Torrent {
|
||||||
|
rcLoneMount := q.debrid.GetMountPath()
|
||||||
|
if debridTorrent == nil && t.ID != "" {
|
||||||
|
debridTorrent, _ = q.debrid.GetTorrent(t.ID)
|
||||||
|
}
|
||||||
|
if debridTorrent == nil {
|
||||||
|
q.logger.Printf("Torrent with ID %s not found in %s", t.ID, q.debrid.GetName())
|
||||||
|
return t
|
||||||
|
}
|
||||||
|
if debridTorrent.Status != "downloaded" {
|
||||||
|
debridTorrent, _ = q.debrid.GetTorrent(t.ID)
|
||||||
|
}
|
||||||
|
|
||||||
|
if t.TorrentPath == "" {
|
||||||
|
t.TorrentPath = filepath.Base(debridTorrent.GetMountFolder(rcLoneMount))
|
||||||
|
}
|
||||||
|
|
||||||
|
totalSize := float64(cmp.Or(debridTorrent.Bytes, 1.0))
|
||||||
|
progress := cmp.Or(debridTorrent.Progress, 100.0)
|
||||||
|
progress = progress / 100.0
|
||||||
|
var sizeCompleted int64
|
||||||
|
|
||||||
|
sizeCompleted = int64(totalSize * progress)
|
||||||
|
savePath := filepath.Join(q.DownloadFolder, t.Category) + string(os.PathSeparator)
|
||||||
|
torrentPath := filepath.Join(savePath, t.TorrentPath) + string(os.PathSeparator)
|
||||||
|
|
||||||
|
var speed int64
|
||||||
|
if debridTorrent.Speed != 0 {
|
||||||
|
speed = debridTorrent.Speed
|
||||||
|
}
|
||||||
|
var eta int64
|
||||||
|
if speed != 0 {
|
||||||
|
eta = int64((totalSize - float64(sizeCompleted)) / float64(speed))
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Size = debridTorrent.Bytes
|
||||||
|
t.DebridTorrent = debridTorrent
|
||||||
|
t.Completed = sizeCompleted
|
||||||
|
t.Downloaded = sizeCompleted
|
||||||
|
t.DownloadedSession = sizeCompleted
|
||||||
|
t.Uploaded = sizeCompleted
|
||||||
|
t.UploadedSession = sizeCompleted
|
||||||
|
t.AmountLeft = int64(totalSize) - sizeCompleted
|
||||||
|
t.Progress = float32(progress)
|
||||||
|
t.SavePath = savePath
|
||||||
|
t.ContentPath = torrentPath
|
||||||
|
t.Eta = eta
|
||||||
|
t.Dlspeed = speed
|
||||||
|
t.Upspeed = speed
|
||||||
|
|
||||||
|
if t.IsReady() {
|
||||||
|
t.State = "pausedUP"
|
||||||
|
q.storage.AddOrUpdate(t)
|
||||||
|
return t
|
||||||
|
}
|
||||||
|
ticker := time.NewTicker(3 * time.Second)
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-ticker.C:
|
||||||
|
if t.IsReady() {
|
||||||
|
t.State = "pausedUP"
|
||||||
|
q.storage.AddOrUpdate(t)
|
||||||
|
ticker.Stop()
|
||||||
|
return t
|
||||||
|
} else {
|
||||||
|
return q.UpdateTorrent(t, debridTorrent)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ResumeTorrent acknowledges a resume request and always reports success.
// It is currently a no-op — presumably because debrid-backed torrents are
// not paused/resumed locally; TODO confirm intent.
func (q *QBit) ResumeTorrent(t *Torrent) bool {
	return true
}
|
||||||
|
|
||||||
|
// PauseTorrent acknowledges a pause request and always reports success.
// It is currently a no-op — presumably because debrid-backed torrents are
// not paused/resumed locally; TODO confirm intent.
func (q *QBit) PauseTorrent(t *Torrent) bool {
	return true
}
|
||||||
|
|
||||||
|
// RefreshTorrent acknowledges a recheck/refresh request and always reports
// success. It is currently a no-op stub; TODO confirm whether a real
// refresh is intended here.
func (q *QBit) RefreshTorrent(t *Torrent) bool {
	return true
}
|
||||||
|
|
||||||
|
func (q *QBit) GetTorrentProperties(t *Torrent) *TorrentProperties {
|
||||||
|
return &TorrentProperties{
|
||||||
|
AdditionDate: t.AddedOn,
|
||||||
|
Comment: "Debrid Blackhole <https://github.com/sirrobot01/debrid-blackhole>",
|
||||||
|
CreatedBy: "Debrid Blackhole <https://github.com/sirrobot01/debrid-blackhole>",
|
||||||
|
CreationDate: t.AddedOn,
|
||||||
|
DlLimit: -1,
|
||||||
|
UpLimit: -1,
|
||||||
|
DlSpeed: t.Dlspeed,
|
||||||
|
UpSpeed: t.Upspeed,
|
||||||
|
TotalSize: t.Size,
|
||||||
|
TotalUploaded: t.Uploaded,
|
||||||
|
TotalDownloaded: t.Downloaded,
|
||||||
|
TotalUploadedSession: t.UploadedSession,
|
||||||
|
TotalDownloadedSession: t.DownloadedSession,
|
||||||
|
LastSeen: time.Now().Unix(),
|
||||||
|
NbConnectionsLimit: 100,
|
||||||
|
Peers: 0,
|
||||||
|
PeersTotal: 2,
|
||||||
|
SeedingTime: 1,
|
||||||
|
Seeds: 100,
|
||||||
|
ShareRatio: 100,
|
||||||
|
}
|
||||||
|
}
|
||||||
41
pkg/qbit/utils.go
Normal file
41
pkg/qbit/utils.go
Normal file
@@ -0,0 +1,41 @@
|
|||||||
|
package qbit
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"goBlack/common"
|
||||||
|
"goBlack/pkg/debrid"
|
||||||
|
"net/http"
|
||||||
|
"path/filepath"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
//func generateSID() (string, error) {
|
||||||
|
// bytes := make([]byte, sidLength)
|
||||||
|
// if _, err := rand.Read(bytes); err != nil {
|
||||||
|
// return "", err
|
||||||
|
// }
|
||||||
|
// return hex.EncodeToString(bytes), nil
|
||||||
|
//}
|
||||||
|
|
||||||
|
func JSONResponse(w http.ResponseWriter, data interface{}, code int) {
|
||||||
|
w.Header().Set("Content-Type", "application/json")
|
||||||
|
w.WriteHeader(code)
|
||||||
|
json.NewEncoder(w).Encode(data)
|
||||||
|
}
|
||||||
|
|
||||||
|
func checkFileLoop(wg *sync.WaitGroup, dir string, file debrid.TorrentFile, ready chan<- debrid.TorrentFile) {
|
||||||
|
defer wg.Done()
|
||||||
|
ticker := time.NewTicker(1 * time.Second) // Check every second
|
||||||
|
defer ticker.Stop()
|
||||||
|
path := filepath.Join(dir, file.Path)
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-ticker.C:
|
||||||
|
if common.FileReady(path) {
|
||||||
|
ready <- file
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
46
pkg/qbit/worker.go
Normal file
46
pkg/qbit/worker.go
Normal file
@@ -0,0 +1,46 @@
|
|||||||
|
package qbit
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"goBlack/pkg/debrid"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// StartWorker launches the qBit background workers. Currently that is only
// the refresh worker, so this call blocks until ctx is cancelled.
func (q *QBit) StartWorker(ctx context.Context) {
	q.logger.Println("Qbit Worker started")
	q.StartRefreshWorker(ctx)
}
|
||||||
|
|
||||||
|
func (q *QBit) StartRefreshWorker(ctx context.Context) {
|
||||||
|
refreshCtx := context.WithValue(ctx, "worker", "refresh")
|
||||||
|
refreshTicker := time.NewTicker(time.Duration(q.RefreshInterval) * time.Second)
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-refreshCtx.Done():
|
||||||
|
q.logger.Println("Qbit Refresh Worker stopped")
|
||||||
|
return
|
||||||
|
case <-refreshTicker.C:
|
||||||
|
torrents := q.storage.GetAll("", "", nil)
|
||||||
|
if len(torrents) > 0 {
|
||||||
|
q.RefreshArrs()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (q *QBit) RefreshArrs() {
|
||||||
|
q.arrs.Range(func(key, value interface{}) bool {
|
||||||
|
host, ok := key.(string)
|
||||||
|
token, ok2 := value.(string)
|
||||||
|
if !ok || !ok2 {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
arr := &debrid.Arr{
|
||||||
|
Name: "",
|
||||||
|
Token: token,
|
||||||
|
Host: host,
|
||||||
|
}
|
||||||
|
q.RefreshArr(arr)
|
||||||
|
return true
|
||||||
|
})
|
||||||
|
}
|
||||||
Reference in New Issue
Block a user