Compare commits
16 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
e9d3e120f3 | ||
|
|
104df3c33c | ||
|
|
810c9d705e | ||
|
|
4ff00859a3 | ||
|
|
b77dbcc4f4 | ||
|
|
58c0aafab1 | ||
|
|
357da54083 | ||
|
|
88a7196eaf | ||
|
|
abc86a0460 | ||
|
|
dd0b7efdff | ||
|
|
7359f280b0 | ||
|
|
4eb3539347 | ||
|
|
9fb1118475 | ||
|
|
07491b43fe | ||
|
|
8f7c9a19c5 | ||
|
|
a51364d150 |
52
.air.toml
Normal file
52
.air.toml
Normal file
@@ -0,0 +1,52 @@
|
|||||||
|
root = "."
|
||||||
|
testdata_dir = "testdata"
|
||||||
|
tmp_dir = "tmp"
|
||||||
|
|
||||||
|
[build]
|
||||||
|
args_bin = []
|
||||||
|
bin = "./tmp/main"
|
||||||
|
cmd = "go build -o ./tmp/main ."
|
||||||
|
delay = 1000
|
||||||
|
exclude_dir = ["assets", "tmp", "vendor", "testdata", "data"]
|
||||||
|
exclude_file = []
|
||||||
|
exclude_regex = ["_test.go"]
|
||||||
|
exclude_unchanged = false
|
||||||
|
follow_symlink = false
|
||||||
|
full_bin = ""
|
||||||
|
include_dir = []
|
||||||
|
include_ext = ["go", "tpl", "tmpl", "html", ".json"]
|
||||||
|
include_file = []
|
||||||
|
kill_delay = "0s"
|
||||||
|
log = "build-errors.log"
|
||||||
|
poll = false
|
||||||
|
poll_interval = 0
|
||||||
|
post_cmd = []
|
||||||
|
pre_cmd = []
|
||||||
|
rerun = false
|
||||||
|
rerun_delay = 500
|
||||||
|
send_interrupt = false
|
||||||
|
stop_on_error = false
|
||||||
|
|
||||||
|
[color]
|
||||||
|
app = ""
|
||||||
|
build = "yellow"
|
||||||
|
main = "magenta"
|
||||||
|
runner = "green"
|
||||||
|
watcher = "cyan"
|
||||||
|
|
||||||
|
[log]
|
||||||
|
main_only = false
|
||||||
|
silent = false
|
||||||
|
time = false
|
||||||
|
|
||||||
|
[misc]
|
||||||
|
clean_on_exit = false
|
||||||
|
|
||||||
|
[proxy]
|
||||||
|
app_port = 0
|
||||||
|
enabled = false
|
||||||
|
proxy_port = 0
|
||||||
|
|
||||||
|
[screen]
|
||||||
|
clear_on_rebuild = false
|
||||||
|
keep_scroll = true
|
||||||
@@ -5,4 +5,5 @@ docker-compose.yml
|
|||||||
.DS_Store
|
.DS_Store
|
||||||
**/.idea/
|
**/.idea/
|
||||||
*.magnet
|
*.magnet
|
||||||
**.torrent
|
**.torrent
|
||||||
|
|
||||||
|
|||||||
54
.github/workflows/docker.yml
vendored
Normal file
54
.github/workflows/docker.yml
vendored
Normal file
@@ -0,0 +1,54 @@
|
|||||||
|
name: Docker Build and Push
|
||||||
|
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
branches:
|
||||||
|
- main
|
||||||
|
- beta
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
docker:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- name: Checkout repository
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
with:
|
||||||
|
fetch-depth: 0
|
||||||
|
|
||||||
|
- name: Get version
|
||||||
|
id: get_version
|
||||||
|
run: |
|
||||||
|
LATEST_TAG=$(git tag | sort -V | tail -n1)
|
||||||
|
echo "latest_tag=${LATEST_TAG}" >> $GITHUB_ENV
|
||||||
|
|
||||||
|
- name: Set up QEMU
|
||||||
|
uses: docker/setup-qemu-action@v3
|
||||||
|
|
||||||
|
- name: Set up Docker Buildx
|
||||||
|
uses: docker/setup-buildx-action@v3
|
||||||
|
|
||||||
|
- name: Login to Docker Hub
|
||||||
|
uses: docker/login-action@v3
|
||||||
|
with:
|
||||||
|
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||||
|
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||||
|
|
||||||
|
- name: Build and push for beta branch
|
||||||
|
if: github.ref == 'refs/heads/beta'
|
||||||
|
uses: docker/build-push-action@v5
|
||||||
|
with:
|
||||||
|
context: .
|
||||||
|
platforms: linux/amd64,linux/arm64,linux/arm/v7
|
||||||
|
push: true
|
||||||
|
tags: cy01/blackhole:beta
|
||||||
|
|
||||||
|
- name: Build and push for main branch
|
||||||
|
if: github.ref == 'refs/heads/main'
|
||||||
|
uses: docker/build-push-action@v5
|
||||||
|
with:
|
||||||
|
context: .
|
||||||
|
platforms: linux/amd64,linux/arm64,linux/arm/v7
|
||||||
|
push: true
|
||||||
|
tags: |
|
||||||
|
cy01/blackhole:latest
|
||||||
|
cy01/blackhole:${{ env.latest_tag }}
|
||||||
32
.github/workflows/release.yml
vendored
Normal file
32
.github/workflows/release.yml
vendored
Normal file
@@ -0,0 +1,32 @@
|
|||||||
|
name: Release
|
||||||
|
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
tags:
|
||||||
|
- '*'
|
||||||
|
|
||||||
|
permissions:
|
||||||
|
contents: write
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
goreleaser:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- name: Checkout
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
with:
|
||||||
|
fetch-depth: 0
|
||||||
|
|
||||||
|
- name: Set up Go
|
||||||
|
uses: actions/setup-go@v4
|
||||||
|
with:
|
||||||
|
go-version: '1.22'
|
||||||
|
|
||||||
|
- name: Run GoReleaser
|
||||||
|
uses: goreleaser/goreleaser-action@v5
|
||||||
|
with:
|
||||||
|
distribution: goreleaser
|
||||||
|
version: latest
|
||||||
|
args: release --clean
|
||||||
|
env:
|
||||||
|
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||||
1
.gitignore
vendored
1
.gitignore
vendored
@@ -9,3 +9,4 @@ docker-compose.yml
|
|||||||
*.log
|
*.log
|
||||||
*.log.*
|
*.log.*
|
||||||
dist/
|
dist/
|
||||||
|
tmp/**
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
version: 2
|
version: 1
|
||||||
|
|
||||||
before:
|
before:
|
||||||
hooks:
|
hooks:
|
||||||
|
|||||||
24
CHANGELOG.md
24
CHANGELOG.md
@@ -83,4 +83,26 @@
|
|||||||
- Add support for multiple debrid providers
|
- Add support for multiple debrid providers
|
||||||
- Add Torbox support
|
- Add Torbox support
|
||||||
- Add support for configurable debrid cache checks
|
- Add support for configurable debrid cache checks
|
||||||
- Add support for configurable debrid download uncached torrents
|
- Add support for configurable debrid download uncached torrents
|
||||||
|
|
||||||
|
#### 0.3.0
|
||||||
|
|
||||||
|
- Add UI for adding torrents
|
||||||
|
- Refraction of the code
|
||||||
|
- -Fix Torbox bug
|
||||||
|
- Update CI/CD
|
||||||
|
- Update Readme
|
||||||
|
|
||||||
|
#### 0.3.1
|
||||||
|
|
||||||
|
- Add DebridLink Support
|
||||||
|
- Refactor error handling
|
||||||
|
|
||||||
|
#### 0.3.2
|
||||||
|
|
||||||
|
- Fix DebridLink not downloading
|
||||||
|
- Fix Torbox with uncached torrents
|
||||||
|
- Add new /internal/cached endpoint to check if an hash is cached
|
||||||
|
- implement per-debrid local cache
|
||||||
|
- Fix file check for torbox
|
||||||
|
- Other minor bug fixes
|
||||||
@@ -20,8 +20,11 @@ RUN CGO_ENABLED=0 GOOS=$(echo $TARGETPLATFORM | cut -d '/' -f1) GOARCH=$(echo $T
|
|||||||
FROM scratch
|
FROM scratch
|
||||||
COPY --from=builder /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
|
COPY --from=builder /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
|
||||||
COPY --from=builder /blackhole /blackhole
|
COPY --from=builder /blackhole /blackhole
|
||||||
|
COPY --from=builder /app/README.md /README.md
|
||||||
|
|
||||||
EXPOSE 8181
|
EXPOSE 8181
|
||||||
|
|
||||||
|
VOLUME ["/app"]
|
||||||
|
|
||||||
# Run
|
# Run
|
||||||
CMD ["/blackhole", "--config", "/app/config.json"]
|
CMD ["/blackhole", "--config", "/app/config.json"]
|
||||||
|
|||||||
20
README.md
20
README.md
@@ -9,13 +9,16 @@ This is a Golang implementation go Torrent QbitTorrent with a **Real Debrid & To
|
|||||||
- Proxy support for the Arrs
|
- Proxy support for the Arrs
|
||||||
- Real Debrid Support
|
- Real Debrid Support
|
||||||
- Torbox Support
|
- Torbox Support
|
||||||
|
- Debrid Link Support
|
||||||
- Multi-Debrid Providers support
|
- Multi-Debrid Providers support
|
||||||
|
- UI for adding torrents directly to *arrs
|
||||||
|
|
||||||
The proxy is useful in filtering out un-cached Real Debrid torrents
|
The proxy is useful in filtering out un-cached Real Debrid torrents
|
||||||
|
|
||||||
### Supported Debrid Providers
|
### Supported Debrid Providers
|
||||||
- Real Debrid
|
- Real Debrid
|
||||||
- Torbox
|
- Torbox
|
||||||
|
- Debrid Link
|
||||||
|
|
||||||
### Changelog
|
### Changelog
|
||||||
|
|
||||||
@@ -66,7 +69,7 @@ Download the binary from the releases page and run it with the config file.
|
|||||||
"name": "torbox",
|
"name": "torbox",
|
||||||
"host": "https://api.torbox.app/v1",
|
"host": "https://api.torbox.app/v1",
|
||||||
"api_key": "torbox_api_key",
|
"api_key": "torbox_api_key",
|
||||||
"folder": "data/realdebrid/torrents/",
|
"folder": "data/torbox/torrents/",
|
||||||
"rate_limit": "250/minute",
|
"rate_limit": "250/minute",
|
||||||
"download_uncached": false,
|
"download_uncached": false,
|
||||||
"check_cached": true
|
"check_cached": true
|
||||||
@@ -79,6 +82,15 @@ Download the binary from the releases page and run it with the config file.
|
|||||||
"rate_limit": "250/minute",
|
"rate_limit": "250/minute",
|
||||||
"download_uncached": false,
|
"download_uncached": false,
|
||||||
"check_cached": false
|
"check_cached": false
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "debridlink",
|
||||||
|
"host": "https://debrid-link.com/api/v2",
|
||||||
|
"api_key": "debridlink_key",
|
||||||
|
"folder": "data/debridlink/torrents/",
|
||||||
|
"rate_limit": "250/minute",
|
||||||
|
"download_uncached": false,
|
||||||
|
"check_cached": false
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
"proxy": {
|
"proxy": {
|
||||||
@@ -163,6 +175,12 @@ Setting Up Qbittorrent in Arr
|
|||||||
- Test
|
- Test
|
||||||
- Save
|
- Save
|
||||||
|
|
||||||
|
### UI for adding torrents
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
The UI is a simple web interface that allows you to add torrents directly to the Arrs(Sonarr, Radarr, etc)
|
||||||
|
|
||||||
### TODO
|
### TODO
|
||||||
- [ ] A proper name!!!!
|
- [ ] A proper name!!!!
|
||||||
- [ ] Debrid
|
- [ ] Debrid
|
||||||
|
|||||||
30
cmd/main.go
30
cmd/main.go
@@ -2,6 +2,7 @@ package cmd
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"cmp"
|
"cmp"
|
||||||
|
"context"
|
||||||
"goBlack/common"
|
"goBlack/common"
|
||||||
"goBlack/pkg/debrid"
|
"goBlack/pkg/debrid"
|
||||||
"goBlack/pkg/proxy"
|
"goBlack/pkg/proxy"
|
||||||
@@ -9,32 +10,43 @@ import (
|
|||||||
"sync"
|
"sync"
|
||||||
)
|
)
|
||||||
|
|
||||||
func Start(config *common.Config) {
|
func Start(ctx context.Context, config *common.Config) error {
|
||||||
maxCacheSize := cmp.Or(config.MaxCacheSize, 1000)
|
maxCacheSize := cmp.Or(config.MaxCacheSize, 1000)
|
||||||
cache := common.NewCache(maxCacheSize)
|
|
||||||
|
|
||||||
deb := debrid.NewDebrid(config.Debrids, cache)
|
deb := debrid.NewDebrid(config.Debrids, maxCacheSize)
|
||||||
|
|
||||||
var wg sync.WaitGroup
|
var wg sync.WaitGroup
|
||||||
|
errChan := make(chan error, 2)
|
||||||
|
|
||||||
if config.Proxy.Enabled {
|
if config.Proxy.Enabled {
|
||||||
p := proxy.NewProxy(*config, deb, cache)
|
|
||||||
wg.Add(1)
|
wg.Add(1)
|
||||||
go func() {
|
go func() {
|
||||||
defer wg.Done()
|
defer wg.Done()
|
||||||
p.Start()
|
if err := proxy.NewProxy(*config, deb).Start(ctx); err != nil {
|
||||||
|
errChan <- err
|
||||||
|
}
|
||||||
}()
|
}()
|
||||||
}
|
}
|
||||||
if config.QBitTorrent.Port != "" {
|
if config.QBitTorrent.Port != "" {
|
||||||
qb := qbit.NewQBit(config, deb, cache)
|
|
||||||
wg.Add(1)
|
wg.Add(1)
|
||||||
go func() {
|
go func() {
|
||||||
defer wg.Done()
|
defer wg.Done()
|
||||||
qb.Start()
|
if err := qbit.Start(ctx, config, deb); err != nil {
|
||||||
|
errChan <- err
|
||||||
|
}
|
||||||
}()
|
}()
|
||||||
}
|
}
|
||||||
|
|
||||||
// Wait indefinitely
|
go func() {
|
||||||
wg.Wait()
|
wg.Wait()
|
||||||
|
close(errChan)
|
||||||
|
}()
|
||||||
|
|
||||||
|
// Wait for context cancellation or completion or error
|
||||||
|
select {
|
||||||
|
case err := <-errChan:
|
||||||
|
return err
|
||||||
|
case <-ctx.Done():
|
||||||
|
return ctx.Err()
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -40,14 +40,16 @@ func (c *Cache) AddMultiple(values map[string]bool) {
|
|||||||
c.mu.Lock()
|
c.mu.Lock()
|
||||||
defer c.mu.Unlock()
|
defer c.mu.Unlock()
|
||||||
|
|
||||||
for value := range values {
|
for value, exists := range values {
|
||||||
if _, exists := c.data[value]; !exists {
|
if !exists {
|
||||||
if len(c.order) >= c.maxItems {
|
if _, exists := c.data[value]; !exists {
|
||||||
delete(c.data, c.order[0])
|
if len(c.order) >= c.maxItems {
|
||||||
c.order = c.order[1:]
|
delete(c.data, c.order[0])
|
||||||
|
c.order = c.order[1:]
|
||||||
|
}
|
||||||
|
c.data[value] = struct{}{}
|
||||||
|
c.order = append(c.order, value)
|
||||||
}
|
}
|
||||||
c.data[value] = struct{}{}
|
|
||||||
c.order = append(c.order, value)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -2,8 +2,11 @@ package common
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
"log"
|
"log"
|
||||||
"os"
|
"os"
|
||||||
|
"sync"
|
||||||
)
|
)
|
||||||
|
|
||||||
type DebridConfig struct {
|
type DebridConfig struct {
|
||||||
@@ -43,6 +46,82 @@ type Config struct {
|
|||||||
QBitTorrent QBitTorrentConfig `json:"qbittorrent"`
|
QBitTorrent QBitTorrentConfig `json:"qbittorrent"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func validateDebrids(debrids []DebridConfig) error {
|
||||||
|
if len(debrids) == 0 {
|
||||||
|
return errors.New("no debrids configured")
|
||||||
|
}
|
||||||
|
|
||||||
|
errChan := make(chan error, len(debrids))
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
|
||||||
|
for _, debrid := range debrids {
|
||||||
|
// Basic field validation
|
||||||
|
if debrid.Host == "" {
|
||||||
|
return errors.New("debrid host is required")
|
||||||
|
}
|
||||||
|
if debrid.APIKey == "" {
|
||||||
|
return errors.New("debrid api key is required")
|
||||||
|
}
|
||||||
|
if debrid.Folder == "" {
|
||||||
|
return errors.New("debrid folder is required")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check folder existence concurrently
|
||||||
|
wg.Add(1)
|
||||||
|
go func(folder string) {
|
||||||
|
defer wg.Done()
|
||||||
|
if _, err := os.Stat(folder); os.IsNotExist(err) {
|
||||||
|
errChan <- fmt.Errorf("debrid folder does not exist: %s", folder)
|
||||||
|
}
|
||||||
|
}(debrid.Folder)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Wait for all checks to complete
|
||||||
|
go func() {
|
||||||
|
wg.Wait()
|
||||||
|
close(errChan)
|
||||||
|
}()
|
||||||
|
|
||||||
|
// Return first error if any
|
||||||
|
if err := <-errChan; err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func validateQbitTorrent(config *QBitTorrentConfig) error {
|
||||||
|
if config.DownloadFolder == "" {
|
||||||
|
return errors.New("qbittorent download folder is required")
|
||||||
|
}
|
||||||
|
if _, err := os.Stat(config.DownloadFolder); os.IsNotExist(err) {
|
||||||
|
return errors.New("qbittorent download folder does not exist")
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func validateConfig(config *Config) error {
|
||||||
|
// Run validations concurrently
|
||||||
|
errChan := make(chan error, 2)
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
errChan <- validateDebrids(config.Debrids)
|
||||||
|
}()
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
errChan <- validateQbitTorrent(&config.QBitTorrent)
|
||||||
|
}()
|
||||||
|
|
||||||
|
// Check for errors
|
||||||
|
for i := 0; i < 2; i++ {
|
||||||
|
if err := <-errChan; err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
func LoadConfig(path string) (*Config, error) {
|
func LoadConfig(path string) (*Config, error) {
|
||||||
// Load the config file
|
// Load the config file
|
||||||
file, err := os.Open(path)
|
file, err := os.Open(path)
|
||||||
@@ -67,5 +146,10 @@ func LoadConfig(path string) (*Config, error) {
|
|||||||
config.Debrids = append(config.Debrids, config.Debrid)
|
config.Debrids = append(config.Debrids, config.Debrid)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Validate the config
|
||||||
|
//if err := validateConfig(config); err != nil {
|
||||||
|
// return nil, err
|
||||||
|
//}
|
||||||
|
|
||||||
return config, nil
|
return config, nil
|
||||||
}
|
}
|
||||||
|
|||||||
14
common/logger.go
Normal file
14
common/logger.go
Normal file
@@ -0,0 +1,14 @@
|
|||||||
|
package common
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"os"
|
||||||
|
)
|
||||||
|
|
||||||
|
func NewLogger(prefix string, output *os.File) *log.Logger {
|
||||||
|
f := fmt.Sprintf("[%s] ", prefix)
|
||||||
|
return log.New(output, f, log.LstdFlags)
|
||||||
|
}
|
||||||
|
|
||||||
|
var Logger = NewLogger("Main", os.Stdout)
|
||||||
@@ -2,6 +2,7 @@ package common
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"crypto/tls"
|
"crypto/tls"
|
||||||
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"golang.org/x/time/rate"
|
"golang.org/x/time/rate"
|
||||||
"io"
|
"io"
|
||||||
@@ -74,7 +75,9 @@ func (c *RLHTTPClient) MakeRequest(req *http.Request) ([]byte, error) {
|
|||||||
b, _ := io.ReadAll(res.Body)
|
b, _ := io.ReadAll(res.Body)
|
||||||
statusOk := strconv.Itoa(res.StatusCode)[0] == '2'
|
statusOk := strconv.Itoa(res.StatusCode)[0] == '2'
|
||||||
if !statusOk {
|
if !statusOk {
|
||||||
return nil, fmt.Errorf("unexpected status code: %d", res.StatusCode)
|
// Add status code error to the body
|
||||||
|
b = append(b, []byte(fmt.Sprintf("\nstatus code: %d", res.StatusCode))...)
|
||||||
|
return nil, fmt.Errorf(string(b))
|
||||||
}
|
}
|
||||||
defer func(Body io.ReadCloser) {
|
defer func(Body io.ReadCloser) {
|
||||||
err := Body.Close()
|
err := Body.Close()
|
||||||
@@ -125,3 +128,9 @@ func ParseRateLimit(rateStr string) *rate.Limiter {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func JSONResponse(w http.ResponseWriter, data interface{}, code int) {
|
||||||
|
w.Header().Set("Content-Type", "application/json")
|
||||||
|
w.WriteHeader(code)
|
||||||
|
json.NewEncoder(w).Encode(data)
|
||||||
|
}
|
||||||
|
|||||||
@@ -209,11 +209,6 @@ func processInfoHash(input string) (string, error) {
|
|||||||
return "", fmt.Errorf("invalid infohash: %s", input)
|
return "", fmt.Errorf("invalid infohash: %s", input)
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewLogger(prefix string, output *os.File) *log.Logger {
|
|
||||||
f := fmt.Sprintf("[%s] ", prefix)
|
|
||||||
return log.New(output, f, log.LstdFlags)
|
|
||||||
}
|
|
||||||
|
|
||||||
func GetInfohashFromURL(url string) (string, error) {
|
func GetInfohashFromURL(url string) (string, error) {
|
||||||
// Download the torrent file
|
// Download the torrent file
|
||||||
var magnetLink string
|
var magnetLink string
|
||||||
|
|||||||
BIN
doc/ui.png
Normal file
BIN
doc/ui.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 124 KiB |
6
main.go
6
main.go
@@ -1,6 +1,7 @@
|
|||||||
package main
|
package main
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"flag"
|
"flag"
|
||||||
"goBlack/cmd"
|
"goBlack/cmd"
|
||||||
"goBlack/common"
|
"goBlack/common"
|
||||||
@@ -17,6 +18,9 @@ func main() {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatal(err)
|
log.Fatal(err)
|
||||||
}
|
}
|
||||||
cmd.Start(conf)
|
ctx := context.Background()
|
||||||
|
if err := cmd.Start(ctx, conf); err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|||||||
120
pkg/arr/arr.go
Normal file
120
pkg/arr/arr.go
Normal file
@@ -0,0 +1,120 @@
|
|||||||
|
package arr
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"goBlack/common"
|
||||||
|
"log"
|
||||||
|
"net/http"
|
||||||
|
"os"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Type is a type of arr
|
||||||
|
type Type string
|
||||||
|
|
||||||
|
const (
|
||||||
|
Sonarr Type = "sonarr"
|
||||||
|
Radarr Type = "radarr"
|
||||||
|
Lidarr Type = "lidarr"
|
||||||
|
Readarr Type = "readarr"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
client *common.RLHTTPClient = common.NewRLHTTPClient(nil, nil)
|
||||||
|
logger *log.Logger = common.NewLogger("QBit", os.Stdout)
|
||||||
|
)
|
||||||
|
|
||||||
|
type Arr struct {
|
||||||
|
Name string `json:"name"`
|
||||||
|
Host string `json:"host"`
|
||||||
|
Token string `json:"token"`
|
||||||
|
Type Type `json:"type"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewArr(name, host, token string, arrType Type) *Arr {
|
||||||
|
return &Arr{
|
||||||
|
Name: name,
|
||||||
|
Host: host,
|
||||||
|
Token: token,
|
||||||
|
Type: arrType,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *Arr) Request(method, endpoint string, payload interface{}) (*http.Response, error) {
|
||||||
|
if a.Token == "" || a.Host == "" {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
url, err := common.JoinURL(a.Host, endpoint)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
var jsonPayload []byte
|
||||||
|
|
||||||
|
if payload != nil {
|
||||||
|
jsonPayload, err = json.Marshal(payload)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
req, err := http.NewRequest(method, url, bytes.NewBuffer(jsonPayload))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
req.Header.Set("Content-Type", "application/json")
|
||||||
|
req.Header.Set("X-Api-Key", a.Token)
|
||||||
|
return client.Do(req)
|
||||||
|
}
|
||||||
|
|
||||||
|
type Storage struct {
|
||||||
|
Arrs map[string]*Arr // name -> arr
|
||||||
|
mu sync.RWMutex
|
||||||
|
}
|
||||||
|
|
||||||
|
func inferType(host, name string) Type {
|
||||||
|
switch {
|
||||||
|
case strings.Contains(host, "sonarr") || strings.Contains(name, "sonarr"):
|
||||||
|
return Sonarr
|
||||||
|
case strings.Contains(host, "radarr") || strings.Contains(name, "radarr"):
|
||||||
|
return Radarr
|
||||||
|
case strings.Contains(host, "lidarr") || strings.Contains(name, "lidarr"):
|
||||||
|
return Lidarr
|
||||||
|
case strings.Contains(host, "readarr") || strings.Contains(name, "readarr"):
|
||||||
|
return Readarr
|
||||||
|
default:
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewStorage() *Storage {
|
||||||
|
arrs := make(map[string]*Arr)
|
||||||
|
//for name, arrCfg := range cfg {
|
||||||
|
// arrs[name] = NewArr(name, arrCfg.Host, arrCfg.Token, inferType(arrCfg.Host, name))
|
||||||
|
//}
|
||||||
|
return &Storage{
|
||||||
|
Arrs: arrs,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (as *Storage) AddOrUpdate(arr *Arr) {
|
||||||
|
as.mu.Lock()
|
||||||
|
defer as.mu.Unlock()
|
||||||
|
as.Arrs[arr.Host] = arr
|
||||||
|
}
|
||||||
|
|
||||||
|
func (as *Storage) Get(name string) *Arr {
|
||||||
|
as.mu.RLock()
|
||||||
|
defer as.mu.RUnlock()
|
||||||
|
return as.Arrs[name]
|
||||||
|
}
|
||||||
|
|
||||||
|
func (as *Storage) GetAll() []*Arr {
|
||||||
|
as.mu.RLock()
|
||||||
|
defer as.mu.RUnlock()
|
||||||
|
arrs := make([]*Arr, 0, len(as.Arrs))
|
||||||
|
for _, arr := range as.Arrs {
|
||||||
|
arrs = append(arrs, arr)
|
||||||
|
}
|
||||||
|
return arrs
|
||||||
|
}
|
||||||
29
pkg/arr/content.go
Normal file
29
pkg/arr/content.go
Normal file
@@ -0,0 +1,29 @@
|
|||||||
|
package arr
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
)
|
||||||
|
|
||||||
|
type ContentRequest struct {
|
||||||
|
ID string `json:"id"`
|
||||||
|
Title string `json:"name"`
|
||||||
|
Arr string `json:"arr"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *Arr) GetContents() *ContentRequest {
|
||||||
|
resp, err := a.Request(http.MethodGet, "api/v3/series", nil)
|
||||||
|
if err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
var data *ContentRequest
|
||||||
|
if err = json.NewDecoder(resp.Body).Decode(&data); err != nil {
|
||||||
|
fmt.Printf("Error: %v\n", err)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
fmt.Printf("Data: %v\n", data)
|
||||||
|
//data.Arr = a.Name
|
||||||
|
return data
|
||||||
|
}
|
||||||
41
pkg/arr/history.go
Normal file
41
pkg/arr/history.go
Normal file
@@ -0,0 +1,41 @@
|
|||||||
|
package arr
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"net/http"
|
||||||
|
gourl "net/url"
|
||||||
|
)
|
||||||
|
|
||||||
|
type HistorySchema struct {
|
||||||
|
Page int `json:"page"`
|
||||||
|
PageSize int `json:"pageSize"`
|
||||||
|
SortKey string `json:"sortKey"`
|
||||||
|
SortDirection string `json:"sortDirection"`
|
||||||
|
TotalRecords int `json:"totalRecords"`
|
||||||
|
Records []struct {
|
||||||
|
ID int `json:"id"`
|
||||||
|
DownloadID string `json:"downloadId"`
|
||||||
|
} `json:"records"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *Arr) GetHistory(downloadId, eventType string) *HistorySchema {
|
||||||
|
query := gourl.Values{}
|
||||||
|
if downloadId != "" {
|
||||||
|
query.Add("downloadId", downloadId)
|
||||||
|
}
|
||||||
|
query.Add("eventType", eventType)
|
||||||
|
query.Add("pageSize", "100")
|
||||||
|
url := "history" + "?" + query.Encode()
|
||||||
|
resp, err := a.Request(http.MethodGet, url, nil)
|
||||||
|
if err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
var data *HistorySchema
|
||||||
|
|
||||||
|
if err = json.NewDecoder(resp.Body).Decode(&data); err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return data
|
||||||
|
|
||||||
|
}
|
||||||
209
pkg/arr/import.go
Normal file
209
pkg/arr/import.go
Normal file
@@ -0,0 +1,209 @@
|
|||||||
|
package arr
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
gourl "net/url"
|
||||||
|
"strconv"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
type ImportResponseSchema struct {
|
||||||
|
Path string `json:"path"`
|
||||||
|
RelativePath string `json:"relativePath"`
|
||||||
|
FolderName string `json:"folderName"`
|
||||||
|
Name string `json:"name"`
|
||||||
|
Size int `json:"size"`
|
||||||
|
Series struct {
|
||||||
|
Title string `json:"title"`
|
||||||
|
SortTitle string `json:"sortTitle"`
|
||||||
|
Status string `json:"status"`
|
||||||
|
Ended bool `json:"ended"`
|
||||||
|
Overview string `json:"overview"`
|
||||||
|
Network string `json:"network"`
|
||||||
|
AirTime string `json:"airTime"`
|
||||||
|
Images []struct {
|
||||||
|
CoverType string `json:"coverType"`
|
||||||
|
RemoteUrl string `json:"remoteUrl"`
|
||||||
|
} `json:"images"`
|
||||||
|
OriginalLanguage struct {
|
||||||
|
Id int `json:"id"`
|
||||||
|
Name string `json:"name"`
|
||||||
|
} `json:"originalLanguage"`
|
||||||
|
Seasons []struct {
|
||||||
|
SeasonNumber int `json:"seasonNumber"`
|
||||||
|
Monitored bool `json:"monitored"`
|
||||||
|
} `json:"seasons"`
|
||||||
|
Year int `json:"year"`
|
||||||
|
Path string `json:"path"`
|
||||||
|
QualityProfileId int `json:"qualityProfileId"`
|
||||||
|
SeasonFolder bool `json:"seasonFolder"`
|
||||||
|
Monitored bool `json:"monitored"`
|
||||||
|
MonitorNewItems string `json:"monitorNewItems"`
|
||||||
|
UseSceneNumbering bool `json:"useSceneNumbering"`
|
||||||
|
Runtime int `json:"runtime"`
|
||||||
|
TvdbId int `json:"tvdbId"`
|
||||||
|
TvRageId int `json:"tvRageId"`
|
||||||
|
TvMazeId int `json:"tvMazeId"`
|
||||||
|
TmdbId int `json:"tmdbId"`
|
||||||
|
FirstAired time.Time `json:"firstAired"`
|
||||||
|
LastAired time.Time `json:"lastAired"`
|
||||||
|
SeriesType string `json:"seriesType"`
|
||||||
|
CleanTitle string `json:"cleanTitle"`
|
||||||
|
ImdbId string `json:"imdbId"`
|
||||||
|
TitleSlug string `json:"titleSlug"`
|
||||||
|
Certification string `json:"certification"`
|
||||||
|
Genres []string `json:"genres"`
|
||||||
|
Tags []interface{} `json:"tags"`
|
||||||
|
Added time.Time `json:"added"`
|
||||||
|
Ratings struct {
|
||||||
|
Votes int `json:"votes"`
|
||||||
|
Value float64 `json:"value"`
|
||||||
|
} `json:"ratings"`
|
||||||
|
LanguageProfileId int `json:"languageProfileId"`
|
||||||
|
Id int `json:"id"`
|
||||||
|
} `json:"series"`
|
||||||
|
SeasonNumber int `json:"seasonNumber"`
|
||||||
|
Episodes []struct {
|
||||||
|
SeriesId int `json:"seriesId"`
|
||||||
|
TvdbId int `json:"tvdbId"`
|
||||||
|
EpisodeFileId int `json:"episodeFileId"`
|
||||||
|
SeasonNumber int `json:"seasonNumber"`
|
||||||
|
EpisodeNumber int `json:"episodeNumber"`
|
||||||
|
Title string `json:"title"`
|
||||||
|
AirDate string `json:"airDate"`
|
||||||
|
AirDateUtc time.Time `json:"airDateUtc"`
|
||||||
|
Runtime int `json:"runtime"`
|
||||||
|
Overview string `json:"overview"`
|
||||||
|
HasFile bool `json:"hasFile"`
|
||||||
|
Monitored bool `json:"monitored"`
|
||||||
|
AbsoluteEpisodeNumber int `json:"absoluteEpisodeNumber"`
|
||||||
|
UnverifiedSceneNumbering bool `json:"unverifiedSceneNumbering"`
|
||||||
|
Id int `json:"id"`
|
||||||
|
FinaleType string `json:"finaleType,omitempty"`
|
||||||
|
} `json:"episodes"`
|
||||||
|
ReleaseGroup string `json:"releaseGroup"`
|
||||||
|
Quality struct {
|
||||||
|
Quality struct {
|
||||||
|
Id int `json:"id"`
|
||||||
|
Name string `json:"name"`
|
||||||
|
Source string `json:"source"`
|
||||||
|
Resolution int `json:"resolution"`
|
||||||
|
} `json:"quality"`
|
||||||
|
Revision struct {
|
||||||
|
Version int `json:"version"`
|
||||||
|
Real int `json:"real"`
|
||||||
|
IsRepack bool `json:"isRepack"`
|
||||||
|
} `json:"revision"`
|
||||||
|
} `json:"quality"`
|
||||||
|
Languages []struct {
|
||||||
|
Id int `json:"id"`
|
||||||
|
Name string `json:"name"`
|
||||||
|
} `json:"languages"`
|
||||||
|
QualityWeight int `json:"qualityWeight"`
|
||||||
|
CustomFormats []interface{} `json:"customFormats"`
|
||||||
|
CustomFormatScore int `json:"customFormatScore"`
|
||||||
|
IndexerFlags int `json:"indexerFlags"`
|
||||||
|
ReleaseType string `json:"releaseType"`
|
||||||
|
Rejections []struct {
|
||||||
|
Reason string `json:"reason"`
|
||||||
|
Type string `json:"type"`
|
||||||
|
} `json:"rejections"`
|
||||||
|
Id int `json:"id"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type ManualImportRequestFile struct {
|
||||||
|
Path string `json:"path"`
|
||||||
|
SeriesId int `json:"seriesId"`
|
||||||
|
SeasonNumber int `json:"seasonNumber"`
|
||||||
|
EpisodeIds []int `json:"episodeIds"`
|
||||||
|
Quality struct {
|
||||||
|
Quality struct {
|
||||||
|
Id int `json:"id"`
|
||||||
|
Name string `json:"name"`
|
||||||
|
Source string `json:"source"`
|
||||||
|
Resolution int `json:"resolution"`
|
||||||
|
} `json:"quality"`
|
||||||
|
Revision struct {
|
||||||
|
Version int `json:"version"`
|
||||||
|
Real int `json:"real"`
|
||||||
|
IsRepack bool `json:"isRepack"`
|
||||||
|
} `json:"revision"`
|
||||||
|
} `json:"quality"`
|
||||||
|
Languages []struct {
|
||||||
|
Id int `json:"id"`
|
||||||
|
Name string `json:"name"`
|
||||||
|
} `json:"languages"`
|
||||||
|
ReleaseGroup string `json:"releaseGroup"`
|
||||||
|
CustomFormats []interface{} `json:"customFormats"`
|
||||||
|
CustomFormatScore int `json:"customFormatScore"`
|
||||||
|
IndexerFlags int `json:"indexerFlags"`
|
||||||
|
ReleaseType string `json:"releaseType"`
|
||||||
|
Rejections []struct {
|
||||||
|
Reason string `json:"reason"`
|
||||||
|
Type string `json:"type"`
|
||||||
|
} `json:"rejections"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type ManualImportRequestSchema struct {
|
||||||
|
Name string `json:"name"`
|
||||||
|
Files []ManualImportRequestFile `json:"files"`
|
||||||
|
ImportMode string `json:"importMode"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *Arr) Import(path string, seriesId int, seasons []int) (io.ReadCloser, error) {
|
||||||
|
query := gourl.Values{}
|
||||||
|
query.Add("folder", path)
|
||||||
|
if seriesId != 0 {
|
||||||
|
query.Add("seriesId", strconv.Itoa(seriesId))
|
||||||
|
}
|
||||||
|
url := "api/v3/manualimport" + "?" + query.Encode()
|
||||||
|
resp, err := a.Request(http.MethodGet, url, nil)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to import, invalid file: %w", err)
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
var data []ImportResponseSchema
|
||||||
|
if err = json.NewDecoder(resp.Body).Decode(&data); err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to decode response: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
var files []ManualImportRequestFile
|
||||||
|
for _, d := range data {
|
||||||
|
episodesIds := []int{}
|
||||||
|
for _, e := range d.Episodes {
|
||||||
|
episodesIds = append(episodesIds, e.Id)
|
||||||
|
}
|
||||||
|
file := ManualImportRequestFile{
|
||||||
|
Path: d.Path,
|
||||||
|
SeriesId: d.Series.Id,
|
||||||
|
SeasonNumber: d.SeasonNumber,
|
||||||
|
EpisodeIds: episodesIds,
|
||||||
|
Quality: d.Quality,
|
||||||
|
Languages: d.Languages,
|
||||||
|
ReleaseGroup: d.ReleaseGroup,
|
||||||
|
CustomFormats: d.CustomFormats,
|
||||||
|
CustomFormatScore: d.CustomFormatScore,
|
||||||
|
IndexerFlags: d.IndexerFlags,
|
||||||
|
ReleaseType: d.ReleaseType,
|
||||||
|
Rejections: d.Rejections,
|
||||||
|
}
|
||||||
|
files = append(files, file)
|
||||||
|
}
|
||||||
|
request := ManualImportRequestSchema{
|
||||||
|
Name: "ManualImport",
|
||||||
|
Files: files,
|
||||||
|
ImportMode: "copy",
|
||||||
|
}
|
||||||
|
|
||||||
|
url = "api/v3/command"
|
||||||
|
resp, err = a.Request(http.MethodPost, url, request)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to import: %w", err)
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
return resp.Body, nil
|
||||||
|
|
||||||
|
}
|
||||||
54
pkg/arr/refresh.go
Normal file
54
pkg/arr/refresh.go
Normal file
@@ -0,0 +1,54 @@
|
|||||||
|
package arr
|
||||||
|
|
||||||
|
import (
|
||||||
|
"cmp"
|
||||||
|
"fmt"
|
||||||
|
"goBlack/common"
|
||||||
|
"net/http"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
func (a *Arr) Refresh() error {
|
||||||
|
payload := map[string]string{"name": "RefreshMonitoredDownloads"}
|
||||||
|
|
||||||
|
resp, err := a.Request(http.MethodPost, "api/v3/command", payload)
|
||||||
|
if err == nil && resp != nil {
|
||||||
|
statusOk := strconv.Itoa(resp.StatusCode)[0] == '2'
|
||||||
|
if statusOk {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return fmt.Errorf("failed to refresh monitored downloads for %s", cmp.Or(a.Name, a.Host))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *Arr) MarkAsFailed(infoHash string) error {
|
||||||
|
downloadId := strings.ToUpper(infoHash)
|
||||||
|
history := a.GetHistory(downloadId, "grabbed")
|
||||||
|
if history == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
torrentId := 0
|
||||||
|
for _, record := range history.Records {
|
||||||
|
if strings.EqualFold(record.DownloadID, downloadId) {
|
||||||
|
torrentId = record.ID
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if torrentId != 0 {
|
||||||
|
url, err := common.JoinURL(a.Host, "history/failed/", strconv.Itoa(torrentId))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
req, err := http.NewRequest(http.MethodPost, url, nil)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
client := &http.Client{}
|
||||||
|
_, err = client.Do(req)
|
||||||
|
if err == nil {
|
||||||
|
return fmt.Errorf("failed to mark %s as failed: %v", cmp.Or(a.Name, a.Host), err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
31
pkg/arr/tmdb.go
Normal file
31
pkg/arr/tmdb.go
Normal file
@@ -0,0 +1,31 @@
|
|||||||
|
package arr
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"net/http"
|
||||||
|
url2 "net/url"
|
||||||
|
)
|
||||||
|
|
||||||
|
type TMDBResponse struct {
|
||||||
|
Page int `json:"page"`
|
||||||
|
Results []struct {
|
||||||
|
ID int `json:"id"`
|
||||||
|
Name string `json:"name"`
|
||||||
|
MediaType string `json:"media_type"`
|
||||||
|
PosterPath string `json:"poster_path"`
|
||||||
|
} `json:"results"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func SearchTMDB(term string) (*TMDBResponse, error) {
|
||||||
|
resp, err := http.Get("https://api.themoviedb.org/3/search/multi?api_key=key&query=" + url2.QueryEscape(term))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var data *TMDBResponse
|
||||||
|
if err = json.NewDecoder(resp.Body).Decode(&data); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return data, nil
|
||||||
|
}
|
||||||
@@ -4,6 +4,7 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"github.com/anacrolix/torrent/metainfo"
|
"github.com/anacrolix/torrent/metainfo"
|
||||||
"goBlack/common"
|
"goBlack/common"
|
||||||
|
"goBlack/pkg/arr"
|
||||||
"log"
|
"log"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
)
|
)
|
||||||
@@ -33,10 +34,13 @@ type Service interface {
|
|||||||
GetLogger() *log.Logger
|
GetLogger() *log.Logger
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewDebrid(debs []common.DebridConfig, cache *common.Cache) *DebridService {
|
func NewDebrid(debs []common.DebridConfig, maxCachedSize int) *DebridService {
|
||||||
debrids := make([]Service, 0)
|
debrids := make([]Service, 0)
|
||||||
|
// Divide the cache size by the number of debrids
|
||||||
|
maxCacheSize := maxCachedSize / len(debs)
|
||||||
|
|
||||||
for _, dc := range debs {
|
for _, dc := range debs {
|
||||||
d := createDebrid(dc, cache)
|
d := createDebrid(dc, common.NewCache(maxCacheSize))
|
||||||
d.GetLogger().Println("Debrid Service started")
|
d.GetLogger().Println("Debrid Service started")
|
||||||
debrids = append(debrids, d)
|
debrids = append(debrids, d)
|
||||||
}
|
}
|
||||||
@@ -94,16 +98,17 @@ func getTorrentInfo(filePath string) (*Torrent, error) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
infoLength := info.Length
|
||||||
magnet := &common.Magnet{
|
magnet := &common.Magnet{
|
||||||
InfoHash: infoHash,
|
InfoHash: infoHash,
|
||||||
Name: info.Name,
|
Name: info.Name,
|
||||||
Size: info.Length,
|
Size: infoLength,
|
||||||
Link: mi.Magnet(&hash, &info).String(),
|
Link: mi.Magnet(&hash, &info).String(),
|
||||||
}
|
}
|
||||||
torrent := &Torrent{
|
torrent := &Torrent{
|
||||||
InfoHash: infoHash,
|
InfoHash: infoHash,
|
||||||
Name: info.Name,
|
Name: info.Name,
|
||||||
Size: info.Length,
|
Size: infoLength,
|
||||||
Magnet: magnet,
|
Magnet: magnet,
|
||||||
Filename: filePath,
|
Filename: filePath,
|
||||||
}
|
}
|
||||||
@@ -112,39 +117,42 @@ func getTorrentInfo(filePath string) (*Torrent, error) {
|
|||||||
|
|
||||||
func GetLocalCache(infohashes []string, cache *common.Cache) ([]string, map[string]bool) {
|
func GetLocalCache(infohashes []string, cache *common.Cache) ([]string, map[string]bool) {
|
||||||
result := make(map[string]bool)
|
result := make(map[string]bool)
|
||||||
|
hashes := make([]string, 0)
|
||||||
|
|
||||||
//if len(infohashes) == 0 {
|
if len(infohashes) == 0 {
|
||||||
// return hashes, result
|
return hashes, result
|
||||||
//}
|
}
|
||||||
//if len(infohashes) == 1 {
|
if len(infohashes) == 1 {
|
||||||
// if cache.Exists(infohashes[0]) {
|
if cache.Exists(infohashes[0]) {
|
||||||
// return hashes, map[string]bool{infohashes[0]: true}
|
return hashes, map[string]bool{infohashes[0]: true}
|
||||||
// }
|
}
|
||||||
// return infohashes, result
|
return infohashes, result
|
||||||
//}
|
}
|
||||||
//
|
|
||||||
//cachedHashes := cache.GetMultiple(infohashes)
|
cachedHashes := cache.GetMultiple(infohashes)
|
||||||
//for _, h := range infohashes {
|
for _, h := range infohashes {
|
||||||
// _, exists := cachedHashes[h]
|
_, exists := cachedHashes[h]
|
||||||
// if !exists {
|
if !exists {
|
||||||
// hashes = append(hashes, h)
|
hashes = append(hashes, h)
|
||||||
// } else {
|
} else {
|
||||||
// result[h] = true
|
result[h] = true
|
||||||
// }
|
}
|
||||||
//}
|
}
|
||||||
|
|
||||||
return infohashes, result
|
return infohashes, result
|
||||||
}
|
}
|
||||||
|
|
||||||
func ProcessQBitTorrent(d *DebridService, magnet *common.Magnet, arr *Arr, isSymlink bool) (*Torrent, error) {
|
func ProcessTorrent(d *DebridService, magnet *common.Magnet, a *arr.Arr, isSymlink bool) (*Torrent, error) {
|
||||||
debridTorrent := &Torrent{
|
debridTorrent := &Torrent{
|
||||||
InfoHash: magnet.InfoHash,
|
InfoHash: magnet.InfoHash,
|
||||||
Magnet: magnet,
|
Magnet: magnet,
|
||||||
Name: magnet.Name,
|
Name: magnet.Name,
|
||||||
Arr: arr,
|
Arr: a,
|
||||||
Size: magnet.Size,
|
Size: magnet.Size,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
errs := make([]error, 0)
|
||||||
|
|
||||||
for index, db := range d.debrids {
|
for index, db := range d.debrids {
|
||||||
log.Println("Processing debrid: ", db.GetName())
|
log.Println("Processing debrid: ", db.GetName())
|
||||||
logger := db.GetLogger()
|
logger := db.GetLogger()
|
||||||
@@ -159,15 +167,22 @@ func ProcessQBitTorrent(d *DebridService, magnet *common.Magnet, arr *Arr, isSym
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
debridTorrent, err := db.SubmitMagnet(debridTorrent)
|
dbt, err := db.SubmitMagnet(debridTorrent)
|
||||||
if err != nil || debridTorrent.Id == "" {
|
if dbt != nil {
|
||||||
logger.Printf("Error submitting magnet: %s", err)
|
dbt.Debrid = db
|
||||||
|
dbt.Arr = a
|
||||||
|
}
|
||||||
|
if err != nil || dbt == nil || dbt.Id == "" {
|
||||||
|
errs = append(errs, err)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
logger.Printf("Torrent: %s submitted to %s", debridTorrent.Name, db.GetName())
|
logger.Printf("Torrent: %s submitted to %s", dbt.Name, db.GetName())
|
||||||
d.lastUsed = index
|
d.lastUsed = index
|
||||||
debridTorrent.Debrid = db
|
return db.CheckStatus(dbt, isSymlink)
|
||||||
return db.CheckStatus(debridTorrent, isSymlink)
|
|
||||||
}
|
}
|
||||||
return nil, fmt.Errorf("failed to process torrent")
|
err := fmt.Errorf("failed to process torrent")
|
||||||
|
for _, e := range errs {
|
||||||
|
err = fmt.Errorf("%w\n%w", err, e)
|
||||||
|
}
|
||||||
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -39,8 +39,8 @@ func (r *DebridLink) IsAvailable(infohashes []string) map[string]bool {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Divide hashes into groups of 100
|
// Divide hashes into groups of 100
|
||||||
for i := 0; i < len(hashes); i += 200 {
|
for i := 0; i < len(hashes); i += 100 {
|
||||||
end := i + 200
|
end := i + 100
|
||||||
if end > len(hashes) {
|
if end > len(hashes) {
|
||||||
end = len(hashes)
|
end = len(hashes)
|
||||||
}
|
}
|
||||||
@@ -89,7 +89,7 @@ func (r *DebridLink) IsAvailable(infohashes []string) map[string]bool {
|
|||||||
|
|
||||||
func (r *DebridLink) GetTorrent(id string) (*Torrent, error) {
|
func (r *DebridLink) GetTorrent(id string) (*Torrent, error) {
|
||||||
torrent := &Torrent{}
|
torrent := &Torrent{}
|
||||||
url := fmt.Sprintf("%s/seedbox/list/?ids=%s", r.Host, id)
|
url := fmt.Sprintf("%s/seedbox/list?ids=%s", r.Host, id)
|
||||||
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
||||||
resp, err := r.client.MakeRequest(req)
|
resp, err := r.client.MakeRequest(req)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -107,14 +107,15 @@ func (r *DebridLink) GetTorrent(id string) (*Torrent, error) {
|
|||||||
return torrent, fmt.Errorf("torrent not found")
|
return torrent, fmt.Errorf("torrent not found")
|
||||||
}
|
}
|
||||||
dt := *res.Value
|
dt := *res.Value
|
||||||
fmt.Printf("Length of dt: %d\n", len(dt))
|
|
||||||
fmt.Printf("Raw response: %+v\n", res)
|
|
||||||
|
|
||||||
if len(dt) == 0 {
|
if len(dt) == 0 {
|
||||||
return torrent, fmt.Errorf("torrent not found")
|
return torrent, fmt.Errorf("torrent not found")
|
||||||
}
|
}
|
||||||
data := dt[0]
|
data := dt[0]
|
||||||
status := "downloading"
|
status := "downloading"
|
||||||
|
if data.Status == 100 {
|
||||||
|
status = "downloaded"
|
||||||
|
}
|
||||||
name := common.RemoveInvalidChars(data.Name)
|
name := common.RemoveInvalidChars(data.Name)
|
||||||
torrent.Id = data.ID
|
torrent.Id = data.ID
|
||||||
torrent.Name = name
|
torrent.Name = name
|
||||||
@@ -187,8 +188,8 @@ func (r *DebridLink) SubmitMagnet(torrent *Torrent) (*Torrent, error) {
|
|||||||
|
|
||||||
func (r *DebridLink) CheckStatus(torrent *Torrent, isSymlink bool) (*Torrent, error) {
|
func (r *DebridLink) CheckStatus(torrent *Torrent, isSymlink bool) (*Torrent, error) {
|
||||||
for {
|
for {
|
||||||
torrent, err := r.GetTorrent(torrent.Id)
|
t, err := r.GetTorrent(torrent.Id)
|
||||||
|
torrent = t
|
||||||
if err != nil || torrent == nil {
|
if err != nil || torrent == nil {
|
||||||
return torrent, err
|
return torrent, err
|
||||||
}
|
}
|
||||||
@@ -206,7 +207,7 @@ func (r *DebridLink) CheckStatus(torrent *Torrent, isSymlink bool) (*Torrent, er
|
|||||||
break
|
break
|
||||||
} else if status == "downloading" {
|
} else if status == "downloading" {
|
||||||
if !r.DownloadUncached {
|
if !r.DownloadUncached {
|
||||||
go r.DeleteTorrent(torrent)
|
go torrent.Delete()
|
||||||
return torrent, fmt.Errorf("torrent: %s not cached", torrent.Name)
|
return torrent, fmt.Errorf("torrent: %s not cached", torrent.Name)
|
||||||
}
|
}
|
||||||
// Break out of the loop if the torrent is downloading.
|
// Break out of the loop if the torrent is downloading.
|
||||||
@@ -250,6 +251,7 @@ func NewDebridLink(dc common.DebridConfig, cache *common.Cache) *DebridLink {
|
|||||||
rl := common.ParseRateLimit(dc.RateLimit)
|
rl := common.ParseRateLimit(dc.RateLimit)
|
||||||
headers := map[string]string{
|
headers := map[string]string{
|
||||||
"Authorization": fmt.Sprintf("Bearer %s", dc.APIKey),
|
"Authorization": fmt.Sprintf("Bearer %s", dc.APIKey),
|
||||||
|
"Content-Type": "application/json",
|
||||||
}
|
}
|
||||||
client := common.NewRLHTTPClient(rl, headers)
|
client := common.NewRLHTTPClient(rl, headers)
|
||||||
logger := common.NewLogger(dc.Name, os.Stdout)
|
logger := common.NewLogger(dc.Name, os.Stdout)
|
||||||
|
|||||||
@@ -10,6 +10,7 @@ import (
|
|||||||
gourl "net/url"
|
gourl "net/url"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
|
"slices"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
)
|
)
|
||||||
@@ -40,13 +41,13 @@ func GetTorrentFiles(data structs.RealDebridTorrentInfo) []TorrentFile {
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
fileId := f.ID
|
fileId := f.ID
|
||||||
file := &TorrentFile{
|
file := TorrentFile{
|
||||||
Name: name,
|
Name: name,
|
||||||
Path: name,
|
Path: name,
|
||||||
Size: int64(f.Bytes),
|
Size: f.Bytes,
|
||||||
Id: strconv.Itoa(fileId),
|
Id: strconv.Itoa(fileId),
|
||||||
}
|
}
|
||||||
files = append(files, *file)
|
files = append(files, file)
|
||||||
}
|
}
|
||||||
return files
|
return files
|
||||||
}
|
}
|
||||||
@@ -179,13 +180,13 @@ func (r *RealDebrid) CheckStatus(torrent *Torrent, isSymlink bool) (*Torrent, er
|
|||||||
torrent.Links = data.Links
|
torrent.Links = data.Links
|
||||||
torrent.Status = status
|
torrent.Status = status
|
||||||
torrent.Debrid = r
|
torrent.Debrid = r
|
||||||
|
downloadingStatus := []string{"downloading", "magnet_conversion", "queued", "compressing", "uploading"}
|
||||||
if status == "error" || status == "dead" || status == "magnet_error" {
|
if status == "error" || status == "dead" || status == "magnet_error" {
|
||||||
return torrent, fmt.Errorf("torrent: %s has error", torrent.Name)
|
return torrent, fmt.Errorf("torrent: %s has error: %s", torrent.Name, status)
|
||||||
} else if status == "waiting_files_selection" {
|
} else if status == "waiting_files_selection" {
|
||||||
files := GetTorrentFiles(data)
|
files := GetTorrentFiles(data)
|
||||||
torrent.Files = files
|
torrent.Files = files
|
||||||
if len(files) == 0 {
|
if len(files) == 0 {
|
||||||
r.DeleteTorrent(torrent)
|
|
||||||
return torrent, fmt.Errorf("no video files found")
|
return torrent, fmt.Errorf("no video files found")
|
||||||
}
|
}
|
||||||
filesId := make([]string, 0)
|
filesId := make([]string, 0)
|
||||||
@@ -212,9 +213,8 @@ func (r *RealDebrid) CheckStatus(torrent *Torrent, isSymlink bool) (*Torrent, er
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
break
|
break
|
||||||
} else if status == "downloading" {
|
} else if slices.Contains(downloadingStatus, status) {
|
||||||
if !r.DownloadUncached {
|
if !r.DownloadUncached {
|
||||||
go r.DeleteTorrent(torrent)
|
|
||||||
return torrent, fmt.Errorf("torrent: %s not cached", torrent.Name)
|
return torrent, fmt.Errorf("torrent: %s not cached", torrent.Name)
|
||||||
}
|
}
|
||||||
// Break out of the loop if the torrent is downloading.
|
// Break out of the loop if the torrent is downloading.
|
||||||
|
|||||||
@@ -11,3 +11,12 @@ func (d *DebridService) Get() Service {
|
|||||||
}
|
}
|
||||||
return d.debrids[d.lastUsed]
|
return d.debrids[d.lastUsed]
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (d *DebridService) GetByName(name string) Service {
|
||||||
|
for _, deb := range d.debrids {
|
||||||
|
if deb.GetName() == name {
|
||||||
|
return deb
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|||||||
@@ -36,8 +36,8 @@ type debridLinkTorrentInfo struct {
|
|||||||
} `json:"trackers"`
|
} `json:"trackers"`
|
||||||
Created int64 `json:"created"`
|
Created int64 `json:"created"`
|
||||||
DownloadPercent float64 `json:"downloadPercent"`
|
DownloadPercent float64 `json:"downloadPercent"`
|
||||||
DownloadSpeed int64 `json:"downloadSpeed"`
|
DownloadSpeed int `json:"downloadSpeed"`
|
||||||
UploadSpeed int64 `json:"uploadSpeed"`
|
UploadSpeed int `json:"uploadSpeed"`
|
||||||
}
|
}
|
||||||
|
|
||||||
type DebridLinkTorrentInfo DebridLinkAPIResponse[[]debridLinkTorrentInfo]
|
type DebridLinkTorrentInfo DebridLinkAPIResponse[[]debridLinkTorrentInfo]
|
||||||
|
|||||||
@@ -75,7 +75,7 @@ type RealDebridTorrentInfo struct {
|
|||||||
OriginalFilename string `json:"original_filename"`
|
OriginalFilename string `json:"original_filename"`
|
||||||
Hash string `json:"hash"`
|
Hash string `json:"hash"`
|
||||||
Bytes int64 `json:"bytes"`
|
Bytes int64 `json:"bytes"`
|
||||||
OriginalBytes int `json:"original_bytes"`
|
OriginalBytes int64 `json:"original_bytes"`
|
||||||
Host string `json:"host"`
|
Host string `json:"host"`
|
||||||
Split int `json:"split"`
|
Split int `json:"split"`
|
||||||
Progress float64 `json:"progress"`
|
Progress float64 `json:"progress"`
|
||||||
@@ -84,12 +84,12 @@ type RealDebridTorrentInfo struct {
|
|||||||
Files []struct {
|
Files []struct {
|
||||||
ID int `json:"id"`
|
ID int `json:"id"`
|
||||||
Path string `json:"path"`
|
Path string `json:"path"`
|
||||||
Bytes int `json:"bytes"`
|
Bytes int64 `json:"bytes"`
|
||||||
Selected int `json:"selected"`
|
Selected int `json:"selected"`
|
||||||
} `json:"files"`
|
} `json:"files"`
|
||||||
Links []string `json:"links"`
|
Links []string `json:"links"`
|
||||||
Ended string `json:"ended,omitempty"`
|
Ended string `json:"ended,omitempty"`
|
||||||
Speed int64 `json:"speed,omitempty"`
|
Speed int `json:"speed,omitempty"`
|
||||||
Seeders int `json:"seeders,omitempty"`
|
Seeders int `json:"seeders,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -97,11 +97,11 @@ type RealDebridUnrestrictResponse struct {
|
|||||||
Id string `json:"id"`
|
Id string `json:"id"`
|
||||||
Filename string `json:"filename"`
|
Filename string `json:"filename"`
|
||||||
MimeType string `json:"mimeType"`
|
MimeType string `json:"mimeType"`
|
||||||
Filesize int64 `json:"filesize"`
|
Filesize int `json:"filesize"`
|
||||||
Link string `json:"link"`
|
Link string `json:"link"`
|
||||||
Host string `json:"host"`
|
Host string `json:"host"`
|
||||||
Chunks int64 `json:"chunks"`
|
Chunks int `json:"chunks"`
|
||||||
Crc int64 `json:"crc"`
|
Crc int `json:"crc"`
|
||||||
Download string `json:"download"`
|
Download string `json:"download"`
|
||||||
Streamable int `json:"streamable"`
|
Streamable int `json:"streamable"`
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -11,7 +11,7 @@ type TorboxAPIResponse[T any] struct {
|
|||||||
|
|
||||||
type TorBoxAvailableResponse TorboxAPIResponse[map[string]struct {
|
type TorBoxAvailableResponse TorboxAPIResponse[map[string]struct {
|
||||||
Name string `json:"name"`
|
Name string `json:"name"`
|
||||||
Size int64 `json:"size"`
|
Size int `json:"size"`
|
||||||
Hash string `json:"hash"`
|
Hash string `json:"hash"`
|
||||||
}]
|
}]
|
||||||
|
|
||||||
@@ -34,9 +34,9 @@ type torboxInfo struct {
|
|||||||
DownloadState string `json:"download_state"`
|
DownloadState string `json:"download_state"`
|
||||||
Seeds int `json:"seeds"`
|
Seeds int `json:"seeds"`
|
||||||
Peers int `json:"peers"`
|
Peers int `json:"peers"`
|
||||||
Ratio int `json:"ratio"`
|
Ratio float64 `json:"ratio"`
|
||||||
Progress float64 `json:"progress"`
|
Progress float64 `json:"progress"`
|
||||||
DownloadSpeed int64 `json:"download_speed"`
|
DownloadSpeed int `json:"download_speed"`
|
||||||
UploadSpeed int `json:"upload_speed"`
|
UploadSpeed int `json:"upload_speed"`
|
||||||
Eta int `json:"eta"`
|
Eta int `json:"eta"`
|
||||||
TorrentFile bool `json:"torrent_file"`
|
TorrentFile bool `json:"torrent_file"`
|
||||||
|
|||||||
@@ -12,6 +12,7 @@ import (
|
|||||||
gourl "net/url"
|
gourl "net/url"
|
||||||
"os"
|
"os"
|
||||||
"path"
|
"path"
|
||||||
|
"path/filepath"
|
||||||
"slices"
|
"slices"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
@@ -44,8 +45,8 @@ func (r *Torbox) IsAvailable(infohashes []string) map[string]bool {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Divide hashes into groups of 100
|
// Divide hashes into groups of 100
|
||||||
for i := 0; i < len(hashes); i += 200 {
|
for i := 0; i < len(hashes); i += 100 {
|
||||||
end := i + 200
|
end := i + 100
|
||||||
if end > len(hashes) {
|
if end > len(hashes) {
|
||||||
end = len(hashes)
|
end = len(hashes)
|
||||||
}
|
}
|
||||||
@@ -157,24 +158,36 @@ func (r *Torbox) GetTorrent(id string) (*Torrent, error) {
|
|||||||
torrent.Name = name
|
torrent.Name = name
|
||||||
torrent.Bytes = data.Size
|
torrent.Bytes = data.Size
|
||||||
torrent.Folder = name
|
torrent.Folder = name
|
||||||
torrent.Progress = data.Progress
|
torrent.Progress = data.Progress * 100
|
||||||
torrent.Status = getStatus(data.DownloadState, data.DownloadFinished)
|
torrent.Status = getStatus(data.DownloadState, data.DownloadFinished)
|
||||||
torrent.Speed = data.DownloadSpeed
|
torrent.Speed = data.DownloadSpeed
|
||||||
torrent.Seeders = data.Seeds
|
torrent.Seeders = data.Seeds
|
||||||
torrent.Filename = name
|
torrent.Filename = name
|
||||||
torrent.OriginalFilename = name
|
torrent.OriginalFilename = name
|
||||||
files := make([]TorrentFile, len(data.Files))
|
files := make([]TorrentFile, 0)
|
||||||
for i, f := range data.Files {
|
for _, f := range data.Files {
|
||||||
files[i] = TorrentFile{
|
fileName := filepath.Base(f.Name)
|
||||||
Id: strconv.Itoa(f.Id),
|
if (!common.RegexMatch(common.VIDEOMATCH, fileName) &&
|
||||||
Name: f.Name,
|
!common.RegexMatch(common.SUBMATCH, fileName) &&
|
||||||
Size: f.Size,
|
!common.RegexMatch(common.MUSICMATCH, fileName)) || common.RegexMatch(common.SAMPLEMATCH, fileName) {
|
||||||
|
continue
|
||||||
}
|
}
|
||||||
|
file := TorrentFile{
|
||||||
|
Id: strconv.Itoa(f.Id),
|
||||||
|
Name: fileName,
|
||||||
|
Size: f.Size,
|
||||||
|
Path: fileName,
|
||||||
|
}
|
||||||
|
files = append(files, file)
|
||||||
}
|
}
|
||||||
if len(files) > 0 && name == data.Hash {
|
var cleanPath string
|
||||||
cleanPath := path.Clean(files[0].Name)
|
if len(files) > 0 {
|
||||||
torrent.OriginalFilename = strings.Split(strings.TrimPrefix(cleanPath, "/"), "/")[0]
|
cleanPath = path.Clean(data.Files[0].Name)
|
||||||
|
} else {
|
||||||
|
cleanPath = path.Clean(data.Name)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
torrent.OriginalFilename = strings.Split(cleanPath, "/")[0]
|
||||||
torrent.Files = files
|
torrent.Files = files
|
||||||
torrent.Debrid = r
|
torrent.Debrid = r
|
||||||
return torrent, nil
|
return torrent, nil
|
||||||
@@ -203,7 +216,7 @@ func (r *Torbox) CheckStatus(torrent *Torrent, isSymlink bool) (*Torrent, error)
|
|||||||
break
|
break
|
||||||
} else if status == "downloading" {
|
} else if status == "downloading" {
|
||||||
if !r.DownloadUncached {
|
if !r.DownloadUncached {
|
||||||
go r.DeleteTorrent(torrent)
|
go torrent.Delete()
|
||||||
return torrent, fmt.Errorf("torrent: %s not cached", torrent.Name)
|
return torrent, fmt.Errorf("torrent: %s not cached", torrent.Name)
|
||||||
}
|
}
|
||||||
// Break out of the loop if the torrent is downloading.
|
// Break out of the loop if the torrent is downloading.
|
||||||
@@ -216,7 +229,7 @@ func (r *Torbox) CheckStatus(torrent *Torrent, isSymlink bool) (*Torrent, error)
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (r *Torbox) DeleteTorrent(torrent *Torrent) {
|
func (r *Torbox) DeleteTorrent(torrent *Torrent) {
|
||||||
url := fmt.Sprintf("%s/api//torrents/controltorrent/%s", r.Host, torrent.Id)
|
url := fmt.Sprintf("%s/api/torrents/controltorrent/%s", r.Host, torrent.Id)
|
||||||
payload := map[string]string{"torrent_id": torrent.Id, "action": "Delete"}
|
payload := map[string]string{"torrent_id": torrent.Id, "action": "Delete"}
|
||||||
jsonPayload, _ := json.Marshal(payload)
|
jsonPayload, _ := json.Marshal(payload)
|
||||||
req, _ := http.NewRequest(http.MethodDelete, url, bytes.NewBuffer(jsonPayload))
|
req, _ := http.NewRequest(http.MethodDelete, url, bytes.NewBuffer(jsonPayload))
|
||||||
|
|||||||
@@ -2,13 +2,14 @@ package debrid
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"goBlack/common"
|
"goBlack/common"
|
||||||
|
"goBlack/pkg/arr"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
)
|
)
|
||||||
|
|
||||||
type Arr struct {
|
type Arr struct {
|
||||||
Name string `json:"name"`
|
Name string `json:"name"`
|
||||||
Token string `json:"token"`
|
Token string `json:"-"`
|
||||||
Host string `json:"host"`
|
Host string `json:"host"`
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -38,13 +39,13 @@ type Torrent struct {
|
|||||||
Status string `json:"status"`
|
Status string `json:"status"`
|
||||||
Added string `json:"added"`
|
Added string `json:"added"`
|
||||||
Progress float64 `json:"progress"`
|
Progress float64 `json:"progress"`
|
||||||
Speed int64 `json:"speed"`
|
Speed int `json:"speed"`
|
||||||
Seeders int `json:"seeders"`
|
Seeders int `json:"seeders"`
|
||||||
Links []string `json:"links"`
|
Links []string `json:"links"`
|
||||||
DownloadLinks []TorrentDownloadLinks `json:"download_links"`
|
DownloadLinks []TorrentDownloadLinks `json:"download_links"`
|
||||||
|
|
||||||
Debrid Service
|
Debrid Service
|
||||||
Arr *Arr
|
Arr *arr.Arr
|
||||||
}
|
}
|
||||||
|
|
||||||
type TorrentDownloadLinks struct {
|
type TorrentDownloadLinks struct {
|
||||||
@@ -58,15 +59,26 @@ func (t *Torrent) GetSymlinkFolder(parent string) string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (t *Torrent) GetMountFolder(rClonePath string) string {
|
func (t *Torrent) GetMountFolder(rClonePath string) string {
|
||||||
if common.FileReady(filepath.Join(rClonePath, t.OriginalFilename)) {
|
possiblePaths := []string{
|
||||||
return t.OriginalFilename
|
t.OriginalFilename,
|
||||||
} else if common.FileReady(filepath.Join(rClonePath, t.Filename)) {
|
t.Filename,
|
||||||
return t.Filename
|
common.RemoveExtension(t.OriginalFilename),
|
||||||
} else if pathWithNoExt := common.RemoveExtension(t.OriginalFilename); common.FileReady(filepath.Join(rClonePath, pathWithNoExt)) {
|
|
||||||
return pathWithNoExt
|
|
||||||
} else {
|
|
||||||
return ""
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
for _, path := range possiblePaths {
|
||||||
|
if path != "" && common.FileReady(filepath.Join(rClonePath, path)) {
|
||||||
|
return path
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *Torrent) Delete() {
|
||||||
|
if t.Debrid == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
t.Debrid.DeleteTorrent(t)
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
type TorrentFile struct {
|
type TorrentFile struct {
|
||||||
|
|||||||
@@ -40,7 +40,7 @@ Loop:
|
|||||||
fmt.Printf(" %s: transferred %d / %d bytes (%.2f%%)\n",
|
fmt.Printf(" %s: transferred %d / %d bytes (%.2f%%)\n",
|
||||||
resp.Filename,
|
resp.Filename,
|
||||||
resp.BytesComplete(),
|
resp.BytesComplete(),
|
||||||
resp.Size,
|
resp.Size(),
|
||||||
100*resp.Progress())
|
100*resp.Progress())
|
||||||
|
|
||||||
case <-resp.Done:
|
case <-resp.Done:
|
||||||
@@ -3,7 +3,9 @@ package proxy
|
|||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"cmp"
|
"cmp"
|
||||||
|
"context"
|
||||||
"encoding/xml"
|
"encoding/xml"
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"github.com/elazarl/goproxy"
|
"github.com/elazarl/goproxy"
|
||||||
"github.com/elazarl/goproxy/ext/auth"
|
"github.com/elazarl/goproxy/ext/auth"
|
||||||
@@ -73,11 +75,10 @@ type Proxy struct {
|
|||||||
password string
|
password string
|
||||||
cachedOnly bool
|
cachedOnly bool
|
||||||
debrid debrid.Service
|
debrid debrid.Service
|
||||||
cache *common.Cache
|
|
||||||
logger *log.Logger
|
logger *log.Logger
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewProxy(config common.Config, deb *debrid.DebridService, cache *common.Cache) *Proxy {
|
func NewProxy(config common.Config, deb *debrid.DebridService) *Proxy {
|
||||||
cfg := config.Proxy
|
cfg := config.Proxy
|
||||||
port := cmp.Or(os.Getenv("PORT"), cfg.Port, "8181")
|
port := cmp.Or(os.Getenv("PORT"), cfg.Port, "8181")
|
||||||
return &Proxy{
|
return &Proxy{
|
||||||
@@ -88,7 +89,6 @@ func NewProxy(config common.Config, deb *debrid.DebridService, cache *common.Cac
|
|||||||
password: cfg.Password,
|
password: cfg.Password,
|
||||||
cachedOnly: *cfg.CachedOnly,
|
cachedOnly: *cfg.CachedOnly,
|
||||||
debrid: deb.Get(),
|
debrid: deb.Get(),
|
||||||
cache: cache,
|
|
||||||
logger: common.NewLogger("Proxy", os.Stdout),
|
logger: common.NewLogger("Proxy", os.Stdout),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -308,7 +308,7 @@ func UrlMatches(re *regexp.Regexp) goproxy.ReqConditionFunc {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *Proxy) Start() {
|
func (p *Proxy) Start(ctx context.Context) error {
|
||||||
username, password := p.username, p.password
|
username, password := p.username, p.password
|
||||||
proxy := goproxy.NewProxyHttpServer()
|
proxy := goproxy.NewProxyHttpServer()
|
||||||
if username != "" || password != "" {
|
if username != "" || password != "" {
|
||||||
@@ -328,6 +328,17 @@ func (p *Proxy) Start() {
|
|||||||
|
|
||||||
proxy.Verbose = p.debug
|
proxy.Verbose = p.debug
|
||||||
portFmt := fmt.Sprintf(":%s", p.port)
|
portFmt := fmt.Sprintf(":%s", p.port)
|
||||||
|
srv := &http.Server{
|
||||||
|
Addr: portFmt,
|
||||||
|
Handler: proxy,
|
||||||
|
}
|
||||||
p.logger.Printf("[*] Starting proxy server on %s\n", portFmt)
|
p.logger.Printf("[*] Starting proxy server on %s\n", portFmt)
|
||||||
p.logger.Fatal(http.ListenAndServe(fmt.Sprintf("%s", portFmt), proxy))
|
go func() {
|
||||||
|
if err := srv.ListenAndServe(); err != nil && !errors.Is(err, http.ErrServerClosed) {
|
||||||
|
p.logger.Printf("Error starting proxy server: %v\n", err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
<-ctx.Done()
|
||||||
|
p.logger.Println("Shutting down gracefully...")
|
||||||
|
return srv.Shutdown(context.Background())
|
||||||
}
|
}
|
||||||
|
|||||||
103
pkg/qbit/arr.go
103
pkg/qbit/arr.go
@@ -1,103 +0,0 @@
|
|||||||
package qbit
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"cmp"
|
|
||||||
"encoding/json"
|
|
||||||
"goBlack/common"
|
|
||||||
"goBlack/pkg/debrid"
|
|
||||||
"net/http"
|
|
||||||
gourl "net/url"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
func (q *QBit) RefreshArr(arr *debrid.Arr) {
|
|
||||||
if arr.Token == "" || arr.Host == "" {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
url, err := common.JoinURL(arr.Host, "api/v3/command")
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
payload := map[string]string{"name": "RefreshMonitoredDownloads"}
|
|
||||||
jsonPayload, err := json.Marshal(payload)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
client := &http.Client{}
|
|
||||||
req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonPayload))
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
req.Header.Set("Content-Type", "application/json")
|
|
||||||
req.Header.Set("X-Api-Key", arr.Token)
|
|
||||||
|
|
||||||
resp, reqErr := client.Do(req)
|
|
||||||
if reqErr == nil {
|
|
||||||
statusOk := strconv.Itoa(resp.StatusCode)[0] == '2'
|
|
||||||
if statusOk {
|
|
||||||
if q.debug {
|
|
||||||
q.logger.Printf("Refreshed monitored downloads for %s", cmp.Or(arr.Name, arr.Host))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if reqErr != nil {
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (q *QBit) GetArrHistory(arr *debrid.Arr, downloadId, eventType string) *debrid.ArrHistorySchema {
|
|
||||||
query := gourl.Values{}
|
|
||||||
if downloadId != "" {
|
|
||||||
query.Add("downloadId", downloadId)
|
|
||||||
}
|
|
||||||
query.Add("eventType", eventType)
|
|
||||||
query.Add("pageSize", "100")
|
|
||||||
url, _ := common.JoinURL(arr.Host, "history")
|
|
||||||
url += "?" + query.Encode()
|
|
||||||
resp, err := http.Get(url)
|
|
||||||
if err != nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
var data *debrid.ArrHistorySchema
|
|
||||||
|
|
||||||
if err = json.NewDecoder(resp.Body).Decode(&data); err != nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return data
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
func (q *QBit) MarkArrAsFailed(torrent *Torrent, arr *debrid.Arr) error {
|
|
||||||
downloadId := strings.ToUpper(torrent.Hash)
|
|
||||||
history := q.GetArrHistory(arr, downloadId, "grabbed")
|
|
||||||
if history == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
torrentId := 0
|
|
||||||
for _, record := range history.Records {
|
|
||||||
if strings.EqualFold(record.DownloadID, downloadId) {
|
|
||||||
torrentId = record.ID
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if torrentId != 0 {
|
|
||||||
url, err := common.JoinURL(arr.Host, "history/failed/", strconv.Itoa(torrentId))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
req, err := http.NewRequest(http.MethodPost, url, nil)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
client := &http.Client{}
|
|
||||||
_, err = client.Do(req)
|
|
||||||
if err == nil {
|
|
||||||
q.logger.Printf("Marked torrent: %s as failed", torrent.Name)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
@@ -1,41 +0,0 @@
|
|||||||
package qbit
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/go-chi/chi/v5"
|
|
||||||
"net/http"
|
|
||||||
)
|
|
||||||
|
|
||||||
func (q *QBit) AddRoutes(r chi.Router) http.Handler {
|
|
||||||
r.Route("/api/v2", func(r chi.Router) {
|
|
||||||
r.Post("/auth/login", q.handleLogin)
|
|
||||||
|
|
||||||
r.Group(func(r chi.Router) {
|
|
||||||
//r.Use(q.authMiddleware)
|
|
||||||
r.Use(q.authContext)
|
|
||||||
r.Route("/torrents", func(r chi.Router) {
|
|
||||||
r.Use(HashesCtx)
|
|
||||||
r.Get("/info", q.handleTorrentsInfo)
|
|
||||||
r.Post("/add", q.handleTorrentsAdd)
|
|
||||||
r.Post("/delete", q.handleTorrentsDelete)
|
|
||||||
r.Get("/categories", q.handleCategories)
|
|
||||||
r.Post("/createCategory", q.handleCreateCategory)
|
|
||||||
|
|
||||||
r.Get("/pause", q.handleTorrentsPause)
|
|
||||||
r.Get("/resume", q.handleTorrentsResume)
|
|
||||||
r.Get("/recheck", q.handleTorrentRecheck)
|
|
||||||
r.Get("/properties", q.handleTorrentProperties)
|
|
||||||
r.Get("/files", q.handleTorrentFiles)
|
|
||||||
})
|
|
||||||
|
|
||||||
r.Route("/app", func(r chi.Router) {
|
|
||||||
r.Get("/version", q.handleVersion)
|
|
||||||
r.Get("/webapiVersion", q.handleWebAPIVersion)
|
|
||||||
r.Get("/preferences", q.handlePreferences)
|
|
||||||
r.Get("/buildInfo", q.handleBuildInfo)
|
|
||||||
r.Get("/shutdown", q.shutdown)
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
})
|
|
||||||
return r
|
|
||||||
}
|
|
||||||
@@ -1,40 +0,0 @@
|
|||||||
package qbit
|
|
||||||
|
|
||||||
import (
|
|
||||||
"net/http"
|
|
||||||
"path/filepath"
|
|
||||||
)
|
|
||||||
|
|
||||||
func (q *QBit) handleVersion(w http.ResponseWriter, r *http.Request) {
|
|
||||||
_, _ = w.Write([]byte("v4.3.2"))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (q *QBit) handleWebAPIVersion(w http.ResponseWriter, r *http.Request) {
|
|
||||||
_, _ = w.Write([]byte("2.7"))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (q *QBit) handlePreferences(w http.ResponseWriter, r *http.Request) {
|
|
||||||
preferences := NewAppPreferences()
|
|
||||||
|
|
||||||
preferences.WebUiUsername = q.Username
|
|
||||||
preferences.SavePath = q.DownloadFolder
|
|
||||||
preferences.TempPath = filepath.Join(q.DownloadFolder, "temp")
|
|
||||||
|
|
||||||
JSONResponse(w, preferences, http.StatusOK)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (q *QBit) handleBuildInfo(w http.ResponseWriter, r *http.Request) {
|
|
||||||
res := BuildInfo{
|
|
||||||
Bitness: 64,
|
|
||||||
Boost: "1.75.0",
|
|
||||||
Libtorrent: "1.2.11.0",
|
|
||||||
Openssl: "1.1.1i",
|
|
||||||
Qt: "5.15.2",
|
|
||||||
Zlib: "1.2.11",
|
|
||||||
}
|
|
||||||
JSONResponse(w, res, http.StatusOK)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (q *QBit) shutdown(w http.ResponseWriter, r *http.Request) {
|
|
||||||
w.WriteHeader(http.StatusOK)
|
|
||||||
}
|
|
||||||
@@ -1,9 +0,0 @@
|
|||||||
package qbit
|
|
||||||
|
|
||||||
import (
|
|
||||||
"net/http"
|
|
||||||
)
|
|
||||||
|
|
||||||
func (q *QBit) handleLogin(w http.ResponseWriter, r *http.Request) {
|
|
||||||
_, _ = w.Write([]byte("Ok."))
|
|
||||||
}
|
|
||||||
@@ -1,80 +1,17 @@
|
|||||||
package qbit
|
package qbit
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"cmp"
|
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"github.com/go-chi/chi/v5"
|
|
||||||
"github.com/go-chi/chi/v5/middleware"
|
|
||||||
"goBlack/common"
|
"goBlack/common"
|
||||||
"goBlack/pkg/debrid"
|
"goBlack/pkg/debrid"
|
||||||
"log"
|
"goBlack/pkg/qbit/server"
|
||||||
"net/http"
|
|
||||||
"os"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
type WorkerType struct {
|
func Start(ctx context.Context, config *common.Config, deb *debrid.DebridService) error {
|
||||||
ticker *time.Ticker
|
srv := server.NewServer(config, deb)
|
||||||
ctx context.Context
|
if err := srv.Start(ctx); err != nil {
|
||||||
}
|
return fmt.Errorf("failed to start qbit server: %w", err)
|
||||||
|
|
||||||
type Worker struct {
|
|
||||||
types map[string]WorkerType
|
|
||||||
}
|
|
||||||
|
|
||||||
type QBit struct {
|
|
||||||
Username string `json:"username"`
|
|
||||||
Password string `json:"password"`
|
|
||||||
Port string `json:"port"`
|
|
||||||
DownloadFolder string `json:"download_folder"`
|
|
||||||
Categories []string `json:"categories"`
|
|
||||||
debrid *debrid.DebridService
|
|
||||||
cache *common.Cache
|
|
||||||
storage *TorrentStorage
|
|
||||||
debug bool
|
|
||||||
logger *log.Logger
|
|
||||||
arrs sync.Map // host:token (Used for refreshing in worker)
|
|
||||||
RefreshInterval int
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewQBit(config *common.Config, deb *debrid.DebridService, cache *common.Cache) *QBit {
|
|
||||||
cfg := config.QBitTorrent
|
|
||||||
storage := NewTorrentStorage("torrents.json")
|
|
||||||
port := cmp.Or(cfg.Port, os.Getenv("QBIT_PORT"), "8182")
|
|
||||||
refreshInterval := cmp.Or(cfg.RefreshInterval, 10)
|
|
||||||
return &QBit{
|
|
||||||
Username: cfg.Username,
|
|
||||||
Password: cfg.Password,
|
|
||||||
Port: port,
|
|
||||||
DownloadFolder: cfg.DownloadFolder,
|
|
||||||
Categories: cfg.Categories,
|
|
||||||
debrid: deb,
|
|
||||||
cache: cache,
|
|
||||||
debug: cfg.Debug,
|
|
||||||
storage: storage,
|
|
||||||
logger: common.NewLogger("QBit", os.Stdout),
|
|
||||||
arrs: sync.Map{},
|
|
||||||
RefreshInterval: refreshInterval,
|
|
||||||
}
|
}
|
||||||
}
|
return nil
|
||||||
|
|
||||||
func (q *QBit) Start() {
|
|
||||||
|
|
||||||
r := chi.NewRouter()
|
|
||||||
if q.debug {
|
|
||||||
r.Use(middleware.Logger)
|
|
||||||
}
|
|
||||||
r.Use(middleware.Recoverer)
|
|
||||||
|
|
||||||
q.AddRoutes(r)
|
|
||||||
|
|
||||||
ctx := context.Background()
|
|
||||||
|
|
||||||
go q.StartWorker(ctx)
|
|
||||||
|
|
||||||
q.logger.Printf("Starting QBit server on :%s", q.Port)
|
|
||||||
port := fmt.Sprintf(":%s", q.Port)
|
|
||||||
q.logger.Fatal(http.ListenAndServe(port, r))
|
|
||||||
}
|
}
|
||||||
|
|||||||
107
pkg/qbit/qbit.go
107
pkg/qbit/qbit.go
@@ -1,107 +0,0 @@
|
|||||||
package qbit
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"github.com/google/uuid"
|
|
||||||
"goBlack/common"
|
|
||||||
"goBlack/pkg/debrid"
|
|
||||||
"io"
|
|
||||||
"mime/multipart"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
func (q *QBit) AddMagnet(ctx context.Context, url, category string) error {
|
|
||||||
magnet, err := common.GetMagnetFromUrl(url)
|
|
||||||
if err != nil {
|
|
||||||
q.logger.Printf("Error parsing magnet link: %v\n", err)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
err = q.Process(ctx, magnet, category)
|
|
||||||
if err != nil {
|
|
||||||
q.logger.Println("Failed to process magnet:", err)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (q *QBit) AddTorrent(ctx context.Context, fileHeader *multipart.FileHeader, category string) error {
|
|
||||||
file, _ := fileHeader.Open()
|
|
||||||
defer file.Close()
|
|
||||||
var reader io.Reader = file
|
|
||||||
magnet, err := common.GetMagnetFromFile(reader, fileHeader.Filename)
|
|
||||||
if err != nil {
|
|
||||||
q.logger.Printf("Error reading file: %s", fileHeader.Filename)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
err = q.Process(ctx, magnet, category)
|
|
||||||
if err != nil {
|
|
||||||
q.logger.Println("Failed to process torrent:", err)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (q *QBit) Process(ctx context.Context, magnet *common.Magnet, category string) error {
|
|
||||||
torrent := q.CreateTorrentFromMagnet(magnet, category)
|
|
||||||
arr := &debrid.Arr{
|
|
||||||
Name: category,
|
|
||||||
Token: ctx.Value("token").(string),
|
|
||||||
Host: ctx.Value("host").(string),
|
|
||||||
}
|
|
||||||
isSymlink := ctx.Value("isSymlink").(bool)
|
|
||||||
debridTorrent, err := debrid.ProcessQBitTorrent(q.debrid, magnet, arr, isSymlink)
|
|
||||||
if err != nil || debridTorrent == nil {
|
|
||||||
if err == nil {
|
|
||||||
err = fmt.Errorf("failed to process torrent")
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
torrent = q.UpdateTorrentMin(torrent, debridTorrent)
|
|
||||||
q.storage.AddOrUpdate(torrent)
|
|
||||||
go q.processFiles(torrent, debridTorrent, arr, isSymlink) // We can send async for file processing not to delay the response
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (q *QBit) CreateTorrentFromMagnet(magnet *common.Magnet, category string) *Torrent {
|
|
||||||
torrent := &Torrent{
|
|
||||||
ID: uuid.NewString(),
|
|
||||||
Hash: strings.ToLower(magnet.InfoHash),
|
|
||||||
Name: magnet.Name,
|
|
||||||
Size: magnet.Size,
|
|
||||||
Category: category,
|
|
||||||
State: "downloading",
|
|
||||||
MagnetUri: magnet.Link,
|
|
||||||
|
|
||||||
Tracker: "udp://tracker.opentrackr.org:1337",
|
|
||||||
UpLimit: -1,
|
|
||||||
DlLimit: -1,
|
|
||||||
AutoTmm: false,
|
|
||||||
Ratio: 1,
|
|
||||||
RatioLimit: 1,
|
|
||||||
}
|
|
||||||
return torrent
|
|
||||||
}
|
|
||||||
|
|
||||||
func (q *QBit) processFiles(torrent *Torrent, debridTorrent *debrid.Torrent, arr *debrid.Arr, isSymlink bool) {
|
|
||||||
for debridTorrent.Status != "downloaded" {
|
|
||||||
progress := debridTorrent.Progress
|
|
||||||
q.logger.Printf("%s Download Progress: %.2f%%", debridTorrent.Debrid.GetName(), progress)
|
|
||||||
time.Sleep(5 * time.Second)
|
|
||||||
dbT, err := debridTorrent.Debrid.CheckStatus(debridTorrent, isSymlink)
|
|
||||||
if err != nil {
|
|
||||||
q.logger.Printf("Error checking status: %v", err)
|
|
||||||
q.MarkAsFailed(torrent)
|
|
||||||
q.RefreshArr(arr)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
debridTorrent = dbT
|
|
||||||
torrent = q.UpdateTorrentMin(torrent, debridTorrent)
|
|
||||||
}
|
|
||||||
if isSymlink {
|
|
||||||
q.processSymlink(torrent, debridTorrent, arr)
|
|
||||||
} else {
|
|
||||||
q.processManualFiles(torrent, debridTorrent, arr)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
42
pkg/qbit/server/app_handlers.go
Normal file
42
pkg/qbit/server/app_handlers.go
Normal file
@@ -0,0 +1,42 @@
|
|||||||
|
package server
|
||||||
|
|
||||||
|
import (
|
||||||
|
"goBlack/common"
|
||||||
|
"goBlack/pkg/qbit/shared"
|
||||||
|
"net/http"
|
||||||
|
"path/filepath"
|
||||||
|
)
|
||||||
|
|
||||||
|
func (s *Server) handleVersion(w http.ResponseWriter, r *http.Request) {
|
||||||
|
_, _ = w.Write([]byte("v4.3.2"))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Server) handleWebAPIVersion(w http.ResponseWriter, r *http.Request) {
|
||||||
|
_, _ = w.Write([]byte("2.7"))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Server) handlePreferences(w http.ResponseWriter, r *http.Request) {
|
||||||
|
preferences := shared.NewAppPreferences()
|
||||||
|
|
||||||
|
preferences.WebUiUsername = s.qbit.Username
|
||||||
|
preferences.SavePath = s.qbit.DownloadFolder
|
||||||
|
preferences.TempPath = filepath.Join(s.qbit.DownloadFolder, "temp")
|
||||||
|
|
||||||
|
common.JSONResponse(w, preferences, http.StatusOK)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Server) handleBuildInfo(w http.ResponseWriter, r *http.Request) {
|
||||||
|
res := shared.BuildInfo{
|
||||||
|
Bitness: 64,
|
||||||
|
Boost: "1.75.0",
|
||||||
|
Libtorrent: "1.2.11.0",
|
||||||
|
Openssl: "1.1.1i",
|
||||||
|
Qt: "5.15.2",
|
||||||
|
Zlib: "1.2.11",
|
||||||
|
}
|
||||||
|
common.JSONResponse(w, res, http.StatusOK)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Server) shutdown(w http.ResponseWriter, r *http.Request) {
|
||||||
|
w.WriteHeader(http.StatusOK)
|
||||||
|
}
|
||||||
7
pkg/qbit/server/auth_handlers.go
Normal file
7
pkg/qbit/server/auth_handlers.go
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
package server
|
||||||
|
|
||||||
|
import "net/http"
|
||||||
|
|
||||||
|
func (s *Server) handleLogin(w http.ResponseWriter, r *http.Request) {
|
||||||
|
_, _ = w.Write([]byte("Ok."))
|
||||||
|
}
|
||||||
178
pkg/qbit/server/import.go
Normal file
178
pkg/qbit/server/import.go
Normal file
@@ -0,0 +1,178 @@
|
|||||||
|
package server
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"github.com/google/uuid"
|
||||||
|
"goBlack/common"
|
||||||
|
"goBlack/pkg/arr"
|
||||||
|
"goBlack/pkg/debrid"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
type ImportRequest struct {
|
||||||
|
ID string `json:"id"`
|
||||||
|
Path string `json:"path"`
|
||||||
|
URI string `json:"uri"`
|
||||||
|
Arr *arr.Arr `json:"arr"`
|
||||||
|
IsSymlink bool `json:"isSymlink"`
|
||||||
|
SeriesId int `json:"series"`
|
||||||
|
Seasons []int `json:"seasons"`
|
||||||
|
Episodes []string `json:"episodes"`
|
||||||
|
|
||||||
|
Failed bool `json:"failed"`
|
||||||
|
FailedAt time.Time `json:"failedAt"`
|
||||||
|
Reason string `json:"reason"`
|
||||||
|
Completed bool `json:"completed"`
|
||||||
|
CompletedAt time.Time `json:"completedAt"`
|
||||||
|
Async bool `json:"async"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type ManualImportResponseSchema struct {
|
||||||
|
Priority string `json:"priority"`
|
||||||
|
Status string `json:"status"`
|
||||||
|
Result string `json:"result"`
|
||||||
|
Queued time.Time `json:"queued"`
|
||||||
|
Trigger string `json:"trigger"`
|
||||||
|
SendUpdatesToClient bool `json:"sendUpdatesToClient"`
|
||||||
|
UpdateScheduledTask bool `json:"updateScheduledTask"`
|
||||||
|
Id int `json:"id"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewImportRequest(uri string, arr *arr.Arr, isSymlink bool) *ImportRequest {
|
||||||
|
return &ImportRequest{
|
||||||
|
ID: uuid.NewString(),
|
||||||
|
URI: uri,
|
||||||
|
Arr: arr,
|
||||||
|
Failed: false,
|
||||||
|
Completed: false,
|
||||||
|
Async: false,
|
||||||
|
IsSymlink: isSymlink,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (i *ImportRequest) Fail(reason string) {
|
||||||
|
i.Failed = true
|
||||||
|
i.FailedAt = time.Now()
|
||||||
|
i.Reason = reason
|
||||||
|
}
|
||||||
|
|
||||||
|
func (i *ImportRequest) Complete() {
|
||||||
|
i.Completed = true
|
||||||
|
i.CompletedAt = time.Now()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (i *ImportRequest) Process(s *Server) (err error) {
|
||||||
|
// Use this for now.
|
||||||
|
// This sends the torrent to the arr
|
||||||
|
q := s.qbit
|
||||||
|
magnet, err := common.GetMagnetFromUrl(i.URI)
|
||||||
|
torrent := q.CreateTorrentFromMagnet(magnet, i.Arr.Name)
|
||||||
|
debridTorrent, err := debrid.ProcessTorrent(q.Debrid, magnet, i.Arr, i.IsSymlink)
|
||||||
|
if err != nil || debridTorrent == nil {
|
||||||
|
if debridTorrent != nil {
|
||||||
|
go debridTorrent.Delete()
|
||||||
|
}
|
||||||
|
if err == nil {
|
||||||
|
err = fmt.Errorf("failed to process torrent")
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
torrent = q.UpdateTorrentMin(torrent, debridTorrent)
|
||||||
|
q.Storage.AddOrUpdate(torrent)
|
||||||
|
go q.ProcessFiles(torrent, debridTorrent, i.Arr, i.IsSymlink)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (i *ImportRequest) BetaProcess(s *Server) (err error) {
|
||||||
|
// THis actually imports the torrent into the arr. Needs more work
|
||||||
|
if i.Arr == nil {
|
||||||
|
return errors.New("invalid arr")
|
||||||
|
}
|
||||||
|
q := s.qbit
|
||||||
|
magnet, err := common.GetMagnetFromUrl(i.URI)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("error parsing magnet link: %w", err)
|
||||||
|
}
|
||||||
|
debridTorrent, err := debrid.ProcessTorrent(q.Debrid, magnet, i.Arr, true)
|
||||||
|
if err != nil || debridTorrent == nil {
|
||||||
|
if debridTorrent != nil {
|
||||||
|
go debridTorrent.Delete()
|
||||||
|
}
|
||||||
|
if err == nil {
|
||||||
|
err = fmt.Errorf("failed to process torrent")
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
debridTorrent.Arr = i.Arr
|
||||||
|
|
||||||
|
torrentPath, err := q.ProcessSymlink(debridTorrent)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to process symlink: %w", err)
|
||||||
|
}
|
||||||
|
i.Path = torrentPath
|
||||||
|
body, err := i.Arr.Import(torrentPath, i.SeriesId, i.Seasons)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to import: %w", err)
|
||||||
|
}
|
||||||
|
defer body.Close()
|
||||||
|
|
||||||
|
var resp ManualImportResponseSchema
|
||||||
|
if err := json.NewDecoder(body).Decode(&resp); err != nil {
|
||||||
|
return fmt.Errorf("failed to decode response: %w", err)
|
||||||
|
}
|
||||||
|
if resp.Status != "success" {
|
||||||
|
return fmt.Errorf("failed to import: %s", resp.Result)
|
||||||
|
}
|
||||||
|
i.Complete()
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
type ImportStore struct {
|
||||||
|
Imports map[string]*ImportRequest
|
||||||
|
mu sync.RWMutex
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewImportStore() *ImportStore {
|
||||||
|
return &ImportStore{
|
||||||
|
Imports: make(map[string]*ImportRequest),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *ImportStore) AddImport(i *ImportRequest) {
|
||||||
|
s.mu.Lock()
|
||||||
|
defer s.mu.Unlock()
|
||||||
|
s.Imports[i.ID] = i
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *ImportStore) GetImport(id string) *ImportRequest {
|
||||||
|
s.mu.RLock()
|
||||||
|
defer s.mu.RUnlock()
|
||||||
|
return s.Imports[id]
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *ImportStore) GetAllImports() []*ImportRequest {
|
||||||
|
s.mu.RLock()
|
||||||
|
defer s.mu.RUnlock()
|
||||||
|
var imports []*ImportRequest
|
||||||
|
for _, i := range s.Imports {
|
||||||
|
imports = append(imports, i)
|
||||||
|
}
|
||||||
|
return imports
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *ImportStore) DeleteImport(id string) {
|
||||||
|
s.mu.Lock()
|
||||||
|
defer s.mu.Unlock()
|
||||||
|
delete(s.Imports, id)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *ImportStore) UpdateImport(i *ImportRequest) {
|
||||||
|
s.mu.Lock()
|
||||||
|
defer s.mu.Unlock()
|
||||||
|
s.Imports[i.ID] = i
|
||||||
|
}
|
||||||
@@ -1,29 +1,14 @@
|
|||||||
package qbit
|
package server
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"crypto/subtle"
|
|
||||||
"encoding/base64"
|
"encoding/base64"
|
||||||
"github.com/go-chi/chi/v5"
|
"github.com/go-chi/chi/v5"
|
||||||
|
"goBlack/pkg/arr"
|
||||||
"net/http"
|
"net/http"
|
||||||
"strings"
|
"strings"
|
||||||
)
|
)
|
||||||
|
|
||||||
func (q *QBit) authMiddleware(next http.Handler) http.Handler {
|
|
||||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
|
||||||
user, pass, ok := r.BasicAuth()
|
|
||||||
if !ok {
|
|
||||||
http.Error(w, "Unauthorized", http.StatusUnauthorized)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if subtle.ConstantTimeCompare([]byte(user), []byte(q.Username)) != 1 || subtle.ConstantTimeCompare([]byte(pass), []byte(q.Password)) != 1 {
|
|
||||||
http.Error(w, "Unauthorized", http.StatusUnauthorized)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
next.ServeHTTP(w, r)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func DecodeAuthHeader(header string) (string, string, error) {
|
func DecodeAuthHeader(header string) (string, string, error) {
|
||||||
encodedTokens := strings.Split(header, " ")
|
encodedTokens := strings.Split(header, " ")
|
||||||
if len(encodedTokens) != 2 {
|
if len(encodedTokens) != 2 {
|
||||||
@@ -45,17 +30,38 @@ func DecodeAuthHeader(header string) (string, string, error) {
|
|||||||
return host, token, nil
|
return host, token, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (q *QBit) authContext(next http.Handler) http.Handler {
|
func (s *Server) CategoryContext(next http.Handler) http.Handler {
|
||||||
|
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
category := strings.Trim(r.URL.Query().Get("category"), "")
|
||||||
|
if category == "" {
|
||||||
|
// Get from form
|
||||||
|
_ = r.ParseForm()
|
||||||
|
category = r.Form.Get("category")
|
||||||
|
if category == "" {
|
||||||
|
// Get from multipart form
|
||||||
|
_ = r.ParseMultipartForm(0)
|
||||||
|
category = r.FormValue("category")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
ctx := r.Context()
|
||||||
|
ctx = context.WithValue(r.Context(), "category", category)
|
||||||
|
next.ServeHTTP(w, r.WithContext(ctx))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Server) authContext(next http.Handler) http.Handler {
|
||||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
host, token, err := DecodeAuthHeader(r.Header.Get("Authorization"))
|
host, token, err := DecodeAuthHeader(r.Header.Get("Authorization"))
|
||||||
ctx := r.Context()
|
category := r.Context().Value("category").(string)
|
||||||
if err == nil {
|
a := &arr.Arr{
|
||||||
ctx = context.WithValue(r.Context(), "host", host)
|
Name: category,
|
||||||
ctx = context.WithValue(ctx, "token", token)
|
|
||||||
q.arrs.Store(host, token)
|
|
||||||
next.ServeHTTP(w, r.WithContext(ctx))
|
|
||||||
return
|
|
||||||
}
|
}
|
||||||
|
if err == nil {
|
||||||
|
a.Host = host
|
||||||
|
a.Token = token
|
||||||
|
}
|
||||||
|
s.qbit.Arrs.AddOrUpdate(a)
|
||||||
|
ctx := context.WithValue(r.Context(), "arr", a)
|
||||||
next.ServeHTTP(w, r.WithContext(ctx))
|
next.ServeHTTP(w, r.WithContext(ctx))
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
51
pkg/qbit/server/routes.go
Normal file
51
pkg/qbit/server/routes.go
Normal file
@@ -0,0 +1,51 @@
|
|||||||
|
package server
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/go-chi/chi/v5"
|
||||||
|
"net/http"
|
||||||
|
)
|
||||||
|
|
||||||
|
func (s *Server) Routes(r chi.Router) http.Handler {
|
||||||
|
r.Route("/api/v2", func(r chi.Router) {
|
||||||
|
r.Use(s.CategoryContext)
|
||||||
|
r.Post("/auth/login", s.handleLogin)
|
||||||
|
|
||||||
|
r.Group(func(r chi.Router) {
|
||||||
|
r.Use(s.authContext)
|
||||||
|
r.Route("/torrents", func(r chi.Router) {
|
||||||
|
r.Use(HashesCtx)
|
||||||
|
r.Get("/info", s.handleTorrentsInfo)
|
||||||
|
r.Post("/add", s.handleTorrentsAdd)
|
||||||
|
r.Post("/delete", s.handleTorrentsDelete)
|
||||||
|
r.Get("/categories", s.handleCategories)
|
||||||
|
r.Post("/createCategory", s.handleCreateCategory)
|
||||||
|
|
||||||
|
r.Get("/pause", s.handleTorrentsPause)
|
||||||
|
r.Get("/resume", s.handleTorrentsResume)
|
||||||
|
r.Get("/recheck", s.handleTorrentRecheck)
|
||||||
|
r.Get("/properties", s.handleTorrentProperties)
|
||||||
|
r.Get("/files", s.handleTorrentFiles)
|
||||||
|
})
|
||||||
|
|
||||||
|
r.Route("/app", func(r chi.Router) {
|
||||||
|
r.Get("/version", s.handleVersion)
|
||||||
|
r.Get("/webapiVersion", s.handleWebAPIVersion)
|
||||||
|
r.Get("/preferences", s.handlePreferences)
|
||||||
|
r.Get("/buildInfo", s.handleBuildInfo)
|
||||||
|
r.Get("/shutdown", s.shutdown)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
})
|
||||||
|
r.Get("/", s.handleHome)
|
||||||
|
r.Route("/internal", func(r chi.Router) {
|
||||||
|
r.Get("/arrs", s.handleGetArrs)
|
||||||
|
r.Get("/content", s.handleContent)
|
||||||
|
r.Get("/seasons/{contentId}", s.handleSeasons)
|
||||||
|
r.Get("/episodes/{contentId}", s.handleEpisodes)
|
||||||
|
r.Post("/add", s.handleAddContent)
|
||||||
|
r.Get("/search", s.handleSearch)
|
||||||
|
r.Get("/cached", s.handleCheckCached)
|
||||||
|
})
|
||||||
|
return r
|
||||||
|
}
|
||||||
66
pkg/qbit/server/server.go
Normal file
66
pkg/qbit/server/server.go
Normal file
@@ -0,0 +1,66 @@
|
|||||||
|
package server
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"github.com/go-chi/chi/v5"
|
||||||
|
"github.com/go-chi/chi/v5/middleware"
|
||||||
|
"goBlack/common"
|
||||||
|
"goBlack/pkg/debrid"
|
||||||
|
"goBlack/pkg/qbit/shared"
|
||||||
|
"log"
|
||||||
|
"net/http"
|
||||||
|
"os"
|
||||||
|
"os/signal"
|
||||||
|
"syscall"
|
||||||
|
)
|
||||||
|
|
||||||
|
type Server struct {
|
||||||
|
qbit *shared.QBit
|
||||||
|
logger *log.Logger
|
||||||
|
debug bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewServer(config *common.Config, deb *debrid.DebridService) *Server {
|
||||||
|
logger := common.NewLogger("QBit", os.Stdout)
|
||||||
|
q := shared.NewQBit(config, deb, logger)
|
||||||
|
return &Server{
|
||||||
|
qbit: q,
|
||||||
|
logger: logger,
|
||||||
|
debug: config.QBitTorrent.Debug,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Server) Start(ctx context.Context) error {
|
||||||
|
r := chi.NewRouter()
|
||||||
|
if s.debug {
|
||||||
|
r.Use(middleware.Logger)
|
||||||
|
}
|
||||||
|
r.Use(middleware.Recoverer)
|
||||||
|
r.Handle("/static/*", http.StripPrefix("/static/", http.FileServer(http.Dir("static"))))
|
||||||
|
s.Routes(r)
|
||||||
|
|
||||||
|
go s.qbit.StartWorker(context.Background())
|
||||||
|
|
||||||
|
s.logger.Printf("Starting QBit server on :%s", s.qbit.Port)
|
||||||
|
port := fmt.Sprintf(":%s", s.qbit.Port)
|
||||||
|
srv := &http.Server{
|
||||||
|
Addr: port,
|
||||||
|
Handler: r,
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx, stop := signal.NotifyContext(ctx, os.Interrupt, syscall.SIGTERM)
|
||||||
|
defer stop()
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
if err := srv.ListenAndServe(); err != nil && !errors.Is(err, http.ErrServerClosed) {
|
||||||
|
fmt.Printf("Error starting server: %v\n", err)
|
||||||
|
stop()
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
<-ctx.Done()
|
||||||
|
fmt.Println("Shutting down gracefully...")
|
||||||
|
return srv.Shutdown(context.Background())
|
||||||
|
}
|
||||||
334
pkg/qbit/server/static/index.html
Normal file
334
pkg/qbit/server/static/index.html
Normal file
@@ -0,0 +1,334 @@
|
|||||||
|
<!DOCTYPE html>
|
||||||
|
<html lang="en">
|
||||||
|
<head>
|
||||||
|
<meta charset="UTF-8">
|
||||||
|
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||||
|
<title>Debrid Manager</title>
|
||||||
|
<!-- Bootstrap CSS -->
|
||||||
|
<link href="https://cdn.jsdelivr.net/npm/bootstrap@5.3.0-alpha1/dist/css/bootstrap.min.css" rel="stylesheet">
|
||||||
|
<!-- Bootstrap Icons -->
|
||||||
|
<link href="https://cdn.jsdelivr.net/npm/bootstrap-icons@1.7.2/font/bootstrap-icons.css" rel="stylesheet">
|
||||||
|
<link href="https://cdn.jsdelivr.net/npm/select2@4.1.0-rc.0/dist/css/select2.min.css" rel="stylesheet"/>
|
||||||
|
<!-- Select2 Bootstrap 5 Theme CSS -->
|
||||||
|
<link rel="stylesheet"
|
||||||
|
href="https://cdn.jsdelivr.net/npm/select2-bootstrap-5-theme@1.3.0/dist/select2-bootstrap-5-theme.min.css"/>
|
||||||
|
<style>
|
||||||
|
.select2-container--bootstrap-5 .select2-results__option {
|
||||||
|
padding: 0.5rem;
|
||||||
|
}
|
||||||
|
|
||||||
|
.select2-result img {
|
||||||
|
border-radius: 4px;
|
||||||
|
}
|
||||||
|
|
||||||
|
.select2-container--bootstrap-5 .select2-results__option--highlighted {
|
||||||
|
background-color: #f8f9fa !important;
|
||||||
|
color: #000 !important;
|
||||||
|
}
|
||||||
|
|
||||||
|
.select2-container--bootstrap-5 .select2-results__option--selected {
|
||||||
|
background-color: #e9ecef !important;
|
||||||
|
}
|
||||||
|
</style>
|
||||||
|
</head>
|
||||||
|
<body>
|
||||||
|
<nav class="navbar navbar-expand-lg navbar-dark bg-dark">
|
||||||
|
<div class="container-fluid">
|
||||||
|
<span class="navbar-brand">Debrid Manager</span>
|
||||||
|
</div>
|
||||||
|
</nav>
|
||||||
|
|
||||||
|
<div class="container mt-4">
|
||||||
|
<div class="row">
|
||||||
|
<div class="col-md-8">
|
||||||
|
<div class="mb-3">
|
||||||
|
<label for="magnetURI" class="form-label">Magnet Link</label>
|
||||||
|
<textarea class="form-control" id="magnetURI" rows="3"></textarea>
|
||||||
|
</div>
|
||||||
|
<div class="mb-3">
|
||||||
|
<label for="selectArr" class="form-label">Enter Category</label>
|
||||||
|
<input type="email" class="form-control" id="selectArr" placeholder="Enter Category(e.g sonarr, radarr, radarr4k)">
|
||||||
|
</div>
|
||||||
|
<div class="form-check">
|
||||||
|
<input class="form-check-input" type="checkbox" value="" id="isSymlink">
|
||||||
|
<label class="form-check-label" for="isSymlink">
|
||||||
|
Not Symlink(Download real files instead of symlinks from Debrid)
|
||||||
|
</label>
|
||||||
|
</div>
|
||||||
|
<div class="mt-3">
|
||||||
|
<button class="btn btn-primary" id="addToArr">
|
||||||
|
Add to Arr
|
||||||
|
</button>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
<!-- <div class="col-md-6">-->
|
||||||
|
|
||||||
|
<!-- <div class="mb-3 d-none">-->
|
||||||
|
<!-- <select class="form-select mb-3 select2-ajax" id="selectContent">-->
|
||||||
|
<!-- <option></option>-->
|
||||||
|
<!-- </select>-->
|
||||||
|
<!-- </div>-->
|
||||||
|
<!-- <div class="mb-3 d-none">-->
|
||||||
|
<!-- <select class="form-select mb-3 select2-multi" id="selectSeason" multiple-->
|
||||||
|
<!-- style="width: 100%; display: none;">-->
|
||||||
|
<!-- <option value="all">Select All</option>-->
|
||||||
|
<!-- </select>-->
|
||||||
|
<!-- </div>-->
|
||||||
|
<!-- <div class="mb-4 d-none">-->
|
||||||
|
<!-- <select class="form-select mb-3 select2-multi" id="selectEpisode" multiple-->
|
||||||
|
<!-- style="width: 100%; display: none;">-->
|
||||||
|
<!-- <option value="all">Select All</option>-->
|
||||||
|
<!-- </select>-->
|
||||||
|
<!-- </div>-->
|
||||||
|
|
||||||
|
<!-- </div>-->
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<!-- Bootstrap JS and Popper.js -->
|
||||||
|
<script src="https://cdn.jsdelivr.net/npm/bootstrap@5.3.0-alpha1/dist/js/bootstrap.bundle.min.js"></script>
|
||||||
|
<!-- jQuery -->
|
||||||
|
<script src="https://code.jquery.com/jquery-3.6.0.min.js"></script>
|
||||||
|
<script src="https://cdn.jsdelivr.net/npm/select2@4.1.0-rc.0/dist/js/select2.min.js"></script>
|
||||||
|
<script>
|
||||||
|
$(document).ready(function () {
|
||||||
|
let $selectArr = $('#selectArr');
|
||||||
|
let $selectContent = $('#selectContent');
|
||||||
|
let $selectSeason = $('#selectSeason');
|
||||||
|
let $selectEpisode = $('#selectEpisode');
|
||||||
|
let $addBtn = $('#addToArr');
|
||||||
|
const $contentSearch = $('#contentSearch');
|
||||||
|
const $searchResults = $('#searchResults');
|
||||||
|
let isSonarr = true;
|
||||||
|
let searchTimeout;
|
||||||
|
let selectedArr, selectedContent, selectedSeasons, selectedEpisodes;
|
||||||
|
|
||||||
|
// Initially show only selectArr, hide others
|
||||||
|
$selectSeason.hide().closest('.mb-3').hide();
|
||||||
|
$selectEpisode.hide().closest('.mb-3').hide();
|
||||||
|
|
||||||
|
// Initialize Select2
|
||||||
|
$('.select2-multi').select2({
|
||||||
|
theme: 'bootstrap-5',
|
||||||
|
width: '100%',
|
||||||
|
placeholder: 'Select options',
|
||||||
|
allowClear: true
|
||||||
|
});
|
||||||
|
|
||||||
|
// Also hide the Select2 containers
|
||||||
|
$('.select2-container--bootstrap-5').hide();
|
||||||
|
|
||||||
|
$selectContent.select2({
|
||||||
|
theme: 'bootstrap-5',
|
||||||
|
width: '100%',
|
||||||
|
placeholder: 'Search shows/movies...',
|
||||||
|
allowClear: true,
|
||||||
|
minimumInputLength: 2,
|
||||||
|
ajax: {
|
||||||
|
url: '/internal/search',
|
||||||
|
dataType: 'json',
|
||||||
|
delay: 250,
|
||||||
|
data: function (params) {
|
||||||
|
return {
|
||||||
|
term: params.term
|
||||||
|
};
|
||||||
|
},
|
||||||
|
processResults: function (data) {
|
||||||
|
return {
|
||||||
|
results: data.map(function (item) {
|
||||||
|
return {
|
||||||
|
id: item.id,
|
||||||
|
text: item.media_type === 'movie' ? item.title : item.name,
|
||||||
|
media_type: item.media_type,
|
||||||
|
poster: item.poster_path ?
|
||||||
|
'https://image.tmdb.org/t/p/w92' + item.poster_path : null,
|
||||||
|
year: item.media_type === 'movie' ?
|
||||||
|
(item.release_date ? item.release_date.substring(0, 4) : '') :
|
||||||
|
(item.first_air_date ? item.first_air_date.substring(0, 4) : '')
|
||||||
|
};
|
||||||
|
})
|
||||||
|
};
|
||||||
|
},
|
||||||
|
cache: true
|
||||||
|
},
|
||||||
|
templateResult: formatResult,
|
||||||
|
templateSelection: formatSelection
|
||||||
|
});
|
||||||
|
|
||||||
|
function formatResult(item) {
|
||||||
|
if (!item.id) return item.text;
|
||||||
|
|
||||||
|
return $(`
|
||||||
|
<div class="select2-result d-flex align-items-center gap-2">
|
||||||
|
${item.poster ?
|
||||||
|
`<img src="${item.poster}" style="width: 45px; height: 68px; object-fit: cover;">` :
|
||||||
|
'<div style="width: 45px; height: 68px; background: #eee;"></div>'
|
||||||
|
}
|
||||||
|
<div>
|
||||||
|
<div class="fw-bold">${item.text}</div>
|
||||||
|
<small class="text-muted">
|
||||||
|
${item.year} • ${item.media_type === 'movie' ? 'Movie' : 'TV Series'}
|
||||||
|
</small>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
`);
|
||||||
|
}
|
||||||
|
|
||||||
|
function formatSelection(item) {
|
||||||
|
if (!item.id) return item.text;
|
||||||
|
return item.text + (item.year ? ` (${item.year})` : '');
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle selection
|
||||||
|
$selectContent.on('select2:select', function (e) {
|
||||||
|
selectedContent = e.params.data.id;
|
||||||
|
const mediaType = e.params.data.media_type;
|
||||||
|
|
||||||
|
if (mediaType === 'tv') {
|
||||||
|
$selectSeason.show().closest('.mb-3').show();
|
||||||
|
$selectSeason.next('.select2-container--bootstrap-5').show();
|
||||||
|
// Fetch seasons (your existing seasons fetch code)
|
||||||
|
fetch(`/internal/seasons/${selectedContent}`)
|
||||||
|
.then(response => response.json())
|
||||||
|
.then(seasons => {
|
||||||
|
$selectSeason.empty().append('<option value="all">Select All</option>');
|
||||||
|
seasons.forEach(season => {
|
||||||
|
$selectSeason.append(`<option value="${season}">Season ${season}</option>`);
|
||||||
|
});
|
||||||
|
$selectSeason.trigger('change.select2');
|
||||||
|
})
|
||||||
|
.catch(error => console.error('Error fetching seasons:', error));
|
||||||
|
} else {
|
||||||
|
// For movies, show the Add to Arr button directly
|
||||||
|
$selectSeason.hide().closest('.mb-3').hide();
|
||||||
|
$selectSeason.next('.select2-container--bootstrap-5').hide();
|
||||||
|
$selectEpisode.hide().closest('.mb-3').hide();
|
||||||
|
$selectEpisode.next('.select2-container--bootstrap-5').hide();
|
||||||
|
$addBtn.show();
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
// Fetch Arrs
|
||||||
|
function fetchArrs() {
|
||||||
|
fetch('/internal/arrs')
|
||||||
|
.then(response => response.json())
|
||||||
|
.then(arrs => {
|
||||||
|
$selectArr.empty().append('<option value="">Select Arr</option>');
|
||||||
|
arrs.forEach(arr => {
|
||||||
|
$selectArr.append(`<option value="${arr.name}">${arr.name}</option>`);
|
||||||
|
});
|
||||||
|
})
|
||||||
|
.catch(error => console.error('Error fetching arrs:', error));
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle content selection
|
||||||
|
$selectContent.change(function () {
|
||||||
|
selectedContent = $(this).val();
|
||||||
|
selectedArr = $selectArr.val();
|
||||||
|
|
||||||
|
if (!selectedContent) {
|
||||||
|
$selectSeason.hide().closest('.mb-3').hide();
|
||||||
|
$selectSeason.next('.select2-container--bootstrap-5').hide();
|
||||||
|
$selectEpisode.hide().closest('.mb-3').hide();
|
||||||
|
$selectEpisode.next('.select2-container--bootstrap-5').hide();
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (isSonarr) {
|
||||||
|
$selectSeason.show().closest('.mb-3').show();
|
||||||
|
$selectSeason.next('.select2-container--bootstrap-5').show();
|
||||||
|
// Fetch seasons
|
||||||
|
fetch(`/internal/seasons/${selectedContent}`)
|
||||||
|
.then(response => response.json())
|
||||||
|
.then(seasons => {
|
||||||
|
$selectSeason.empty().append('<option value="all">Select All</option>');
|
||||||
|
seasons.forEach(season => {
|
||||||
|
$selectSeason.append(`<option value="${season}">Season ${season}</option>`);
|
||||||
|
});
|
||||||
|
$selectSeason.trigger('change.select2');
|
||||||
|
})
|
||||||
|
.catch(error => console.error('Error fetching seasons:', error));
|
||||||
|
} else {
|
||||||
|
// For Radarr, show the Add to Arr button directly
|
||||||
|
$selectSeason.hide().closest('.mb-3').hide();
|
||||||
|
$selectSeason.next('.select2-container--bootstrap-5').hide();
|
||||||
|
$selectEpisode.hide().closest('.mb-3').hide();
|
||||||
|
$selectEpisode.next('.select2-container--bootstrap-5').hide();
|
||||||
|
$addBtn.show();
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
// Handle season selection
|
||||||
|
$selectSeason.change(function () {
|
||||||
|
selectedSeasons = $(this).val();
|
||||||
|
console.log('Selected seasons:', selectedSeasons);
|
||||||
|
|
||||||
|
if (!selectedSeasons || selectedSeasons.includes('all')) {
|
||||||
|
$selectEpisode.hide().closest('.mb-3').hide();
|
||||||
|
$selectEpisode.next('.select2-container--bootstrap-5').hide();
|
||||||
|
$addBtn.show();
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
$selectEpisode.show().closest('.mb-3').show();
|
||||||
|
$selectEpisode.next('.select2-container--bootstrap-5').show();
|
||||||
|
fetch(`/internal/episodes/${selectedContent}?seasons=${selectedSeasons.join(',')}`)
|
||||||
|
.then(response => response.json())
|
||||||
|
.then(episodes => {
|
||||||
|
$selectEpisode.empty().append('<option value="all">Select All</option>');
|
||||||
|
episodes.forEach(episode => {
|
||||||
|
$selectEpisode.append(`<option value="${episode}">Episode ${episode}</option>`);
|
||||||
|
});
|
||||||
|
$selectEpisode.trigger('change.select2');
|
||||||
|
})
|
||||||
|
.catch(error => console.error('Error fetching episodes:', error));
|
||||||
|
$addBtn.show();
|
||||||
|
});
|
||||||
|
|
||||||
|
$addBtn.click(function () {
|
||||||
|
let oldText = $(this).text();
|
||||||
|
$(this).prop('disabled', true).prepend('<span class="spinner-border spinner-border-sm me-2" role="status" aria-hidden="true"></span>');
|
||||||
|
let magnet = $('#magnetURI').val();
|
||||||
|
if (!magnet) {
|
||||||
|
$(this).prop('disabled', false).text(oldText);
|
||||||
|
alert('Please provide a magnet link or upload a torrent file!');
|
||||||
|
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
let data = {
|
||||||
|
arr: $selectArr.val(),
|
||||||
|
url: magnet,
|
||||||
|
notSymlink: $('#isSymlink').is(':checked'),
|
||||||
|
};
|
||||||
|
console.log('Adding to Arr:', data);
|
||||||
|
fetch('/internal/add', {
|
||||||
|
method: 'POST',
|
||||||
|
headers: {
|
||||||
|
'Content-Type': 'application/json'
|
||||||
|
},
|
||||||
|
body: JSON.stringify(data)
|
||||||
|
})
|
||||||
|
.then(async response => {
|
||||||
|
if (!response.ok) {
|
||||||
|
const errorText = await response.text();
|
||||||
|
throw new Error(errorText);
|
||||||
|
}
|
||||||
|
return response.json();
|
||||||
|
})
|
||||||
|
.then(result => {
|
||||||
|
console.log('Added to Arr:', result);
|
||||||
|
$(this).prop('disabled', false).text(oldText);
|
||||||
|
alert('Added to Arr successfully!');
|
||||||
|
})
|
||||||
|
.catch(error => {
|
||||||
|
$(this).prop('disabled', false).text(oldText);
|
||||||
|
alert(`Error adding to Arr: ${error.message || error}`);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
// Initial fetch of Arrs
|
||||||
|
//fetchArrs();
|
||||||
|
});
|
||||||
|
</script>
|
||||||
|
</body>
|
||||||
|
</html>
|
||||||
@@ -1,44 +1,46 @@
|
|||||||
package qbit
|
package server
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"goBlack/common"
|
||||||
|
"goBlack/pkg/qbit/shared"
|
||||||
"net/http"
|
"net/http"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"strings"
|
"strings"
|
||||||
)
|
)
|
||||||
|
|
||||||
func (q *QBit) handleTorrentsInfo(w http.ResponseWriter, r *http.Request) {
|
func (s *Server) handleTorrentsInfo(w http.ResponseWriter, r *http.Request) {
|
||||||
//log all url params
|
//log all url params
|
||||||
ctx := r.Context()
|
ctx := r.Context()
|
||||||
category := strings.Trim(r.URL.Query().Get("category"), "")
|
category := ctx.Value("category").(string)
|
||||||
filter := strings.Trim(r.URL.Query().Get("filter"), "")
|
filter := strings.Trim(r.URL.Query().Get("filter"), "")
|
||||||
hashes, _ := ctx.Value("hashes").([]string)
|
hashes, _ := ctx.Value("hashes").([]string)
|
||||||
torrents := q.storage.GetAll(category, filter, hashes)
|
torrents := s.qbit.Storage.GetAll(category, filter, hashes)
|
||||||
JSONResponse(w, torrents, http.StatusOK)
|
common.JSONResponse(w, torrents, http.StatusOK)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (q *QBit) handleTorrentsAdd(w http.ResponseWriter, r *http.Request) {
|
func (s *Server) handleTorrentsAdd(w http.ResponseWriter, r *http.Request) {
|
||||||
ctx := r.Context()
|
ctx := r.Context()
|
||||||
contentType := strings.Split(r.Header.Get("Content-Type"), ";")[0]
|
contentType := strings.Split(r.Header.Get("Content-Type"), ";")[0]
|
||||||
switch contentType {
|
switch contentType {
|
||||||
case "multipart/form-data":
|
case "multipart/form-data":
|
||||||
err := r.ParseMultipartForm(32 << 20) // 32MB max memory
|
err := r.ParseMultipartForm(32 << 20) // 32MB max memory
|
||||||
if err != nil {
|
if err != nil {
|
||||||
q.logger.Printf("Error parsing form: %v\n", err)
|
s.logger.Printf("Error parsing form: %v\n", err)
|
||||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
case "application/x-www-form-urlencoded":
|
case "application/x-www-form-urlencoded":
|
||||||
err := r.ParseForm()
|
err := r.ParseForm()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
q.logger.Printf("Error parsing form: %v\n", err)
|
s.logger.Printf("Error parsing form: %v\n", err)
|
||||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
isSymlink := strings.ToLower(r.FormValue("sequentialDownload")) != "true"
|
isSymlink := strings.ToLower(r.FormValue("sequentialDownload")) != "true"
|
||||||
q.logger.Printf("isSymlink: %v\n", isSymlink)
|
s.logger.Printf("isSymlink: %v\n", isSymlink)
|
||||||
urls := r.FormValue("urls")
|
urls := r.FormValue("urls")
|
||||||
category := r.FormValue("category")
|
category := r.FormValue("category")
|
||||||
|
|
||||||
@@ -48,20 +50,19 @@ func (q *QBit) handleTorrentsAdd(w http.ResponseWriter, r *http.Request) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
ctx = context.WithValue(ctx, "isSymlink", isSymlink)
|
ctx = context.WithValue(ctx, "isSymlink", isSymlink)
|
||||||
|
|
||||||
for _, url := range urlList {
|
for _, url := range urlList {
|
||||||
if err := q.AddMagnet(ctx, url, category); err != nil {
|
if err := s.qbit.AddMagnet(ctx, url, category); err != nil {
|
||||||
q.logger.Printf("Error adding magnet: %v\n", err)
|
s.logger.Printf("Error adding magnet: %v\n", err)
|
||||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if contentType == "multipart/form-data" {
|
if contentType == "multipart/form-data" && len(r.MultipartForm.File["torrents"]) > 0 {
|
||||||
files := r.MultipartForm.File["torrents"]
|
files := r.MultipartForm.File["torrents"]
|
||||||
for _, fileHeader := range files {
|
for _, fileHeader := range files {
|
||||||
if err := q.AddTorrent(ctx, fileHeader, category); err != nil {
|
if err := s.qbit.AddTorrent(ctx, fileHeader, category); err != nil {
|
||||||
q.logger.Printf("Error adding torrent: %v\n", err)
|
s.logger.Printf("Error adding torrent: %v\n", err)
|
||||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@@ -71,7 +72,7 @@ func (q *QBit) handleTorrentsAdd(w http.ResponseWriter, r *http.Request) {
|
|||||||
w.WriteHeader(http.StatusOK)
|
w.WriteHeader(http.StatusOK)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (q *QBit) handleTorrentsDelete(w http.ResponseWriter, r *http.Request) {
|
func (s *Server) handleTorrentsDelete(w http.ResponseWriter, r *http.Request) {
|
||||||
ctx := r.Context()
|
ctx := r.Context()
|
||||||
hashes, _ := ctx.Value("hashes").([]string)
|
hashes, _ := ctx.Value("hashes").([]string)
|
||||||
if len(hashes) == 0 {
|
if len(hashes) == 0 {
|
||||||
@@ -79,67 +80,67 @@ func (q *QBit) handleTorrentsDelete(w http.ResponseWriter, r *http.Request) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
for _, hash := range hashes {
|
for _, hash := range hashes {
|
||||||
q.storage.Delete(hash)
|
s.qbit.Storage.Delete(hash)
|
||||||
}
|
}
|
||||||
|
|
||||||
w.WriteHeader(http.StatusOK)
|
w.WriteHeader(http.StatusOK)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (q *QBit) handleTorrentsPause(w http.ResponseWriter, r *http.Request) {
|
func (s *Server) handleTorrentsPause(w http.ResponseWriter, r *http.Request) {
|
||||||
ctx := r.Context()
|
ctx := r.Context()
|
||||||
hashes, _ := ctx.Value("hashes").([]string)
|
hashes, _ := ctx.Value("hashes").([]string)
|
||||||
for _, hash := range hashes {
|
for _, hash := range hashes {
|
||||||
torrent := q.storage.Get(hash)
|
torrent := s.qbit.Storage.Get(hash)
|
||||||
if torrent == nil {
|
if torrent == nil {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
go q.PauseTorrent(torrent)
|
go s.qbit.PauseTorrent(torrent)
|
||||||
}
|
}
|
||||||
|
|
||||||
w.WriteHeader(http.StatusOK)
|
w.WriteHeader(http.StatusOK)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (q *QBit) handleTorrentsResume(w http.ResponseWriter, r *http.Request) {
|
func (s *Server) handleTorrentsResume(w http.ResponseWriter, r *http.Request) {
|
||||||
ctx := r.Context()
|
ctx := r.Context()
|
||||||
hashes, _ := ctx.Value("hashes").([]string)
|
hashes, _ := ctx.Value("hashes").([]string)
|
||||||
for _, hash := range hashes {
|
for _, hash := range hashes {
|
||||||
torrent := q.storage.Get(hash)
|
torrent := s.qbit.Storage.Get(hash)
|
||||||
if torrent == nil {
|
if torrent == nil {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
go q.ResumeTorrent(torrent)
|
go s.qbit.ResumeTorrent(torrent)
|
||||||
}
|
}
|
||||||
|
|
||||||
w.WriteHeader(http.StatusOK)
|
w.WriteHeader(http.StatusOK)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (q *QBit) handleTorrentRecheck(w http.ResponseWriter, r *http.Request) {
|
func (s *Server) handleTorrentRecheck(w http.ResponseWriter, r *http.Request) {
|
||||||
ctx := r.Context()
|
ctx := r.Context()
|
||||||
hashes, _ := ctx.Value("hashes").([]string)
|
hashes, _ := ctx.Value("hashes").([]string)
|
||||||
for _, hash := range hashes {
|
for _, hash := range hashes {
|
||||||
torrent := q.storage.Get(hash)
|
torrent := s.qbit.Storage.Get(hash)
|
||||||
if torrent == nil {
|
if torrent == nil {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
go q.RefreshTorrent(torrent)
|
go s.qbit.RefreshTorrent(torrent)
|
||||||
}
|
}
|
||||||
|
|
||||||
w.WriteHeader(http.StatusOK)
|
w.WriteHeader(http.StatusOK)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (q *QBit) handleCategories(w http.ResponseWriter, r *http.Request) {
|
func (s *Server) handleCategories(w http.ResponseWriter, r *http.Request) {
|
||||||
var categories = map[string]TorrentCategory{}
|
var categories = map[string]shared.TorrentCategory{}
|
||||||
for _, cat := range q.Categories {
|
for _, cat := range s.qbit.Categories {
|
||||||
path := filepath.Join(q.DownloadFolder, cat)
|
path := filepath.Join(s.qbit.DownloadFolder, cat)
|
||||||
categories[cat] = TorrentCategory{
|
categories[cat] = shared.TorrentCategory{
|
||||||
Name: cat,
|
Name: cat,
|
||||||
SavePath: path,
|
SavePath: path,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
JSONResponse(w, categories, http.StatusOK)
|
common.JSONResponse(w, categories, http.StatusOK)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (q *QBit) handleCreateCategory(w http.ResponseWriter, r *http.Request) {
|
func (s *Server) handleCreateCategory(w http.ResponseWriter, r *http.Request) {
|
||||||
err := r.ParseForm()
|
err := r.ParseForm()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
http.Error(w, "Failed to parse form data", http.StatusBadRequest)
|
http.Error(w, "Failed to parse form data", http.StatusBadRequest)
|
||||||
@@ -152,24 +153,24 @@ func (q *QBit) handleCreateCategory(w http.ResponseWriter, r *http.Request) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
q.Categories = append(q.Categories, name)
|
s.qbit.Categories = append(s.qbit.Categories, name)
|
||||||
|
|
||||||
JSONResponse(w, nil, http.StatusOK)
|
common.JSONResponse(w, nil, http.StatusOK)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (q *QBit) handleTorrentProperties(w http.ResponseWriter, r *http.Request) {
|
func (s *Server) handleTorrentProperties(w http.ResponseWriter, r *http.Request) {
|
||||||
hash := r.URL.Query().Get("hash")
|
hash := r.URL.Query().Get("hash")
|
||||||
torrent := q.storage.Get(hash)
|
torrent := s.qbit.Storage.Get(hash)
|
||||||
properties := q.GetTorrentProperties(torrent)
|
properties := s.qbit.GetTorrentProperties(torrent)
|
||||||
JSONResponse(w, properties, http.StatusOK)
|
common.JSONResponse(w, properties, http.StatusOK)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (q *QBit) handleTorrentFiles(w http.ResponseWriter, r *http.Request) {
|
func (s *Server) handleTorrentFiles(w http.ResponseWriter, r *http.Request) {
|
||||||
hash := r.URL.Query().Get("hash")
|
hash := r.URL.Query().Get("hash")
|
||||||
torrent := q.storage.Get(hash)
|
torrent := s.qbit.Storage.Get(hash)
|
||||||
if torrent == nil {
|
if torrent == nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
files := q.GetTorrentFiles(torrent)
|
files := s.qbit.GetTorrentFiles(torrent)
|
||||||
JSONResponse(w, files, http.StatusOK)
|
common.JSONResponse(w, files, http.StatusOK)
|
||||||
}
|
}
|
||||||
149
pkg/qbit/server/ui_handlers.go
Normal file
149
pkg/qbit/server/ui_handlers.go
Normal file
@@ -0,0 +1,149 @@
|
|||||||
|
package server
|
||||||
|
|
||||||
|
import (
|
||||||
|
"embed"
|
||||||
|
"encoding/json"
|
||||||
|
"goBlack/common"
|
||||||
|
"goBlack/pkg/arr"
|
||||||
|
"goBlack/pkg/debrid"
|
||||||
|
"html/template"
|
||||||
|
"net/http"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
type AddRequest struct {
|
||||||
|
Url string `json:"url"`
|
||||||
|
Arr string `json:"arr"`
|
||||||
|
File string `json:"file"`
|
||||||
|
NotSymlink bool `json:"notSymlink"`
|
||||||
|
Content string `json:"content"`
|
||||||
|
Seasons []string `json:"seasons"`
|
||||||
|
Episodes []string `json:"episodes"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type ArrResponse struct {
|
||||||
|
Name string `json:"name"`
|
||||||
|
Url string `json:"url"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type ContentResponse struct {
|
||||||
|
ID string `json:"id"`
|
||||||
|
Title string `json:"title"`
|
||||||
|
Type string `json:"type"`
|
||||||
|
ArrID string `json:"arr"`
|
||||||
|
}
|
||||||
|
|
||||||
|
//go:embed static/index.html
|
||||||
|
var content embed.FS
|
||||||
|
|
||||||
|
func (s *Server) handleHome(w http.ResponseWriter, r *http.Request) {
|
||||||
|
tmpl, err := template.ParseFS(content, "static/index.html")
|
||||||
|
if err != nil {
|
||||||
|
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
err = tmpl.Execute(w, nil)
|
||||||
|
if err != nil {
|
||||||
|
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Server) handleGetArrs(w http.ResponseWriter, r *http.Request) {
|
||||||
|
w.Header().Set("Content-Type", "application/json")
|
||||||
|
common.JSONResponse(w, s.qbit.Arrs.GetAll(), http.StatusOK)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Server) handleContent(w http.ResponseWriter, r *http.Request) {
|
||||||
|
arrName := r.URL.Query().Get("arr")
|
||||||
|
_arr := s.qbit.Arrs.Get(arrName)
|
||||||
|
if _arr == nil {
|
||||||
|
http.Error(w, "Invalid arr", http.StatusBadRequest)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
contents := _arr.GetContents()
|
||||||
|
w.Header().Set("Content-Type", "application/json")
|
||||||
|
common.JSONResponse(w, contents, http.StatusOK)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Server) handleSearch(w http.ResponseWriter, r *http.Request) {
|
||||||
|
// arrName := r.URL.Query().Get("arr")
|
||||||
|
term := r.URL.Query().Get("term")
|
||||||
|
results, err := arr.SearchTMDB(term)
|
||||||
|
if err != nil {
|
||||||
|
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
w.Header().Set("Content-Type", "application/json")
|
||||||
|
common.JSONResponse(w, results.Results, http.StatusOK)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Server) handleSeasons(w http.ResponseWriter, r *http.Request) {
|
||||||
|
// arrId := r.URL.Query().Get("arrId")
|
||||||
|
// contentId := chi.URLParam(r, "contentId")
|
||||||
|
seasons := []string{"Season 1", "Season 2", "Season 3", "Season 4", "Season 5"}
|
||||||
|
w.Header().Set("Content-Type", "application/json")
|
||||||
|
common.JSONResponse(w, seasons, http.StatusOK)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Server) handleEpisodes(w http.ResponseWriter, r *http.Request) {
|
||||||
|
// arrId := r.URL.Query().Get("arrId")
|
||||||
|
// contentId := chi.URLParam(r, "contentId")
|
||||||
|
// seasonIds := strings.Split(r.URL.Query().Get("seasons"), ",")
|
||||||
|
episodes := []string{"Episode 1", "Episode 2", "Episode 3", "Episode 4", "Episode 5"}
|
||||||
|
w.Header().Set("Content-Type", "application/json")
|
||||||
|
common.JSONResponse(w, episodes, http.StatusOK)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Server) handleAddContent(w http.ResponseWriter, r *http.Request) {
|
||||||
|
var req AddRequest
|
||||||
|
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
||||||
|
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
_arr := s.qbit.Arrs.Get(req.Arr)
|
||||||
|
if _arr == nil {
|
||||||
|
_arr = arr.NewArr(req.Arr, "", "", arr.Sonarr)
|
||||||
|
}
|
||||||
|
importReq := NewImportRequest(req.Url, _arr, !req.NotSymlink)
|
||||||
|
err := importReq.Process(s)
|
||||||
|
if err != nil {
|
||||||
|
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
common.JSONResponse(w, importReq, http.StatusOK)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Server) handleCheckCached(w http.ResponseWriter, r *http.Request) {
|
||||||
|
w.Header().Set("Content-Type", "application/json")
|
||||||
|
_hashes := r.URL.Query().Get("hash")
|
||||||
|
if _hashes == "" {
|
||||||
|
http.Error(w, "No hashes provided", http.StatusBadRequest)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
hashes := strings.Split(_hashes, ",")
|
||||||
|
if len(hashes) == 0 {
|
||||||
|
http.Error(w, "No hashes provided", http.StatusBadRequest)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
db := r.URL.Query().Get("debrid")
|
||||||
|
var deb debrid.Service
|
||||||
|
if db == "" {
|
||||||
|
// use the first debrid
|
||||||
|
deb = s.qbit.Debrid.Get()
|
||||||
|
} else {
|
||||||
|
deb = s.qbit.Debrid.GetByName(db)
|
||||||
|
}
|
||||||
|
if deb == nil {
|
||||||
|
http.Error(w, "Invalid debrid", http.StatusBadRequest)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
res := deb.IsAvailable(hashes)
|
||||||
|
result := make(map[string]bool)
|
||||||
|
for _, h := range hashes {
|
||||||
|
_, exists := res[h]
|
||||||
|
result[h] = exists
|
||||||
|
}
|
||||||
|
common.JSONResponse(w, result, http.StatusOK)
|
||||||
|
}
|
||||||
@@ -1,29 +1,27 @@
|
|||||||
package qbit
|
package shared
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"fmt"
|
||||||
"goBlack/common"
|
"goBlack/common"
|
||||||
"goBlack/pkg/debrid"
|
"goBlack/pkg/debrid"
|
||||||
"goBlack/pkg/qbit/downloaders"
|
"goBlack/pkg/downloaders"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
func (q *QBit) processManualFiles(torrent *Torrent, debridTorrent *debrid.Torrent, arr *debrid.Arr) {
|
func (q *QBit) processManualFiles(debridTorrent *debrid.Torrent) (string, error) {
|
||||||
q.logger.Printf("Downloading %d files...", len(debridTorrent.DownloadLinks))
|
q.logger.Printf("Downloading %d files...", len(debridTorrent.DownloadLinks))
|
||||||
torrentPath := common.RemoveExtension(debridTorrent.OriginalFilename)
|
torrentPath := common.RemoveExtension(debridTorrent.OriginalFilename)
|
||||||
parent := common.RemoveInvalidChars(filepath.Join(q.DownloadFolder, debridTorrent.Arr.Name, torrentPath))
|
parent := common.RemoveInvalidChars(filepath.Join(q.DownloadFolder, debridTorrent.Arr.Name, torrentPath))
|
||||||
err := os.MkdirAll(parent, os.ModePerm)
|
err := os.MkdirAll(parent, os.ModePerm)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
q.logger.Printf("Failed to create directory: %s\n", parent)
|
// add previous error to the error and return
|
||||||
q.MarkAsFailed(torrent)
|
return "", fmt.Errorf("failed to create directory: %s: %v", parent, err)
|
||||||
return
|
|
||||||
}
|
}
|
||||||
torrent.TorrentPath = torrentPath
|
|
||||||
q.downloadFiles(debridTorrent, parent)
|
q.downloadFiles(debridTorrent, parent)
|
||||||
q.UpdateTorrent(torrent, debridTorrent)
|
return torrentPath, nil
|
||||||
q.RefreshArr(arr)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (q *QBit) downloadFiles(debridTorrent *debrid.Torrent, parent string) {
|
func (q *QBit) downloadFiles(debridTorrent *debrid.Torrent, parent string) {
|
||||||
@@ -52,7 +50,7 @@ func (q *QBit) downloadFiles(debridTorrent *debrid.Torrent, parent string) {
|
|||||||
q.logger.Printf("Downloaded all files for %s\n", debridTorrent.Name)
|
q.logger.Printf("Downloaded all files for %s\n", debridTorrent.Name)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (q *QBit) processSymlink(torrent *Torrent, debridTorrent *debrid.Torrent, arr *debrid.Arr) {
|
func (q *QBit) ProcessSymlink(debridTorrent *debrid.Torrent) (string, error) {
|
||||||
var wg sync.WaitGroup
|
var wg sync.WaitGroup
|
||||||
files := debridTorrent.Files
|
files := debridTorrent.Files
|
||||||
ready := make(chan debrid.TorrentFile, len(files))
|
ready := make(chan debrid.TorrentFile, len(files))
|
||||||
@@ -61,17 +59,12 @@ func (q *QBit) processSymlink(torrent *Torrent, debridTorrent *debrid.Torrent, a
|
|||||||
rCloneBase := debridTorrent.Debrid.GetMountPath()
|
rCloneBase := debridTorrent.Debrid.GetMountPath()
|
||||||
torrentPath, err := q.getTorrentPath(rCloneBase, debridTorrent) // /MyTVShow/
|
torrentPath, err := q.getTorrentPath(rCloneBase, debridTorrent) // /MyTVShow/
|
||||||
if err != nil {
|
if err != nil {
|
||||||
q.MarkAsFailed(torrent)
|
return "", fmt.Errorf("failed to get torrent path: %v", err)
|
||||||
q.logger.Printf("Error: %v", err)
|
|
||||||
return
|
|
||||||
}
|
}
|
||||||
|
|
||||||
torrentSymlinkPath := filepath.Join(q.DownloadFolder, debridTorrent.Arr.Name, torrentPath) // /mnt/symlinks/{category}/MyTVShow/
|
torrentSymlinkPath := filepath.Join(q.DownloadFolder, debridTorrent.Arr.Name, torrentPath) // /mnt/symlinks/{category}/MyTVShow/
|
||||||
err = os.MkdirAll(torrentSymlinkPath, os.ModePerm)
|
err = os.MkdirAll(torrentSymlinkPath, os.ModePerm)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
q.logger.Printf("Failed to create directory: %s\n", torrentSymlinkPath)
|
return "", fmt.Errorf("failed to create directory: %s: %v", torrentSymlinkPath, err)
|
||||||
q.MarkAsFailed(torrent)
|
|
||||||
return
|
|
||||||
}
|
}
|
||||||
torrentRclonePath := filepath.Join(rCloneBase, torrentPath)
|
torrentRclonePath := filepath.Join(rCloneBase, torrentPath)
|
||||||
for _, file := range files {
|
for _, file := range files {
|
||||||
@@ -88,32 +81,16 @@ func (q *QBit) processSymlink(torrent *Torrent, debridTorrent *debrid.Torrent, a
|
|||||||
q.logger.Println("File is ready:", f.Path)
|
q.logger.Println("File is ready:", f.Path)
|
||||||
q.createSymLink(torrentSymlinkPath, torrentRclonePath, f)
|
q.createSymLink(torrentSymlinkPath, torrentRclonePath, f)
|
||||||
}
|
}
|
||||||
// Update the torrent when all files are ready
|
return torrentPath, nil
|
||||||
torrent.TorrentPath = filepath.Base(torrentPath) // Quite important
|
|
||||||
q.UpdateTorrent(torrent, debridTorrent)
|
|
||||||
q.RefreshArr(arr)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (q *QBit) getTorrentPath(rclonePath string, debridTorrent *debrid.Torrent) (string, error) {
|
func (q *QBit) getTorrentPath(rclonePath string, debridTorrent *debrid.Torrent) (string, error) {
|
||||||
pathChan := make(chan string)
|
for {
|
||||||
errChan := make(chan error)
|
torrentPath := debridTorrent.GetMountFolder(rclonePath)
|
||||||
|
if torrentPath != "" {
|
||||||
go func() {
|
return torrentPath, nil
|
||||||
for {
|
|
||||||
torrentPath := debridTorrent.GetMountFolder(rclonePath)
|
|
||||||
if torrentPath != "" {
|
|
||||||
pathChan <- torrentPath
|
|
||||||
return
|
|
||||||
}
|
|
||||||
time.Sleep(time.Second)
|
|
||||||
}
|
}
|
||||||
}()
|
time.Sleep(time.Second)
|
||||||
|
|
||||||
select {
|
|
||||||
case path := <-pathChan:
|
|
||||||
return path, nil
|
|
||||||
case err := <-errChan:
|
|
||||||
return "", err
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
44
pkg/qbit/shared/qbit.go
Normal file
44
pkg/qbit/shared/qbit.go
Normal file
@@ -0,0 +1,44 @@
|
|||||||
|
package shared
|
||||||
|
|
||||||
|
import (
|
||||||
|
"cmp"
|
||||||
|
"goBlack/common"
|
||||||
|
"goBlack/pkg/arr"
|
||||||
|
"goBlack/pkg/debrid"
|
||||||
|
"log"
|
||||||
|
"os"
|
||||||
|
)
|
||||||
|
|
||||||
|
type QBit struct {
|
||||||
|
Username string `json:"username"`
|
||||||
|
Password string `json:"password"`
|
||||||
|
Port string `json:"port"`
|
||||||
|
DownloadFolder string `json:"download_folder"`
|
||||||
|
Categories []string `json:"categories"`
|
||||||
|
Debrid *debrid.DebridService
|
||||||
|
Storage *TorrentStorage
|
||||||
|
debug bool
|
||||||
|
logger *log.Logger
|
||||||
|
Arrs *arr.Storage
|
||||||
|
RefreshInterval int
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewQBit(config *common.Config, deb *debrid.DebridService, logger *log.Logger) *QBit {
|
||||||
|
cfg := config.QBitTorrent
|
||||||
|
port := cmp.Or(cfg.Port, os.Getenv("QBIT_PORT"), "8182")
|
||||||
|
refreshInterval := cmp.Or(cfg.RefreshInterval, 10)
|
||||||
|
arrs := arr.NewStorage()
|
||||||
|
return &QBit{
|
||||||
|
Username: cfg.Username,
|
||||||
|
Password: cfg.Password,
|
||||||
|
Port: port,
|
||||||
|
DownloadFolder: cfg.DownloadFolder,
|
||||||
|
Categories: cfg.Categories,
|
||||||
|
Debrid: deb,
|
||||||
|
debug: cfg.Debug,
|
||||||
|
Storage: NewTorrentStorage("torrents.json"),
|
||||||
|
logger: logger,
|
||||||
|
Arrs: arrs,
|
||||||
|
RefreshInterval: refreshInterval,
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -1,4 +1,4 @@
|
|||||||
package qbit
|
package shared
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
@@ -10,6 +10,7 @@ type TorrentStorage struct {
|
|||||||
torrents map[string]*Torrent
|
torrents map[string]*Torrent
|
||||||
mu sync.RWMutex
|
mu sync.RWMutex
|
||||||
order []string
|
order []string
|
||||||
|
filename string // Added to store the filename for persistence
|
||||||
}
|
}
|
||||||
|
|
||||||
func loadTorrentsFromJSON(filename string) (map[string]*Torrent, error) {
|
func loadTorrentsFromJSON(filename string) (map[string]*Torrent, error) {
|
||||||
@@ -25,7 +26,7 @@ func loadTorrentsFromJSON(filename string) (map[string]*Torrent, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func NewTorrentStorage(filename string) *TorrentStorage {
|
func NewTorrentStorage(filename string) *TorrentStorage {
|
||||||
// Open the json file and read the data
|
// Open the JSON file and read the data
|
||||||
torrents, err := loadTorrentsFromJSON(filename)
|
torrents, err := loadTorrentsFromJSON(filename)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
torrents = make(map[string]*Torrent)
|
torrents = make(map[string]*Torrent)
|
||||||
@@ -38,6 +39,7 @@ func NewTorrentStorage(filename string) *TorrentStorage {
|
|||||||
return &TorrentStorage{
|
return &TorrentStorage{
|
||||||
torrents: torrents,
|
torrents: torrents,
|
||||||
order: order,
|
order: order,
|
||||||
|
filename: filename,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -46,6 +48,7 @@ func (ts *TorrentStorage) Add(torrent *Torrent) {
|
|||||||
defer ts.mu.Unlock()
|
defer ts.mu.Unlock()
|
||||||
ts.torrents[torrent.Hash] = torrent
|
ts.torrents[torrent.Hash] = torrent
|
||||||
ts.order = append(ts.order, torrent.Hash)
|
ts.order = append(ts.order, torrent.Hash)
|
||||||
|
_ = ts.saveToFile()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ts *TorrentStorage) AddOrUpdate(torrent *Torrent) {
|
func (ts *TorrentStorage) AddOrUpdate(torrent *Torrent) {
|
||||||
@@ -55,6 +58,7 @@ func (ts *TorrentStorage) AddOrUpdate(torrent *Torrent) {
|
|||||||
ts.order = append(ts.order, torrent.Hash)
|
ts.order = append(ts.order, torrent.Hash)
|
||||||
}
|
}
|
||||||
ts.torrents[torrent.Hash] = torrent
|
ts.torrents[torrent.Hash] = torrent
|
||||||
|
_ = ts.saveToFile()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ts *TorrentStorage) GetByID(id string) *Torrent {
|
func (ts *TorrentStorage) GetByID(id string) *Torrent {
|
||||||
@@ -104,6 +108,7 @@ func (ts *TorrentStorage) Update(torrent *Torrent) {
|
|||||||
ts.mu.Lock()
|
ts.mu.Lock()
|
||||||
defer ts.mu.Unlock()
|
defer ts.mu.Unlock()
|
||||||
ts.torrents[torrent.Hash] = torrent
|
ts.torrents[torrent.Hash] = torrent
|
||||||
|
_ = ts.saveToFile()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ts *TorrentStorage) Delete(hash string) {
|
func (ts *TorrentStorage) Delete(hash string) {
|
||||||
@@ -122,16 +127,25 @@ func (ts *TorrentStorage) Delete(hash string) {
|
|||||||
}
|
}
|
||||||
// Delete the torrent folder
|
// Delete the torrent folder
|
||||||
if torrent.ContentPath != "" {
|
if torrent.ContentPath != "" {
|
||||||
os.RemoveAll(torrent.ContentPath)
|
err := os.RemoveAll(torrent.ContentPath)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
_ = ts.saveToFile()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ts *TorrentStorage) Save(filename string) error {
|
func (ts *TorrentStorage) Save() error {
|
||||||
ts.mu.RLock()
|
ts.mu.RLock()
|
||||||
defer ts.mu.RUnlock()
|
defer ts.mu.RUnlock()
|
||||||
data, err := json.Marshal(ts.torrents)
|
return ts.saveToFile()
|
||||||
|
}
|
||||||
|
|
||||||
|
// saveToFile is a helper function to write the current state to the JSON file
|
||||||
|
func (ts *TorrentStorage) saveToFile() error {
|
||||||
|
data, err := json.MarshalIndent(ts.torrents, "", " ")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
return os.WriteFile(filename, data, 0644)
|
return os.WriteFile(ts.filename, data, 0644)
|
||||||
}
|
}
|
||||||
@@ -1,4 +1,4 @@
|
|||||||
package qbit
|
package shared
|
||||||
|
|
||||||
import "goBlack/pkg/debrid"
|
import "goBlack/pkg/debrid"
|
||||||
|
|
||||||
@@ -14,40 +14,40 @@ type BuildInfo struct {
|
|||||||
type AppPreferences struct {
|
type AppPreferences struct {
|
||||||
AddTrackers string `json:"add_trackers"`
|
AddTrackers string `json:"add_trackers"`
|
||||||
AddTrackersEnabled bool `json:"add_trackers_enabled"`
|
AddTrackersEnabled bool `json:"add_trackers_enabled"`
|
||||||
AltDlLimit int64 `json:"alt_dl_limit"`
|
AltDlLimit int `json:"alt_dl_limit"`
|
||||||
AltUpLimit int64 `json:"alt_up_limit"`
|
AltUpLimit int `json:"alt_up_limit"`
|
||||||
AlternativeWebuiEnabled bool `json:"alternative_webui_enabled"`
|
AlternativeWebuiEnabled bool `json:"alternative_webui_enabled"`
|
||||||
AlternativeWebuiPath string `json:"alternative_webui_path"`
|
AlternativeWebuiPath string `json:"alternative_webui_path"`
|
||||||
AnnounceIp string `json:"announce_ip"`
|
AnnounceIp string `json:"announce_ip"`
|
||||||
AnnounceToAllTiers bool `json:"announce_to_all_tiers"`
|
AnnounceToAllTiers bool `json:"announce_to_all_tiers"`
|
||||||
AnnounceToAllTrackers bool `json:"announce_to_all_trackers"`
|
AnnounceToAllTrackers bool `json:"announce_to_all_trackers"`
|
||||||
AnonymousMode bool `json:"anonymous_mode"`
|
AnonymousMode bool `json:"anonymous_mode"`
|
||||||
AsyncIoThreads int64 `json:"async_io_threads"`
|
AsyncIoThreads int `json:"async_io_threads"`
|
||||||
AutoDeleteMode int64 `json:"auto_delete_mode"`
|
AutoDeleteMode int `json:"auto_delete_mode"`
|
||||||
AutoTmmEnabled bool `json:"auto_tmm_enabled"`
|
AutoTmmEnabled bool `json:"auto_tmm_enabled"`
|
||||||
AutorunEnabled bool `json:"autorun_enabled"`
|
AutorunEnabled bool `json:"autorun_enabled"`
|
||||||
AutorunProgram string `json:"autorun_program"`
|
AutorunProgram string `json:"autorun_program"`
|
||||||
BannedIPs string `json:"banned_IPs"`
|
BannedIPs string `json:"banned_IPs"`
|
||||||
BittorrentProtocol int64 `json:"bittorrent_protocol"`
|
BittorrentProtocol int `json:"bittorrent_protocol"`
|
||||||
BypassAuthSubnetWhitelist string `json:"bypass_auth_subnet_whitelist"`
|
BypassAuthSubnetWhitelist string `json:"bypass_auth_subnet_whitelist"`
|
||||||
BypassAuthSubnetWhitelistEnabled bool `json:"bypass_auth_subnet_whitelist_enabled"`
|
BypassAuthSubnetWhitelistEnabled bool `json:"bypass_auth_subnet_whitelist_enabled"`
|
||||||
BypassLocalAuth bool `json:"bypass_local_auth"`
|
BypassLocalAuth bool `json:"bypass_local_auth"`
|
||||||
CategoryChangedTmmEnabled bool `json:"category_changed_tmm_enabled"`
|
CategoryChangedTmmEnabled bool `json:"category_changed_tmm_enabled"`
|
||||||
CheckingMemoryUse int64 `json:"checking_memory_use"`
|
CheckingMemoryUse int `json:"checking_memory_use"`
|
||||||
CreateSubfolderEnabled bool `json:"create_subfolder_enabled"`
|
CreateSubfolderEnabled bool `json:"create_subfolder_enabled"`
|
||||||
CurrentInterfaceAddress string `json:"current_interface_address"`
|
CurrentInterfaceAddress string `json:"current_interface_address"`
|
||||||
CurrentNetworkInterface string `json:"current_network_interface"`
|
CurrentNetworkInterface string `json:"current_network_interface"`
|
||||||
Dht bool `json:"dht"`
|
Dht bool `json:"dht"`
|
||||||
DiskCache int64 `json:"disk_cache"`
|
DiskCache int `json:"disk_cache"`
|
||||||
DiskCacheTtl int64 `json:"disk_cache_ttl"`
|
DiskCacheTtl int `json:"disk_cache_ttl"`
|
||||||
DlLimit int64 `json:"dl_limit"`
|
DlLimit int `json:"dl_limit"`
|
||||||
DontCountSlowTorrents bool `json:"dont_count_slow_torrents"`
|
DontCountSlowTorrents bool `json:"dont_count_slow_torrents"`
|
||||||
DyndnsDomain string `json:"dyndns_domain"`
|
DyndnsDomain string `json:"dyndns_domain"`
|
||||||
DyndnsEnabled bool `json:"dyndns_enabled"`
|
DyndnsEnabled bool `json:"dyndns_enabled"`
|
||||||
DyndnsPassword string `json:"dyndns_password"`
|
DyndnsPassword string `json:"dyndns_password"`
|
||||||
DyndnsService int64 `json:"dyndns_service"`
|
DyndnsService int `json:"dyndns_service"`
|
||||||
DyndnsUsername string `json:"dyndns_username"`
|
DyndnsUsername string `json:"dyndns_username"`
|
||||||
EmbeddedTrackerPort int64 `json:"embedded_tracker_port"`
|
EmbeddedTrackerPort int `json:"embedded_tracker_port"`
|
||||||
EnableCoalesceReadWrite bool `json:"enable_coalesce_read_write"`
|
EnableCoalesceReadWrite bool `json:"enable_coalesce_read_write"`
|
||||||
EnableEmbeddedTracker bool `json:"enable_embedded_tracker"`
|
EnableEmbeddedTracker bool `json:"enable_embedded_tracker"`
|
||||||
EnableMultiConnectionsFromSameIp bool `json:"enable_multi_connections_from_same_ip"`
|
EnableMultiConnectionsFromSameIp bool `json:"enable_multi_connections_from_same_ip"`
|
||||||
@@ -55,10 +55,10 @@ type AppPreferences struct {
|
|||||||
EnablePieceExtentAffinity bool `json:"enable_piece_extent_affinity"`
|
EnablePieceExtentAffinity bool `json:"enable_piece_extent_affinity"`
|
||||||
EnableSuperSeeding bool `json:"enable_super_seeding"`
|
EnableSuperSeeding bool `json:"enable_super_seeding"`
|
||||||
EnableUploadSuggestions bool `json:"enable_upload_suggestions"`
|
EnableUploadSuggestions bool `json:"enable_upload_suggestions"`
|
||||||
Encryption int64 `json:"encryption"`
|
Encryption int `json:"encryption"`
|
||||||
ExportDir string `json:"export_dir"`
|
ExportDir string `json:"export_dir"`
|
||||||
ExportDirFin string `json:"export_dir_fin"`
|
ExportDirFin string `json:"export_dir_fin"`
|
||||||
FilePoolSize int64 `json:"file_pool_size"`
|
FilePoolSize int `json:"file_pool_size"`
|
||||||
IncompleteFilesExt bool `json:"incomplete_files_ext"`
|
IncompleteFilesExt bool `json:"incomplete_files_ext"`
|
||||||
IpFilterEnabled bool `json:"ip_filter_enabled"`
|
IpFilterEnabled bool `json:"ip_filter_enabled"`
|
||||||
IpFilterPath string `json:"ip_filter_path"`
|
IpFilterPath string `json:"ip_filter_path"`
|
||||||
@@ -66,7 +66,7 @@ type AppPreferences struct {
|
|||||||
LimitLanPeers bool `json:"limit_lan_peers"`
|
LimitLanPeers bool `json:"limit_lan_peers"`
|
||||||
LimitTcpOverhead bool `json:"limit_tcp_overhead"`
|
LimitTcpOverhead bool `json:"limit_tcp_overhead"`
|
||||||
LimitUtpRate bool `json:"limit_utp_rate"`
|
LimitUtpRate bool `json:"limit_utp_rate"`
|
||||||
ListenPort int64 `json:"listen_port"`
|
ListenPort int `json:"listen_port"`
|
||||||
Locale string `json:"locale"`
|
Locale string `json:"locale"`
|
||||||
Lsd bool `json:"lsd"`
|
Lsd bool `json:"lsd"`
|
||||||
MailNotificationAuthEnabled bool `json:"mail_notification_auth_enabled"`
|
MailNotificationAuthEnabled bool `json:"mail_notification_auth_enabled"`
|
||||||
@@ -77,79 +77,79 @@ type AppPreferences struct {
|
|||||||
MailNotificationSmtp string `json:"mail_notification_smtp"`
|
MailNotificationSmtp string `json:"mail_notification_smtp"`
|
||||||
MailNotificationSslEnabled bool `json:"mail_notification_ssl_enabled"`
|
MailNotificationSslEnabled bool `json:"mail_notification_ssl_enabled"`
|
||||||
MailNotificationUsername string `json:"mail_notification_username"`
|
MailNotificationUsername string `json:"mail_notification_username"`
|
||||||
MaxActiveDownloads int64 `json:"max_active_downloads"`
|
MaxActiveDownloads int `json:"max_active_downloads"`
|
||||||
MaxActiveTorrents int64 `json:"max_active_torrents"`
|
MaxActiveTorrents int `json:"max_active_torrents"`
|
||||||
MaxActiveUploads int64 `json:"max_active_uploads"`
|
MaxActiveUploads int `json:"max_active_uploads"`
|
||||||
MaxConnec int64 `json:"max_connec"`
|
MaxConnec int `json:"max_connec"`
|
||||||
MaxConnecPerTorrent int64 `json:"max_connec_per_torrent"`
|
MaxConnecPerTorrent int `json:"max_connec_per_torrent"`
|
||||||
MaxRatio int64 `json:"max_ratio"`
|
MaxRatio int `json:"max_ratio"`
|
||||||
MaxRatioAct int64 `json:"max_ratio_act"`
|
MaxRatioAct int `json:"max_ratio_act"`
|
||||||
MaxRatioEnabled bool `json:"max_ratio_enabled"`
|
MaxRatioEnabled bool `json:"max_ratio_enabled"`
|
||||||
MaxSeedingTime int64 `json:"max_seeding_time"`
|
MaxSeedingTime int `json:"max_seeding_time"`
|
||||||
MaxSeedingTimeEnabled bool `json:"max_seeding_time_enabled"`
|
MaxSeedingTimeEnabled bool `json:"max_seeding_time_enabled"`
|
||||||
MaxUploads int64 `json:"max_uploads"`
|
MaxUploads int `json:"max_uploads"`
|
||||||
MaxUploadsPerTorrent int64 `json:"max_uploads_per_torrent"`
|
MaxUploadsPerTorrent int `json:"max_uploads_per_torrent"`
|
||||||
OutgoingPortsMax int64 `json:"outgoing_ports_max"`
|
OutgoingPortsMax int `json:"outgoing_ports_max"`
|
||||||
OutgoingPortsMin int64 `json:"outgoing_ports_min"`
|
OutgoingPortsMin int `json:"outgoing_ports_min"`
|
||||||
Pex bool `json:"pex"`
|
Pex bool `json:"pex"`
|
||||||
PreallocateAll bool `json:"preallocate_all"`
|
PreallocateAll bool `json:"preallocate_all"`
|
||||||
ProxyAuthEnabled bool `json:"proxy_auth_enabled"`
|
ProxyAuthEnabled bool `json:"proxy_auth_enabled"`
|
||||||
ProxyIp string `json:"proxy_ip"`
|
ProxyIp string `json:"proxy_ip"`
|
||||||
ProxyPassword string `json:"proxy_password"`
|
ProxyPassword string `json:"proxy_password"`
|
||||||
ProxyPeerConnections bool `json:"proxy_peer_connections"`
|
ProxyPeerConnections bool `json:"proxy_peer_connections"`
|
||||||
ProxyPort int64 `json:"proxy_port"`
|
ProxyPort int `json:"proxy_port"`
|
||||||
ProxyTorrentsOnly bool `json:"proxy_torrents_only"`
|
ProxyTorrentsOnly bool `json:"proxy_torrents_only"`
|
||||||
ProxyType int64 `json:"proxy_type"`
|
ProxyType int `json:"proxy_type"`
|
||||||
ProxyUsername string `json:"proxy_username"`
|
ProxyUsername string `json:"proxy_username"`
|
||||||
QueueingEnabled bool `json:"queueing_enabled"`
|
QueueingEnabled bool `json:"queueing_enabled"`
|
||||||
RandomPort bool `json:"random_port"`
|
RandomPort bool `json:"random_port"`
|
||||||
RecheckCompletedTorrents bool `json:"recheck_completed_torrents"`
|
RecheckCompletedTorrents bool `json:"recheck_completed_torrents"`
|
||||||
ResolvePeerCountries bool `json:"resolve_peer_countries"`
|
ResolvePeerCountries bool `json:"resolve_peer_countries"`
|
||||||
RssAutoDownloadingEnabled bool `json:"rss_auto_downloading_enabled"`
|
RssAutoDownloadingEnabled bool `json:"rss_auto_downloading_enabled"`
|
||||||
RssMaxArticlesPerFeed int64 `json:"rss_max_articles_per_feed"`
|
RssMaxArticlesPerFeed int `json:"rss_max_articles_per_feed"`
|
||||||
RssProcessingEnabled bool `json:"rss_processing_enabled"`
|
RssProcessingEnabled bool `json:"rss_processing_enabled"`
|
||||||
RssRefreshInterval int64 `json:"rss_refresh_interval"`
|
RssRefreshInterval int `json:"rss_refresh_interval"`
|
||||||
SavePath string `json:"save_path"`
|
SavePath string `json:"save_path"`
|
||||||
SavePathChangedTmmEnabled bool `json:"save_path_changed_tmm_enabled"`
|
SavePathChangedTmmEnabled bool `json:"save_path_changed_tmm_enabled"`
|
||||||
SaveResumeDataInterval int64 `json:"save_resume_data_interval"`
|
SaveResumeDataInterval int `json:"save_resume_data_interval"`
|
||||||
ScanDirs ScanDirs `json:"scan_dirs"`
|
ScanDirs ScanDirs `json:"scan_dirs"`
|
||||||
ScheduleFromHour int64 `json:"schedule_from_hour"`
|
ScheduleFromHour int `json:"schedule_from_hour"`
|
||||||
ScheduleFromMin int64 `json:"schedule_from_min"`
|
ScheduleFromMin int `json:"schedule_from_min"`
|
||||||
ScheduleToHour int64 `json:"schedule_to_hour"`
|
ScheduleToHour int `json:"schedule_to_hour"`
|
||||||
ScheduleToMin int64 `json:"schedule_to_min"`
|
ScheduleToMin int `json:"schedule_to_min"`
|
||||||
SchedulerDays int64 `json:"scheduler_days"`
|
SchedulerDays int `json:"scheduler_days"`
|
||||||
SchedulerEnabled bool `json:"scheduler_enabled"`
|
SchedulerEnabled bool `json:"scheduler_enabled"`
|
||||||
SendBufferLowWatermark int64 `json:"send_buffer_low_watermark"`
|
SendBufferLowWatermark int `json:"send_buffer_low_watermark"`
|
||||||
SendBufferWatermark int64 `json:"send_buffer_watermark"`
|
SendBufferWatermark int `json:"send_buffer_watermark"`
|
||||||
SendBufferWatermarkFactor int64 `json:"send_buffer_watermark_factor"`
|
SendBufferWatermarkFactor int `json:"send_buffer_watermark_factor"`
|
||||||
SlowTorrentDlRateThreshold int64 `json:"slow_torrent_dl_rate_threshold"`
|
SlowTorrentDlRateThreshold int `json:"slow_torrent_dl_rate_threshold"`
|
||||||
SlowTorrentInactiveTimer int64 `json:"slow_torrent_inactive_timer"`
|
SlowTorrentInactiveTimer int `json:"slow_torrent_inactive_timer"`
|
||||||
SlowTorrentUlRateThreshold int64 `json:"slow_torrent_ul_rate_threshold"`
|
SlowTorrentUlRateThreshold int `json:"slow_torrent_ul_rate_threshold"`
|
||||||
SocketBacklogSize int64 `json:"socket_backlog_size"`
|
SocketBacklogSize int `json:"socket_backlog_size"`
|
||||||
StartPausedEnabled bool `json:"start_paused_enabled"`
|
StartPausedEnabled bool `json:"start_paused_enabled"`
|
||||||
StopTrackerTimeout int64 `json:"stop_tracker_timeout"`
|
StopTrackerTimeout int `json:"stop_tracker_timeout"`
|
||||||
TempPath string `json:"temp_path"`
|
TempPath string `json:"temp_path"`
|
||||||
TempPathEnabled bool `json:"temp_path_enabled"`
|
TempPathEnabled bool `json:"temp_path_enabled"`
|
||||||
TorrentChangedTmmEnabled bool `json:"torrent_changed_tmm_enabled"`
|
TorrentChangedTmmEnabled bool `json:"torrent_changed_tmm_enabled"`
|
||||||
UpLimit int64 `json:"up_limit"`
|
UpLimit int `json:"up_limit"`
|
||||||
UploadChokingAlgorithm int64 `json:"upload_choking_algorithm"`
|
UploadChokingAlgorithm int `json:"upload_choking_algorithm"`
|
||||||
UploadSlotsBehavior int64 `json:"upload_slots_behavior"`
|
UploadSlotsBehavior int `json:"upload_slots_behavior"`
|
||||||
Upnp bool `json:"upnp"`
|
Upnp bool `json:"upnp"`
|
||||||
UpnpLeaseDuration int64 `json:"upnp_lease_duration"`
|
UpnpLeaseDuration int `json:"upnp_lease_duration"`
|
||||||
UseHttps bool `json:"use_https"`
|
UseHttps bool `json:"use_https"`
|
||||||
UtpTcpMixedMode int64 `json:"utp_tcp_mixed_mode"`
|
UtpTcpMixedMode int `json:"utp_tcp_mixed_mode"`
|
||||||
WebUiAddress string `json:"web_ui_address"`
|
WebUiAddress string `json:"web_ui_address"`
|
||||||
WebUiBanDuration int64 `json:"web_ui_ban_duration"`
|
WebUiBanDuration int `json:"web_ui_ban_duration"`
|
||||||
WebUiClickjackingProtectionEnabled bool `json:"web_ui_clickjacking_protection_enabled"`
|
WebUiClickjackingProtectionEnabled bool `json:"web_ui_clickjacking_protection_enabled"`
|
||||||
WebUiCsrfProtectionEnabled bool `json:"web_ui_csrf_protection_enabled"`
|
WebUiCsrfProtectionEnabled bool `json:"web_ui_csrf_protection_enabled"`
|
||||||
WebUiDomainList string `json:"web_ui_domain_list"`
|
WebUiDomainList string `json:"web_ui_domain_list"`
|
||||||
WebUiHostHeaderValidationEnabled bool `json:"web_ui_host_header_validation_enabled"`
|
WebUiHostHeaderValidationEnabled bool `json:"web_ui_host_header_validation_enabled"`
|
||||||
WebUiHttpsCertPath string `json:"web_ui_https_cert_path"`
|
WebUiHttpsCertPath string `json:"web_ui_https_cert_path"`
|
||||||
WebUiHttpsKeyPath string `json:"web_ui_https_key_path"`
|
WebUiHttpsKeyPath string `json:"web_ui_https_key_path"`
|
||||||
WebUiMaxAuthFailCount int64 `json:"web_ui_max_auth_fail_count"`
|
WebUiMaxAuthFailCount int `json:"web_ui_max_auth_fail_count"`
|
||||||
WebUiPort int64 `json:"web_ui_port"`
|
WebUiPort int `json:"web_ui_port"`
|
||||||
WebUiSecureCookieEnabled bool `json:"web_ui_secure_cookie_enabled"`
|
WebUiSecureCookieEnabled bool `json:"web_ui_secure_cookie_enabled"`
|
||||||
WebUiSessionTimeout int64 `json:"web_ui_session_timeout"`
|
WebUiSessionTimeout int `json:"web_ui_session_timeout"`
|
||||||
WebUiUpnp bool `json:"web_ui_upnp"`
|
WebUiUpnp bool `json:"web_ui_upnp"`
|
||||||
WebUiUsername string `json:"web_ui_username"`
|
WebUiUsername string `json:"web_ui_username"`
|
||||||
WebUiPassword string `json:"web_ui_password"`
|
WebUiPassword string `json:"web_ui_password"`
|
||||||
@@ -179,44 +179,44 @@ type Torrent struct {
|
|||||||
Availability float64 `json:"availability,omitempty"`
|
Availability float64 `json:"availability,omitempty"`
|
||||||
Category string `json:"category,omitempty"`
|
Category string `json:"category,omitempty"`
|
||||||
Completed int64 `json:"completed"`
|
Completed int64 `json:"completed"`
|
||||||
CompletionOn int64 `json:"completion_on,omitempty"`
|
CompletionOn int `json:"completion_on,omitempty"`
|
||||||
ContentPath string `json:"content_path"`
|
ContentPath string `json:"content_path"`
|
||||||
DlLimit int64 `json:"dl_limit"`
|
DlLimit int `json:"dl_limit"`
|
||||||
Dlspeed int64 `json:"dlspeed"`
|
Dlspeed int `json:"dlspeed"`
|
||||||
Downloaded int64 `json:"downloaded"`
|
Downloaded int64 `json:"downloaded"`
|
||||||
DownloadedSession int64 `json:"downloaded_session"`
|
DownloadedSession int64 `json:"downloaded_session"`
|
||||||
Eta int64 `json:"eta"`
|
Eta int `json:"eta"`
|
||||||
FlPiecePrio bool `json:"f_l_piece_prio,omitempty"`
|
FlPiecePrio bool `json:"f_l_piece_prio,omitempty"`
|
||||||
ForceStart bool `json:"force_start,omitempty"`
|
ForceStart bool `json:"force_start,omitempty"`
|
||||||
Hash string `json:"hash"`
|
Hash string `json:"hash"`
|
||||||
LastActivity int64 `json:"last_activity,omitempty"`
|
LastActivity int64 `json:"last_activity,omitempty"`
|
||||||
MagnetUri string `json:"magnet_uri,omitempty"`
|
MagnetUri string `json:"magnet_uri,omitempty"`
|
||||||
MaxRatio int64 `json:"max_ratio,omitempty"`
|
MaxRatio int `json:"max_ratio,omitempty"`
|
||||||
MaxSeedingTime int64 `json:"max_seeding_time,omitempty"`
|
MaxSeedingTime int `json:"max_seeding_time,omitempty"`
|
||||||
Name string `json:"name,omitempty"`
|
Name string `json:"name,omitempty"`
|
||||||
NumComplete int64 `json:"num_complete,omitempty"`
|
NumComplete int `json:"num_complete,omitempty"`
|
||||||
NumIncomplete int64 `json:"num_incomplete,omitempty"`
|
NumIncomplete int `json:"num_incomplete,omitempty"`
|
||||||
NumLeechs int64 `json:"num_leechs,omitempty"`
|
NumLeechs int `json:"num_leechs,omitempty"`
|
||||||
NumSeeds int64 `json:"num_seeds,omitempty"`
|
NumSeeds int `json:"num_seeds,omitempty"`
|
||||||
Priority int64 `json:"priority,omitempty"`
|
Priority int `json:"priority,omitempty"`
|
||||||
Progress float32 `json:"progress"`
|
Progress float64 `json:"progress"`
|
||||||
Ratio int64 `json:"ratio,omitempty"`
|
Ratio int `json:"ratio,omitempty"`
|
||||||
RatioLimit int64 `json:"ratio_limit,omitempty"`
|
RatioLimit int `json:"ratio_limit,omitempty"`
|
||||||
SavePath string `json:"save_path"`
|
SavePath string `json:"save_path"`
|
||||||
SeedingTimeLimit int64 `json:"seeding_time_limit,omitempty"`
|
SeedingTimeLimit int `json:"seeding_time_limit,omitempty"`
|
||||||
SeenComplete int64 `json:"seen_complete,omitempty"`
|
SeenComplete int64 `json:"seen_complete,omitempty"`
|
||||||
SeqDl bool `json:"seq_dl"`
|
SeqDl bool `json:"seq_dl"`
|
||||||
Size int64 `json:"size,omitempty"`
|
Size int64 `json:"size,omitempty"`
|
||||||
State string `json:"state,omitempty"`
|
State string `json:"state,omitempty"`
|
||||||
SuperSeeding bool `json:"super_seeding"`
|
SuperSeeding bool `json:"super_seeding"`
|
||||||
Tags string `json:"tags,omitempty"`
|
Tags string `json:"tags,omitempty"`
|
||||||
TimeActive int64 `json:"time_active,omitempty"`
|
TimeActive int `json:"time_active,omitempty"`
|
||||||
TotalSize int64 `json:"total_size,omitempty"`
|
TotalSize int64 `json:"total_size,omitempty"`
|
||||||
Tracker string `json:"tracker,omitempty"`
|
Tracker string `json:"tracker,omitempty"`
|
||||||
UpLimit int64 `json:"up_limit,omitempty"`
|
UpLimit int64 `json:"up_limit,omitempty"`
|
||||||
Uploaded int64 `json:"uploaded,omitempty"`
|
Uploaded int64 `json:"uploaded,omitempty"`
|
||||||
UploadedSession int64 `json:"uploaded_session,omitempty"`
|
UploadedSession int64 `json:"uploaded_session,omitempty"`
|
||||||
Upspeed int64 `json:"upspeed,omitempty"`
|
Upspeed int `json:"upspeed,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
func (t *Torrent) IsReady() bool {
|
func (t *Torrent) IsReady() bool {
|
||||||
@@ -229,24 +229,24 @@ type TorrentProperties struct {
|
|||||||
CompletionDate int64 `json:"completion_date,omitempty"`
|
CompletionDate int64 `json:"completion_date,omitempty"`
|
||||||
CreatedBy string `json:"created_by,omitempty"`
|
CreatedBy string `json:"created_by,omitempty"`
|
||||||
CreationDate int64 `json:"creation_date,omitempty"`
|
CreationDate int64 `json:"creation_date,omitempty"`
|
||||||
DlLimit int64 `json:"dl_limit,omitempty"`
|
DlLimit int `json:"dl_limit,omitempty"`
|
||||||
DlSpeed int64 `json:"dl_speed,omitempty"`
|
DlSpeed int `json:"dl_speed,omitempty"`
|
||||||
DlSpeedAvg int64 `json:"dl_speed_avg,omitempty"`
|
DlSpeedAvg int `json:"dl_speed_avg,omitempty"`
|
||||||
Eta int64 `json:"eta,omitempty"`
|
Eta int `json:"eta,omitempty"`
|
||||||
LastSeen int64 `json:"last_seen,omitempty"`
|
LastSeen int64 `json:"last_seen,omitempty"`
|
||||||
NbConnections int64 `json:"nb_connections,omitempty"`
|
NbConnections int `json:"nb_connections,omitempty"`
|
||||||
NbConnectionsLimit int64 `json:"nb_connections_limit,omitempty"`
|
NbConnectionsLimit int `json:"nb_connections_limit,omitempty"`
|
||||||
Peers int64 `json:"peers,omitempty"`
|
Peers int `json:"peers,omitempty"`
|
||||||
PeersTotal int64 `json:"peers_total,omitempty"`
|
PeersTotal int `json:"peers_total,omitempty"`
|
||||||
PieceSize int64 `json:"piece_size,omitempty"`
|
PieceSize int64 `json:"piece_size,omitempty"`
|
||||||
PiecesHave int64 `json:"pieces_have,omitempty"`
|
PiecesHave int64 `json:"pieces_have,omitempty"`
|
||||||
PiecesNum int64 `json:"pieces_num,omitempty"`
|
PiecesNum int64 `json:"pieces_num,omitempty"`
|
||||||
Reannounce int64 `json:"reannounce,omitempty"`
|
Reannounce int `json:"reannounce,omitempty"`
|
||||||
SavePath string `json:"save_path,omitempty"`
|
SavePath string `json:"save_path,omitempty"`
|
||||||
SeedingTime int64 `json:"seeding_time,omitempty"`
|
SeedingTime int `json:"seeding_time,omitempty"`
|
||||||
Seeds int64 `json:"seeds,omitempty"`
|
Seeds int `json:"seeds,omitempty"`
|
||||||
SeedsTotal int64 `json:"seeds_total,omitempty"`
|
SeedsTotal int `json:"seeds_total,omitempty"`
|
||||||
ShareRatio int64 `json:"share_ratio,omitempty"`
|
ShareRatio int `json:"share_ratio,omitempty"`
|
||||||
TimeElapsed int64 `json:"time_elapsed,omitempty"`
|
TimeElapsed int64 `json:"time_elapsed,omitempty"`
|
||||||
TotalDownloaded int64 `json:"total_downloaded,omitempty"`
|
TotalDownloaded int64 `json:"total_downloaded,omitempty"`
|
||||||
TotalDownloadedSession int64 `json:"total_downloaded_session,omitempty"`
|
TotalDownloadedSession int64 `json:"total_downloaded_session,omitempty"`
|
||||||
@@ -254,19 +254,19 @@ type TorrentProperties struct {
|
|||||||
TotalUploaded int64 `json:"total_uploaded,omitempty"`
|
TotalUploaded int64 `json:"total_uploaded,omitempty"`
|
||||||
TotalUploadedSession int64 `json:"total_uploaded_session,omitempty"`
|
TotalUploadedSession int64 `json:"total_uploaded_session,omitempty"`
|
||||||
TotalWasted int64 `json:"total_wasted,omitempty"`
|
TotalWasted int64 `json:"total_wasted,omitempty"`
|
||||||
UpLimit int64 `json:"up_limit,omitempty"`
|
UpLimit int `json:"up_limit,omitempty"`
|
||||||
UpSpeed int64 `json:"up_speed,omitempty"`
|
UpSpeed int `json:"up_speed,omitempty"`
|
||||||
UpSpeedAvg int64 `json:"up_speed_avg,omitempty"`
|
UpSpeedAvg int `json:"up_speed_avg,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
type TorrentFile struct {
|
type TorrentFile struct {
|
||||||
Index int `json:"index,omitempty"`
|
Index int `json:"index,omitempty"`
|
||||||
Name string `json:"name,omitempty"`
|
Name string `json:"name,omitempty"`
|
||||||
Size int64 `json:"size,omitempty"`
|
Size int64 `json:"size,omitempty"`
|
||||||
Progress int64 `json:"progress,omitempty"`
|
Progress int `json:"progress,omitempty"`
|
||||||
Priority int64 `json:"priority,omitempty"`
|
Priority int `json:"priority,omitempty"`
|
||||||
IsSeed bool `json:"is_seed,omitempty"`
|
IsSeed bool `json:"is_seed,omitempty"`
|
||||||
PieceRange []int64 `json:"piece_range,omitempty"`
|
PieceRange []int `json:"piece_range,omitempty"`
|
||||||
Availability float64 `json:"availability,omitempty"`
|
Availability float64 `json:"availability,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
274
pkg/qbit/shared/torrent.go
Normal file
274
pkg/qbit/shared/torrent.go
Normal file
@@ -0,0 +1,274 @@
|
|||||||
|
package shared
|
||||||
|
|
||||||
|
import (
|
||||||
|
"cmp"
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"github.com/google/uuid"
|
||||||
|
"goBlack/common"
|
||||||
|
"goBlack/pkg/arr"
|
||||||
|
"goBlack/pkg/debrid"
|
||||||
|
"io"
|
||||||
|
"mime/multipart"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// All torrent related helpers goes here
|
||||||
|
|
||||||
|
func (q *QBit) AddMagnet(ctx context.Context, url, category string) error {
|
||||||
|
magnet, err := common.GetMagnetFromUrl(url)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("error parsing magnet link: %w", err)
|
||||||
|
}
|
||||||
|
err = q.Process(ctx, magnet, category)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to process torrent: %w", err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (q *QBit) AddTorrent(ctx context.Context, fileHeader *multipart.FileHeader, category string) error {
|
||||||
|
file, _ := fileHeader.Open()
|
||||||
|
defer file.Close()
|
||||||
|
var reader io.Reader = file
|
||||||
|
magnet, err := common.GetMagnetFromFile(reader, fileHeader.Filename)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("error reading file: %s \n %w", fileHeader.Filename, err)
|
||||||
|
}
|
||||||
|
err = q.Process(ctx, magnet, category)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to process torrent: %w", err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (q *QBit) Process(ctx context.Context, magnet *common.Magnet, category string) error {
|
||||||
|
torrent := q.CreateTorrentFromMagnet(magnet, category)
|
||||||
|
a, ok := ctx.Value("arr").(*arr.Arr)
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("arr not found in context")
|
||||||
|
}
|
||||||
|
isSymlink := ctx.Value("isSymlink").(bool)
|
||||||
|
debridTorrent, err := debrid.ProcessTorrent(q.Debrid, magnet, a, isSymlink)
|
||||||
|
if err != nil || debridTorrent == nil {
|
||||||
|
if debridTorrent != nil {
|
||||||
|
go debridTorrent.Delete()
|
||||||
|
}
|
||||||
|
if err == nil {
|
||||||
|
err = fmt.Errorf("failed to process torrent")
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
torrent = q.UpdateTorrentMin(torrent, debridTorrent)
|
||||||
|
q.Storage.AddOrUpdate(torrent)
|
||||||
|
go q.ProcessFiles(torrent, debridTorrent, a, isSymlink) // We can send async for file processing not to delay the response
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (q *QBit) CreateTorrentFromMagnet(magnet *common.Magnet, category string) *Torrent {
|
||||||
|
torrent := &Torrent{
|
||||||
|
ID: uuid.NewString(),
|
||||||
|
Hash: strings.ToLower(magnet.InfoHash),
|
||||||
|
Name: magnet.Name,
|
||||||
|
Size: magnet.Size,
|
||||||
|
Category: category,
|
||||||
|
State: "downloading",
|
||||||
|
MagnetUri: magnet.Link,
|
||||||
|
|
||||||
|
Tracker: "udp://tracker.opentrackr.org:1337",
|
||||||
|
UpLimit: -1,
|
||||||
|
DlLimit: -1,
|
||||||
|
AutoTmm: false,
|
||||||
|
Ratio: 1,
|
||||||
|
RatioLimit: 1,
|
||||||
|
}
|
||||||
|
return torrent
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProcessFiles polls the debrid service until the torrent reaches the
// "downloaded" state, then materializes it locally (via symlink or manual
// file processing), updates the stored torrent record, and triggers an
// arr library refresh. It is intended to run as a goroutine.
//
// NOTE(review): the polling loop has no timeout or cancellation — a torrent
// that never reaches "downloaded" keeps this goroutine alive forever.
func (q *QBit) ProcessFiles(torrent *Torrent, debridTorrent *debrid.Torrent, arr *arr.Arr, isSymlink bool) {
	// Poll every 4 seconds until the debrid service reports completion.
	for debridTorrent.Status != "downloaded" {
		progress := debridTorrent.Progress
		q.logger.Printf("%s Download Progress: %.2f%%", debridTorrent.Debrid.GetName(), progress)
		time.Sleep(4 * time.Second)
		dbT, err := debridTorrent.Debrid.CheckStatus(debridTorrent, isSymlink)
		if err != nil {
			// Status check failed: clean up remotely (best effort), mark the
			// local record as failed, and still refresh arr so it can react.
			q.logger.Printf("Error checking status: %v", err)
			go debridTorrent.Delete()
			q.MarkAsFailed(torrent)
			_ = arr.Refresh()
			return
		}
		// Adopt the freshly fetched state and mirror it onto our record.
		debridTorrent = dbT
		torrent = q.UpdateTorrentMin(torrent, debridTorrent)
	}
	var (
		torrentPath string
		err         error
	)
	debridTorrent.Arr = arr
	// Choose the materialization strategy requested by the caller.
	if isSymlink {
		torrentPath, err = q.ProcessSymlink(debridTorrent)
	} else {
		torrentPath, err = q.processManualFiles(debridTorrent)
	}
	if err != nil {
		q.MarkAsFailed(torrent)
		go debridTorrent.Delete()
		q.logger.Printf("Error: %v", err)
		return
	}
	// Only the final path component is stored; save paths are rebuilt from
	// the download folder and category elsewhere.
	torrent.TorrentPath = filepath.Base(torrentPath)
	q.UpdateTorrent(torrent, debridTorrent)
	_ = arr.Refresh()
}
|
||||||
|
|
||||||
|
// MarkAsFailed flags the torrent as errored ("error" state in qBittorrent
// terms) and persists the change to storage. Returns the same torrent for
// call chaining.
func (q *QBit) MarkAsFailed(t *Torrent) *Torrent {
	t.State = "error"
	q.Storage.AddOrUpdate(t)
	return t
}
|
||||||
|
|
||||||
|
func (q *QBit) UpdateTorrentMin(t *Torrent, debridTorrent *debrid.Torrent) *Torrent {
|
||||||
|
if debridTorrent == nil {
|
||||||
|
return t
|
||||||
|
}
|
||||||
|
|
||||||
|
addedOn, err := time.Parse(time.RFC3339, debridTorrent.Added)
|
||||||
|
if err != nil {
|
||||||
|
addedOn = time.Now()
|
||||||
|
}
|
||||||
|
totalSize := debridTorrent.Bytes
|
||||||
|
progress := cmp.Or(debridTorrent.Progress, 100)
|
||||||
|
progress = progress / 100.0
|
||||||
|
sizeCompleted := int64(float64(totalSize) * progress)
|
||||||
|
|
||||||
|
var speed int
|
||||||
|
if debridTorrent.Speed != 0 {
|
||||||
|
speed = debridTorrent.Speed
|
||||||
|
}
|
||||||
|
var eta int
|
||||||
|
if speed != 0 {
|
||||||
|
eta = int(totalSize-sizeCompleted) / speed
|
||||||
|
}
|
||||||
|
t.ID = debridTorrent.Id
|
||||||
|
t.Name = debridTorrent.Name
|
||||||
|
t.AddedOn = addedOn.Unix()
|
||||||
|
t.DebridTorrent = debridTorrent
|
||||||
|
t.Size = totalSize
|
||||||
|
t.Completed = sizeCompleted
|
||||||
|
t.Downloaded = sizeCompleted
|
||||||
|
t.DownloadedSession = sizeCompleted
|
||||||
|
t.Uploaded = sizeCompleted
|
||||||
|
t.UploadedSession = sizeCompleted
|
||||||
|
t.AmountLeft = totalSize - sizeCompleted
|
||||||
|
t.Progress = progress
|
||||||
|
t.Eta = eta
|
||||||
|
t.Dlspeed = speed
|
||||||
|
t.Upspeed = speed
|
||||||
|
t.SavePath = filepath.Join(q.DownloadFolder, t.Category) + string(os.PathSeparator)
|
||||||
|
t.ContentPath = filepath.Join(t.SavePath, t.Name) + string(os.PathSeparator)
|
||||||
|
return t
|
||||||
|
}
|
||||||
|
|
||||||
|
func (q *QBit) UpdateTorrent(t *Torrent, debridTorrent *debrid.Torrent) *Torrent {
|
||||||
|
db := debridTorrent.Debrid
|
||||||
|
rcLoneMount := db.GetMountPath()
|
||||||
|
if debridTorrent == nil && t.ID != "" {
|
||||||
|
debridTorrent, _ = db.GetTorrent(t.ID)
|
||||||
|
}
|
||||||
|
if debridTorrent == nil {
|
||||||
|
q.logger.Printf("Torrent with ID %s not found in %s", t.ID, db.GetName())
|
||||||
|
return t
|
||||||
|
}
|
||||||
|
if debridTorrent.Status != "downloaded" {
|
||||||
|
debridTorrent, _ = db.GetTorrent(t.ID)
|
||||||
|
}
|
||||||
|
|
||||||
|
if t.TorrentPath == "" {
|
||||||
|
t.TorrentPath = filepath.Base(debridTorrent.GetMountFolder(rcLoneMount))
|
||||||
|
}
|
||||||
|
savePath := filepath.Join(q.DownloadFolder, t.Category) + string(os.PathSeparator)
|
||||||
|
torrentPath := filepath.Join(savePath, t.TorrentPath) + string(os.PathSeparator)
|
||||||
|
t = q.UpdateTorrentMin(t, debridTorrent)
|
||||||
|
t.ContentPath = torrentPath
|
||||||
|
|
||||||
|
if t.IsReady() {
|
||||||
|
t.State = "pausedUP"
|
||||||
|
q.Storage.Update(t)
|
||||||
|
return t
|
||||||
|
}
|
||||||
|
|
||||||
|
ticker := time.NewTicker(2 * time.Second)
|
||||||
|
defer ticker.Stop()
|
||||||
|
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-ticker.C:
|
||||||
|
if t.IsReady() {
|
||||||
|
t.State = "pausedUP"
|
||||||
|
q.Storage.Update(t)
|
||||||
|
return t
|
||||||
|
}
|
||||||
|
updatedT := q.UpdateTorrent(t, debridTorrent)
|
||||||
|
t = updatedT
|
||||||
|
|
||||||
|
case <-time.After(10 * time.Minute): // Add a timeout
|
||||||
|
return t
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ResumeTorrent is a no-op stub kept for qBittorrent API compatibility;
// debrid-backed torrents cannot be resumed, so it always reports success.
func (q *QBit) ResumeTorrent(t *Torrent) bool {
	return true
}
|
||||||
|
|
||||||
|
// PauseTorrent is a no-op stub kept for qBittorrent API compatibility;
// debrid-backed torrents cannot be paused, so it always reports success.
func (q *QBit) PauseTorrent(t *Torrent) bool {
	return true
}
|
||||||
|
|
||||||
|
// RefreshTorrent is a no-op stub kept for qBittorrent API compatibility;
// it always reports success without contacting the debrid service.
func (q *QBit) RefreshTorrent(t *Torrent) bool {
	return true
}
|
||||||
|
|
||||||
|
func (q *QBit) GetTorrentProperties(t *Torrent) *TorrentProperties {
|
||||||
|
return &TorrentProperties{
|
||||||
|
AdditionDate: t.AddedOn,
|
||||||
|
Comment: "Debrid Blackhole <https://github.com/sirrobot01/debrid-blackhole>",
|
||||||
|
CreatedBy: "Debrid Blackhole <https://github.com/sirrobot01/debrid-blackhole>",
|
||||||
|
CreationDate: t.AddedOn,
|
||||||
|
DlLimit: -1,
|
||||||
|
UpLimit: -1,
|
||||||
|
DlSpeed: t.Dlspeed,
|
||||||
|
UpSpeed: t.Upspeed,
|
||||||
|
TotalSize: t.Size,
|
||||||
|
TotalUploaded: t.Uploaded,
|
||||||
|
TotalDownloaded: t.Downloaded,
|
||||||
|
TotalUploadedSession: t.UploadedSession,
|
||||||
|
TotalDownloadedSession: t.DownloadedSession,
|
||||||
|
LastSeen: time.Now().Unix(),
|
||||||
|
NbConnectionsLimit: 100,
|
||||||
|
Peers: 0,
|
||||||
|
PeersTotal: 2,
|
||||||
|
SeedingTime: 1,
|
||||||
|
Seeds: 100,
|
||||||
|
ShareRatio: 100,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (q *QBit) GetTorrentFiles(t *Torrent) []*TorrentFile {
|
||||||
|
files := make([]*TorrentFile, 0)
|
||||||
|
if t.DebridTorrent == nil {
|
||||||
|
return files
|
||||||
|
}
|
||||||
|
for index, file := range t.DebridTorrent.Files {
|
||||||
|
files = append(files, &TorrentFile{
|
||||||
|
Index: index,
|
||||||
|
Name: file.Path,
|
||||||
|
Size: file.Size,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
return files
|
||||||
|
}
|
||||||
@@ -1,29 +1,13 @@
|
|||||||
package qbit
|
package shared
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/json"
|
|
||||||
"goBlack/common"
|
"goBlack/common"
|
||||||
"goBlack/pkg/debrid"
|
"goBlack/pkg/debrid"
|
||||||
"net/http"
|
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
//func generateSID() (string, error) {
|
|
||||||
// bytes := make([]byte, sidLength)
|
|
||||||
// if _, err := rand.Read(bytes); err != nil {
|
|
||||||
// return "", err
|
|
||||||
// }
|
|
||||||
// return hex.EncodeToString(bytes), nil
|
|
||||||
//}
|
|
||||||
|
|
||||||
func JSONResponse(w http.ResponseWriter, data interface{}, code int) {
|
|
||||||
w.Header().Set("Content-Type", "application/json")
|
|
||||||
w.WriteHeader(code)
|
|
||||||
json.NewEncoder(w).Encode(data)
|
|
||||||
}
|
|
||||||
|
|
||||||
func checkFileLoop(wg *sync.WaitGroup, dir string, file debrid.TorrentFile, ready chan<- debrid.TorrentFile) {
|
func checkFileLoop(wg *sync.WaitGroup, dir string, file debrid.TorrentFile, ready chan<- debrid.TorrentFile) {
|
||||||
defer wg.Done()
|
defer wg.Done()
|
||||||
ticker := time.NewTicker(1 * time.Second) // Check every second
|
ticker := time.NewTicker(1 * time.Second) // Check every second
|
||||||
@@ -1,8 +1,7 @@
|
|||||||
package qbit
|
package shared
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"goBlack/pkg/debrid"
|
|
||||||
"time"
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -20,7 +19,7 @@ func (q *QBit) StartRefreshWorker(ctx context.Context) {
|
|||||||
q.logger.Println("Qbit Refresh Worker stopped")
|
q.logger.Println("Qbit Refresh Worker stopped")
|
||||||
return
|
return
|
||||||
case <-refreshTicker.C:
|
case <-refreshTicker.C:
|
||||||
torrents := q.storage.GetAll("", "", nil)
|
torrents := q.Storage.GetAll("", "", nil)
|
||||||
if len(torrents) > 0 {
|
if len(torrents) > 0 {
|
||||||
q.RefreshArrs()
|
q.RefreshArrs()
|
||||||
}
|
}
|
||||||
@@ -29,18 +28,10 @@ func (q *QBit) StartRefreshWorker(ctx context.Context) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (q *QBit) RefreshArrs() {
|
func (q *QBit) RefreshArrs() {
|
||||||
q.arrs.Range(func(key, value interface{}) bool {
|
for _, arr := range q.Arrs.GetAll() {
|
||||||
host, ok := key.(string)
|
err := arr.Refresh()
|
||||||
token, ok2 := value.(string)
|
if err != nil {
|
||||||
if !ok || !ok2 {
|
return
|
||||||
return true
|
|
||||||
}
|
}
|
||||||
arr := &debrid.Arr{
|
}
|
||||||
Name: "",
|
|
||||||
Token: token,
|
|
||||||
Host: host,
|
|
||||||
}
|
|
||||||
q.RefreshArr(arr)
|
|
||||||
return true
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
@@ -1,159 +0,0 @@
|
|||||||
package qbit
|
|
||||||
|
|
||||||
import (
|
|
||||||
"cmp"
|
|
||||||
"goBlack/pkg/debrid"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// All torrent related helpers goes here
|
|
||||||
|
|
||||||
func (q *QBit) MarkAsFailed(t *Torrent) *Torrent {
|
|
||||||
t.State = "error"
|
|
||||||
q.storage.AddOrUpdate(t)
|
|
||||||
return t
|
|
||||||
}
|
|
||||||
|
|
||||||
func (q *QBit) UpdateTorrentMin(t *Torrent, debridTorrent *debrid.Torrent) *Torrent {
|
|
||||||
if debridTorrent == nil {
|
|
||||||
return t
|
|
||||||
}
|
|
||||||
|
|
||||||
addedOn, err := time.Parse(time.RFC3339, debridTorrent.Added)
|
|
||||||
if err != nil {
|
|
||||||
addedOn = time.Now()
|
|
||||||
}
|
|
||||||
totalSize := float64(debridTorrent.Bytes)
|
|
||||||
progress := cmp.Or(debridTorrent.Progress, 100.0)
|
|
||||||
progress = progress / 100.0
|
|
||||||
sizeCompleted := int64(totalSize * progress)
|
|
||||||
|
|
||||||
var speed int64
|
|
||||||
if debridTorrent.Speed != 0 {
|
|
||||||
speed = debridTorrent.Speed
|
|
||||||
}
|
|
||||||
var eta int64
|
|
||||||
if speed != 0 {
|
|
||||||
eta = int64((totalSize - float64(sizeCompleted)) / float64(speed))
|
|
||||||
}
|
|
||||||
t.ID = debridTorrent.Id
|
|
||||||
t.Name = debridTorrent.Name
|
|
||||||
t.AddedOn = addedOn.Unix()
|
|
||||||
t.DebridTorrent = debridTorrent
|
|
||||||
t.Size = int64(totalSize)
|
|
||||||
t.Completed = sizeCompleted
|
|
||||||
t.Downloaded = sizeCompleted
|
|
||||||
t.DownloadedSession = sizeCompleted
|
|
||||||
t.Uploaded = sizeCompleted
|
|
||||||
t.UploadedSession = sizeCompleted
|
|
||||||
t.AmountLeft = int64(totalSize) - sizeCompleted
|
|
||||||
t.Progress = float32(progress)
|
|
||||||
t.Eta = eta
|
|
||||||
t.Dlspeed = speed
|
|
||||||
t.Upspeed = speed
|
|
||||||
t.SavePath = filepath.Join(q.DownloadFolder, t.Category) + string(os.PathSeparator)
|
|
||||||
t.ContentPath = filepath.Join(t.SavePath, t.Name) + string(os.PathSeparator)
|
|
||||||
return t
|
|
||||||
}
|
|
||||||
|
|
||||||
func (q *QBit) UpdateTorrent(t *Torrent, debridTorrent *debrid.Torrent) *Torrent {
|
|
||||||
db := debridTorrent.Debrid
|
|
||||||
rcLoneMount := db.GetMountPath()
|
|
||||||
if debridTorrent == nil && t.ID != "" {
|
|
||||||
debridTorrent, _ = db.GetTorrent(t.ID)
|
|
||||||
}
|
|
||||||
if debridTorrent == nil {
|
|
||||||
q.logger.Printf("Torrent with ID %s not found in %s", t.ID, db.GetName())
|
|
||||||
return t
|
|
||||||
}
|
|
||||||
if debridTorrent.Status != "downloaded" {
|
|
||||||
debridTorrent, _ = db.GetTorrent(t.ID)
|
|
||||||
}
|
|
||||||
|
|
||||||
if t.TorrentPath == "" {
|
|
||||||
t.TorrentPath = filepath.Base(debridTorrent.GetMountFolder(rcLoneMount))
|
|
||||||
}
|
|
||||||
savePath := filepath.Join(q.DownloadFolder, t.Category) + string(os.PathSeparator)
|
|
||||||
torrentPath := filepath.Join(savePath, t.TorrentPath) + string(os.PathSeparator)
|
|
||||||
t = q.UpdateTorrentMin(t, debridTorrent)
|
|
||||||
t.ContentPath = torrentPath
|
|
||||||
|
|
||||||
if t.IsReady() {
|
|
||||||
t.State = "pausedUP"
|
|
||||||
q.storage.Update(t)
|
|
||||||
return t
|
|
||||||
}
|
|
||||||
|
|
||||||
ticker := time.NewTicker(2 * time.Second)
|
|
||||||
defer ticker.Stop()
|
|
||||||
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case <-ticker.C:
|
|
||||||
if t.IsReady() {
|
|
||||||
t.State = "pausedUP"
|
|
||||||
q.storage.Update(t)
|
|
||||||
return t
|
|
||||||
}
|
|
||||||
updatedT := q.UpdateTorrent(t, debridTorrent)
|
|
||||||
t = updatedT
|
|
||||||
|
|
||||||
case <-time.After(10 * time.Minute): // Add a timeout
|
|
||||||
return t
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (q *QBit) ResumeTorrent(t *Torrent) bool {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
func (q *QBit) PauseTorrent(t *Torrent) bool {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
func (q *QBit) RefreshTorrent(t *Torrent) bool {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
func (q *QBit) GetTorrentProperties(t *Torrent) *TorrentProperties {
|
|
||||||
return &TorrentProperties{
|
|
||||||
AdditionDate: t.AddedOn,
|
|
||||||
Comment: "Debrid Blackhole <https://github.com/sirrobot01/debrid-blackhole>",
|
|
||||||
CreatedBy: "Debrid Blackhole <https://github.com/sirrobot01/debrid-blackhole>",
|
|
||||||
CreationDate: t.AddedOn,
|
|
||||||
DlLimit: -1,
|
|
||||||
UpLimit: -1,
|
|
||||||
DlSpeed: t.Dlspeed,
|
|
||||||
UpSpeed: t.Upspeed,
|
|
||||||
TotalSize: t.Size,
|
|
||||||
TotalUploaded: t.Uploaded,
|
|
||||||
TotalDownloaded: t.Downloaded,
|
|
||||||
TotalUploadedSession: t.UploadedSession,
|
|
||||||
TotalDownloadedSession: t.DownloadedSession,
|
|
||||||
LastSeen: time.Now().Unix(),
|
|
||||||
NbConnectionsLimit: 100,
|
|
||||||
Peers: 0,
|
|
||||||
PeersTotal: 2,
|
|
||||||
SeedingTime: 1,
|
|
||||||
Seeds: 100,
|
|
||||||
ShareRatio: 100,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (q *QBit) GetTorrentFiles(t *Torrent) []*TorrentFile {
|
|
||||||
files := make([]*TorrentFile, 0)
|
|
||||||
if t.DebridTorrent == nil {
|
|
||||||
return files
|
|
||||||
}
|
|
||||||
for index, file := range t.DebridTorrent.Files {
|
|
||||||
files = append(files, &TorrentFile{
|
|
||||||
Index: index,
|
|
||||||
Name: file.Path,
|
|
||||||
Size: file.Size,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
return files
|
|
||||||
}
|
|
||||||
Reference in New Issue
Block a user