From 16c825d5bacf8c0bb8c99da3b2e95e9c90260913 Mon Sep 17 00:00:00 2001
From: Mukhtar Akere <32229538+sirrobot01@users.noreply.github.com>
Date: Tue, 4 Feb 2025 11:07:19 +0100
Subject: [PATCH] feat: restructure code; add size and ext checks (#39)
- Refactor code
- Add file size and extension checkers
- Change repair workflow to use zurg
---
README.md | 84 ++---
cmd/main.go | 24 +-
common/regex.go | 20 +-
doc/config.full.json | 79 +++++
{common => internal/config}/config.go | 159 ++++++---
internal/config/misc.go | 75 +++++
{common => internal/logger}/logger.go | 2 +-
{common => internal/request}/request.go | 23 +-
internal/utils/file.go | 2 +
common/utils.go => internal/utils/magnet.go | 53 +--
internal/utils/misc.go | 15 +
internal/utils/regex.go | 49 +++
main.go | 12 +-
pkg/arr/arr.go | 20 +-
pkg/arr/content.go | 104 +++++-
pkg/arr/refresh.go | 4 +-
pkg/arr/repair.go | 342 -------------------
pkg/arr/structs.go | 14 +-
pkg/debrid/alldebrid.go | 20 +-
pkg/debrid/debrid.go | 24 +-
pkg/debrid/debrid_link.go | 16 +-
pkg/debrid/realdebrid.go | 21 +-
pkg/debrid/torbox.go | 19 +-
pkg/debrid/torrent.go | 6 +-
pkg/proxy/proxy.go | 20 +-
pkg/qbit/main.go | 6 +-
pkg/qbit/server/import.go | 4 +-
pkg/qbit/server/qbit_handlers.go | 74 ++--
pkg/qbit/server/qbit_routes.go | 2 +-
pkg/qbit/server/server.go | 31 +-
pkg/qbit/server/templates/config.html | 48 +++
pkg/qbit/server/ui_handlers.go | 88 ++---
pkg/qbit/server/ui_routes.go | 2 +-
pkg/qbit/shared/qbit.go | 11 +-
pkg/qbit/shared/torrent.go | 16 +-
pkg/qbit/shared/utils.go | 5 +-
pkg/repair/{utils.go => misc.go} | 61 ++++
pkg/repair/repair.go | 352 ++++++++++++++++++--
38 files changed, 1138 insertions(+), 769 deletions(-)
create mode 100644 doc/config.full.json
rename {common => internal/config}/config.go (52%)
create mode 100644 internal/config/misc.go
rename {common => internal/logger}/logger.go (99%)
rename {common => internal/request}/request.go (85%)
create mode 100644 internal/utils/file.go
rename common/utils.go => internal/utils/magnet.go (83%)
create mode 100644 internal/utils/misc.go
create mode 100644 internal/utils/regex.go
delete mode 100644 pkg/arr/repair.go
rename pkg/repair/{utils.go => misc.go} (60%)
diff --git a/README.md b/README.md
index fcf7536..2dc83c3 100644
--- a/README.md
+++ b/README.md
@@ -102,95 +102,49 @@ Download the binary from the releases page and run it with the config file.
- Test
- Save
-#### Sample Config
+#### Basic Sample Config
This is the default config file. You can create a `config.json` file in the root directory of the project or mount it in the docker-compose file.
```json
{
"debrids": [
- {
- "name": "torbox",
- "host": "https://api.torbox.app/v1",
- "api_key": "torbox_api_key",
- "folder": "/mnt/remote/torbox/torrents/",
- "rate_limit": "250/minute",
- "download_uncached": false,
- "check_cached": true
- },
{
"name": "realdebrid",
"host": "https://api.real-debrid.com/rest/1.0",
"api_key": "realdebrid_key",
- "folder": "/mnt/remote/realdebrid/__all__/",
- "rate_limit": "250/minute",
- "download_uncached": false,
- "check_cached": false
- },
- {
- "name": "debridlink",
- "host": "https://debrid-link.com/api/v2",
- "api_key": "debridlink_key",
- "folder": "/mnt/remote/debridlink/torrents/",
- "rate_limit": "250/minute",
- "download_uncached": false,
- "check_cached": false
- },
- {
- "name": "alldebrid",
- "host": "http://api.alldebrid.com/v4.1",
- "api_key": "alldebrid_key",
- "folder": "/mnt/remote/alldebrid/magnet/",
- "rate_limit": "600/minute",
- "download_uncached": false,
- "check_cached": false
+ "folder": "/mnt/remote/realdebrid/__all__/"
}
],
"proxy": {
"enabled": true,
"port": "8100",
- "log_level": "info",
"username": "username",
- "password": "password",
- "cached_only": true
+ "password": "password"
},
- "max_cache_size": 1000,
"qbittorrent": {
"port": "8282",
"download_folder": "/mnt/symlinks/",
- "categories": ["sonarr", "radarr"],
- "refresh_interval": 5,
- "log_level": "info"
+ "categories": ["sonarr", "radarr"]
},
- "arrs": [
- {
- "name": "sonarr",
- "host": "http://host:8989",
- "token": "arr_key"
- },
- {
- "name": "radarr",
- "host": "http://host:7878",
- "token": "arr_key"
- }
- ],
"repair": {
"enabled": false,
"interval": "12h",
"run_on_start": false
- },
- "log_level": "info"
+ }
}
```
-#### Config Notes
+The full config is [here](doc/config.full.json)
-##### Log Level
-- The `log_level` key is used to set the log level of the application. The default value is `info`
-- The log level can be set to `debug`, `info`, `warn`, `error`
-##### Max Cache Size
-- The `max_cache_size` key is used to set the maximum number of infohashes that can be stored in the availability cache. This is used to prevent round trip to the debrid provider when using the proxy/Qbittorrent
-- The default value is `1000`
-- The cache is stored in memory and is not persisted on restart
+
+
+
+ Click Here for the full config notes
+
+
+- The `log_level` key is used to set the log level of the application. The default value is `info`. The log level can be set to `debug`, `info`, `warn`, `error`
+- The `max_cache_size` key is used to set the maximum number of infohashes that can be stored in the availability cache. This is used to prevent round trip to the debrid provider when using the proxy/Qbittorrent. The default value is `1000`
+- The `allowed_file_types` key is an array of allowed file types that can be downloaded. By default, all movie, tv show and music file types are allowed
##### Debrid Config
- The `debrids` key is an array of debrid providers
@@ -202,11 +156,13 @@ This is the default config file. You can create a `config.json` file in the root
- The `download_uncached` bool key is used to download uncached torrents(disabled by default)
- The `check_cached` bool key is used to check if the torrent is cached(disabled by default)
-##### Repair Config (**NEW**)
+##### Repair Config (**BETA**)
The `repair` key is used to enable the repair worker
- The `enabled` key is used to enable the repair worker
- The `interval` key is the interval in either minutes, seconds, hours, days. Use any of this format, e.g 12:00, 5:00, 1h, 1d, 1m, 1s.
- The `run_on_start` key is used to run the repair worker on start
+- The `zurg_url` is the url of the zurg server. Typically `http://localhost:9999` or `http://zurg:9999`
+- The `skip_deletion` key: set this to true if you don't want the broken files to be deleted
##### Proxy Config
- The `enabled` key is used to enable the proxy
@@ -230,6 +186,8 @@ This is particularly useful if you want to use the Repair tool without using Qbi
- The `host` key is the host of the Arr
- The `token` key is the API token of the Arr
+
+
### Proxy
@@ -243,6 +201,8 @@ The proxy listens on the port `8181` by default. The username and password can b
The repair worker is a simple worker that checks for missing files in the Arrs(Sonarr, Radarr, etc). It's particularly useful for files either deleted by the Debrid provider or files with bad symlinks.
+**Note**: If you're using zurg, set the `zurg_url` under repair config. This will speed up the repair process exponentially.
+
- Search for broken symlinks/files
- Search for missing files
- Search for deleted/unreadable files
diff --git a/cmd/main.go b/cmd/main.go
index db8a262..26f3bf7 100644
--- a/cmd/main.go
+++ b/cmd/main.go
@@ -1,9 +1,8 @@
package cmd
import (
- "cmp"
"context"
- "github.com/sirrobot01/debrid-blackhole/common"
+ "github.com/sirrobot01/debrid-blackhole/internal/config"
"github.com/sirrobot01/debrid-blackhole/pkg/arr"
"github.com/sirrobot01/debrid-blackhole/pkg/debrid"
"github.com/sirrobot01/debrid-blackhole/pkg/proxy"
@@ -13,39 +12,40 @@ import (
"sync"
)
-func Start(ctx context.Context, config *common.Config) error {
- maxCacheSize := cmp.Or(config.MaxCacheSize, 1000)
+func Start(ctx context.Context) error {
+ cfg := config.GetConfig()
- deb := debrid.NewDebrid(config.Debrids, maxCacheSize)
- arrs := arr.NewStorage(config.Arrs)
+ deb := debrid.NewDebrid()
+ arrs := arr.NewStorage()
+ _repair := repair.NewRepair(deb.Get(), arrs)
var wg sync.WaitGroup
errChan := make(chan error, 2)
- if config.Proxy.Enabled {
+ if cfg.Proxy.Enabled {
wg.Add(1)
go func() {
defer wg.Done()
- if err := proxy.NewProxy(*config, deb).Start(ctx); err != nil {
+ if err := proxy.NewProxy(deb).Start(ctx); err != nil {
errChan <- err
}
}()
}
- if config.QBitTorrent.Port != "" {
+ if cfg.QBitTorrent.Port != "" {
wg.Add(1)
go func() {
defer wg.Done()
- if err := qbit.Start(ctx, config, deb, arrs); err != nil {
+ if err := qbit.Start(ctx, deb, arrs, _repair); err != nil {
errChan <- err
}
}()
}
- if config.Repair.Enabled {
+ if cfg.Repair.Enabled {
wg.Add(1)
go func() {
defer wg.Done()
- if err := repair.Start(ctx, config, arrs); err != nil {
+ if err := _repair.Start(ctx); err != nil {
log.Printf("Error during repair: %v", err)
}
}()
diff --git a/common/regex.go b/common/regex.go
index 0749ea3..e39ba49 100644
--- a/common/regex.go
+++ b/common/regex.go
@@ -7,12 +7,12 @@ import (
)
var (
- VIDEOMATCH = "(?i)(\\.)(YUV|WMV|WEBM|VOB|VIV|SVI|ROQ|RMVB|RM|OGV|OGG|NSV|MXF|MPG|MPEG|M2V|MP2|MPE|MPV|MP4|M4P|M4V|MOV|QT|MNG|MKV|FLV|DRC|AVI|ASF|AMV|MKA|F4V|3GP|3G2|DIVX|X264|X265)$"
- MUSICMATCH = "(?i)(\\.)(?:MP3|WAV|FLAC|AAC|OGG|WMA|AIFF|ALAC|M4A|APE|AC3|DTS|M4P|MID|MIDI|MKA|MP2|MPA|RA|VOC|WV|AMR)$"
- SUBMATCH = "(?i)(\\.)(SRT|SUB|SBV|ASS|VTT|TTML|DFXP|STL|SCC|CAP|SMI|TTXT|TDS|USF|JSS|SSA|PSB|RT|LRC|SSB)$"
- SAMPLEMATCH = `(?i)(^|[\\/]|[._-])(sample|trailer|thumb)s?([._-]|$)`
+ VIDEOMATCH = "(?i)(\\.)(YUV|WMV|WEBM|VOB|VIV|SVI|ROQ|RMVB|RM|OGV|OGG|NSV|MXF|MPG|MPEG|M2V|MP2|MPE|MPV|MP4|M4P|M4V|MOV|QT|MNG|MKV|FLV|DRC|AVI|ASF|AMV|MKA|F4V|3GP|3G2|DIVX|X264|X265)$"
+ MUSICMATCH = "(?i)(\\.)(?:MP3|WAV|FLAC|AAC|OGG|WMA|AIFF|ALAC|M4A|APE|AC3|DTS|M4P|MID|MIDI|MKA|MP2|MPA|RA|VOC|WV|AMR)$"
)
+var SAMPLEMATCH = `(?i)(^|[\\/]|[._-])(sample|trailer|thumb)s?([._-]|$)`
+
func RegexMatch(regex string, value string) bool {
re := regexp.MustCompile(regex)
return re.MatchString(value)
@@ -37,7 +37,7 @@ func RemoveInvalidChars(value string) string {
}
func RemoveExtension(value string) string {
- re := regexp.MustCompile(VIDEOMATCH + "|" + SUBMATCH + "|" + SAMPLEMATCH + "|" + MUSICMATCH)
+ re := regexp.MustCompile(VIDEOMATCH + "|" + SAMPLEMATCH + "|" + MUSICMATCH)
// Find the last index of the matched extension
loc := re.FindStringIndex(value)
@@ -47,13 +47,3 @@ func RemoveExtension(value string) string {
return value
}
}
-
-func RegexFind(regex string, value string) string {
- re := regexp.MustCompile(regex)
- match := re.FindStringSubmatch(value)
- if len(match) > 0 {
- return match[0]
- } else {
- return ""
- }
-}
diff --git a/doc/config.full.json b/doc/config.full.json
new file mode 100644
index 0000000..ac175b0
--- /dev/null
+++ b/doc/config.full.json
@@ -0,0 +1,79 @@
+{
+ "debrids": [
+ {
+ "name": "torbox",
+ "host": "https://api.torbox.app/v1",
+ "api_key": "torbox_api_key",
+ "folder": "/mnt/remote/torbox/torrents/",
+ "rate_limit": "250/minute",
+ "download_uncached": false,
+ "check_cached": true
+ },
+ {
+ "name": "realdebrid",
+ "host": "https://api.real-debrid.com/rest/1.0",
+ "api_key": "realdebrid_key",
+ "folder": "/mnt/remote/realdebrid/__all__/",
+ "rate_limit": "250/minute",
+ "download_uncached": false,
+ "check_cached": false
+ },
+ {
+ "name": "debridlink",
+ "host": "https://debrid-link.com/api/v2",
+ "api_key": "debridlink_key",
+ "folder": "/mnt/remote/debridlink/torrents/",
+ "rate_limit": "250/minute",
+ "download_uncached": false,
+ "check_cached": false
+ },
+ {
+ "name": "alldebrid",
+ "host": "http://api.alldebrid.com/v4.1",
+ "api_key": "alldebrid_key",
+ "folder": "/mnt/remote/alldebrid/magnet/",
+ "rate_limit": "600/minute",
+ "download_uncached": false,
+ "check_cached": false
+ }
+ ],
+ "proxy": {
+ "enabled": true,
+ "port": "8100",
+ "log_level": "info",
+ "username": "username",
+ "password": "password",
+ "cached_only": true
+ },
+ "max_cache_size": 1000,
+ "qbittorrent": {
+ "port": "8282",
+ "download_folder": "/mnt/symlinks/",
+ "categories": ["sonarr", "radarr"],
+ "refresh_interval": 5,
+ "log_level": "info"
+ },
+ "arrs": [
+ {
+ "name": "sonarr",
+ "host": "http://host:8989",
+ "token": "arr_key"
+ },
+ {
+ "name": "radarr",
+ "host": "http://host:7878",
+ "token": "arr_key"
+ }
+ ],
+ "repair": {
+ "enabled": false,
+ "interval": "12h",
+ "run_on_start": false,
+ "zurg_url": "http://zurg:9999",
+ "skip_deletion": false
+ },
+ "log_level": "info",
+ "min_file_size": "",
+ "max_file_size": "",
+ "allowed_file_types": []
+}
\ No newline at end of file
diff --git a/common/config.go b/internal/config/config.go
similarity index 52%
rename from common/config.go
rename to internal/config/config.go
index 1a299a3..d67b40c 100644
--- a/common/config.go
+++ b/internal/config/config.go
@@ -1,15 +1,20 @@
-package common
+package config
import (
"encoding/json"
"errors"
"fmt"
- "log"
"os"
"sync"
)
-type DebridConfig struct {
+var (
+ instance *Config
+ once sync.Once
+ configPath string
+)
+
+type Debrid struct {
Name string `json:"name"`
Host string `json:"host"`
APIKey string `json:"api_key"`
@@ -19,7 +24,7 @@ type DebridConfig struct {
RateLimit string `json:"rate_limit"` // 200/minute or 10/second
}
-type ProxyConfig struct {
+type Proxy struct {
Port string `json:"port"`
Enabled bool `json:"enabled"`
LogLevel string `json:"log_level"`
@@ -28,7 +33,7 @@ type ProxyConfig struct {
CachedOnly *bool `json:"cached_only"`
}
-type QBitTorrentConfig struct {
+type QBitTorrent struct {
Username string `json:"username"`
Password string `json:"password"`
Port string `json:"port"`
@@ -38,30 +43,65 @@ type QBitTorrentConfig struct {
RefreshInterval int `json:"refresh_interval"`
}
-type ArrConfig struct {
+type Arr struct {
Name string `json:"name"`
Host string `json:"host"`
Token string `json:"token"`
}
-type RepairConfig struct {
- Enabled bool `json:"enabled"`
- Interval string `json:"interval"`
- RunOnStart bool `json:"run_on_start"`
+type Repair struct {
+ Enabled bool `json:"enabled"`
+ Interval string `json:"interval"`
+ RunOnStart bool `json:"run_on_start"`
+ ZurgURL string `json:"zurg_url"`
+ SkipDeletion bool `json:"skip_deletion"`
}
type Config struct {
- LogLevel string `json:"log_level"`
- Debrid DebridConfig `json:"debrid"`
- Debrids []DebridConfig `json:"debrids"`
- Proxy ProxyConfig `json:"proxy"`
- MaxCacheSize int `json:"max_cache_size"`
- QBitTorrent QBitTorrentConfig `json:"qbittorrent"`
- Arrs []ArrConfig `json:"arrs"`
- Repair RepairConfig `json:"repair"`
+ LogLevel string `json:"log_level"`
+ Debrid Debrid `json:"debrid"`
+ Debrids []Debrid `json:"debrids"`
+ Proxy Proxy `json:"proxy"`
+ MaxCacheSize int `json:"max_cache_size"`
+ QBitTorrent QBitTorrent `json:"qbittorrent"`
+ Arrs []Arr `json:"arrs"`
+ Repair Repair `json:"repair"`
+ AllowedExt []string `json:"allowed_file_types"`
+ MinFileSize string `json:"min_file_size"` // Minimum file size to download, 10MB, 1GB, etc
+ MaxFileSize string `json:"max_file_size"` // Maximum file size to download (0 means no limit)
}
-func validateDebrids(debrids []DebridConfig) error {
+func (c *Config) loadConfig() error {
+ // Load the config file
+ if configPath == "" {
+ return fmt.Errorf("config path not set")
+ }
+ file, err := os.ReadFile(configPath)
+ if err != nil {
+ return err
+ }
+
+ if err := json.Unmarshal(file, &c); err != nil {
+ return fmt.Errorf("error unmarshaling config: %w", err)
+ }
+
+ if c.Debrid.Name != "" {
+ c.Debrids = append(c.Debrids, c.Debrid)
+ }
+
+ if len(c.AllowedExt) == 0 {
+ c.AllowedExt = getDefaultExtensions()
+ }
+
+ // Validate the config
+ //if err := validateConfig(c); err != nil {
+ // return nil, err
+ //}
+
+ return nil
+}
+
+func validateDebrids(debrids []Debrid) error {
if len(debrids) == 0 {
return errors.New("no debrids configured")
}
@@ -105,7 +145,7 @@ func validateDebrids(debrids []DebridConfig) error {
return nil
}
-func validateQbitTorrent(config *QBitTorrentConfig) error {
+func validateQbitTorrent(config *QBitTorrent) error {
if config.DownloadFolder == "" {
return errors.New("qbittorent download folder is required")
}
@@ -137,36 +177,53 @@ func validateConfig(config *Config) error {
return nil
}
-func LoadConfig(path string) (*Config, error) {
- // Load the config file
- file, err := os.Open(path)
- if err != nil {
- return nil, err
- }
- defer func(file *os.File) {
- err := file.Close()
- if err != nil {
- log.Fatal(err)
- }
- }(file)
-
- decoder := json.NewDecoder(file)
- config := &Config{}
- err = decoder.Decode(config)
- if err != nil {
- return nil, err
- }
-
- if config.Debrid.Name != "" {
- config.Debrids = append(config.Debrids, config.Debrid)
- }
-
- // Validate the config
- //if err := validateConfig(config); err != nil {
- // return nil, err
- //}
-
- return config, nil
+func SetConfigPath(path string) {
+ configPath = path
}
-var CONFIG *Config = nil
+func GetConfig() *Config {
+ once.Do(func() {
+ instance = &Config{} // Initialize instance first
+ if err := instance.loadConfig(); err != nil {
+ panic(err)
+ }
+ })
+ return instance
+}
+
+func (c *Config) GetMinFileSize() int64 {
+ // 0 means no limit
+ if c.MinFileSize == "" {
+ return 0
+ }
+ s, err := parseSize(c.MinFileSize)
+ if err != nil {
+ return 0
+ }
+ return s
+}
+
+func (c *Config) GetMaxFileSize() int64 {
+ // 0 means no limit
+ if c.MaxFileSize == "" {
+ return 0
+ }
+ s, err := parseSize(c.MaxFileSize)
+ if err != nil {
+ return 0
+ }
+ return s
+}
+
+func (c *Config) IsSizeAllowed(size int64) bool {
+ if size == 0 {
+ return true // Maybe the debrid hasn't reported the size yet
+ }
+ if c.GetMinFileSize() > 0 && size < c.GetMinFileSize() {
+ return false
+ }
+ if c.GetMaxFileSize() > 0 && size > c.GetMaxFileSize() {
+ return false
+ }
+ return true
+}
diff --git a/internal/config/misc.go b/internal/config/misc.go
new file mode 100644
index 0000000..8af24c1
--- /dev/null
+++ b/internal/config/misc.go
@@ -0,0 +1,75 @@
+package config
+
+import (
+ "path/filepath"
+ "sort"
+ "strconv"
+ "strings"
+)
+
+func (c *Config) IsAllowedFile(filename string) bool {
+ ext := strings.ToLower(filepath.Ext(filename))
+ if ext == "" {
+ return false
+ }
+ // Remove the leading dot
+ ext = ext[1:]
+
+ for _, allowed := range c.AllowedExt {
+ if ext == allowed {
+ return true
+ }
+ }
+ return false
+}
+
+func getDefaultExtensions() []string {
+ videoExts := strings.Split("YUV,WMV,WEBM,VOB,VIV,SVI,ROQ,RMVB,RM,OGV,OGG,NSV,MXF,MPG,MPEG,M2V,MP2,MPE,MPV,MP4,M4P,M4V,MOV,QT,MNG,MKV,FLV,DRC,AVI,ASF,AMV,MKA,F4V,3GP,3G2,DIVX,X264,X265", ",")
+ musicExts := strings.Split("MP3,WAV,FLAC,AAC,OGG,WMA,AIFF,ALAC,M4A,APE,AC3,DTS,M4P,MID,MIDI,MKA,MP2,MPA,RA,VOC,WV,AMR", ",")
+
+ // Combine both slices
+ allExts := append(videoExts, musicExts...)
+
+ // Convert to lowercase
+ for i, ext := range allExts {
+ allExts[i] = strings.ToLower(ext)
+ }
+
+ // Remove duplicates
+ seen := make(map[string]bool)
+ var unique []string
+
+ for _, ext := range allExts {
+ if !seen[ext] {
+ seen[ext] = true
+ unique = append(unique, ext)
+ }
+ }
+
+ sort.Strings(unique)
+ return unique
+}
+
+func parseSize(sizeStr string) (int64, error) {
+ sizeStr = strings.ToUpper(strings.TrimSpace(sizeStr))
+
+ // Absolute size-based cache
+ multiplier := 1.0
+ if strings.HasSuffix(sizeStr, "GB") {
+ multiplier = 1024 * 1024 * 1024
+ sizeStr = strings.TrimSuffix(sizeStr, "GB")
+ } else if strings.HasSuffix(sizeStr, "MB") {
+ multiplier = 1024 * 1024
+ sizeStr = strings.TrimSuffix(sizeStr, "MB")
+ } else if strings.HasSuffix(sizeStr, "KB") {
+ multiplier = 1024
+ sizeStr = strings.TrimSuffix(sizeStr, "KB")
+ }
+
+ size, err := strconv.ParseFloat(sizeStr, 64)
+ if err != nil {
+ return 0, err
+ }
+
+ return int64(size * multiplier), nil
+}
diff --git a/common/logger.go b/internal/logger/logger.go
similarity index 99%
rename from common/logger.go
rename to internal/logger/logger.go
index 49cac90..24990a3 100644
--- a/common/logger.go
+++ b/internal/logger/logger.go
@@ -1,4 +1,4 @@
-package common
+package logger
import (
"fmt"
diff --git a/common/request.go b/internal/request/request.go
similarity index 85%
rename from common/request.go
rename to internal/request/request.go
index a3b8cf9..fd1cb6a 100644
--- a/common/request.go
+++ b/internal/request/request.go
@@ -1,4 +1,4 @@
-package common
+package request
import (
"crypto/tls"
@@ -8,11 +8,32 @@ import (
"io"
"log"
"net/http"
+ "net/url"
"regexp"
"strconv"
+ "strings"
"time"
)
+func JoinURL(base string, paths ...string) (string, error) {
+ // Split the last path component to separate query parameters
+ lastPath := paths[len(paths)-1]
+ parts := strings.Split(lastPath, "?")
+ paths[len(paths)-1] = parts[0]
+
+ joined, err := url.JoinPath(base, paths...)
+ if err != nil {
+ return "", err
+ }
+
+ // Add back query parameters if they exist
+ if len(parts) > 1 {
+ return joined + "?" + parts[1], nil
+ }
+
+ return joined, nil
+}
+
type RLHTTPClient struct {
client *http.Client
Ratelimiter *rate.Limiter
diff --git a/internal/utils/file.go b/internal/utils/file.go
new file mode 100644
index 0000000..9008f48
--- /dev/null
+++ b/internal/utils/file.go
@@ -0,0 +1,2 @@
+package utils
+
diff --git a/common/utils.go b/internal/utils/magnet.go
similarity index 83%
rename from common/utils.go
rename to internal/utils/magnet.go
index e458edd..accad90 100644
--- a/common/utils.go
+++ b/internal/utils/magnet.go
@@ -1,4 +1,4 @@
-package common
+package utils
import (
"bufio"
@@ -7,9 +7,9 @@ import (
"encoding/base32"
"encoding/hex"
"fmt"
+ "github.com/anacrolix/torrent/metainfo"
"io"
"log"
- "math/rand"
"net/http"
"net/url"
"os"
@@ -17,8 +17,6 @@ import (
"regexp"
"strings"
"time"
-
- "github.com/anacrolix/torrent/metainfo"
)
type Magnet struct {
@@ -151,15 +149,6 @@ func GetMagnetInfo(magnetLink string) (*Magnet, error) {
return magnet, nil
}
-func RandomString(length int) string {
- const charset = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
- b := make([]byte, length)
- for i := range b {
- b[i] = charset[rand.Intn(len(charset))]
- }
- return string(b)
-}
-
func ExtractInfoHash(magnetDesc string) string {
const prefix = "xt=urn:btih:"
start := strings.Index(magnetDesc, prefix)
@@ -244,41 +233,3 @@ func GetInfohashFromURL(url string) (string, error) {
infoHash := hash.HexString()
return infoHash, nil
}
-
-func JoinURL(base string, paths ...string) (string, error) {
- // Split the last path component to separate query parameters
- lastPath := paths[len(paths)-1]
- parts := strings.Split(lastPath, "?")
- paths[len(paths)-1] = parts[0]
-
- joined, err := url.JoinPath(base, paths...)
- if err != nil {
- return "", err
- }
-
- // Add back query parameters if they exist
- if len(parts) > 1 {
- return joined + "?" + parts[1], nil
- }
-
- return joined, nil
-}
-
-func FileReady(path string) bool {
- _, err := os.Stat(path)
- return !os.IsNotExist(err) // Returns true if the file exists
-}
-
-func Remove[S ~[]E, E comparable](s S, values ...E) S {
- result := make(S, 0, len(s))
-outer:
- for _, item := range s {
- for _, v := range values {
- if item == v {
- continue outer
- }
- }
- result = append(result, item)
- }
- return result
-}
diff --git a/internal/utils/misc.go b/internal/utils/misc.go
new file mode 100644
index 0000000..d3dc669
--- /dev/null
+++ b/internal/utils/misc.go
@@ -0,0 +1,15 @@
+package utils
+
+func RemoveItem[S ~[]E, E comparable](s S, values ...E) S {
+ result := make(S, 0, len(s))
+outer:
+ for _, item := range s {
+ for _, v := range values {
+ if item == v {
+ continue outer
+ }
+ }
+ result = append(result, item)
+ }
+ return result
+}
diff --git a/internal/utils/regex.go b/internal/utils/regex.go
new file mode 100644
index 0000000..9937a62
--- /dev/null
+++ b/internal/utils/regex.go
@@ -0,0 +1,49 @@
+package utils
+
+import (
+ "path/filepath"
+ "regexp"
+ "strings"
+)
+
+var (
+ VIDEOMATCH = "(?i)(\\.)(YUV|WMV|WEBM|VOB|VIV|SVI|ROQ|RMVB|RM|OGV|OGG|NSV|MXF|MPG|MPEG|M2V|MP2|MPE|MPV|MP4|M4P|M4V|MOV|QT|MNG|MKV|FLV|DRC|AVI|ASF|AMV|MKA|F4V|3GP|3G2|DIVX|X264|X265)$"
+ MUSICMATCH = "(?i)(\\.)(?:MP3|WAV|FLAC|AAC|OGG|WMA|AIFF|ALAC|M4A|APE|AC3|DTS|M4P|MID|MIDI|MKA|MP2|MPA|RA|VOC|WV|AMR)$"
+)
+
+var SAMPLEMATCH = `(?i)(^|[\\/]|[._-])(sample|trailer|thumb)s?([._-]|$)`
+
+func RegexMatch(regex string, value string) bool {
+ re := regexp.MustCompile(regex)
+ return re.MatchString(value)
+}
+
+func RemoveInvalidChars(value string) string {
+ return strings.Map(func(r rune) rune {
+ if r == filepath.Separator || r == ':' {
+ return r
+ }
+ if filepath.IsAbs(string(r)) {
+ return r
+ }
+ if strings.ContainsRune(filepath.VolumeName("C:"+string(r)), r) {
+ return r
+ }
+ if r < 32 || strings.ContainsRune(`<>:"/\|?*`, r) {
+ return -1
+ }
+ return r
+ }, value)
+}
+
+func RemoveExtension(value string) string {
+ re := regexp.MustCompile(VIDEOMATCH + "|" + SAMPLEMATCH + "|" + MUSICMATCH)
+
+ // Find the last index of the matched extension
+ loc := re.FindStringIndex(value)
+ if loc != nil {
+ return value[:loc[0]]
+ } else {
+ return value
+ }
+}
diff --git a/main.go b/main.go
index f721dfe..e140e95 100644
--- a/main.go
+++ b/main.go
@@ -4,7 +4,7 @@ import (
"context"
"flag"
"github.com/sirrobot01/debrid-blackhole/cmd"
- "github.com/sirrobot01/debrid-blackhole/common"
+ "github.com/sirrobot01/debrid-blackhole/internal/config"
"log"
)
@@ -13,14 +13,10 @@ func main() {
flag.StringVar(&configPath, "config", "config.json", "path to the config file")
flag.Parse()
- // Load the config file
- conf, err := common.LoadConfig(configPath)
- common.CONFIG = conf
- if err != nil {
- log.Fatal(err)
- }
+ config.SetConfigPath(configPath)
+ config.GetConfig()
ctx := context.Background()
- if err := cmd.Start(ctx, conf); err != nil {
+ if err := cmd.Start(ctx); err != nil {
log.Fatal(err)
}
diff --git a/pkg/arr/arr.go b/pkg/arr/arr.go
index 047bb0a..db663df 100644
--- a/pkg/arr/arr.go
+++ b/pkg/arr/arr.go
@@ -3,7 +3,8 @@ package arr
import (
"bytes"
"encoding/json"
- "github.com/sirrobot01/debrid-blackhole/common"
+ "github.com/sirrobot01/debrid-blackhole/internal/config"
+ "github.com/sirrobot01/debrid-blackhole/internal/request"
"net/http"
"strings"
"sync"
@@ -20,14 +21,15 @@ const (
)
var (
- client *common.RLHTTPClient = common.NewRLHTTPClient(nil, nil)
+ client *request.RLHTTPClient = request.NewRLHTTPClient(nil, nil)
)
type Arr struct {
- Name string `json:"name"`
- Host string `json:"host"`
- Token string `json:"token"`
- Type Type `json:"type"`
+ Name string `json:"name"`
+ Host string `json:"host"`
+ Token string `json:"token"`
+ Type Type `json:"type"`
+ verifiedDirs sync.Map // map[string]struct{} -> dir -> struct{}
}
func NewArr(name, host, token string, arrType Type) *Arr {
@@ -43,7 +45,7 @@ func (a *Arr) Request(method, endpoint string, payload interface{}) (*http.Respo
if a.Token == "" || a.Host == "" {
return nil, nil
}
- url, err := common.JoinURL(a.Host, endpoint)
+ url, err := request.JoinURL(a.Host, endpoint)
if err != nil {
return nil, err
}
@@ -84,9 +86,9 @@ func inferType(host, name string) Type {
}
}
-func NewStorage(cfg []common.ArrConfig) *Storage {
+func NewStorage() *Storage {
arrs := make(map[string]*Arr)
- for _, a := range cfg {
+ for _, a := range config.GetConfig().Arrs {
name := a.Name
arrs[name] = NewArr(name, a.Host, a.Token, inferType(a.Host, name))
}
diff --git a/pkg/arr/content.go b/pkg/arr/content.go
index a48b4bf..d2c1a86 100644
--- a/pkg/arr/content.go
+++ b/pkg/arr/content.go
@@ -3,7 +3,9 @@ package arr
import (
"encoding/json"
"fmt"
+ "log"
"net/http"
+ "strconv"
)
func (a *Arr) GetMedia(tvId string) ([]Content, error) {
@@ -14,7 +16,7 @@ func (a *Arr) GetMedia(tvId string) ([]Content, error) {
}
if resp.StatusCode == http.StatusNotFound {
// This is Radarr
- repairLogger.Info().Msg("Radarr detected")
+ log.Println("Radarr detected")
a.Type = Radarr
return GetMovies(a, tvId)
}
@@ -44,11 +46,34 @@ func (a *Arr) GetMedia(tvId string) ([]Content, error) {
Title: d.Title,
Id: d.Id,
}
- files := make([]contentFile, 0)
+
+ type episode struct {
+ Id int `json:"id"`
+ EpisodeFileID int `json:"episodeFileId"`
+ }
+ resp, err = a.Request(http.MethodGet, fmt.Sprintf("api/v3/episode?seriesId=%d", d.Id), nil)
+ if err != nil {
+ continue
+ }
+ defer resp.Body.Close()
+ var episodes []episode
+ if err = json.NewDecoder(resp.Body).Decode(&episodes); err != nil {
+ continue
+ }
+ episodeFileIDMap := make(map[int]int)
+ for _, e := range episodes {
+ episodeFileIDMap[e.EpisodeFileID] = e.Id
+ }
+ files := make([]ContentFile, 0)
for _, file := range seriesFiles {
- files = append(files, contentFile{
- Id: file.Id,
- Path: file.Path,
+ eId, ok := episodeFileIDMap[file.Id]
+ if !ok {
+ eId = 0
+ }
+ files = append(files, ContentFile{
+ FileId: file.Id,
+ Path: file.Path,
+ Id: eId,
})
}
ct.Files = files
@@ -73,10 +98,11 @@ func GetMovies(a *Arr, tvId string) ([]Content, error) {
Title: movie.Title,
Id: movie.Id,
}
- files := make([]contentFile, 0)
- files = append(files, contentFile{
- Id: movie.MovieFile.Id,
- Path: movie.MovieFile.Path,
+ files := make([]ContentFile, 0)
+ files = append(files, ContentFile{
+ FileId: movie.MovieFile.Id,
+ Id: movie.Id,
+ Path: movie.MovieFile.Path,
})
ct.Files = files
contents = append(contents, ct)
@@ -84,15 +110,69 @@ func GetMovies(a *Arr, tvId string) ([]Content, error) {
return contents, nil
}
-func (a *Arr) DeleteFile(id int) error {
+func (a *Arr) SearchMissing(files []ContentFile) error {
+ var payload interface{}
+
+ ids := make([]int, 0)
+ for _, f := range files {
+ ids = append(ids, f.Id)
+ }
+
switch a.Type {
case Sonarr:
- _, err := a.Request(http.MethodDelete, fmt.Sprintf("api/v3/episodefile/%d", id), nil)
+ payload = struct {
+ Name string `json:"name"`
+ EpisodeIds []int `json:"episodeIds"`
+ }{
+ Name: "EpisodeSearch",
+ EpisodeIds: ids,
+ }
+ case Radarr:
+ payload = struct {
+ Name string `json:"name"`
+ MovieIds []int `json:"movieIds"`
+ }{
+ Name: "MoviesSearch",
+ MovieIds: ids,
+ }
+ default:
+ return fmt.Errorf("unknown arr type: %s", a.Type)
+ }
+
+ resp, err := a.Request(http.MethodPost, "api/v3/command", payload)
+ if err != nil {
+ return fmt.Errorf("failed to search missing: %v", err)
+ }
+ if statusOk := strconv.Itoa(resp.StatusCode)[0] == '2'; !statusOk {
+ return fmt.Errorf("failed to search missing. Status Code: %s", resp.Status)
+ }
+ return nil
+}
+
+func (a *Arr) DeleteFiles(files []ContentFile) error {
+ ids := make([]int, 0)
+ for _, f := range files {
+ ids = append(ids, f.FileId)
+ }
+ var payload interface{}
+ switch a.Type {
+ case Sonarr:
+ payload = struct {
+ EpisodeFileIds []int `json:"episodeFileIds"`
+ }{
+ EpisodeFileIds: ids,
+ }
+ _, err := a.Request(http.MethodDelete, "api/v3/episodefile/bulk", payload)
if err != nil {
return err
}
case Radarr:
- _, err := a.Request(http.MethodDelete, fmt.Sprintf("api/v3/moviefile/%d", id), nil)
+ payload = struct {
+ MovieFileIds []int `json:"movieFileIds"`
+ }{
+ MovieFileIds: ids,
+ }
+ _, err := a.Request(http.MethodDelete, "api/v3/moviefile/bulk", payload)
if err != nil {
return err
}
diff --git a/pkg/arr/refresh.go b/pkg/arr/refresh.go
index cd7900b..e82cb35 100644
--- a/pkg/arr/refresh.go
+++ b/pkg/arr/refresh.go
@@ -3,7 +3,7 @@ package arr
import (
"cmp"
"fmt"
- "github.com/sirrobot01/debrid-blackhole/common"
+ "github.com/sirrobot01/debrid-blackhole/internal/request"
"net/http"
"strconv"
"strings"
@@ -36,7 +36,7 @@ func (a *Arr) MarkAsFailed(infoHash string) error {
}
}
if torrentId != 0 {
- url, err := common.JoinURL(a.Host, "history/failed/", strconv.Itoa(torrentId))
+ url, err := request.JoinURL(a.Host, "history/failed/", strconv.Itoa(torrentId))
if err != nil {
return err
}
diff --git a/pkg/arr/repair.go b/pkg/arr/repair.go
deleted file mode 100644
index dc10c09..0000000
--- a/pkg/arr/repair.go
+++ /dev/null
@@ -1,342 +0,0 @@
-package arr
-
-import (
- "github.com/rs/zerolog"
- "github.com/sirrobot01/debrid-blackhole/common"
- "io"
- "net/http"
- "os"
- "path/filepath"
- "runtime"
- "strconv"
- "sync"
-)
-
-var repairLogger *zerolog.Logger
-
-func getLogger() *zerolog.Logger {
- if repairLogger == nil {
- logger := common.NewLogger("repair", common.CONFIG.LogLevel, os.Stdout)
- repairLogger = &logger
- }
- return repairLogger
-}
-
-func (a *Arr) SearchMissing(id int) {
- var payload interface{}
-
- switch a.Type {
- case Sonarr:
- payload = struct {
- Name string `json:"name"`
- SeriesId int `json:"seriesId"`
- }{
- Name: "SeriesSearch",
- SeriesId: id,
- }
- case Radarr:
- payload = struct {
- Name string `json:"name"`
- MovieId []int `json:"movieIds"`
- }{
- Name: "MoviesSearch",
- MovieId: []int{id},
- }
- default:
- getLogger().Info().Msgf("Unknown arr type: %s", a.Type)
- return
- }
-
- resp, err := a.Request(http.MethodPost, "api/v3/command", payload)
- if err != nil {
- getLogger().Info().Msgf("Failed to search missing: %v", err)
- return
- }
- if statusOk := strconv.Itoa(resp.StatusCode)[0] == '2'; !statusOk {
- getLogger().Info().Msgf("Failed to search missing: %s", resp.Status)
- return
- }
-}
-
-func (a *Arr) Repair(tmdbId string) error {
-
- getLogger().Info().Msgf("Starting repair for %s", a.Name)
- media, err := a.GetMedia(tmdbId)
- if err != nil {
- getLogger().Info().Msgf("Failed to get %s media: %v", a.Type, err)
- return err
- }
- getLogger().Info().Msgf("Found %d %s media", len(media), a.Type)
-
- brokenMedia := a.processMedia(media)
- getLogger().Info().Msgf("Found %d %s broken media files", len(brokenMedia), a.Type)
-
- // Automatic search for missing files
- getLogger().Info().Msgf("Repair completed for %s", a.Name)
- return nil
-}
-
-func (a *Arr) processMedia(media []Content) []Content {
- if len(media) <= 1 {
- var brokenMedia []Content
- for _, m := range media {
- // Check if media is accessible
- if !a.isMediaAccessible(m) {
- getLogger().Debug().Msgf("Skipping media check for %s - parent directory not accessible", m.Title)
- continue
- }
- if a.checkMediaFiles(m) {
- a.SearchMissing(m.Id)
- brokenMedia = append(brokenMedia, m)
- }
- }
- return brokenMedia
- }
-
- workerCount := runtime.NumCPU() * 4
- if len(media) < workerCount {
- workerCount = len(media)
- }
-
- jobs := make(chan Content)
- results := make(chan Content)
- var brokenMedia []Content
-
- var wg sync.WaitGroup
- for i := 0; i < workerCount; i++ {
- wg.Add(1)
- go func() {
- defer wg.Done()
- for m := range jobs {
- // Check if media is accessible
- // First check if we can access this media's directory
- if !a.isMediaAccessible(m) {
- getLogger().Debug().Msgf("Skipping media check for %s - parent directory not accessible", m.Title)
- continue
- }
- if a.checkMediaFilesParallel(m) {
- a.SearchMissing(m.Id)
- results <- m
- }
- }
- }()
- }
-
- go func() {
- for _, m := range media {
- jobs <- m
- }
- close(jobs)
- }()
-
- go func() {
- wg.Wait()
- close(results)
- }()
-
- for m := range results {
- brokenMedia = append(brokenMedia, m)
- }
-
- return brokenMedia
-}
-
-func (a *Arr) checkMediaFilesParallel(m Content) bool {
- if len(m.Files) <= 1 {
- return a.checkMediaFiles(m)
- }
-
- fileWorkers := runtime.NumCPU() * 2
- if len(m.Files) < fileWorkers {
- fileWorkers = len(m.Files)
- }
-
- fileJobs := make(chan contentFile)
- brokenFiles := make(chan bool, len(m.Files))
-
- var fileWg sync.WaitGroup
- for i := 0; i < fileWorkers; i++ {
- fileWg.Add(1)
- go func() {
- defer fileWg.Done()
- for f := range fileJobs {
- getLogger().Debug().Msgf("Checking file: %s", f.Path)
- isBroken := false
-
- if fileIsSymlinked(f.Path) {
- getLogger().Debug().Msgf("File is symlinked: %s", f.Path)
- if !fileIsCorrectSymlink(f.Path) {
- getLogger().Debug().Msgf("File is broken: %s", f.Path)
- isBroken = true
- if err := a.DeleteFile(f.Id); err != nil {
- getLogger().Info().Msgf("Failed to delete file: %s %d: %v", f.Path, f.Id, err)
- }
- }
- } else {
- getLogger().Debug().Msgf("File is not symlinked: %s", f.Path)
- if !fileIsReadable(f.Path) {
- getLogger().Debug().Msgf("File is broken: %s", f.Path)
- isBroken = true
- if err := a.DeleteFile(f.Id); err != nil {
- getLogger().Info().Msgf("Failed to delete file: %s %d: %v", f.Path, f.Id, err)
- }
- }
- }
- brokenFiles <- isBroken
- }
- }()
- }
-
- go func() {
- for _, f := range m.Files {
- fileJobs <- f
- }
- close(fileJobs)
- }()
-
- go func() {
- fileWg.Wait()
- close(brokenFiles)
- }()
-
- isBroken := false
- for broken := range brokenFiles {
- if broken {
- isBroken = true
- }
- }
-
- return isBroken
-}
-
-func (a *Arr) checkMediaFiles(m Content) bool {
- isBroken := false
- for _, f := range m.Files {
- if fileIsSymlinked(f.Path) {
- if !fileIsCorrectSymlink(f.Path) {
- isBroken = true
- if err := a.DeleteFile(f.Id); err != nil {
- getLogger().Info().Msgf("Failed to delete file: %s %d: %v", f.Path, f.Id, err)
- }
- }
- } else {
- if !fileIsReadable(f.Path) {
- isBroken = true
- if err := a.DeleteFile(f.Id); err != nil {
- getLogger().Info().Msgf("Failed to delete file: %s %d: %v", f.Path, f.Id, err)
- }
- }
- }
- }
- return isBroken
-}
-
-func (a *Arr) isMediaAccessible(m Content) bool {
- // We're likely to mount the debrid path.
- // So instead of checking the arr path, we check the original path
- // This is because the arr path is likely to be a symlink
- // And we want to check the actual path where the media is stored
- // This is to avoid false positives
-
- if len(m.Files) == 0 {
- return false
- }
-
- // Get the first file to check its target location
- file := m.Files[0].Path
-
- var targetPath string
- fileInfo, err := os.Lstat(file)
- if err != nil {
- repairLogger.Debug().Msgf("Cannot stat file %s: %v", file, err)
- return false
- }
-
- if fileInfo.Mode()&os.ModeSymlink != 0 {
- // If it's a symlink, get where it points to
- target, err := os.Readlink(file)
- if err != nil {
- repairLogger.Debug().Msgf("Cannot read symlink %s: %v", file, err)
- return false
- }
-
- // If the symlink target is relative, make it absolute
- if !filepath.IsAbs(target) {
- dir := filepath.Dir(file)
- target = filepath.Join(dir, target)
- }
- targetPath = target
- } else {
- // If it's a regular file, use its path
- targetPath = file
- }
-
- mediaDir := filepath.Dir(targetPath) // Gets /remote/storage/Movie
- parentDir := filepath.Dir(mediaDir) // Gets /remote/storage
-
- _, err = os.Stat(parentDir)
- if err != nil {
- repairLogger.Debug().Msgf("Parent directory of target not accessible for media %s: %s", m.Title, parentDir)
- return false
- }
- return true
-}
-
-func fileIsSymlinked(file string) bool {
- info, err := os.Lstat(file)
- if err != nil {
- return false
- }
- return info.Mode()&os.ModeSymlink != 0
-}
-
-func fileIsCorrectSymlink(file string) bool {
- target, err := os.Readlink(file)
- if err != nil {
- return false
- }
-
- if !filepath.IsAbs(target) {
- dir := filepath.Dir(file)
- target = filepath.Join(dir, target)
- }
-
- return fileIsReadable(target)
-}
-
-func fileIsReadable(filePath string) bool {
- // First check if file exists and is accessible
- info, err := os.Stat(filePath)
- if err != nil {
- return false
- }
-
- // Check if it's a regular file
- if !info.Mode().IsRegular() {
- return false
- }
-
- // Try to read the first 1024 bytes
- err = checkFileStart(filePath)
- if err != nil {
- return false
- }
-
- return true
-}
-
-func checkFileStart(filePath string) error {
- f, err := os.Open(filePath)
- if err != nil {
- return err
- }
- defer f.Close()
-
- buffer := make([]byte, 1024)
- _, err = io.ReadAtLeast(f, buffer, 1024)
- if err != nil && err != io.EOF {
- return err
- }
-
- return nil
-}
diff --git a/pkg/arr/structs.go b/pkg/arr/structs.go
index cb9cee5..17fd9e1 100644
--- a/pkg/arr/structs.go
+++ b/pkg/arr/structs.go
@@ -14,16 +14,20 @@ type Movie struct {
Id int `json:"id"`
}
-type contentFile struct {
- Name string `json:"name"`
- Path string `json:"path"`
- Id int `json:"id"`
+type ContentFile struct {
+ Name string `json:"name"`
+ Path string `json:"path"`
+ Id int `json:"id"`
+ FileId int `json:"fileId"`
+ TargetPath string `json:"targetPath"`
+ IsSymlink bool `json:"isSymlink"`
+ IsBroken bool `json:"isBroken"`
}
type Content struct {
Title string `json:"title"`
Id int `json:"id"`
- Files []contentFile `json:"files"`
+ Files []ContentFile `json:"files"`
}
type seriesFile struct {
diff --git a/pkg/debrid/alldebrid.go b/pkg/debrid/alldebrid.go
index 69ddbd8..46ea83f 100644
--- a/pkg/debrid/alldebrid.go
+++ b/pkg/debrid/alldebrid.go
@@ -5,6 +5,9 @@ import (
"fmt"
"github.com/rs/zerolog"
"github.com/sirrobot01/debrid-blackhole/common"
+ "github.com/sirrobot01/debrid-blackhole/internal/config"
+ "github.com/sirrobot01/debrid-blackhole/internal/logger"
+ "github.com/sirrobot01/debrid-blackhole/internal/request"
"github.com/sirrobot01/debrid-blackhole/pkg/debrid/structs"
"net/http"
gourl "net/url"
@@ -85,6 +88,8 @@ func getAlldebridStatus(statusCode int) string {
func flattenFiles(files []structs.AllDebridMagnetFile, parentPath string, index *int) []TorrentFile {
result := make([]TorrentFile, 0)
+ cfg := config.GetConfig()
+
for _, f := range files {
currentPath := f.Name
if parentPath != "" {
@@ -100,7 +105,11 @@ func flattenFiles(files []structs.AllDebridMagnetFile, parentPath string, index
if common.RegexMatch(common.SAMPLEMATCH, fileName) {
continue
}
- if !common.RegexMatch(common.VIDEOMATCH, fileName) && !common.RegexMatch(common.MUSICMATCH, fileName) {
+ if !cfg.IsAllowedFile(fileName) {
+ continue
+ }
+
+ if !cfg.IsSizeAllowed(f.Size) {
continue
}
@@ -240,13 +249,12 @@ func (r *AllDebrid) GetCheckCached() bool {
return r.CheckCached
}
-func NewAllDebrid(dc common.DebridConfig, cache *common.Cache) *AllDebrid {
- rl := common.ParseRateLimit(dc.RateLimit)
+func NewAllDebrid(dc config.Debrid, cache *common.Cache) *AllDebrid {
+ rl := request.ParseRateLimit(dc.RateLimit)
headers := map[string]string{
"Authorization": fmt.Sprintf("Bearer %s", dc.APIKey),
}
- client := common.NewRLHTTPClient(rl, headers)
- logger := common.NewLogger(dc.Name, common.CONFIG.LogLevel, os.Stdout)
+ client := request.NewRLHTTPClient(rl, headers)
return &AllDebrid{
BaseDebrid: BaseDebrid{
Name: "alldebrid",
@@ -256,7 +264,7 @@ func NewAllDebrid(dc common.DebridConfig, cache *common.Cache) *AllDebrid {
client: client,
cache: cache,
MountPath: dc.Folder,
- logger: logger,
+ logger: logger.NewLogger(dc.Name, config.GetConfig().LogLevel, os.Stdout),
CheckCached: dc.CheckCached,
},
}
diff --git a/pkg/debrid/debrid.go b/pkg/debrid/debrid.go
index 7948a4a..1c66c98 100644
--- a/pkg/debrid/debrid.go
+++ b/pkg/debrid/debrid.go
@@ -1,10 +1,14 @@
package debrid
import (
+ "cmp"
"fmt"
"github.com/anacrolix/torrent/metainfo"
"github.com/rs/zerolog"
"github.com/sirrobot01/debrid-blackhole/common"
+ "github.com/sirrobot01/debrid-blackhole/internal/config"
+ "github.com/sirrobot01/debrid-blackhole/internal/request"
+ "github.com/sirrobot01/debrid-blackhole/internal/utils"
"github.com/sirrobot01/debrid-blackhole/pkg/arr"
"path/filepath"
)
@@ -14,7 +18,7 @@ type BaseDebrid struct {
Host string `json:"host"`
APIKey string
DownloadUncached bool
- client *common.RLHTTPClient
+ client *request.RLHTTPClient
cache *common.Cache
MountPath string
logger zerolog.Logger
@@ -34,12 +38,14 @@ type Service interface {
GetLogger() zerolog.Logger
}
-func NewDebrid(debs []common.DebridConfig, maxCachedSize int) *DebridService {
+func NewDebrid() *DebridService {
+ cfg := config.GetConfig()
+ maxCachedSize := cmp.Or(cfg.MaxCacheSize, 1000)
debrids := make([]Service, 0)
// Divide the cache size by the number of debrids
- maxCacheSize := maxCachedSize / len(debs)
+ maxCacheSize := maxCachedSize / len(cfg.Debrids)
- for _, dc := range debs {
+ for _, dc := range cfg.Debrids {
d := createDebrid(dc, common.NewCache(maxCacheSize))
logger := d.GetLogger()
logger.Info().Msg("Debrid Service started")
@@ -49,7 +55,7 @@ func NewDebrid(debs []common.DebridConfig, maxCachedSize int) *DebridService {
return d
}
-func createDebrid(dc common.DebridConfig, cache *common.Cache) Service {
+func createDebrid(dc config.Debrid, cache *common.Cache) Service {
switch dc.Name {
case "realdebrid":
return NewRealDebrid(dc, cache)
@@ -75,8 +81,8 @@ func GetTorrentInfo(filePath string) (*Torrent, error) {
}
func torrentFromMagnetFile(filePath string) (*Torrent, error) {
- magnetLink := common.OpenMagnetFile(filePath)
- magnet, err := common.GetMagnetInfo(magnetLink)
+ magnetLink := utils.OpenMagnetFile(filePath)
+ magnet, err := utils.GetMagnetInfo(magnetLink)
if err != nil {
return nil, err
}
@@ -102,7 +108,7 @@ func getTorrentInfo(filePath string) (*Torrent, error) {
return nil, err
}
infoLength := info.Length
- magnet := &common.Magnet{
+ magnet := &utils.Magnet{
InfoHash: infoHash,
Name: info.Name,
Size: infoLength,
@@ -145,7 +151,7 @@ func GetLocalCache(infohashes []string, cache *common.Cache) ([]string, map[stri
return infohashes, result
}
-func ProcessTorrent(d *DebridService, magnet *common.Magnet, a *arr.Arr, isSymlink bool) (*Torrent, error) {
+func ProcessTorrent(d *DebridService, magnet *utils.Magnet, a *arr.Arr, isSymlink bool) (*Torrent, error) {
debridTorrent := &Torrent{
InfoHash: magnet.InfoHash,
Magnet: magnet,
diff --git a/pkg/debrid/debrid_link.go b/pkg/debrid/debrid_link.go
index 39eceda..e583051 100644
--- a/pkg/debrid/debrid_link.go
+++ b/pkg/debrid/debrid_link.go
@@ -6,6 +6,9 @@ import (
"fmt"
"github.com/rs/zerolog"
"github.com/sirrobot01/debrid-blackhole/common"
+ "github.com/sirrobot01/debrid-blackhole/internal/config"
+ "github.com/sirrobot01/debrid-blackhole/internal/logger"
+ "github.com/sirrobot01/debrid-blackhole/internal/request"
"github.com/sirrobot01/debrid-blackhole/pkg/debrid/structs"
"log"
"net/http"
@@ -129,7 +132,11 @@ func (r *DebridLink) GetTorrent(id string) (*Torrent, error) {
torrent.Filename = name
torrent.OriginalFilename = name
files := make([]TorrentFile, len(data.Files))
+ cfg := config.GetConfig()
for i, f := range data.Files {
+ if !cfg.IsSizeAllowed(f.Size) {
+ continue
+ }
files[i] = TorrentFile{
Id: f.ID,
Name: f.Name,
@@ -250,14 +257,13 @@ func (r *DebridLink) GetCheckCached() bool {
return r.CheckCached
}
-func NewDebridLink(dc common.DebridConfig, cache *common.Cache) *DebridLink {
- rl := common.ParseRateLimit(dc.RateLimit)
+func NewDebridLink(dc config.Debrid, cache *common.Cache) *DebridLink {
+ rl := request.ParseRateLimit(dc.RateLimit)
headers := map[string]string{
"Authorization": fmt.Sprintf("Bearer %s", dc.APIKey),
"Content-Type": "application/json",
}
- client := common.NewRLHTTPClient(rl, headers)
- logger := common.NewLogger(dc.Name, common.CONFIG.LogLevel, os.Stdout)
+ client := request.NewRLHTTPClient(rl, headers)
return &DebridLink{
BaseDebrid: BaseDebrid{
Name: "debridlink",
@@ -267,7 +273,7 @@ func NewDebridLink(dc common.DebridConfig, cache *common.Cache) *DebridLink {
client: client,
cache: cache,
MountPath: dc.Folder,
- logger: logger,
+ logger: logger.NewLogger(dc.Name, config.GetConfig().LogLevel, os.Stdout),
CheckCached: dc.CheckCached,
},
}
diff --git a/pkg/debrid/realdebrid.go b/pkg/debrid/realdebrid.go
index 2ecdabc..123f1e8 100644
--- a/pkg/debrid/realdebrid.go
+++ b/pkg/debrid/realdebrid.go
@@ -5,6 +5,10 @@ import (
"fmt"
"github.com/rs/zerolog"
"github.com/sirrobot01/debrid-blackhole/common"
+ "github.com/sirrobot01/debrid-blackhole/internal/config"
+ "github.com/sirrobot01/debrid-blackhole/internal/logger"
+ "github.com/sirrobot01/debrid-blackhole/internal/request"
+ "github.com/sirrobot01/debrid-blackhole/internal/utils"
"github.com/sirrobot01/debrid-blackhole/pkg/debrid/structs"
"net/http"
gourl "net/url"
@@ -33,13 +37,17 @@ func (r *RealDebrid) GetLogger() zerolog.Logger {
func GetTorrentFiles(data structs.RealDebridTorrentInfo) []TorrentFile {
files := make([]TorrentFile, 0)
+ cfg := config.GetConfig()
for _, f := range data.Files {
name := filepath.Base(f.Path)
- if common.RegexMatch(common.SAMPLEMATCH, name) {
+ if utils.RegexMatch(utils.SAMPLEMATCH, name) {
// Skip sample files
continue
}
- if !common.RegexMatch(common.VIDEOMATCH, name) && !common.RegexMatch(common.MUSICMATCH, name) {
+ if !cfg.IsAllowedFile(name) {
+ continue
+ }
+ if !cfg.IsSizeAllowed(f.Bytes) {
continue
}
fileId := f.ID
@@ -273,13 +281,12 @@ func (r *RealDebrid) GetCheckCached() bool {
return r.CheckCached
}
-func NewRealDebrid(dc common.DebridConfig, cache *common.Cache) *RealDebrid {
- rl := common.ParseRateLimit(dc.RateLimit)
+func NewRealDebrid(dc config.Debrid, cache *common.Cache) *RealDebrid {
+ rl := request.ParseRateLimit(dc.RateLimit)
headers := map[string]string{
"Authorization": fmt.Sprintf("Bearer %s", dc.APIKey),
}
- client := common.NewRLHTTPClient(rl, headers)
- logger := common.NewLogger(dc.Name, common.CONFIG.LogLevel, os.Stdout)
+ client := request.NewRLHTTPClient(rl, headers)
return &RealDebrid{
BaseDebrid: BaseDebrid{
Name: "realdebrid",
@@ -289,7 +296,7 @@ func NewRealDebrid(dc common.DebridConfig, cache *common.Cache) *RealDebrid {
client: client,
cache: cache,
MountPath: dc.Folder,
- logger: logger,
+ logger: logger.NewLogger(dc.Name, config.GetConfig().LogLevel, os.Stdout),
CheckCached: dc.CheckCached,
},
}
diff --git a/pkg/debrid/torbox.go b/pkg/debrid/torbox.go
index cdf8243..dc6f8b4 100644
--- a/pkg/debrid/torbox.go
+++ b/pkg/debrid/torbox.go
@@ -6,6 +6,9 @@ import (
"fmt"
"github.com/rs/zerolog"
"github.com/sirrobot01/debrid-blackhole/common"
+ "github.com/sirrobot01/debrid-blackhole/internal/config"
+ "github.com/sirrobot01/debrid-blackhole/internal/logger"
+ "github.com/sirrobot01/debrid-blackhole/internal/request"
"github.com/sirrobot01/debrid-blackhole/pkg/debrid/structs"
"log"
"mime/multipart"
@@ -166,13 +169,18 @@ func (r *Torbox) GetTorrent(id string) (*Torrent, error) {
torrent.Filename = name
torrent.OriginalFilename = name
files := make([]TorrentFile, 0)
+ cfg := config.GetConfig()
for _, f := range data.Files {
fileName := filepath.Base(f.Name)
if common.RegexMatch(common.SAMPLEMATCH, fileName) {
// Skip sample files
continue
}
- if !common.RegexMatch(common.VIDEOMATCH, fileName) && !common.RegexMatch(common.MUSICMATCH, fileName) {
+ if !cfg.IsAllowedFile(fileName) {
+ continue
+ }
+
+ if !cfg.IsSizeAllowed(f.Size) {
continue
}
file := TorrentFile{
@@ -283,13 +291,12 @@ func (r *Torbox) GetCheckCached() bool {
return r.CheckCached
}
-func NewTorbox(dc common.DebridConfig, cache *common.Cache) *Torbox {
- rl := common.ParseRateLimit(dc.RateLimit)
+func NewTorbox(dc config.Debrid, cache *common.Cache) *Torbox {
+ rl := request.ParseRateLimit(dc.RateLimit)
headers := map[string]string{
"Authorization": fmt.Sprintf("Bearer %s", dc.APIKey),
}
- client := common.NewRLHTTPClient(rl, headers)
- logger := common.NewLogger(dc.Name, common.CONFIG.LogLevel, os.Stdout)
+ client := request.NewRLHTTPClient(rl, headers)
return &Torbox{
BaseDebrid: BaseDebrid{
Name: "torbox",
@@ -299,7 +306,7 @@ func NewTorbox(dc common.DebridConfig, cache *common.Cache) *Torbox {
client: client,
cache: cache,
MountPath: dc.Folder,
- logger: logger,
+ logger: logger.NewLogger(dc.Name, config.GetConfig().LogLevel, os.Stdout),
CheckCached: dc.CheckCached,
},
}
diff --git a/pkg/debrid/torrent.go b/pkg/debrid/torrent.go
index 6b98061..fdd58bb 100644
--- a/pkg/debrid/torrent.go
+++ b/pkg/debrid/torrent.go
@@ -3,6 +3,7 @@ package debrid
import (
"fmt"
"github.com/sirrobot01/debrid-blackhole/common"
+ "github.com/sirrobot01/debrid-blackhole/internal/utils"
"github.com/sirrobot01/debrid-blackhole/pkg/arr"
"os"
"path/filepath"
@@ -36,7 +37,7 @@ type Torrent struct {
OriginalFilename string `json:"original_filename"`
Size int64 `json:"size"`
Bytes int64 `json:"bytes"` // Size of only the files that are downloaded
- Magnet *common.Magnet `json:"magnet"`
+ Magnet *utils.Magnet `json:"magnet"`
Files []TorrentFile `json:"files"`
Status string `json:"status"`
Added string `json:"added"`
@@ -70,7 +71,8 @@ func (t *Torrent) GetMountFolder(rClonePath string) (string, error) {
}
for _, path := range possiblePaths {
- if common.FileReady(filepath.Join(rClonePath, path)) {
+ _, err := os.Stat(filepath.Join(rClonePath, path))
+ if !os.IsNotExist(err) {
return path, nil
}
}
diff --git a/pkg/proxy/proxy.go b/pkg/proxy/proxy.go
index 5858a8d..f2afa9e 100644
--- a/pkg/proxy/proxy.go
+++ b/pkg/proxy/proxy.go
@@ -10,7 +10,9 @@ import (
"github.com/elazarl/goproxy"
"github.com/elazarl/goproxy/ext/auth"
"github.com/rs/zerolog"
- "github.com/sirrobot01/debrid-blackhole/common"
+ "github.com/sirrobot01/debrid-blackhole/internal/config"
+ "github.com/sirrobot01/debrid-blackhole/internal/logger"
+ "github.com/sirrobot01/debrid-blackhole/internal/utils"
"github.com/sirrobot01/debrid-blackhole/pkg/debrid"
"github.com/valyala/fastjson"
"io"
@@ -78,8 +80,8 @@ type Proxy struct {
logger zerolog.Logger
}
-func NewProxy(config common.Config, deb *debrid.DebridService) *Proxy {
- cfg := config.Proxy
+func NewProxy(deb *debrid.DebridService) *Proxy {
+ cfg := config.GetConfig().Proxy
port := cmp.Or(os.Getenv("PORT"), cfg.Port, "8181")
return &Proxy{
port: port,
@@ -88,7 +90,7 @@ func NewProxy(config common.Config, deb *debrid.DebridService) *Proxy {
password: cfg.Password,
cachedOnly: *cfg.CachedOnly,
debrid: deb.Get(),
- logger: common.NewLogger("Proxy", cfg.LogLevel, os.Stdout),
+ logger: logger.NewLogger("Proxy", cfg.LogLevel, os.Stdout),
}
}
@@ -182,7 +184,7 @@ func (item Item) getHash() string {
}
if strings.Contains(item.GUID, "magnet:?") {
- magnet, err := common.GetMagnetInfo(item.GUID)
+ magnet, err := utils.GetMagnetInfo(item.GUID)
if err == nil && magnet != nil && magnet.InfoHash != "" {
return magnet.InfoHash
}
@@ -196,22 +198,22 @@ func (item Item) getHash() string {
}
if strings.Contains(magnetLink, "magnet:?") {
- magnet, err := common.GetMagnetInfo(magnetLink)
+ magnet, err := utils.GetMagnetInfo(magnetLink)
if err == nil && magnet != nil && magnet.InfoHash != "" {
return magnet.InfoHash
}
}
//Check Description for infohash
- hash := common.ExtractInfoHash(item.Description)
+ hash := utils.ExtractInfoHash(item.Description)
if hash == "" {
// Check Title for infohash
- hash = common.ExtractInfoHash(item.Comments)
+ hash = utils.ExtractInfoHash(item.Comments)
}
infohash = hash
if infohash == "" {
if strings.Contains(magnetLink, "http") {
- h, _ := common.GetInfohashFromURL(magnetLink)
+ h, _ := utils.GetInfohashFromURL(magnetLink)
if h != "" {
infohash = h
}
diff --git a/pkg/qbit/main.go b/pkg/qbit/main.go
index ccdbf52..d979978 100644
--- a/pkg/qbit/main.go
+++ b/pkg/qbit/main.go
@@ -3,14 +3,14 @@ package qbit
import (
"context"
"fmt"
- "github.com/sirrobot01/debrid-blackhole/common"
"github.com/sirrobot01/debrid-blackhole/pkg/arr"
"github.com/sirrobot01/debrid-blackhole/pkg/debrid"
"github.com/sirrobot01/debrid-blackhole/pkg/qbit/server"
+ "github.com/sirrobot01/debrid-blackhole/pkg/repair"
)
-func Start(ctx context.Context, config *common.Config, deb *debrid.DebridService, arrs *arr.Storage) error {
- srv := server.NewServer(config, deb, arrs)
+func Start(ctx context.Context, deb *debrid.DebridService, arrs *arr.Storage, _repair *repair.Repair) error {
+ srv := server.NewServer(deb, arrs, _repair)
if err := srv.Start(ctx); err != nil {
return fmt.Errorf("failed to start qbit server: %w", err)
}
diff --git a/pkg/qbit/server/import.go b/pkg/qbit/server/import.go
index 7ff0974..73231fb 100644
--- a/pkg/qbit/server/import.go
+++ b/pkg/qbit/server/import.go
@@ -2,10 +2,10 @@ package server
import (
"fmt"
+ "github.com/sirrobot01/debrid-blackhole/internal/utils"
"time"
"github.com/google/uuid"
- "github.com/sirrobot01/debrid-blackhole/common"
"github.com/sirrobot01/debrid-blackhole/pkg/arr"
"github.com/sirrobot01/debrid-blackhole/pkg/debrid"
"github.com/sirrobot01/debrid-blackhole/pkg/qbit/shared"
@@ -66,7 +66,7 @@ func (i *ImportRequest) Complete() {
func (i *ImportRequest) Process(q *shared.QBit) (err error) {
// Use this for now.
// This sends the torrent to the arr
- magnet, err := common.GetMagnetFromUrl(i.URI)
+ magnet, err := utils.GetMagnetFromUrl(i.URI)
if err != nil {
return fmt.Errorf("error parsing magnet link: %w", err)
}
diff --git a/pkg/qbit/server/qbit_handlers.go b/pkg/qbit/server/qbit_handlers.go
index d33ae95..dd53304 100644
--- a/pkg/qbit/server/qbit_handlers.go
+++ b/pkg/qbit/server/qbit_handlers.go
@@ -5,7 +5,7 @@ import (
"encoding/base64"
"github.com/go-chi/chi/v5"
"github.com/rs/zerolog"
- "github.com/sirrobot01/debrid-blackhole/common"
+ "github.com/sirrobot01/debrid-blackhole/internal/request"
"github.com/sirrobot01/debrid-blackhole/pkg/arr"
"github.com/sirrobot01/debrid-blackhole/pkg/qbit/shared"
"net/http"
@@ -13,7 +13,7 @@ import (
"strings"
)
-type qbitHandler struct {
+type QbitHandler struct {
qbit *shared.QBit
logger zerolog.Logger
debug bool
@@ -40,7 +40,7 @@ func decodeAuthHeader(header string) (string, string, error) {
return host, token, nil
}
-func (q *qbitHandler) CategoryContext(next http.Handler) http.Handler {
+func (q *QbitHandler) CategoryContext(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
category := strings.Trim(r.URL.Query().Get("category"), "")
if category == "" {
@@ -59,7 +59,7 @@ func (q *qbitHandler) CategoryContext(next http.Handler) http.Handler {
})
}
-func (q *qbitHandler) authContext(next http.Handler) http.Handler {
+func (q *QbitHandler) authContext(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
host, token, err := decodeAuthHeader(r.Header.Get("Authorization"))
category := r.Context().Value("category").(string)
@@ -96,29 +96,29 @@ func HashesCtx(next http.Handler) http.Handler {
})
}
-func (q *qbitHandler) handleLogin(w http.ResponseWriter, r *http.Request) {
+func (q *QbitHandler) handleLogin(w http.ResponseWriter, r *http.Request) {
_, _ = w.Write([]byte("Ok."))
}
-func (q *qbitHandler) handleVersion(w http.ResponseWriter, r *http.Request) {
+func (q *QbitHandler) handleVersion(w http.ResponseWriter, r *http.Request) {
_, _ = w.Write([]byte("v4.3.2"))
}
-func (q *qbitHandler) handleWebAPIVersion(w http.ResponseWriter, r *http.Request) {
+func (q *QbitHandler) handleWebAPIVersion(w http.ResponseWriter, r *http.Request) {
_, _ = w.Write([]byte("2.7"))
}
-func (q *qbitHandler) handlePreferences(w http.ResponseWriter, r *http.Request) {
+func (q *QbitHandler) handlePreferences(w http.ResponseWriter, r *http.Request) {
preferences := shared.NewAppPreferences()
preferences.WebUiUsername = q.qbit.Username
preferences.SavePath = q.qbit.DownloadFolder
preferences.TempPath = filepath.Join(q.qbit.DownloadFolder, "temp")
- common.JSONResponse(w, preferences, http.StatusOK)
+ request.JSONResponse(w, preferences, http.StatusOK)
}
-func (q *qbitHandler) handleBuildInfo(w http.ResponseWriter, r *http.Request) {
+func (q *QbitHandler) handleBuildInfo(w http.ResponseWriter, r *http.Request) {
res := shared.BuildInfo{
Bitness: 64,
Boost: "1.75.0",
@@ -127,24 +127,24 @@ func (q *qbitHandler) handleBuildInfo(w http.ResponseWriter, r *http.Request) {
Qt: "5.15.2",
Zlib: "1.2.11",
}
- common.JSONResponse(w, res, http.StatusOK)
+ request.JSONResponse(w, res, http.StatusOK)
}
-func (q *qbitHandler) shutdown(w http.ResponseWriter, r *http.Request) {
+func (q *QbitHandler) shutdown(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
}
-func (q *qbitHandler) handleTorrentsInfo(w http.ResponseWriter, r *http.Request) {
+func (q *QbitHandler) handleTorrentsInfo(w http.ResponseWriter, r *http.Request) {
//log all url params
ctx := r.Context()
category := ctx.Value("category").(string)
filter := strings.Trim(r.URL.Query().Get("filter"), "")
hashes, _ := ctx.Value("hashes").([]string)
torrents := q.qbit.Storage.GetAll(category, filter, hashes)
- common.JSONResponse(w, torrents, http.StatusOK)
+ request.JSONResponse(w, torrents, http.StatusOK)
}
-func (q *qbitHandler) handleTorrentsAdd(w http.ResponseWriter, r *http.Request) {
+func (q *QbitHandler) handleTorrentsAdd(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
// Parse form based on content type
@@ -209,7 +209,7 @@ func (q *qbitHandler) handleTorrentsAdd(w http.ResponseWriter, r *http.Request)
w.WriteHeader(http.StatusOK)
}
-func (q *qbitHandler) handleTorrentsDelete(w http.ResponseWriter, r *http.Request) {
+func (q *QbitHandler) handleTorrentsDelete(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
hashes, _ := ctx.Value("hashes").([]string)
if len(hashes) == 0 {
@@ -223,7 +223,7 @@ func (q *qbitHandler) handleTorrentsDelete(w http.ResponseWriter, r *http.Reques
w.WriteHeader(http.StatusOK)
}
-func (q *qbitHandler) handleTorrentsPause(w http.ResponseWriter, r *http.Request) {
+func (q *QbitHandler) handleTorrentsPause(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
hashes, _ := ctx.Value("hashes").([]string)
for _, hash := range hashes {
@@ -237,7 +237,7 @@ func (q *qbitHandler) handleTorrentsPause(w http.ResponseWriter, r *http.Request
w.WriteHeader(http.StatusOK)
}
-func (q *qbitHandler) handleTorrentsResume(w http.ResponseWriter, r *http.Request) {
+func (q *QbitHandler) handleTorrentsResume(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
hashes, _ := ctx.Value("hashes").([]string)
for _, hash := range hashes {
@@ -251,7 +251,7 @@ func (q *qbitHandler) handleTorrentsResume(w http.ResponseWriter, r *http.Reques
w.WriteHeader(http.StatusOK)
}
-func (q *qbitHandler) handleTorrentRecheck(w http.ResponseWriter, r *http.Request) {
+func (q *QbitHandler) handleTorrentRecheck(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
hashes, _ := ctx.Value("hashes").([]string)
for _, hash := range hashes {
@@ -265,7 +265,7 @@ func (q *qbitHandler) handleTorrentRecheck(w http.ResponseWriter, r *http.Reques
w.WriteHeader(http.StatusOK)
}
-func (q *qbitHandler) handleCategories(w http.ResponseWriter, r *http.Request) {
+func (q *QbitHandler) handleCategories(w http.ResponseWriter, r *http.Request) {
var categories = map[string]shared.TorrentCategory{}
for _, cat := range q.qbit.Categories {
path := filepath.Join(q.qbit.DownloadFolder, cat)
@@ -274,10 +274,10 @@ func (q *qbitHandler) handleCategories(w http.ResponseWriter, r *http.Request) {
SavePath: path,
}
}
- common.JSONResponse(w, categories, http.StatusOK)
+ request.JSONResponse(w, categories, http.StatusOK)
}
-func (q *qbitHandler) handleCreateCategory(w http.ResponseWriter, r *http.Request) {
+func (q *QbitHandler) handleCreateCategory(w http.ResponseWriter, r *http.Request) {
err := r.ParseForm()
if err != nil {
http.Error(w, "Failed to parse form data", http.StatusBadRequest)
@@ -292,27 +292,27 @@ func (q *qbitHandler) handleCreateCategory(w http.ResponseWriter, r *http.Reques
q.qbit.Categories = append(q.qbit.Categories, name)
- common.JSONResponse(w, nil, http.StatusOK)
+ request.JSONResponse(w, nil, http.StatusOK)
}
-func (q *qbitHandler) handleTorrentProperties(w http.ResponseWriter, r *http.Request) {
+func (q *QbitHandler) handleTorrentProperties(w http.ResponseWriter, r *http.Request) {
hash := r.URL.Query().Get("hash")
torrent := q.qbit.Storage.Get(hash)
properties := q.qbit.GetTorrentProperties(torrent)
- common.JSONResponse(w, properties, http.StatusOK)
+ request.JSONResponse(w, properties, http.StatusOK)
}
-func (q *qbitHandler) handleTorrentFiles(w http.ResponseWriter, r *http.Request) {
+func (q *QbitHandler) handleTorrentFiles(w http.ResponseWriter, r *http.Request) {
hash := r.URL.Query().Get("hash")
torrent := q.qbit.Storage.Get(hash)
if torrent == nil {
return
}
files := q.qbit.GetTorrentFiles(torrent)
- common.JSONResponse(w, files, http.StatusOK)
+ request.JSONResponse(w, files, http.StatusOK)
}
-func (q *qbitHandler) handleSetCategory(w http.ResponseWriter, r *http.Request) {
+func (q *QbitHandler) handleSetCategory(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
category := ctx.Value("category").(string)
hashes, _ := ctx.Value("hashes").([]string)
@@ -321,10 +321,10 @@ func (q *qbitHandler) handleSetCategory(w http.ResponseWriter, r *http.Request)
torrent.Category = category
q.qbit.Storage.AddOrUpdate(torrent)
}
- common.JSONResponse(w, nil, http.StatusOK)
+ request.JSONResponse(w, nil, http.StatusOK)
}
-func (q *qbitHandler) handleAddTorrentTags(w http.ResponseWriter, r *http.Request) {
+func (q *QbitHandler) handleAddTorrentTags(w http.ResponseWriter, r *http.Request) {
err := r.ParseForm()
if err != nil {
http.Error(w, "Failed to parse form data", http.StatusBadRequest)
@@ -340,10 +340,10 @@ func (q *qbitHandler) handleAddTorrentTags(w http.ResponseWriter, r *http.Reques
for _, t := range torrents {
q.qbit.SetTorrentTags(t, tags)
}
- common.JSONResponse(w, nil, http.StatusOK)
+ request.JSONResponse(w, nil, http.StatusOK)
}
-func (q *qbitHandler) handleRemoveTorrentTags(w http.ResponseWriter, r *http.Request) {
+func (q *QbitHandler) handleRemoveTorrentTags(w http.ResponseWriter, r *http.Request) {
err := r.ParseForm()
if err != nil {
http.Error(w, "Failed to parse form data", http.StatusBadRequest)
@@ -360,14 +360,14 @@ func (q *qbitHandler) handleRemoveTorrentTags(w http.ResponseWriter, r *http.Req
q.qbit.RemoveTorrentTags(torrent, tags)
}
- common.JSONResponse(w, nil, http.StatusOK)
+ request.JSONResponse(w, nil, http.StatusOK)
}
-func (q *qbitHandler) handleGetTags(w http.ResponseWriter, r *http.Request) {
- common.JSONResponse(w, q.qbit.Tags, http.StatusOK)
+func (q *QbitHandler) handleGetTags(w http.ResponseWriter, r *http.Request) {
+ request.JSONResponse(w, q.qbit.Tags, http.StatusOK)
}
-func (q *qbitHandler) handleCreateTags(w http.ResponseWriter, r *http.Request) {
+func (q *QbitHandler) handleCreateTags(w http.ResponseWriter, r *http.Request) {
err := r.ParseForm()
if err != nil {
http.Error(w, "Failed to parse form data", http.StatusBadRequest)
@@ -378,5 +378,5 @@ func (q *qbitHandler) handleCreateTags(w http.ResponseWriter, r *http.Request) {
tags[i] = strings.TrimSpace(tag)
}
q.qbit.AddTags(tags)
- common.JSONResponse(w, nil, http.StatusOK)
+ request.JSONResponse(w, nil, http.StatusOK)
}
diff --git a/pkg/qbit/server/qbit_routes.go b/pkg/qbit/server/qbit_routes.go
index d30d22a..3082505 100644
--- a/pkg/qbit/server/qbit_routes.go
+++ b/pkg/qbit/server/qbit_routes.go
@@ -5,7 +5,7 @@ import (
"net/http"
)
-func (q *qbitHandler) Routes(r chi.Router) http.Handler {
+func (q *QbitHandler) Routes(r chi.Router) http.Handler {
r.Route("/api/v2", func(r chi.Router) {
//if q.debug {
// r.Use(middleware.Logger)
diff --git a/pkg/qbit/server/server.go b/pkg/qbit/server/server.go
index 96ba27b..41b5a2e 100644
--- a/pkg/qbit/server/server.go
+++ b/pkg/qbit/server/server.go
@@ -7,10 +7,12 @@ import (
"github.com/go-chi/chi/v5"
"github.com/go-chi/chi/v5/middleware"
"github.com/rs/zerolog"
- "github.com/sirrobot01/debrid-blackhole/common"
+ "github.com/sirrobot01/debrid-blackhole/internal/config"
+ "github.com/sirrobot01/debrid-blackhole/internal/logger"
"github.com/sirrobot01/debrid-blackhole/pkg/arr"
"github.com/sirrobot01/debrid-blackhole/pkg/debrid"
"github.com/sirrobot01/debrid-blackhole/pkg/qbit/shared"
+ "github.com/sirrobot01/debrid-blackhole/pkg/repair"
"io"
"net/http"
"os"
@@ -23,12 +25,13 @@ type Server struct {
logger zerolog.Logger
}
-func NewServer(config *common.Config, deb *debrid.DebridService, arrs *arr.Storage) *Server {
- logger := common.NewLogger("QBit", config.QBitTorrent.LogLevel, os.Stdout)
- q := shared.NewQBit(config, deb, logger, arrs)
+func NewServer(deb *debrid.DebridService, arrs *arr.Storage, _repair *repair.Repair) *Server {
+ cfg := config.GetConfig()
+ l := logger.NewLogger("QBit", cfg.QBitTorrent.LogLevel, os.Stdout)
+ q := shared.NewQBit(deb, l, arrs, _repair)
return &Server{
qbit: q,
- logger: logger,
+ logger: l,
}
}
@@ -38,8 +41,12 @@ func (s *Server) Start(ctx context.Context) error {
r.Handle("/static/*", http.StripPrefix("/static/", http.FileServer(http.Dir("static"))))
logLevel := s.logger.GetLevel().String()
debug := logLevel == "debug"
- q := qbitHandler{qbit: s.qbit, logger: s.logger, debug: debug}
- ui := uiHandler{qbit: s.qbit, logger: common.NewLogger("UI", s.logger.GetLevel().String(), os.Stdout), debug: debug}
+ q := QbitHandler{qbit: s.qbit, logger: s.logger, debug: debug}
+ ui := UIHandler{
+ qbit: s.qbit,
+ logger: logger.NewLogger("UI", s.logger.GetLevel().String(), os.Stdout),
+ debug: debug,
+ }
// Register routes
r.Get("/logs", s.GetLogs)
@@ -71,7 +78,7 @@ func (s *Server) Start(ctx context.Context) error {
}
func (s *Server) GetLogs(w http.ResponseWriter, r *http.Request) {
- logFile := common.GetLogPath()
+ logFile := logger.GetLogPath()
// Open and read the file
file, err := os.Open(logFile)
@@ -79,7 +86,12 @@ func (s *Server) GetLogs(w http.ResponseWriter, r *http.Request) {
http.Error(w, "Error reading log file", http.StatusInternalServerError)
return
}
- defer file.Close()
+ defer func(file *os.File) {
+ err := file.Close()
+ if err != nil {
+ s.logger.Debug().Err(err).Msg("Error closing log file")
+ }
+ }(file)
// Set headers
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
@@ -91,6 +103,7 @@ func (s *Server) GetLogs(w http.ResponseWriter, r *http.Request) {
// Stream the file
_, err = io.Copy(w, file)
if err != nil {
+ s.logger.Debug().Err(err).Msg("Error streaming log file")
http.Error(w, "Error streaming log file", http.StatusInternalServerError)
return
}
diff --git a/pkg/qbit/server/templates/config.html b/pkg/qbit/server/templates/config.html
index be430e5..c208fb0 100644
--- a/pkg/qbit/server/templates/config.html
+++ b/pkg/qbit/server/templates/config.html
@@ -30,6 +30,45 @@
Open Magnet Links in DecyphArr
+
+
+
+
+
+ Minimum file size to download (0 for no limit)
+
+
+
+
+
+
+
+ Maximum file size to download (0 for no limit)
+
+
@@ -216,6 +255,15 @@
const logLevel = document.getElementById('log-level');
logLevel.value = config.log_level;
+ if (config.allowed_file_types && Array.isArray(config.allowed_file_types)) {
+ document.querySelector('[name="allowed_file_types"]').value = config.allowed_file_types.join(', ');
+ }
+ if (config.min_file_size) {
+ document.querySelector('[name="min_file_size"]').value = config.min_file_size;
+ }
+ if (config.max_file_size) {
+ document.querySelector('[name="max_file_size"]').value = config.max_file_size;
+ }
});
diff --git a/pkg/qbit/server/ui_handlers.go b/pkg/qbit/server/ui_handlers.go
index ba27986..fe6560c 100644
--- a/pkg/qbit/server/ui_handlers.go
+++ b/pkg/qbit/server/ui_handlers.go
@@ -3,15 +3,16 @@ package server
import (
"embed"
"encoding/json"
- "errors"
"fmt"
+ "github.com/sirrobot01/debrid-blackhole/internal/config"
+ "github.com/sirrobot01/debrid-blackhole/internal/request"
+ "github.com/sirrobot01/debrid-blackhole/internal/utils"
"html/template"
"net/http"
"strings"
"github.com/go-chi/chi/v5"
"github.com/rs/zerolog"
- "github.com/sirrobot01/debrid-blackhole/common"
"github.com/sirrobot01/debrid-blackhole/pkg/arr"
"github.com/sirrobot01/debrid-blackhole/pkg/debrid"
"github.com/sirrobot01/debrid-blackhole/pkg/qbit/shared"
@@ -49,7 +50,7 @@ type RepairRequest struct {
//go:embed templates/*
var content embed.FS
-type uiHandler struct {
+type UIHandler struct {
qbit *shared.QBit
logger zerolog.Logger
debug bool
@@ -68,7 +69,7 @@ func init() {
))
}
-func (u *uiHandler) IndexHandler(w http.ResponseWriter, r *http.Request) {
+func (u *UIHandler) IndexHandler(w http.ResponseWriter, r *http.Request) {
data := map[string]interface{}{
"Page": "index",
"Title": "Torrents",
@@ -79,7 +80,7 @@ func (u *uiHandler) IndexHandler(w http.ResponseWriter, r *http.Request) {
}
}
-func (u *uiHandler) DownloadHandler(w http.ResponseWriter, r *http.Request) {
+func (u *UIHandler) DownloadHandler(w http.ResponseWriter, r *http.Request) {
data := map[string]interface{}{
"Page": "download",
"Title": "Download",
@@ -90,7 +91,7 @@ func (u *uiHandler) DownloadHandler(w http.ResponseWriter, r *http.Request) {
}
}
-func (u *uiHandler) RepairHandler(w http.ResponseWriter, r *http.Request) {
+func (u *UIHandler) RepairHandler(w http.ResponseWriter, r *http.Request) {
data := map[string]interface{}{
"Page": "repair",
"Title": "Repair",
@@ -101,7 +102,7 @@ func (u *uiHandler) RepairHandler(w http.ResponseWriter, r *http.Request) {
}
}
-func (u *uiHandler) ConfigHandler(w http.ResponseWriter, r *http.Request) {
+func (u *UIHandler) ConfigHandler(w http.ResponseWriter, r *http.Request) {
data := map[string]interface{}{
"Page": "config",
"Title": "Config",
@@ -112,11 +113,11 @@ func (u *uiHandler) ConfigHandler(w http.ResponseWriter, r *http.Request) {
}
}
-func (u *uiHandler) handleGetArrs(w http.ResponseWriter, r *http.Request) {
- common.JSONResponse(w, u.qbit.Arrs.GetAll(), http.StatusOK)
+func (u *UIHandler) handleGetArrs(w http.ResponseWriter, r *http.Request) {
+ request.JSONResponse(w, u.qbit.Arrs.GetAll(), http.StatusOK)
}
-func (u *uiHandler) handleAddContent(w http.ResponseWriter, r *http.Request) {
+func (u *UIHandler) handleAddContent(w http.ResponseWriter, r *http.Request) {
if err := r.ParseMultipartForm(32 << 20); err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
@@ -162,7 +163,7 @@ func (u *uiHandler) handleAddContent(w http.ResponseWriter, r *http.Request) {
continue
}
- magnet, err := common.GetMagnetFromFile(file, fileHeader.Filename)
+ magnet, err := utils.GetMagnetFromFile(file, fileHeader.Filename)
if err != nil {
errs = append(errs, fmt.Sprintf("Failed to parse torrent file %s: %v", fileHeader.Filename, err))
continue
@@ -178,7 +179,7 @@ func (u *uiHandler) handleAddContent(w http.ResponseWriter, r *http.Request) {
}
}
- common.JSONResponse(w, struct {
+ request.JSONResponse(w, struct {
Results []*ImportRequest `json:"results"`
Errors []string `json:"errors,omitempty"`
}{
@@ -187,7 +188,7 @@ func (u *uiHandler) handleAddContent(w http.ResponseWriter, r *http.Request) {
}, http.StatusOK)
}
-func (u *uiHandler) handleCheckCached(w http.ResponseWriter, r *http.Request) {
+func (u *UIHandler) handleCheckCached(w http.ResponseWriter, r *http.Request) {
_hashes := r.URL.Query().Get("hash")
if _hashes == "" {
http.Error(w, "No hashes provided", http.StatusBadRequest)
@@ -216,10 +217,10 @@ func (u *uiHandler) handleCheckCached(w http.ResponseWriter, r *http.Request) {
_, exists := res[h]
result[h] = exists
}
- common.JSONResponse(w, result, http.StatusOK)
+ request.JSONResponse(w, result, http.StatusOK)
}
-func (u *uiHandler) handleRepairMedia(w http.ResponseWriter, r *http.Request) {
+func (u *UIHandler) handleRepairMedia(w http.ResponseWriter, r *http.Request) {
var req RepairRequest
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
@@ -232,50 +233,35 @@ func (u *uiHandler) handleRepairMedia(w http.ResponseWriter, r *http.Request) {
return
}
- mediaIds := req.MediaIds
- if len(mediaIds) == 0 {
- mediaIds = []string{""}
- }
-
if req.Async {
- for _, tvId := range mediaIds {
- go func() {
- err := _arr.Repair(tvId)
- if err != nil {
- u.logger.Info().Msgf("Failed to repair: %v", err)
- }
- }()
- }
- common.JSONResponse(w, "Repair process started", http.StatusOK)
+ go func() {
+ if err := u.qbit.Repair.Repair([]*arr.Arr{_arr}, req.MediaIds); err != nil {
+ u.logger.Error().Err(err).Msg("Failed to repair media")
+ }
+ }()
+ request.JSONResponse(w, "Repair process started", http.StatusOK)
return
}
- var errs []error
- for _, tvId := range mediaIds {
- if err := _arr.Repair(tvId); err != nil {
- errs = append(errs, err)
- }
- }
-
- if len(errs) > 0 {
- combinedErr := errors.Join(errs...)
- http.Error(w, fmt.Sprintf("Failed to repair: %v", combinedErr), http.StatusInternalServerError)
+ if err := u.qbit.Repair.Repair([]*arr.Arr{_arr}, req.MediaIds); err != nil {
+ http.Error(w, fmt.Sprintf("Failed to repair: %v", err), http.StatusInternalServerError)
return
+
}
- common.JSONResponse(w, "Repair completed", http.StatusOK)
+ request.JSONResponse(w, "Repair completed", http.StatusOK)
}
-func (u *uiHandler) handleGetVersion(w http.ResponseWriter, r *http.Request) {
+func (u *UIHandler) handleGetVersion(w http.ResponseWriter, r *http.Request) {
v := version.GetInfo()
- common.JSONResponse(w, v, http.StatusOK)
+ request.JSONResponse(w, v, http.StatusOK)
}
-func (u *uiHandler) handleGetTorrents(w http.ResponseWriter, r *http.Request) {
- common.JSONResponse(w, u.qbit.Storage.GetAll("", "", nil), http.StatusOK)
+func (u *UIHandler) handleGetTorrents(w http.ResponseWriter, r *http.Request) {
+ request.JSONResponse(w, u.qbit.Storage.GetAll("", "", nil), http.StatusOK)
}
-func (u *uiHandler) handleDeleteTorrent(w http.ResponseWriter, r *http.Request) {
+func (u *UIHandler) handleDeleteTorrent(w http.ResponseWriter, r *http.Request) {
hash := chi.URLParam(r, "hash")
if hash == "" {
http.Error(w, "No hash provided", http.StatusBadRequest)
@@ -285,12 +271,12 @@ func (u *uiHandler) handleDeleteTorrent(w http.ResponseWriter, r *http.Request)
w.WriteHeader(http.StatusOK)
}
-func (u *uiHandler) handleGetConfig(w http.ResponseWriter, r *http.Request) {
- config := common.CONFIG
- arrCfgs := make([]common.ArrConfig, 0)
+func (u *UIHandler) handleGetConfig(w http.ResponseWriter, r *http.Request) {
+ cfg := config.GetConfig()
+ arrCfgs := make([]config.Arr, 0)
for _, a := range u.qbit.Arrs.GetAll() {
- arrCfgs = append(arrCfgs, common.ArrConfig{Host: a.Host, Name: a.Name, Token: a.Token})
+ arrCfgs = append(arrCfgs, config.Arr{Host: a.Host, Name: a.Name, Token: a.Token})
}
- config.Arrs = arrCfgs
- common.JSONResponse(w, config, http.StatusOK)
+ cfg.Arrs = arrCfgs
+ request.JSONResponse(w, cfg, http.StatusOK)
}
diff --git a/pkg/qbit/server/ui_routes.go b/pkg/qbit/server/ui_routes.go
index d1669a1..bb80349 100644
--- a/pkg/qbit/server/ui_routes.go
+++ b/pkg/qbit/server/ui_routes.go
@@ -5,7 +5,7 @@ import (
"net/http"
)
-func (u *uiHandler) Routes(r chi.Router) http.Handler {
+func (u *UIHandler) Routes(r chi.Router) http.Handler {
r.Group(func(r chi.Router) {
r.Get("/", u.IndexHandler)
r.Get("/download", u.DownloadHandler)
diff --git a/pkg/qbit/shared/qbit.go b/pkg/qbit/shared/qbit.go
index 71af370..f17a430 100644
--- a/pkg/qbit/shared/qbit.go
+++ b/pkg/qbit/shared/qbit.go
@@ -3,9 +3,10 @@ package shared
import (
"cmp"
"github.com/rs/zerolog"
- "github.com/sirrobot01/debrid-blackhole/common"
+ "github.com/sirrobot01/debrid-blackhole/internal/config"
"github.com/sirrobot01/debrid-blackhole/pkg/arr"
"github.com/sirrobot01/debrid-blackhole/pkg/debrid"
+ "github.com/sirrobot01/debrid-blackhole/pkg/repair"
"os"
)
@@ -16,6 +17,7 @@ type QBit struct {
DownloadFolder string `json:"download_folder"`
Categories []string `json:"categories"`
Debrid *debrid.DebridService
+ Repair *repair.Repair
Storage *TorrentStorage
debug bool
logger zerolog.Logger
@@ -24,8 +26,8 @@ type QBit struct {
RefreshInterval int
}
-func NewQBit(config *common.Config, deb *debrid.DebridService, logger zerolog.Logger, arrs *arr.Storage) *QBit {
- cfg := config.QBitTorrent
+func NewQBit(deb *debrid.DebridService, logger zerolog.Logger, arrs *arr.Storage, _repair *repair.Repair) *QBit {
+ cfg := config.GetConfig().QBitTorrent
port := cmp.Or(cfg.Port, os.Getenv("QBIT_PORT"), "8282")
refreshInterval := cmp.Or(cfg.RefreshInterval, 10)
return &QBit{
@@ -35,7 +37,8 @@ func NewQBit(config *common.Config, deb *debrid.DebridService, logger zerolog.Lo
DownloadFolder: cfg.DownloadFolder,
Categories: cfg.Categories,
Debrid: deb,
- Storage: NewTorrentStorage("/app/torrents.json"),
+ Storage: NewTorrentStorage(cmp.Or(os.Getenv("TORRENT_FILE"), "/app/torrents.json")),
+ Repair: _repair,
logger: logger,
Arrs: arrs,
RefreshInterval: refreshInterval,
diff --git a/pkg/qbit/shared/torrent.go b/pkg/qbit/shared/torrent.go
index 253a1f7..d459d82 100644
--- a/pkg/qbit/shared/torrent.go
+++ b/pkg/qbit/shared/torrent.go
@@ -5,7 +5,7 @@ import (
"context"
"fmt"
"github.com/google/uuid"
- "github.com/sirrobot01/debrid-blackhole/common"
+ "github.com/sirrobot01/debrid-blackhole/internal/utils"
"github.com/sirrobot01/debrid-blackhole/pkg/arr"
"github.com/sirrobot01/debrid-blackhole/pkg/debrid"
"io"
@@ -20,7 +20,7 @@ import (
// All torrent related helpers goes here
func (q *QBit) AddMagnet(ctx context.Context, url, category string) error {
- magnet, err := common.GetMagnetFromUrl(url)
+ magnet, err := utils.GetMagnetFromUrl(url)
if err != nil {
return fmt.Errorf("error parsing magnet link: %w", err)
}
@@ -35,7 +35,7 @@ func (q *QBit) AddTorrent(ctx context.Context, fileHeader *multipart.FileHeader,
file, _ := fileHeader.Open()
defer file.Close()
var reader io.Reader = file
- magnet, err := common.GetMagnetFromFile(reader, fileHeader.Filename)
+ magnet, err := utils.GetMagnetFromFile(reader, fileHeader.Filename)
if err != nil {
return fmt.Errorf("error reading file: %s \n %w", fileHeader.Filename, err)
}
@@ -46,7 +46,7 @@ func (q *QBit) AddTorrent(ctx context.Context, fileHeader *multipart.FileHeader,
return nil
}
-func (q *QBit) Process(ctx context.Context, magnet *common.Magnet, category string) error {
+func (q *QBit) Process(ctx context.Context, magnet *utils.Magnet, category string) error {
torrent := q.CreateTorrentFromMagnet(magnet, category, "auto")
a, ok := ctx.Value("arr").(*arr.Arr)
if !ok {
@@ -69,7 +69,7 @@ func (q *QBit) Process(ctx context.Context, magnet *common.Magnet, category stri
return nil
}
-func (q *QBit) CreateTorrentFromMagnet(magnet *common.Magnet, category, source string) *Torrent {
+func (q *QBit) CreateTorrentFromMagnet(magnet *utils.Magnet, category, source string) *Torrent {
torrent := &Torrent{
ID: uuid.NewString(),
Hash: strings.ToLower(magnet.InfoHash),
@@ -296,8 +296,8 @@ func (q *QBit) SetTorrentTags(t *Torrent, tags []string) bool {
func (q *QBit) RemoveTorrentTags(t *Torrent, tags []string) bool {
torrentTags := strings.Split(t.Tags, ",")
- newTorrentTags := common.Remove(torrentTags, tags...)
- q.Tags = common.Remove(q.Tags, tags...)
+ newTorrentTags := utils.RemoveItem(torrentTags, tags...)
+ q.Tags = utils.RemoveItem(q.Tags, tags...)
t.Tags = strings.Join(newTorrentTags, ",")
q.Storage.Update(t)
return true
@@ -316,6 +316,6 @@ func (q *QBit) AddTags(tags []string) bool {
}
func (q *QBit) RemoveTags(tags []string) bool {
- q.Tags = common.Remove(q.Tags, tags...)
+ q.Tags = utils.RemoveItem(q.Tags, tags...)
return true
}
diff --git a/pkg/qbit/shared/utils.go b/pkg/qbit/shared/utils.go
index 4efd02b..0a864e1 100644
--- a/pkg/qbit/shared/utils.go
+++ b/pkg/qbit/shared/utils.go
@@ -1,8 +1,8 @@
package shared
import (
- "github.com/sirrobot01/debrid-blackhole/common"
"github.com/sirrobot01/debrid-blackhole/pkg/debrid"
+ "os"
"path/filepath"
"sync"
"time"
@@ -16,7 +16,8 @@ func checkFileLoop(wg *sync.WaitGroup, dir string, file debrid.TorrentFile, read
for {
select {
case <-ticker.C:
- if common.FileReady(path) {
+ _, err := os.Stat(path)
+ if !os.IsNotExist(err) {
ready <- file
return
}
diff --git a/pkg/repair/utils.go b/pkg/repair/misc.go
similarity index 60%
rename from pkg/repair/utils.go
rename to pkg/repair/misc.go
index 257993c..31b281d 100644
--- a/pkg/repair/utils.go
+++ b/pkg/repair/misc.go
@@ -2,6 +2,8 @@ package repair
import (
"fmt"
+ "os"
+ "path/filepath"
"strconv"
"strings"
"time"
@@ -68,3 +70,62 @@ func parseDurationInterval(interval string) (time.Duration, error) {
return 0, fmt.Errorf("invalid unit in interval: %c", unit)
}
}
+
+func fileIsSymlinked(file string) bool {
+ info, err := os.Lstat(file)
+ if err != nil {
+ return false
+ }
+ return info.Mode()&os.ModeSymlink != 0
+}
+
+func getSymlinkTarget(file string) string {
+ if fileIsSymlinked(file) {
+ target, err := os.Readlink(file)
+ if err != nil {
+ return ""
+ }
+ if !filepath.IsAbs(target) {
+ dir := filepath.Dir(file)
+ target = filepath.Join(dir, target)
+ }
+ return target
+ }
+ return ""
+}
+
+func fileIsReadable(filePath string) error {
+ // First check if file exists and is accessible
+ info, err := os.Stat(filePath)
+ if err != nil {
+ return err
+ }
+
+ // Check if it's a regular file
+ if !info.Mode().IsRegular() {
+ return fmt.Errorf("not a regular file")
+ }
+
+ // Try to read the first 1024 bytes
+ err = checkFileStart(filePath)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func checkFileStart(filePath string) error {
+ f, err := os.Open(filePath)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ // Read first 1kb
+ buffer := make([]byte, 1024)
+ _, err = f.Read(buffer)
+ if err != nil {
+ return err
+ }
+ return nil
+}
diff --git a/pkg/repair/repair.go b/pkg/repair/repair.go
index 1e70b4d..f2c5eb4 100644
--- a/pkg/repair/repair.go
+++ b/pkg/repair/repair.go
@@ -2,71 +2,351 @@ package repair
import (
"context"
- "github.com/sirrobot01/debrid-blackhole/common"
+ "fmt"
+ "github.com/google/uuid"
+ "github.com/rs/zerolog"
+ "github.com/sirrobot01/debrid-blackhole/internal/config"
+ "github.com/sirrobot01/debrid-blackhole/internal/logger"
"github.com/sirrobot01/debrid-blackhole/pkg/arr"
+ "github.com/sirrobot01/debrid-blackhole/pkg/debrid"
"log"
+ "net/http"
+ "net/url"
"os"
"os/signal"
+ "path/filepath"
+ "runtime"
"strings"
+ "sync"
"syscall"
"time"
)
-func Start(ctx context.Context, config *common.Config, arrs *arr.Storage) error {
- ctx, stop := signal.NotifyContext(ctx, os.Interrupt, syscall.SIGTERM)
- logger := common.NewLogger("Repair", config.LogLevel, os.Stdout)
- defer stop()
// Repair coordinates integrity checks of arr-managed media files and
// triggers deletion/re-search of broken items. When ZurgURL is set,
// availability is probed through zurg over HTTP instead of reading the
// files directly.
type Repair struct {
	Jobs       []Job `json:"jobs"` // record of repair runs (in memory)
	deb        debrid.Service      // debrid backend (held for future use; not read in this chunk)
	arrs       *arr.Storage        // registered arr instances to repair
	duration   time.Duration       // interval (or time-until-first-run for "HH:MM" schedules)
	runOnStart bool                // kick off a repair immediately on Start
	ZurgURL    string              // base URL of zurg, e.g. http://localhost:9999
	IsZurg     bool                // true when ZurgURL is non-empty
	logger     zerolog.Logger
}
- duration, err := parseSchedule(config.Repair.Interval)
+func NewRepair(deb debrid.Service, arrs *arr.Storage) *Repair {
+ cfg := config.GetConfig()
+ duration, err := parseSchedule(cfg.Repair.Interval)
if err != nil {
- log.Fatalf("Failed to parse schedule: %v", err)
+ duration = time.Hour * 24
+ }
+ r := &Repair{
+ deb: deb,
+ logger: logger.NewLogger("Repair", cfg.LogLevel, os.Stdout),
+ arrs: arrs,
+ duration: duration,
+ runOnStart: cfg.Repair.RunOnStart,
+ ZurgURL: cfg.Repair.ZurgURL,
+ }
+ if r.ZurgURL != "" {
+ r.IsZurg = true
+ }
+ return r
+}
+
// Job records a single repair run: which arrs and media IDs were
// targeted and how the run ended. Exactly one of CompletedAt/FailedAt
// is set by Repair when the run finishes.
type Job struct {
	ID       string     `json:"id"`        // random UUID assigned at creation
	Arrs     []*arr.Arr `json:"arrs"`      // arr instances this job covers
	MediaIDs []string   `json:"media_ids"` // empty means "all media"
	// NOTE(review): JSON tags differ from field names (created_at/finished_at) —
	// presumably intentional for API compatibility; confirm with consumers.
	StartedAt   time.Time `json:"created_at"`
	CompletedAt time.Time `json:"finished_at"`
	FailedAt    time.Time `json:"failed_at"`

	Error string `json:"error"` // first error message when the job failed
}
+
+func (r *Repair) NewJob(arrs []*arr.Arr, mediaIDs []string) *Job {
+ return &Job{
+ ID: uuid.New().String(),
+ Arrs: arrs,
+ MediaIDs: mediaIDs,
+ StartedAt: time.Now(),
+ }
+}
+
+func (r *Repair) PreRunChecks() error {
+ // Check if zurg url is reachable
+ if !r.IsZurg {
+ return nil
+ }
+ resp, err := http.Get(fmt.Sprint(r.ZurgURL, "/http/version.txt"))
+ if err != nil {
+ r.logger.Debug().Err(err).Msgf("Precheck failed: Failed to reach zurg at %s", r.ZurgURL)
+ return err
+ }
+ if resp.StatusCode != http.StatusOK {
+ r.logger.Debug().Msgf("Precheck failed: Zurg returned %d", resp.StatusCode)
+ return err
+ }
+ return nil
+}
+
+func (r *Repair) Repair(arrs []*arr.Arr, mediaIds []string) error {
+
+ j := r.NewJob(arrs, mediaIds)
+
+ if err := r.PreRunChecks(); err != nil {
+ return err
+ }
+ var wg sync.WaitGroup
+ errors := make(chan error)
+ for _, a := range j.Arrs {
+ wg.Add(1)
+ go func(a *arr.Arr) {
+ defer wg.Done()
+ if len(j.MediaIDs) == 0 {
+ if err := r.RepairArr(a, ""); err != nil {
+ log.Printf("Error repairing %s: %v", a.Name, err)
+ errors <- err
+ }
+ } else {
+ for _, id := range j.MediaIDs {
+ if err := r.RepairArr(a, id); err != nil {
+ log.Printf("Error repairing %s: %v", a.Name, err)
+ errors <- err
+ }
+ }
+ }
+ }(a)
+ }
+ wg.Wait()
+ close(errors)
+ err := <-errors
+ if err != nil {
+ j.FailedAt = time.Now()
+ j.Error = err.Error()
+ return err
+ }
+ j.CompletedAt = time.Now()
+ return nil
+}
+
+func (r *Repair) Start(ctx context.Context) error {
+ ctx, stop := signal.NotifyContext(ctx, os.Interrupt, syscall.SIGTERM)
+ defer stop()
+ cfg := config.GetConfig()
+
+ if r.runOnStart {
+ r.logger.Info().Msgf("Running initial repair")
+ go func() {
+ if err := r.Repair(r.arrs.GetAll(), []string{}); err != nil {
+ r.logger.Info().Msgf("Error during initial repair: %v", err)
+ }
+ }()
}
- if config.Repair.RunOnStart {
- logger.Info().Msgf("Running initial repair")
- if err := repair(arrs); err != nil {
- log.Printf("Error during initial repair: %v", err)
- return err
- }
- }
-
- ticker := time.NewTicker(duration)
+ ticker := time.NewTicker(r.duration)
defer ticker.Stop()
- if strings.Contains(config.Repair.Interval, ":") {
- logger.Info().Msgf("Starting repair worker, scheduled daily at %s", config.Repair.Interval)
- } else {
- logger.Info().Msgf("Starting repair worker with %v interval", duration)
- }
+ r.logger.Info().Msgf("Starting repair worker with %v interval", r.duration)
for {
select {
case <-ctx.Done():
- logger.Info().Msg("Repair worker stopped")
+ r.logger.Info().Msg("Repair worker stopped")
return nil
case t := <-ticker.C:
- logger.Info().Msgf("Running repair at %v", t.Format("15:04:05"))
- if err := repair(arrs); err != nil {
- logger.Info().Msgf("Error during repair: %v", err)
- return err
+ r.logger.Info().Msgf("Running repair at %v", t.Format("15:04:05"))
+ if err := r.Repair(r.arrs.GetAll(), []string{}); err != nil {
+ r.logger.Info().Msgf("Error during repair: %v", err)
+ continue
}
// If using time-of-day schedule, reset the ticker for next day
- if strings.Contains(config.Repair.Interval, ":") {
- nextDuration, err := parseSchedule(config.Repair.Interval)
- if err != nil {
- logger.Info().Msgf("Error calculating next schedule: %v", err)
- return err
- }
- ticker.Reset(nextDuration)
+ if strings.Contains(cfg.Repair.Interval, ":") {
+ ticker.Reset(r.duration)
}
+
+ r.logger.Info().Msgf("Next scheduled repair at %v", t.Add(r.duration).Format("15:04:05"))
}
}
}
-func repair(arrs *arr.Storage) error {
- for _, a := range arrs.GetAll() {
- go a.Repair("")
+func (r *Repair) RepairArr(a *arr.Arr, tmdbId string) error {
+
+ cfg := config.GetConfig()
+
+ r.logger.Info().Msgf("Starting repair for %s", a.Name)
+ media, err := a.GetMedia(tmdbId)
+ if err != nil {
+ r.logger.Info().Msgf("Failed to get %s media: %v", a.Type, err)
+ return err
}
+ r.logger.Info().Msgf("Found %d %s media", len(media), a.Type)
+
+ if len(media) == 0 {
+ r.logger.Info().Msgf("No %s media found", a.Type)
+ return nil
+ }
+ // Check first media to confirm mounts are accessible
+ if !r.isMediaAccessible(media[0]) {
+ r.logger.Info().Msgf("Skipping repair. Parent directory not accessible for. Check your mounts")
+ return nil
+ }
+
+ semaphore := make(chan struct{}, runtime.NumCPU()*4)
+ totalBrokenItems := 0
+ var wg sync.WaitGroup
+ for _, m := range media {
+ wg.Add(1)
+ semaphore <- struct{}{}
+ go func(m arr.Content) {
+ defer wg.Done()
+ defer func() { <-semaphore }()
+ brokenItems := r.getBrokenFiles(m)
+ if brokenItems != nil {
+ r.logger.Debug().Msgf("Found %d broken files for %s", len(brokenItems), m.Title)
+ if !cfg.Repair.SkipDeletion {
+ if err := a.DeleteFiles(brokenItems); err != nil {
+ r.logger.Info().Msgf("Failed to delete broken items for %s: %v", m.Title, err)
+ }
+ }
+ if err := a.SearchMissing(brokenItems); err != nil {
+ r.logger.Info().Msgf("Failed to search missing items for %s: %v", m.Title, err)
+ }
+ totalBrokenItems += len(brokenItems)
+ }
+ }(m)
+ }
+ wg.Wait()
+ r.logger.Info().Msgf("Repair completed for %s. %d broken items found", a.Name, totalBrokenItems)
return nil
}
+
+func (r *Repair) isMediaAccessible(m arr.Content) bool {
+ files := m.Files
+ if len(files) == 0 {
+ return false
+ }
+ firstFile := files[0]
+ r.logger.Debug().Msgf("Checking parent directory for %s", firstFile.Path)
+ if _, err := os.Stat(firstFile.Path); os.IsNotExist(err) {
+ return false
+ }
+ // Check symlink parent directory
+ symlinkPath := getSymlinkTarget(firstFile.Path)
+
+ r.logger.Debug().Msgf("Checking symlink parent directory for %s", symlinkPath)
+
+ if symlinkPath != "" {
+ parentSymlink := filepath.Dir(filepath.Dir(symlinkPath)) // /mnt/zurg/torrents/movie/movie.mkv -> /mnt/zurg/torrents
+ if _, err := os.Stat(parentSymlink); os.IsNotExist(err) {
+ return false
+ }
+ }
+ return true
+}
+
+func (r *Repair) getBrokenFiles(media arr.Content) []arr.ContentFile {
+
+ if r.IsZurg {
+ return r.getZurgBrokenFiles(media)
+ } else {
+ return r.getFileBrokenFiles(media)
+ }
+}
+
+func (r *Repair) getFileBrokenFiles(media arr.Content) []arr.ContentFile {
+ // This checks symlink target, try to get read a tiny bit of the file
+
+ brokenFiles := make([]arr.ContentFile, 0)
+
+ uniqueParents := make(map[string][]arr.ContentFile)
+ files := media.Files
+ for _, file := range files {
+ target := getSymlinkTarget(file.Path)
+ if target != "" {
+ file.IsSymlink = true
+ dir, _ := filepath.Split(target)
+ parent := filepath.Base(filepath.Clean(dir))
+ uniqueParents[parent] = append(uniqueParents[parent], file)
+ }
+ }
+
+ for parent, f := range uniqueParents {
+ // Check stat
+ // Check file stat first
+ firstFile := f[0]
+ // Read a tiny bit of the file
+ if err := fileIsReadable(firstFile.Path); err != nil {
+ r.logger.Debug().Msgf("Broken file found at: %s", parent)
+ brokenFiles = append(brokenFiles, f...)
+ continue
+ }
+ }
+ if len(brokenFiles) == 0 {
+ r.logger.Debug().Msgf("No broken files found for %s", media.Title)
+ return nil
+ }
+ r.logger.Debug().Msgf("%d broken files found for %s", len(brokenFiles), media.Title)
+ return brokenFiles
+}
+
+func (r *Repair) getZurgBrokenFiles(media arr.Content) []arr.ContentFile {
+ // Use zurg setup to check file availability with zurg
+ // This reduces bandwidth usage significantly
+
+ brokenFiles := make([]arr.ContentFile, 0)
+ uniqueParents := make(map[string][]arr.ContentFile)
+ files := media.Files
+ for _, file := range files {
+ target := getSymlinkTarget(file.Path)
+ if target != "" {
+ file.IsSymlink = true
+ dir, f := filepath.Split(target)
+ parent := filepath.Base(filepath.Clean(dir))
+ // Set target path folder/file.mkv
+ file.TargetPath = f
+ uniqueParents[parent] = append(uniqueParents[parent], file)
+ }
+ }
+ // Access zurg url + symlink folder + first file(encoded)
+ for parent, f := range uniqueParents {
+ r.logger.Debug().Msgf("Checking %s", parent)
+ encodedParent := url.PathEscape(parent)
+ encodedFile := url.PathEscape(f[0].TargetPath)
+ fullURL := fmt.Sprintf("%s/http/__all__/%s/%s", r.ZurgURL, encodedParent, encodedFile)
+ // Check file stat first
+ if _, err := os.Stat(f[0].Path); os.IsNotExist(err) {
+ r.logger.Debug().Msgf("Broken symlink found: %s", fullURL)
+ brokenFiles = append(brokenFiles, f...)
+ continue
+ }
+
+ resp, err := http.Get(fullURL)
+ if err != nil {
+ r.logger.Debug().Err(err).Msgf("Failed to reach %s", fullURL)
+ brokenFiles = append(brokenFiles, f...)
+ continue
+ }
+ resp.Body.Close()
+ if resp.StatusCode != http.StatusOK {
+ r.logger.Debug().Msgf("Failed to get download url for %s", fullURL)
+ brokenFiles = append(brokenFiles, f...)
+ continue
+ }
+ downloadUrl := resp.Request.URL.String()
+ if downloadUrl != "" {
+ r.logger.Debug().Msgf("Found download url: %s", downloadUrl)
+ } else {
+ r.logger.Debug().Msgf("Failed to get download url for %s", fullURL)
+ brokenFiles = append(brokenFiles, f...)
+ continue
+ }
+ }
+ if len(brokenFiles) == 0 {
+ r.logger.Debug().Msgf("No broken files found for %s", media.Title)
+ return nil
+ }
+ r.logger.Debug().Msgf("%d broken files found for %s", len(brokenFiles), media.Title)
+ return brokenFiles
+}