- Refactor code

- Add better logging for 429 responses when streaming
- Fix minor issues
This commit is contained in:
Mukhtar Akere
2025-04-01 06:37:10 +01:00
parent 8bf164451c
commit 7d954052ae
28 changed files with 214 additions and 179 deletions

View File

@@ -25,13 +25,12 @@ archives:
- format: tar.gz - format: tar.gz
# this name template makes the OS and Arch compatible with the results of `uname`. # this name template makes the OS and Arch compatible with the results of `uname`.
name_template: >- name_template: >-
{{ .ProjectName }}_ decypharr_
{{- title .Os }}_ {{- title .Os }}_
{{- if eq .Arch "amd64" }}x86_64 {{- if eq .Arch "amd64" }}x86_64
{{- else if eq .Arch "386" }}i386 {{- else if eq .Arch "386" }}i386
{{- else }}{{ .Arch }}{{ end }} {{- else }}{{ .Arch }}{{ end }}
{{- if .Arm }}v{{ .Arm }}{{ end }} {{- if .Arm }}v{{ .Arm }}{{ end }}
# use zip for windows archives
format_overrides: format_overrides:
- goos: windows - goos: windows
format: zip format: zip

View File

@@ -31,7 +31,7 @@ func Start(ctx context.Context) error {
syscall.Umask(int(umask)) syscall.Umask(int(umask))
} }
cfg := config.GetConfig() cfg := config.Get()
var wg sync.WaitGroup var wg sync.WaitGroup
errChan := make(chan error) errChan := make(chan error)

View File

@@ -15,6 +15,4 @@ services:
- UMASK=002 - UMASK=002
- QBIT_PORT=8282 - QBIT_PORT=8282
- PORT=8181 - PORT=8181
restart: unless-stopped restart: unless-stopped
depends_on:
- rclone # If you are using rclone with docker

View File

@@ -100,7 +100,7 @@
"download_links_refresh_interval": "1h", "download_links_refresh_interval": "1h",
"folder_naming": "original", "folder_naming": "original",
"auto_expire_links_after": "24h", "auto_expire_links_after": "24h",
"rc_url": "http://192.168.0.219:9990", "rc_url": "http://your-ip-address:9990",
"rc_user": "your_rclone_rc_user", "rc_user": "your_rclone_rc_user",
"rc_pass": "your_rclone_rc_pass" "rc_pass": "your_rclone_rc_pass"
} }

View File

@@ -217,7 +217,7 @@ func SetConfigPath(path string) error {
return nil return nil
} }
func GetConfig() *Config { func Get() *Config {
once.Do(func() { once.Do(func() {
instance = &Config{} // Initialize instance first instance = &Config{} // Initialize instance first
if err := instance.loadConfig(); err != nil { if err := instance.loadConfig(); err != nil {

View File

@@ -17,7 +17,7 @@ var (
) )
func GetLogPath() string { func GetLogPath() string {
cfg := config.GetConfig() cfg := config.Get()
logsDir := filepath.Join(cfg.Path, "logs") logsDir := filepath.Join(cfg.Path, "logs")
if _, err := os.Stat(logsDir); os.IsNotExist(err) { if _, err := os.Stat(logsDir); os.IsNotExist(err) {
@@ -29,9 +29,9 @@ func GetLogPath() string {
return filepath.Join(logsDir, "decypharr.log") return filepath.Join(logsDir, "decypharr.log")
} }
func NewLogger(prefix string) zerolog.Logger { func New(prefix string) zerolog.Logger {
level := config.GetConfig().LogLevel level := config.Get().LogLevel
rotatingLogFile := &lumberjack.Logger{ rotatingLogFile := &lumberjack.Logger{
Filename: GetLogPath(), Filename: GetLogPath(),
@@ -91,7 +91,7 @@ func NewLogger(prefix string) zerolog.Logger {
func GetDefaultLogger() zerolog.Logger { func GetDefaultLogger() zerolog.Logger {
once.Do(func() { once.Do(func() {
logger = NewLogger("decypharr") logger = New("decypharr")
}) })
return logger return logger
} }

View File

@@ -56,7 +56,7 @@ func getDiscordHeader(event string) string {
} }
func SendDiscordMessage(event string, status string, message string) error { func SendDiscordMessage(event string, status string, message string) error {
cfg := config.GetConfig() cfg := config.Get()
webhookURL := cfg.DiscordWebhook webhookURL := cfg.DiscordWebhook
if webhookURL == "" { if webhookURL == "" {
return nil return nil

View File

@@ -60,9 +60,10 @@ type Client struct {
} }
// WithMaxRetries sets the maximum number of retry attempts // WithMaxRetries sets the maximum number of retry attempts
func (c *Client) WithMaxRetries(retries int) *Client { func WithMaxRetries(maxRetries int) ClientOption {
c.maxRetries = retries return func(c *Client) {
return c c.maxRetries = maxRetries
}
} }
// WithTimeout sets the request timeout // WithTimeout sets the request timeout
@@ -79,24 +80,27 @@ func WithRedirectPolicy(policy func(req *http.Request, via []*http.Request) erro
} }
// WithRateLimiter sets a rate limiter // WithRateLimiter sets a rate limiter
func (c *Client) WithRateLimiter(rl *rate.Limiter) *Client { func WithRateLimiter(rl *rate.Limiter) ClientOption {
c.rateLimiter = rl return func(c *Client) {
return c c.rateLimiter = rl
}
} }
// WithHeaders sets default headers // WithHeaders sets default headers
func (c *Client) WithHeaders(headers map[string]string) *Client { func WithHeaders(headers map[string]string) ClientOption {
c.headers = headers return func(c *Client) {
return c c.headers = headers
}
} }
func (c *Client) SetHeader(key, value string) { func (c *Client) SetHeader(key, value string) {
c.headers[key] = value c.headers[key] = value
} }
func (c *Client) WithLogger(logger zerolog.Logger) *Client { func WithLogger(logger zerolog.Logger) ClientOption {
c.logger = logger return func(c *Client) {
return c c.logger = logger
}
} }
func WithTransport(transport *http.Transport) ClientOption { func WithTransport(transport *http.Transport) ClientOption {
@@ -106,11 +110,12 @@ func WithTransport(transport *http.Transport) ClientOption {
} }
// WithRetryableStatus adds status codes that should trigger a retry // WithRetryableStatus adds status codes that should trigger a retry
func (c *Client) WithRetryableStatus(statusCodes ...int) *Client { func WithRetryableStatus(statusCodes ...int) ClientOption {
for _, code := range statusCodes { return func(c *Client) {
c.retryableStatus[code] = true for _, code := range statusCodes {
c.retryableStatus[code] = true
}
} }
return c
} }
// doRequest performs a single HTTP request with rate limiting // doRequest performs a single HTTP request with rate limiting
@@ -249,7 +254,7 @@ func New(options ...ClientOption) *Client {
http.StatusServiceUnavailable: true, http.StatusServiceUnavailable: true,
http.StatusGatewayTimeout: true, http.StatusGatewayTimeout: true,
}, },
logger: logger.NewLogger("request"), logger: logger.New("request"),
timeout: 60 * time.Second, timeout: 60 * time.Second,
} }

14
main.go
View File

@@ -5,10 +5,7 @@ import (
"flag" "flag"
"github.com/sirrobot01/debrid-blackhole/cmd/decypharr" "github.com/sirrobot01/debrid-blackhole/cmd/decypharr"
"github.com/sirrobot01/debrid-blackhole/internal/config" "github.com/sirrobot01/debrid-blackhole/internal/config"
"github.com/sirrobot01/debrid-blackhole/pkg/version"
"log" "log"
"net/http"
_ "net/http/pprof" // registers pprof handlers
"os" "os"
"os/signal" "os/signal"
"runtime/debug" "runtime/debug"
@@ -22,15 +19,6 @@ func main() {
debug.PrintStack() debug.PrintStack()
} }
}() }()
if version.GetInfo().Channel == "dev" {
log.Println("Running in dev mode")
go func() {
if err := http.ListenAndServe(":6060", nil); err != nil {
log.Fatalf("pprof server failed: %v", err)
}
}()
}
var configPath string var configPath string
flag.StringVar(&configPath, "config", "/data", "path to the data folder") flag.StringVar(&configPath, "config", "/data", "path to the data folder")
flag.Parse() flag.Parse()
@@ -39,7 +27,7 @@ func main() {
log.Fatal(err) log.Fatal(err)
} }
config.GetConfig() config.Get()
// Create a context that's cancelled on SIGINT/SIGTERM // Create a context that's cancelled on SIGINT/SIGTERM
ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM) ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)

View File

@@ -134,7 +134,7 @@ func InferType(host, name string) Type {
func NewStorage() *Storage { func NewStorage() *Storage {
arrs := make(map[string]*Arr) arrs := make(map[string]*Arr)
for _, a := range config.GetConfig().Arrs { for _, a := range config.Get().Arrs {
name := a.Name name := a.Name
arrs[name] = New(name, a.Host, a.Token, a.Cleanup, a.SkipRepair, a.DownloadUncached) arrs[name] = New(name, a.Host, a.Token, a.Cleanup, a.SkipRepair, a.DownloadUncached)
} }

View File

@@ -9,14 +9,14 @@ import (
"github.com/sirrobot01/debrid-blackhole/internal/request" "github.com/sirrobot01/debrid-blackhole/internal/request"
"github.com/sirrobot01/debrid-blackhole/internal/utils" "github.com/sirrobot01/debrid-blackhole/internal/utils"
"github.com/sirrobot01/debrid-blackhole/pkg/debrid/types" "github.com/sirrobot01/debrid-blackhole/pkg/debrid/types"
"slices"
"strings"
"time"
"net/http" "net/http"
gourl "net/url" gourl "net/url"
"path/filepath" "path/filepath"
"slices"
"strconv" "strconv"
"strings"
"sync"
"time"
) )
type AllDebrid struct { type AllDebrid struct {
@@ -32,6 +32,36 @@ type AllDebrid struct {
CheckCached bool CheckCached bool
} }
func New(dc config.Debrid) *AllDebrid {
rl := request.ParseRateLimit(dc.RateLimit)
apiKeys := strings.Split(dc.APIKey, ",")
extraKeys := make([]string, 0)
if len(apiKeys) > 1 {
extraKeys = apiKeys[1:]
}
mainKey := apiKeys[0]
headers := map[string]string{
"Authorization": fmt.Sprintf("Bearer %s", mainKey),
}
_log := logger.New(dc.Name)
client := request.New(
request.WithHeaders(headers),
request.WithLogger(_log),
request.WithRateLimiter(rl),
)
return &AllDebrid{
Name: "alldebrid",
Host: dc.Host,
APIKey: mainKey,
ExtraAPIKeys: extraKeys,
DownloadUncached: dc.DownloadUncached,
client: client,
MountPath: dc.Folder,
logger: logger.New(dc.Name),
CheckCached: dc.CheckCached,
}
}
func (ad *AllDebrid) GetName() string { func (ad *AllDebrid) GetName() string {
return ad.Name return ad.Name
} }
@@ -89,7 +119,7 @@ func getAlldebridStatus(statusCode int) string {
func flattenFiles(files []MagnetFile, parentPath string, index *int) map[string]types.File { func flattenFiles(files []MagnetFile, parentPath string, index *int) map[string]types.File {
result := make(map[string]types.File) result := make(map[string]types.File)
cfg := config.GetConfig() cfg := config.Get()
for _, f := range files { for _, f := range files {
currentPath := f.Name currentPath := f.Name
@@ -218,26 +248,46 @@ func (ad *AllDebrid) DeleteTorrent(torrentId string) error {
} }
func (ad *AllDebrid) GenerateDownloadLinks(t *types.Torrent) error { func (ad *AllDebrid) GenerateDownloadLinks(t *types.Torrent) error {
for _, file := range t.Files { filesCh := make(chan types.File, len(t.Files))
url := fmt.Sprintf("%s/link/unlock", ad.Host) errCh := make(chan error, len(t.Files))
query := gourl.Values{}
query.Add("link", file.Link)
url += "?" + query.Encode()
req, _ := http.NewRequest(http.MethodGet, url, nil)
resp, err := ad.client.MakeRequest(req)
if err != nil {
return err
}
var data DownloadLink
if err = json.Unmarshal(resp, &data); err != nil {
return err
}
link := data.Data.Link
file.DownloadLink = link
file.Generated = time.Now()
t.Files[file.Name] = file
var wg sync.WaitGroup
wg.Add(len(t.Files))
for _, file := range t.Files {
go func(file types.File) {
defer wg.Done()
link, err := ad.GetDownloadLink(t, &file)
if err != nil {
errCh <- err
return
}
file.DownloadLink = link
file.Generated = time.Now()
if link == "" {
errCh <- fmt.Errorf("error getting download links %w", err)
return
}
filesCh <- file
}(file)
} }
go func() {
wg.Wait()
close(filesCh)
close(errCh)
}()
files := make(map[string]types.File, len(t.Files))
for file := range filesCh {
files[file.Name] = file
}
// Check for errors
for err := range errCh {
if err != nil {
return err // Return the first error encountered
}
}
t.Files = files
return nil return nil
} }
@@ -317,31 +367,3 @@ func (ad *AllDebrid) CheckLink(link string) error {
func (ad *AllDebrid) GetMountPath() string { func (ad *AllDebrid) GetMountPath() string {
return ad.MountPath return ad.MountPath
} }
func New(dc config.Debrid) *AllDebrid {
rl := request.ParseRateLimit(dc.RateLimit)
apiKeys := strings.Split(dc.APIKey, ",")
extraKeys := make([]string, 0)
if len(apiKeys) > 1 {
extraKeys = apiKeys[1:]
}
mainKey := apiKeys[0]
headers := map[string]string{
"Authorization": fmt.Sprintf("Bearer %s", mainKey),
}
_log := logger.NewLogger(dc.Name)
client := request.New().
WithHeaders(headers).
WithRateLimiter(rl).WithLogger(_log)
return &AllDebrid{
Name: "alldebrid",
Host: dc.Host,
APIKey: mainKey,
ExtraAPIKeys: extraKeys,
DownloadUncached: dc.DownloadUncached,
client: client,
MountPath: dc.Folder,
logger: logger.NewLogger(dc.Name),
CheckCached: dc.CheckCached,
}
}

View File

@@ -95,7 +95,7 @@ type Cache struct {
} }
func New(dc config.Debrid, client types.Client) *Cache { func New(dc config.Debrid, client types.Client) *Cache {
cfg := config.GetConfig() cfg := config.Get()
torrentRefreshInterval, err := time.ParseDuration(dc.TorrentsRefreshInterval) torrentRefreshInterval, err := time.ParseDuration(dc.TorrentsRefreshInterval)
if err != nil { if err != nil {
torrentRefreshInterval = time.Second * 15 torrentRefreshInterval = time.Second * 15
@@ -117,7 +117,7 @@ func New(dc config.Debrid, client types.Client) *Cache {
torrents: xsync.NewMapOf[string, *CachedTorrent](), torrents: xsync.NewMapOf[string, *CachedTorrent](),
torrentsNames: xsync.NewMapOf[string, *CachedTorrent](), torrentsNames: xsync.NewMapOf[string, *CachedTorrent](),
client: client, client: client,
logger: logger.NewLogger(fmt.Sprintf("%s-webdav", client.GetName())), logger: logger.New(fmt.Sprintf("%s-webdav", client.GetName())),
workers: workers, workers: workers,
downloadLinks: xsync.NewMapOf[string, downloadLinkCache](), downloadLinks: xsync.NewMapOf[string, downloadLinkCache](),
torrentRefreshInterval: torrentRefreshInterval, torrentRefreshInterval: torrentRefreshInterval,
@@ -219,11 +219,12 @@ func (c *Cache) load() (map[string]*CachedTorrent, error) {
now := time.Now() now := time.Now()
for _, file := range files { for _, file := range files {
if file.IsDir() || filepath.Ext(file.Name()) != ".json" { fileName := file.Name()
if file.IsDir() || filepath.Ext(fileName) != ".json" {
continue continue
} }
filePath := filepath.Join(c.dir, file.Name()) filePath := filepath.Join(c.dir, fileName)
data, err := os.ReadFile(filePath) data, err := os.ReadFile(filePath)
if err != nil { if err != nil {
c.logger.Debug().Err(err).Msgf("Failed to read file: %s", filePath) c.logger.Debug().Err(err).Msgf("Failed to read file: %s", filePath)
@@ -301,7 +302,7 @@ func (c *Cache) SaveTorrent(ct *CachedTorrent) {
c.saveTorrent(ct) c.saveTorrent(ct)
}() }()
default: default:
c.saveTorrent(ct) go c.saveTorrent(ct) // If the semaphore is full, just run the save in the background
} }
} }

View File

@@ -12,7 +12,7 @@ type Engine struct {
} }
func NewEngine() *Engine { func NewEngine() *Engine {
cfg := config.GetConfig() cfg := config.Get()
clients := make(map[string]types.Client) clients := make(map[string]types.Client)
caches := make(map[string]*Cache) caches := make(map[string]*Cache)

View File

@@ -61,8 +61,7 @@ func (c *Cache) refreshListings() {
} }
// Atomic store of the complete ready-to-use slice // Atomic store of the complete ready-to-use slice
c.listings.Store(files) c.listings.Store(files)
//c.resetPropfindResponse() _ = c.refreshXml()
_ = c.RefreshXml()
if err := c.RefreshRclone(); err != nil { if err := c.RefreshRclone(); err != nil {
c.logger.Debug().Err(err).Msg("Failed to refresh rclone") c.logger.Debug().Err(err).Msg("Failed to refresh rclone")
} }
@@ -187,7 +186,7 @@ func (c *Cache) refreshTorrents() {
func (c *Cache) RefreshRclone() error { func (c *Cache) RefreshRclone() error {
client := request.Default() client := request.Default()
cfg := config.GetConfig().WebDav cfg := config.Get().WebDav
if cfg.RcUrl == "" { if cfg.RcUrl == "" {
return nil return nil

View File

@@ -130,7 +130,6 @@ func (c *Cache) ReInsertTorrent(torrent *types.Torrent) error {
} }
oldID := torrent.Id oldID := torrent.Id
defer c.repairsInProgress.Delete(oldID) defer c.repairsInProgress.Delete(oldID)
// Submit the magnet to the debrid service // Submit the magnet to the debrid service
@@ -138,6 +137,8 @@ func (c *Cache) ReInsertTorrent(torrent *types.Torrent) error {
var err error var err error
torrent, err = c.client.SubmitMagnet(torrent) torrent, err = c.client.SubmitMagnet(torrent)
if err != nil { if err != nil {
// Remove the old torrent from the cache and debrid service
_ = c.DeleteTorrent(oldID)
return fmt.Errorf("failed to submit magnet: %w", err) return fmt.Errorf("failed to submit magnet: %w", err)
} }

View File

@@ -10,7 +10,7 @@ import (
"time" "time"
) )
func (c *Cache) RefreshXml() error { func (c *Cache) refreshXml() error {
parents := []string{"__all__", "torrents"} parents := []string{"__all__", "torrents"}
torrents := c.GetListing() torrents := c.GetListing()
for _, parent := range parents { for _, parent := range parents {

View File

@@ -129,7 +129,7 @@ func (dl *DebridLink) UpdateTorrent(t *types.Torrent) error {
t.Seeders = data.PeersConnected t.Seeders = data.PeersConnected
t.Filename = name t.Filename = name
t.OriginalFilename = name t.OriginalFilename = name
cfg := config.GetConfig() cfg := config.Get()
for _, f := range data.Files { for _, f := range data.Files {
if !cfg.IsSizeAllowed(f.Size) { if !cfg.IsSizeAllowed(f.Size) {
continue continue
@@ -235,6 +235,7 @@ func (dl *DebridLink) DeleteTorrent(torrentId string) error {
} }
func (dl *DebridLink) GenerateDownloadLinks(t *types.Torrent) error { func (dl *DebridLink) GenerateDownloadLinks(t *types.Torrent) error {
// Download links are already generated
return nil return nil
} }
@@ -270,10 +271,12 @@ func New(dc config.Debrid) *DebridLink {
"Authorization": fmt.Sprintf("Bearer %s", mainKey), "Authorization": fmt.Sprintf("Bearer %s", mainKey),
"Content-Type": "application/json", "Content-Type": "application/json",
} }
_log := logger.NewLogger(dc.Name) _log := logger.New(dc.Name)
client := request.New(). client := request.New(
WithHeaders(headers). request.WithHeaders(headers),
WithRateLimiter(rl).WithLogger(_log) request.WithLogger(_log),
request.WithRateLimiter(rl),
)
return &DebridLink{ return &DebridLink{
Name: "debridlink", Name: "debridlink",
Host: dc.Host, Host: dc.Host,
@@ -282,7 +285,7 @@ func New(dc config.Debrid) *DebridLink {
DownloadUncached: dc.DownloadUncached, DownloadUncached: dc.DownloadUncached,
client: client, client: client,
MountPath: dc.Folder, MountPath: dc.Folder,
logger: logger.NewLogger(dc.Name), logger: logger.New(dc.Name),
CheckCached: dc.CheckCached, CheckCached: dc.CheckCached,
} }
} }
@@ -341,7 +344,7 @@ func (dl *DebridLink) getTorrents(page, perPage int) ([]*types.Torrent, error) {
Debrid: dl.Name, Debrid: dl.Name,
MountPath: dl.MountPath, MountPath: dl.MountPath,
} }
cfg := config.GetConfig() cfg := config.Get()
for _, f := range t.Files { for _, f := range t.Files {
if !cfg.IsSizeAllowed(f.Size) { if !cfg.IsSizeAllowed(f.Size) {
continue continue

View File

@@ -48,7 +48,7 @@ func (r *RealDebrid) GetLogger() zerolog.Logger {
// if validate is false, selected files will be returned // if validate is false, selected files will be returned
func getTorrentFiles(t *types.Torrent, data TorrentInfo, validate bool) map[string]types.File { func getTorrentFiles(t *types.Torrent, data TorrentInfo, validate bool) map[string]types.File {
files := make(map[string]types.File) files := make(map[string]types.File)
cfg := config.GetConfig() cfg := config.Get()
idx := 0 idx := 0
for _, f := range data.Files { for _, f := range data.Files {
name := filepath.Base(f.Path) name := filepath.Base(f.Path)
@@ -267,9 +267,8 @@ func (r *RealDebrid) GenerateDownloadLinks(t *types.Torrent) error {
errCh := make(chan error, len(t.Files)) errCh := make(chan error, len(t.Files))
var wg sync.WaitGroup var wg sync.WaitGroup
wg.Add(len(t.Files))
for _, f := range t.Files { for _, f := range t.Files {
wg.Add(1)
go func(file types.File) { go func(file types.File) {
defer wg.Done() defer wg.Done()
@@ -446,7 +445,7 @@ func (r *RealDebrid) getTorrents(offset int, limit int) (int, []*types.Torrent,
} }
func (r *RealDebrid) GetTorrents() ([]*types.Torrent, error) { func (r *RealDebrid) GetTorrents() ([]*types.Torrent, error) {
limit := 1000 limit := 5000
// Get first batch and total count // Get first batch and total count
totalItems, firstBatch, err := r.getTorrents(0, limit) totalItems, firstBatch, err := r.getTorrents(0, limit)
@@ -561,12 +560,14 @@ func New(dc config.Debrid) *RealDebrid {
headers := map[string]string{ headers := map[string]string{
"Authorization": fmt.Sprintf("Bearer %s", dc.APIKey), "Authorization": fmt.Sprintf("Bearer %s", dc.APIKey),
} }
_log := logger.NewLogger(dc.Name) _log := logger.New(dc.Name)
client := request.New(). client := request.New(
WithHeaders(headers). request.WithHeaders(headers),
WithRateLimiter(rl).WithLogger(_log). request.WithRateLimiter(rl),
WithMaxRetries(5). request.WithLogger(_log),
WithRetryableStatus(429) request.WithMaxRetries(5),
request.WithRetryableStatus(429),
)
return &RealDebrid{ return &RealDebrid{
Name: "realdebrid", Name: "realdebrid",
Host: dc.Host, Host: dc.Host,
@@ -575,7 +576,7 @@ func New(dc config.Debrid) *RealDebrid {
DownloadUncached: dc.DownloadUncached, DownloadUncached: dc.DownloadUncached,
client: client, client: client,
MountPath: dc.Folder, MountPath: dc.Folder,
logger: logger.NewLogger(dc.Name), logger: logger.New(dc.Name),
CheckCached: dc.CheckCached, CheckCached: dc.CheckCached,
} }
} }

View File

@@ -10,8 +10,6 @@ import (
"github.com/sirrobot01/debrid-blackhole/internal/request" "github.com/sirrobot01/debrid-blackhole/internal/request"
"github.com/sirrobot01/debrid-blackhole/internal/utils" "github.com/sirrobot01/debrid-blackhole/internal/utils"
"github.com/sirrobot01/debrid-blackhole/pkg/debrid/types" "github.com/sirrobot01/debrid-blackhole/pkg/debrid/types"
"time"
"mime/multipart" "mime/multipart"
"net/http" "net/http"
gourl "net/url" gourl "net/url"
@@ -20,6 +18,7 @@ import (
"slices" "slices"
"strconv" "strconv"
"strings" "strings"
"sync"
) )
type Torbox struct { type Torbox struct {
@@ -46,10 +45,12 @@ func New(dc config.Debrid) *Torbox {
headers := map[string]string{ headers := map[string]string{
"Authorization": fmt.Sprintf("Bearer %s", mainKey), "Authorization": fmt.Sprintf("Bearer %s", mainKey),
} }
_log := logger.NewLogger(dc.Name) _log := logger.New(dc.Name)
client := request.New(). client := request.New(
WithHeaders(headers). request.WithHeaders(headers),
WithRateLimiter(rl).WithLogger(_log) request.WithRateLimiter(rl),
request.WithLogger(_log),
)
return &Torbox{ return &Torbox{
Name: "torbox", Name: "torbox",
@@ -196,7 +197,7 @@ func (tb *Torbox) UpdateTorrent(t *types.Torrent) error {
t.OriginalFilename = name t.OriginalFilename = name
t.MountPath = tb.MountPath t.MountPath = tb.MountPath
t.Debrid = tb.Name t.Debrid = tb.Name
cfg := config.GetConfig() cfg := config.Get()
for _, f := range data.Files { for _, f := range data.Files {
fileName := filepath.Base(f.Name) fileName := filepath.Base(f.Name)
if utils.IsSampleFile(f.AbsolutePath) { if utils.IsSampleFile(f.AbsolutePath) {
@@ -275,30 +276,43 @@ func (tb *Torbox) DeleteTorrent(torrentId string) error {
} }
func (tb *Torbox) GenerateDownloadLinks(t *types.Torrent) error { func (tb *Torbox) GenerateDownloadLinks(t *types.Torrent) error {
filesCh := make(chan types.File, len(t.Files))
errCh := make(chan error, len(t.Files))
var wg sync.WaitGroup
wg.Add(len(t.Files))
for _, file := range t.Files { for _, file := range t.Files {
url := fmt.Sprintf("%s/api/torrents/requestdl/", tb.Host) go func() {
query := gourl.Values{} defer wg.Done()
query.Add("torrent_id", t.Id) link, err := tb.GetDownloadLink(t, &file)
query.Add("token", tb.APIKey) if err != nil {
query.Add("file_id", file.Id) errCh <- err
url += "?" + query.Encode() return
req, _ := http.NewRequest(http.MethodGet, url, nil) }
resp, err := tb.client.MakeRequest(req) file.DownloadLink = link
if err != nil { filesCh <- file
return err }()
}
var data DownloadLinksResponse
if err = json.Unmarshal(resp, &data); err != nil {
return err
}
if data.Data == nil {
return fmt.Errorf("error getting download links")
}
link := *data.Data
file.DownloadLink = link
file.Generated = time.Now()
t.Files[file.Name] = file
} }
go func() {
wg.Wait()
close(filesCh)
close(errCh)
}()
// Collect results
files := make(map[string]types.File, len(t.Files))
for file := range filesCh {
files[file.Name] = file
}
// Check for errors
for err := range errCh {
if err != nil {
return err // Return the first error encountered
}
}
t.Files = files
return nil return nil
} }

View File

@@ -80,7 +80,7 @@ type Proxy struct {
} }
func NewProxy() *Proxy { func NewProxy() *Proxy {
cfg := config.GetConfig().Proxy cfg := config.Get().Proxy
port := cmp.Or(os.Getenv("PORT"), cfg.Port, "8181") port := cmp.Or(os.Getenv("PORT"), cfg.Port, "8181")
return &Proxy{ return &Proxy{
port: port, port: port,
@@ -88,7 +88,7 @@ func NewProxy() *Proxy {
username: cfg.Username, username: cfg.Username,
password: cfg.Password, password: cfg.Password,
cachedOnly: cfg.CachedOnly, cachedOnly: cfg.CachedOnly,
logger: logger.NewLogger("proxy"), logger: logger.New("proxy"),
} }
} }

View File

@@ -23,7 +23,7 @@ type QBit struct {
} }
func New() *QBit { func New() *QBit {
_cfg := config.GetConfig() _cfg := config.Get()
cfg := _cfg.QBitTorrent cfg := _cfg.QBitTorrent
port := cmp.Or(cfg.Port, os.Getenv("QBIT_PORT"), "8282") port := cmp.Or(cfg.Port, os.Getenv("QBIT_PORT"), "8282")
refreshInterval := cmp.Or(cfg.RefreshInterval, 10) refreshInterval := cmp.Or(cfg.RefreshInterval, 10)
@@ -34,7 +34,7 @@ func New() *QBit {
DownloadFolder: cfg.DownloadFolder, DownloadFolder: cfg.DownloadFolder,
Categories: cfg.Categories, Categories: cfg.Categories,
Storage: NewTorrentStorage(filepath.Join(_cfg.Path, "torrents.json")), Storage: NewTorrentStorage(filepath.Join(_cfg.Path, "torrents.json")),
logger: logger.NewLogger("qbit"), logger: logger.New("qbit"),
RefreshInterval: refreshInterval, RefreshInterval: refreshInterval,
SkipPreCache: cfg.SkipPreCache, SkipPreCache: cfg.SkipPreCache,
} }

View File

@@ -119,8 +119,9 @@ func (q *QBit) ProcessFiles(torrent *Torrent, debridTorrent *debrid.Torrent, arr
if ok { if ok {
q.logger.Info().Msgf("Using internal webdav for %s", debridTorrent.Debrid) q.logger.Info().Msgf("Using internal webdav for %s", debridTorrent.Debrid)
// Use webdav to download the file // Use webdav to download the file
err := cache.AddTorrent(debridTorrent) if err := cache.AddTorrent(debridTorrent); err != nil {
if err != nil { q.logger.Error().Msgf("Error adding torrent to cache: %v", err)
q.MarkAsFailed(torrent)
return return
} }
rclonePath := filepath.Join(debridTorrent.MountPath, cache.GetTorrentFolder(debridTorrent)) rclonePath := filepath.Join(debridTorrent.MountPath, cache.GetTorrentFolder(debridTorrent))

View File

@@ -41,7 +41,7 @@ type Repair struct {
} }
func New(arrs *arr.Storage, engine *debrid.Engine) *Repair { func New(arrs *arr.Storage, engine *debrid.Engine) *Repair {
cfg := config.GetConfig() cfg := config.Get()
duration, err := parseSchedule(cfg.Repair.Interval) duration, err := parseSchedule(cfg.Repair.Interval)
if err != nil { if err != nil {
duration = time.Hour * 24 duration = time.Hour * 24
@@ -52,7 +52,7 @@ func New(arrs *arr.Storage, engine *debrid.Engine) *Repair {
} }
r := &Repair{ r := &Repair{
arrs: arrs, arrs: arrs,
logger: logger.NewLogger("repair"), logger: logger.New("repair"),
duration: duration, duration: duration,
runOnStart: cfg.Repair.RunOnStart, runOnStart: cfg.Repair.RunOnStart,
ZurgURL: cfg.Repair.ZurgURL, ZurgURL: cfg.Repair.ZurgURL,
@@ -73,7 +73,7 @@ func New(arrs *arr.Storage, engine *debrid.Engine) *Repair {
} }
func (r *Repair) Start(ctx context.Context) error { func (r *Repair) Start(ctx context.Context) error {
cfg := config.GetConfig() cfg := config.Get()
r.ctx = ctx r.ctx = ctx
if r.runOnStart { if r.runOnStart {
r.logger.Info().Msgf("Running initial repair") r.logger.Info().Msgf("Running initial repair")

View File

@@ -24,7 +24,7 @@ type Server struct {
} }
func New() *Server { func New() *Server {
l := logger.NewLogger("http") l := logger.New("http")
r := chi.NewRouter() r := chi.NewRouter()
r.Use(middleware.Recoverer) r.Use(middleware.Recoverer)
r.Handle("/static/*", http.StripPrefix("/static/", http.FileServer(http.Dir("static")))) r.Handle("/static/*", http.StripPrefix("/static/", http.FileServer(http.Dir("static"))))
@@ -36,7 +36,7 @@ func New() *Server {
} }
func (s *Server) Start(ctx context.Context) error { func (s *Server) Start(ctx context.Context) error {
cfg := config.GetConfig() cfg := config.Get()
// Register routes // Register routes
// Register webhooks // Register webhooks
s.router.Post("/webhooks/tautulli", s.handleTautulli) s.router.Post("/webhooks/tautulli", s.handleTautulli)

View File

@@ -62,7 +62,7 @@ type Handler struct {
func New(qbit *qbit.QBit) *Handler { func New(qbit *qbit.QBit) *Handler {
return &Handler{ return &Handler{
qbit: qbit, qbit: qbit,
logger: logger.NewLogger("ui"), logger: logger.New("ui"),
} }
} }
@@ -93,7 +93,7 @@ func init() {
func (ui *Handler) authMiddleware(next http.Handler) http.Handler { func (ui *Handler) authMiddleware(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// Check if setup is needed // Check if setup is needed
cfg := config.GetConfig() cfg := config.Get()
if cfg.NeedsSetup() && r.URL.Path != "/setup" { if cfg.NeedsSetup() && r.URL.Path != "/setup" {
http.Redirect(w, r, "/setup", http.StatusSeeOther) http.Redirect(w, r, "/setup", http.StatusSeeOther)
return return
@@ -127,7 +127,7 @@ func (ui *Handler) verifyAuth(username, password string) bool {
if username == "" { if username == "" {
return false return false
} }
auth := config.GetConfig().GetAuth() auth := config.Get().GetAuth()
if auth == nil { if auth == nil {
return false return false
} }
@@ -187,7 +187,7 @@ func (ui *Handler) LogoutHandler(w http.ResponseWriter, r *http.Request) {
} }
func (ui *Handler) SetupHandler(w http.ResponseWriter, r *http.Request) { func (ui *Handler) SetupHandler(w http.ResponseWriter, r *http.Request) {
cfg := config.GetConfig() cfg := config.Get()
authCfg := cfg.GetAuth() authCfg := cfg.GetAuth()
if !cfg.NeedsSetup() { if !cfg.NeedsSetup() {
@@ -436,7 +436,7 @@ func (ui *Handler) handleDeleteTorrents(w http.ResponseWriter, r *http.Request)
} }
func (ui *Handler) handleGetConfig(w http.ResponseWriter, r *http.Request) { func (ui *Handler) handleGetConfig(w http.ResponseWriter, r *http.Request) {
cfg := config.GetConfig() cfg := config.Get()
arrCfgs := make([]config.Arr, 0) arrCfgs := make([]config.Arr, 0)
svc := service.GetService() svc := service.GetService()
for _, a := range svc.Arr.GetAll() { for _, a := range svc.Arr.GetAll() {

View File

@@ -118,8 +118,15 @@ func (f *File) Read(p []byte) (n int, err error) {
if err != nil { if err != nil {
return 0, fmt.Errorf("HTTP request error: %w", err) return 0, fmt.Errorf("HTTP request error: %w", err)
} }
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent { if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent {
resp.Body.Close() resp.Body.Close()
_log := f.cache.GetLogger()
_log.Debug().
Str("downloadLink", downloadLink).
Str("link", f.link).
Str("torrentId", f.torrentId).
Msgf("Unexpected HTTP status: %d", resp.StatusCode)
return 0, fmt.Errorf("unexpected HTTP status: %d", resp.StatusCode) return 0, fmt.Errorf("unexpected HTTP status: %d", resp.StatusCode)
} }
f.reader = resp.Body f.reader = resp.Body

View File

@@ -56,19 +56,15 @@ func (h *Handler) RemoveAll(ctx context.Context, name string) error {
return os.ErrPermission return os.ErrPermission
} }
torrentName, filename := getName(rootDir, name) torrentName, _ := getName(rootDir, name)
cachedTorrent := h.cache.GetTorrentByName(torrentName) cachedTorrent := h.cache.GetTorrentByName(torrentName)
if cachedTorrent == nil { if cachedTorrent == nil {
h.logger.Debug().Msgf("Torrent not found: %s", torrentName) h.logger.Debug().Msgf("Torrent not found: %s", torrentName)
return os.ErrNotExist return nil // It's possible that the torrent was removed
} }
if filename == "" { h.cache.OnRemove(cachedTorrent.Id)
h.cache.OnRemove(cachedTorrent.Id) return nil
return nil
}
return os.ErrPermission
} }
// Rename implements webdav.FileSystem // Rename implements webdav.FileSystem

View File

@@ -18,13 +18,13 @@ var (
func getLogger() zerolog.Logger { func getLogger() zerolog.Logger {
once.Do(func() { once.Do(func() {
_logInstance = logger.NewLogger("worker") _logInstance = logger.New("worker")
}) })
return _logInstance return _logInstance
} }
func Start(ctx context.Context) error { func Start(ctx context.Context) error {
cfg := config.GetConfig() cfg := config.Get()
// Start Arr Refresh Worker // Start Arr Refresh Worker
var wg sync.WaitGroup var wg sync.WaitGroup