- Add support for multiple API keys
- Fix minor bugs, remove goroutine memory leaks
This commit is contained in:
Mukhtar Akere
2025-03-28 23:44:21 +01:00
parent f9bc7ad914
commit dc2301eb98
24 changed files with 419 additions and 369 deletions
+11 -2
View File
@@ -10,6 +10,7 @@ import (
"github.com/sirrobot01/debrid-blackhole/internal/utils"
"github.com/sirrobot01/debrid-blackhole/pkg/debrid/types"
"slices"
"strings"
"time"
"net/http"
@@ -22,6 +23,7 @@ type AllDebrid struct {
Name string
Host string `json:"host"`
APIKey string
ExtraAPIKeys []string
DownloadUncached bool
client *request.Client
@@ -319,8 +321,14 @@ func (ad *AllDebrid) GetMountPath() string {
func New(dc config.Debrid) *AllDebrid {
rl := request.ParseRateLimit(dc.RateLimit)
apiKeys := strings.Split(dc.APIKey, ",")
extraKeys := make([]string, 0)
if len(apiKeys) > 1 {
extraKeys = apiKeys[1:]
}
mainKey := apiKeys[0]
headers := map[string]string{
"Authorization": fmt.Sprintf("Bearer %s", dc.APIKey),
"Authorization": fmt.Sprintf("Bearer %s", mainKey),
}
_log := logger.NewLogger(dc.Name)
client := request.New().
@@ -329,7 +337,8 @@ func New(dc config.Debrid) *AllDebrid {
return &AllDebrid{
Name: "alldebrid",
Host: dc.Host,
APIKey: dc.APIKey,
APIKey: mainKey,
ExtraAPIKeys: extraKeys,
DownloadUncached: dc.DownloadUncached,
client: client,
MountPath: dc.Folder,
+13 -12
View File
@@ -82,6 +82,7 @@ type Cache struct {
torrentsRefreshMu sync.RWMutex // for refreshing torrents
saveSemaphore chan struct{}
ctx context.Context
}
func NewCache(dc config.Debrid, client types.Client) *Cache {
@@ -98,13 +99,17 @@ func NewCache(dc config.Debrid, client types.Client) *Cache {
if err != nil {
autoExpiresLinksAfter = time.Hour * 24
}
workers := runtime.NumCPU() * 50
if dc.Workers > 0 {
workers = dc.Workers
}
return &Cache{
dir: filepath.Join(cfg.Path, "cache", dc.Name), // path to save cache files
torrents: xsync.NewMapOf[string, *CachedTorrent](),
torrentsNames: xsync.NewMapOf[string, *CachedTorrent](),
client: client,
logger: logger.NewLogger(fmt.Sprintf("%s-webdav", client.GetName())),
workers: 200,
workers: workers,
downloadLinks: xsync.NewMapOf[string, downloadLinkCache](),
torrentRefreshInterval: torrentRefreshInterval,
downloadLinksRefreshInterval: downloadLinksRefreshInterval,
@@ -113,6 +118,7 @@ func NewCache(dc config.Debrid, client types.Client) *Cache {
autoExpiresLinksAfter: autoExpiresLinksAfter,
repairsInProgress: xsync.NewMapOf[string, bool](),
saveSemaphore: make(chan struct{}, 10),
ctx: context.Background(),
}
}
@@ -159,10 +165,11 @@ func (c *Cache) GetListing() []os.FileInfo {
return nil
}
func (c *Cache) Start() error {
func (c *Cache) Start(ctx context.Context) error {
if err := os.MkdirAll(c.dir, 0755); err != nil {
return fmt.Errorf("failed to create cache directory: %w", err)
}
c.ctx = ctx
if err := c.Sync(); err != nil {
return fmt.Errorf("failed to sync cache: %w", err)
@@ -378,25 +385,19 @@ func (c *Cache) Sync() error {
}
func (c *Cache) sync(torrents []*types.Torrent) error {
// Calculate optimal workers - balance between CPU and IO
workers := runtime.NumCPU() * 50 // A more balanced multiplier for BadgerDB
// Create channels with appropriate buffering
workChan := make(chan *types.Torrent, workers*2)
workChan := make(chan *types.Torrent, min(1000, len(torrents)))
// Use an atomic counter for progress tracking
var processed int64
var errorCount int64
// Create a context with cancellation in case of critical errors
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// Create a wait group for workers
var wg sync.WaitGroup
// Start workers
for i := 0; i < workers; i++ {
for i := 0; i < c.workers; i++ {
wg.Add(1)
go func() {
defer wg.Done()
@@ -418,7 +419,7 @@ func (c *Cache) sync(torrents []*types.Torrent) error {
c.logger.Info().Msgf("Progress: %d/%d torrents processed", count, len(torrents))
}
case <-ctx.Done():
case <-c.ctx.Done():
return // Context cancelled, exit goroutine
}
}
@@ -430,7 +431,7 @@ func (c *Cache) sync(torrents []*types.Torrent) error {
select {
case workChan <- t:
// Work sent successfully
case <-ctx.Done():
case <-c.ctx.Done():
break // Context cancelled
}
}
+35 -23
View File
@@ -1,12 +1,10 @@
package debrid
import (
"context"
"fmt"
"github.com/sirrobot01/debrid-blackhole/internal/config"
"github.com/sirrobot01/debrid-blackhole/internal/request"
"github.com/sirrobot01/debrid-blackhole/pkg/debrid/types"
"golang.org/x/sync/errgroup"
"io"
"net/http"
"os"
@@ -14,6 +12,7 @@ import (
"slices"
"sort"
"strings"
"sync"
"time"
)
@@ -62,7 +61,8 @@ func (c *Cache) refreshListings() {
}
// Atomic store of the complete ready-to-use slice
c.listings.Store(files)
c.resetPropfindResponse()
//c.resetPropfindResponse()
_ = c.RefreshXml()
if err := c.RefreshRclone(); err != nil {
c.logger.Debug().Err(err).Msg("Failed to refresh rclone")
}
@@ -136,7 +136,7 @@ func (c *Cache) refreshTorrents() {
newTorrents := make([]*types.Torrent, 0)
for _, t := range _newTorrents {
if !slices.Contains(deletedTorrents, t.Id) {
newTorrents = append(newTorrents, t) // <-- FIXED: Use newTorrents
newTorrents = append(newTorrents, t)
}
}
@@ -149,28 +149,40 @@ func (c *Cache) refreshTorrents() {
}
c.logger.Info().Msgf("Found %d new torrents", len(newTorrents))
g, ctx := errgroup.WithContext(context.Background())
workChan := make(chan *types.Torrent, min(100, len(newTorrents)))
errChan := make(chan error, len(newTorrents))
var wg sync.WaitGroup
for i := 0; i < c.workers; i++ {
wg.Add(1)
go func() {
defer wg.Done()
for t := range workChan {
select {
case <-c.ctx.Done():
return
default:
}
if err := c.ProcessTorrent(t, true); err != nil {
c.logger.Debug().Err(err).Msgf("Failed to process new torrent %s", t.Id)
errChan <- err
}
}
}()
}
for _, t := range newTorrents {
t := t
g.Go(func() error {
select {
case <-ctx.Done():
return ctx.Err()
default:
}
if err := c.ProcessTorrent(t, true); err != nil {
return err
}
return nil
})
}
if err := g.Wait(); err != nil {
c.logger.Debug().Err(err).Msg("Failed to process new torrents")
select {
case <-c.ctx.Done():
break
default:
workChan <- t
}
}
close(workChan)
wg.Wait()
c.logger.Debug().Msgf("Processed %d new torrents", len(newTorrents))
}
func (c *Cache) RefreshRclone() error {
+1 -1
View File
@@ -42,7 +42,7 @@ func (c *Cache) IsTorrentBroken(t *CachedTorrent, filenames []string) bool {
} else {
// Check if file.Link not in the downloadLink Cache
if err := c.client.CheckLink(f.Link); err != nil {
if errors.Is(err, request.ErrLinkBroken) {
if errors.Is(err, request.HosterUnavailableError) {
isBroken = true
break
} else {
+1 -1
View File
@@ -18,7 +18,7 @@ func (c *Cache) RefreshXml() error {
return fmt.Errorf("failed to refresh XML for %s: %v", parent, err)
}
}
c.logger.Debug().Msgf("Refreshed XML cache for %s", c.client.GetName())
c.logger.Trace().Msgf("Refreshed XML cache for %s", c.client.GetName())
return nil
}
+10 -2
View File
@@ -21,6 +21,7 @@ type DebridLink struct {
Name string
Host string `json:"host"`
APIKey string
ExtraAPIKeys []string
DownloadUncached bool
client *request.Client
@@ -260,8 +261,14 @@ func (dl *DebridLink) GetDownloadUncached() bool {
func New(dc config.Debrid) *DebridLink {
rl := request.ParseRateLimit(dc.RateLimit)
apiKeys := strings.Split(dc.APIKey, ",")
extraKeys := make([]string, 0)
if len(apiKeys) > 1 {
extraKeys = apiKeys[1:]
}
mainKey := apiKeys[0]
headers := map[string]string{
"Authorization": fmt.Sprintf("Bearer %s", dc.APIKey),
"Authorization": fmt.Sprintf("Bearer %s", mainKey),
"Content-Type": "application/json",
}
_log := logger.NewLogger(dc.Name)
@@ -271,7 +278,8 @@ func New(dc config.Debrid) *DebridLink {
return &DebridLink{
Name: "debridlink",
Host: dc.Host,
APIKey: dc.APIKey,
APIKey: mainKey,
ExtraAPIKeys: extraKeys,
DownloadUncached: dc.DownloadUncached,
client: client,
MountPath: dc.Folder,
+57 -33
View File
@@ -21,9 +21,12 @@ import (
)
type RealDebrid struct {
Name string
Host string `json:"host"`
APIKey string
Name string
Host string `json:"host"`
APIKey string
ExtraAPIKeys []string // This is used for bandwidth
DownloadUncached bool
client *request.Client
@@ -262,7 +265,6 @@ func (r *RealDebrid) DeleteTorrent(torrentId string) {
}
func (r *RealDebrid) GenerateDownloadLinks(t *types.Torrent) error {
url := fmt.Sprintf("%s/unrestrict/link/", r.Host)
filesCh := make(chan types.File, len(t.Files))
errCh := make(chan error, len(t.Files))
@@ -273,32 +275,13 @@ func (r *RealDebrid) GenerateDownloadLinks(t *types.Torrent) error {
go func(file types.File) {
defer wg.Done()
payload := gourl.Values{"link": {file.Link}}
req, err := http.NewRequest(http.MethodPost, url, strings.NewReader(payload.Encode()))
link, err := r.GetDownloadLink(t, &file)
if err != nil {
errCh <- err
return
}
resp, err := r.client.Do(req)
if err != nil {
errCh <- err
return
}
if resp.StatusCode == http.StatusServiceUnavailable {
errCh <- request.HosterUnavailableError
return
}
defer resp.Body.Close()
b, err := io.ReadAll(resp.Body)
var data UnrestrictResponse
if err = json.Unmarshal(b, &data); err != nil {
errCh <- err
return
}
file.DownloadLink = data.Download
file.DownloadLink = link
filesCh <- file
}(f)
}
@@ -337,12 +320,12 @@ func (r *RealDebrid) CheckLink(link string) error {
return err
}
if resp.StatusCode == http.StatusNotFound {
return request.ErrLinkBroken // File has been removed
return request.HosterUnavailableError // File has been removed
}
return nil
}
func (r *RealDebrid) GetDownloadLink(t *types.Torrent, file *types.File) (string, error) {
func (r *RealDebrid) _getDownloadLink(file *types.File) (string, error) {
url := fmt.Sprintf("%s/unrestrict/link/", r.Host)
payload := gourl.Values{
"link": {file.Link},
@@ -352,8 +335,25 @@ func (r *RealDebrid) GetDownloadLink(t *types.Torrent, file *types.File) (string
if err != nil {
return "", err
}
if resp.StatusCode == http.StatusServiceUnavailable {
return "", request.HosterUnavailableError
if resp.StatusCode != http.StatusOK {
// Read the response body to get the error message
b, err := io.ReadAll(resp.Body)
if err != nil {
return "", err
}
var data ErrorResponse
if err = json.Unmarshal(b, &data); err != nil {
return "", err
}
switch data.ErrorCode {
case 23:
return "", request.TrafficExceededError
case 24:
return "", request.HosterUnavailableError // Link has been nerfed
default:
return "", fmt.Errorf("realdebrid API error: %d", resp.StatusCode)
}
}
defer resp.Body.Close()
b, err := io.ReadAll(resp.Body)
@@ -365,6 +365,23 @@ func (r *RealDebrid) GetDownloadLink(t *types.Torrent, file *types.File) (string
return "", err
}
return data.Download, nil
}
// GetDownloadLink unrestricts file.Link into a direct download URL.
// It first tries the primary API key; on failure it retries once with
// each key in ExtraAPIKeys (by swapping the shared client's
// Authorization header), then restores the primary key before
// returning.
// The *types.Torrent parameter is unused in this body; presumably kept
// for interface compatibility — TODO confirm against the interface.
// NOTE(review): SetHeader mutates the shared r.client, so concurrent
// requests through the same client may be sent with a fallback key —
// confirm callers serialize access or that this is acceptable.
func (r *RealDebrid) GetDownloadLink(t *types.Torrent, file *types.File) (string, error) {
link, err := r._getDownloadLink(file)
if err == nil {
return link, nil
}
for _, key := range r.ExtraAPIKeys {
r.client.SetHeader("Authorization", fmt.Sprintf("Bearer %s", key))
// err is shadowed here, so the error returned at the bottom is
// always the one from the primary-key attempt; fallback-key
// errors are discarded.
if link, err := r._getDownloadLink(file); err == nil {
return link, nil
}
}
// Reset to main API key
r.client.SetHeader("Authorization", fmt.Sprintf("Bearer %s", r.APIKey))
return "", err
}
func (r *RealDebrid) GetCheckCached() bool {
@@ -431,7 +448,7 @@ func (r *RealDebrid) getTorrents(offset int, limit int) (int, []*types.Torrent,
}
func (r *RealDebrid) GetTorrents() ([]*types.Torrent, error) {
limit := 5000
limit := 1000
// Get first batch and total count
totalItems, firstBatch, err := r.getTorrents(0, limit)
@@ -472,7 +489,7 @@ func (r *RealDebrid) GetTorrents() ([]*types.Torrent, error) {
func (r *RealDebrid) GetDownloads() (map[string]types.DownloadLinks, error) {
links := make(map[string]types.DownloadLinks)
offset := 0
limit := 5000
limit := 1000
for {
dl, err := r._getDownloads(offset, limit)
if err != nil {
@@ -538,8 +555,14 @@ func (r *RealDebrid) GetMountPath() string {
func New(dc config.Debrid) *RealDebrid {
rl := request.ParseRateLimit(dc.RateLimit)
apiKeys := strings.Split(dc.APIKey, ",")
extraKeys := make([]string, 0)
if len(apiKeys) > 1 {
extraKeys = apiKeys[1:]
}
mainKey := apiKeys[0]
headers := map[string]string{
"Authorization": fmt.Sprintf("Bearer %s", dc.APIKey),
"Authorization": fmt.Sprintf("Bearer %s", mainKey),
}
_log := logger.NewLogger(dc.Name)
client := request.New().
@@ -550,7 +573,8 @@ func New(dc config.Debrid) *RealDebrid {
return &RealDebrid{
Name: "realdebrid",
Host: dc.Host,
APIKey: dc.APIKey,
APIKey: mainKey,
ExtraAPIKeys: extraKeys,
DownloadUncached: dc.DownloadUncached,
client: client,
MountPath: dc.Folder,
+5
View File
@@ -134,3 +134,8 @@ type DownloadsResponse struct {
Streamable int `json:"streamable"`
Generated time.Time `json:"generated"`
}
// ErrorResponse models the JSON error payload returned by the
// Real-Debrid API. ErrorCode is switched on by _getDownloadLink
// (23 = traffic exceeded, 24 = link unavailable/nerfed, per that
// caller's mapping).
type ErrorResponse struct {
Error string `json:"error"`
ErrorCode int `json:"error_code"`
}
+30 -22
View File
@@ -26,6 +26,7 @@ type Torbox struct {
Name string
Host string `json:"host"`
APIKey string
ExtraAPIKeys []string
DownloadUncached bool
client *request.Client
@@ -34,6 +35,35 @@ type Torbox struct {
CheckCached bool
}
// New builds a Torbox client from the given debrid configuration.
// dc.APIKey may hold several comma-separated keys: the first becomes
// the primary key, the remainder are retained as fallbacks in
// ExtraAPIKeys.
func New(dc config.Debrid) *Torbox {
	rateLimiter := request.ParseRateLimit(dc.RateLimit)

	// Split the configured key list into primary + fallbacks.
	keys := strings.Split(dc.APIKey, ",")
	primary := keys[0]
	fallbacks := make([]string, 0)
	if len(keys) > 1 {
		fallbacks = keys[1:]
	}

	lg := logger.NewLogger(dc.Name)
	httpClient := request.New().
		WithHeaders(map[string]string{
			"Authorization": fmt.Sprintf("Bearer %s", primary),
		}).
		WithRateLimiter(rateLimiter).WithLogger(lg)

	return &Torbox{
		Name:             "torbox",
		Host:             dc.Host,
		APIKey:           primary,
		ExtraAPIKeys:     fallbacks,
		DownloadUncached: dc.DownloadUncached,
		client:           httpClient,
		MountPath:        dc.Folder,
		logger:           lg,
		CheckCached:      dc.CheckCached,
	}
}
// GetName reports the provider identifier configured for this client.
func (tb *Torbox) GetName() string {
	name := tb.Name
	return name
}
@@ -312,28 +342,6 @@ func (tb *Torbox) GetDownloadUncached() bool {
return tb.DownloadUncached
}
// New builds a Torbox client from the given debrid configuration,
// using dc.APIKey verbatim as the bearer token.
// NOTE(review): this is the pre-change (removed) side of the diff — it
// has no multi-key support and no ExtraAPIKeys field; the added hunk
// earlier in this file replaces it.
func New(dc config.Debrid) *Torbox {
rl := request.ParseRateLimit(dc.RateLimit)
headers := map[string]string{
"Authorization": fmt.Sprintf("Bearer %s", dc.APIKey),
}
_log := logger.NewLogger(dc.Name)
client := request.New().
WithHeaders(headers).
WithRateLimiter(rl).WithLogger(_log)
return &Torbox{
Name: "torbox",
Host: dc.Host,
APIKey: dc.APIKey,
DownloadUncached: dc.DownloadUncached,
client: client,
MountPath: dc.Folder,
logger: _log,
CheckCached: dc.CheckCached,
}
}
// GetDownloads is a stub for Torbox: it always yields a nil map and no
// error.
func (tb *Torbox) GetDownloads() (map[string]types.DownloadLinks, error) {
	var links map[string]types.DownloadLinks
	return links, nil
}