initializing webdav server

This commit is contained in:
Mukhtar Akere
2025-03-18 10:02:10 +01:00
parent fa469c64c6
commit 5d2fabe20b
39 changed files with 1650 additions and 1141 deletions
+1 -1
View File
@@ -3,8 +3,8 @@ package arr
import (
"bytes"
"crypto/tls"
"encoding/json"
"fmt"
"github.com/goccy/go-json"
"github.com/sirrobot01/debrid-blackhole/internal/config"
"github.com/sirrobot01/debrid-blackhole/internal/request"
"io"
+1 -1
View File
@@ -1,8 +1,8 @@
package arr
import (
"encoding/json"
"fmt"
"github.com/goccy/go-json"
"net/http"
"strconv"
"strings"
+1 -1
View File
@@ -1,7 +1,7 @@
package arr
import (
"encoding/json"
"github.com/goccy/go-json"
"io"
"net/http"
gourl "net/url"
+1 -1
View File
@@ -1,8 +1,8 @@
package arr
import (
"encoding/json"
"fmt"
"github.com/goccy/go-json"
"io"
"net/http"
gourl "net/url"
+1 -1
View File
@@ -1,7 +1,7 @@
package arr
import (
"encoding/json"
"github.com/goccy/go-json"
"net/http"
url2 "net/url"
)
+51 -50
View File
@@ -1,20 +1,19 @@
package alldebrid
import (
"encoding/json"
"fmt"
"github.com/goccy/go-json"
"github.com/rs/zerolog"
"github.com/sirrobot01/debrid-blackhole/internal/cache"
"github.com/sirrobot01/debrid-blackhole/internal/config"
"github.com/sirrobot01/debrid-blackhole/internal/logger"
"github.com/sirrobot01/debrid-blackhole/internal/request"
"github.com/sirrobot01/debrid-blackhole/internal/utils"
"github.com/sirrobot01/debrid-blackhole/pkg/debrid/torrent"
"slices"
"time"
"net/http"
gourl "net/url"
"os"
"path/filepath"
"strconv"
)
@@ -24,11 +23,11 @@ type AllDebrid struct {
Host string `json:"host"`
APIKey string
DownloadUncached bool
client *request.RLHTTPClient
cache *cache.Cache
MountPath string
logger zerolog.Logger
CheckCached bool
client *request.Client
MountPath string
logger zerolog.Logger
CheckCached bool
}
func (ad *AllDebrid) GetName() string {
@@ -39,15 +38,9 @@ func (ad *AllDebrid) GetLogger() zerolog.Logger {
return ad.logger
}
func (ad *AllDebrid) IsAvailable(infohashes []string) map[string]bool {
func (ad *AllDebrid) IsAvailable(hashes []string) map[string]bool {
// Check if the infohashes are available in the local cache
hashes, result := torrent.GetLocalCache(infohashes, ad.cache)
if len(hashes) == 0 {
// Either all the infohashes are locally cached or none are
ad.cache.AddMultiple(result)
return result
}
result := make(map[string]bool)
// Divide hashes into groups of 100
// AllDebrid does not support checking cached infohashes
@@ -91,8 +84,8 @@ func getAlldebridStatus(statusCode int) string {
}
}
func flattenFiles(files []MagnetFile, parentPath string, index *int) []torrent.File {
result := make([]torrent.File, 0)
func flattenFiles(files []MagnetFile, parentPath string, index *int) map[string]torrent.File {
result := make(map[string]torrent.File)
cfg := config.GetConfig()
@@ -104,7 +97,15 @@ func flattenFiles(files []MagnetFile, parentPath string, index *int) []torrent.F
if f.Elements != nil {
// This is a folder, recurse into it
result = append(result, flattenFiles(f.Elements, currentPath, index)...)
subFiles := flattenFiles(f.Elements, currentPath, index)
for k, v := range subFiles {
if _, ok := result[k]; ok {
// File already exists, use path as key
result[v.Path] = v
} else {
result[k] = v
}
}
} else {
// This is a file
fileName := filepath.Base(f.Name)
@@ -128,25 +129,25 @@ func flattenFiles(files []MagnetFile, parentPath string, index *int) []torrent.F
Size: f.Size,
Path: currentPath,
}
result = append(result, file)
result[file.Name] = file
}
}
return result
}
func (ad *AllDebrid) GetTorrent(t *torrent.Torrent) (*torrent.Torrent, error) {
func (ad *AllDebrid) UpdateTorrent(t *torrent.Torrent) error {
url := fmt.Sprintf("%s/magnet/status?id=%s", ad.Host, t.Id)
req, _ := http.NewRequest(http.MethodGet, url, nil)
resp, err := ad.client.MakeRequest(req)
if err != nil {
return t, err
return err
}
var res TorrentInfoResponse
err = json.Unmarshal(resp, &res)
if err != nil {
ad.logger.Info().Msgf("Error unmarshalling torrent info: %s", err)
return t, err
return err
}
data := res.Data.Magnets
status := getAlldebridStatus(data.StatusCode)
@@ -158,7 +159,6 @@ func (ad *AllDebrid) GetTorrent(t *torrent.Torrent) (*torrent.Torrent, error) {
t.Folder = name
t.MountPath = ad.MountPath
t.Debrid = ad.Name
t.DownloadLinks = make(map[string]torrent.DownloadLinks)
if status == "downloaded" {
t.Bytes = data.Size
@@ -169,23 +169,21 @@ func (ad *AllDebrid) GetTorrent(t *torrent.Torrent) (*torrent.Torrent, error) {
files := flattenFiles(data.Files, "", &index)
t.Files = files
}
return t, nil
return nil
}
func (ad *AllDebrid) CheckStatus(torrent *torrent.Torrent, isSymlink bool) (*torrent.Torrent, error) {
for {
tb, err := ad.GetTorrent(torrent)
err := ad.UpdateTorrent(torrent)
torrent = tb
if err != nil || tb == nil {
return tb, err
if err != nil || torrent == nil {
return torrent, err
}
status := torrent.Status
if status == "downloaded" {
ad.logger.Info().Msgf("Torrent: %s downloaded", torrent.Name)
if !isSymlink {
err = ad.GetDownloadLinks(torrent)
err = ad.GenerateDownloadLinks(torrent)
if err != nil {
return torrent, err
}
@@ -217,8 +215,7 @@ func (ad *AllDebrid) DeleteTorrent(torrent *torrent.Torrent) {
}
}
func (ad *AllDebrid) GetDownloadLinks(t *torrent.Torrent) error {
downloadLinks := make(map[string]torrent.DownloadLinks)
func (ad *AllDebrid) GenerateDownloadLinks(t *torrent.Torrent) error {
for _, file := range t.Files {
url := fmt.Sprintf("%s/link/unlock", ad.Host)
query := gourl.Values{}
@@ -234,19 +231,15 @@ func (ad *AllDebrid) GetDownloadLinks(t *torrent.Torrent) error {
return err
}
link := data.Data.Link
file.DownloadLink = link
file.Generated = time.Now()
t.Files[file.Name] = file
dl := torrent.DownloadLinks{
Link: file.Link,
Filename: data.Data.Filename,
DownloadLink: link,
}
downloadLinks[file.Id] = dl
}
t.DownloadLinks = downloadLinks
return nil
}
func (ad *AllDebrid) GetDownloadLink(t *torrent.Torrent, file *torrent.File) *torrent.DownloadLinks {
func (ad *AllDebrid) GetDownloadLink(t *torrent.Torrent, file *torrent.File) *torrent.File {
url := fmt.Sprintf("%s/link/unlock", ad.Host)
query := gourl.Values{}
query.Add("link", file.Link)
@@ -261,11 +254,9 @@ func (ad *AllDebrid) GetDownloadLink(t *torrent.Torrent, file *torrent.File) *to
return nil
}
link := data.Data.Link
return &torrent.DownloadLinks{
DownloadLink: link,
Link: file.Link,
Filename: data.Data.Filename,
}
file.DownloadLink = link
file.Generated = time.Now()
return file
}
func (ad *AllDebrid) GetCheckCached() bool {
@@ -276,6 +267,10 @@ func (ad *AllDebrid) GetTorrents() ([]*torrent.Torrent, error) {
return nil, nil
}
func (ad *AllDebrid) GetDownloads() (map[string]torrent.DownloadLinks, error) {
return nil, nil
}
func (ad *AllDebrid) GetDownloadingStatus() []string {
return []string{"downloading"}
}
@@ -284,21 +279,27 @@ func (ad *AllDebrid) GetDownloadUncached() bool {
return ad.DownloadUncached
}
func New(dc config.Debrid, cache *cache.Cache) *AllDebrid {
func (ad *AllDebrid) ConvertLinksToFiles(links []string) []torrent.File {
return nil
}
func New(dc config.Debrid) *AllDebrid {
rl := request.ParseRateLimit(dc.RateLimit)
headers := map[string]string{
"Authorization": fmt.Sprintf("Bearer %s", dc.APIKey),
}
client := request.NewRLHTTPClient(rl, headers)
_log := logger.NewLogger(dc.Name, config.GetConfig().LogLevel)
client := request.New().
WithHeaders(headers).
WithRateLimiter(rl).WithLogger(_log)
return &AllDebrid{
Name: "alldebrid",
Host: dc.Host,
APIKey: dc.APIKey,
DownloadUncached: dc.DownloadUncached,
client: client,
cache: cache,
MountPath: dc.Folder,
logger: logger.NewLogger(dc.Name, config.GetConfig().LogLevel, os.Stdout),
logger: logger.NewLogger(dc.Name, config.GetConfig().LogLevel),
CheckCached: dc.CheckCached,
}
}
-452
View File
@@ -1,452 +0,0 @@
package cache
import (
"context"
"encoding/json"
"fmt"
"github.com/dgraph-io/badger/v4"
"github.com/rs/zerolog"
"github.com/sirrobot01/debrid-blackhole/internal/logger"
"os"
"path/filepath"
"runtime"
"sync"
"sync/atomic"
"time"
"github.com/sirrobot01/debrid-blackhole/internal/config"
"github.com/sirrobot01/debrid-blackhole/pkg/debrid/engine"
"github.com/sirrobot01/debrid-blackhole/pkg/debrid/torrent"
)
// DownloadLinkCache is the persisted form of one generated download link,
// keyed by file id on a CachedTorrent.
type DownloadLinkCache struct {
	Link string `json:"download_link"`
}

// CachedTorrent wraps a debrid torrent with cache bookkeeping: when it was
// last read, whether its file list is complete, and any download links that
// have already been generated for its files (keyed by file id).
type CachedTorrent struct {
	*torrent.Torrent
	LastRead      time.Time                    `json:"last_read"`
	IsComplete    bool                         `json:"is_complete"`
	DownloadLinks map[string]DownloadLinkCache `json:"download_links"`
}
var (
	// _logInstance is the shared package logger, built lazily by getLogger.
	_logInstance zerolog.Logger
	// once guards the one-time initialization of _logInstance.
	once sync.Once
)

// getLogger returns the package-wide "cache" logger, initializing it on the
// first call using the configured log level.
func getLogger() zerolog.Logger {
	once.Do(func() {
		cfg := config.GetConfig()
		_logInstance = logger.NewLogger("cache", cfg.LogLevel, os.Stdout)
	})
	return _logInstance
}
// Cache is a per-debrid torrent cache backed by BadgerDB on disk and two
// in-memory indexes (by torrent id and by torrent name). The mutexes guard
// their respective maps; the db handle is opened by Start.
type Cache struct {
	dir               string         // BadgerDB directory for this debrid
	client            engine.Service // debrid service this cache fronts
	db                *badger.DB     // opened in Start, closed in Close
	torrents          map[string]*CachedTorrent // key: torrent.Id, value: *CachedTorrent
	torrentsMutex     sync.RWMutex
	torrentsNames     map[string]*CachedTorrent // key: torrent.Name, value: torrent
	torrentNamesMutex sync.RWMutex
	LastUpdated       time.Time `json:"last_updated"`
}
// SetTorrent stores (or replaces) a cached torrent in the in-memory
// id-keyed index under the write lock.
func (c *Cache) SetTorrent(t *CachedTorrent) {
	c.torrentsMutex.Lock()
	c.torrents[t.Id] = t
	c.torrentsMutex.Unlock()
}
// SetTorrentName stores (or replaces) a cached torrent in the in-memory
// name-keyed index under the write lock.
func (c *Cache) SetTorrentName(name string, t *CachedTorrent) {
	c.torrentNamesMutex.Lock()
	c.torrentsNames[name] = t
	c.torrentNamesMutex.Unlock()
}
// GetTorrents returns the id-keyed map of cached torrents.
// NOTE(review): this returns the internal map itself — the read lock is
// released on return, so callers iterating the result can race with
// concurrent SetTorrent calls. Consider returning a copy; verify call sites.
func (c *Cache) GetTorrents() map[string]*CachedTorrent {
	c.torrentsMutex.RLock()
	defer c.torrentsMutex.RUnlock()
	return c.torrents
}

// GetTorrentNames returns the name-keyed map of cached torrents.
// NOTE(review): same caveat as GetTorrents — the internal map escapes the
// lock's protection.
func (c *Cache) GetTorrentNames() map[string]*CachedTorrent {
	c.torrentNamesMutex.RLock()
	defer c.torrentNamesMutex.RUnlock()
	return c.torrentsNames
}
// Manager holds one Cache per debrid service, keyed by service name.
type Manager struct {
	caches map[string]*Cache
}
// NewManager builds a Cache for every debrid service registered on the
// engine and indexes them by service name.
func NewManager(debridService *engine.Engine) *Manager {
	basePath := config.GetConfig().Path
	caches := make(map[string]*Cache)
	for _, svc := range debridService.GetDebrids() {
		caches[svc.GetName()] = New(svc, basePath)
	}
	return &Manager{caches: caches}
}
// GetCaches returns all managed caches keyed by debrid service name.
func (m *Manager) GetCaches() map[string]*Cache {
	return m.caches
}

// GetCache returns the cache for the named debrid service, or nil if none.
func (m *Manager) GetCache(debridName string) *Cache {
	return m.caches[debridName]
}
// New constructs an unopened Cache for the given debrid service, rooted at
// <basePath>/cache/<service name>/db. Call Start to open the database.
func New(debridService engine.Service, basePath string) *Cache {
	return &Cache{
		dir:           filepath.Join(basePath, "cache", debridService.GetName(), "db"),
		client:        debridService,
		torrents:      make(map[string]*CachedTorrent),
		torrentsNames: make(map[string]*CachedTorrent),
	}
}
// Start opens the on-disk BadgerDB, loads previously persisted torrents into
// the in-memory indexes, and then syncs against the debrid service.
// Errors are wrapped with %w so callers can inspect the cause chain
// (the original used %v for Load/Sync, which discarded it).
func (c *Cache) Start() error {
	_logger := getLogger()
	_logger.Info().Msg("Starting cache for: " + c.client.GetName())
	// Make sure the directory exists
	if err := os.MkdirAll(c.dir, 0755); err != nil {
		return fmt.Errorf("failed to create cache directory: %w", err)
	}
	// Open BadgerDB
	opts := badger.DefaultOptions(c.dir)
	opts.Logger = nil // Disable Badger's internal logger
	var err error
	c.db, err = badger.Open(opts)
	if err != nil {
		return fmt.Errorf("failed to open BadgerDB: %w", err)
	}
	if err := c.Load(); err != nil {
		return fmt.Errorf("failed to load cache: %w", err)
	}
	if err := c.Sync(); err != nil {
		return fmt.Errorf("failed to sync cache: %w", err)
	}
	return nil
}
// Close shuts down the underlying BadgerDB. Closing a cache whose database
// was never opened is a no-op.
func (c *Cache) Close() error {
	if c.db == nil {
		return nil
	}
	return c.db.Close()
}
// Load reads every persisted torrent (keys prefixed "torrent:") out of
// BadgerDB and rebuilds the in-memory id and name indexes. Corrupt or
// unreadable entries are logged at debug level and skipped rather than
// aborting the whole load; only torrents that still have files are indexed.
func (c *Cache) Load() error {
	_logger := getLogger()
	err := c.db.View(func(txn *badger.Txn) error {
		opts := badger.DefaultIteratorOptions
		it := txn.NewIterator(opts)
		defer it.Close()
		prefix := []byte("torrent:")
		for it.Seek(prefix); it.ValidForPrefix(prefix); it.Next() {
			item := it.Item()
			err := item.Value(func(val []byte) error {
				var ct CachedTorrent
				if err := json.Unmarshal(val, &ct); err != nil {
					_logger.Debug().Err(err).Msgf("Failed to unmarshal torrent")
					return nil // Continue to next item
				}
				// Only index torrents that still carry a file list.
				if len(ct.Files) > 0 {
					c.SetTorrent(&ct)
					c.SetTorrentName(ct.Name, &ct)
				}
				return nil
			})
			if err != nil {
				_logger.Debug().Err(err).Msg("Error reading torrent value")
			}
		}
		return nil
	})
	return err
}
// GetTorrent returns the cached torrent with the given id, or nil if absent.
// It performs the lookup directly under the read lock instead of calling
// GetTorrents(), so no reference to the internal map escapes the lock and
// per-lookup cost stays O(1).
func (c *Cache) GetTorrent(id string) *CachedTorrent {
	c.torrentsMutex.RLock()
	defer c.torrentsMutex.RUnlock()
	// A missing key yields the map's zero value, which for a pointer is nil.
	return c.torrents[id]
}
// GetTorrentByName returns the cached torrent with the given name, or nil if
// absent. As with GetTorrent, the lookup happens directly under the read
// lock rather than through GetTorrentNames(), keeping the internal map
// private to the lock.
func (c *Cache) GetTorrentByName(name string) *CachedTorrent {
	c.torrentNamesMutex.RLock()
	defer c.torrentNamesMutex.RUnlock()
	// A missing key yields nil (zero value for a pointer-valued map).
	return c.torrentsNames[name]
}
// SaveTorrent persists a cached torrent to BadgerDB together with its
// name->id index entry. Both keys are written in a single transaction so
// the name index can never point at a torrent record that failed to store
// (the original used two separate db.Update calls, which was non-atomic and
// paid for two commits per save).
func (c *Cache) SaveTorrent(ct *CachedTorrent) error {
	data, err := json.Marshal(ct)
	if err != nil {
		return fmt.Errorf("failed to marshal torrent: %w", err)
	}
	key := []byte(fmt.Sprintf("torrent:%s", ct.Torrent.Id))
	// Index by name for quick lookups, in the same transaction.
	nameKey := []byte(fmt.Sprintf("name:%s", ct.Torrent.Name))
	err = c.db.Update(func(txn *badger.Txn) error {
		if err := txn.Set(key, data); err != nil {
			return err
		}
		return txn.Set(nameKey, []byte(ct.Torrent.Id))
	})
	if err != nil {
		return fmt.Errorf("failed to save torrent to BadgerDB: %w", err)
	}
	return nil
}
// SaveAll persists every in-memory torrent to BadgerDB using one worker
// goroutine per CPU fed through a buffered channel, then kicks off a
// best-effort value-log GC in the background.
// NOTE(review): iterating c.GetTorrents() here touches the shared map after
// the read lock is released, so a concurrent SetTorrent during SaveAll can
// race with this range — confirm callers serialize these. The LastUpdated
// write is likewise unguarded.
func (c *Cache) SaveAll() error {
	const batchSize = 100
	var wg sync.WaitGroup
	_logger := getLogger()
	tasks := make(chan *CachedTorrent, batchSize)
	// One saver goroutine per CPU; each drains tasks until the channel closes.
	for i := 0; i < runtime.NumCPU(); i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for ct := range tasks {
				if err := c.SaveTorrent(ct); err != nil {
					_logger.Error().Err(err).Msg("failed to save torrent")
				}
			}
		}()
	}
	// Feed every cached torrent to the workers, then signal completion.
	for _, value := range c.GetTorrents() {
		tasks <- value
	}
	close(tasks)
	wg.Wait()
	c.LastUpdated = time.Now()
	// Run value log garbage collection when appropriate
	// This helps reclaim space from deleted/updated values
	go func() {
		err := c.db.RunValueLogGC(0.5) // Run GC if 50% of the value log can be discarded
		if err != nil && err != badger.ErrNoRewrite {
			_logger.Debug().Err(err).Msg("BadgerDB value log GC")
		}
	}()
	return nil
}
// Sync pulls the full torrent list from the debrid service and processes it
// through a bounded worker pool, caching anything not already complete.
// Progress is reported every 1000 torrents; per-torrent failures are counted
// but do not abort the sync.
func (c *Cache) Sync() error {
	_logger := getLogger()
	torrents, err := c.client.GetTorrents()
	if err != nil {
		return fmt.Errorf("failed to sync torrents: %w", err)
	}
	_logger.Info().Msgf("Syncing %d torrents", len(torrents))
	// Calculate optimal workers - balance between CPU and IO
	workers := runtime.NumCPU() * 4 // A more balanced multiplier for BadgerDB
	// Create channels with appropriate buffering
	workChan := make(chan *torrent.Torrent, workers*2)
	// Use atomic counters for progress tracking
	var processed int64
	var errorCount int64
	// Context lets us stop both workers and the feeder on a critical error.
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	var wg sync.WaitGroup
	// Start workers
	for i := 0; i < workers; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for {
				select {
				case t, ok := <-workChan:
					if !ok {
						return // Channel closed, exit goroutine
					}
					if err := c.processTorrent(t); err != nil {
						_logger.Error().Err(err).Str("torrent", t.Name).Msg("sync error")
						atomic.AddInt64(&errorCount, 1)
					}
					count := atomic.AddInt64(&processed, 1)
					if count%1000 == 0 {
						_logger.Info().Msgf("Progress: %d/%d torrents processed", count, len(torrents))
					}
				case <-ctx.Done():
					return // Context cancelled, exit goroutine
				}
			}
		}()
	}
	// Feed work to workers.
feed:
	for _, t := range torrents {
		select {
		case workChan <- t:
			// Work sent successfully
		case <-ctx.Done():
			// BUG FIX: a bare `break` here only exited the select statement,
			// not the loop, so cancellation never stopped the feeder. The
			// label makes the break leave the for loop as intended.
			break feed
		}
	}
	// Signal workers that no more work is coming
	close(workChan)
	// Wait for all workers to complete
	wg.Wait()
	_logger.Info().Msgf("Sync complete: %d torrents processed, %d errors", len(torrents), errorCount)
	return nil
}
// processTorrent ensures a torrent is present in the cache, skipping
// entries that are already cached with a complete file list.
func (c *Cache) processTorrent(t *torrent.Torrent) error {
	cached := c.GetTorrent(t.Id)
	if cached != nil && cached.IsComplete {
		// Nothing to do: already fully cached.
		return nil
	}
	c.AddTorrent(t)
	return nil
}
// AddTorrent caches a torrent in memory and persists it asynchronously.
// If the torrent carries no file list yet, the file details are fetched from
// the debrid service first; torrents that still have no files after the
// fetch are dropped silently (debug-logged only).
func (c *Cache) AddTorrent(t *torrent.Torrent) {
	_logger := getLogger()
	if len(t.Files) == 0 {
		// Hydrate the file list from the debrid service.
		tNew, err := c.client.GetTorrent(t)
		_logger.Debug().Msgf("Getting torrent files for %s", t.Id)
		if err != nil {
			_logger.Debug().Msgf("Failed to get torrent files for %s: %v", t.Id, err)
			return
		}
		t = tNew
	}
	if len(t.Files) == 0 {
		_logger.Debug().Msgf("No files found for %s", t.Id)
		return
	}
	ct := &CachedTorrent{
		Torrent:    t,
		LastRead:   time.Now(),
		IsComplete: len(t.Files) > 0, // always true here; files checked above
		DownloadLinks: make(map[string]DownloadLinkCache),
	}
	c.SetTorrent(ct)
	c.SetTorrentName(t.Name, ct)
	// Persist in the background so callers are not blocked on disk I/O;
	// failures are logged, not surfaced.
	go func() {
		if err := c.SaveTorrent(ct); err != nil {
			_logger.Debug().Err(err).Msgf("Failed to save torrent %s", t.Id)
		}
	}()
}
// RefreshTorrent re-fetches a torrent from the debrid service, replaces the
// in-memory cache entry, persists it asynchronously, and returns the fresh
// entry. Returns nil if the fetch fails or yields no files.
// NOTE(review): the new entry starts with an empty DownloadLinks map, so any
// links cached on the old entry are discarded — confirm that is intended.
func (c *Cache) RefreshTorrent(torrent *CachedTorrent) *CachedTorrent {
	_logger := getLogger()
	t, err := c.client.GetTorrent(torrent.Torrent)
	if err != nil {
		_logger.Debug().Msgf("Failed to get torrent files for %s: %v", torrent.Id, err)
		return nil
	}
	if len(t.Files) == 0 {
		return nil
	}
	ct := &CachedTorrent{
		Torrent:    t,
		LastRead:   time.Now(),
		IsComplete: len(t.Files) > 0, // always true here; files checked above
		DownloadLinks: make(map[string]DownloadLinkCache),
	}
	c.SetTorrent(ct)
	c.SetTorrentName(t.Name, ct)
	// Persist in the background; failures are logged, not surfaced.
	go func() {
		if err := c.SaveTorrent(ct); err != nil {
			_logger.Debug().Err(err).Msgf("Failed to save torrent %s", t.Id)
		}
	}()
	return ct
}
// GetFileDownloadLink returns a download link for the given file, serving it
// from the per-torrent link cache when possible. On a cache miss it asks the
// debrid service to generate one, stores it, and persists the torrent in the
// background. If the file has no source link yet, the torrent is refreshed
// first to obtain one.
// NOTE(review): t.DownloadLinks is read and written here without any
// synchronization — concurrent calls for the same torrent can race on the
// map. Verify callers serialize per-torrent access.
func (c *Cache) GetFileDownloadLink(t *CachedTorrent, file *torrent.File) (string, error) {
	_logger := getLogger()
	// Fast path: link already generated for this file id.
	if linkCache, ok := t.DownloadLinks[file.Id]; ok {
		return linkCache.Link, nil
	}
	if file.Link == "" {
		// No source link yet; refresh the torrent to pick one up.
		t = c.RefreshTorrent(t)
		if t == nil {
			return "", fmt.Errorf("torrent not found")
		}
		file = t.Torrent.GetFile(file.Id)
	}
	_logger.Debug().Msgf("Getting download link for %s", t.Name)
	link := c.client.GetDownloadLink(t.Torrent, file)
	if link == nil {
		return "", fmt.Errorf("download link not found")
	}
	t.DownloadLinks[file.Id] = DownloadLinkCache{
		Link: link.DownloadLink,
	}
	// Persist the updated link cache in the background.
	go func() {
		if err := c.SaveTorrent(t); err != nil {
			_logger.Debug().Err(err).Msgf("Failed to save torrent %s", t.Id)
		}
	}()
	return link.DownloadLink, nil
}
+16 -16
View File
@@ -1,13 +1,12 @@
package debrid
import (
"cmp"
"fmt"
"github.com/sirrobot01/debrid-blackhole/internal/cache"
"github.com/sirrobot01/debrid-blackhole/internal/config"
"github.com/sirrobot01/debrid-blackhole/internal/utils"
"github.com/sirrobot01/debrid-blackhole/pkg/arr"
"github.com/sirrobot01/debrid-blackhole/pkg/debrid/alldebrid"
"github.com/sirrobot01/debrid-blackhole/pkg/debrid/debrid"
"github.com/sirrobot01/debrid-blackhole/pkg/debrid/debrid_link"
"github.com/sirrobot01/debrid-blackhole/pkg/debrid/engine"
"github.com/sirrobot01/debrid-blackhole/pkg/debrid/realdebrid"
@@ -17,33 +16,33 @@ import (
func New() *engine.Engine {
cfg := config.GetConfig()
maxCachedSize := cmp.Or(cfg.MaxCacheSize, 1000)
debrids := make([]engine.Service, 0)
// Divide the cache size by the number of debrids
maxCacheSize := maxCachedSize / len(cfg.Debrids)
debrids := make([]debrid.Client, 0)
for _, dc := range cfg.Debrids {
d := createDebrid(dc, cache.New(maxCacheSize))
logger := d.GetLogger()
client := createDebridClient(dc)
logger := client.GetLogger()
logger.Info().Msg("Debrid Service started")
debrids = append(debrids, d)
debrids = append(debrids, client)
}
d := &engine.Engine{
Debrids: debrids,
LastUsed: 0,
}
d := &engine.Engine{Debrids: debrids, LastUsed: 0}
return d
}
func createDebrid(dc config.Debrid, cache *cache.Cache) engine.Service {
func createDebridClient(dc config.Debrid) debrid.Client {
switch dc.Name {
case "realdebrid":
return realdebrid.New(dc, cache)
return realdebrid.New(dc)
case "torbox":
return torbox.New(dc, cache)
return torbox.New(dc)
case "debridlink":
return debrid_link.New(dc, cache)
return debrid_link.New(dc)
case "alldebrid":
return alldebrid.New(dc, cache)
return alldebrid.New(dc)
default:
return realdebrid.New(dc, cache)
return realdebrid.New(dc)
}
}
@@ -55,6 +54,7 @@ func ProcessTorrent(d *engine.Engine, magnet *utils.Magnet, a *arr.Arr, isSymlin
Name: magnet.Name,
Arr: a,
Size: magnet.Size,
Files: make(map[string]torrent.File),
}
errs := make([]error, 0)
@@ -1,22 +1,24 @@
package engine
package debrid
import (
"github.com/rs/zerolog"
"github.com/sirrobot01/debrid-blackhole/pkg/debrid/torrent"
)
type Service interface {
type Client interface {
SubmitMagnet(tr *torrent.Torrent) (*torrent.Torrent, error)
CheckStatus(tr *torrent.Torrent, isSymlink bool) (*torrent.Torrent, error)
GetDownloadLinks(tr *torrent.Torrent) error
GetDownloadLink(tr *torrent.Torrent, file *torrent.File) *torrent.DownloadLinks
GenerateDownloadLinks(tr *torrent.Torrent) error
GetDownloadLink(tr *torrent.Torrent, file *torrent.File) *torrent.File
ConvertLinksToFiles(links []string) []torrent.File
DeleteTorrent(tr *torrent.Torrent)
IsAvailable(infohashes []string) map[string]bool
GetCheckCached() bool
GetDownloadUncached() bool
GetTorrent(torrent *torrent.Torrent) (*torrent.Torrent, error)
UpdateTorrent(torrent *torrent.Torrent) error
GetTorrents() ([]*torrent.Torrent, error)
GetName() string
GetLogger() zerolog.Logger
GetDownloadingStatus() []string
GetDownloads() (map[string]torrent.DownloadLinks, error)
}
+54 -65
View File
@@ -2,19 +2,18 @@ package debrid_link
import (
"bytes"
"encoding/json"
"fmt"
"github.com/goccy/go-json"
"github.com/rs/zerolog"
"github.com/sirrobot01/debrid-blackhole/internal/cache"
"github.com/sirrobot01/debrid-blackhole/internal/config"
"github.com/sirrobot01/debrid-blackhole/internal/logger"
"github.com/sirrobot01/debrid-blackhole/internal/request"
"github.com/sirrobot01/debrid-blackhole/internal/utils"
"github.com/sirrobot01/debrid-blackhole/pkg/debrid/torrent"
"slices"
"time"
"net/http"
"os"
"strings"
)
@@ -23,11 +22,11 @@ type DebridLink struct {
Host string `json:"host"`
APIKey string
DownloadUncached bool
client *request.RLHTTPClient
cache *cache.Cache
MountPath string
logger zerolog.Logger
CheckCached bool
client *request.Client
MountPath string
logger zerolog.Logger
CheckCached bool
}
func (dl *DebridLink) GetName() string {
@@ -38,15 +37,9 @@ func (dl *DebridLink) GetLogger() zerolog.Logger {
return dl.logger
}
func (dl *DebridLink) IsAvailable(infohashes []string) map[string]bool {
func (dl *DebridLink) IsAvailable(hashes []string) map[string]bool {
// Check if the infohashes are available in the local cache
hashes, result := torrent.GetLocalCache(infohashes, dl.cache)
if len(hashes) == 0 {
// Either all the infohashes are locally cached or none are
dl.cache.AddMultiple(result)
return result
}
result := make(map[string]bool)
// Divide hashes into groups of 100
for i := 0; i < len(hashes); i += 100 {
@@ -93,32 +86,31 @@ func (dl *DebridLink) IsAvailable(infohashes []string) map[string]bool {
}
}
}
dl.cache.AddMultiple(result) // Add the results to the cache
return result
}
func (dl *DebridLink) GetTorrent(t *torrent.Torrent) (*torrent.Torrent, error) {
func (dl *DebridLink) UpdateTorrent(t *torrent.Torrent) error {
url := fmt.Sprintf("%s/seedbox/list?ids=%s", dl.Host, t.Id)
req, _ := http.NewRequest(http.MethodGet, url, nil)
resp, err := dl.client.MakeRequest(req)
if err != nil {
return t, err
return err
}
var res TorrentInfo
err = json.Unmarshal(resp, &res)
if err != nil {
return t, err
return err
}
if !res.Success {
return t, fmt.Errorf("error getting torrent")
return fmt.Errorf("error getting torrent")
}
if res.Value == nil {
return t, fmt.Errorf("torrent not found")
return fmt.Errorf("torrent not found")
}
dt := *res.Value
if len(dt) == 0 {
return t, fmt.Errorf("torrent not found")
return fmt.Errorf("torrent not found")
}
data := dt[0]
status := "downloading"
@@ -136,21 +128,22 @@ func (dl *DebridLink) GetTorrent(t *torrent.Torrent) (*torrent.Torrent, error) {
t.Seeders = data.PeersConnected
t.Filename = name
t.OriginalFilename = name
files := make([]torrent.File, len(data.Files))
cfg := config.GetConfig()
for i, f := range data.Files {
for _, f := range data.Files {
if !cfg.IsSizeAllowed(f.Size) {
continue
}
files[i] = torrent.File{
Id: f.ID,
Name: f.Name,
Size: f.Size,
Path: f.Name,
file := torrent.File{
Id: f.ID,
Name: f.Name,
Size: f.Size,
Path: f.Name,
DownloadLink: f.DownloadURL,
Link: f.DownloadURL,
}
t.Files[f.Name] = file
}
t.Files = files
return t, nil
return nil
}
func (dl *DebridLink) SubmitMagnet(t *torrent.Torrent) (*torrent.Torrent, error) {
@@ -185,33 +178,32 @@ func (dl *DebridLink) SubmitMagnet(t *torrent.Torrent) (*torrent.Torrent, error)
t.OriginalFilename = name
t.MountPath = dl.MountPath
t.Debrid = dl.Name
t.DownloadLinks = make(map[string]torrent.DownloadLinks)
files := make([]torrent.File, len(data.Files))
for i, f := range data.Files {
files[i] = torrent.File{
Id: f.ID,
Name: f.Name,
Size: f.Size,
Path: f.Name,
Link: f.DownloadURL,
for _, f := range data.Files {
file := torrent.File{
Id: f.ID,
Name: f.Name,
Size: f.Size,
Path: f.Name,
Link: f.DownloadURL,
DownloadLink: f.DownloadURL,
Generated: time.Now(),
}
t.Files[f.Name] = file
}
t.Files = files
return t, nil
}
func (dl *DebridLink) CheckStatus(torrent *torrent.Torrent, isSymlink bool) (*torrent.Torrent, error) {
for {
t, err := dl.GetTorrent(torrent)
torrent = t
err := dl.UpdateTorrent(torrent)
if err != nil || torrent == nil {
return torrent, err
}
status := torrent.Status
if status == "downloaded" {
dl.logger.Info().Msgf("Torrent: %s downloaded", torrent.Name)
err = dl.GetDownloadLinks(torrent)
err = dl.GenerateDownloadLinks(torrent)
if err != nil {
return torrent, err
}
@@ -242,25 +234,16 @@ func (dl *DebridLink) DeleteTorrent(torrent *torrent.Torrent) {
}
}
func (dl *DebridLink) GetDownloadLinks(t *torrent.Torrent) error {
downloadLinks := make(map[string]torrent.DownloadLinks)
for _, f := range t.Files {
dl := torrent.DownloadLinks{
Link: f.Link,
Filename: f.Name,
}
downloadLinks[f.Id] = dl
}
t.DownloadLinks = downloadLinks
func (dl *DebridLink) GenerateDownloadLinks(t *torrent.Torrent) error {
return nil
}
func (dl *DebridLink) GetDownloadLink(t *torrent.Torrent, file *torrent.File) *torrent.DownloadLinks {
dlLink, ok := t.DownloadLinks[file.Id]
if !ok {
return nil
}
return &dlLink
func (dl *DebridLink) GetDownloads() (map[string]torrent.DownloadLinks, error) {
return nil, nil
}
func (dl *DebridLink) GetDownloadLink(t *torrent.Torrent, file *torrent.File) *torrent.File {
return file
}
func (dl *DebridLink) GetDownloadingStatus() []string {
@@ -275,22 +258,24 @@ func (dl *DebridLink) GetDownloadUncached() bool {
return dl.DownloadUncached
}
func New(dc config.Debrid, cache *cache.Cache) *DebridLink {
func New(dc config.Debrid) *DebridLink {
rl := request.ParseRateLimit(dc.RateLimit)
headers := map[string]string{
"Authorization": fmt.Sprintf("Bearer %s", dc.APIKey),
"Content-Type": "application/json",
}
client := request.NewRLHTTPClient(rl, headers)
_log := logger.NewLogger(dc.Name, config.GetConfig().LogLevel)
client := request.New().
WithHeaders(headers).
WithRateLimiter(rl).WithLogger(_log)
return &DebridLink{
Name: "debridlink",
Host: dc.Host,
APIKey: dc.APIKey,
DownloadUncached: dc.DownloadUncached,
client: client,
cache: cache,
MountPath: dc.Folder,
logger: logger.NewLogger(dc.Name, config.GetConfig().LogLevel, os.Stdout),
logger: logger.NewLogger(dc.Name, config.GetConfig().LogLevel),
CheckCached: dc.CheckCached,
}
}
@@ -298,3 +283,7 @@ func New(dc config.Debrid, cache *cache.Cache) *DebridLink {
func (dl *DebridLink) GetTorrents() ([]*torrent.Torrent, error) {
return nil, nil
}
func (dl *DebridLink) ConvertLinksToFiles(links []string) []torrent.File {
return nil
}
+8 -4
View File
@@ -1,18 +1,22 @@
package engine
import (
"github.com/sirrobot01/debrid-blackhole/pkg/debrid/debrid"
)
type Engine struct {
Debrids []Service
Debrids []debrid.Client
LastUsed int
}
func (d *Engine) Get() Service {
func (d *Engine) Get() debrid.Client {
if d.LastUsed == 0 {
return d.Debrids[0]
}
return d.Debrids[d.LastUsed]
}
func (d *Engine) GetByName(name string) Service {
func (d *Engine) GetByName(name string) debrid.Client {
for _, deb := range d.Debrids {
if deb.GetName() == name {
return deb
@@ -21,6 +25,6 @@ func (d *Engine) GetByName(name string) Service {
return nil
}
func (d *Engine) GetDebrids() []Service {
func (d *Engine) GetDebrids() []debrid.Client {
return d.Debrids
}
+204 -70
View File
@@ -1,22 +1,23 @@
package realdebrid
import (
"encoding/json"
"fmt"
"github.com/goccy/go-json"
"github.com/rs/zerolog"
"github.com/sirrobot01/debrid-blackhole/internal/cache"
"github.com/sirrobot01/debrid-blackhole/internal/config"
"github.com/sirrobot01/debrid-blackhole/internal/logger"
"github.com/sirrobot01/debrid-blackhole/internal/request"
"github.com/sirrobot01/debrid-blackhole/internal/utils"
"github.com/sirrobot01/debrid-blackhole/pkg/debrid/torrent"
"io"
"net/http"
gourl "net/url"
"os"
"path/filepath"
"slices"
"strconv"
"strings"
"sync"
"time"
)
type RealDebrid struct {
@@ -24,11 +25,11 @@ type RealDebrid struct {
Host string `json:"host"`
APIKey string
DownloadUncached bool
client *request.RLHTTPClient
cache *cache.Cache
MountPath string
logger zerolog.Logger
CheckCached bool
client *request.Client
MountPath string
logger zerolog.Logger
CheckCached bool
}
func (r *RealDebrid) GetName() string {
@@ -39,11 +40,11 @@ func (r *RealDebrid) GetLogger() zerolog.Logger {
return r.logger
}
// GetTorrentFiles returns a list of torrent files from the torrent info
// getTorrentFiles returns a list of torrent files from the torrent info
// validate is used to determine if the files should be validated
// if validate is false, selected files will be returned
func GetTorrentFiles(data TorrentInfo, validate bool) []torrent.File {
files := make([]torrent.File, 0)
func getTorrentFiles(t *torrent.Torrent, data TorrentInfo, validate bool) map[string]torrent.File {
files := make(map[string]torrent.File)
cfg := config.GetConfig()
idx := 0
for _, f := range data.Files {
@@ -72,6 +73,13 @@ func GetTorrentFiles(data TorrentInfo, validate bool) []torrent.File {
if len(data.Links) > idx {
_link = data.Links[idx]
}
if a, ok := t.Files[name]; ok {
a.Link = _link
files[name] = a
continue
}
file := torrent.File{
Name: name,
Path: name,
@@ -79,21 +87,15 @@ func GetTorrentFiles(data TorrentInfo, validate bool) []torrent.File {
Id: strconv.Itoa(fileId),
Link: _link,
}
files = append(files, file)
files[name] = file
idx++
}
return files
}
func (r *RealDebrid) IsAvailable(infohashes []string) map[string]bool {
func (r *RealDebrid) IsAvailable(hashes []string) map[string]bool {
// Check if the infohashes are available in the local cache
hashes, result := torrent.GetLocalCache(infohashes, r.cache)
if len(hashes) == 0 {
// Either all the infohashes are locally cached or none are
r.cache.AddMultiple(result)
return result
}
result := make(map[string]bool)
// Divide hashes into groups of 100
for i := 0; i < len(hashes); i += 200 {
@@ -136,7 +138,6 @@ func (r *RealDebrid) IsAvailable(infohashes []string) map[string]bool {
}
}
}
r.cache.AddMultiple(result) // Add the results to the cache
return result
}
@@ -160,17 +161,17 @@ func (r *RealDebrid) SubmitMagnet(t *torrent.Torrent) (*torrent.Torrent, error)
return t, nil
}
func (r *RealDebrid) GetTorrent(t *torrent.Torrent) (*torrent.Torrent, error) {
func (r *RealDebrid) UpdateTorrent(t *torrent.Torrent) error {
url := fmt.Sprintf("%s/torrents/info/%s", r.Host, t.Id)
req, _ := http.NewRequest(http.MethodGet, url, nil)
resp, err := r.client.MakeRequest(req)
if err != nil {
return t, err
return err
}
var data TorrentInfo
err = json.Unmarshal(resp, &data)
if err != nil {
return t, err
return err
}
name := utils.RemoveInvalidChars(data.OriginalFilename)
t.Name = name
@@ -185,10 +186,8 @@ func (r *RealDebrid) GetTorrent(t *torrent.Torrent) (*torrent.Torrent, error) {
t.Links = data.Links
t.MountPath = r.MountPath
t.Debrid = r.Name
t.DownloadLinks = make(map[string]torrent.DownloadLinks)
files := GetTorrentFiles(data, false) // Get selected files
t.Files = files
return t, nil
t.Files = getTorrentFiles(t, data, false) // Get selected files
return nil
}
func (r *RealDebrid) CheckStatus(t *torrent.Torrent, isSymlink bool) (*torrent.Torrent, error) {
@@ -219,13 +218,12 @@ func (r *RealDebrid) CheckStatus(t *torrent.Torrent, isSymlink bool) (*torrent.T
t.Debrid = r.Name
t.MountPath = r.MountPath
if status == "waiting_files_selection" {
files := GetTorrentFiles(data, true) // Validate files to be selected
t.Files = files
if len(files) == 0 {
t.Files = getTorrentFiles(t, data, true)
if len(t.Files) == 0 {
return t, fmt.Errorf("no video files found")
}
filesId := make([]string, 0)
for _, f := range files {
for _, f := range t.Files {
filesId = append(filesId, f.Id)
}
p := gourl.Values{
@@ -238,11 +236,10 @@ func (r *RealDebrid) CheckStatus(t *torrent.Torrent, isSymlink bool) (*torrent.T
return t, err
}
} else if status == "downloaded" {
files := GetTorrentFiles(data, false) // Get selected files
t.Files = files
t.Files = getTorrentFiles(t, data, false) // Get selected files
r.logger.Info().Msgf("Torrent: %s downloaded to RD", t.Name)
if !isSymlink {
err = r.GetDownloadLinks(t)
err = r.GenerateDownloadLinks(t)
if err != nil {
return t, err
}
@@ -271,12 +268,11 @@ func (r *RealDebrid) DeleteTorrent(torrent *torrent.Torrent) {
}
}
func (r *RealDebrid) GetDownloadLinks(t *torrent.Torrent) error {
func (r *RealDebrid) GenerateDownloadLinks(t *torrent.Torrent) error {
url := fmt.Sprintf("%s/unrestrict/link/", r.Host)
downloadLinks := make(map[string]torrent.DownloadLinks)
for _, f := range t.Files {
dlLink := t.DownloadLinks[f.Id]
if f.Link == "" || dlLink.DownloadLink != "" {
if f.DownloadLink != "" {
// Or check the generated link
continue
}
payload := gourl.Values{
@@ -291,18 +287,41 @@ func (r *RealDebrid) GetDownloadLinks(t *torrent.Torrent) error {
if err = json.Unmarshal(resp, &data); err != nil {
return err
}
download := torrent.DownloadLinks{
Link: data.Link,
Filename: data.Filename,
DownloadLink: data.Download,
}
downloadLinks[f.Id] = download
f.DownloadLink = data.Download
f.Generated = time.Now()
t.Files[f.Name] = f
}
t.DownloadLinks = downloadLinks
return nil
}
func (r *RealDebrid) GetDownloadLink(t *torrent.Torrent, file *torrent.File) *torrent.DownloadLinks {
// ConvertLinksToFiles unrestricts each hoster link against the RealDebrid
// API and returns the resulting files (name, size, download URL).
// Links that fail to unrestrict are skipped (best effort), now with a
// debug log instead of a silent drop. The endpoint URL is hoisted out of
// the loop and the result slice is pre-sized to len(links).
func (r *RealDebrid) ConvertLinksToFiles(links []string) []torrent.File {
	url := fmt.Sprintf("%s/unrestrict/link/", r.Host) // loop-invariant
	files := make([]torrent.File, 0, len(links))
	for _, l := range links {
		payload := gourl.Values{
			"link": {l},
		}
		req, _ := http.NewRequest(http.MethodPost, url, strings.NewReader(payload.Encode()))
		resp, err := r.client.MakeRequest(req)
		if err != nil {
			r.logger.Debug().Err(err).Msgf("Failed to unrestrict link: %s", l)
			continue
		}
		var data UnrestrictResponse
		if err = json.Unmarshal(resp, &data); err != nil {
			r.logger.Debug().Err(err).Msgf("Failed to decode unrestrict response for link: %s", l)
			continue
		}
		files = append(files, torrent.File{
			Name:         data.Filename,
			Size:         data.Filesize,
			Link:         l,
			DownloadLink: data.Download,
			Generated:    time.Now(),
		})
	}
	return files
}
func (r *RealDebrid) GetDownloadLink(t *torrent.Torrent, file *torrent.File) *torrent.File {
url := fmt.Sprintf("%s/unrestrict/link/", r.Host)
payload := gourl.Values{
"link": {file.Link},
@@ -316,32 +335,43 @@ func (r *RealDebrid) GetDownloadLink(t *torrent.Torrent, file *torrent.File) *to
if err = json.Unmarshal(resp, &data); err != nil {
return nil
}
return &torrent.DownloadLinks{
Link: data.Link,
Filename: data.Filename,
DownloadLink: data.Download,
}
file.DownloadLink = data.Download
file.Generated = time.Now()
return file
}
func (r *RealDebrid) GetCheckCached() bool {
return r.CheckCached
}
func (r *RealDebrid) getTorrents(offset int, limit int) ([]*torrent.Torrent, error) {
func (r *RealDebrid) getTorrents(offset int, limit int) (int, []*torrent.Torrent, error) {
url := fmt.Sprintf("%s/torrents?limit=%d", r.Host, limit)
torrents := make([]*torrent.Torrent, 0)
if offset > 0 {
url = fmt.Sprintf("%s&offset=%d", url, offset)
}
req, _ := http.NewRequest(http.MethodGet, url, nil)
resp, err := r.client.MakeRequest(req)
resp, err := r.client.Do(req)
if err != nil {
return nil, err
return 0, torrents, err
}
if resp.StatusCode != http.StatusOK {
resp.Body.Close()
return 0, torrents, fmt.Errorf("realdebrid API error: %d", resp.StatusCode)
}
defer resp.Body.Close()
body, err := io.ReadAll(resp.Body)
if err != nil {
return 0, torrents, err
}
totalItems, _ := strconv.Atoi(resp.Header.Get("X-Total-Count"))
var data []TorrentsResponse
if err = json.Unmarshal(resp, &data); err != nil {
return nil, err
if err = json.Unmarshal(body, &data); err != nil {
return 0, nil, err
}
torrents := make([]*torrent.Torrent, 0)
filenames := map[string]bool{}
for _, t := range data {
if _, exists := filenames[t.Filename]; exists {
@@ -356,20 +386,122 @@ func (r *RealDebrid) getTorrents(offset int, limit int) ([]*torrent.Torrent, err
Filename: t.Filename,
OriginalFilename: t.Filename,
Links: t.Links,
Files: make(map[string]torrent.File),
InfoHash: t.Hash,
Debrid: r.Name,
MountPath: r.MountPath,
})
}
return torrents, nil
return totalItems, torrents, nil
}
func (r *RealDebrid) GetTorrents() ([]*torrent.Torrent, error) {
torrents := make([]*torrent.Torrent, 0)
offset := 0
limit := 1000
ts, _ := r.getTorrents(offset, limit)
torrents = append(torrents, ts...)
offset = len(torrents)
return torrents, nil
limit := 5000
// Get first batch and total count
totalItems, firstBatch, err := r.getTorrents(0, limit)
if err != nil {
return nil, err
}
allTorrents := firstBatch
// Calculate remaining requests
remaining := totalItems - len(firstBatch)
if remaining <= 0 {
return allTorrents, nil
}
// Prepare for concurrent fetching
var wg sync.WaitGroup
var mu sync.Mutex
var fetchError error
// Calculate how many more requests we need
batchCount := (remaining + limit - 1) / limit // ceiling division
for i := 1; i <= batchCount; i++ {
wg.Add(1)
go func(batchOffset int) {
defer wg.Done()
_, batch, err := r.getTorrents(batchOffset, limit)
if err != nil {
mu.Lock()
fetchError = err
mu.Unlock()
return
}
mu.Lock()
allTorrents = append(allTorrents, batch...)
mu.Unlock()
}(i * limit)
}
// Wait for all fetches to complete
wg.Wait()
if fetchError != nil {
return nil, fetchError
}
return allTorrents, nil
}
// GetDownloads pages through the account's download history and returns it
// keyed by restricted link. Results are date-ordered, so only the first
// occurrence of each link is kept. Paging stops on the first error or an
// empty page; partial results are returned with a nil error.
func (r *RealDebrid) GetDownloads() (map[string]torrent.DownloadLinks, error) {
	const limit = 5000
	links := make(map[string]torrent.DownloadLinks)
	for offset := 0; ; {
		batch, err := r._getDownloads(offset, limit)
		if err != nil || len(batch) == 0 {
			break
		}
		for _, d := range batch {
			// This is ordered by date, so we can skip the rest
			if _, seen := links[d.Link]; seen {
				continue
			}
			links[d.Link] = d
		}
		offset += len(batch)
	}
	return links, nil
}
// _getDownloads fetches one page of the account's downloads listing and
// maps each row into a torrent.DownloadLinks value.
func (r *RealDebrid) _getDownloads(offset int, limit int) ([]torrent.DownloadLinks, error) {
	url := fmt.Sprintf("%s/downloads?limit=%d", r.Host, limit)
	if offset > 0 {
		url = fmt.Sprintf("%s&offset=%d", url, offset)
	}
	req, _ := http.NewRequest(http.MethodGet, url, nil)
	resp, err := r.client.MakeRequest(req)
	if err != nil {
		return nil, err
	}
	var data []DownloadsResponse
	if err = json.Unmarshal(resp, &data); err != nil {
		return nil, err
	}
	// Pre-size: exactly one output entry per response row.
	links := make([]torrent.DownloadLinks, 0, len(data))
	for _, d := range data {
		links = append(links, torrent.DownloadLinks{
			Filename:     d.Filename,
			Size:         d.Filesize,
			Link:         d.Link,
			DownloadLink: d.Download,
			Generated:    d.Generated,
			Id:           d.Id,
		})
	}
	return links, nil
}
func (r *RealDebrid) GetDownloadingStatus() []string {
@@ -380,21 +512,23 @@ func (r *RealDebrid) GetDownloadUncached() bool {
return r.DownloadUncached
}
func New(dc config.Debrid, cache *cache.Cache) *RealDebrid {
func New(dc config.Debrid) *RealDebrid {
rl := request.ParseRateLimit(dc.RateLimit)
headers := map[string]string{
"Authorization": fmt.Sprintf("Bearer %s", dc.APIKey),
}
client := request.NewRLHTTPClient(rl, headers)
_log := logger.NewLogger(dc.Name, config.GetConfig().LogLevel)
client := request.New().
WithHeaders(headers).
WithRateLimiter(rl).WithLogger(_log)
return &RealDebrid{
Name: "realdebrid",
Host: dc.Host,
APIKey: dc.APIKey,
DownloadUncached: dc.DownloadUncached,
client: client,
cache: cache,
MountPath: dc.Folder,
logger: logger.NewLogger(dc.Name, config.GetConfig().LogLevel, os.Stdout),
logger: logger.NewLogger(dc.Name, config.GetConfig().LogLevel),
CheckCached: dc.CheckCached,
}
}
+16 -2
View File
@@ -1,8 +1,8 @@
package realdebrid
import (
"encoding/json"
"fmt"
"github.com/goccy/go-json"
"time"
)
@@ -98,7 +98,7 @@ type UnrestrictResponse struct {
Id string `json:"id"`
Filename string `json:"filename"`
MimeType string `json:"mimeType"`
Filesize int `json:"filesize"`
Filesize int64 `json:"filesize"`
Link string `json:"link"`
Host string `json:"host"`
Chunks int `json:"chunks"`
@@ -120,3 +120,17 @@ type TorrentsResponse struct {
Links []string `json:"links"`
Ended time.Time `json:"ended"`
}
type DownloadsResponse struct {
Id string `json:"id"`
Filename string `json:"filename"`
MimeType string `json:"mimeType"`
Filesize int64 `json:"filesize"`
Link string `json:"link"`
Host string `json:"host"`
HostIcon string `json:"host_icon"`
Chunks int64 `json:"chunks"`
Download string `json:"download"`
Streamable int `json:"streamable"`
Generated time.Time `json:"generated"`
}
+45 -56
View File
@@ -2,20 +2,19 @@ package torbox
import (
"bytes"
"encoding/json"
"fmt"
"github.com/goccy/go-json"
"github.com/rs/zerolog"
"github.com/sirrobot01/debrid-blackhole/internal/cache"
"github.com/sirrobot01/debrid-blackhole/internal/config"
"github.com/sirrobot01/debrid-blackhole/internal/logger"
"github.com/sirrobot01/debrid-blackhole/internal/request"
"github.com/sirrobot01/debrid-blackhole/internal/utils"
"github.com/sirrobot01/debrid-blackhole/pkg/debrid/torrent"
"time"
"mime/multipart"
"net/http"
gourl "net/url"
"os"
"path"
"path/filepath"
"slices"
@@ -28,11 +27,11 @@ type Torbox struct {
Host string `json:"host"`
APIKey string
DownloadUncached bool
client *request.RLHTTPClient
cache *cache.Cache
MountPath string
logger zerolog.Logger
CheckCached bool
client *request.Client
MountPath string
logger zerolog.Logger
CheckCached bool
}
func (tb *Torbox) GetName() string {
@@ -43,15 +42,9 @@ func (tb *Torbox) GetLogger() zerolog.Logger {
return tb.logger
}
func (tb *Torbox) IsAvailable(infohashes []string) map[string]bool {
func (tb *Torbox) IsAvailable(hashes []string) map[string]bool {
// Check if the infohashes are available in the local cache
hashes, result := torrent.GetLocalCache(infohashes, tb.cache)
if len(hashes) == 0 {
// Either all the infohashes are locally cached or none are
tb.cache.AddMultiple(result)
return result
}
result := make(map[string]bool)
// Divide hashes into groups of 100
for i := 0; i < len(hashes); i += 100 {
@@ -91,13 +84,12 @@ func (tb *Torbox) IsAvailable(infohashes []string) map[string]bool {
return result
}
for h, cache := range *res.Data {
if cache.Size > 0 {
for h, c := range *res.Data {
if c.Size > 0 {
result[strings.ToUpper(h)] = true
}
}
}
tb.cache.AddMultiple(result) // Add the results to the cache
return result
}
@@ -149,17 +141,17 @@ func getTorboxStatus(status string, finished bool) string {
}
}
func (tb *Torbox) GetTorrent(t *torrent.Torrent) (*torrent.Torrent, error) {
func (tb *Torbox) UpdateTorrent(t *torrent.Torrent) error {
url := fmt.Sprintf("%s/api/torrents/mylist/?id=%s", tb.Host, t.Id)
req, _ := http.NewRequest(http.MethodGet, url, nil)
resp, err := tb.client.MakeRequest(req)
if err != nil {
return t, err
return err
}
var res InfoResponse
err = json.Unmarshal(resp, &res)
if err != nil {
return t, err
return err
}
data := res.Data
name := data.Name
@@ -174,8 +166,6 @@ func (tb *Torbox) GetTorrent(t *torrent.Torrent) (*torrent.Torrent, error) {
t.OriginalFilename = name
t.MountPath = tb.MountPath
t.Debrid = tb.Name
t.DownloadLinks = make(map[string]torrent.DownloadLinks)
files := make([]torrent.File, 0)
cfg := config.GetConfig()
for _, f := range data.Files {
fileName := filepath.Base(f.Name)
@@ -196,35 +186,32 @@ func (tb *Torbox) GetTorrent(t *torrent.Torrent) (*torrent.Torrent, error) {
Size: f.Size,
Path: fileName,
}
files = append(files, file)
t.Files[fileName] = file
}
var cleanPath string
if len(files) > 0 {
if len(t.Files) > 0 {
cleanPath = path.Clean(data.Files[0].Name)
} else {
cleanPath = path.Clean(data.Name)
}
t.OriginalFilename = strings.Split(cleanPath, "/")[0]
t.Files = files
//t.Debrid = tb
return t, nil
t.Debrid = tb.Name
return nil
}
func (tb *Torbox) CheckStatus(torrent *torrent.Torrent, isSymlink bool) (*torrent.Torrent, error) {
for {
t, err := tb.GetTorrent(torrent)
err := tb.UpdateTorrent(torrent)
torrent = t
if err != nil || t == nil {
return t, err
if err != nil || torrent == nil {
return torrent, err
}
status := torrent.Status
if status == "downloaded" {
tb.logger.Info().Msgf("Torrent: %s downloaded", torrent.Name)
if !isSymlink {
err = tb.GetDownloadLinks(torrent)
err = tb.GenerateDownloadLinks(torrent)
if err != nil {
return torrent, err
}
@@ -258,8 +245,7 @@ func (tb *Torbox) DeleteTorrent(torrent *torrent.Torrent) {
}
}
func (tb *Torbox) GetDownloadLinks(t *torrent.Torrent) error {
downloadLinks := make(map[string]torrent.DownloadLinks)
func (tb *Torbox) GenerateDownloadLinks(t *torrent.Torrent) error {
for _, file := range t.Files {
url := fmt.Sprintf("%s/api/torrents/requestdl/", tb.Host)
query := gourl.Values{}
@@ -279,21 +265,15 @@ func (tb *Torbox) GetDownloadLinks(t *torrent.Torrent) error {
if data.Data == nil {
return fmt.Errorf("error getting download links")
}
idx := 0
link := *data.Data
dl := torrent.DownloadLinks{
Link: link,
Filename: t.Files[idx].Name,
DownloadLink: link,
}
downloadLinks[file.Id] = dl
file.DownloadLink = link
file.Generated = time.Now()
t.Files[file.Name] = file
}
t.DownloadLinks = downloadLinks
return nil
}
func (tb *Torbox) GetDownloadLink(t *torrent.Torrent, file *torrent.File) *torrent.DownloadLinks {
func (tb *Torbox) GetDownloadLink(t *torrent.Torrent, file *torrent.File) *torrent.File {
url := fmt.Sprintf("%s/api/torrents/requestdl/", tb.Host)
query := gourl.Values{}
query.Add("torrent_id", t.Id)
@@ -313,11 +293,9 @@ func (tb *Torbox) GetDownloadLink(t *torrent.Torrent, file *torrent.File) *torre
return nil
}
link := *data.Data
return &torrent.DownloadLinks{
Link: file.Link,
Filename: file.Name,
DownloadLink: link,
}
file.DownloadLink = link
file.Generated = time.Now()
return file
}
func (tb *Torbox) GetDownloadingStatus() []string {
@@ -336,21 +314,32 @@ func (tb *Torbox) GetDownloadUncached() bool {
return tb.DownloadUncached
}
func New(dc config.Debrid, cache *cache.Cache) *Torbox {
func New(dc config.Debrid) *Torbox {
rl := request.ParseRateLimit(dc.RateLimit)
headers := map[string]string{
"Authorization": fmt.Sprintf("Bearer %s", dc.APIKey),
}
client := request.NewRLHTTPClient(rl, headers)
_log := logger.NewLogger(dc.Name, config.GetConfig().LogLevel)
client := request.New().
WithHeaders(headers).
WithRateLimiter(rl).WithLogger(_log)
return &Torbox{
Name: "torbox",
Host: dc.Host,
APIKey: dc.APIKey,
DownloadUncached: dc.DownloadUncached,
client: client,
cache: cache,
MountPath: dc.Folder,
logger: logger.NewLogger(dc.Name, config.GetConfig().LogLevel, os.Stdout),
logger: _log,
CheckCached: dc.CheckCached,
}
}
// ConvertLinksToFiles is a stub: Torbox performs no link-to-file
// conversion and always returns nil. Presumably present to satisfy the
// shared debrid client interface — confirm against the interface decl.
func (tb *Torbox) ConvertLinksToFiles(links []string) []torrent.File {
	return nil
}
// GetDownloads is a stub: Torbox exposes no downloads listing here and
// always returns (nil, nil). Presumably present to satisfy the shared
// debrid client interface — confirm against the interface decl.
func (tb *Torbox) GetDownloads() (map[string]torrent.DownloadLinks, error) {
	return nil, nil
}
+31 -54
View File
@@ -2,34 +2,33 @@ package torrent
import (
"fmt"
"github.com/sirrobot01/debrid-blackhole/internal/cache"
"github.com/sirrobot01/debrid-blackhole/internal/logger"
"github.com/sirrobot01/debrid-blackhole/internal/utils"
"github.com/sirrobot01/debrid-blackhole/pkg/arr"
"os"
"path/filepath"
"sync"
"time"
)
type Torrent struct {
Id string `json:"id"`
InfoHash string `json:"info_hash"`
Name string `json:"name"`
Folder string `json:"folder"`
Filename string `json:"filename"`
OriginalFilename string `json:"original_filename"`
Size int64 `json:"size"`
Bytes int64 `json:"bytes"` // Size of only the files that are downloaded
Magnet *utils.Magnet `json:"magnet"`
Files []File `json:"files"`
Status string `json:"status"`
Added string `json:"added"`
Progress float64 `json:"progress"`
Speed int64 `json:"speed"`
Seeders int `json:"seeders"`
Links []string `json:"links"`
DownloadLinks map[string]DownloadLinks `json:"download_links"`
MountPath string `json:"mount_path"`
Id string `json:"id"`
InfoHash string `json:"info_hash"`
Name string `json:"name"`
Folder string `json:"folder"`
Filename string `json:"filename"`
OriginalFilename string `json:"original_filename"`
Size int64 `json:"size"`
Bytes int64 `json:"bytes"` // Size of only the files that are downloaded
Magnet *utils.Magnet `json:"magnet"`
Files map[string]File `json:"files"`
Status string `json:"status"`
Added string `json:"added"`
Progress float64 `json:"progress"`
Speed int64 `json:"speed"`
Seeders int `json:"seeders"`
Links []string `json:"links"`
MountPath string `json:"mount_path"`
Debrid string `json:"debrid"`
@@ -40,9 +39,12 @@ type Torrent struct {
}
type DownloadLinks struct {
Filename string `json:"filename"`
Link string `json:"link"`
DownloadLink string `json:"download_link"`
Filename string `json:"filename"`
Link string `json:"link"`
DownloadLink string `json:"download_link"`
Generated time.Time `json:"generated"`
Size int64 `json:"size"`
Id string `json:"id"`
}
func (t *Torrent) GetSymlinkFolder(parent string) string {
@@ -69,11 +71,13 @@ func (t *Torrent) GetMountFolder(rClonePath string) (string, error) {
}
type File struct {
Id string `json:"id"`
Name string `json:"name"`
Size int64 `json:"size"`
Path string `json:"path"`
Link string `json:"link"`
Id string `json:"id"`
Name string `json:"name"`
Size int64 `json:"size"`
Path string `json:"path"`
Link string `json:"link"`
DownloadLink string `json:"download_link"`
Generated time.Time `json:"generated"`
}
func (t *Torrent) Cleanup(remove bool) {
@@ -93,30 +97,3 @@ func (t *Torrent) GetFile(id string) *File {
}
return nil
}
func GetLocalCache(infohashes []string, cache *cache.Cache) ([]string, map[string]bool) {
result := make(map[string]bool)
hashes := make([]string, 0)
if len(infohashes) == 0 {
return hashes, result
}
if len(infohashes) == 1 {
if cache.Exists(infohashes[0]) {
return hashes, map[string]bool{infohashes[0]: true}
}
return infohashes, result
}
cachedHashes := cache.GetMultiple(infohashes)
for _, h := range infohashes {
_, exists := cachedHashes[h]
if !exists {
hashes = append(hashes, h)
} else {
result[h] = true
}
}
return infohashes, result
}
+1 -1
View File
@@ -88,7 +88,7 @@ func NewProxy() *Proxy {
username: cfg.Username,
password: cfg.Password,
cachedOnly: cfg.CachedOnly,
logger: logger.NewLogger("proxy", cfg.LogLevel, os.Stdout),
logger: logger.NewLogger("proxy", cfg.LogLevel),
}
}
+8 -8
View File
@@ -52,7 +52,7 @@ Loop:
func (q *QBit) ProcessManualFile(torrent *Torrent) (string, error) {
debridTorrent := torrent.DebridTorrent
q.logger.Info().Msgf("Downloading %d files...", len(debridTorrent.DownloadLinks))
q.logger.Info().Msgf("Downloading %d files...", len(debridTorrent.Files))
torrentPath := filepath.Join(q.DownloadFolder, debridTorrent.Arr.Name, utils.RemoveExtension(debridTorrent.OriginalFilename))
torrentPath = utils.RemoveInvalidChars(torrentPath)
err := os.MkdirAll(torrentPath, os.ModePerm)
@@ -103,21 +103,21 @@ func (q *QBit) downloadFiles(torrent *Torrent, parent string) {
Transport: tr,
},
}
for _, link := range debridTorrent.DownloadLinks {
if link.DownloadLink == "" {
q.logger.Info().Msgf("No download link found for %s", link.Filename)
for _, file := range debridTorrent.Files {
if file.DownloadLink == "" {
q.logger.Info().Msgf("No download link found for %s", file.Name)
continue
}
wg.Add(1)
semaphore <- struct{}{}
go func(link debrid.DownloadLinks) {
go func(file debrid.File) {
defer wg.Done()
defer func() { <-semaphore }()
filename := link.Filename
filename := file.Link
err := Download(
client,
link.DownloadLink,
file.DownloadLink,
filepath.Join(parent, filename),
progressCallback,
)
@@ -127,7 +127,7 @@ func (q *QBit) downloadFiles(torrent *Torrent, parent string) {
} else {
q.logger.Info().Msgf("Downloaded %s", filename)
}
}(link)
}(file)
}
wg.Wait()
q.logger.Info().Msgf("Downloaded all files for %s", debridTorrent.Name)
+1 -1
View File
@@ -34,7 +34,7 @@ func New() *QBit {
DownloadFolder: cfg.DownloadFolder,
Categories: cfg.Categories,
Storage: NewTorrentStorage(filepath.Join(_cfg.Path, "torrents.json")),
logger: logger.NewLogger("qbit", _cfg.LogLevel, os.Stdout),
logger: logger.NewLogger("qbit", _cfg.LogLevel),
RefreshInterval: refreshInterval,
SkipPreCache: cfg.SkipPreCache,
}
+1 -1
View File
@@ -1,8 +1,8 @@
package qbit
import (
"encoding/json"
"fmt"
"github.com/goccy/go-json"
"os"
"sort"
"sync"
+1 -1
View File
@@ -185,7 +185,7 @@ func (q *QBit) UpdateTorrent(t *Torrent, debridTorrent *debrid.Torrent) *Torrent
}
_db := service.GetDebrid().GetByName(debridTorrent.Debrid)
if debridTorrent.Status != "downloaded" {
debridTorrent, _ = _db.GetTorrent(debridTorrent)
_ = _db.UpdateTorrent(debridTorrent)
}
t = q.UpdateTorrentMin(t, debridTorrent)
t.ContentPath = t.TorrentPath + string(os.PathSeparator)
+4 -4
View File
@@ -2,15 +2,15 @@ package repair
import (
"context"
"encoding/json"
"fmt"
"github.com/goccy/go-json"
"github.com/google/uuid"
"github.com/rs/zerolog"
"github.com/sirrobot01/debrid-blackhole/internal/config"
"github.com/sirrobot01/debrid-blackhole/internal/logger"
"github.com/sirrobot01/debrid-blackhole/internal/request"
"github.com/sirrobot01/debrid-blackhole/pkg/arr"
"github.com/sirrobot01/debrid-blackhole/pkg/debrid/engine"
"github.com/sirrobot01/debrid-blackhole/pkg/debrid/debrid"
"golang.org/x/sync/errgroup"
"net"
"net/http"
@@ -29,7 +29,7 @@ import (
type Repair struct {
Jobs map[string]*Job
arrs *arr.Storage
deb engine.Service
deb debrid.Client
duration time.Duration
runOnStart bool
ZurgURL string
@@ -47,7 +47,7 @@ func New(arrs *arr.Storage) *Repair {
}
r := &Repair{
arrs: arrs,
logger: logger.NewLogger("repair", cfg.LogLevel, os.Stdout),
logger: logger.NewLogger("repair", cfg.LogLevel),
duration: duration,
runOnStart: cfg.Repair.RunOnStart,
ZurgURL: cfg.Repair.ZurgURL,
+1 -1
View File
@@ -23,7 +23,7 @@ type Server struct {
func New() *Server {
cfg := config.GetConfig()
l := logger.NewLogger("http", cfg.LogLevel, os.Stdout)
l := logger.NewLogger("http", cfg.LogLevel)
r := chi.NewRouter()
r.Use(middleware.Recoverer)
r.Handle("/static/*", http.StripPrefix("/static/", http.FileServer(http.Dir("static"))))
+1 -1
View File
@@ -2,7 +2,7 @@ package server
import (
"cmp"
"encoding/json"
"github.com/goccy/go-json"
"github.com/sirrobot01/debrid-blackhole/pkg/service"
"net/http"
)
+6 -9
View File
@@ -3,17 +3,15 @@ package service
import (
"github.com/sirrobot01/debrid-blackhole/pkg/arr"
"github.com/sirrobot01/debrid-blackhole/pkg/debrid"
"github.com/sirrobot01/debrid-blackhole/pkg/debrid/cache"
"github.com/sirrobot01/debrid-blackhole/pkg/debrid/engine"
"github.com/sirrobot01/debrid-blackhole/pkg/repair"
"sync"
)
type Service struct {
Repair *repair.Repair
Arr *arr.Storage
Debrid *engine.Engine
DebridCache *cache.Manager
Repair *repair.Repair
Arr *arr.Storage
Debrid *engine.Engine
}
var (
@@ -26,10 +24,9 @@ func New() *Service {
arrs := arr.NewStorage()
deb := debrid.New()
instance = &Service{
Repair: repair.New(arrs),
Arr: arrs,
Debrid: deb,
DebridCache: cache.NewManager(deb),
Repair: repair.New(arrs),
Arr: arrs,
Debrid: deb,
}
})
return instance
+2 -3
View File
@@ -2,8 +2,8 @@ package web
import (
"embed"
"encoding/json"
"fmt"
"github.com/goccy/go-json"
"github.com/gorilla/sessions"
"github.com/sirrobot01/debrid-blackhole/internal/config"
"github.com/sirrobot01/debrid-blackhole/internal/logger"
@@ -14,7 +14,6 @@ import (
"golang.org/x/crypto/bcrypt"
"html/template"
"net/http"
"os"
"strings"
"github.com/go-chi/chi/v5"
@@ -64,7 +63,7 @@ func New(qbit *qbit.QBit) *Handler {
cfg := config.GetConfig()
return &Handler{
qbit: qbit,
logger: logger.NewLogger("ui", cfg.LogLevel, os.Stdout),
logger: logger.NewLogger("ui", cfg.LogLevel),
}
}
+542
View File
@@ -0,0 +1,542 @@
package webdav
import (
"bufio"
"context"
"fmt"
"github.com/dgraph-io/badger/v4"
"github.com/goccy/go-json"
"github.com/rs/zerolog"
"github.com/sirrobot01/debrid-blackhole/pkg/debrid/debrid"
"os"
"path/filepath"
"runtime"
"sync"
"sync/atomic"
"time"
"github.com/sirrobot01/debrid-blackhole/internal/config"
"github.com/sirrobot01/debrid-blackhole/pkg/debrid/torrent"
)
// DownloadLinkCache is the serialized form of a resolved download link.
// NOTE(review): not referenced anywhere in this chunk — confirm it is
// used elsewhere or remove it.
type DownloadLinkCache struct {
	Link string `json:"download_link"`
}
// CachedTorrent wraps a debrid torrent with cache bookkeeping: when it
// was last read and whether its file list has been fetched (IsComplete is
// set from len(Files) > 0; see processTorrent and load).
type CachedTorrent struct {
	*torrent.Torrent
	LastRead   time.Time `json:"last_read"`
	IsComplete bool      `json:"is_complete"`
}
// Cache mirrors one debrid account for the WebDAV server: an in-memory
// index of torrents (keyed both by Id and by display name), a prebuilt
// directory listing, and a restricted-link → download-link map, all
// backed by per-torrent JSON snapshots stored under dir.
type Cache struct {
	dir    string        // snapshot directory: <config path>/cache/<client name>
	client debrid.Client // debrid service this cache fronts
	db     *badger.DB    // NOTE(review): never opened in this chunk; only Close touches it — confirm
	logger zerolog.Logger
	torrents      map[string]*CachedTorrent // key: torrent.Id, value: *CachedTorrent
	torrentsNames map[string]*CachedTorrent // key: torrent.Name, value: torrent
	listings      atomic.Value              // holds []os.FileInfo; rebuilt by refreshListings
	downloadLinks map[string]string         // key: file.Link, value: download link
	workers       int                       // NOTE(review): set to 200 in NewCache but not read in this chunk — confirm
	LastUpdated   time.Time `json:"last_updated"`
	// refresh mutex
	torrentsRefreshMutex      sync.Mutex // for refreshing torrents
	downloadLinksRefreshMutex sync.Mutex // for refreshing download links
	// Mutexes
	torrentsMutex      sync.RWMutex // for torrents and torrentsNames
	downloadLinksMutex sync.Mutex   // guards downloadLinks
}
// setTorrent registers t under both its Id and Name keys and rebuilds the
// directory listing, all under the write lock. The on-disk save runs in a
// goroutine so callers are not blocked on file I/O; a failed save is only
// logged at debug level. NOTE(review): the save goroutine is unbounded
// and has no shutdown hook.
func (c *Cache) setTorrent(t *CachedTorrent) {
	c.torrentsMutex.Lock()
	defer c.torrentsMutex.Unlock()
	c.torrents[t.Id] = t
	c.torrentsNames[t.Name] = t
	// refreshListings iterates c.torrents; here the write lock is still
	// held, so the iteration is safe.
	c.refreshListings()
	go func() {
		if err := c.SaveTorrent(t); err != nil {
			c.logger.Debug().Err(err).Msgf("Failed to save torrent %s", t.Id)
		}
	}()
}
// refreshListings rebuilds the WebDAV directory listing — one directory
// entry per cached torrent — and atomically swaps the finished slice into
// c.listings, so readers never see a partially built listing.
// It iterates c.torrents WITHOUT taking torrentsMutex itself: callers
// must hold that lock (read or write) for the iteration to be safe.
func (c *Cache) refreshListings() {
	files := make([]os.FileInfo, 0, len(c.torrents))
	now := time.Now()
	for _, t := range c.torrents {
		// Skip nil entries defensively; each torrent is exposed as a
		// directory named after the torrent.
		if t != nil && t.Torrent != nil {
			files = append(files, &FileInfo{
				name:    t.Name,
				size:    0,
				mode:    0755 | os.ModeDir,
				modTime: now,
				isDir:   true,
			})
		}
	}
	// Atomic store of the complete ready-to-use slice
	c.listings.Store(files)
}
// GetListing returns the most recently built directory listing, or nil if
// no listing has been stored yet. The original used an unchecked type
// assertion, which panics on the nil interface returned by Load before
// the first refreshListings call.
func (c *Cache) GetListing() []os.FileInfo {
	if files, ok := c.listings.Load().([]os.FileInfo); ok {
		return files
	}
	return nil
}
// setTorrents bulk-registers torrents under both Id and Name keys,
// rebuilds the directory listing, and persists all torrents to disk in
// the background.
func (c *Cache) setTorrents(torrents []*CachedTorrent) {
	c.torrentsMutex.Lock()
	defer c.torrentsMutex.Unlock()
	for _, t := range torrents {
		c.torrents[t.Id] = t
		c.torrentsNames[t.Name] = t
	}
	// Rebuild the listing synchronously while the write lock is held.
	// The original used `go c.refreshListings()`, so the iteration over
	// c.torrents ran after the lock was released — a data race with any
	// concurrent writer. setTorrent already calls it under the lock.
	c.refreshListings()
	go func() {
		if err := c.SaveTorrents(); err != nil {
			c.logger.Debug().Err(err).Msgf("Failed to save torrents")
		}
	}()
}
// GetTorrents returns a snapshot copy of the Id-keyed torrent map, so
// callers can iterate it without holding the cache lock.
func (c *Cache) GetTorrents() map[string]*CachedTorrent {
	c.torrentsMutex.RLock()
	defer c.torrentsMutex.RUnlock()
	snapshot := make(map[string]*CachedTorrent, len(c.torrents))
	for id, ct := range c.torrents {
		snapshot[id] = ct
	}
	return snapshot
}
// GetTorrentNames returns a snapshot copy of the name-keyed torrent map.
// The original handed out the internal map itself after releasing the
// read lock, letting callers iterate it concurrently with writers (a data
// race); copying matches GetTorrents' behavior.
func (c *Cache) GetTorrentNames() map[string]*CachedTorrent {
	c.torrentsMutex.RLock()
	defer c.torrentsMutex.RUnlock()
	snapshot := make(map[string]*CachedTorrent, len(c.torrentsNames))
	for name, ct := range c.torrentsNames {
		snapshot[name] = ct
	}
	return snapshot
}
// Manager holds one Cache per configured debrid client, keyed by the
// client's name (client.GetName()).
type Manager struct {
	caches map[string]*Cache
}
// NewCacheManager builds a Manager with one Cache per client, keyed by
// client name. The caches are constructed but not started here; the map
// is pre-sized to the number of clients.
func NewCacheManager(clients []debrid.Client) *Manager {
	m := &Manager{
		caches: make(map[string]*Cache, len(clients)),
	}
	for _, client := range clients {
		m.caches[client.GetName()] = NewCache(client)
	}
	return m
}
// GetCaches returns the internal name→Cache map (not a copy); callers
// should treat it as read-only.
func (m *Manager) GetCaches() map[string]*Cache {
	return m.caches
}
// GetCache returns the Cache for the given debrid name, or nil if no
// cache is registered under that name.
func (m *Manager) GetCache(debridName string) *Cache {
	return m.caches[debridName]
}
// NewCache constructs a Cache for the given client, with its on-disk
// snapshots rooted at <config path>/cache/<client name>. The cache is
// idle until Start is called.
func NewCache(client debrid.Client) *Cache {
	cacheDir := filepath.Join(config.GetConfig().Path, "cache", client.GetName())
	c := &Cache{
		dir:           cacheDir,
		client:        client,
		logger:        client.GetLogger(),
		torrents:      make(map[string]*CachedTorrent),
		torrentsNames: make(map[string]*CachedTorrent),
		downloadLinks: make(map[string]string),
		workers:       200,
	}
	return c
}
// Start creates the cache directory, performs an initial blocking Sync
// against the debrid service, then launches two background tasks: an
// initial download-link refresh (serialized by
// downloadLinksRefreshMutex) and the long-running Refresh worker.
// NOTE(review): neither goroutine has a cancellation/shutdown hook; they
// live for the process lifetime — confirm that is intended.
func (c *Cache) Start() error {
	if err := os.MkdirAll(c.dir, 0755); err != nil {
		return fmt.Errorf("failed to create cache directory: %w", err)
	}
	if err := c.Sync(); err != nil {
		return fmt.Errorf("failed to sync cache: %w", err)
	}
	// initial download links
	go func() {
		// lock download refresh mutex
		c.downloadLinksRefreshMutex.Lock()
		defer c.downloadLinksRefreshMutex.Unlock()
		// This prevents the download links from being refreshed twice
		c.refreshDownloadLinks()
	}()
	go func() {
		err := c.Refresh()
		if err != nil {
			c.logger.Error().Err(err).Msg("Failed to start cache refresh worker")
		}
	}()
	return nil
}
// Close releases the badger database handle, if one was ever opened.
func (c *Cache) Close() error {
	if c.db == nil {
		return nil
	}
	return c.db.Close()
}
// load reads every *.json torrent snapshot in the cache directory and
// returns the entries that contain at least one file (assumed complete).
// Unreadable or malformed files are skipped with a debug log.
func (c *Cache) load() ([]*CachedTorrent, error) {
	result := make([]*CachedTorrent, 0)
	if err := os.MkdirAll(c.dir, 0755); err != nil {
		return result, fmt.Errorf("failed to create cache directory: %w", err)
	}
	entries, err := os.ReadDir(c.dir)
	if err != nil {
		return result, fmt.Errorf("failed to read cache directory: %w", err)
	}
	for _, entry := range entries {
		if entry.IsDir() || filepath.Ext(entry.Name()) != ".json" {
			continue
		}
		fullPath := filepath.Join(c.dir, entry.Name())
		raw, readErr := os.ReadFile(fullPath)
		if readErr != nil {
			c.logger.Debug().Err(readErr).Msgf("Failed to read file: %s", fullPath)
			continue
		}
		var ct CachedTorrent
		if decodeErr := json.Unmarshal(raw, &ct); decodeErr != nil {
			c.logger.Debug().Err(decodeErr).Msgf("Failed to unmarshal file: %s", fullPath)
			continue
		}
		// A snapshot with files is assumed to be a complete torrent.
		if len(ct.Files) != 0 {
			result = append(result, &ct)
		}
	}
	return result, nil
}
// GetTorrent looks up a cached torrent by its debrid Id; nil when absent.
func (c *Cache) GetTorrent(id string) *CachedTorrent {
	c.torrentsMutex.RLock()
	defer c.torrentsMutex.RUnlock()
	ct, ok := c.torrents[id]
	if !ok {
		return nil
	}
	return ct
}
// GetTorrentByName looks up a cached torrent by its display name; nil
// when absent.
func (c *Cache) GetTorrentByName(name string) *CachedTorrent {
	ct, ok := c.GetTorrentNames()[name]
	if !ok {
		return nil
	}
	return ct
}
// SaveTorrents persists every cached torrent to disk, aborting on the
// first write error.
func (c *Cache) SaveTorrents() error {
	for _, ct := range c.GetTorrents() {
		err := c.SaveTorrent(ct)
		if err != nil {
			return err
		}
	}
	return nil
}
// SaveTorrent writes ct as indented JSON to <dir>/<Id>.json using a
// write-to-temp-then-rename sequence, so readers never observe a partial
// file.
//
// Improvements over the original: the temp file is fully written and
// closed before the rename (the old deferred Close kept it open across
// os.Rename, which fails on Windows), and a stale temp file is removed
// when the rename fails.
func (c *Cache) SaveTorrent(ct *CachedTorrent) error {
	data, err := json.MarshalIndent(ct, "", " ")
	if err != nil {
		return fmt.Errorf("failed to marshal torrent: %w", err)
	}
	fileName := ct.Torrent.Id + ".json"
	filePath := filepath.Join(c.dir, fileName)
	tmpFile := filePath + ".tmp"
	// os.WriteFile opens, writes, and closes in one step, replacing the
	// manual create/bufio/flush sequence.
	if err := os.WriteFile(tmpFile, data, 0644); err != nil {
		return fmt.Errorf("failed to write temp file: %w", err)
	}
	if err := os.Rename(tmpFile, filePath); err != nil {
		_ = os.Remove(tmpFile) // best effort: don't leave stale temp files behind
		return fmt.Errorf("failed to rename temp file: %w", err)
	}
	return nil
}
// Sync loads on-disk snapshots into memory, fetches the live torrent list
// from the debrid service, and ingests (via sync) any torrents not yet
// cached. A failure to load local snapshots is non-fatal; a failure to
// reach the service is.
func (c *Cache) Sync() error {
	cachedTorrents, err := c.load()
	if err != nil {
		c.logger.Debug().Err(err).Msg("Failed to load cache")
	}
	// Write these torrents to the cache
	c.setTorrents(cachedTorrents)
	c.logger.Info().Msgf("Loaded %d torrents from cache", len(cachedTorrents))
	torrents, err := c.client.GetTorrents()
	if err != nil {
		// Check the error before logging the count; the original logged
		// "Got 0 torrents" on failure, before the error check.
		return fmt.Errorf("failed to sync torrents: %v", err)
	}
	c.logger.Info().Msgf("Got %d torrents from %s", len(torrents), c.client.GetName())
	newTorrents := make([]*torrent.Torrent, 0)
	for _, t := range torrents {
		// GetTorrent takes the read lock; the original indexed c.torrents
		// directly without holding torrentsMutex.
		if c.GetTorrent(t.Id) == nil {
			newTorrents = append(newTorrents, t)
		}
	}
	c.logger.Info().Msgf("Found %d new torrents", len(newTorrents))
	if len(newTorrents) > 0 {
		if err := c.sync(newTorrents); err != nil {
			return fmt.Errorf("failed to sync torrents: %v", err)
		}
	}
	return nil
}
// sync ingests torrents concurrently: a pool of workers drains a channel
// and calls processTorrent on each item. Per-torrent failures are counted
// and logged but never abort the run; progress is logged every 1000
// torrents.
func (c *Cache) sync(torrents []*torrent.Torrent) error {
	// Balance between CPU and IO.
	// NOTE(review): c.workers (set in NewCache) is ignored here — confirm
	// whether the configured worker count should apply instead.
	workers := runtime.NumCPU() * 50
	workChan := make(chan *torrent.Torrent, workers*2)
	// Atomic counters for progress/error tracking across workers.
	var processed int64
	var errorCount int64
	// Context allows aborting the feed loop on critical errors.
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	var wg sync.WaitGroup
	for i := 0; i < workers; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for {
				select {
				case t, ok := <-workChan:
					if !ok {
						return // Channel closed, exit goroutine
					}
					if err := c.processTorrent(t); err != nil {
						c.logger.Error().Err(err).Str("torrent", t.Name).Msg("sync error")
						atomic.AddInt64(&errorCount, 1)
					}
					count := atomic.AddInt64(&processed, 1)
					if count%1000 == 0 {
						c.logger.Info().Msgf("Progress: %d/%d torrents processed", count, len(torrents))
					}
				case <-ctx.Done():
					return // Context cancelled, exit goroutine
				}
			}
		}()
	}
	// Feed work to the workers. The original used a bare `break` inside
	// the select, which only exits the select statement — not the loop —
	// so cancellation never actually stopped the feed. The label fixes it.
feed:
	for _, t := range torrents {
		select {
		case workChan <- t:
			// Work sent successfully
		case <-ctx.Done():
			break feed // Context cancelled
		}
	}
	// Signal workers that no more work is coming, then wait for them.
	close(workChan)
	wg.Wait()
	c.logger.Info().Msgf("Sync complete: %d torrents processed, %d errors", len(torrents), errorCount)
	return nil
}
// processTorrent fetches the torrent's file list from the debrid service
// and stores the refreshed entry in the cache. IsComplete is true when at
// least one file was returned.
func (c *Cache) processTorrent(t *torrent.Torrent) error {
	if err := c.client.UpdateTorrent(t); err != nil {
		return fmt.Errorf("failed to get torrent files: %v", err)
	}
	c.setTorrent(&CachedTorrent{
		Torrent:    t,
		LastRead:   time.Now(),
		IsComplete: len(t.Files) > 0,
	})
	return nil
}
// GetDownloadLink resolves a direct download URL for one file of a cached
// torrent. Order of attempts: (1) the in-memory restricted-link cache;
// (2) if the file has no restricted link and the torrent is not marked
// complete, refresh the torrent from the debrid service; (3) ask the
// client to unrestrict the link. Returns "" whenever a link cannot be
// produced.
func (c *Cache) GetDownloadLink(torrentId, filename, fileLink string) string {
	// Check link cache
	if dl := c.checkDownloadLink(fileLink); dl != "" {
		return dl
	}
	ct := c.GetTorrent(torrentId)
	if ct == nil {
		return ""
	}
	// Missing key yields the zero File (empty Link), handled below.
	file := ct.Files[filename]
	if file.Link == "" {
		// file link is empty, refresh the torrent to get restricted links
		if ct.IsComplete {
			// Already fully fetched yet still no link: nothing more to do.
			return ""
		}
		ct = c.refreshTorrent(ct) // Refresh the torrent from the debrid service
		if ct == nil {
			return ""
		} else {
			file = ct.Files[filename]
		}
	}
	c.logger.Debug().Msgf("Getting download link for %s", ct.Name)
	f := c.client.GetDownloadLink(ct.Torrent, &file)
	if f == nil {
		return ""
	}
	file.DownloadLink = f.DownloadLink
	// NOTE(review): ct.Files is written here without torrentsMutex while
	// other goroutines may read the same map — confirm this is safe under
	// concurrent WebDAV requests.
	ct.Files[filename] = file
	// Persist the new link and the updated torrent in the background.
	go c.updateDownloadLink(f)
	go c.setTorrent(ct)
	return f.DownloadLink
}
// updateDownloadLink records the unrestricted download URL for a file,
// keyed by its restricted link, under the download-links lock.
func (c *Cache) updateDownloadLink(file *torrent.File) {
	c.downloadLinksMutex.Lock()
	c.downloadLinks[file.Link] = file.DownloadLink
	c.downloadLinksMutex.Unlock()
}
// checkDownloadLink returns the cached unrestricted URL for a restricted
// link, or "" when none is cached.
//
// The map is written by updateDownloadLink and refreshDownloadLinks under
// downloadLinksMutex, so this read must take the lock too — the original
// unlocked read was a data race.
func (c *Cache) checkDownloadLink(link string) string {
	c.downloadLinksMutex.Lock()
	defer c.downloadLinksMutex.Unlock()
	// A missing key yields the zero value "", matching the old behavior.
	return c.downloadLinks[link]
}
// refreshTorrent re-fetches a torrent's metadata from the debrid client and
// re-caches it. Returns nil when the update fails or the torrent still has
// no files; otherwise returns the freshly cached entry.
func (c *Cache) refreshTorrent(t *CachedTorrent) *CachedTorrent {
	_torrent := t.Torrent
	err := c.client.UpdateTorrent(_torrent)
	if err != nil {
		c.logger.Debug().Msgf("Failed to get torrent files for %s: %v", t.Id, err)
		return nil
	}
	// NOTE(review): this checks t.Files (the cached entry's view), assuming
	// it reflects _torrent.Files after UpdateTorrent — confirm they alias.
	if len(t.Files) == 0 {
		return nil
	}
	ct := &CachedTorrent{
		Torrent:  _torrent,
		LastRead: time.Now(),
		// Always true here since the zero-file case returned above.
		IsComplete: len(t.Files) > 0,
	}
	c.setTorrent(ct)
	return ct
}
// refreshListingWorker periodically refreshes the WebDAV directory listings
// every 10 seconds, skipping a cycle when a previous refresh is still
// running (TryLock fails).
//
// NOTE(review): the worker has no stop signal; once started it runs for the
// process lifetime — consider accepting a context if shutdown matters.
func (c *Cache) refreshListingWorker() {
	c.logger.Info().Msg("WebDAV Background Refresh Worker started")
	refreshTicker := time.NewTicker(10 * time.Second)
	defer refreshTicker.Stop()
	// Ranging over the ticker channel is the idiomatic replacement for the
	// original for+single-case-select loop.
	for range refreshTicker.C {
		if c.torrentsRefreshMutex.TryLock() {
			func() {
				defer c.torrentsRefreshMutex.Unlock()
				c.refreshListings()
			}()
		} else {
			c.logger.Debug().Msg("Refresh already in progress")
		}
	}
}
// refreshDownloadLinksWorker periodically refreshes the cached download
// links every 40 minutes, skipping a cycle when a previous refresh is still
// running (TryLock fails).
//
// NOTE(review): like refreshListingWorker, this goroutine has no stop
// signal and runs for the process lifetime.
func (c *Cache) refreshDownloadLinksWorker() {
	c.logger.Info().Msg("WebDAV Background Refresh Download Worker started")
	refreshTicker := time.NewTicker(40 * time.Minute)
	defer refreshTicker.Stop()
	// Idiomatic replacement for the original for+single-case-select loop.
	for range refreshTicker.C {
		if c.downloadLinksRefreshMutex.TryLock() {
			func() {
				defer c.downloadLinksRefreshMutex.Unlock()
				c.refreshDownloadLinks()
			}()
		} else {
			c.logger.Debug().Msg("Refresh already in progress")
		}
	}
}
// refreshDownloadLinks re-fetches all download links from the debrid client
// and merges them into the in-memory link cache under the lock. Returns nil
// on fetch failure.
//
// NOTE(review): the returned map is the live internal map, not a copy —
// callers must not read it concurrently without the same lock; confirm how
// the return value is used.
func (c *Cache) refreshDownloadLinks() map[string]string {
	c.downloadLinksMutex.Lock()
	defer c.downloadLinksMutex.Unlock()
	downloadLinks, err := c.client.GetDownloads()
	if err != nil {
		c.logger.Debug().Err(err).Msg("Failed to get download links")
		return nil
	}
	// Merge: existing entries not present in the fresh fetch are kept.
	for k, v := range downloadLinks {
		c.downloadLinks[k] = v.DownloadLink
	}
	c.logger.Info().Msgf("Refreshed %d download links", len(downloadLinks))
	return c.downloadLinks
}
// GetClient returns the debrid client backing this cache.
func (c *Cache) GetClient() debrid.Client {
	return c.client
}
// Refresh starts the background workers that keep the directory listing and
// the download links up to date. It returns immediately; the workers run
// for the lifetime of the process.
//
// NOTE(review): calling Refresh more than once spawns duplicate workers —
// confirm it is only invoked once per cache instance.
func (c *Cache) Refresh() error {
	// For now, we just want to refresh the listing
	go c.refreshListingWorker()
	go c.refreshDownloadLinksWorker()
	return nil
}
+100 -55
View File
@@ -2,60 +2,101 @@ package webdav
import (
"fmt"
"github.com/sirrobot01/debrid-blackhole/pkg/debrid/cache"
"github.com/sirrobot01/debrid-blackhole/pkg/debrid/torrent"
"io"
"net/http"
"os"
"time"
)
var sharedClient = &http.Client{
Transport: &http.Transport{
// These settings help maintain persistent connections.
MaxIdleConns: 100,
IdleConnTimeout: 90 * time.Second,
DisableCompression: false,
DisableKeepAlives: false,
},
Timeout: 0,
}
type File struct {
cache *cache.Cache
cachedTorrent *cache.CachedTorrent
file *torrent.File
offset int64
isDir bool
children []os.FileInfo
reader io.ReadCloser
cache *Cache
fileId string
torrentId string
size int64
offset int64
isDir bool
children []os.FileInfo
reader io.ReadCloser
seekPending bool
content []byte
name string
downloadLink string
link string
}
// File interface implementations for File
// Close releases the underlying HTTP body stream, if one is open, and
// reports any error from closing it (the original discarded that error).
// Safe to call multiple times; subsequent calls are no-ops.
func (f *File) Close() error {
	if f.reader == nil {
		return nil
	}
	err := f.reader.Close()
	f.reader = nil
	return err
}
func (f *File) GetDownloadLink() string {
file := f.file
link, err := f.cache.GetFileDownloadLink(f.cachedTorrent, file)
if err != nil {
return ""
// Check if we already have a final URL cached
if f.downloadLink != "" {
return f.downloadLink
}
return link
downloadLink := f.cache.GetDownloadLink(f.torrentId, f.name, f.link)
if downloadLink != "" {
f.downloadLink = downloadLink
return downloadLink
}
return ""
}
func (f *File) Read(p []byte) (n int, err error) {
// Directories cannot be read as a byte stream.
if f.isDir {
return 0, os.ErrInvalid
}
// If we haven't started streaming the file yet, open the HTTP connection.
if f.reader == nil {
// Create an HTTP GET request to the file's URL.
// If file content is preloaded, read from memory.
if f.content != nil {
if f.offset >= int64(len(f.content)) {
return 0, io.EOF
}
n = copy(p, f.content[f.offset:])
f.offset += int64(n)
return n, nil
}
// If we haven't started streaming or a seek was requested,
// close the existing stream and start a new HTTP GET request.
if f.reader == nil || f.seekPending {
if f.reader != nil && f.seekPending {
f.reader.Close()
f.reader = nil
}
// Create a new HTTP GET request for the file's URL.
req, err := http.NewRequest("GET", f.GetDownloadLink(), nil)
if err != nil {
return 0, fmt.Errorf("failed to create HTTP request: %w", err)
}
// If we've already read some data (f.offset > 0), request only the remaining bytes.
// If we've already read some data, request only the remaining bytes.
if f.offset > 0 {
req.Header.Set("Range", fmt.Sprintf("bytes=%d-", f.offset))
}
// Execute the HTTP request.
resp, err := http.DefaultClient.Do(req)
resp, err := sharedClient.Do(req)
if err != nil {
return 0, fmt.Errorf("HTTP request error: %w", err)
}
@@ -68,6 +109,8 @@ func (f *File) Read(p []byte) (n int, err error) {
// Store the response body as our reader.
f.reader = resp.Body
// Reset the seek pending flag now that we've reinitialized the reader.
f.seekPending = false
}
// Read data from the HTTP stream.
@@ -88,27 +131,57 @@ func (f *File) Seek(offset int64, whence int) (int64, error) {
return 0, os.ErrInvalid
}
var newOffset int64
switch whence {
case io.SeekStart:
f.offset = offset
newOffset = offset
case io.SeekCurrent:
f.offset += offset
newOffset = f.offset + offset
case io.SeekEnd:
f.offset = f.file.Size - offset
newOffset = f.size - offset
default:
return 0, os.ErrInvalid
}
if f.offset < 0 {
f.offset = 0
if newOffset < 0 {
newOffset = 0
}
if f.offset > f.file.Size {
f.offset = f.file.Size
if newOffset > f.size {
newOffset = f.size
}
// If we're seeking to a new position, mark the reader for reset.
if newOffset != f.offset {
f.offset = newOffset
f.seekPending = true
}
return f.offset, nil
}
// Stat returns synthetic metadata for the entry: directories are reported
// as 0755 with size 0, files as 0644 with the cached size. ModTime is
// always "now" because no real timestamp is tracked.
func (f *File) Stat() (os.FileInfo, error) {
	if f.isDir {
		return &FileInfo{
			name:    f.name,
			size:    0,
			mode:    0755 | os.ModeDir,
			modTime: time.Now(),
			isDir:   true,
		}, nil
	}
	return &FileInfo{
		name:    f.name,
		size:    f.size,
		mode:    0644,
		modTime: time.Now(),
		isDir:   false,
	}, nil
}
// Write always fails with os.ErrPermission: the WebDAV filesystem is
// read-only.
func (f *File) Write(p []byte) (n int, err error) {
	return 0, os.ErrPermission
}
func (f *File) Readdir(count int) ([]os.FileInfo, error) {
if !f.isDir {
return nil, os.ErrInvalid
@@ -130,31 +203,3 @@ func (f *File) Readdir(count int) ([]os.FileInfo, error) {
f.children = f.children[count:]
return files, nil
}
func (f *File) Stat() (os.FileInfo, error) {
if f.isDir {
name := "/"
if f.cachedTorrent != nil {
name = f.cachedTorrent.Name
}
return &FileInfo{
name: name,
size: 0,
mode: 0755 | os.ModeDir,
modTime: time.Now(),
isDir: true,
}, nil
}
return &FileInfo{
name: f.file.Name,
size: f.file.Size,
mode: 0644,
modTime: time.Now(),
isDir: false,
}, nil
}
func (f *File) Write(p []byte) (n int, err error) {
return 0, os.ErrPermission
}
+327 -115
View File
@@ -1,100 +1,48 @@
package webdav
import (
"bytes"
"context"
"errors"
"fmt"
"github.com/rs/zerolog"
"github.com/sirrobot01/debrid-blackhole/pkg/debrid/cache"
"github.com/sirrobot01/debrid-blackhole/pkg/debrid/torrent"
"golang.org/x/net/webdav"
"html/template"
"io"
"net"
"net/http"
"net/http/httptest"
"os"
"path"
"sort"
"strings"
"sync"
"sync/atomic"
"time"
)
type Handler struct {
Name string
logger zerolog.Logger
cache *cache.Cache
rootListing atomic.Value
lastRefresh time.Time
refreshMutex sync.Mutex
RootPath string
Name string
logger zerolog.Logger
cache *Cache
lastRefresh time.Time
refreshMutex sync.Mutex
RootPath string
responseCache sync.Map
cacheTTL time.Duration
ctx context.Context
}
func NewHandler(name string, cache *cache.Cache, logger zerolog.Logger) *Handler {
func NewHandler(name string, cache *Cache, logger zerolog.Logger) *Handler {
h := &Handler{
Name: name,
cache: cache,
logger: logger,
RootPath: fmt.Sprintf("/%s", name),
ctx: context.Background(),
}
h.refreshRootListing()
// Start background refresh
go h.backgroundRefresh()
return h
}
func (h *Handler) backgroundRefresh() {
ticker := time.NewTicker(5 * time.Minute)
defer ticker.Stop()
for range ticker.C {
h.refreshRootListing()
}
}
func (h *Handler) refreshRootListing() {
h.refreshMutex.Lock()
defer h.refreshMutex.Unlock()
if time.Since(h.lastRefresh) < time.Minute {
return
}
torrents := h.cache.GetTorrentNames()
files := make([]os.FileInfo, 0, len(torrents))
for name, cachedTorrent := range torrents {
if cachedTorrent != nil && cachedTorrent.Torrent != nil {
files = append(files, &FileInfo{
name: name,
size: 0,
mode: 0755 | os.ModeDir,
modTime: time.Now(),
isDir: true,
})
}
}
sort.Slice(files, func(i, j int) bool {
return files[i].Name() < files[j].Name()
})
h.rootListing.Store(files)
h.lastRefresh = time.Now()
}
func (h *Handler) getParentRootPath() string {
return fmt.Sprintf("/webdav/%s", h.Name)
}
func (h *Handler) getRootFileInfos() []os.FileInfo {
if listing := h.rootListing.Load(); listing != nil {
return listing.([]os.FileInfo)
}
return []os.FileInfo{}
}
// Mkdir implements webdav.FileSystem
func (h *Handler) Mkdir(ctx context.Context, name string, perm os.FileMode) error {
return os.ErrPermission // Read-only filesystem
@@ -102,7 +50,27 @@ func (h *Handler) Mkdir(ctx context.Context, name string, perm os.FileMode) erro
// RemoveAll implements webdav.FileSystem
func (h *Handler) RemoveAll(ctx context.Context, name string) error {
return os.ErrPermission // Read-only filesystem
name = path.Clean("/" + name)
rootDir := h.getParentRootPath()
if name == rootDir {
return os.ErrPermission
}
torrentName, filename := getName(rootDir, name)
cachedTorrent := h.cache.GetTorrentByName(torrentName)
if cachedTorrent == nil {
return os.ErrNotExist
}
if filename == "" {
h.cache.GetClient().DeleteTorrent(cachedTorrent.Torrent)
go h.cache.refreshListings()
return nil
}
return os.ErrPermission
}
// Rename implements webdav.FileSystem
@@ -110,55 +78,124 @@ func (h *Handler) Rename(ctx context.Context, oldName, newName string) error {
return os.ErrPermission // Read-only filesystem
}
func (h *Handler) getParentRootPath() string {
return fmt.Sprintf("/webdav/%s", h.Name)
}
func (h *Handler) getTorrentsFolders() []os.FileInfo {
return h.cache.GetListing()
}
// getParentFiles returns the synthetic top-level entries of a debrid share:
// the "__all__" and "torrents" directories plus a version.txt file.
func (h *Handler) getParentFiles() []os.FileInfo {
	// Single source of truth for both the reported size and (elsewhere) the
	// file content; the original duplicated the "v1.0.0" literal.
	const version = "v1.0.0"
	now := time.Now()
	return []os.FileInfo{
		&FileInfo{
			name:    "__all__",
			size:    0,
			mode:    0755 | os.ModeDir,
			modTime: now,
			isDir:   true,
		},
		&FileInfo{
			name:    "torrents",
			size:    0,
			mode:    0755 | os.ModeDir,
			modTime: now,
			isDir:   true,
		},
		&FileInfo{
			name:    "version.txt",
			size:    int64(len(version)),
			mode:    0644,
			modTime: now,
			isDir:   false,
		},
	}
}
func (h *Handler) OpenFile(ctx context.Context, name string, flag int, perm os.FileMode) (webdav.File, error) {
name = path.Clean("/" + name)
rootDir := h.getParentRootPath()
// Fast path for root directory
if name == h.getParentRootPath() {
// Fast path optimization with a map lookup instead of string comparisons
switch name {
case rootDir:
return &File{
cache: h.cache,
isDir: true,
children: h.getRootFileInfos(),
children: h.getParentFiles(),
name: "/",
}, nil
}
// Remove root directory from path
name = strings.TrimPrefix(name, h.getParentRootPath())
name = strings.TrimPrefix(name, "/")
parts := strings.SplitN(name, "/", 2)
// Get torrent from cache using sync.Map
cachedTorrent := h.cache.GetTorrentByName(parts[0])
if cachedTorrent == nil {
h.logger.Debug().Msgf("Torrent not found: %s", parts[0])
return nil, os.ErrNotExist
}
if len(parts) == 1 {
case path.Join(rootDir, "version.txt"):
return &File{
cache: h.cache,
cachedTorrent: cachedTorrent,
isDir: true,
children: h.getTorrentFileInfos(cachedTorrent.Torrent),
cache: h.cache,
isDir: false,
content: []byte("v1.0.0"),
name: "version.txt",
size: int64(len("v1.0.0")),
}, nil
}
// Use a map for faster file lookup
fileMap := make(map[string]*torrent.File, len(cachedTorrent.Torrent.Files))
for i := range cachedTorrent.Torrent.Files {
fileMap[cachedTorrent.Torrent.Files[i].Name] = &cachedTorrent.Torrent.Files[i]
}
// Single check for top-level folders
if name == path.Join(rootDir, "__all__") || name == path.Join(rootDir, "torrents") {
folderName := strings.TrimPrefix(name, rootDir)
folderName = strings.TrimPrefix(folderName, "/")
// Only fetch the torrent folders once
children := h.getTorrentsFolders()
if file, ok := fileMap[parts[1]]; ok {
return &File{
cache: h.cache,
cachedTorrent: cachedTorrent,
file: file,
isDir: false,
cache: h.cache,
isDir: true,
children: children,
name: folderName,
size: 0,
}, nil
}
h.logger.Debug().Msgf("File not found: %s", name)
_path := strings.TrimPrefix(name, rootDir)
parts := strings.Split(strings.TrimPrefix(_path, "/"), "/")
if len(parts) >= 2 && (parts[0] == "__all__" || parts[0] == "torrents") {
torrentName := parts[1]
cachedTorrent := h.cache.GetTorrentByName(torrentName)
if cachedTorrent == nil {
h.logger.Debug().Msgf("Torrent not found: %s", torrentName)
return nil, os.ErrNotExist
}
if len(parts) == 2 {
// Torrent folder level
return &File{
cache: h.cache,
torrentId: cachedTorrent.Id,
isDir: true,
children: h.getFileInfos(cachedTorrent.Torrent),
name: cachedTorrent.Name,
size: cachedTorrent.Size,
}, nil
}
// Torrent file level
filename := strings.Join(parts[2:], "/")
if file, ok := cachedTorrent.Files[filename]; ok {
fi := &File{
cache: h.cache,
torrentId: cachedTorrent.Id,
fileId: file.Id,
isDir: false,
name: file.Name,
size: file.Size,
link: file.Link,
downloadLink: file.DownloadLink,
}
return fi, nil
}
}
h.logger.Info().Msgf("File not found: %s", name)
return nil, os.ErrNotExist
}
@@ -171,14 +208,15 @@ func (h *Handler) Stat(ctx context.Context, name string) (os.FileInfo, error) {
return f.Stat()
}
func (h *Handler) getTorrentFileInfos(torrent *torrent.Torrent) []os.FileInfo {
func (h *Handler) getFileInfos(torrent *torrent.Torrent) []os.FileInfo {
files := make([]os.FileInfo, 0, len(torrent.Files))
now := time.Now()
for _, file := range torrent.Files {
files = append(files, &FileInfo{
name: file.Name,
size: file.Size,
mode: 0644,
modTime: time.Now(),
modTime: now,
isDir: false,
})
}
@@ -186,13 +224,124 @@ func (h *Handler) getTorrentFileInfos(torrent *torrent.Torrent) []os.FileInfo {
}
func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
// Handle OPTIONS
if r.Method == "OPTIONS" {
w.WriteHeader(http.StatusOK)
return
}
// Create WebDAV handler
//Add specific PROPFIND optimization
if r.Method == "PROPFIND" {
propfindStart := time.Now()
// Check if this is the slow path we identified
if strings.Contains(r.URL.Path, "__all__") {
// Fast path for this specific directory
depth := r.Header.Get("Depth")
if depth == "1" || depth == "" {
// This is a listing request
// Use a cached response if available
cachedKey := "propfind_" + r.URL.Path
if cachedResponse, ok := h.responseCache.Load(cachedKey); ok {
responseData := cachedResponse.([]byte)
w.Header().Set("Content-Type", "application/xml; charset=utf-8")
w.Header().Set("Content-Length", fmt.Sprintf("%d", len(responseData)))
w.Write(responseData)
return
}
// Otherwise process normally but cache the result
responseRecorder := httptest.NewRecorder()
// Process the request with the standard handler
handler := &webdav.Handler{
FileSystem: h,
LockSystem: webdav.NewMemLS(),
Logger: func(r *http.Request, err error) {
if err != nil {
h.logger.Error().Err(err).Msg("WebDAV error")
}
},
}
handler.ServeHTTP(responseRecorder, r)
// Cache the response for future requests
responseData := responseRecorder.Body.Bytes()
h.responseCache.Store(cachedKey, responseData)
// Send to the real client
for k, v := range responseRecorder.Header() {
w.Header()[k] = v
}
w.WriteHeader(responseRecorder.Code)
w.Write(responseData)
return
}
}
h.logger.Debug().
Dur("propfind_prepare", time.Since(propfindStart)).
Msg("Proceeding with standard PROPFIND")
}
// Check if this is a GET request for a file
if r.Method == "GET" {
openStart := time.Now()
f, err := h.OpenFile(r.Context(), r.URL.Path, os.O_RDONLY, 0)
if err != nil {
h.logger.Debug().Err(err).Str("path", r.URL.Path).Msg("Failed to open file")
http.NotFound(w, r)
return
}
defer f.Close()
fi, err := f.Stat()
if err != nil {
h.logger.Error().Err(err).Msg("Failed to stat file")
http.Error(w, "Server Error", http.StatusInternalServerError)
return
}
if fi.IsDir() {
dirStart := time.Now()
h.serveDirectory(w, r, f)
h.logger.Info().
Dur("directory_time", time.Since(dirStart)).
Msg("Directory served")
return
}
// For file requests, use http.ServeContent.
// Ensure f implements io.ReadSeeker.
rs, ok := f.(io.ReadSeeker)
if !ok {
// If not, read the entire file into memory as a fallback.
buf, err := io.ReadAll(f)
if err != nil {
h.logger.Error().Err(err).Msg("Failed to read file content")
http.Error(w, "Server Error", http.StatusInternalServerError)
return
}
rs = bytes.NewReader(buf)
}
// Set Content-Type based on file name.
fileName := fi.Name()
contentType := getContentType(fileName)
w.Header().Set("Content-Type", contentType)
// Serve the file with the correct modification time.
// http.ServeContent automatically handles Range requests.
http.ServeContent(w, r, fileName, fi.ModTime(), rs)
h.logger.Info().
Dur("open_attempt_time", time.Since(openStart)).
Msg("Served file using ServeContent")
return
}
// Default to standard WebDAV handler for other requests
handler := &webdav.Handler{
FileSystem: h,
LockSystem: webdav.NewMemLS(),
@@ -207,19 +356,34 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
},
}
// Special handling for GET requests on directories
if r.Method == "GET" {
if f, err := h.OpenFile(r.Context(), r.URL.Path, os.O_RDONLY, 0); err == nil {
if fi, err := f.Stat(); err == nil && fi.IsDir() {
h.serveDirectory(w, r, f)
return
}
f.Close()
}
}
handler.ServeHTTP(w, r)
}
// getContentType maps a file name to a MIME type based on its extension.
// The match is case-insensitive (so "MOVIE.MKV" is recognized too — the
// original only matched lowercase suffixes). Unknown extensions fall back
// to application/octet-stream.
func getContentType(fileName string) string {
	lower := strings.ToLower(fileName)
	switch {
	case strings.HasSuffix(lower, ".mp4"):
		return "video/mp4"
	case strings.HasSuffix(lower, ".mkv"):
		return "video/x-matroska"
	case strings.HasSuffix(lower, ".avi"):
		return "video/x-msvideo"
	case strings.HasSuffix(lower, ".mov"):
		return "video/quicktime"
	case strings.HasSuffix(lower, ".m4v"):
		return "video/x-m4v"
	case strings.HasSuffix(lower, ".ts"):
		return "video/mp2t"
	case strings.HasSuffix(lower, ".srt"):
		return "application/x-subrip"
	case strings.HasSuffix(lower, ".vtt"):
		return "text/vtt"
	}
	return "application/octet-stream"
}
func (h *Handler) serveDirectory(w http.ResponseWriter, r *http.Request, file webdav.File) {
var children []os.FileInfo
if f, ok := file.(*File); ok {
@@ -266,3 +430,51 @@ func (h *Handler) serveDirectory(w http.ResponseWriter, r *http.Request, file we
return
}
}
// ioCopy streams reader into w with flushing tuned for media delivery: the
// first chunk uses a small buffer and is flushed immediately for fast
// time-to-first-byte, then copying switches to a larger buffer flushed
// roughly every 2MB.
//
// A broken pipe (client hung up mid-stream) is treated as a normal end of
// streaming and not reported; any other read/write failure is returned —
// the original swallowed all errors and always returned nil.
func (h *Handler) ioCopy(reader io.Reader, w io.Writer) (int64, error) {
	buffer := make([]byte, 8*1024) // small initial buffer: fast first byte
	written := int64(0)
	firstChunk := true
	for {
		n, err := reader.Read(buffer)
		if n > 0 {
			nw, ew := w.Write(buffer[:n])
			written += int64(nw) // count partial writes too
			if ew != nil {
				var opErr *net.OpError
				if errors.As(ew, &opErr) && opErr.Err.Error() == "write: broken pipe" {
					h.logger.Debug().Msg("Client closed connection (normal for streaming)")
					return written, nil
				}
				return written, ew
			}
			if firstChunk {
				// Flush the very first chunk immediately, then grow the
				// buffer for throughput.
				if flusher, ok := w.(http.Flusher); ok {
					flusher.Flush()
				}
				firstChunk = false
				buffer = make([]byte, 64*1024) // 64KB for subsequent reads
			} else if written%(2*1024*1024) < int64(n) { // flush roughly every 2MB
				if flusher, ok := w.(http.Flusher); ok {
					flusher.Flush()
				}
			}
		}
		if err != nil {
			if err == io.EOF {
				return written, nil
			}
			h.logger.Error().Err(err).Msg("Error reading from file")
			return written, err
		}
	}
}
+14
View File
@@ -0,0 +1,14 @@
package webdav
import "strings"
// getName splits a WebDAV path such as /webdav/alldebrid/__all__/TorrentName
// into its first path element below rootDir (the torrent name) and the
// joined remainder (the filename). Both results are empty when fewer than
// two elements follow rootDir.
func getName(rootDir, path string) (string, string) {
	rel := strings.TrimPrefix(strings.TrimPrefix(path, rootDir), "/")
	segments := strings.SplitN(rel, "/", 2)
	if len(segments) < 2 {
		return "", ""
	}
	return segments[0], segments[1]
}
+6 -4
View File
@@ -9,7 +9,6 @@ import (
"github.com/sirrobot01/debrid-blackhole/pkg/service"
"html/template"
"net/http"
"os"
"sync"
)
@@ -23,8 +22,10 @@ func New() *WebDav {
w := &WebDav{
Handlers: make([]*Handler, 0),
}
for name, c := range svc.DebridCache.GetCaches() {
h := NewHandler(name, c, logger.NewLogger(fmt.Sprintf("%s-webdav", name), cfg.LogLevel, os.Stdout))
debrids := svc.Debrid.GetDebrids()
cacheManager := NewCacheManager(debrids)
for name, c := range cacheManager.GetCaches() {
h := NewHandler(name, c, logger.NewLogger(fmt.Sprintf("%s-webdav", name), cfg.LogLevel))
w.Handlers = append(w.Handlers, h)
}
return w
@@ -33,7 +34,7 @@ func New() *WebDav {
func (wd *WebDav) Routes() http.Handler {
chi.RegisterMethod("PROPFIND")
chi.RegisterMethod("PROPPATCH")
chi.RegisterMethod("MKCOL") // Note: it was "MKOL" in your example, should be "MKCOL"
chi.RegisterMethod("MKCOL")
chi.RegisterMethod("COPY")
chi.RegisterMethod("MOVE")
chi.RegisterMethod("LOCK")
@@ -97,6 +98,7 @@ func (wd *WebDav) setupRootHandler(r chi.Router) {
func (wd *WebDav) commonMiddleware(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("DAV", "1, 2")
w.Header().Set("Cache-Control", "max-age=3600")
w.Header().Set("Allow", "OPTIONS, PROPFIND, GET, HEAD, POST, PUT, DELETE, MKCOL, PROPPATCH, COPY, MOVE, LOCK, UNLOCK")
w.Header().Set("Access-Control-Allow-Origin", "*")
w.Header().Set("Access-Control-Allow-Methods", "OPTIONS, PROPFIND, GET, HEAD, POST, PUT, DELETE, MKCOL, PROPPATCH, COPY, MOVE, LOCK, UNLOCK")
+1 -2
View File
@@ -6,7 +6,6 @@ import (
"github.com/sirrobot01/debrid-blackhole/internal/config"
"github.com/sirrobot01/debrid-blackhole/internal/logger"
"github.com/sirrobot01/debrid-blackhole/pkg/service"
"os"
"sync"
"time"
)
@@ -20,7 +19,7 @@ func getLogger() zerolog.Logger {
once.Do(func() {
cfg := config.GetConfig()
_logInstance = logger.NewLogger("worker", cfg.LogLevel, os.Stdout)
_logInstance = logger.NewLogger("worker", cfg.LogLevel)
})
return _logInstance
}