initializing webdav server

This commit is contained in:
Mukhtar Akere
2025-03-18 10:02:10 +01:00
parent fa469c64c6
commit 5d2fabe20b
39 changed files with 1650 additions and 1141 deletions

542
pkg/webdav/cache.go Normal file
View File

@@ -0,0 +1,542 @@
package webdav
import (
"bufio"
"context"
"fmt"
"github.com/dgraph-io/badger/v4"
"github.com/goccy/go-json"
"github.com/rs/zerolog"
"github.com/sirrobot01/debrid-blackhole/pkg/debrid/debrid"
"os"
"path/filepath"
"runtime"
"sync"
"sync/atomic"
"time"
"github.com/sirrobot01/debrid-blackhole/internal/config"
"github.com/sirrobot01/debrid-blackhole/pkg/debrid/torrent"
)
// DownloadLinkCache is the JSON shape used to persist a single
// unrestricted download link.
type DownloadLinkCache struct {
	Link string `json:"download_link"`
}

// CachedTorrent wraps a torrent with cache bookkeeping: when it was last
// read and whether its file list has been fully fetched from the debrid
// service.
type CachedTorrent struct {
	*torrent.Torrent
	LastRead time.Time `json:"last_read"`
	IsComplete bool `json:"is_complete"`
}
// Cache holds the per-debrid-client state behind the WebDAV view: the
// in-memory torrent maps, the atomically published directory listing,
// and the restricted-link → unrestricted-link table, plus the mutexes
// that guard them.
type Cache struct {
	dir string // on-disk cache directory: <config>/cache/<client name>
	client debrid.Client // debrid service this cache mirrors
	db *badger.DB // optional badger handle; closed by Close if opened
	logger zerolog.Logger
	torrents map[string]*CachedTorrent // key: torrent.Id, value: *CachedTorrent
	torrentsNames map[string]*CachedTorrent // key: torrent.Name, value: torrent
	listings atomic.Value // holds []os.FileInfo, rebuilt by refreshListings
	downloadLinks map[string]string // key: file.Link, value: download link
	workers int // worker-pool size hint (set to 200 in NewCache)
	LastUpdated time.Time `json:"last_updated"`
	// refresh mutex
	torrentsRefreshMutex sync.Mutex // for refreshing torrents
	downloadLinksRefreshMutex sync.Mutex // for refreshing download links
	// Mutexes
	torrentsMutex sync.RWMutex // for torrents and torrentsNames
	downloadLinksMutex sync.Mutex // guards downloadLinks
}
// setTorrent registers t under both its id and its name while holding
// the write lock, rebuilds the published listing, and persists the
// torrent to disk in the background.
func (c *Cache) setTorrent(t *CachedTorrent) {
	c.torrentsMutex.Lock()
	defer c.torrentsMutex.Unlock()
	c.torrents[t.Id] = t
	c.torrentsNames[t.Name] = t
	// refreshListings reads c.torrents; safe here because the write lock
	// is still held for the duration of the call.
	c.refreshListings()
	// Persist asynchronously so callers are not blocked on disk IO.
	go func() {
		if err := c.SaveTorrent(t); err != nil {
			c.logger.Debug().Err(err).Msgf("Failed to save torrent %s", t.Id)
		}
	}()
}
// refreshListings rebuilds the root directory listing (one directory
// entry per cached torrent) and publishes it atomically via c.listings.
//
// It reads c.torrents WITHOUT taking a lock, so the caller must hold
// torrentsMutex (setTorrent calls this under the write lock).
func (c *Cache) refreshListings() {
	files := make([]os.FileInfo, 0, len(c.torrents))
	now := time.Now()
	for _, t := range c.torrents {
		if t != nil && t.Torrent != nil {
			files = append(files, &FileInfo{
				name: t.Name,
				size: 0,
				mode: 0755 | os.ModeDir,
				modTime: now,
				isDir: true,
			})
		}
	}
	// Atomic store of the complete ready-to-use slice
	c.listings.Store(files)
}
// GetListing returns the most recently published directory listing.
// It is safe to call before the first refreshListings has run: in that
// case it returns an empty slice instead of panicking on the type
// assertion of a nil atomic.Value.
func (c *Cache) GetListing() []os.FileInfo {
	if v := c.listings.Load(); v != nil {
		return v.([]os.FileInfo)
	}
	return []os.FileInfo{}
}
// setTorrents registers a batch of torrents under both id and name,
// rebuilds the listing, and persists all torrents in the background.
func (c *Cache) setTorrents(torrents []*CachedTorrent) {
	c.torrentsMutex.Lock()
	defer c.torrentsMutex.Unlock()
	for _, t := range torrents {
		c.torrents[t.Id] = t
		c.torrentsNames[t.Name] = t
	}
	// Rebuild the listing synchronously while the write lock is held.
	// Running it in a goroutine (as before) read c.torrents concurrently
	// with other writers — a data race.
	c.refreshListings()
	// Persist asynchronously; SaveTorrents takes its own locked snapshot.
	go func() {
		if err := c.SaveTorrents(); err != nil {
			c.logger.Debug().Err(err).Msg("Failed to save torrents")
		}
	}()
}
// GetTorrents returns a snapshot copy of the id → torrent map so callers
// can iterate freely without holding the cache lock.
func (c *Cache) GetTorrents() map[string]*CachedTorrent {
	c.torrentsMutex.RLock()
	defer c.torrentsMutex.RUnlock()
	snapshot := make(map[string]*CachedTorrent, len(c.torrents))
	for id, ct := range c.torrents {
		snapshot[id] = ct
	}
	return snapshot
}
// GetTorrentNames returns a snapshot copy of the name → torrent map.
// Returning a copy (mirroring GetTorrents) is required: the previous
// version handed out the internal map, which callers then read after the
// lock was released — a data race against concurrent writers.
func (c *Cache) GetTorrentNames() map[string]*CachedTorrent {
	c.torrentsMutex.RLock()
	defer c.torrentsMutex.RUnlock()
	result := make(map[string]*CachedTorrent, len(c.torrentsNames))
	for name, ct := range c.torrentsNames {
		result[name] = ct
	}
	return result
}
// Manager owns one Cache per configured debrid client, keyed by the
// client's name.
type Manager struct {
	caches map[string]*Cache
}
// NewCacheManager builds a Manager with one Cache per debrid client,
// keyed by each client's name.
func NewCacheManager(clients []debrid.Client) *Manager {
	caches := make(map[string]*Cache, len(clients))
	for _, client := range clients {
		caches[client.GetName()] = NewCache(client)
	}
	return &Manager{caches: caches}
}
// GetCaches returns the name → Cache map for all managed clients.
func (m *Manager) GetCaches() map[string]*Cache {
	return m.caches
}
// GetCache returns the Cache for the named debrid client, or nil when no
// such client is managed.
func (m *Manager) GetCache(debridName string) *Cache {
	return m.caches[debridName]
}
// NewCache constructs a Cache for the given client, rooted at
// <config path>/cache/<client name>. Call Start before use.
func NewCache(client debrid.Client) *Cache {
	cacheDir := filepath.Join(config.GetConfig().Path, "cache", client.GetName())
	return &Cache{
		dir:           cacheDir,
		client:        client,
		logger:        client.GetLogger(),
		torrents:      make(map[string]*CachedTorrent),
		torrentsNames: make(map[string]*CachedTorrent),
		downloadLinks: make(map[string]string),
		workers:       200,
	}
}
// Start prepares the cache directory, performs an initial Sync against
// the debrid service, then launches the background workers: one
// immediate download-link refresh plus the periodic refresh loops via
// Refresh. It returns once Sync completes; the workers run for the
// lifetime of the process (there is currently no shutdown signal).
func (c *Cache) Start() error {
	if err := os.MkdirAll(c.dir, 0755); err != nil {
		return fmt.Errorf("failed to create cache directory: %w", err)
	}
	if err := c.Sync(); err != nil {
		return fmt.Errorf("failed to sync cache: %w", err)
	}
	// initial download links
	go func() {
		// lock download refresh mutex
		c.downloadLinksRefreshMutex.Lock()
		defer c.downloadLinksRefreshMutex.Unlock()
		// This prevents the download links from being refreshed twice
		// (the periodic worker skips its cycle while this lock is held).
		c.refreshDownloadLinks()
	}()
	go func() {
		err := c.Refresh()
		if err != nil {
			c.logger.Error().Err(err).Msg("Failed to start cache refresh worker")
		}
	}()
	return nil
}
// Close releases the badger handle when one was opened; it is a no-op
// when the database was never initialized.
func (c *Cache) Close() error {
	if c.db == nil {
		return nil
	}
	return c.db.Close()
}
// load reads every *.json file in the cache directory and unmarshals it
// into a CachedTorrent. Unreadable or malformed entries are logged at
// debug level and skipped. Only torrents with a non-empty file list
// (i.e. fully fetched ones) are returned.
func (c *Cache) load() ([]*CachedTorrent, error) {
	result := make([]*CachedTorrent, 0)
	if err := os.MkdirAll(c.dir, 0755); err != nil {
		return result, fmt.Errorf("failed to create cache directory: %w", err)
	}
	entries, err := os.ReadDir(c.dir)
	if err != nil {
		return result, fmt.Errorf("failed to read cache directory: %w", err)
	}
	for _, entry := range entries {
		if entry.IsDir() || filepath.Ext(entry.Name()) != ".json" {
			continue
		}
		fullPath := filepath.Join(c.dir, entry.Name())
		raw, readErr := os.ReadFile(fullPath)
		if readErr != nil {
			c.logger.Debug().Err(readErr).Msgf("Failed to read file: %s", fullPath)
			continue
		}
		var cached CachedTorrent
		if jsonErr := json.Unmarshal(raw, &cached); jsonErr != nil {
			c.logger.Debug().Err(jsonErr).Msgf("Failed to unmarshal file: %s", fullPath)
			continue
		}
		if len(cached.Files) == 0 {
			continue
		}
		// A non-empty file list means the torrent was fully fetched.
		result = append(result, &cached)
	}
	return result, nil
}
// GetTorrent looks up a cached torrent by its debrid id; nil when the id
// is unknown.
func (c *Cache) GetTorrent(id string) *CachedTorrent {
	c.torrentsMutex.RLock()
	defer c.torrentsMutex.RUnlock()
	return c.torrents[id]
}
// GetTorrentByName looks up a cached torrent by display name; nil when
// no torrent carries that name.
func (c *Cache) GetTorrentByName(name string) *CachedTorrent {
	return c.GetTorrentNames()[name]
}
// SaveTorrents persists every cached torrent to disk via SaveTorrent,
// stopping at the first failure. It iterates over a locked snapshot
// from GetTorrents, so no cache lock is held during disk IO.
func (c *Cache) SaveTorrents() error {
	for _, ct := range c.GetTorrents() {
		if err := c.SaveTorrent(ct); err != nil {
			return err
		}
	}
	return nil
}
// SaveTorrent persists a single torrent as <id>.json in the cache
// directory, writing to a temp file first and renaming it into place so
// readers never observe a partially written file.
func (c *Cache) SaveTorrent(ct *CachedTorrent) error {
	data, err := json.MarshalIndent(ct, "", " ")
	if err != nil {
		return fmt.Errorf("failed to marshal torrent: %w", err)
	}
	filePath := filepath.Join(c.dir, ct.Torrent.Id+".json")
	tmpFile := filePath + ".tmp"
	f, err := os.Create(tmpFile)
	if err != nil {
		return fmt.Errorf("failed to create temp file: %w", err)
	}
	w := bufio.NewWriter(f)
	if _, err := w.Write(data); err != nil {
		f.Close()
		os.Remove(tmpFile) // best-effort cleanup of the partial temp file
		return fmt.Errorf("failed to write data: %w", err)
	}
	if err := w.Flush(); err != nil {
		f.Close()
		os.Remove(tmpFile)
		return fmt.Errorf("failed to flush data: %w", err)
	}
	// Close BEFORE renaming: the previous version renamed while the file
	// was still open (deferred Close), which fails on Windows, and its
	// Close error was silently discarded.
	if err := f.Close(); err != nil {
		os.Remove(tmpFile)
		return fmt.Errorf("failed to close temp file: %w", err)
	}
	return os.Rename(tmpFile, filePath)
}
// Sync loads torrents persisted on disk into the in-memory maps, then
// fetches the full torrent list from the debrid client and processes any
// torrents that are not yet cached.
func (c *Cache) Sync() error {
	cachedTorrents, err := c.load()
	if err != nil {
		c.logger.Debug().Err(err).Msg("Failed to load cache")
	}
	// Write these torrents to the cache
	c.setTorrents(cachedTorrents)
	c.logger.Info().Msgf("Loaded %d torrents from cache", len(cachedTorrents))
	torrents, err := c.client.GetTorrents()
	if err != nil {
		return fmt.Errorf("failed to sync torrents: %w", err)
	}
	// Log only after the error check; the previous version logged the
	// count of a possibly-nil slice before checking err.
	c.logger.Info().Msgf("Got %d torrents from %s", len(torrents), c.client.GetName())
	// Use a locked snapshot for the membership test: reading c.torrents
	// directly here raced with concurrent writers.
	existing := c.GetTorrents()
	newTorrents := make([]*torrent.Torrent, 0)
	for _, t := range torrents {
		if _, ok := existing[t.Id]; !ok {
			newTorrents = append(newTorrents, t)
		}
	}
	c.logger.Info().Msgf("Found %d new torrents", len(newTorrents))
	if len(newTorrents) > 0 {
		if err := c.sync(newTorrents); err != nil {
			return fmt.Errorf("failed to sync torrents: %w", err)
		}
	}
	return nil
}
// sync fans the given torrents out to a pool of workers that fetch each
// torrent's file list and cache it. It blocks until all work is done and
// reports the processed/error counts.
func (c *Cache) sync(torrents []*torrent.Torrent) error {
	// Balance between CPU-bound and IO-bound work.
	workers := runtime.NumCPU() * 50
	workChan := make(chan *torrent.Torrent, workers*2)
	// Atomic counters for progress tracking across workers.
	var processed int64
	var errorCount int64
	// Context for cancelling workers on critical errors.
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	var wg sync.WaitGroup
	// Start workers.
	for i := 0; i < workers; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for {
				select {
				case t, ok := <-workChan:
					if !ok {
						return // Channel closed, exit goroutine
					}
					if err := c.processTorrent(t); err != nil {
						c.logger.Error().Err(err).Str("torrent", t.Name).Msg("sync error")
						atomic.AddInt64(&errorCount, 1)
					}
					if count := atomic.AddInt64(&processed, 1); count%1000 == 0 {
						c.logger.Info().Msgf("Progress: %d/%d torrents processed", count, len(torrents))
					}
				case <-ctx.Done():
					return // Context cancelled, exit goroutine
				}
			}
		}()
	}
	// Feed work to workers.
feed:
	for _, t := range torrents {
		select {
		case workChan <- t:
			// Work sent successfully
		case <-ctx.Done():
			// BUG FIX: a bare `break` here only exited the select, not the
			// loop, so cancellation never actually stopped the feeder.
			break feed
		}
	}
	// Signal workers that no more work is coming, then wait for them.
	close(workChan)
	wg.Wait()
	// Report the actual processed count, not the submitted count.
	c.logger.Info().Msgf("Sync complete: %d torrents processed, %d errors",
		atomic.LoadInt64(&processed), atomic.LoadInt64(&errorCount))
	return nil
}
func (c *Cache) processTorrent(t *torrent.Torrent) error {
var err error
err = c.client.UpdateTorrent(t)
if err != nil {
return fmt.Errorf("failed to get torrent files: %v", err)
}
ct := &CachedTorrent{
Torrent: t,
LastRead: time.Now(),
IsComplete: len(t.Files) > 0,
}
c.setTorrent(ct)
return nil
}
// GetDownloadLink resolves an unrestricted download URL for a file in a
// cached torrent. It first consults the in-memory link table; on a miss
// it asks the debrid client, refreshing the torrent first when its
// restricted link is missing. Returns "" when the link cannot be
// resolved.
//
// NOTE(review): ct.Files is read and written here without holding
// torrentsMutex, so concurrent calls for the same torrent can race on
// the Files map — confirm whether Files access needs its own guard.
func (c *Cache) GetDownloadLink(torrentId, filename, fileLink string) string {
	// Check link cache
	if dl := c.checkDownloadLink(fileLink); dl != "" {
		return dl
	}
	ct := c.GetTorrent(torrentId)
	if ct == nil {
		return ""
	}
	file := ct.Files[filename]
	if file.Link == "" {
		// file link is empty, refresh the torrent to get restricted links
		if ct.IsComplete {
			// Already fully fetched: a refresh would not add the link.
			return ""
		}
		ct = c.refreshTorrent(ct) // Refresh the torrent from the debrid service
		if ct == nil {
			return ""
		} else {
			file = ct.Files[filename]
		}
	}
	c.logger.Debug().Msgf("Getting download link for %s", ct.Name)
	f := c.client.GetDownloadLink(ct.Torrent, &file)
	if f == nil {
		return ""
	}
	file.DownloadLink = f.DownloadLink
	ct.Files[filename] = file
	// Cache the unrestricted link and re-persist the torrent, both async.
	go c.updateDownloadLink(f)
	go c.setTorrent(ct)
	return f.DownloadLink
}
// updateDownloadLink records the unrestricted link for a restricted file
// link, under the download-links mutex.
func (c *Cache) updateDownloadLink(file *torrent.File) {
	c.downloadLinksMutex.Lock()
	defer c.downloadLinksMutex.Unlock()
	c.downloadLinks[file.Link] = file.DownloadLink
}
// checkDownloadLink returns the cached unrestricted link for a
// restricted file link, or "" when it is not cached. It takes
// downloadLinksMutex: the previous unguarded map read raced with
// updateDownloadLink and refreshDownloadLinks.
func (c *Cache) checkDownloadLink(link string) string {
	c.downloadLinksMutex.Lock()
	defer c.downloadLinksMutex.Unlock()
	if dl, ok := c.downloadLinks[link]; ok {
		return dl
	}
	return ""
}
// refreshTorrent re-fetches torrent metadata (including restricted file
// links) from the debrid service and re-caches the result. Returns nil
// when the update fails or the torrent still has no files.
func (c *Cache) refreshTorrent(t *CachedTorrent) *CachedTorrent {
	_torrent := t.Torrent
	err := c.client.UpdateTorrent(_torrent)
	if err != nil {
		c.logger.Debug().Msgf("Failed to get torrent files for %s: %v", t.Id, err)
		return nil
	}
	// t.Files comes from the embedded torrent, i.e. the same data that
	// UpdateTorrent just refreshed via _torrent.
	if len(t.Files) == 0 {
		return nil
	}
	ct := &CachedTorrent{
		Torrent: _torrent,
		LastRead: time.Now(),
		IsComplete: len(t.Files) > 0,
	}
	c.setTorrent(ct)
	return ct
}
// refreshListingWorker rebuilds the directory listing every 10 seconds.
// It uses TryLock on the refresh mutex so overlapping refreshes are
// skipped rather than queued, and holds the torrents read lock while
// rebuilding — the previous version called refreshListings with no lock,
// racing writers of c.torrents.
//
// NOTE(review): there is no stop channel/context, so this goroutine runs
// for the lifetime of the process.
func (c *Cache) refreshListingWorker() {
	c.logger.Info().Msg("WebDAV Background Refresh Worker started")
	refreshTicker := time.NewTicker(10 * time.Second)
	defer refreshTicker.Stop()
	for range refreshTicker.C {
		if !c.torrentsRefreshMutex.TryLock() {
			c.logger.Debug().Msg("Refresh already in progress")
			continue
		}
		// refreshListings reads c.torrents; hold the read lock while it runs.
		c.torrentsMutex.RLock()
		c.refreshListings()
		c.torrentsMutex.RUnlock()
		c.torrentsRefreshMutex.Unlock()
	}
}
// refreshDownloadLinksWorker re-fetches the download-link table from the
// debrid service every 40 minutes, skipping a cycle when another refresh
// is already running.
func (c *Cache) refreshDownloadLinksWorker() {
	c.logger.Info().Msg("WebDAV Background Refresh Download Worker started")
	ticker := time.NewTicker(40 * time.Minute)
	defer ticker.Stop()
	for range ticker.C {
		if !c.downloadLinksRefreshMutex.TryLock() {
			c.logger.Debug().Msg("Refresh already in progress")
			continue
		}
		c.refreshDownloadLinks()
		c.downloadLinksRefreshMutex.Unlock()
	}
}
// refreshDownloadLinks fetches the current downloads from the debrid
// client and merges them into the link table. It returns a snapshot of
// the table, or nil when the fetch fails.
func (c *Cache) refreshDownloadLinks() map[string]string {
	c.downloadLinksMutex.Lock()
	defer c.downloadLinksMutex.Unlock()
	downloadLinks, err := c.client.GetDownloads()
	if err != nil {
		c.logger.Debug().Err(err).Msg("Failed to get download links")
		return nil
	}
	for k, v := range downloadLinks {
		c.downloadLinks[k] = v.DownloadLink
	}
	c.logger.Info().Msgf("Refreshed %d download links", len(downloadLinks))
	// Return a copy rather than the internal map: c.downloadLinks is
	// guarded by downloadLinksMutex and must not escape the lock, as it
	// did in the previous version.
	snapshot := make(map[string]string, len(c.downloadLinks))
	for k, v := range c.downloadLinks {
		snapshot[k] = v
	}
	return snapshot
}
// GetClient returns the debrid client this cache wraps.
func (c *Cache) GetClient() debrid.Client {
	return c.client
}
// Refresh launches the periodic background workers (listing refresh and
// download-link refresh) and returns immediately. It currently always
// returns nil; the error return is kept for interface stability.
func (c *Cache) Refresh() error {
	// For now, we just want to refresh the listing
	go c.refreshListingWorker()
	go c.refreshDownloadLinksWorker()
	return nil
}

View File

@@ -2,60 +2,101 @@ package webdav
import (
"fmt"
"github.com/sirrobot01/debrid-blackhole/pkg/debrid/cache"
"github.com/sirrobot01/debrid-blackhole/pkg/debrid/torrent"
"io"
"net/http"
"os"
"time"
)
var sharedClient = &http.Client{
Transport: &http.Transport{
// These settings help maintain persistent connections.
MaxIdleConns: 100,
IdleConnTimeout: 90 * time.Second,
DisableCompression: false,
DisableKeepAlives: false,
},
Timeout: 0,
}
type File struct {
cache *cache.Cache
cachedTorrent *cache.CachedTorrent
file *torrent.File
offset int64
isDir bool
children []os.FileInfo
reader io.ReadCloser
cache *Cache
fileId string
torrentId string
size int64
offset int64
isDir bool
children []os.FileInfo
reader io.ReadCloser
seekPending bool
content []byte
name string
downloadLink string
link string
}
// File interface implementations for File
func (f *File) Close() error {
if f.reader != nil {
f.reader.Close()
f.reader = nil
}
return nil
}
func (f *File) GetDownloadLink() string {
file := f.file
link, err := f.cache.GetFileDownloadLink(f.cachedTorrent, file)
if err != nil {
return ""
// Check if we already have a final URL cached
if f.downloadLink != "" {
return f.downloadLink
}
return link
downloadLink := f.cache.GetDownloadLink(f.torrentId, f.name, f.link)
if downloadLink != "" {
f.downloadLink = downloadLink
return downloadLink
}
return ""
}
func (f *File) Read(p []byte) (n int, err error) {
// Directories cannot be read as a byte stream.
if f.isDir {
return 0, os.ErrInvalid
}
// If we haven't started streaming the file yet, open the HTTP connection.
if f.reader == nil {
// Create an HTTP GET request to the file's URL.
// If file content is preloaded, read from memory.
if f.content != nil {
if f.offset >= int64(len(f.content)) {
return 0, io.EOF
}
n = copy(p, f.content[f.offset:])
f.offset += int64(n)
return n, nil
}
// If we haven't started streaming or a seek was requested,
// close the existing stream and start a new HTTP GET request.
if f.reader == nil || f.seekPending {
if f.reader != nil && f.seekPending {
f.reader.Close()
f.reader = nil
}
// Create a new HTTP GET request for the file's URL.
req, err := http.NewRequest("GET", f.GetDownloadLink(), nil)
if err != nil {
return 0, fmt.Errorf("failed to create HTTP request: %w", err)
}
// If we've already read some data (f.offset > 0), request only the remaining bytes.
// If we've already read some data, request only the remaining bytes.
if f.offset > 0 {
req.Header.Set("Range", fmt.Sprintf("bytes=%d-", f.offset))
}
// Execute the HTTP request.
resp, err := http.DefaultClient.Do(req)
resp, err := sharedClient.Do(req)
if err != nil {
return 0, fmt.Errorf("HTTP request error: %w", err)
}
@@ -68,6 +109,8 @@ func (f *File) Read(p []byte) (n int, err error) {
// Store the response body as our reader.
f.reader = resp.Body
// Reset the seek pending flag now that we've reinitialized the reader.
f.seekPending = false
}
// Read data from the HTTP stream.
@@ -88,27 +131,57 @@ func (f *File) Seek(offset int64, whence int) (int64, error) {
return 0, os.ErrInvalid
}
var newOffset int64
switch whence {
case io.SeekStart:
f.offset = offset
newOffset = offset
case io.SeekCurrent:
f.offset += offset
newOffset = f.offset + offset
case io.SeekEnd:
f.offset = f.file.Size - offset
newOffset = f.size - offset
default:
return 0, os.ErrInvalid
}
if f.offset < 0 {
f.offset = 0
if newOffset < 0 {
newOffset = 0
}
if f.offset > f.file.Size {
f.offset = f.file.Size
if newOffset > f.size {
newOffset = f.size
}
// If we're seeking to a new position, mark the reader for reset.
if newOffset != f.offset {
f.offset = newOffset
f.seekPending = true
}
return f.offset, nil
}
func (f *File) Stat() (os.FileInfo, error) {
if f.isDir {
return &FileInfo{
name: f.name,
size: 0,
mode: 0755 | os.ModeDir,
modTime: time.Now(),
isDir: true,
}, nil
}
return &FileInfo{
name: f.name,
size: f.size,
mode: 0644,
modTime: time.Now(),
isDir: false,
}, nil
}
func (f *File) Write(p []byte) (n int, err error) {
return 0, os.ErrPermission
}
func (f *File) Readdir(count int) ([]os.FileInfo, error) {
if !f.isDir {
return nil, os.ErrInvalid
@@ -130,31 +203,3 @@ func (f *File) Readdir(count int) ([]os.FileInfo, error) {
f.children = f.children[count:]
return files, nil
}
func (f *File) Stat() (os.FileInfo, error) {
if f.isDir {
name := "/"
if f.cachedTorrent != nil {
name = f.cachedTorrent.Name
}
return &FileInfo{
name: name,
size: 0,
mode: 0755 | os.ModeDir,
modTime: time.Now(),
isDir: true,
}, nil
}
return &FileInfo{
name: f.file.Name,
size: f.file.Size,
mode: 0644,
modTime: time.Now(),
isDir: false,
}, nil
}
func (f *File) Write(p []byte) (n int, err error) {
return 0, os.ErrPermission
}

View File

@@ -1,100 +1,48 @@
package webdav
import (
"bytes"
"context"
"errors"
"fmt"
"github.com/rs/zerolog"
"github.com/sirrobot01/debrid-blackhole/pkg/debrid/cache"
"github.com/sirrobot01/debrid-blackhole/pkg/debrid/torrent"
"golang.org/x/net/webdav"
"html/template"
"io"
"net"
"net/http"
"net/http/httptest"
"os"
"path"
"sort"
"strings"
"sync"
"sync/atomic"
"time"
)
type Handler struct {
Name string
logger zerolog.Logger
cache *cache.Cache
rootListing atomic.Value
lastRefresh time.Time
refreshMutex sync.Mutex
RootPath string
Name string
logger zerolog.Logger
cache *Cache
lastRefresh time.Time
refreshMutex sync.Mutex
RootPath string
responseCache sync.Map
cacheTTL time.Duration
ctx context.Context
}
func NewHandler(name string, cache *cache.Cache, logger zerolog.Logger) *Handler {
func NewHandler(name string, cache *Cache, logger zerolog.Logger) *Handler {
h := &Handler{
Name: name,
cache: cache,
logger: logger,
RootPath: fmt.Sprintf("/%s", name),
ctx: context.Background(),
}
h.refreshRootListing()
// Start background refresh
go h.backgroundRefresh()
return h
}
func (h *Handler) backgroundRefresh() {
ticker := time.NewTicker(5 * time.Minute)
defer ticker.Stop()
for range ticker.C {
h.refreshRootListing()
}
}
func (h *Handler) refreshRootListing() {
h.refreshMutex.Lock()
defer h.refreshMutex.Unlock()
if time.Since(h.lastRefresh) < time.Minute {
return
}
torrents := h.cache.GetTorrentNames()
files := make([]os.FileInfo, 0, len(torrents))
for name, cachedTorrent := range torrents {
if cachedTorrent != nil && cachedTorrent.Torrent != nil {
files = append(files, &FileInfo{
name: name,
size: 0,
mode: 0755 | os.ModeDir,
modTime: time.Now(),
isDir: true,
})
}
}
sort.Slice(files, func(i, j int) bool {
return files[i].Name() < files[j].Name()
})
h.rootListing.Store(files)
h.lastRefresh = time.Now()
}
func (h *Handler) getParentRootPath() string {
return fmt.Sprintf("/webdav/%s", h.Name)
}
func (h *Handler) getRootFileInfos() []os.FileInfo {
if listing := h.rootListing.Load(); listing != nil {
return listing.([]os.FileInfo)
}
return []os.FileInfo{}
}
// Mkdir implements webdav.FileSystem
func (h *Handler) Mkdir(ctx context.Context, name string, perm os.FileMode) error {
return os.ErrPermission // Read-only filesystem
@@ -102,7 +50,27 @@ func (h *Handler) Mkdir(ctx context.Context, name string, perm os.FileMode) erro
// RemoveAll implements webdav.FileSystem
func (h *Handler) RemoveAll(ctx context.Context, name string) error {
return os.ErrPermission // Read-only filesystem
name = path.Clean("/" + name)
rootDir := h.getParentRootPath()
if name == rootDir {
return os.ErrPermission
}
torrentName, filename := getName(rootDir, name)
cachedTorrent := h.cache.GetTorrentByName(torrentName)
if cachedTorrent == nil {
return os.ErrNotExist
}
if filename == "" {
h.cache.GetClient().DeleteTorrent(cachedTorrent.Torrent)
go h.cache.refreshListings()
return nil
}
return os.ErrPermission
}
// Rename implements webdav.FileSystem
@@ -110,55 +78,124 @@ func (h *Handler) Rename(ctx context.Context, oldName, newName string) error {
return os.ErrPermission // Read-only filesystem
}
func (h *Handler) getParentRootPath() string {
return fmt.Sprintf("/webdav/%s", h.Name)
}
func (h *Handler) getTorrentsFolders() []os.FileInfo {
return h.cache.GetListing()
}
func (h *Handler) getParentFiles() []os.FileInfo {
now := time.Now()
rootFiles := []os.FileInfo{
&FileInfo{
name: "__all__",
size: 0,
mode: 0755 | os.ModeDir,
modTime: now,
isDir: true,
},
&FileInfo{
name: "torrents",
size: 0,
mode: 0755 | os.ModeDir,
modTime: now,
isDir: true,
},
&FileInfo{
name: "version.txt",
size: int64(len("v1.0.0")),
mode: 0644,
modTime: now,
isDir: false,
},
}
return rootFiles
}
func (h *Handler) OpenFile(ctx context.Context, name string, flag int, perm os.FileMode) (webdav.File, error) {
name = path.Clean("/" + name)
rootDir := h.getParentRootPath()
// Fast path for root directory
if name == h.getParentRootPath() {
// Fast path optimization with a map lookup instead of string comparisons
switch name {
case rootDir:
return &File{
cache: h.cache,
isDir: true,
children: h.getRootFileInfos(),
children: h.getParentFiles(),
name: "/",
}, nil
}
// Remove root directory from path
name = strings.TrimPrefix(name, h.getParentRootPath())
name = strings.TrimPrefix(name, "/")
parts := strings.SplitN(name, "/", 2)
// Get torrent from cache using sync.Map
cachedTorrent := h.cache.GetTorrentByName(parts[0])
if cachedTorrent == nil {
h.logger.Debug().Msgf("Torrent not found: %s", parts[0])
return nil, os.ErrNotExist
}
if len(parts) == 1 {
case path.Join(rootDir, "version.txt"):
return &File{
cache: h.cache,
cachedTorrent: cachedTorrent,
isDir: true,
children: h.getTorrentFileInfos(cachedTorrent.Torrent),
cache: h.cache,
isDir: false,
content: []byte("v1.0.0"),
name: "version.txt",
size: int64(len("v1.0.0")),
}, nil
}
// Use a map for faster file lookup
fileMap := make(map[string]*torrent.File, len(cachedTorrent.Torrent.Files))
for i := range cachedTorrent.Torrent.Files {
fileMap[cachedTorrent.Torrent.Files[i].Name] = &cachedTorrent.Torrent.Files[i]
}
// Single check for top-level folders
if name == path.Join(rootDir, "__all__") || name == path.Join(rootDir, "torrents") {
folderName := strings.TrimPrefix(name, rootDir)
folderName = strings.TrimPrefix(folderName, "/")
// Only fetch the torrent folders once
children := h.getTorrentsFolders()
if file, ok := fileMap[parts[1]]; ok {
return &File{
cache: h.cache,
cachedTorrent: cachedTorrent,
file: file,
isDir: false,
cache: h.cache,
isDir: true,
children: children,
name: folderName,
size: 0,
}, nil
}
h.logger.Debug().Msgf("File not found: %s", name)
_path := strings.TrimPrefix(name, rootDir)
parts := strings.Split(strings.TrimPrefix(_path, "/"), "/")
if len(parts) >= 2 && (parts[0] == "__all__" || parts[0] == "torrents") {
torrentName := parts[1]
cachedTorrent := h.cache.GetTorrentByName(torrentName)
if cachedTorrent == nil {
h.logger.Debug().Msgf("Torrent not found: %s", torrentName)
return nil, os.ErrNotExist
}
if len(parts) == 2 {
// Torrent folder level
return &File{
cache: h.cache,
torrentId: cachedTorrent.Id,
isDir: true,
children: h.getFileInfos(cachedTorrent.Torrent),
name: cachedTorrent.Name,
size: cachedTorrent.Size,
}, nil
}
// Torrent file level
filename := strings.Join(parts[2:], "/")
if file, ok := cachedTorrent.Files[filename]; ok {
fi := &File{
cache: h.cache,
torrentId: cachedTorrent.Id,
fileId: file.Id,
isDir: false,
name: file.Name,
size: file.Size,
link: file.Link,
downloadLink: file.DownloadLink,
}
return fi, nil
}
}
h.logger.Info().Msgf("File not found: %s", name)
return nil, os.ErrNotExist
}
@@ -171,14 +208,15 @@ func (h *Handler) Stat(ctx context.Context, name string) (os.FileInfo, error) {
return f.Stat()
}
func (h *Handler) getTorrentFileInfos(torrent *torrent.Torrent) []os.FileInfo {
func (h *Handler) getFileInfos(torrent *torrent.Torrent) []os.FileInfo {
files := make([]os.FileInfo, 0, len(torrent.Files))
now := time.Now()
for _, file := range torrent.Files {
files = append(files, &FileInfo{
name: file.Name,
size: file.Size,
mode: 0644,
modTime: time.Now(),
modTime: now,
isDir: false,
})
}
@@ -186,13 +224,124 @@ func (h *Handler) getTorrentFileInfos(torrent *torrent.Torrent) []os.FileInfo {
}
func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
// Handle OPTIONS
if r.Method == "OPTIONS" {
w.WriteHeader(http.StatusOK)
return
}
// Create WebDAV handler
//Add specific PROPFIND optimization
if r.Method == "PROPFIND" {
propfindStart := time.Now()
// Check if this is the slow path we identified
if strings.Contains(r.URL.Path, "__all__") {
// Fast path for this specific directory
depth := r.Header.Get("Depth")
if depth == "1" || depth == "" {
// This is a listing request
// Use a cached response if available
cachedKey := "propfind_" + r.URL.Path
if cachedResponse, ok := h.responseCache.Load(cachedKey); ok {
responseData := cachedResponse.([]byte)
w.Header().Set("Content-Type", "application/xml; charset=utf-8")
w.Header().Set("Content-Length", fmt.Sprintf("%d", len(responseData)))
w.Write(responseData)
return
}
// Otherwise process normally but cache the result
responseRecorder := httptest.NewRecorder()
// Process the request with the standard handler
handler := &webdav.Handler{
FileSystem: h,
LockSystem: webdav.NewMemLS(),
Logger: func(r *http.Request, err error) {
if err != nil {
h.logger.Error().Err(err).Msg("WebDAV error")
}
},
}
handler.ServeHTTP(responseRecorder, r)
// Cache the response for future requests
responseData := responseRecorder.Body.Bytes()
h.responseCache.Store(cachedKey, responseData)
// Send to the real client
for k, v := range responseRecorder.Header() {
w.Header()[k] = v
}
w.WriteHeader(responseRecorder.Code)
w.Write(responseData)
return
}
}
h.logger.Debug().
Dur("propfind_prepare", time.Since(propfindStart)).
Msg("Proceeding with standard PROPFIND")
}
// Check if this is a GET request for a file
if r.Method == "GET" {
openStart := time.Now()
f, err := h.OpenFile(r.Context(), r.URL.Path, os.O_RDONLY, 0)
if err != nil {
h.logger.Debug().Err(err).Str("path", r.URL.Path).Msg("Failed to open file")
http.NotFound(w, r)
return
}
defer f.Close()
fi, err := f.Stat()
if err != nil {
h.logger.Error().Err(err).Msg("Failed to stat file")
http.Error(w, "Server Error", http.StatusInternalServerError)
return
}
if fi.IsDir() {
dirStart := time.Now()
h.serveDirectory(w, r, f)
h.logger.Info().
Dur("directory_time", time.Since(dirStart)).
Msg("Directory served")
return
}
// For file requests, use http.ServeContent.
// Ensure f implements io.ReadSeeker.
rs, ok := f.(io.ReadSeeker)
if !ok {
// If not, read the entire file into memory as a fallback.
buf, err := io.ReadAll(f)
if err != nil {
h.logger.Error().Err(err).Msg("Failed to read file content")
http.Error(w, "Server Error", http.StatusInternalServerError)
return
}
rs = bytes.NewReader(buf)
}
// Set Content-Type based on file name.
fileName := fi.Name()
contentType := getContentType(fileName)
w.Header().Set("Content-Type", contentType)
// Serve the file with the correct modification time.
// http.ServeContent automatically handles Range requests.
http.ServeContent(w, r, fileName, fi.ModTime(), rs)
h.logger.Info().
Dur("open_attempt_time", time.Since(openStart)).
Msg("Served file using ServeContent")
return
}
// Default to standard WebDAV handler for other requests
handler := &webdav.Handler{
FileSystem: h,
LockSystem: webdav.NewMemLS(),
@@ -207,19 +356,34 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
},
}
// Special handling for GET requests on directories
if r.Method == "GET" {
if f, err := h.OpenFile(r.Context(), r.URL.Path, os.O_RDONLY, 0); err == nil {
if fi, err := f.Stat(); err == nil && fi.IsDir() {
h.serveDirectory(w, r, f)
return
}
f.Close()
}
}
handler.ServeHTTP(w, r)
}
func getContentType(fileName string) string {
contentType := "application/octet-stream"
// Determine content type based on file extension
switch {
case strings.HasSuffix(fileName, ".mp4"):
contentType = "video/mp4"
case strings.HasSuffix(fileName, ".mkv"):
contentType = "video/x-matroska"
case strings.HasSuffix(fileName, ".avi"):
contentType = "video/x-msvideo"
case strings.HasSuffix(fileName, ".mov"):
contentType = "video/quicktime"
case strings.HasSuffix(fileName, ".m4v"):
contentType = "video/x-m4v"
case strings.HasSuffix(fileName, ".ts"):
contentType = "video/mp2t"
case strings.HasSuffix(fileName, ".srt"):
contentType = "application/x-subrip"
case strings.HasSuffix(fileName, ".vtt"):
contentType = "text/vtt"
}
return contentType
}
func (h *Handler) serveDirectory(w http.ResponseWriter, r *http.Request, file webdav.File) {
var children []os.FileInfo
if f, ok := file.(*File); ok {
@@ -266,3 +430,51 @@ func (h *Handler) serveDirectory(w http.ResponseWriter, r *http.Request, file we
return
}
}
func (h *Handler) ioCopy(reader io.Reader, w io.Writer) (int64, error) {
// Start with a smaller initial buffer for faster first byte time
buffer := make([]byte, 8*1024) // 8KB initial buffer
written := int64(0)
// First chunk needs to be delivered ASAP
firstChunk := true
for {
n, err := reader.Read(buffer)
if n > 0 {
nw, ew := w.Write(buffer[:n])
if ew != nil {
var opErr *net.OpError
if errors.As(ew, &opErr) && opErr.Err.Error() == "write: broken pipe" {
h.logger.Debug().Msg("Client closed connection (normal for streaming)")
}
break
}
written += int64(nw)
// Flush immediately after first chunk, then less frequently
if firstChunk {
if flusher, ok := w.(http.Flusher); ok {
flusher.Flush()
}
firstChunk = false
// Increase buffer size after first chunk
buffer = make([]byte, 64*1024) // 512KB for subsequent reads
} else if written%(2*1024*1024) < int64(n) { // Flush every 2MB
if flusher, ok := w.(http.Flusher); ok {
flusher.Flush()
}
}
}
if err != nil {
if err != io.EOF {
h.logger.Error().Err(err).Msg("Error reading from file")
}
break
}
}
return written, nil
}

14
pkg/webdav/misc.go Normal file
View File

@@ -0,0 +1,14 @@
package webdav
import "strings"
// getName: Returns the torrent name and filename from the path
// /webdav/alldebrid/__all__/TorrentName
// getName splits a WebDAV path such as
//
//	/webdav/alldebrid/TorrentName/some/file.mkv
//
// (with rootDir "/webdav/alldebrid") into its first component and the
// remainder re-joined with "/". Both results are "" when the path has
// fewer than two components below rootDir.
func getName(rootDir, path string) (string, string) {
	trimmed := strings.TrimPrefix(path, rootDir)
	trimmed = strings.TrimPrefix(trimmed, "/")
	segments := strings.Split(trimmed, "/")
	if len(segments) < 2 {
		return "", ""
	}
	return segments[0], strings.Join(segments[1:], "/")
}

View File

@@ -9,7 +9,6 @@ import (
"github.com/sirrobot01/debrid-blackhole/pkg/service"
"html/template"
"net/http"
"os"
"sync"
)
@@ -23,8 +22,10 @@ func New() *WebDav {
w := &WebDav{
Handlers: make([]*Handler, 0),
}
for name, c := range svc.DebridCache.GetCaches() {
h := NewHandler(name, c, logger.NewLogger(fmt.Sprintf("%s-webdav", name), cfg.LogLevel, os.Stdout))
debrids := svc.Debrid.GetDebrids()
cacheManager := NewCacheManager(debrids)
for name, c := range cacheManager.GetCaches() {
h := NewHandler(name, c, logger.NewLogger(fmt.Sprintf("%s-webdav", name), cfg.LogLevel))
w.Handlers = append(w.Handlers, h)
}
return w
@@ -33,7 +34,7 @@ func New() *WebDav {
func (wd *WebDav) Routes() http.Handler {
chi.RegisterMethod("PROPFIND")
chi.RegisterMethod("PROPPATCH")
chi.RegisterMethod("MKCOL") // Note: it was "MKOL" in your example, should be "MKCOL"
chi.RegisterMethod("MKCOL")
chi.RegisterMethod("COPY")
chi.RegisterMethod("MOVE")
chi.RegisterMethod("LOCK")
@@ -97,6 +98,7 @@ func (wd *WebDav) setupRootHandler(r chi.Router) {
func (wd *WebDav) commonMiddleware(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("DAV", "1, 2")
w.Header().Set("Cache-Control", "max-age=3600")
w.Header().Set("Allow", "OPTIONS, PROPFIND, GET, HEAD, POST, PUT, DELETE, MKCOL, PROPPATCH, COPY, MOVE, LOCK, UNLOCK")
w.Header().Set("Access-Control-Allow-Origin", "*")
w.Header().Set("Access-Control-Allow-Methods", "OPTIONS, PROPFIND, GET, HEAD, POST, PUT, DELETE, MKCOL, PROPPATCH, COPY, MOVE, LOCK, UNLOCK")