Update repair; fix minor naming bugs
@@ -1,6 +1,7 @@
package webdav

import (
+	"bufio"
	"fmt"
	"github.com/sirrobot01/debrid-blackhole/pkg/debrid/debrid"
	"io"
@@ -11,11 +12,11 @@ import (

var sharedClient = &http.Client{
	Transport: &http.Transport{
		// These settings help maintain persistent connections.
		MaxIdleConns:       100,
		IdleConnTimeout:    90 * time.Second,
		DisableCompression: false,
		DisableKeepAlives:  false,
		Proxy:              http.ProxyFromEnvironment,
	},
	Timeout: 0,
}
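A note on the shared client: because every restarted stream goes through the same http.Transport, TCP/TLS connections are pooled and reused instead of being re-dialed on each Read. A minimal, self-contained sketch of the same pattern (the URL is a placeholder, not from this repository):

package main

import (
	"fmt"
	"io"
	"net/http"
	"time"
)

// One client for the whole process: its Transport keeps idle
// connections around for reuse between requests.
var client = &http.Client{
	Transport: &http.Transport{
		MaxIdleConns:    100,
		IdleConnTimeout: 90 * time.Second,
	},
}

func main() {
	for i := 0; i < 2; i++ {
		resp, err := client.Get("https://example.com/") // placeholder URL
		if err != nil {
			panic(err)
		}
		// Draining the body before Close lets the connection go back
		// to the idle pool instead of being torn down.
		io.Copy(io.Discard, resp.Body)
		resp.Body.Close()
		fmt.Println("request", i, "status", resp.Status)
	}
}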
@@ -39,6 +40,24 @@ type File struct {
	link string
}

+type bufferedReadCloser struct {
+	*bufio.Reader
+	closer io.Closer
+}
+
+// Create a new bufferedReadCloser with a larger buffer
+func newBufferedReadCloser(rc io.ReadCloser) *bufferedReadCloser {
+	return &bufferedReadCloser{
+		Reader: bufio.NewReaderSize(rc, 64*1024), // 64KB buffer
+		closer: rc,
+	}
+}
+
+// Close implements the io.ReadCloser interface
+func (brc *bufferedReadCloser) Close() error {
+	return brc.closer.Close()
+}
+
// File interface implementations for File

func (f *File) Close() error {
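The wrapper promotes bufio.Reader's buffered Read while Close still tears down the underlying HTTP body. A hypothetical helper (not in the repository) showing the intended usage:

// openBuffered shows the intended use of the wrapper: small
// sequential reads are served from the 64KB buffer, and Close
// still closes the HTTP response body through the embedded closer.
func openBuffered(url string) (io.ReadCloser, error) {
	resp, err := sharedClient.Get(url)
	if err != nil {
		return nil, err
	}
	return newBufferedReadCloser(resp.Body), nil
}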
@@ -82,40 +101,48 @@ func (f *File) Read(p []byte) (n int, err error) {
		return n, nil
	}

-	// If we haven't started streaming or a seek was requested,
-	// close the existing stream and start a new HTTP GET request.
+	// If we haven't started streaming the file yet or need to reposition
	if f.reader == nil || f.seekPending {
+		// Close existing reader if we're repositioning
		if f.reader != nil && f.seekPending {
			f.reader.Close()
			f.reader = nil
		}

-		// Create a new HTTP GET request for the file's URL.
-		req, err := http.NewRequest("GET", f.GetDownloadLink(), nil)
+		downloadLink := f.GetDownloadLink()
+		if downloadLink == "" {
+			return 0, fmt.Errorf("failed to get download link for file")
+		}
+
+		// Create an HTTP GET request to the file's URL.
+		req, err := http.NewRequest("GET", downloadLink, nil)
		if err != nil {
			return 0, fmt.Errorf("failed to create HTTP request: %w", err)
		}

-		// If we've already read some data, request only the remaining bytes.
+		// Request only the bytes starting from our current offset
		if f.offset > 0 {
			req.Header.Set("Range", fmt.Sprintf("bytes=%d-", f.offset))
		}

-		// Execute the HTTP request.
+		// Add important headers for streaming
+		req.Header.Set("Connection", "keep-alive")
+		req.Header.Set("Accept", "*/*")
+		req.Header.Set("User-Agent", "Infuse/7.0.2 (iOS)")
+		req.Header.Set("Accept-Encoding", "gzip, deflate, br")
+
		resp, err := sharedClient.Do(req)
		if err != nil {
			return 0, fmt.Errorf("HTTP request error: %w", err)
		}

-		// Accept a 200 (OK) or 206 (Partial Content) status.
+		// Check response codes more carefully
		if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent {
			resp.Body.Close()
			return 0, fmt.Errorf("unexpected HTTP status: %d", resp.StatusCode)
		}

-		// Store the response body as our reader.
-		f.reader = resp.Body
-		// Reset the seek pending flag now that we've reinitialized the reader.
+		f.reader = newBufferedReadCloser(resp.Body)
		f.seekPending = false
	}
@@ -123,10 +150,12 @@ func (f *File) Read(p []byte) (n int, err error) {
	n, err = f.reader.Read(p)
	f.offset += int64(n)

-	// When we reach the end of the stream, close the reader.
	if err == io.EOF {
		f.reader.Close()
		f.reader = nil
+	} else if err != nil {
+		f.reader.Close()
+		f.reader = nil
	}

	return n, err
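The restart path above relies on HTTP range requests: after a seek, the new request asks for bytes=<offset>- and accepts either 200 or 206. One caveat worth knowing: setting Accept-Encoding by hand disables net/http's transparent gunzip, so a compressed body arrives still compressed. A standalone sketch of the resumption pattern (names are illustrative, not from the repository):

// fetchFrom issues a ranged GET and returns the body positioned at
// offset. Servers that honor the Range header answer 206 Partial
// Content; some ignore it and answer 200 from byte zero, which a
// caller would then have to skip manually.
func fetchFrom(client *http.Client, url string, offset int64) (io.ReadCloser, error) {
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return nil, err
	}
	if offset > 0 {
		req.Header.Set("Range", fmt.Sprintf("bytes=%d-", offset))
	}
	resp, err := client.Do(req)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent {
		resp.Body.Close()
		return nil, fmt.Errorf("unexpected HTTP status: %d", resp.StatusCode)
	}
	return resp.Body, nil
}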
@@ -137,12 +166,12 @@ func (f *File) Seek(offset int64, whence int) (int64, error) {
		return 0, os.ErrInvalid
	}

-	var newOffset int64
+	newOffset := f.offset
	switch whence {
	case io.SeekStart:
		newOffset = offset
	case io.SeekCurrent:
-		newOffset = f.offset + offset
+		newOffset += offset
	case io.SeekEnd:
		newOffset = f.size + offset
	default:
@@ -156,7 +185,7 @@ func (f *File) Seek(offset int64, whence int) (int64, error) {
		newOffset = f.size
	}

-	// If we're seeking to a new position, mark the reader for reset.
+	// Only mark seek as pending if position actually changed
	if newOffset != f.offset {
		f.offset = newOffset
		f.seekPending = true
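Seek itself never touches the network; it only validates, clamps, and records the target offset, and the next Read reopens the stream from there. A hypothetical in-package test of the offset arithmetic (size and offset are unexported, so this only compiles inside package webdav; the field values are ours):

func TestSeekOffsets(t *testing.T) {
	f := &File{size: 100, offset: 10} // hypothetical field values
	cases := []struct {
		offset int64
		whence int
		want   int64
	}{
		{25, io.SeekStart, 25},  // absolute position
		{5, io.SeekCurrent, 30}, // relative to the position set above
		{-20, io.SeekEnd, 80},   // relative to the end of the file
	}
	for _, c := range cases {
		got, err := f.Seek(c.offset, c.whence)
		if err != nil {
			t.Fatalf("Seek(%d, %d): %v", c.offset, c.whence, err)
		}
		if got != c.want {
			t.Fatalf("Seek(%d, %d) = %d, want %d", c.offset, c.whence, got, c.want)
		}
	}
}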
@@ -184,6 +213,24 @@ func (f *File) Stat() (os.FileInfo, error) {
	}, nil
}

+func (f *File) ReadAt(p []byte, off int64) (n int, err error) {
+	// Seek to requested position
+	_, err = f.Seek(off, io.SeekStart)
+	if err != nil {
+		return 0, err
+	}
+
+	// Read the data
+	n, err = f.Read(p)
+
+	// Don't restore position for Infuse compatibility;
+	// Infuse expects sequential reads after the initial seek
+	return n, err
+}
+
func (f *File) Write(p []byte) (n int, err error) {
	return 0, os.ErrPermission
}
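Note that this ReadAt deliberately leaves the offset moved, which is looser than the standard io.ReaderAt contract (a conforming ReadAt fills p or returns an error explaining why not, and neither affects nor is affected by the seek position). For contrast, a strict variant might look like the sketch below; it is illustrative, not the author's method:

// strictReadAt is an illustrative alternative that honors the
// io.ReaderAt contract: it restores the previous position and
// fills p completely or reports an error.
func strictReadAt(f *File, p []byte, off int64) (int, error) {
	prev, err := f.Seek(0, io.SeekCurrent) // remember current position
	if err != nil {
		return 0, err
	}
	if _, err := f.Seek(off, io.SeekStart); err != nil {
		return 0, err
	}
	n, err := io.ReadFull(f, p) // fill p or report why not
	if _, seekErr := f.Seek(prev, io.SeekStart); seekErr != nil && err == nil {
		err = seekErr
	}
	return n, err
}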
@@ -2,7 +2,6 @@ package webdav

import (
	"bytes"
	"compress/gzip"
	"context"
	"errors"
	"fmt"
@@ -68,7 +67,7 @@ func (h *Handler) RemoveAll(ctx context.Context, name string) error {
	}

	if filename == "" {
-		h.cache.GetClient().DeleteTorrent(cachedTorrent.Torrent)
+		h.cache.GetClient().DeleteTorrent(cachedTorrent.Torrent.Id)
		h.cache.OnRemove(cachedTorrent.Id)
		return nil
	}
@@ -259,7 +258,7 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	// - Otherwise, for deeper (torrent folder) paths, use a longer TTL.
	ttl := 30 * time.Minute
	if h.isParentPath(r.URL.Path) {
-		ttl = 20 * time.Second
+		ttl = 30 * time.Second
	}

	if served := h.serveFromCacheIfValid(w, r, cacheKey, ttl); served {
@@ -281,22 +280,12 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	responseData := responseRecorder.Body.Bytes()

-	// Create compressed version
-	var gzippedData []byte
-	if len(responseData) > 0 {
-		var buf bytes.Buffer
-		gzw := gzip.NewWriter(&buf)
-		if _, err := gzw.Write(responseData); err == nil {
-			if err := gzw.Close(); err == nil {
-				gzippedData = buf.Bytes()
-			}
-		}
-	}
-
-	h.cache.PropfindResp.Store(cacheKey, debrid.PropfindResponse{
-		Data:        responseData,
-		GzippedData: gzippedData,
-		Ts:          time.Now(),
-	})
+	//h.cache.PropfindResp.Store(cacheKey, debrid.PropfindResponse{
+	//	Data:        responseData,
+	//	GzippedData: request.Gzip(responseData),
+	//	Ts:          time.Now(),
+	//})

	// Forward the captured response to the client.
	for k, v := range responseRecorder.Header() {
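The removed block illustrates pre-compressing a cached payload once so later hits can be served without re-running gzip per request. The same idea as a standalone helper (the function name is ours; bytes and compress/gzip are assumed imported):

// gzipBytes compresses data once, up front; callers can cache the
// result next to the raw bytes and choose per request based on the
// client's Accept-Encoding header. Returns nil if compression fails.
func gzipBytes(data []byte) []byte {
	if len(data) == 0 {
		return nil
	}
	var buf bytes.Buffer
	gzw := gzip.NewWriter(&buf)
	if _, err := gzw.Write(data); err != nil {
		return nil
	}
	if err := gzw.Close(); err != nil {
		return nil
	}
	return buf.Bytes()
}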
@@ -417,7 +406,6 @@ func (h *Handler) serveFromCacheIfValid(w http.ResponseWriter, r *http.Request,

	if time.Since(respCache.Ts) >= ttl {
		// Remove expired cache entry
		h.cache.PropfindResp.Delete(cacheKey)
		return false
	}
	w.Header().Set("Content-Type", "application/xml; charset=utf-8")
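The TTL gate pairs with the Store call seen earlier: entries carry the timestamp at which they were cached and are discarded lazily on the first lookup past their TTL. A sketch under the assumption that PropfindResp behaves like a sync.Map keyed by cache key (the helper is hypothetical):

// cachedBody returns the cached PROPFIND body if it is still fresh,
// deleting the entry lazily once its TTL has elapsed.
func cachedBody(m *sync.Map, key string, ttl time.Duration) ([]byte, bool) {
	v, ok := m.Load(key)
	if !ok {
		return nil, false
	}
	resp := v.(debrid.PropfindResponse)
	if time.Since(resp.Ts) >= ttl {
		m.Delete(key) // drop the expired entry
		return nil, false
	}
	return resp.Data, true
}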