Fix issues with repair, move to a different streaming option
This commit is contained in:
@@ -51,7 +51,7 @@ Create a `config.json` file in `/opt/decypharr/` with your Decypharr configurati
|
|||||||
"folder": "/mnt/remote/realdebrid/__all__/",
|
"folder": "/mnt/remote/realdebrid/__all__/",
|
||||||
"rate_limit": "250/minute",
|
"rate_limit": "250/minute",
|
||||||
"use_webdav": true,
|
"use_webdav": true,
|
||||||
"rc_url": "http://your-ip-address:5572" // Rclone RC URL
|
"rc_url": "http://your-ip-address:5572"
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
"qbittorrent": {
|
"qbittorrent": {
|
||||||
@@ -99,7 +99,7 @@ services:
|
|||||||
decypharr:
|
decypharr:
|
||||||
condition: service_healthy
|
condition: service_healthy
|
||||||
restart: true
|
restart: true
|
||||||
command: "mount decypharr: /data --allow-non-empty --allow-other --dir-cache-time 10s --rc --rc-addr :5572 --rc-no-auth "
|
command: "mount decypharr: /data --allow-non-empty --allow-other --dir-cache-time 10s --rc --rc-addr :5572 --rc-no-auth"
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Docker Notes
|
#### Docker Notes
|
||||||
|
|||||||
@@ -111,6 +111,8 @@ func (c *Cache) GetBrokenFiles(t *CachedTorrent, filenames []string) []string {
|
|||||||
}(f)
|
}(f)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
wg.Wait()
|
||||||
|
|
||||||
// Try to reinsert the torrent if it's broken
|
// Try to reinsert the torrent if it's broken
|
||||||
if len(brokenFiles) > 0 && t.Torrent != nil {
|
if len(brokenFiles) > 0 && t.Torrent != nil {
|
||||||
// Check if the torrent is already in progress
|
// Check if the torrent is already in progress
|
||||||
|
|||||||
@@ -108,7 +108,7 @@ func (q *QBit) handleTorrentsAdd(w http.ResponseWriter, r *http.Request) {
|
|||||||
}
|
}
|
||||||
for _, url := range urlList {
|
for _, url := range urlList {
|
||||||
if err := q.addMagnet(ctx, url, _arr, debridName, action); err != nil {
|
if err := q.addMagnet(ctx, url, _arr, debridName, action); err != nil {
|
||||||
q.logger.Debug().Err(err).Msgf("Error adding magnet")
|
q.logger.Debug().Msgf("Error adding magnet: %s", err.Error())
|
||||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -27,35 +27,52 @@ var sharedClient = &http.Client{
|
|||||||
Timeout: 0,
|
Timeout: 0,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type streamError struct {
|
||||||
|
Err error
|
||||||
|
StatusCode int
|
||||||
|
IsClientDisconnection bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *streamError) Error() string {
|
||||||
|
return e.Err.Error()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *streamError) Unwrap() error {
|
||||||
|
return e.Err
|
||||||
|
}
|
||||||
|
|
||||||
type File struct {
|
type File struct {
|
||||||
cache *store.Cache
|
|
||||||
fileId string
|
|
||||||
torrentName string
|
|
||||||
|
|
||||||
modTime time.Time
|
|
||||||
|
|
||||||
size int64
|
|
||||||
offset int64
|
|
||||||
isDir bool
|
|
||||||
children []os.FileInfo
|
|
||||||
reader io.ReadCloser
|
|
||||||
seekPending bool
|
|
||||||
content []byte
|
|
||||||
isRar bool
|
|
||||||
name string
|
name string
|
||||||
metadataOnly bool
|
torrentName string
|
||||||
|
|
||||||
downloadLink string
|
|
||||||
link string
|
link string
|
||||||
|
downloadLink string
|
||||||
|
size int64
|
||||||
|
isDir bool
|
||||||
|
fileId string
|
||||||
|
isRar bool
|
||||||
|
metadataOnly bool
|
||||||
|
content []byte
|
||||||
|
children []os.FileInfo // For directories
|
||||||
|
cache *store.Cache
|
||||||
|
modTime time.Time
|
||||||
|
|
||||||
|
// Minimal state for interface compliance only
|
||||||
|
readOffset int64 // Only used for Read() method compliance
|
||||||
}
|
}
|
||||||
|
|
||||||
// File interface implementations for File
|
// File interface implementations for File
|
||||||
|
|
||||||
func (f *File) Close() error {
|
func (f *File) Close() error {
|
||||||
if f.reader != nil {
|
if f.isDir {
|
||||||
f.reader.Close()
|
return nil // No resources to close for directories
|
||||||
f.reader = nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// For files, we don't have any resources to close either
|
||||||
|
// This is just to satisfy the os.File interface
|
||||||
|
f.content = nil
|
||||||
|
f.children = nil
|
||||||
|
f.downloadLink = ""
|
||||||
|
f.readOffset = 0
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -84,211 +101,274 @@ func (f *File) getDownloadByteRange() (*[2]int64, error) {
|
|||||||
return byteRange, nil
|
return byteRange, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *File) stream() (*http.Response, error) {
|
func (f *File) servePreloadedContent(w http.ResponseWriter, r *http.Request) error {
|
||||||
client := sharedClient
|
content := f.content
|
||||||
|
size := int64(len(content))
|
||||||
|
|
||||||
|
// Handle range requests for preloaded content
|
||||||
|
if rangeHeader := r.Header.Get("Range"); rangeHeader != "" {
|
||||||
|
ranges, err := parseRange(rangeHeader, size)
|
||||||
|
if err != nil || len(ranges) != 1 {
|
||||||
|
w.Header().Set("Content-Range", fmt.Sprintf("bytes */%d", size))
|
||||||
|
return &streamError{Err: fmt.Errorf("invalid range"), StatusCode: http.StatusRequestedRangeNotSatisfiable}
|
||||||
|
}
|
||||||
|
|
||||||
|
start, end := ranges[0].start, ranges[0].end
|
||||||
|
w.Header().Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", start, end, size))
|
||||||
|
w.Header().Set("Content-Length", fmt.Sprintf("%d", end-start+1))
|
||||||
|
w.Header().Set("Accept-Ranges", "bytes")
|
||||||
|
w.WriteHeader(http.StatusPartialContent)
|
||||||
|
|
||||||
|
_, err = w.Write(content[start : end+1])
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Full content
|
||||||
|
w.Header().Set("Content-Length", fmt.Sprintf("%d", size))
|
||||||
|
w.Header().Set("Accept-Ranges", "bytes")
|
||||||
|
w.WriteHeader(http.StatusOK)
|
||||||
|
|
||||||
|
_, err := w.Write(content)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *File) StreamResponse(w http.ResponseWriter, r *http.Request) error {
|
||||||
|
// Handle preloaded content files
|
||||||
|
if f.content != nil {
|
||||||
|
return f.servePreloadedContent(w, r)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Try streaming with retry logic
|
||||||
|
return f.streamWithRetry(w, r, 0)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *File) streamWithRetry(w http.ResponseWriter, r *http.Request, retryCount int) error {
|
||||||
|
const maxRetries = 0
|
||||||
_log := f.cache.Logger()
|
_log := f.cache.Logger()
|
||||||
|
|
||||||
|
// Get download link (with caching optimization)
|
||||||
downloadLink, err := f.getDownloadLink()
|
downloadLink, err := f.getDownloadLink()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
_log.Trace().Msgf("Failed to get download link for %s: %v", f.name, err)
|
return &streamError{Err: err, StatusCode: http.StatusPreconditionFailed}
|
||||||
return nil, err
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if downloadLink == "" {
|
if downloadLink == "" {
|
||||||
_log.Trace().Msgf("Failed to get download link for %s. Empty download link", f.name)
|
return &streamError{Err: fmt.Errorf("empty download link"), StatusCode: http.StatusNotFound}
|
||||||
return nil, fmt.Errorf("empty download link")
|
|
||||||
}
|
}
|
||||||
|
|
||||||
byteRange, err := f.getDownloadByteRange()
|
// Create upstream request with streaming optimizations
|
||||||
|
upstreamReq, err := http.NewRequest("GET", downloadLink, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
_log.Trace().Msgf("Failed to get download byte range for %s: %v", f.name, err)
|
return &streamError{Err: err, StatusCode: http.StatusInternalServerError}
|
||||||
return nil, err
|
|
||||||
}
|
}
|
||||||
|
|
||||||
req, err := http.NewRequest("GET", downloadLink, nil)
|
setVideoStreamingHeaders(upstreamReq)
|
||||||
|
|
||||||
|
// Handle range requests (critical for video seeking)
|
||||||
|
isRangeRequest := f.handleRangeRequest(upstreamReq, r, w)
|
||||||
|
if isRangeRequest == -1 {
|
||||||
|
return &streamError{Err: fmt.Errorf("invalid range"), StatusCode: http.StatusRequestedRangeNotSatisfiable}
|
||||||
|
}
|
||||||
|
|
||||||
|
resp, err := sharedClient.Do(upstreamReq)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
_log.Trace().Msgf("Failed to create HTTP request: %v", err)
|
return &streamError{Err: err, StatusCode: http.StatusServiceUnavailable}
|
||||||
return nil, err
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
// Handle upstream errors with retry logic
|
||||||
|
shouldRetry, retryErr := f.handleUpstream(resp, retryCount, maxRetries)
|
||||||
|
if shouldRetry && retryCount < maxRetries {
|
||||||
|
// Retry with new download link
|
||||||
|
_log.Debug().
|
||||||
|
Int("retry_count", retryCount+1).
|
||||||
|
Str("file", f.name).
|
||||||
|
Msg("Retrying stream request")
|
||||||
|
return f.streamWithRetry(w, r, retryCount+1)
|
||||||
|
}
|
||||||
|
if retryErr != nil {
|
||||||
|
return retryErr
|
||||||
}
|
}
|
||||||
|
|
||||||
if byteRange == nil {
|
setVideoResponseHeaders(w, resp, isRangeRequest == 1)
|
||||||
req.Header.Set("Range", fmt.Sprintf("bytes=%d-", max(0, f.offset)))
|
|
||||||
} else {
|
|
||||||
req.Header.Set("Range", fmt.Sprintf("bytes=%d-", byteRange[0]+max(0, f.offset)))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Make the request
|
// Stream with optimized buffering for video
|
||||||
resp, err := client.Do(req)
|
return f.streamVideoOptimized(w, resp.Body)
|
||||||
if err != nil {
|
|
||||||
_log.Trace().Msgf("HTTP request failed: %v", err)
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent {
|
|
||||||
f.downloadLink = ""
|
|
||||||
|
|
||||||
cleanupResp := func(resp *http.Response) {
|
|
||||||
if resp.Body != nil {
|
|
||||||
_, _ = io.Copy(io.Discard, resp.Body)
|
|
||||||
resp.Body.Close()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
switch resp.StatusCode {
|
|
||||||
case http.StatusServiceUnavailable:
|
|
||||||
// Read the body to check for specific error messages
|
|
||||||
body, readErr := io.ReadAll(resp.Body)
|
|
||||||
cleanupResp(resp)
|
|
||||||
|
|
||||||
if readErr != nil {
|
|
||||||
_log.Trace().Msgf("Failed to read response body: %v", readErr)
|
|
||||||
return nil, fmt.Errorf("failed to read error response: %w", readErr)
|
|
||||||
}
|
|
||||||
|
|
||||||
bodyStr := string(body)
|
|
||||||
if strings.Contains(bodyStr, "You can not download this file because you have exceeded your traffic on this hoster") {
|
|
||||||
_log.Trace().Msgf("Bandwidth exceeded for %s. Download token will be disabled if you have more than one", f.name)
|
|
||||||
f.cache.MarkDownloadLinkAsInvalid(f.link, downloadLink, "bandwidth_exceeded")
|
|
||||||
// Retry with a different API key if it's available
|
|
||||||
return f.stream()
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil, fmt.Errorf("service unavailable: %s", bodyStr)
|
|
||||||
|
|
||||||
case http.StatusNotFound:
|
|
||||||
cleanupResp(resp)
|
|
||||||
// Mark download link as not found
|
|
||||||
// Regenerate a new download link
|
|
||||||
_log.Trace().Msgf("Link not found (404) for %s. Marking link as invalid and regenerating", f.name)
|
|
||||||
f.cache.MarkDownloadLinkAsInvalid(f.link, downloadLink, "link_not_found")
|
|
||||||
// Generate a new download link
|
|
||||||
downloadLink, err := f.getDownloadLink()
|
|
||||||
if err != nil {
|
|
||||||
_log.Trace().Msgf("Failed to get download link for %s. %s", f.name, err)
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if downloadLink == "" {
|
|
||||||
_log.Trace().Msgf("Failed to get download link for %s", f.name)
|
|
||||||
return nil, fmt.Errorf("failed to regenerate download link")
|
|
||||||
}
|
|
||||||
|
|
||||||
req, err := http.NewRequest("GET", downloadLink, nil)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set the range header again
|
|
||||||
if byteRange == nil {
|
|
||||||
req.Header.Set("Range", fmt.Sprintf("bytes=%d-", max(0, f.offset)))
|
|
||||||
} else {
|
|
||||||
req.Header.Set("Range", fmt.Sprintf("bytes=%d-", byteRange[0]+max(0, f.offset)))
|
|
||||||
}
|
|
||||||
|
|
||||||
newResp, err := client.Do(req)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if newResp.StatusCode != http.StatusOK && newResp.StatusCode != http.StatusPartialContent {
|
|
||||||
cleanupResp(newResp)
|
|
||||||
_log.Trace().Msgf("Regenerated link also failed with status %d", newResp.StatusCode)
|
|
||||||
f.cache.MarkDownloadLinkAsInvalid(f.link, downloadLink, newResp.Status)
|
|
||||||
return nil, fmt.Errorf("failed with status code %d even after link regeneration", newResp.StatusCode)
|
|
||||||
}
|
|
||||||
|
|
||||||
return newResp, nil
|
|
||||||
|
|
||||||
default:
|
|
||||||
body, _ := io.ReadAll(resp.Body)
|
|
||||||
cleanupResp(resp)
|
|
||||||
|
|
||||||
_log.Trace().Msgf("Unexpected status code %d for %s: %s", resp.StatusCode, f.name, string(body))
|
|
||||||
return nil, fmt.Errorf("unexpected status code %d: %s", resp.StatusCode, string(body))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return resp, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *File) Read(p []byte) (n int, err error) {
|
func (f *File) handleUpstream(resp *http.Response, retryCount, maxRetries int) (shouldRetry bool, err error) {
|
||||||
if f.isDir {
|
if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusPartialContent {
|
||||||
return 0, os.ErrInvalid
|
return false, nil
|
||||||
}
|
|
||||||
if f.metadataOnly {
|
|
||||||
return 0, io.EOF
|
|
||||||
}
|
|
||||||
if f.content != nil {
|
|
||||||
if f.offset >= int64(len(f.content)) {
|
|
||||||
return 0, io.EOF
|
|
||||||
}
|
|
||||||
n = copy(p, f.content[f.offset:])
|
|
||||||
f.offset += int64(n)
|
|
||||||
return n, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// If we haven't started streaming the file yet or need to reposition
|
_log := f.cache.Logger()
|
||||||
if f.reader == nil || f.seekPending {
|
|
||||||
if f.reader != nil {
|
// Clean up response body properly
|
||||||
f.reader.Close()
|
cleanupResp := func(resp *http.Response) {
|
||||||
f.reader = nil
|
if resp.Body != nil {
|
||||||
|
_, _ = io.Copy(io.Discard, resp.Body)
|
||||||
|
resp.Body.Close()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
switch resp.StatusCode {
|
||||||
|
case http.StatusServiceUnavailable:
|
||||||
|
// Read the body to check for specific error messages
|
||||||
|
body, readErr := io.ReadAll(resp.Body)
|
||||||
|
cleanupResp(resp)
|
||||||
|
|
||||||
|
if readErr != nil {
|
||||||
|
_log.Error().Err(readErr).Msg("Failed to read response body")
|
||||||
|
return false, &streamError{
|
||||||
|
Err: fmt.Errorf("failed to read error response: %w", readErr),
|
||||||
|
StatusCode: http.StatusServiceUnavailable,
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Make the request to get the file
|
bodyStr := string(body)
|
||||||
resp, err := f.stream()
|
if strings.Contains(bodyStr, "you have exceeded your traffic") {
|
||||||
if err != nil {
|
_log.Debug().
|
||||||
return 0, err
|
Str("file", f.name).
|
||||||
}
|
Int("retry_count", retryCount).
|
||||||
if resp == nil {
|
Msg("Bandwidth exceeded. Marking link as invalid")
|
||||||
return 0, fmt.Errorf("stream returned nil response")
|
|
||||||
|
f.cache.MarkDownloadLinkAsInvalid(f.link, f.downloadLink, "bandwidth_exceeded")
|
||||||
|
|
||||||
|
// Retry with a different API key if available and we haven't exceeded retries
|
||||||
|
if retryCount < maxRetries {
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return false, &streamError{
|
||||||
|
Err: fmt.Errorf("bandwidth exceeded after %d retries", retryCount),
|
||||||
|
StatusCode: http.StatusServiceUnavailable,
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
f.reader = resp.Body
|
return false, &streamError{
|
||||||
f.seekPending = false
|
Err: fmt.Errorf("service unavailable: %s", bodyStr),
|
||||||
}
|
StatusCode: http.StatusServiceUnavailable,
|
||||||
|
}
|
||||||
|
|
||||||
n, err = f.reader.Read(p)
|
case http.StatusNotFound:
|
||||||
f.offset += int64(n)
|
cleanupResp(resp)
|
||||||
|
|
||||||
if err != nil {
|
_log.Debug().
|
||||||
f.reader.Close()
|
Str("file", f.name).
|
||||||
f.reader = nil
|
Int("retry_count", retryCount).
|
||||||
}
|
Msg("Link not found (404). Marking link as invalid and regenerating")
|
||||||
|
|
||||||
return n, err
|
f.cache.MarkDownloadLinkAsInvalid(f.link, f.downloadLink, "link_not_found")
|
||||||
}
|
|
||||||
|
|
||||||
func (f *File) Seek(offset int64, whence int) (int64, error) {
|
// Try to regenerate download link if we haven't exceeded retries
|
||||||
if f.isDir {
|
if retryCount < maxRetries {
|
||||||
return 0, os.ErrInvalid
|
// Clear cached link to force regeneration
|
||||||
}
|
f.downloadLink = ""
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return false, &streamError{
|
||||||
|
Err: fmt.Errorf("file not found after %d retries", retryCount),
|
||||||
|
StatusCode: http.StatusNotFound,
|
||||||
|
}
|
||||||
|
|
||||||
newOffset := f.offset
|
|
||||||
switch whence {
|
|
||||||
case io.SeekStart:
|
|
||||||
newOffset = offset
|
|
||||||
case io.SeekCurrent:
|
|
||||||
newOffset += offset
|
|
||||||
case io.SeekEnd:
|
|
||||||
newOffset = f.size + offset
|
|
||||||
default:
|
default:
|
||||||
return 0, os.ErrInvalid
|
body, _ := io.ReadAll(resp.Body)
|
||||||
}
|
cleanupResp(resp)
|
||||||
|
|
||||||
if newOffset < 0 {
|
_log.Error().
|
||||||
newOffset = 0
|
Int("status_code", resp.StatusCode).
|
||||||
}
|
Str("file", f.name).
|
||||||
if newOffset > f.size {
|
Str("response_body", string(body)).
|
||||||
newOffset = f.size
|
Msg("Unexpected upstream error")
|
||||||
}
|
|
||||||
|
|
||||||
// Only mark seek as pending if position actually changed
|
return false, &streamError{
|
||||||
if newOffset != f.offset {
|
Err: fmt.Errorf("upstream error %d: %s", resp.StatusCode, string(body)),
|
||||||
f.offset = newOffset
|
StatusCode: http.StatusBadGateway,
|
||||||
f.seekPending = true
|
}
|
||||||
}
|
}
|
||||||
return f.offset, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (f *File) handleRangeRequest(upstreamReq *http.Request, r *http.Request, w http.ResponseWriter) int {
|
||||||
|
rangeHeader := r.Header.Get("Range")
|
||||||
|
if rangeHeader == "" {
|
||||||
|
// For video files, apply byte range if exists
|
||||||
|
if byteRange, _ := f.getDownloadByteRange(); byteRange != nil {
|
||||||
|
upstreamReq.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", byteRange[0], byteRange[1]))
|
||||||
|
}
|
||||||
|
return 0 // No range request
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parse range request
|
||||||
|
ranges, err := parseRange(rangeHeader, f.size)
|
||||||
|
if err != nil || len(ranges) != 1 {
|
||||||
|
w.Header().Set("Content-Range", fmt.Sprintf("bytes */%d", f.size))
|
||||||
|
return -1 // Invalid range
|
||||||
|
}
|
||||||
|
|
||||||
|
// Apply byte range offset if exists
|
||||||
|
byteRange, _ := f.getDownloadByteRange()
|
||||||
|
start, end := ranges[0].start, ranges[0].end
|
||||||
|
|
||||||
|
if byteRange != nil {
|
||||||
|
start += byteRange[0]
|
||||||
|
end += byteRange[0]
|
||||||
|
}
|
||||||
|
|
||||||
|
upstreamReq.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", start, end))
|
||||||
|
return 1 // Valid range request
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *File) streamVideoOptimized(w http.ResponseWriter, src io.Reader) error {
|
||||||
|
// Use larger buffer for video streaming (better throughput)
|
||||||
|
buf := make([]byte, 64*1024) // 64KB buffer
|
||||||
|
|
||||||
|
// First chunk optimization - send immediately for faster start
|
||||||
|
n, err := src.Read(buf)
|
||||||
|
if err != nil && err != io.EOF {
|
||||||
|
if isClientDisconnection(err) {
|
||||||
|
return &streamError{Err: err, StatusCode: 0, IsClientDisconnection: true}
|
||||||
|
}
|
||||||
|
return &streamError{Err: err, StatusCode: 0}
|
||||||
|
}
|
||||||
|
|
||||||
|
if n > 0 {
|
||||||
|
// Write first chunk immediately
|
||||||
|
_, writeErr := w.Write(buf[:n])
|
||||||
|
if writeErr != nil {
|
||||||
|
if isClientDisconnection(writeErr) {
|
||||||
|
return &streamError{Err: writeErr, StatusCode: 0, IsClientDisconnection: true}
|
||||||
|
}
|
||||||
|
return &streamError{Err: writeErr, StatusCode: 0}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Flush immediately for faster video start
|
||||||
|
if flusher, ok := w.(http.Flusher); ok {
|
||||||
|
flusher.Flush()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if err == io.EOF {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Continue with optimized copy for remaining data
|
||||||
|
_, err = io.CopyBuffer(w, src, buf)
|
||||||
|
if err != nil {
|
||||||
|
if isClientDisconnection(err) {
|
||||||
|
return &streamError{Err: err, StatusCode: 0, IsClientDisconnection: true}
|
||||||
|
}
|
||||||
|
return &streamError{Err: err, StatusCode: 0}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
These are the methods that implement the os.File interface for the File type.
|
||||||
|
Only Stat and ReadDir are used
|
||||||
|
*/
|
||||||
|
|
||||||
func (f *File) Stat() (os.FileInfo, error) {
|
func (f *File) Stat() (os.FileInfo, error) {
|
||||||
if f.isDir {
|
if f.isDir {
|
||||||
return &FileInfo{
|
return &FileInfo{
|
||||||
@@ -309,18 +389,61 @@ func (f *File) Stat() (os.FileInfo, error) {
|
|||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *File) ReadAt(p []byte, off int64) (n int, err error) {
|
func (f *File) Read(p []byte) (n int, err error) {
|
||||||
// Save current position
|
if f.isDir {
|
||||||
|
return 0, os.ErrInvalid
|
||||||
// Seek to requested position
|
|
||||||
_, err = f.Seek(off, io.SeekStart)
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Read the data
|
if f.metadataOnly {
|
||||||
n, err = f.Read(p)
|
return 0, io.EOF
|
||||||
return n, err
|
}
|
||||||
|
|
||||||
|
// For preloaded content files (like version.txt)
|
||||||
|
if f.content != nil {
|
||||||
|
if f.readOffset >= int64(len(f.content)) {
|
||||||
|
return 0, io.EOF
|
||||||
|
}
|
||||||
|
n = copy(p, f.content[f.readOffset:])
|
||||||
|
f.readOffset += int64(n)
|
||||||
|
return n, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// For streaming files, return an error to force use of StreamResponse
|
||||||
|
return 0, fmt.Errorf("use StreamResponse method for streaming files")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *File) Seek(offset int64, whence int) (int64, error) {
|
||||||
|
if f.isDir {
|
||||||
|
return 0, os.ErrInvalid
|
||||||
|
}
|
||||||
|
|
||||||
|
// Only handle seeking for preloaded content
|
||||||
|
if f.content != nil {
|
||||||
|
newOffset := f.readOffset
|
||||||
|
switch whence {
|
||||||
|
case io.SeekStart:
|
||||||
|
newOffset = offset
|
||||||
|
case io.SeekCurrent:
|
||||||
|
newOffset += offset
|
||||||
|
case io.SeekEnd:
|
||||||
|
newOffset = int64(len(f.content)) + offset
|
||||||
|
default:
|
||||||
|
return 0, os.ErrInvalid
|
||||||
|
}
|
||||||
|
|
||||||
|
if newOffset < 0 {
|
||||||
|
newOffset = 0
|
||||||
|
}
|
||||||
|
if newOffset > int64(len(f.content)) {
|
||||||
|
newOffset = int64(len(f.content))
|
||||||
|
}
|
||||||
|
|
||||||
|
f.readOffset = newOffset
|
||||||
|
return f.readOffset, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// For streaming files, return error to force use of StreamResponse
|
||||||
|
return 0, fmt.Errorf("use StreamResponse method for streaming files")
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *File) Write(p []byte) (n int, err error) {
|
func (f *File) Write(p []byte) (n int, err error) {
|
||||||
|
|||||||
@@ -2,6 +2,7 @@ package webdav
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"github.com/sirrobot01/decypharr/pkg/debrid/types"
|
"github.com/sirrobot01/decypharr/pkg/debrid/types"
|
||||||
"golang.org/x/net/webdav"
|
"golang.org/x/net/webdav"
|
||||||
@@ -415,104 +416,90 @@ func (h *Handler) serveDirectory(w http.ResponseWriter, r *http.Request, file we
|
|||||||
func (h *Handler) handleGet(w http.ResponseWriter, r *http.Request) {
|
func (h *Handler) handleGet(w http.ResponseWriter, r *http.Request) {
|
||||||
fRaw, err := h.OpenFile(r.Context(), r.URL.Path, os.O_RDONLY, 0)
|
fRaw, err := h.OpenFile(r.Context(), r.URL.Path, os.O_RDONLY, 0)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logger.Error().Err(err).
|
|
||||||
Str("path", r.URL.Path).
|
|
||||||
Msg("Failed to open file")
|
|
||||||
http.NotFound(w, r)
|
http.NotFound(w, r)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
defer func(fRaw webdav.File) {
|
defer fRaw.Close()
|
||||||
err := fRaw.Close()
|
|
||||||
if err != nil {
|
|
||||||
h.logger.Error().Err(err).Msg("Failed to close file")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}(fRaw)
|
|
||||||
|
|
||||||
fi, err := fRaw.Stat()
|
fi, err := fRaw.Stat()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logger.Error().Err(err).Msg("Failed to stat file")
|
|
||||||
http.Error(w, "Server Error", http.StatusInternalServerError)
|
http.Error(w, "Server Error", http.StatusInternalServerError)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// If the target is a directory, use your directory listing logic.
|
|
||||||
if fi.IsDir() {
|
if fi.IsDir() {
|
||||||
h.serveDirectory(w, r, fRaw)
|
h.serveDirectory(w, r, fRaw)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// Checks if the file is a torrent file
|
// Set common headers
|
||||||
// .content is nil if the file is a torrent file
|
|
||||||
// .content means file is preloaded, e.g version.txt
|
|
||||||
if file, ok := fRaw.(*File); ok && file.content == nil {
|
|
||||||
link, err := file.getDownloadLink()
|
|
||||||
if err != nil {
|
|
||||||
h.logger.Debug().
|
|
||||||
Err(err).
|
|
||||||
Str("link", file.link).
|
|
||||||
Str("path", r.URL.Path).
|
|
||||||
Msg("Could not fetch download link")
|
|
||||||
http.Error(w, "Could not fetch download link", http.StatusPreconditionFailed)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if link == "" {
|
|
||||||
http.NotFound(w, r)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
file.downloadLink = link
|
|
||||||
// If the torrent file is not a RAR file and users enabled proxy streaming
|
|
||||||
if !file.isRar && h.cache.StreamWithRclone() {
|
|
||||||
w.Header().Set("Cache-Control", "no-cache, no-store, must-revalidate")
|
|
||||||
w.Header().Set("Pragma", "no-cache")
|
|
||||||
w.Header().Set("Expires", "0")
|
|
||||||
w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", fi.Name()))
|
|
||||||
w.Header().Set("Content-Length", fmt.Sprintf("%d", fi.Size()))
|
|
||||||
w.Header().Set("Last-Modified", fi.ModTime().UTC().Format(http.TimeFormat))
|
|
||||||
w.Header().Set("Accept-Ranges", "bytes")
|
|
||||||
w.Header().Set("X-Accel-Redirect", file.downloadLink)
|
|
||||||
w.Header().Set("X-Accel-Buffering", "no")
|
|
||||||
http.Redirect(w, r, file.downloadLink, http.StatusFound)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ETags
|
|
||||||
etag := fmt.Sprintf("\"%x-%x\"", fi.ModTime().Unix(), fi.Size())
|
etag := fmt.Sprintf("\"%x-%x\"", fi.ModTime().Unix(), fi.Size())
|
||||||
w.Header().Set("ETag", etag)
|
w.Header().Set("ETag", etag)
|
||||||
|
w.Header().Set("Last-Modified", fi.ModTime().UTC().Format(http.TimeFormat))
|
||||||
|
|
||||||
// 7. Content-Type by extension
|
|
||||||
ext := filepath.Ext(fi.Name())
|
ext := filepath.Ext(fi.Name())
|
||||||
contentType := mime.TypeByExtension(ext)
|
if contentType := mime.TypeByExtension(ext); contentType != "" {
|
||||||
if contentType == "" {
|
w.Header().Set("Content-Type", contentType)
|
||||||
contentType = "application/octet-stream"
|
} else {
|
||||||
|
w.Header().Set("Content-Type", "application/octet-stream")
|
||||||
}
|
}
|
||||||
w.Header().Set("Content-Type", contentType)
|
|
||||||
|
|
||||||
rs, ok := fRaw.(io.ReadSeeker)
|
// Handle File struct with direct streaming
|
||||||
if !ok {
|
if file, ok := fRaw.(*File); ok {
|
||||||
if r.Header.Get("Range") != "" {
|
// Handle nginx proxy (X-Accel-Redirect)
|
||||||
http.Error(w, "Range not supported", http.StatusRequestedRangeNotSatisfiable)
|
if file.content == nil && !file.isRar && h.cache.StreamWithRclone() {
|
||||||
|
link, err := file.getDownloadLink()
|
||||||
|
if err != nil || link == "" {
|
||||||
|
http.Error(w, "Could not fetch download link", http.StatusPreconditionFailed)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", fi.Name()))
|
||||||
|
w.Header().Set("X-Accel-Redirect", link)
|
||||||
|
w.Header().Set("X-Accel-Buffering", "no")
|
||||||
|
http.Redirect(w, r, link, http.StatusFound)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
w.Header().Set("Content-Length", fmt.Sprintf("%d", fi.Size()))
|
|
||||||
w.Header().Set("Last-Modified", fi.ModTime().UTC().Format(http.TimeFormat))
|
if err := file.StreamResponse(w, r); err != nil {
|
||||||
w.Header().Set("Accept-Ranges", "bytes")
|
var streamErr *streamError
|
||||||
ctx := r.Context()
|
if errors.As(err, &streamErr) {
|
||||||
done := make(chan struct{})
|
// Handle client disconnections silently (just debug log)
|
||||||
go func() {
|
if errors.Is(streamErr.Err, context.Canceled) || errors.Is(streamErr.Err, context.DeadlineExceeded) || streamErr.IsClientDisconnection {
|
||||||
defer close(done)
|
return // Don't log as error or try to write response
|
||||||
_, _ = io.Copy(w, fRaw)
|
}
|
||||||
}()
|
|
||||||
select {
|
if streamErr.StatusCode > 0 && !hasHeadersWritten(w) {
|
||||||
case <-ctx.Done():
|
http.Error(w, streamErr.Error(), streamErr.StatusCode)
|
||||||
h.logger.Debug().Msg("Client cancelled download")
|
} else {
|
||||||
return
|
h.logger.Error().
|
||||||
case <-done:
|
Err(streamErr.Err).
|
||||||
|
Str("path", r.URL.Path).
|
||||||
|
Msg("Stream error")
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// Generic error
|
||||||
|
if !hasHeadersWritten(w) {
|
||||||
|
http.Error(w, "Stream error", http.StatusInternalServerError)
|
||||||
|
} else {
|
||||||
|
h.logger.Error().
|
||||||
|
Err(err).
|
||||||
|
Str("path", r.URL.Path).
|
||||||
|
Msg("Stream error after headers written")
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
http.ServeContent(w, r, fi.Name(), fi.ModTime(), rs)
|
|
||||||
|
// Fallback to ServeContent for other webdav.File implementations
|
||||||
|
if rs, ok := fRaw.(io.ReadSeeker); ok {
|
||||||
|
http.ServeContent(w, r, fi.Name(), fi.ModTime(), rs)
|
||||||
|
} else {
|
||||||
|
w.Header().Set("Content-Length", fmt.Sprintf("%d", fi.Size()))
|
||||||
|
w.WriteHeader(http.StatusOK)
|
||||||
|
_, _ = io.Copy(w, fRaw)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (h *Handler) handleHead(w http.ResponseWriter, r *http.Request) {
|
func (h *Handler) handleHead(w http.ResponseWriter, r *http.Request) {
|
||||||
|
|||||||
@@ -1,6 +1,7 @@
|
|||||||
package webdav
|
package webdav
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"fmt"
|
||||||
"github.com/stanNthe5/stringbuf"
|
"github.com/stanNthe5/stringbuf"
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/url"
|
"net/url"
|
||||||
@@ -132,3 +133,122 @@ func writeXml(w http.ResponseWriter, status int, buf stringbuf.StringBuf) {
|
|||||||
w.WriteHeader(status)
|
w.WriteHeader(status)
|
||||||
_, _ = w.Write(buf.Bytes())
|
_, _ = w.Write(buf.Bytes())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func hasHeadersWritten(w http.ResponseWriter) bool {
|
||||||
|
// Most ResponseWriter implementations support this
|
||||||
|
if hw, ok := w.(interface{ Written() bool }); ok {
|
||||||
|
return hw.Written()
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// isClientDisconnection reports whether err looks like the client going
// away mid-stream (closed connection, cancelled context, EOF) rather
// than a genuine server-side failure. Matching is done on the error
// text because these errors surface from many different packages with
// no shared sentinel.
func isClientDisconnection(err error) bool {
	if err == nil {
		return false
	}
	msg := err.Error()
	// Common client-disconnection error patterns.
	patterns := []string{
		"broken pipe",
		"connection reset by peer",
		"write: connection reset",
		"read: connection reset",
		"context canceled",
		"context deadline exceeded",
		"client disconnected",
		"EOF",
	}
	for _, p := range patterns {
		if strings.Contains(msg, p) {
			return true
		}
	}
	return false
}
|
||||||
|
|
||||||
|
// httpRange describes one requested byte range, inclusive on both ends.
type httpRange struct{ start, end int64 }

// parseRange parses a Range request header (e.g. "bytes=0-499,-200")
// against a resource of the given size. It returns nil for an empty
// header, an error for malformed input, and silently drops ranges that
// start past the end of the resource.
func parseRange(s string, size int64) ([]httpRange, error) {
	if s == "" {
		return nil, nil
	}
	spec, ok := strings.CutPrefix(s, "bytes=")
	if !ok {
		return nil, fmt.Errorf("invalid range")
	}

	var out []httpRange
	for _, part := range strings.Split(spec, ",") {
		part = strings.TrimSpace(part)
		if part == "" {
			continue
		}
		startStr, endStr, found := strings.Cut(part, "-")
		if !found {
			return nil, fmt.Errorf("invalid range")
		}
		startStr = strings.TrimSpace(startStr)
		endStr = strings.TrimSpace(endStr)

		var r httpRange
		if startStr == "" {
			// Suffix form "-N": the final N bytes of the resource.
			n, err := strconv.ParseInt(endStr, 10, 64)
			if err != nil {
				return nil, fmt.Errorf("invalid range")
			}
			if n > size {
				n = size
			}
			r = httpRange{start: size - n, end: size - 1}
		} else {
			begin, err := strconv.ParseInt(startStr, 10, 64)
			if err != nil || begin < 0 {
				return nil, fmt.Errorf("invalid range")
			}
			r.start = begin
			if endStr == "" {
				// Open-ended "N-": through the last byte.
				r.end = size - 1
			} else {
				last, err := strconv.ParseInt(endStr, 10, 64)
				if err != nil || begin > last {
					return nil, fmt.Errorf("invalid range")
				}
				if last >= size {
					last = size - 1
				}
				r.end = last
			}
		}
		// A range beginning beyond EOF is unsatisfiable; skip it
		// rather than failing the whole header.
		if r.start > size-1 {
			continue
		}
		out = append(out, r)
	}
	return out, nil
}
|
||||||
|
|
||||||
|
func setVideoStreamingHeaders(req *http.Request) {
|
||||||
|
// Request optimizations for faster response
|
||||||
|
req.Header.Set("Accept", "*/*")
|
||||||
|
req.Header.Set("Accept-Encoding", "identity")
|
||||||
|
req.Header.Set("Connection", "keep-alive")
|
||||||
|
req.Header.Set("User-Agent", "VideoStream/1.0")
|
||||||
|
req.Header.Set("Priority", "u=1")
|
||||||
|
}
|
||||||
|
|
||||||
|
func setVideoResponseHeaders(w http.ResponseWriter, resp *http.Response, isRange bool) {
|
||||||
|
// Copy essential headers from upstream
|
||||||
|
if contentLength := resp.Header.Get("Content-Length"); contentLength != "" {
|
||||||
|
w.Header().Set("Content-Length", contentLength)
|
||||||
|
}
|
||||||
|
|
||||||
|
if contentRange := resp.Header.Get("Content-Range"); contentRange != "" && isRange {
|
||||||
|
w.Header().Set("Content-Range", contentRange)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Video streaming optimizations
|
||||||
|
w.Header().Set("Accept-Ranges", "bytes") // Enable seeking
|
||||||
|
w.Header().Set("Connection", "keep-alive") // Keep connection open
|
||||||
|
|
||||||
|
// Prevent buffering in proxies/CDNs
|
||||||
|
w.Header().Set("X-Accel-Buffering", "no") // Nginx
|
||||||
|
w.Header().Set("Proxy-Buffering", "off") // General proxy
|
||||||
|
|
||||||
|
w.Header().Set("Access-Control-Allow-Origin", "*")
|
||||||
|
w.Header().Set("Access-Control-Allow-Headers", "Range")
|
||||||
|
w.Header().Set("Access-Control-Expose-Headers", "Content-Length, Content-Range")
|
||||||
|
|
||||||
|
w.WriteHeader(resp.StatusCode)
|
||||||
|
}
|
||||||
|
|||||||
Reference in New Issue
Block a user