Fix issues with the repair flow; switch to a different streaming option

This commit is contained in:
Mukhtar Akere
2025-06-18 10:42:44 +01:00
parent 5661b05ec1
commit b2e99585f7
6 changed files with 507 additions and 275 deletions

View File

@@ -51,7 +51,7 @@ Create a `config.json` file in `/opt/decypharr/` with your Decypharr configurati
"folder": "/mnt/remote/realdebrid/__all__/",
"rate_limit": "250/minute",
"use_webdav": true,
"rc_url": "http://your-ip-address:5572" // Rclone RC URL
"rc_url": "http://your-ip-address:5572"
}
],
"qbittorrent": {
@@ -99,7 +99,7 @@ services:
decypharr:
condition: service_healthy
restart: true
command: "mount decypharr: /data --allow-non-empty --allow-other --dir-cache-time 10s --rc --rc-addr :5572 --rc-no-auth "
command: "mount decypharr: /data --allow-non-empty --allow-other --dir-cache-time 10s --rc --rc-addr :5572 --rc-no-auth"
```
#### Docker Notes

View File

@@ -111,6 +111,8 @@ func (c *Cache) GetBrokenFiles(t *CachedTorrent, filenames []string) []string {
}(f)
}
wg.Wait()
// Try to reinsert the torrent if it's broken
if len(brokenFiles) > 0 && t.Torrent != nil {
// Check if the torrent is already in progress

View File

@@ -108,7 +108,7 @@ func (q *QBit) handleTorrentsAdd(w http.ResponseWriter, r *http.Request) {
}
for _, url := range urlList {
if err := q.addMagnet(ctx, url, _arr, debridName, action); err != nil {
q.logger.Debug().Err(err).Msgf("Error adding magnet")
q.logger.Debug().Msgf("Error adding magnet: %s", err.Error())
http.Error(w, err.Error(), http.StatusBadRequest)
return
}

View File

@@ -27,35 +27,52 @@ var sharedClient = &http.Client{
Timeout: 0,
}
// streamError wraps an error raised while proxying a file stream and carries
// the HTTP status code the handler should report to the client.
// IsClientDisconnection flags errors caused by the client going away, which
// callers treat as benign instead of logging them as server faults.
type streamError struct {
	Err                   error
	StatusCode            int
	IsClientDisconnection bool
}

// Error implements the error interface by delegating to the wrapped error.
func (e *streamError) Error() string {
	return e.Err.Error()
}

// Unwrap exposes the wrapped error so errors.Is / errors.As keep working.
func (e *streamError) Unwrap() error {
	return e.Err
}
type File struct {
cache *store.Cache
fileId string
torrentName string
modTime time.Time
size int64
offset int64
isDir bool
children []os.FileInfo
reader io.ReadCloser
seekPending bool
content []byte
isRar bool
name string
metadataOnly bool
downloadLink string
torrentName string
link string
downloadLink string
size int64
isDir bool
fileId string
isRar bool
metadataOnly bool
content []byte
children []os.FileInfo // For directories
cache *store.Cache
modTime time.Time
// Minimal state for interface compliance only
readOffset int64 // Only used for Read() method compliance
}
// File interface implementations for File
func (f *File) Close() error {
if f.reader != nil {
f.reader.Close()
f.reader = nil
if f.isDir {
return nil // No resources to close for directories
}
// For files, we don't have any resources to close either
// This is just to satisfy the os.File interface
f.content = nil
f.children = nil
f.downloadLink = ""
f.readOffset = 0
return nil
}
@@ -84,211 +101,274 @@ func (f *File) getDownloadByteRange() (*[2]int64, error) {
return byteRange, nil
}
func (f *File) stream() (*http.Response, error) {
client := sharedClient
// servePreloadedContent serves files whose bytes are already held in memory
// (f.content), e.g. small generated files. It honors a single-range Range
// request with a 206, and otherwise writes the full body with a 200.
// Returns a *streamError with StatusRequestedRangeNotSatisfiable for an
// unsatisfiable or multi-part range, or any error from writing the body.
func (f *File) servePreloadedContent(w http.ResponseWriter, r *http.Request) error {
	content := f.content
	size := int64(len(content))
	// Handle range requests for preloaded content
	if rangeHeader := r.Header.Get("Range"); rangeHeader != "" {
		ranges, err := parseRange(rangeHeader, size)
		// Only a single satisfiable range is supported; anything else is 416.
		if err != nil || len(ranges) != 1 {
			w.Header().Set("Content-Range", fmt.Sprintf("bytes */%d", size))
			return &streamError{Err: fmt.Errorf("invalid range"), StatusCode: http.StatusRequestedRangeNotSatisfiable}
		}
		start, end := ranges[0].start, ranges[0].end
		w.Header().Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", start, end, size))
		w.Header().Set("Content-Length", fmt.Sprintf("%d", end-start+1))
		w.Header().Set("Accept-Ranges", "bytes")
		w.WriteHeader(http.StatusPartialContent)
		// end is inclusive, hence the end+1 slice bound.
		_, err = w.Write(content[start : end+1])
		return err
	}
	// Full content
	w.Header().Set("Content-Length", fmt.Sprintf("%d", size))
	w.Header().Set("Accept-Ranges", "bytes")
	w.WriteHeader(http.StatusOK)
	_, err := w.Write(content)
	return err
}
// StreamResponse writes the file's bytes to the client. Preloaded in-memory
// content (f.content != nil) is served directly; everything else is proxied
// from the remote download link via streamWithRetry, starting at retry 0.
func (f *File) StreamResponse(w http.ResponseWriter, r *http.Request) error {
	// Handle preloaded content files
	if f.content != nil {
		return f.servePreloadedContent(w, r)
	}
	// Try streaming with retry logic
	return f.streamWithRetry(w, r, 0)
}
func (f *File) streamWithRetry(w http.ResponseWriter, r *http.Request, retryCount int) error {
const maxRetries = 0
_log := f.cache.Logger()
// Get download link (with caching optimization)
downloadLink, err := f.getDownloadLink()
if err != nil {
_log.Trace().Msgf("Failed to get download link for %s: %v", f.name, err)
return nil, err
return &streamError{Err: err, StatusCode: http.StatusPreconditionFailed}
}
if downloadLink == "" {
_log.Trace().Msgf("Failed to get download link for %s. Empty download link", f.name)
return nil, fmt.Errorf("empty download link")
return &streamError{Err: fmt.Errorf("empty download link"), StatusCode: http.StatusNotFound}
}
byteRange, err := f.getDownloadByteRange()
// Create upstream request with streaming optimizations
upstreamReq, err := http.NewRequest("GET", downloadLink, nil)
if err != nil {
_log.Trace().Msgf("Failed to get download byte range for %s: %v", f.name, err)
return nil, err
return &streamError{Err: err, StatusCode: http.StatusInternalServerError}
}
req, err := http.NewRequest("GET", downloadLink, nil)
setVideoStreamingHeaders(upstreamReq)
// Handle range requests (critical for video seeking)
isRangeRequest := f.handleRangeRequest(upstreamReq, r, w)
if isRangeRequest == -1 {
return &streamError{Err: fmt.Errorf("invalid range"), StatusCode: http.StatusRequestedRangeNotSatisfiable}
}
resp, err := sharedClient.Do(upstreamReq)
if err != nil {
_log.Trace().Msgf("Failed to create HTTP request: %v", err)
return nil, err
return &streamError{Err: err, StatusCode: http.StatusServiceUnavailable}
}
defer resp.Body.Close()
// Handle upstream errors with retry logic
shouldRetry, retryErr := f.handleUpstream(resp, retryCount, maxRetries)
if shouldRetry && retryCount < maxRetries {
// Retry with new download link
_log.Debug().
Int("retry_count", retryCount+1).
Str("file", f.name).
Msg("Retrying stream request")
return f.streamWithRetry(w, r, retryCount+1)
}
if retryErr != nil {
return retryErr
}
if byteRange == nil {
req.Header.Set("Range", fmt.Sprintf("bytes=%d-", max(0, f.offset)))
} else {
req.Header.Set("Range", fmt.Sprintf("bytes=%d-", byteRange[0]+max(0, f.offset)))
}
setVideoResponseHeaders(w, resp, isRangeRequest == 1)
// Make the request
resp, err := client.Do(req)
if err != nil {
_log.Trace().Msgf("HTTP request failed: %v", err)
return nil, err
}
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent {
f.downloadLink = ""
cleanupResp := func(resp *http.Response) {
if resp.Body != nil {
_, _ = io.Copy(io.Discard, resp.Body)
resp.Body.Close()
}
}
switch resp.StatusCode {
case http.StatusServiceUnavailable:
// Read the body to check for specific error messages
body, readErr := io.ReadAll(resp.Body)
cleanupResp(resp)
if readErr != nil {
_log.Trace().Msgf("Failed to read response body: %v", readErr)
return nil, fmt.Errorf("failed to read error response: %w", readErr)
}
bodyStr := string(body)
if strings.Contains(bodyStr, "You can not download this file because you have exceeded your traffic on this hoster") {
_log.Trace().Msgf("Bandwidth exceeded for %s. Download token will be disabled if you have more than one", f.name)
f.cache.MarkDownloadLinkAsInvalid(f.link, downloadLink, "bandwidth_exceeded")
// Retry with a different API key if it's available
return f.stream()
}
return nil, fmt.Errorf("service unavailable: %s", bodyStr)
case http.StatusNotFound:
cleanupResp(resp)
// Mark download link as not found
// Regenerate a new download link
_log.Trace().Msgf("Link not found (404) for %s. Marking link as invalid and regenerating", f.name)
f.cache.MarkDownloadLinkAsInvalid(f.link, downloadLink, "link_not_found")
// Generate a new download link
downloadLink, err := f.getDownloadLink()
if err != nil {
_log.Trace().Msgf("Failed to get download link for %s. %s", f.name, err)
return nil, err
}
if downloadLink == "" {
_log.Trace().Msgf("Failed to get download link for %s", f.name)
return nil, fmt.Errorf("failed to regenerate download link")
}
req, err := http.NewRequest("GET", downloadLink, nil)
if err != nil {
return nil, err
}
// Set the range header again
if byteRange == nil {
req.Header.Set("Range", fmt.Sprintf("bytes=%d-", max(0, f.offset)))
} else {
req.Header.Set("Range", fmt.Sprintf("bytes=%d-", byteRange[0]+max(0, f.offset)))
}
newResp, err := client.Do(req)
if err != nil {
return nil, err
}
if newResp.StatusCode != http.StatusOK && newResp.StatusCode != http.StatusPartialContent {
cleanupResp(newResp)
_log.Trace().Msgf("Regenerated link also failed with status %d", newResp.StatusCode)
f.cache.MarkDownloadLinkAsInvalid(f.link, downloadLink, newResp.Status)
return nil, fmt.Errorf("failed with status code %d even after link regeneration", newResp.StatusCode)
}
return newResp, nil
default:
body, _ := io.ReadAll(resp.Body)
cleanupResp(resp)
_log.Trace().Msgf("Unexpected status code %d for %s: %s", resp.StatusCode, f.name, string(body))
return nil, fmt.Errorf("unexpected status code %d: %s", resp.StatusCode, string(body))
}
}
return resp, nil
// Stream with optimized buffering for video
return f.streamVideoOptimized(w, resp.Body)
}
func (f *File) Read(p []byte) (n int, err error) {
if f.isDir {
return 0, os.ErrInvalid
}
if f.metadataOnly {
return 0, io.EOF
}
if f.content != nil {
if f.offset >= int64(len(f.content)) {
return 0, io.EOF
}
n = copy(p, f.content[f.offset:])
f.offset += int64(n)
return n, nil
func (f *File) handleUpstream(resp *http.Response, retryCount, maxRetries int) (shouldRetry bool, err error) {
if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusPartialContent {
return false, nil
}
// If we haven't started streaming the file yet or need to reposition
if f.reader == nil || f.seekPending {
if f.reader != nil {
f.reader.Close()
f.reader = nil
_log := f.cache.Logger()
// Clean up response body properly
cleanupResp := func(resp *http.Response) {
if resp.Body != nil {
_, _ = io.Copy(io.Discard, resp.Body)
resp.Body.Close()
}
}
switch resp.StatusCode {
case http.StatusServiceUnavailable:
// Read the body to check for specific error messages
body, readErr := io.ReadAll(resp.Body)
cleanupResp(resp)
if readErr != nil {
_log.Error().Err(readErr).Msg("Failed to read response body")
return false, &streamError{
Err: fmt.Errorf("failed to read error response: %w", readErr),
StatusCode: http.StatusServiceUnavailable,
}
}
// Make the request to get the file
resp, err := f.stream()
if err != nil {
return 0, err
}
if resp == nil {
return 0, fmt.Errorf("stream returned nil response")
bodyStr := string(body)
if strings.Contains(bodyStr, "you have exceeded your traffic") {
_log.Debug().
Str("file", f.name).
Int("retry_count", retryCount).
Msg("Bandwidth exceeded. Marking link as invalid")
f.cache.MarkDownloadLinkAsInvalid(f.link, f.downloadLink, "bandwidth_exceeded")
// Retry with a different API key if available and we haven't exceeded retries
if retryCount < maxRetries {
return true, nil
}
return false, &streamError{
Err: fmt.Errorf("bandwidth exceeded after %d retries", retryCount),
StatusCode: http.StatusServiceUnavailable,
}
}
f.reader = resp.Body
f.seekPending = false
}
return false, &streamError{
Err: fmt.Errorf("service unavailable: %s", bodyStr),
StatusCode: http.StatusServiceUnavailable,
}
n, err = f.reader.Read(p)
f.offset += int64(n)
case http.StatusNotFound:
cleanupResp(resp)
if err != nil {
f.reader.Close()
f.reader = nil
}
_log.Debug().
Str("file", f.name).
Int("retry_count", retryCount).
Msg("Link not found (404). Marking link as invalid and regenerating")
return n, err
}
f.cache.MarkDownloadLinkAsInvalid(f.link, f.downloadLink, "link_not_found")
func (f *File) Seek(offset int64, whence int) (int64, error) {
if f.isDir {
return 0, os.ErrInvalid
}
// Try to regenerate download link if we haven't exceeded retries
if retryCount < maxRetries {
// Clear cached link to force regeneration
f.downloadLink = ""
return true, nil
}
return false, &streamError{
Err: fmt.Errorf("file not found after %d retries", retryCount),
StatusCode: http.StatusNotFound,
}
newOffset := f.offset
switch whence {
case io.SeekStart:
newOffset = offset
case io.SeekCurrent:
newOffset += offset
case io.SeekEnd:
newOffset = f.size + offset
default:
return 0, os.ErrInvalid
}
body, _ := io.ReadAll(resp.Body)
cleanupResp(resp)
if newOffset < 0 {
newOffset = 0
}
if newOffset > f.size {
newOffset = f.size
}
_log.Error().
Int("status_code", resp.StatusCode).
Str("file", f.name).
Str("response_body", string(body)).
Msg("Unexpected upstream error")
// Only mark seek as pending if position actually changed
if newOffset != f.offset {
f.offset = newOffset
f.seekPending = true
return false, &streamError{
Err: fmt.Errorf("upstream error %d: %s", resp.StatusCode, string(body)),
StatusCode: http.StatusBadGateway,
}
}
return f.offset, nil
}
// handleRangeRequest translates the client's Range header into the Range
// header sent upstream, shifting by the file's internal byte range (from
// getDownloadByteRange) when one exists — presumably the file's offset
// within a containing archive; confirm against getDownloadByteRange.
// Return codes: 1 = valid client range applied, 0 = no Range header,
// -1 = unsatisfiable range (caller should respond 416; the Content-Range
// "bytes */size" header has already been set here).
func (f *File) handleRangeRequest(upstreamReq *http.Request, r *http.Request, w http.ResponseWriter) int {
	rangeHeader := r.Header.Get("Range")
	if rangeHeader == "" {
		// For video files, apply byte range if exists
		if byteRange, _ := f.getDownloadByteRange(); byteRange != nil {
			upstreamReq.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", byteRange[0], byteRange[1]))
		}
		return 0 // No range request
	}
	// Parse range request
	ranges, err := parseRange(rangeHeader, f.size)
	if err != nil || len(ranges) != 1 {
		w.Header().Set("Content-Range", fmt.Sprintf("bytes */%d", f.size))
		return -1 // Invalid range
	}
	// Apply byte range offset if exists
	byteRange, _ := f.getDownloadByteRange()
	start, end := ranges[0].start, ranges[0].end
	if byteRange != nil {
		start += byteRange[0]
		end += byteRange[0]
	}
	upstreamReq.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", start, end))
	return 1 // Valid range request
}
// streamVideoOptimized copies src to the response writer with a 64KB buffer.
// The first chunk is written and flushed immediately so playback can start
// before the rest of the body arrives; the remainder goes through
// io.CopyBuffer reusing the same buffer. Errors are wrapped in *streamError,
// with IsClientDisconnection set when the client appears to have gone away.
func (f *File) streamVideoOptimized(w http.ResponseWriter, src io.Reader) error {
	// Use larger buffer for video streaming (better throughput)
	buf := make([]byte, 64*1024) // 64KB buffer
	// First chunk optimization - send immediately for faster start
	n, err := src.Read(buf)
	if err != nil && err != io.EOF {
		if isClientDisconnection(err) {
			return &streamError{Err: err, StatusCode: 0, IsClientDisconnection: true}
		}
		return &streamError{Err: err, StatusCode: 0}
	}
	if n > 0 {
		// Write first chunk immediately
		_, writeErr := w.Write(buf[:n])
		if writeErr != nil {
			if isClientDisconnection(writeErr) {
				return &streamError{Err: writeErr, StatusCode: 0, IsClientDisconnection: true}
			}
			return &streamError{Err: writeErr, StatusCode: 0}
		}
		// Flush immediately for faster video start
		if flusher, ok := w.(http.Flusher); ok {
			flusher.Flush()
		}
	}
	// Read may return n > 0 together with io.EOF; the chunk above is
	// already written, so EOF here means we are done.
	if err == io.EOF {
		return nil
	}
	// Continue with optimized copy for remaining data
	_, err = io.CopyBuffer(w, src, buf)
	if err != nil {
		if isClientDisconnection(err) {
			return &streamError{Err: err, StatusCode: 0, IsClientDisconnection: true}
		}
		return &streamError{Err: err, StatusCode: 0}
	}
	return nil
}
/*
These are the methods that implement the os.File interface for the File type.
Only Stat and ReadDir are used
*/
func (f *File) Stat() (os.FileInfo, error) {
if f.isDir {
return &FileInfo{
@@ -309,18 +389,61 @@ func (f *File) Stat() (os.FileInfo, error) {
}, nil
}
func (f *File) ReadAt(p []byte, off int64) (n int, err error) {
// Save current position
// Seek to requested position
_, err = f.Seek(off, io.SeekStart)
if err != nil {
return 0, err
func (f *File) Read(p []byte) (n int, err error) {
if f.isDir {
return 0, os.ErrInvalid
}
// Read the data
n, err = f.Read(p)
return n, err
if f.metadataOnly {
return 0, io.EOF
}
// For preloaded content files (like version.txt)
if f.content != nil {
if f.readOffset >= int64(len(f.content)) {
return 0, io.EOF
}
n = copy(p, f.content[f.readOffset:])
f.readOffset += int64(n)
return n, nil
}
// For streaming files, return an error to force use of StreamResponse
return 0, fmt.Errorf("use StreamResponse method for streaming files")
}
// Seek repositions the read offset for preloaded in-memory content only.
// Directories are not seekable, and streaming files must be served through
// StreamResponse, so both cases return an error. Out-of-bounds targets are
// clamped into [0, len(content)] rather than rejected.
func (f *File) Seek(offset int64, whence int) (int64, error) {
	if f.isDir {
		return 0, os.ErrInvalid
	}
	if f.content == nil {
		// For streaming files, return error to force use of StreamResponse
		return 0, fmt.Errorf("use StreamResponse method for streaming files")
	}
	contentLen := int64(len(f.content))
	var target int64
	switch whence {
	case io.SeekStart:
		target = offset
	case io.SeekCurrent:
		target = f.readOffset + offset
	case io.SeekEnd:
		target = contentLen + offset
	default:
		return 0, os.ErrInvalid
	}
	// Clamp the new position into the valid range instead of erroring.
	if target < 0 {
		target = 0
	} else if target > contentLen {
		target = contentLen
	}
	f.readOffset = target
	return f.readOffset, nil
}
func (f *File) Write(p []byte) (n int, err error) {

View File

@@ -2,6 +2,7 @@ package webdav
import (
"context"
"errors"
"fmt"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
"golang.org/x/net/webdav"
@@ -415,104 +416,90 @@ func (h *Handler) serveDirectory(w http.ResponseWriter, r *http.Request, file we
func (h *Handler) handleGet(w http.ResponseWriter, r *http.Request) {
fRaw, err := h.OpenFile(r.Context(), r.URL.Path, os.O_RDONLY, 0)
if err != nil {
h.logger.Error().Err(err).
Str("path", r.URL.Path).
Msg("Failed to open file")
http.NotFound(w, r)
return
}
defer func(fRaw webdav.File) {
err := fRaw.Close()
if err != nil {
h.logger.Error().Err(err).Msg("Failed to close file")
return
}
}(fRaw)
defer fRaw.Close()
fi, err := fRaw.Stat()
if err != nil {
h.logger.Error().Err(err).Msg("Failed to stat file")
http.Error(w, "Server Error", http.StatusInternalServerError)
return
}
// If the target is a directory, use your directory listing logic.
if fi.IsDir() {
h.serveDirectory(w, r, fRaw)
return
}
// Checks if the file is a torrent file
// .content is nil if the file is a torrent file
// .content means file is preloaded, e.g version.txt
if file, ok := fRaw.(*File); ok && file.content == nil {
link, err := file.getDownloadLink()
if err != nil {
h.logger.Debug().
Err(err).
Str("link", file.link).
Str("path", r.URL.Path).
Msg("Could not fetch download link")
http.Error(w, "Could not fetch download link", http.StatusPreconditionFailed)
return
}
if link == "" {
http.NotFound(w, r)
return
}
file.downloadLink = link
// If the torrent file is not a RAR file and users enabled proxy streaming
if !file.isRar && h.cache.StreamWithRclone() {
w.Header().Set("Cache-Control", "no-cache, no-store, must-revalidate")
w.Header().Set("Pragma", "no-cache")
w.Header().Set("Expires", "0")
w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", fi.Name()))
w.Header().Set("Content-Length", fmt.Sprintf("%d", fi.Size()))
w.Header().Set("Last-Modified", fi.ModTime().UTC().Format(http.TimeFormat))
w.Header().Set("Accept-Ranges", "bytes")
w.Header().Set("X-Accel-Redirect", file.downloadLink)
w.Header().Set("X-Accel-Buffering", "no")
http.Redirect(w, r, file.downloadLink, http.StatusFound)
return
}
}
// ETags
// Set common headers
etag := fmt.Sprintf("\"%x-%x\"", fi.ModTime().Unix(), fi.Size())
w.Header().Set("ETag", etag)
w.Header().Set("Last-Modified", fi.ModTime().UTC().Format(http.TimeFormat))
// 7. Content-Type by extension
ext := filepath.Ext(fi.Name())
contentType := mime.TypeByExtension(ext)
if contentType == "" {
contentType = "application/octet-stream"
if contentType := mime.TypeByExtension(ext); contentType != "" {
w.Header().Set("Content-Type", contentType)
} else {
w.Header().Set("Content-Type", "application/octet-stream")
}
w.Header().Set("Content-Type", contentType)
rs, ok := fRaw.(io.ReadSeeker)
if !ok {
if r.Header.Get("Range") != "" {
http.Error(w, "Range not supported", http.StatusRequestedRangeNotSatisfiable)
// Handle File struct with direct streaming
if file, ok := fRaw.(*File); ok {
// Handle nginx proxy (X-Accel-Redirect)
if file.content == nil && !file.isRar && h.cache.StreamWithRclone() {
link, err := file.getDownloadLink()
if err != nil || link == "" {
http.Error(w, "Could not fetch download link", http.StatusPreconditionFailed)
return
}
w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", fi.Name()))
w.Header().Set("X-Accel-Redirect", link)
w.Header().Set("X-Accel-Buffering", "no")
http.Redirect(w, r, link, http.StatusFound)
return
}
w.Header().Set("Content-Length", fmt.Sprintf("%d", fi.Size()))
w.Header().Set("Last-Modified", fi.ModTime().UTC().Format(http.TimeFormat))
w.Header().Set("Accept-Ranges", "bytes")
ctx := r.Context()
done := make(chan struct{})
go func() {
defer close(done)
_, _ = io.Copy(w, fRaw)
}()
select {
case <-ctx.Done():
h.logger.Debug().Msg("Client cancelled download")
return
case <-done:
if err := file.StreamResponse(w, r); err != nil {
var streamErr *streamError
if errors.As(err, &streamErr) {
// Handle client disconnections silently (just debug log)
if errors.Is(streamErr.Err, context.Canceled) || errors.Is(streamErr.Err, context.DeadlineExceeded) || streamErr.IsClientDisconnection {
return // Don't log as error or try to write response
}
if streamErr.StatusCode > 0 && !hasHeadersWritten(w) {
http.Error(w, streamErr.Error(), streamErr.StatusCode)
} else {
h.logger.Error().
Err(streamErr.Err).
Str("path", r.URL.Path).
Msg("Stream error")
}
} else {
// Generic error
if !hasHeadersWritten(w) {
http.Error(w, "Stream error", http.StatusInternalServerError)
} else {
h.logger.Error().
Err(err).
Str("path", r.URL.Path).
Msg("Stream error after headers written")
}
}
}
return
}
http.ServeContent(w, r, fi.Name(), fi.ModTime(), rs)
// Fallback to ServeContent for other webdav.File implementations
if rs, ok := fRaw.(io.ReadSeeker); ok {
http.ServeContent(w, r, fi.Name(), fi.ModTime(), rs)
} else {
w.Header().Set("Content-Length", fmt.Sprintf("%d", fi.Size()))
w.WriteHeader(http.StatusOK)
_, _ = io.Copy(w, fRaw)
}
}
func (h *Handler) handleHead(w http.ResponseWriter, r *http.Request) {

View File

@@ -1,6 +1,7 @@
package webdav
import (
"fmt"
"github.com/stanNthe5/stringbuf"
"net/http"
"net/url"
@@ -132,3 +133,122 @@ func writeXml(w http.ResponseWriter, status int, buf stringbuf.StringBuf) {
w.WriteHeader(status)
_, _ = w.Write(buf.Bytes())
}
func hasHeadersWritten(w http.ResponseWriter) bool {
// Most ResponseWriter implementations support this
if hw, ok := w.(interface{ Written() bool }); ok {
return hw.Written()
}
return false
}
func isClientDisconnection(err error) bool {
if err == nil {
return false
}
errStr := err.Error()
// Common client disconnection error patterns
return strings.Contains(errStr, "broken pipe") ||
strings.Contains(errStr, "connection reset by peer") ||
strings.Contains(errStr, "write: connection reset") ||
strings.Contains(errStr, "read: connection reset") ||
strings.Contains(errStr, "context canceled") ||
strings.Contains(errStr, "context deadline exceeded") ||
strings.Contains(errStr, "client disconnected") ||
strings.Contains(errStr, "EOF")
}
// httpRange is a single inclusive byte range [start, end] within a resource.
type httpRange struct{ start, end int64 }

// parseRange parses an HTTP Range header value ("bytes=start-end[,...]")
// against a resource of the given size. It returns the satisfiable ranges
// (each with inclusive start/end), an empty slice when every requested range
// begins past the end of the resource, nil for an empty header, or an error
// for malformed input. Suffix ranges ("bytes=-N") and open-ended ranges
// ("bytes=N-") are supported; ends past the resource are clamped to size-1.
func parseRange(s string, size int64) ([]httpRange, error) {
	if s == "" {
		return nil, nil
	}
	const b = "bytes="
	if !strings.HasPrefix(s, b) {
		return nil, fmt.Errorf("invalid range")
	}
	var ranges []httpRange
	for _, ra := range strings.Split(s[len(b):], ",") {
		ra = strings.TrimSpace(ra)
		if ra == "" {
			continue
		}
		i := strings.Index(ra, "-")
		if i < 0 {
			return nil, fmt.Errorf("invalid range")
		}
		start, end := strings.TrimSpace(ra[:i]), strings.TrimSpace(ra[i+1:])
		var r httpRange
		if start == "" {
			// Suffix range "-N": the last N bytes of the resource.
			i, err := strconv.ParseInt(end, 10, 64)
			if err != nil {
				return nil, fmt.Errorf("invalid range")
			}
			// A suffix longer than the resource means the whole resource.
			if i > size {
				i = size
			}
			r.start = size - i
			r.end = size - 1
		} else {
			i, err := strconv.ParseInt(start, 10, 64)
			if err != nil || i < 0 {
				return nil, fmt.Errorf("invalid range")
			}
			r.start = i
			if end == "" {
				// Open-ended "N-": through the last byte.
				r.end = size - 1
			} else {
				i, err := strconv.ParseInt(end, 10, 64)
				// end must parse and not precede start.
				if err != nil || r.start > i {
					return nil, fmt.Errorf("invalid range")
				}
				// Clamp ends past the resource to the last byte.
				if i >= size {
					i = size - 1
				}
				r.end = i
			}
		}
		// Ranges starting past the end are unsatisfiable: skip, don't error.
		if r.start > size-1 {
			continue
		}
		ranges = append(ranges, r)
	}
	return ranges, nil
}
func setVideoStreamingHeaders(req *http.Request) {
// Request optimizations for faster response
req.Header.Set("Accept", "*/*")
req.Header.Set("Accept-Encoding", "identity")
req.Header.Set("Connection", "keep-alive")
req.Header.Set("User-Agent", "VideoStream/1.0")
req.Header.Set("Priority", "u=1")
}
func setVideoResponseHeaders(w http.ResponseWriter, resp *http.Response, isRange bool) {
// Copy essential headers from upstream
if contentLength := resp.Header.Get("Content-Length"); contentLength != "" {
w.Header().Set("Content-Length", contentLength)
}
if contentRange := resp.Header.Get("Content-Range"); contentRange != "" && isRange {
w.Header().Set("Content-Range", contentRange)
}
// Video streaming optimizations
w.Header().Set("Accept-Ranges", "bytes") // Enable seeking
w.Header().Set("Connection", "keep-alive") // Keep connection open
// Prevent buffering in proxies/CDNs
w.Header().Set("X-Accel-Buffering", "no") // Nginx
w.Header().Set("Proxy-Buffering", "off") // General proxy
w.Header().Set("Access-Control-Allow-Origin", "*")
w.Header().Set("Access-Control-Allow-Headers", "Range")
w.Header().Set("Access-Control-Expose-Headers", "Content-Length, Content-Range")
w.WriteHeader(resp.StatusCode)
}