Initialize WebDAV server

This commit is contained in:
Mukhtar Akere
2025-03-18 10:02:10 +01:00
parent fa469c64c6
commit 5d2fabe20b
39 changed files with 1650 additions and 1141 deletions
-90
View File
@@ -1,90 +0,0 @@
package cache
import (
"sync"
)
// Cache is a bounded, concurrency-safe set of strings with FIFO eviction:
// once maxItems entries are stored, adding a new value evicts the oldest
// one. Membership checks are O(1) via the data map; order records
// insertion order (oldest first) for eviction and indexed access.
type Cache struct {
	data     map[string]struct{} // membership set
	order    []string            // insertion order, oldest first
	maxItems int                 // eviction threshold; always > 0
	mu       sync.RWMutex        // guards data and order
}

// New returns an empty Cache that holds at most maxItems entries.
// A non-positive maxItems falls back to a default capacity of 1000.
func New(maxItems int) *Cache {
	if maxItems <= 0 {
		maxItems = 1000
	}
	return &Cache{
		data:     make(map[string]struct{}, maxItems),
		order:    make([]string, 0, maxItems),
		maxItems: maxItems,
	}
}

// addLocked inserts value if it is not already present, evicting the
// oldest entry first when the cache is full. The caller must hold c.mu
// for writing. Shared by Add and AddMultiple so the eviction logic
// lives in exactly one place.
func (c *Cache) addLocked(value string) {
	if _, exists := c.data[value]; exists {
		return
	}
	if len(c.order) >= c.maxItems {
		delete(c.data, c.order[0])
		c.order = c.order[1:]
	}
	c.data[value] = struct{}{}
	c.order = append(c.order, value)
}

// Add inserts value into the cache, evicting the oldest entry if the
// cache is full. Adding a value that is already present is a no-op.
func (c *Cache) Add(value string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.addLocked(value)
}

// AddMultiple inserts every key of values whose mapped bool is false
// (i.e. values not yet known elsewhere, such as the complement of a
// GetMultiple result). Keys mapped to true are skipped. Insertion
// order of the added keys follows Go's map iteration order and is
// therefore unspecified.
func (c *Cache) AddMultiple(values map[string]bool) {
	c.mu.Lock()
	defer c.mu.Unlock()
	for value, present := range values {
		if !present {
			c.addLocked(value)
		}
	}
}

// Get returns the value at the given insertion-order index (0 is the
// oldest entry). The second return is false when index is out of range.
func (c *Cache) Get(index int) (string, bool) {
	c.mu.RLock()
	defer c.mu.RUnlock()
	if index < 0 || index >= len(c.order) {
		return "", false
	}
	return c.order[index], true
}

// GetMultiple reports which of the given values are present. The result
// contains an entry (mapped to true) only for values found in the cache.
func (c *Cache) GetMultiple(values []string) map[string]bool {
	c.mu.RLock()
	defer c.mu.RUnlock()
	result := make(map[string]bool, len(values))
	for _, value := range values {
		if _, exists := c.data[value]; exists {
			result[value] = true
		}
	}
	return result
}

// Exists reports whether value is currently in the cache.
func (c *Cache) Exists(value string) bool {
	c.mu.RLock()
	defer c.mu.RUnlock()
	_, exists := c.data[value]
	return exists
}

// Len returns the number of entries currently in the cache.
func (c *Cache) Len() int {
	c.mu.RLock()
	defer c.mu.RUnlock()
	return len(c.order)
}
+1 -1
View File
@@ -1,9 +1,9 @@
package config
import (
"encoding/json"
"errors"
"fmt"
"github.com/goccy/go-json"
"os"
"path/filepath"
"sync"
+3 -3
View File
@@ -29,7 +29,7 @@ func GetLogPath() string {
return filepath.Join(logsDir, "decypharr.log")
}
func NewLogger(prefix string, level string, output *os.File) zerolog.Logger {
func NewLogger(prefix string, level string) zerolog.Logger {
rotatingLogFile := &lumberjack.Logger{
Filename: GetLogPath(),
@@ -39,7 +39,7 @@ func NewLogger(prefix string, level string, output *os.File) zerolog.Logger {
}
consoleWriter := zerolog.ConsoleWriter{
Out: output,
Out: os.Stdout,
TimeFormat: "2006-01-02 15:04:05",
NoColor: false, // Set to true if you don't want colors
FormatLevel: func(i interface{}) string {
@@ -87,7 +87,7 @@ func NewLogger(prefix string, level string, output *os.File) zerolog.Logger {
func GetDefaultLogger() zerolog.Logger {
once.Do(func() {
cfg := config.GetConfig()
logger = NewLogger("decypharr", cfg.LogLevel, os.Stdout)
logger = NewLogger("decypharr", cfg.LogLevel)
})
return logger
}
+1 -1
View File
@@ -2,8 +2,8 @@ package request
import (
"bytes"
"encoding/json"
"fmt"
"github.com/goccy/go-json"
"github.com/sirrobot01/debrid-blackhole/internal/config"
"io"
"net/http"
+181 -61
View File
@@ -1,13 +1,18 @@
package request
import (
"bytes"
"context"
"crypto/tls"
"encoding/json"
"errors"
"fmt"
"github.com/goccy/go-json"
"github.com/rs/zerolog"
"github.com/sirrobot01/debrid-blackhole/internal/config"
"github.com/sirrobot01/debrid-blackhole/internal/logger"
"golang.org/x/time/rate"
"io"
"log"
"math"
"math/rand"
"net/http"
"net/url"
"regexp"
@@ -35,103 +40,216 @@ func JoinURL(base string, paths ...string) (string, error) {
return joined, nil
}
type RLHTTPClient struct {
client *http.Client
Ratelimiter *rate.Limiter
Headers map[string]string
type ClientOption func(*Client)
// Client represents an HTTP client with additional capabilities
type Client struct {
client *http.Client
rateLimiter *rate.Limiter
headers map[string]string
maxRetries int
timeout time.Duration
skipTLSVerify bool
retryableStatus map[int]bool
logger zerolog.Logger
}
func (c *RLHTTPClient) Doer(req *http.Request) (*http.Response, error) {
if c.Ratelimiter != nil {
err := c.Ratelimiter.Wait(req.Context())
// WithMaxRetries sets the maximum number of retry attempts
func (c *Client) WithMaxRetries(retries int) *Client {
c.maxRetries = retries
return c
}
// WithTimeout sets the request timeout
func (c *Client) WithTimeout(timeout time.Duration) *Client {
c.timeout = timeout
return c
}
// WithRateLimiter sets a rate limiter
func (c *Client) WithRateLimiter(rl *rate.Limiter) *Client {
c.rateLimiter = rl
return c
}
// WithHeaders sets default headers
func (c *Client) WithHeaders(headers map[string]string) *Client {
c.headers = headers
return c
}
func (c *Client) WithLogger(logger zerolog.Logger) *Client {
c.logger = logger
return c
}
// WithRetryableStatus adds status codes that should trigger a retry
func (c *Client) WithRetryableStatus(statusCodes ...int) *Client {
for _, code := range statusCodes {
c.retryableStatus[code] = true
}
return c
}
// doRequest performs a single HTTP request with rate limiting
func (c *Client) doRequest(req *http.Request) (*http.Response, error) {
if c.rateLimiter != nil {
err := c.rateLimiter.Wait(req.Context())
if err != nil {
return nil, err
return nil, fmt.Errorf("rate limiter wait: %w", err)
}
}
resp, err := c.client.Do(req)
if err != nil {
return nil, err
}
return resp, nil
return c.client.Do(req)
}
func (c *RLHTTPClient) Do(req *http.Request) (*http.Response, error) {
var resp *http.Response
// Do performs an HTTP request with retries for certain status codes
func (c *Client) Do(req *http.Request) (*http.Response, error) {
// Save the request body for reuse in retries
var bodyBytes []byte
var err error
backoff := time.Millisecond * 500
for i := 0; i < 3; i++ {
resp, err = c.Doer(req)
if req.Body != nil {
bodyBytes, err = io.ReadAll(req.Body)
if err != nil {
return nil, fmt.Errorf("reading request body: %w", err)
}
req.Body.Close()
}
// Apply timeout to the request context if not already present
if c.timeout > 0 {
var cancel context.CancelFunc
ctx := req.Context()
ctx, cancel = context.WithTimeout(ctx, c.timeout)
defer cancel()
req = req.WithContext(ctx)
}
backoff := time.Millisecond * 500
var resp *http.Response
for attempt := 0; attempt <= c.maxRetries; attempt++ {
// Reset the request body if it exists
if bodyBytes != nil {
req.Body = io.NopCloser(bytes.NewReader(bodyBytes))
}
// Apply headers
if c.headers != nil {
for key, value := range c.headers {
req.Header.Set(key, value)
}
}
resp, err = c.doRequest(req)
if err != nil {
// Check if this is a network error that might be worth retrying
if attempt < c.maxRetries {
// Apply backoff with jitter
jitter := time.Duration(rand.Int63n(int64(backoff / 4)))
sleepTime := backoff + jitter
select {
case <-req.Context().Done():
return nil, req.Context().Err()
case <-time.After(sleepTime):
// Continue to next retry attempt
}
// Exponential backoff
backoff *= 2
continue
}
return nil, err
}
if resp.StatusCode != http.StatusTooManyRequests {
// Check if the status code is retryable
if !c.retryableStatus[resp.StatusCode] || attempt == c.maxRetries {
return resp, nil
}
// Close the response body to prevent resource leakage
// Close the response body before retrying
resp.Body.Close()
// Wait for the backoff duration before retrying
time.Sleep(backoff)
// Apply backoff with jitter
jitter := time.Duration(rand.Int63n(int64(backoff / 4)))
sleepTime := backoff + jitter
select {
case <-req.Context().Done():
return nil, req.Context().Err()
case <-time.After(sleepTime):
// Continue to next retry attempt
}
// Exponential backoff
backoff *= 2
}
return resp, fmt.Errorf("max retries exceeded")
return nil, fmt.Errorf("max retries exceeded")
}
func (c *RLHTTPClient) MakeRequest(req *http.Request) ([]byte, error) {
if c.Headers != nil {
for key, value := range c.Headers {
req.Header.Set(key, value)
}
}
// MakeRequest performs an HTTP request and returns the response body as bytes
func (c *Client) MakeRequest(req *http.Request) ([]byte, error) {
res, err := c.Do(req)
if err != nil {
return nil, err
}
defer func(Body io.ReadCloser) {
err := Body.Close()
if err != nil {
log.Println(err)
defer func() {
if err := res.Body.Close(); err != nil {
c.logger.Printf("Failed to close response body: %v", err)
}
}(res.Body)
}()
b, err := io.ReadAll(res.Body)
bodyBytes, err := io.ReadAll(res.Body)
if err != nil {
return nil, err
}
statusOk := res.StatusCode >= 200 && res.StatusCode < 300
if !statusOk {
// Add status code error to the body
b = append(b, []byte(fmt.Sprintf("\nstatus code: %d", res.StatusCode))...)
return nil, errors.New(string(b))
return nil, fmt.Errorf("reading response body: %w", err)
}
return b, nil
if res.StatusCode < 200 || res.StatusCode >= 300 {
return nil, fmt.Errorf("HTTP error %d: %s", res.StatusCode, string(bodyBytes))
}
return bodyBytes, nil
}
func NewRLHTTPClient(rl *rate.Limiter, headers map[string]string) *RLHTTPClient {
tr := &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
}
c := &RLHTTPClient{
client: &http.Client{
Transport: tr,
// New creates a new HTTP client with the specified options
func New(options ...ClientOption) *Client {
client := &Client{
maxRetries: 3,
skipTLSVerify: true,
retryableStatus: map[int]bool{
http.StatusTooManyRequests: true,
http.StatusInternalServerError: true,
http.StatusBadGateway: true,
http.StatusServiceUnavailable: true,
http.StatusGatewayTimeout: true,
},
logger: logger.NewLogger("request", config.GetConfig().LogLevel),
}
if rl != nil {
c.Ratelimiter = rl
// Apply options
for _, option := range options {
option(client)
}
if headers != nil {
c.Headers = headers
// Create transport
transport := &http.Transport{
TLSClientConfig: &tls.Config{
InsecureSkipVerify: client.skipTLSVerify,
},
Proxy: http.ProxyFromEnvironment,
}
return c
// Create HTTP client
client.client = &http.Client{
Transport: transport,
Timeout: client.timeout,
}
return client
}
func ParseRateLimit(rateStr string) *rate.Limiter {
@@ -153,9 +271,11 @@ func ParseRateLimit(rateStr string) *rate.Limiter {
switch unit {
case "minute":
reqsPerSecond := float64(count) / 60.0
return rate.NewLimiter(rate.Limit(reqsPerSecond), 5)
burstSize := int(math.Max(30, float64(count)*0.25))
return rate.NewLimiter(rate.Limit(reqsPerSecond), burstSize)
case "second":
return rate.NewLimiter(rate.Limit(float64(count)), 5)
burstSize := int(math.Max(30, float64(count)*5))
return rate.NewLimiter(rate.Limit(float64(count)), burstSize)
default:
return nil
}