From 2d29996d2cf99084e6ba3ec67d926da9805afc25 Mon Sep 17 00:00:00 2001 From: Mukhtar Akere Date: Sat, 15 Mar 2025 21:08:15 +0100 Subject: [PATCH 01/39] experimental --- cmd/decypharr/main.go | 13 +- go.mod | 18 +- go.sum | 32 +++ pkg/debrid/alldebrid/alldebrid.go | 2 +- pkg/debrid/cache/cache.go | 272 +++++++++++++++++--------- pkg/debrid/debrid_link/debrid_link.go | 2 +- pkg/debrid/realdebrid/realdebrid.go | 21 +- pkg/debrid/torbox/torbox.go | 2 +- pkg/service/service.go | 15 +- pkg/webdav/handler.go | 15 +- pkg/webdav/webdav.go | 16 +- 11 files changed, 281 insertions(+), 127 deletions(-) diff --git a/cmd/decypharr/main.go b/cmd/decypharr/main.go index 9aaaea0..ac42c6a 100644 --- a/cmd/decypharr/main.go +++ b/cmd/decypharr/main.go @@ -11,6 +11,7 @@ import ( "github.com/sirrobot01/debrid-blackhole/pkg/service" "github.com/sirrobot01/debrid-blackhole/pkg/version" "github.com/sirrobot01/debrid-blackhole/pkg/web" + "github.com/sirrobot01/debrid-blackhole/pkg/webdav" "github.com/sirrobot01/debrid-blackhole/pkg/worker" "runtime/debug" "sync" @@ -30,12 +31,16 @@ func Start(ctx context.Context) error { svc := service.New() _qbit := qbit.New() srv := server.New() - webRoutes := web.New(_qbit).Routes() + _webdav := webdav.New() + + ui := web.New(_qbit).Routes() + webdavRoutes := _webdav.Routes() qbitRoutes := _qbit.Routes() // Register routes - srv.Mount("/", webRoutes) + srv.Mount("/", ui) srv.Mount("/api/v2", qbitRoutes) + srv.Mount("/webdav", webdavRoutes) safeGo := func(f func() error) { wg.Add(1) @@ -66,6 +71,10 @@ func Start(ctx context.Context) error { }) } + safeGo(func() error { + return _webdav.Start(ctx) + }) + safeGo(func() error { return srv.Start(ctx) }) diff --git a/go.mod b/go.mod index 8398fe2..9f7940b 100644 --- a/go.mod +++ b/go.mod @@ -15,7 +15,7 @@ require ( github.com/rs/zerolog v1.33.0 github.com/valyala/fastjson v1.6.4 golang.org/x/crypto v0.33.0 - golang.org/x/net v0.33.0 + golang.org/x/net v0.35.0 golang.org/x/sync v0.11.0 golang.org/x/time v0.8.0 
gopkg.in/natefinch/lumberjack.v2 v2.2.1 @@ -25,15 +25,29 @@ require ( github.com/anacrolix/missinggo v1.3.0 // indirect github.com/anacrolix/missinggo/v2 v2.7.3 // indirect github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/dgraph-io/badger/v4 v4.6.0 // indirect + github.com/dgraph-io/ristretto/v2 v2.1.0 // indirect + github.com/dustin/go-humanize v1.0.1 // indirect + github.com/go-logr/logr v1.4.2 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/google/flatbuffers v25.2.10+incompatible // indirect github.com/google/go-cmp v0.6.0 // indirect github.com/gorilla/securecookie v1.1.2 // indirect github.com/huandu/xstrings v1.3.2 // indirect + github.com/klauspost/compress v1.18.0 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect + github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/rogpeppe/go-internal v1.12.0 // indirect + github.com/rogpeppe/go-internal v1.13.1 // indirect github.com/stretchr/testify v1.10.0 // indirect + go.opentelemetry.io/auto/sdk v1.1.0 // indirect + go.opentelemetry.io/otel v1.34.0 // indirect + go.opentelemetry.io/otel/metric v1.34.0 // indirect + go.opentelemetry.io/otel/trace v1.34.0 // indirect golang.org/x/sys v0.30.0 // indirect golang.org/x/text v0.22.0 // indirect + google.golang.org/protobuf v1.36.5 // indirect ) diff --git a/go.sum b/go.sum index 59ecc6b..1699181 100644 --- a/go.sum +++ b/go.sum @@ -46,16 +46,25 @@ github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8 h1:GKTyiRCL6zVf5wWaq github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8/go.mod h1:spo1JLcs67NmW1aVLEgtA8Yy1elc+X8y5SRW1sFW4Og= github.com/cavaliergopher/grab/v3 v3.0.1 h1:4z7TkBfmPjmLAAmkkAZNX/6QJ1nNFdv3SdIHXju0Fr4= 
github.com/cavaliergopher/grab/v3 v3.0.1/go.mod h1:1U/KNnD+Ft6JJiYoYBAimKH2XrYptb8Kl3DFGmsjpq4= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgraph-io/badger/v4 v4.6.0 h1:acOwfOOZ4p1dPRnYzvkVm7rUk2Y21TgPVepCy5dJdFQ= +github.com/dgraph-io/badger/v4 v4.6.0/go.mod h1:KSJ5VTuZNC3Sd+YhvVjk2nYua9UZnnTr/SkXvdtiPgI= +github.com/dgraph-io/ristretto/v2 v2.1.0 h1:59LjpOJLNDULHh8MC4UaegN52lC4JnO2dITsie/Pa8I= +github.com/dgraph-io/ristretto/v2 v2.1.0/go.mod h1:uejeqfYXpUomfse0+lO+13ATz4TypQYLJZzBSAemuB4= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dustin/go-humanize v0.0.0-20180421182945-02af3965c54e/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/eapache/go-resiliency v1.1.0/go.mod 
h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= @@ -78,6 +87,11 @@ github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2 github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= @@ -98,6 +112,8 @@ github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8l github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180124185431-e89373fe6b4a/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/flatbuffers v25.2.10+incompatible h1:F3vclr7C3HpB1k9mxCGRMXq6FdUalZ6H/pNX4FP1v0Q= +github.com/google/flatbuffers v25.2.10+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= 
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -132,6 +148,8 @@ github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVY github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= @@ -163,6 +181,7 @@ github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= @@ -187,6 +206,7 @@ github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqn github.com/rogpeppe/go-charset v0.0.0-20180617210344-2471d30d28b4/go.mod 
h1:qgYeAmZ5ZIpBWTGllZSQnw97Dj+woV0toclVaRGI8pc= github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= github.com/rs/zerolog v1.33.0 h1:1cU2KZkvPxNyfgEmhHAz/1A9Bz+llsdYzklWFzgp0r8= github.com/rs/zerolog v1.33.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss= @@ -215,6 +235,14 @@ github.com/willf/bitset v1.1.10/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPy go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/otel v1.34.0 h1:zRLXxLCgL1WyKsPVrgbSdMN4c0FMkDAskSTQP+0hdUY= +go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI= +go.opentelemetry.io/otel/metric v1.34.0 h1:+eTR3U0MyfWjRDhmFMxe2SsW64QrZ84AOhvqS7Y+PoQ= +go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE= +go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k= +go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.33.0 h1:IOBPskki6Lysi0lo9qQvbxiQ+FvsCC/YWOecCHAixus= @@ -236,6 +264,8 @@ golang.org/x/net 
v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= +golang.org/x/net v0.35.0 h1:T5GQRQb2y08kTAByq9L4/bz8cipCdA8FbRTXewonqY8= +golang.org/x/net v0.35.0/go.mod h1:EglIi67kWsHKlRzzVMUD93VMSWGFOMSZgxFjparz1Qk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -288,6 +318,8 @@ google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM= +google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/pkg/debrid/alldebrid/alldebrid.go b/pkg/debrid/alldebrid/alldebrid.go index 190b5cb..fd509c2 100644 --- a/pkg/debrid/alldebrid/alldebrid.go +++ b/pkg/debrid/alldebrid/alldebrid.go @@ -273,7 +273,7 @@ func (ad *AllDebrid) GetCheckCached() bool { } func (ad *AllDebrid) 
GetTorrents() ([]*torrent.Torrent, error) { - return nil, fmt.Errorf("not implemented") + return nil, nil } func (ad *AllDebrid) GetDownloadingStatus() []string { diff --git a/pkg/debrid/cache/cache.go b/pkg/debrid/cache/cache.go index 348a421..849a173 100644 --- a/pkg/debrid/cache/cache.go +++ b/pkg/debrid/cache/cache.go @@ -1,15 +1,17 @@ package cache import ( - "bufio" + "context" "encoding/json" "fmt" + "github.com/dgraph-io/badger/v4" "github.com/rs/zerolog" "github.com/sirrobot01/debrid-blackhole/internal/logger" "os" "path/filepath" "runtime" "sync" + "sync/atomic" "time" "github.com/sirrobot01/debrid-blackhole/internal/config" @@ -35,17 +37,45 @@ var ( func getLogger() zerolog.Logger { once.Do(func() { - _logInstance = logger.NewLogger("cache", "info", os.Stdout) + cfg := config.GetConfig() + _logInstance = logger.NewLogger("cache", cfg.LogLevel, os.Stdout) }) return _logInstance } type Cache struct { - dir string - client engine.Service - torrents *sync.Map // key: torrent.Id, value: *CachedTorrent - torrentsNames *sync.Map // key: torrent.Name, value: torrent.Id - LastUpdated time.Time `json:"last_updated"` + dir string + client engine.Service + db *badger.DB + torrents map[string]*CachedTorrent // key: torrent.Id, value: *CachedTorrent + torrentsMutex sync.RWMutex + torrentsNames map[string]*CachedTorrent // key: torrent.Name, value: torrent + torrentNamesMutex sync.RWMutex + LastUpdated time.Time `json:"last_updated"` +} + +func (c *Cache) SetTorrent(t *CachedTorrent) { + c.torrentsMutex.Lock() + defer c.torrentsMutex.Unlock() + c.torrents[t.Id] = t +} + +func (c *Cache) SetTorrentName(name string, t *CachedTorrent) { + c.torrentNamesMutex.Lock() + defer c.torrentNamesMutex.Unlock() + c.torrentsNames[name] = t +} + +func (c *Cache) GetTorrents() map[string]*CachedTorrent { + c.torrentsMutex.RLock() + defer c.torrentsMutex.RUnlock() + return c.torrents +} + +func (c *Cache) GetTorrentNames() map[string]*CachedTorrent { + c.torrentNamesMutex.RLock() + 
defer c.torrentNamesMutex.RUnlock() + return c.torrentsNames } type Manager struct { @@ -73,10 +103,11 @@ func (m *Manager) GetCache(debridName string) *Cache { } func New(debridService engine.Service, basePath string) *Cache { + dbPath := filepath.Join(basePath, "cache", debridService.GetName(), "db") return &Cache{ - dir: filepath.Join(basePath, "cache", debridService.GetName(), "torrents"), - torrents: &sync.Map{}, - torrentsNames: &sync.Map{}, + dir: dbPath, + torrents: make(map[string]*CachedTorrent), + torrentsNames: make(map[string]*CachedTorrent), client: debridService, } } @@ -84,93 +115,117 @@ func New(debridService engine.Service, basePath string) *Cache { func (c *Cache) Start() error { _logger := getLogger() _logger.Info().Msg("Starting cache for: " + c.client.GetName()) + + // Make sure the directory exists + if err := os.MkdirAll(c.dir, 0755); err != nil { + return fmt.Errorf("failed to create cache directory: %w", err) + } + + // Open BadgerDB + opts := badger.DefaultOptions(c.dir) + opts.Logger = nil // Disable Badger's internal logger + + var err error + c.db, err = badger.Open(opts) + if err != nil { + return fmt.Errorf("failed to open BadgerDB: %w", err) + } + if err := c.Load(); err != nil { return fmt.Errorf("failed to load cache: %v", err) } + if err := c.Sync(); err != nil { return fmt.Errorf("failed to sync cache: %v", err) } + + return nil +} + +func (c *Cache) Close() error { + if c.db != nil { + return c.db.Close() + } return nil } func (c *Cache) Load() error { _logger := getLogger() - if err := os.MkdirAll(c.dir, 0755); err != nil { - return fmt.Errorf("failed to create cache directory: %w", err) - } + err := c.db.View(func(txn *badger.Txn) error { + opts := badger.DefaultIteratorOptions + it := txn.NewIterator(opts) + defer it.Close() - files, err := os.ReadDir(c.dir) - if err != nil { - return fmt.Errorf("failed to read cache directory: %w", err) - } + prefix := []byte("torrent:") + for it.Seek(prefix); it.ValidForPrefix(prefix); 
it.Next() { + item := it.Item() - for _, file := range files { - if file.IsDir() || filepath.Ext(file.Name()) != ".json" { - continue + err := item.Value(func(val []byte) error { + var ct CachedTorrent + if err := json.Unmarshal(val, &ct); err != nil { + _logger.Debug().Err(err).Msgf("Failed to unmarshal torrent") + return nil // Continue to next item + } + + if len(ct.Files) > 0 { + c.SetTorrent(&ct) + c.SetTorrentName(ct.Name, &ct) + } + return nil + }) + + if err != nil { + _logger.Debug().Err(err).Msg("Error reading torrent value") + } } + return nil + }) - filePath := filepath.Join(c.dir, file.Name()) - data, err := os.ReadFile(filePath) - if err != nil { - _logger.Debug().Err(err).Msgf("Failed to read file: %s", filePath) - continue - } - - var ct CachedTorrent - if err := json.Unmarshal(data, &ct); err != nil { - _logger.Debug().Err(err).Msgf("Failed to unmarshal file: %s", filePath) - continue - } - if len(ct.Files) > 0 { - c.torrents.Store(ct.Torrent.Id, &ct) - c.torrentsNames.Store(ct.Torrent.Name, ct.Torrent.Id) - } - } - - return nil + return err } func (c *Cache) GetTorrent(id string) *CachedTorrent { - if value, ok := c.torrents.Load(id); ok { - return value.(*CachedTorrent) + if t, ok := c.GetTorrents()[id]; ok { + return t } return nil } func (c *Cache) GetTorrentByName(name string) *CachedTorrent { - if id, ok := c.torrentsNames.Load(name); ok { - return c.GetTorrent(id.(string)) + if t, ok := c.GetTorrentNames()[name]; ok { + return t } return nil } func (c *Cache) SaveTorrent(ct *CachedTorrent) error { - data, err := json.MarshalIndent(ct, "", " ") + data, err := json.Marshal(ct) if err != nil { return fmt.Errorf("failed to marshal torrent: %w", err) } - fileName := ct.Torrent.Id + ".json" - filePath := filepath.Join(c.dir, fileName) - tmpFile := filePath + ".tmp" + key := []byte(fmt.Sprintf("torrent:%s", ct.Torrent.Id)) + + err = c.db.Update(func(txn *badger.Txn) error { + return txn.Set(key, data) + }) - f, err := os.Create(tmpFile) if err != 
nil { - return fmt.Errorf("failed to create temp file: %w", err) - } - defer f.Close() - - w := bufio.NewWriter(f) - if _, err := w.Write(data); err != nil { - return fmt.Errorf("failed to write data: %w", err) + return fmt.Errorf("failed to save torrent to BadgerDB: %w", err) } - if err := w.Flush(); err != nil { - return fmt.Errorf("failed to flush data: %w", err) + // Also create an index by name for quick lookups + nameKey := []byte(fmt.Sprintf("name:%s", ct.Torrent.Name)) + err = c.db.Update(func(txn *badger.Txn) error { + return txn.Set(nameKey, []byte(ct.Torrent.Id)) + }) + + if err != nil { + return fmt.Errorf("failed to save torrent name index: %w", err) } - return os.Rename(tmpFile, filePath) + return nil } func (c *Cache) SaveAll() error { @@ -192,14 +247,23 @@ func (c *Cache) SaveAll() error { }() } - c.torrents.Range(func(_, value interface{}) bool { - tasks <- value.(*CachedTorrent) - return true - }) + for _, value := range c.GetTorrents() { + tasks <- value + } close(tasks) wg.Wait() c.LastUpdated = time.Now() + + // Run value log garbage collection when appropriate + // This helps reclaim space from deleted/updated values + go func() { + err := c.db.RunValueLogGC(0.5) // Run GC if 50% of the value log can be discarded + if err != nil && err != badger.ErrNoRewrite { + _logger.Debug().Err(err).Msg("BadgerDB value log GC") + } + }() + return nil } @@ -209,44 +273,76 @@ func (c *Cache) Sync() error { if err != nil { return fmt.Errorf("failed to sync torrents: %v", err) } + _logger.Info().Msgf("Syncing %d torrents", len(torrents)) - workers := runtime.NumCPU() * 200 - workChan := make(chan *torrent.Torrent, len(torrents)) - errChan := make(chan error, len(torrents)) + // Calculate optimal workers - balance between CPU and IO + workers := runtime.NumCPU() * 4 // A more balanced multiplier for BadgerDB + // Create channels with appropriate buffering + workChan := make(chan *torrent.Torrent, workers*2) + + // Use an atomic counter for progress tracking + 
var processed int64 + var errorCount int64 + + // Create a context with cancellation in case of critical errors + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // Create a wait group for workers var wg sync.WaitGroup + // Start workers for i := 0; i < workers; i++ { wg.Add(1) go func() { defer wg.Done() - for t := range workChan { - if err := c.processTorrent(t); err != nil { - errChan <- err + for { + select { + case t, ok := <-workChan: + if !ok { + return // Channel closed, exit goroutine + } + + if err := c.processTorrent(t); err != nil { + _logger.Error().Err(err).Str("torrent", t.Name).Msg("sync error") + atomic.AddInt64(&errorCount, 1) + } + + count := atomic.AddInt64(&processed, 1) + if count%1000 == 0 { + _logger.Info().Msgf("Progress: %d/%d torrents processed", count, len(torrents)) + } + + case <-ctx.Done(): + return // Context cancelled, exit goroutine } } }() } + // Feed work to workers for _, t := range torrents { - workChan <- t + select { + case workChan <- t: + // Work sent successfully + case <-ctx.Done(): + break // Context cancelled + } } + + // Signal workers that no more work is coming close(workChan) + // Wait for all workers to complete wg.Wait() - close(errChan) - for err := range errChan { - _logger.Error().Err(err).Msg("sync error") - } - - _logger.Info().Msgf("Synced %d torrents", len(torrents)) + _logger.Info().Msgf("Sync complete: %d torrents processed, %d errors", len(torrents), errorCount) return nil } func (c *Cache) processTorrent(t *torrent.Torrent) error { - if existing, ok := c.torrents.Load(t.Id); ok { - ct := existing.(*CachedTorrent) + if ct := c.GetTorrent(t.Id); ct != nil { if ct.IsComplete { return nil } @@ -259,7 +355,7 @@ func (c *Cache) AddTorrent(t *torrent.Torrent) { _logger := getLogger() if len(t.Files) == 0 { - tNew, err := c.client.GetTorrent(t.Id) + tNew, err := c.client.GetTorrent(t) _logger.Debug().Msgf("Getting torrent files for %s", t.Id) if err != nil { 
_logger.Debug().Msgf("Failed to get torrent files for %s: %v", t.Id, err) @@ -280,8 +376,8 @@ func (c *Cache) AddTorrent(t *torrent.Torrent) { DownloadLinks: make(map[string]DownloadLinkCache), } - c.torrents.Store(t.Id, ct) - c.torrentsNames.Store(t.Name, t.Id) + c.SetTorrent(ct) + c.SetTorrentName(t.Name, ct) go func() { if err := c.SaveTorrent(ct); err != nil { @@ -290,12 +386,12 @@ func (c *Cache) AddTorrent(t *torrent.Torrent) { }() } -func (c *Cache) RefreshTorrent(torrentId string) *CachedTorrent { +func (c *Cache) RefreshTorrent(torrent *CachedTorrent) *CachedTorrent { _logger := getLogger() - t, err := c.client.GetTorrent(torrentId) + t, err := c.client.GetTorrent(torrent.Torrent) if err != nil { - _logger.Debug().Msgf("Failed to get torrent files for %s: %v", torrentId, err) + _logger.Debug().Msgf("Failed to get torrent files for %s: %v", torrent.Id, err) return nil } if len(t.Files) == 0 { @@ -309,8 +405,8 @@ func (c *Cache) RefreshTorrent(torrentId string) *CachedTorrent { DownloadLinks: make(map[string]DownloadLinkCache), } - c.torrents.Store(t.Id, ct) - c.torrentsNames.Store(t.Name, t.Id) + c.SetTorrent(ct) + c.SetTorrentName(t.Name, ct) go func() { if err := c.SaveTorrent(ct); err != nil { @@ -329,7 +425,7 @@ func (c *Cache) GetFileDownloadLink(t *CachedTorrent, file *torrent.File) (strin } if file.Link == "" { - t = c.RefreshTorrent(t.Id) + t = c.RefreshTorrent(t) if t == nil { return "", fmt.Errorf("torrent not found") } @@ -354,7 +450,3 @@ func (c *Cache) GetFileDownloadLink(t *CachedTorrent, file *torrent.File) (strin return link.DownloadLink, nil } - -func (c *Cache) GetTorrents() *sync.Map { - return c.torrents -} diff --git a/pkg/debrid/debrid_link/debrid_link.go b/pkg/debrid/debrid_link/debrid_link.go index 059eaf5..5cbd447 100644 --- a/pkg/debrid/debrid_link/debrid_link.go +++ b/pkg/debrid/debrid_link/debrid_link.go @@ -292,5 +292,5 @@ func New(dc config.Debrid, cache *cache.Cache) *DebridLink { } func (dl *DebridLink) GetTorrents() 
([]*torrent.Torrent, error) { - return nil, fmt.Errorf("not implemented") + return nil, nil } diff --git a/pkg/debrid/realdebrid/realdebrid.go b/pkg/debrid/realdebrid/realdebrid.go index 7a64ff7..8d25bf8 100644 --- a/pkg/debrid/realdebrid/realdebrid.go +++ b/pkg/debrid/realdebrid/realdebrid.go @@ -345,8 +345,11 @@ func (r *RealDebrid) getTorrents(offset int, limit int) ([]*torrent.Torrent, err return nil, err } torrents := make([]*torrent.Torrent, 0) + filenames := map[string]bool{} for _, t := range data { - + if _, exists := filenames[t.Filename]; exists { + continue + } torrents = append(torrents, &torrent.Torrent{ Id: t.Id, Name: t.Filename, @@ -364,18 +367,10 @@ func (r *RealDebrid) getTorrents(offset int, limit int) ([]*torrent.Torrent, err func (r *RealDebrid) GetTorrents() ([]*torrent.Torrent, error) { torrents := make([]*torrent.Torrent, 0) offset := 0 - limit := 5000 - for { - ts, err := r.getTorrents(offset, limit) - if err != nil { - break - } - if len(ts) == 0 { - break - } - torrents = append(torrents, ts...) - offset = len(torrents) - } + limit := 1000 + ts, _ := r.getTorrents(offset, limit) + torrents = append(torrents, ts...) 
+ offset = len(torrents) return torrents, nil } diff --git a/pkg/debrid/torbox/torbox.go b/pkg/debrid/torbox/torbox.go index d95c7f2..b9346ed 100644 --- a/pkg/debrid/torbox/torbox.go +++ b/pkg/debrid/torbox/torbox.go @@ -329,7 +329,7 @@ func (tb *Torbox) GetCheckCached() bool { } func (tb *Torbox) GetTorrents() ([]*torrent.Torrent, error) { - return nil, fmt.Errorf("not implemented") + return nil, nil } func New(dc config.Debrid, cache *cache.Cache) *Torbox { diff --git a/pkg/service/service.go b/pkg/service/service.go index 104a7c6..1c9e216 100644 --- a/pkg/service/service.go +++ b/pkg/service/service.go @@ -3,15 +3,17 @@ package service import ( "github.com/sirrobot01/debrid-blackhole/pkg/arr" "github.com/sirrobot01/debrid-blackhole/pkg/debrid" + "github.com/sirrobot01/debrid-blackhole/pkg/debrid/cache" "github.com/sirrobot01/debrid-blackhole/pkg/debrid/engine" "github.com/sirrobot01/debrid-blackhole/pkg/repair" "sync" ) type Service struct { - Repair *repair.Repair - Arr *arr.Storage - Debrid *engine.Engine + Repair *repair.Repair + Arr *arr.Storage + Debrid *engine.Engine + DebridCache *cache.Manager } var ( @@ -24,9 +26,10 @@ func New() *Service { arrs := arr.NewStorage() deb := debrid.New() instance = &Service{ - Repair: repair.New(arrs), - Arr: arrs, - Debrid: deb, + Repair: repair.New(arrs), + Arr: arrs, + Debrid: deb, + DebridCache: cache.NewManager(deb), } }) return instance diff --git a/pkg/webdav/handler.go b/pkg/webdav/handler.go index 33c18a1..170f7dc 100644 --- a/pkg/webdav/handler.go +++ b/pkg/webdav/handler.go @@ -11,6 +11,7 @@ import ( "net/http" "os" "path" + "sort" "strings" "sync" "sync/atomic" @@ -60,19 +61,23 @@ func (h *Handler) refreshRootListing() { return } - var files []os.FileInfo - h.cache.GetTorrents().Range(func(key, value interface{}) bool { - cachedTorrent := value.(*cache.CachedTorrent) + torrents := h.cache.GetTorrentNames() + files := make([]os.FileInfo, 0, len(torrents)) + + for name, cachedTorrent := range torrents { if 
cachedTorrent != nil && cachedTorrent.Torrent != nil { files = append(files, &FileInfo{ - name: cachedTorrent.Torrent.Name, + name: name, size: 0, mode: 0755 | os.ModeDir, modTime: time.Now(), isDir: true, }) } - return true + } + + sort.Slice(files, func(i, j int) bool { + return files[i].Name() < files[j].Name() }) h.rootListing.Store(files) diff --git a/pkg/webdav/webdav.go b/pkg/webdav/webdav.go index a14e639..6457974 100644 --- a/pkg/webdav/webdav.go +++ b/pkg/webdav/webdav.go @@ -4,8 +4,12 @@ import ( "context" "fmt" "github.com/go-chi/chi/v5" + "github.com/sirrobot01/debrid-blackhole/internal/config" + "github.com/sirrobot01/debrid-blackhole/internal/logger" + "github.com/sirrobot01/debrid-blackhole/pkg/service" "html/template" "net/http" + "os" "sync" ) @@ -14,15 +18,15 @@ type WebDav struct { } func New() *WebDav { - //svc := service.GetService() - //cfg := config.GetConfig() + svc := service.GetService() + cfg := config.GetConfig() w := &WebDav{ Handlers: make([]*Handler, 0), } - //for name, c := range svc.DebridCache.GetCaches() { - // h := NewHandler(name, c, logger.NewLogger(fmt.Sprintf("%s-webdav", name), cfg.LogLevel, os.Stdout)) - // w.Handlers = append(w.Handlers, h) - //} + for name, c := range svc.DebridCache.GetCaches() { + h := NewHandler(name, c, logger.NewLogger(fmt.Sprintf("%s-webdav", name), cfg.LogLevel, os.Stdout)) + w.Handlers = append(w.Handlers, h) + } return w } From 5d2fabe20bb1fe11a4312dc109acafd44a71ccac Mon Sep 17 00:00:00 2001 From: Mukhtar Akere Date: Tue, 18 Mar 2025 10:02:10 +0100 Subject: [PATCH 02/39] initializing webdav server --- go.mod | 1 + go.sum | 2 + internal/cache/cache.go | 90 --- internal/config/config.go | 2 +- internal/logger/logger.go | 6 +- internal/request/discord.go | 2 +- internal/request/request.go | 242 ++++++-- main.go | 8 + pkg/arr/arr.go | 2 +- pkg/arr/content.go | 2 +- pkg/arr/history.go | 2 +- pkg/arr/import.go | 2 +- pkg/arr/tmdb.go | 2 +- pkg/debrid/alldebrid/alldebrid.go | 101 ++-- 
pkg/debrid/cache/cache.go | 452 --------------- pkg/debrid/debrid.go | 32 +- .../{engine/service.go => debrid/debrid.go} | 12 +- pkg/debrid/debrid_link/debrid_link.go | 119 ++-- pkg/debrid/engine/engine.go | 12 +- pkg/debrid/realdebrid/realdebrid.go | 274 ++++++--- pkg/debrid/realdebrid/types.go | 18 +- pkg/debrid/torbox/torbox.go | 101 ++-- pkg/debrid/torrent/torrent.go | 85 +-- pkg/proxy/proxy.go | 2 +- pkg/qbit/downloader.go | 16 +- pkg/qbit/qbit.go | 2 +- pkg/qbit/storage.go | 2 +- pkg/qbit/torrent.go | 2 +- pkg/repair/repair.go | 8 +- pkg/server/server.go | 2 +- pkg/server/webhook.go | 2 +- pkg/service/service.go | 15 +- pkg/web/server.go | 5 +- pkg/webdav/cache.go | 542 ++++++++++++++++++ pkg/webdav/file.go | 155 +++-- pkg/webdav/handler.go | 442 ++++++++++---- pkg/webdav/misc.go | 14 + pkg/webdav/webdav.go | 10 +- pkg/worker/worker.go | 3 +- 39 files changed, 1650 insertions(+), 1141 deletions(-) delete mode 100644 internal/cache/cache.go delete mode 100644 pkg/debrid/cache/cache.go rename pkg/debrid/{engine/service.go => debrid/debrid.go} (68%) create mode 100644 pkg/webdav/cache.go create mode 100644 pkg/webdav/misc.go diff --git a/go.mod b/go.mod index 9f7940b..852c34c 100644 --- a/go.mod +++ b/go.mod @@ -32,6 +32,7 @@ require ( github.com/dustin/go-humanize v1.0.1 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect + github.com/goccy/go-json v0.10.5 // indirect github.com/google/flatbuffers v25.2.10+incompatible // indirect github.com/google/go-cmp v0.6.0 // indirect github.com/gorilla/securecookie v1.1.2 // indirect diff --git a/go.sum b/go.sum index 1699181..32e6b7a 100644 --- a/go.sum +++ b/go.sum @@ -93,6 +93,8 @@ github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ4 github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-stack/stack v1.8.0/go.mod 
h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/goccy/go-json v0.10.5 h1:Fq85nIqj+gXn/S5ahsiTlK3TmC85qgirsdTP/+DeaC4= +github.com/goccy/go-json v0.10.5/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= diff --git a/internal/cache/cache.go b/internal/cache/cache.go deleted file mode 100644 index 2c2aa07..0000000 --- a/internal/cache/cache.go +++ /dev/null @@ -1,90 +0,0 @@ -package cache - -import ( - "sync" -) - -type Cache struct { - data map[string]struct{} - order []string - maxItems int - mu sync.RWMutex -} - -func New(maxItems int) *Cache { - if maxItems <= 0 { - maxItems = 1000 - } - return &Cache{ - data: make(map[string]struct{}, maxItems), - order: make([]string, 0, maxItems), - maxItems: maxItems, - } -} - -func (c *Cache) Add(value string) { - c.mu.Lock() - defer c.mu.Unlock() - - if _, exists := c.data[value]; !exists { - if len(c.order) >= c.maxItems { - delete(c.data, c.order[0]) - c.order = c.order[1:] - } - c.data[value] = struct{}{} - c.order = append(c.order, value) - } -} - -func (c *Cache) AddMultiple(values map[string]bool) { - c.mu.Lock() - defer c.mu.Unlock() - - for value, exists := range values { - if !exists { - if _, exists := c.data[value]; !exists { - if len(c.order) >= c.maxItems { - delete(c.data, c.order[0]) - c.order = c.order[1:] - } - c.data[value] = struct{}{} - c.order = append(c.order, value) - } - } - } -} - -func (c *Cache) Get(index int) (string, bool) { - c.mu.RLock() - defer c.mu.RUnlock() - if index < 0 || index >= len(c.order) { - return "", false - } - return c.order[index], true -} - -func (c *Cache) GetMultiple(values []string) map[string]bool { - c.mu.RLock() - defer c.mu.RUnlock() - - result := make(map[string]bool, len(values)) - for _, value 
:= range values { - if _, exists := c.data[value]; exists { - result[value] = true - } - } - return result -} - -func (c *Cache) Exists(value string) bool { - c.mu.RLock() - defer c.mu.RUnlock() - _, exists := c.data[value] - return exists -} - -func (c *Cache) Len() int { - c.mu.RLock() - defer c.mu.RUnlock() - return len(c.order) -} diff --git a/internal/config/config.go b/internal/config/config.go index 7ceac43..30a2090 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -1,9 +1,9 @@ package config import ( - "encoding/json" "errors" "fmt" + "github.com/goccy/go-json" "os" "path/filepath" "sync" diff --git a/internal/logger/logger.go b/internal/logger/logger.go index 57d00eb..2ced7a0 100644 --- a/internal/logger/logger.go +++ b/internal/logger/logger.go @@ -29,7 +29,7 @@ func GetLogPath() string { return filepath.Join(logsDir, "decypharr.log") } -func NewLogger(prefix string, level string, output *os.File) zerolog.Logger { +func NewLogger(prefix string, level string) zerolog.Logger { rotatingLogFile := &lumberjack.Logger{ Filename: GetLogPath(), @@ -39,7 +39,7 @@ func NewLogger(prefix string, level string, output *os.File) zerolog.Logger { } consoleWriter := zerolog.ConsoleWriter{ - Out: output, + Out: os.Stdout, TimeFormat: "2006-01-02 15:04:05", NoColor: false, // Set to true if you don't want colors FormatLevel: func(i interface{}) string { @@ -87,7 +87,7 @@ func NewLogger(prefix string, level string, output *os.File) zerolog.Logger { func GetDefaultLogger() zerolog.Logger { once.Do(func() { cfg := config.GetConfig() - logger = NewLogger("decypharr", cfg.LogLevel, os.Stdout) + logger = NewLogger("decypharr", cfg.LogLevel) }) return logger } diff --git a/internal/request/discord.go b/internal/request/discord.go index 115d942..a9ec4e0 100644 --- a/internal/request/discord.go +++ b/internal/request/discord.go @@ -2,8 +2,8 @@ package request import ( "bytes" - "encoding/json" "fmt" + "github.com/goccy/go-json" 
"github.com/sirrobot01/debrid-blackhole/internal/config" "io" "net/http" diff --git a/internal/request/request.go b/internal/request/request.go index fb87cae..bd6ae30 100644 --- a/internal/request/request.go +++ b/internal/request/request.go @@ -1,13 +1,18 @@ package request import ( + "bytes" + "context" "crypto/tls" - "encoding/json" - "errors" "fmt" + "github.com/goccy/go-json" + "github.com/rs/zerolog" + "github.com/sirrobot01/debrid-blackhole/internal/config" + "github.com/sirrobot01/debrid-blackhole/internal/logger" "golang.org/x/time/rate" "io" - "log" + "math" + "math/rand" "net/http" "net/url" "regexp" @@ -35,103 +40,216 @@ func JoinURL(base string, paths ...string) (string, error) { return joined, nil } -type RLHTTPClient struct { - client *http.Client - Ratelimiter *rate.Limiter - Headers map[string]string +type ClientOption func(*Client) + +// Client represents an HTTP client with additional capabilities +type Client struct { + client *http.Client + rateLimiter *rate.Limiter + headers map[string]string + maxRetries int + timeout time.Duration + skipTLSVerify bool + retryableStatus map[int]bool + logger zerolog.Logger } -func (c *RLHTTPClient) Doer(req *http.Request) (*http.Response, error) { - if c.Ratelimiter != nil { - err := c.Ratelimiter.Wait(req.Context()) +// WithMaxRetries sets the maximum number of retry attempts +func (c *Client) WithMaxRetries(retries int) *Client { + c.maxRetries = retries + return c +} + +// WithTimeout sets the request timeout +func (c *Client) WithTimeout(timeout time.Duration) *Client { + c.timeout = timeout + return c +} + +// WithRateLimiter sets a rate limiter +func (c *Client) WithRateLimiter(rl *rate.Limiter) *Client { + c.rateLimiter = rl + return c +} + +// WithHeaders sets default headers +func (c *Client) WithHeaders(headers map[string]string) *Client { + c.headers = headers + return c +} + +func (c *Client) WithLogger(logger zerolog.Logger) *Client { + c.logger = logger + return c +} + +// WithRetryableStatus 
adds status codes that should trigger a retry +func (c *Client) WithRetryableStatus(statusCodes ...int) *Client { + for _, code := range statusCodes { + c.retryableStatus[code] = true + } + return c +} + +// doRequest performs a single HTTP request with rate limiting +func (c *Client) doRequest(req *http.Request) (*http.Response, error) { + if c.rateLimiter != nil { + err := c.rateLimiter.Wait(req.Context()) if err != nil { - return nil, err + return nil, fmt.Errorf("rate limiter wait: %w", err) } } - resp, err := c.client.Do(req) - if err != nil { - return nil, err - } - return resp, nil + + return c.client.Do(req) } -func (c *RLHTTPClient) Do(req *http.Request) (*http.Response, error) { - var resp *http.Response +// Do performs an HTTP request with retries for certain status codes +func (c *Client) Do(req *http.Request) (*http.Response, error) { + // Save the request body for reuse in retries + var bodyBytes []byte var err error - backoff := time.Millisecond * 500 - for i := 0; i < 3; i++ { - resp, err = c.Doer(req) + if req.Body != nil { + bodyBytes, err = io.ReadAll(req.Body) if err != nil { + return nil, fmt.Errorf("reading request body: %w", err) + } + req.Body.Close() + } + + // Apply timeout to the request context if not already present + if c.timeout > 0 { + var cancel context.CancelFunc + ctx := req.Context() + ctx, cancel = context.WithTimeout(ctx, c.timeout) + defer cancel() + req = req.WithContext(ctx) + } + + backoff := time.Millisecond * 500 + var resp *http.Response + + for attempt := 0; attempt <= c.maxRetries; attempt++ { + // Reset the request body if it exists + if bodyBytes != nil { + req.Body = io.NopCloser(bytes.NewReader(bodyBytes)) + } + + // Apply headers + if c.headers != nil { + for key, value := range c.headers { + req.Header.Set(key, value) + } + } + + resp, err = c.doRequest(req) + if err != nil { + // Check if this is a network error that might be worth retrying + if attempt < c.maxRetries { + // Apply backoff with jitter + jitter := 
time.Duration(rand.Int63n(int64(backoff / 4))) + sleepTime := backoff + jitter + + select { + case <-req.Context().Done(): + return nil, req.Context().Err() + case <-time.After(sleepTime): + // Continue to next retry attempt + } + + // Exponential backoff + backoff *= 2 + continue + } return nil, err } - if resp.StatusCode != http.StatusTooManyRequests { + // Check if the status code is retryable + if !c.retryableStatus[resp.StatusCode] || attempt == c.maxRetries { return resp, nil } - // Close the response body to prevent resource leakage + // Close the response body before retrying resp.Body.Close() - // Wait for the backoff duration before retrying - time.Sleep(backoff) + // Apply backoff with jitter + jitter := time.Duration(rand.Int63n(int64(backoff / 4))) + sleepTime := backoff + jitter + + select { + case <-req.Context().Done(): + return nil, req.Context().Err() + case <-time.After(sleepTime): + // Continue to next retry attempt + } // Exponential backoff backoff *= 2 } - return resp, fmt.Errorf("max retries exceeded") + return nil, fmt.Errorf("max retries exceeded") } -func (c *RLHTTPClient) MakeRequest(req *http.Request) ([]byte, error) { - if c.Headers != nil { - for key, value := range c.Headers { - req.Header.Set(key, value) - } - } - +// MakeRequest performs an HTTP request and returns the response body as bytes +func (c *Client) MakeRequest(req *http.Request) ([]byte, error) { res, err := c.Do(req) if err != nil { return nil, err } - defer func(Body io.ReadCloser) { - err := Body.Close() - if err != nil { - log.Println(err) + defer func() { + if err := res.Body.Close(); err != nil { + c.logger.Printf("Failed to close response body: %v", err) } - }(res.Body) + }() - b, err := io.ReadAll(res.Body) + bodyBytes, err := io.ReadAll(res.Body) if err != nil { - return nil, err - } - statusOk := res.StatusCode >= 200 && res.StatusCode < 300 - if !statusOk { - // Add status code error to the body - b = append(b, []byte(fmt.Sprintf("\nstatus code: %d", 
res.StatusCode))...) - return nil, errors.New(string(b)) + return nil, fmt.Errorf("reading response body: %w", err) } - return b, nil + if res.StatusCode < 200 || res.StatusCode >= 300 { + return nil, fmt.Errorf("HTTP error %d: %s", res.StatusCode, string(bodyBytes)) + } + + return bodyBytes, nil } -func NewRLHTTPClient(rl *rate.Limiter, headers map[string]string) *RLHTTPClient { - tr := &http.Transport{ - TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, - } - c := &RLHTTPClient{ - client: &http.Client{ - Transport: tr, +// New creates a new HTTP client with the specified options +func New(options ...ClientOption) *Client { + client := &Client{ + maxRetries: 3, + skipTLSVerify: true, + retryableStatus: map[int]bool{ + http.StatusTooManyRequests: true, + http.StatusInternalServerError: true, + http.StatusBadGateway: true, + http.StatusServiceUnavailable: true, + http.StatusGatewayTimeout: true, }, + logger: logger.NewLogger("request", config.GetConfig().LogLevel), } - if rl != nil { - c.Ratelimiter = rl + + // Apply options + for _, option := range options { + option(client) } - if headers != nil { - c.Headers = headers + + // Create transport + transport := &http.Transport{ + TLSClientConfig: &tls.Config{ + InsecureSkipVerify: client.skipTLSVerify, + }, + Proxy: http.ProxyFromEnvironment, } - return c + + // Create HTTP client + client.client = &http.Client{ + Transport: transport, + Timeout: client.timeout, + } + + return client } func ParseRateLimit(rateStr string) *rate.Limiter { @@ -153,9 +271,11 @@ func ParseRateLimit(rateStr string) *rate.Limiter { switch unit { case "minute": reqsPerSecond := float64(count) / 60.0 - return rate.NewLimiter(rate.Limit(reqsPerSecond), 5) + burstSize := int(math.Max(30, float64(count)*0.25)) + return rate.NewLimiter(rate.Limit(reqsPerSecond), burstSize) case "second": - return rate.NewLimiter(rate.Limit(float64(count)), 5) + burstSize := int(math.Max(30, float64(count)*5)) + return 
rate.NewLimiter(rate.Limit(float64(count)), burstSize) default: return nil } diff --git a/main.go b/main.go index 9e162dd..95ca8f4 100644 --- a/main.go +++ b/main.go @@ -6,6 +6,8 @@ import ( "github.com/sirrobot01/debrid-blackhole/cmd/decypharr" "github.com/sirrobot01/debrid-blackhole/internal/config" "log" + "net/http" + _ "net/http/pprof" // registers pprof handlers "runtime/debug" ) @@ -16,6 +18,12 @@ func main() { debug.PrintStack() } }() + + go func() { + if err := http.ListenAndServe(":6060", nil); err != nil { + log.Fatalf("pprof server failed: %v", err) + } + }() var configPath string flag.StringVar(&configPath, "config", "/data", "path to the data folder") flag.Parse() diff --git a/pkg/arr/arr.go b/pkg/arr/arr.go index 45ca1ec..1067956 100644 --- a/pkg/arr/arr.go +++ b/pkg/arr/arr.go @@ -3,8 +3,8 @@ package arr import ( "bytes" "crypto/tls" - "encoding/json" "fmt" + "github.com/goccy/go-json" "github.com/sirrobot01/debrid-blackhole/internal/config" "github.com/sirrobot01/debrid-blackhole/internal/request" "io" diff --git a/pkg/arr/content.go b/pkg/arr/content.go index 04c7e1a..50f4192 100644 --- a/pkg/arr/content.go +++ b/pkg/arr/content.go @@ -1,8 +1,8 @@ package arr import ( - "encoding/json" "fmt" + "github.com/goccy/go-json" "net/http" "strconv" "strings" diff --git a/pkg/arr/history.go b/pkg/arr/history.go index 8d22bf4..f91a791 100644 --- a/pkg/arr/history.go +++ b/pkg/arr/history.go @@ -1,7 +1,7 @@ package arr import ( - "encoding/json" + "github.com/goccy/go-json" "io" "net/http" gourl "net/url" diff --git a/pkg/arr/import.go b/pkg/arr/import.go index 9ef651b..97a107f 100644 --- a/pkg/arr/import.go +++ b/pkg/arr/import.go @@ -1,8 +1,8 @@ package arr import ( - "encoding/json" "fmt" + "github.com/goccy/go-json" "io" "net/http" gourl "net/url" diff --git a/pkg/arr/tmdb.go b/pkg/arr/tmdb.go index 6313c12..3247725 100644 --- a/pkg/arr/tmdb.go +++ b/pkg/arr/tmdb.go @@ -1,7 +1,7 @@ package arr import ( - "encoding/json" + "github.com/goccy/go-json" 
"net/http" url2 "net/url" ) diff --git a/pkg/debrid/alldebrid/alldebrid.go b/pkg/debrid/alldebrid/alldebrid.go index a9deea3..252f1d6 100644 --- a/pkg/debrid/alldebrid/alldebrid.go +++ b/pkg/debrid/alldebrid/alldebrid.go @@ -1,20 +1,19 @@ package alldebrid import ( - "encoding/json" "fmt" + "github.com/goccy/go-json" "github.com/rs/zerolog" - "github.com/sirrobot01/debrid-blackhole/internal/cache" "github.com/sirrobot01/debrid-blackhole/internal/config" "github.com/sirrobot01/debrid-blackhole/internal/logger" "github.com/sirrobot01/debrid-blackhole/internal/request" "github.com/sirrobot01/debrid-blackhole/internal/utils" "github.com/sirrobot01/debrid-blackhole/pkg/debrid/torrent" "slices" + "time" "net/http" gourl "net/url" - "os" "path/filepath" "strconv" ) @@ -24,11 +23,11 @@ type AllDebrid struct { Host string `json:"host"` APIKey string DownloadUncached bool - client *request.RLHTTPClient - cache *cache.Cache - MountPath string - logger zerolog.Logger - CheckCached bool + client *request.Client + + MountPath string + logger zerolog.Logger + CheckCached bool } func (ad *AllDebrid) GetName() string { @@ -39,15 +38,9 @@ func (ad *AllDebrid) GetLogger() zerolog.Logger { return ad.logger } -func (ad *AllDebrid) IsAvailable(infohashes []string) map[string]bool { +func (ad *AllDebrid) IsAvailable(hashes []string) map[string]bool { // Check if the infohashes are available in the local cache - hashes, result := torrent.GetLocalCache(infohashes, ad.cache) - - if len(hashes) == 0 { - // Either all the infohashes are locally cached or none are - ad.cache.AddMultiple(result) - return result - } + result := make(map[string]bool) // Divide hashes into groups of 100 // AllDebrid does not support checking cached infohashes @@ -91,8 +84,8 @@ func getAlldebridStatus(statusCode int) string { } } -func flattenFiles(files []MagnetFile, parentPath string, index *int) []torrent.File { - result := make([]torrent.File, 0) +func flattenFiles(files []MagnetFile, parentPath string, index 
*int) map[string]torrent.File { + result := make(map[string]torrent.File) cfg := config.GetConfig() @@ -104,7 +97,15 @@ func flattenFiles(files []MagnetFile, parentPath string, index *int) []torrent.F if f.Elements != nil { // This is a folder, recurse into it - result = append(result, flattenFiles(f.Elements, currentPath, index)...) + subFiles := flattenFiles(f.Elements, currentPath, index) + for k, v := range subFiles { + if _, ok := result[k]; ok { + // File already exists, use path as key + result[v.Path] = v + } else { + result[k] = v + } + } } else { // This is a file fileName := filepath.Base(f.Name) @@ -128,25 +129,25 @@ func flattenFiles(files []MagnetFile, parentPath string, index *int) []torrent.F Size: f.Size, Path: currentPath, } - result = append(result, file) + result[file.Name] = file } } return result } -func (ad *AllDebrid) GetTorrent(t *torrent.Torrent) (*torrent.Torrent, error) { +func (ad *AllDebrid) UpdateTorrent(t *torrent.Torrent) error { url := fmt.Sprintf("%s/magnet/status?id=%s", ad.Host, t.Id) req, _ := http.NewRequest(http.MethodGet, url, nil) resp, err := ad.client.MakeRequest(req) if err != nil { - return t, err + return err } var res TorrentInfoResponse err = json.Unmarshal(resp, &res) if err != nil { ad.logger.Info().Msgf("Error unmarshalling torrent info: %s", err) - return t, err + return err } data := res.Data.Magnets status := getAlldebridStatus(data.StatusCode) @@ -158,7 +159,6 @@ func (ad *AllDebrid) GetTorrent(t *torrent.Torrent) (*torrent.Torrent, error) { t.Folder = name t.MountPath = ad.MountPath t.Debrid = ad.Name - t.DownloadLinks = make(map[string]torrent.DownloadLinks) if status == "downloaded" { t.Bytes = data.Size @@ -169,23 +169,21 @@ func (ad *AllDebrid) GetTorrent(t *torrent.Torrent) (*torrent.Torrent, error) { files := flattenFiles(data.Files, "", &index) t.Files = files } - return t, nil + return nil } func (ad *AllDebrid) CheckStatus(torrent *torrent.Torrent, isSymlink bool) (*torrent.Torrent, error) { for { - 
tb, err := ad.GetTorrent(torrent) + err := ad.UpdateTorrent(torrent) - torrent = tb - - if err != nil || tb == nil { - return tb, err + if err != nil || torrent == nil { + return torrent, err } status := torrent.Status if status == "downloaded" { ad.logger.Info().Msgf("Torrent: %s downloaded", torrent.Name) if !isSymlink { - err = ad.GetDownloadLinks(torrent) + err = ad.GenerateDownloadLinks(torrent) if err != nil { return torrent, err } @@ -217,8 +215,7 @@ func (ad *AllDebrid) DeleteTorrent(torrent *torrent.Torrent) { } } -func (ad *AllDebrid) GetDownloadLinks(t *torrent.Torrent) error { - downloadLinks := make(map[string]torrent.DownloadLinks) +func (ad *AllDebrid) GenerateDownloadLinks(t *torrent.Torrent) error { for _, file := range t.Files { url := fmt.Sprintf("%s/link/unlock", ad.Host) query := gourl.Values{} @@ -234,19 +231,15 @@ func (ad *AllDebrid) GetDownloadLinks(t *torrent.Torrent) error { return err } link := data.Data.Link + file.DownloadLink = link + file.Generated = time.Now() + t.Files[file.Name] = file - dl := torrent.DownloadLinks{ - Link: file.Link, - Filename: data.Data.Filename, - DownloadLink: link, - } - downloadLinks[file.Id] = dl } - t.DownloadLinks = downloadLinks return nil } -func (ad *AllDebrid) GetDownloadLink(t *torrent.Torrent, file *torrent.File) *torrent.DownloadLinks { +func (ad *AllDebrid) GetDownloadLink(t *torrent.Torrent, file *torrent.File) *torrent.File { url := fmt.Sprintf("%s/link/unlock", ad.Host) query := gourl.Values{} query.Add("link", file.Link) @@ -261,11 +254,9 @@ func (ad *AllDebrid) GetDownloadLink(t *torrent.Torrent, file *torrent.File) *to return nil } link := data.Data.Link - return &torrent.DownloadLinks{ - DownloadLink: link, - Link: file.Link, - Filename: data.Data.Filename, - } + file.DownloadLink = link + file.Generated = time.Now() + return file } func (ad *AllDebrid) GetCheckCached() bool { @@ -276,6 +267,10 @@ func (ad *AllDebrid) GetTorrents() ([]*torrent.Torrent, error) { return nil, nil } +func (ad 
*AllDebrid) GetDownloads() (map[string]torrent.DownloadLinks, error) { + return nil, nil +} + func (ad *AllDebrid) GetDownloadingStatus() []string { return []string{"downloading"} } @@ -284,21 +279,27 @@ func (ad *AllDebrid) GetDownloadUncached() bool { return ad.DownloadUncached } -func New(dc config.Debrid, cache *cache.Cache) *AllDebrid { +func (ad *AllDebrid) ConvertLinksToFiles(links []string) []torrent.File { + return nil +} + +func New(dc config.Debrid) *AllDebrid { rl := request.ParseRateLimit(dc.RateLimit) headers := map[string]string{ "Authorization": fmt.Sprintf("Bearer %s", dc.APIKey), } - client := request.NewRLHTTPClient(rl, headers) + _log := logger.NewLogger(dc.Name, config.GetConfig().LogLevel) + client := request.New(). + WithHeaders(headers). + WithRateLimiter(rl).WithLogger(_log) return &AllDebrid{ Name: "alldebrid", Host: dc.Host, APIKey: dc.APIKey, DownloadUncached: dc.DownloadUncached, client: client, - cache: cache, MountPath: dc.Folder, - logger: logger.NewLogger(dc.Name, config.GetConfig().LogLevel, os.Stdout), + logger: logger.NewLogger(dc.Name, config.GetConfig().LogLevel), CheckCached: dc.CheckCached, } } diff --git a/pkg/debrid/cache/cache.go b/pkg/debrid/cache/cache.go deleted file mode 100644 index 849a173..0000000 --- a/pkg/debrid/cache/cache.go +++ /dev/null @@ -1,452 +0,0 @@ -package cache - -import ( - "context" - "encoding/json" - "fmt" - "github.com/dgraph-io/badger/v4" - "github.com/rs/zerolog" - "github.com/sirrobot01/debrid-blackhole/internal/logger" - "os" - "path/filepath" - "runtime" - "sync" - "sync/atomic" - "time" - - "github.com/sirrobot01/debrid-blackhole/internal/config" - "github.com/sirrobot01/debrid-blackhole/pkg/debrid/engine" - "github.com/sirrobot01/debrid-blackhole/pkg/debrid/torrent" -) - -type DownloadLinkCache struct { - Link string `json:"download_link"` -} - -type CachedTorrent struct { - *torrent.Torrent - LastRead time.Time `json:"last_read"` - IsComplete bool `json:"is_complete"` - DownloadLinks 
map[string]DownloadLinkCache `json:"download_links"` -} - -var ( - _logInstance zerolog.Logger - once sync.Once -) - -func getLogger() zerolog.Logger { - once.Do(func() { - cfg := config.GetConfig() - _logInstance = logger.NewLogger("cache", cfg.LogLevel, os.Stdout) - }) - return _logInstance -} - -type Cache struct { - dir string - client engine.Service - db *badger.DB - torrents map[string]*CachedTorrent // key: torrent.Id, value: *CachedTorrent - torrentsMutex sync.RWMutex - torrentsNames map[string]*CachedTorrent // key: torrent.Name, value: torrent - torrentNamesMutex sync.RWMutex - LastUpdated time.Time `json:"last_updated"` -} - -func (c *Cache) SetTorrent(t *CachedTorrent) { - c.torrentsMutex.Lock() - defer c.torrentsMutex.Unlock() - c.torrents[t.Id] = t -} - -func (c *Cache) SetTorrentName(name string, t *CachedTorrent) { - c.torrentNamesMutex.Lock() - defer c.torrentNamesMutex.Unlock() - c.torrentsNames[name] = t -} - -func (c *Cache) GetTorrents() map[string]*CachedTorrent { - c.torrentsMutex.RLock() - defer c.torrentsMutex.RUnlock() - return c.torrents -} - -func (c *Cache) GetTorrentNames() map[string]*CachedTorrent { - c.torrentNamesMutex.RLock() - defer c.torrentNamesMutex.RUnlock() - return c.torrentsNames -} - -type Manager struct { - caches map[string]*Cache -} - -func NewManager(debridService *engine.Engine) *Manager { - cfg := config.GetConfig() - cm := &Manager{ - caches: make(map[string]*Cache), - } - for _, debrid := range debridService.GetDebrids() { - c := New(debrid, cfg.Path) - cm.caches[debrid.GetName()] = c - } - return cm -} - -func (m *Manager) GetCaches() map[string]*Cache { - return m.caches -} - -func (m *Manager) GetCache(debridName string) *Cache { - return m.caches[debridName] -} - -func New(debridService engine.Service, basePath string) *Cache { - dbPath := filepath.Join(basePath, "cache", debridService.GetName(), "db") - return &Cache{ - dir: dbPath, - torrents: make(map[string]*CachedTorrent), - torrentsNames: 
make(map[string]*CachedTorrent), - client: debridService, - } -} - -func (c *Cache) Start() error { - _logger := getLogger() - _logger.Info().Msg("Starting cache for: " + c.client.GetName()) - - // Make sure the directory exists - if err := os.MkdirAll(c.dir, 0755); err != nil { - return fmt.Errorf("failed to create cache directory: %w", err) - } - - // Open BadgerDB - opts := badger.DefaultOptions(c.dir) - opts.Logger = nil // Disable Badger's internal logger - - var err error - c.db, err = badger.Open(opts) - if err != nil { - return fmt.Errorf("failed to open BadgerDB: %w", err) - } - - if err := c.Load(); err != nil { - return fmt.Errorf("failed to load cache: %v", err) - } - - if err := c.Sync(); err != nil { - return fmt.Errorf("failed to sync cache: %v", err) - } - - return nil -} - -func (c *Cache) Close() error { - if c.db != nil { - return c.db.Close() - } - return nil -} - -func (c *Cache) Load() error { - _logger := getLogger() - - err := c.db.View(func(txn *badger.Txn) error { - opts := badger.DefaultIteratorOptions - it := txn.NewIterator(opts) - defer it.Close() - - prefix := []byte("torrent:") - for it.Seek(prefix); it.ValidForPrefix(prefix); it.Next() { - item := it.Item() - - err := item.Value(func(val []byte) error { - var ct CachedTorrent - if err := json.Unmarshal(val, &ct); err != nil { - _logger.Debug().Err(err).Msgf("Failed to unmarshal torrent") - return nil // Continue to next item - } - - if len(ct.Files) > 0 { - c.SetTorrent(&ct) - c.SetTorrentName(ct.Name, &ct) - } - return nil - }) - - if err != nil { - _logger.Debug().Err(err).Msg("Error reading torrent value") - } - } - return nil - }) - - return err -} - -func (c *Cache) GetTorrent(id string) *CachedTorrent { - if t, ok := c.GetTorrents()[id]; ok { - return t - } - return nil -} - -func (c *Cache) GetTorrentByName(name string) *CachedTorrent { - if t, ok := c.GetTorrentNames()[name]; ok { - return t - } - return nil -} - -func (c *Cache) SaveTorrent(ct *CachedTorrent) error { - 
data, err := json.Marshal(ct) - if err != nil { - return fmt.Errorf("failed to marshal torrent: %w", err) - } - - key := []byte(fmt.Sprintf("torrent:%s", ct.Torrent.Id)) - - err = c.db.Update(func(txn *badger.Txn) error { - return txn.Set(key, data) - }) - - if err != nil { - return fmt.Errorf("failed to save torrent to BadgerDB: %w", err) - } - - // Also create an index by name for quick lookups - nameKey := []byte(fmt.Sprintf("name:%s", ct.Torrent.Name)) - err = c.db.Update(func(txn *badger.Txn) error { - return txn.Set(nameKey, []byte(ct.Torrent.Id)) - }) - - if err != nil { - return fmt.Errorf("failed to save torrent name index: %w", err) - } - - return nil -} - -func (c *Cache) SaveAll() error { - const batchSize = 100 - var wg sync.WaitGroup - _logger := getLogger() - - tasks := make(chan *CachedTorrent, batchSize) - - for i := 0; i < runtime.NumCPU(); i++ { - wg.Add(1) - go func() { - defer wg.Done() - for ct := range tasks { - if err := c.SaveTorrent(ct); err != nil { - _logger.Error().Err(err).Msg("failed to save torrent") - } - } - }() - } - - for _, value := range c.GetTorrents() { - tasks <- value - } - - close(tasks) - wg.Wait() - c.LastUpdated = time.Now() - - // Run value log garbage collection when appropriate - // This helps reclaim space from deleted/updated values - go func() { - err := c.db.RunValueLogGC(0.5) // Run GC if 50% of the value log can be discarded - if err != nil && err != badger.ErrNoRewrite { - _logger.Debug().Err(err).Msg("BadgerDB value log GC") - } - }() - - return nil -} - -func (c *Cache) Sync() error { - _logger := getLogger() - torrents, err := c.client.GetTorrents() - if err != nil { - return fmt.Errorf("failed to sync torrents: %v", err) - } - _logger.Info().Msgf("Syncing %d torrents", len(torrents)) - - // Calculate optimal workers - balance between CPU and IO - workers := runtime.NumCPU() * 4 // A more balanced multiplier for BadgerDB - - // Create channels with appropriate buffering - workChan := make(chan 
*torrent.Torrent, workers*2) - - // Use an atomic counter for progress tracking - var processed int64 - var errorCount int64 - - // Create a context with cancellation in case of critical errors - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - // Create a wait group for workers - var wg sync.WaitGroup - - // Start workers - for i := 0; i < workers; i++ { - wg.Add(1) - go func() { - defer wg.Done() - for { - select { - case t, ok := <-workChan: - if !ok { - return // Channel closed, exit goroutine - } - - if err := c.processTorrent(t); err != nil { - _logger.Error().Err(err).Str("torrent", t.Name).Msg("sync error") - atomic.AddInt64(&errorCount, 1) - } - - count := atomic.AddInt64(&processed, 1) - if count%1000 == 0 { - _logger.Info().Msgf("Progress: %d/%d torrents processed", count, len(torrents)) - } - - case <-ctx.Done(): - return // Context cancelled, exit goroutine - } - } - }() - } - - // Feed work to workers - for _, t := range torrents { - select { - case workChan <- t: - // Work sent successfully - case <-ctx.Done(): - break // Context cancelled - } - } - - // Signal workers that no more work is coming - close(workChan) - - // Wait for all workers to complete - wg.Wait() - - _logger.Info().Msgf("Sync complete: %d torrents processed, %d errors", len(torrents), errorCount) - return nil -} - -func (c *Cache) processTorrent(t *torrent.Torrent) error { - if ct := c.GetTorrent(t.Id); ct != nil { - if ct.IsComplete { - return nil - } - } - c.AddTorrent(t) - return nil -} - -func (c *Cache) AddTorrent(t *torrent.Torrent) { - _logger := getLogger() - - if len(t.Files) == 0 { - tNew, err := c.client.GetTorrent(t) - _logger.Debug().Msgf("Getting torrent files for %s", t.Id) - if err != nil { - _logger.Debug().Msgf("Failed to get torrent files for %s: %v", t.Id, err) - return - } - t = tNew - } - - if len(t.Files) == 0 { - _logger.Debug().Msgf("No files found for %s", t.Id) - return - } - - ct := &CachedTorrent{ - Torrent: t, - LastRead: 
time.Now(), - IsComplete: len(t.Files) > 0, - DownloadLinks: make(map[string]DownloadLinkCache), - } - - c.SetTorrent(ct) - c.SetTorrentName(t.Name, ct) - - go func() { - if err := c.SaveTorrent(ct); err != nil { - _logger.Debug().Err(err).Msgf("Failed to save torrent %s", t.Id) - } - }() -} - -func (c *Cache) RefreshTorrent(torrent *CachedTorrent) *CachedTorrent { - _logger := getLogger() - - t, err := c.client.GetTorrent(torrent.Torrent) - if err != nil { - _logger.Debug().Msgf("Failed to get torrent files for %s: %v", torrent.Id, err) - return nil - } - if len(t.Files) == 0 { - return nil - } - - ct := &CachedTorrent{ - Torrent: t, - LastRead: time.Now(), - IsComplete: len(t.Files) > 0, - DownloadLinks: make(map[string]DownloadLinkCache), - } - - c.SetTorrent(ct) - c.SetTorrentName(t.Name, ct) - - go func() { - if err := c.SaveTorrent(ct); err != nil { - _logger.Debug().Err(err).Msgf("Failed to save torrent %s", t.Id) - } - }() - - return ct -} - -func (c *Cache) GetFileDownloadLink(t *CachedTorrent, file *torrent.File) (string, error) { - _logger := getLogger() - - if linkCache, ok := t.DownloadLinks[file.Id]; ok { - return linkCache.Link, nil - } - - if file.Link == "" { - t = c.RefreshTorrent(t) - if t == nil { - return "", fmt.Errorf("torrent not found") - } - file = t.Torrent.GetFile(file.Id) - } - - _logger.Debug().Msgf("Getting download link for %s", t.Name) - link := c.client.GetDownloadLink(t.Torrent, file) - if link == nil { - return "", fmt.Errorf("download link not found") - } - - t.DownloadLinks[file.Id] = DownloadLinkCache{ - Link: link.DownloadLink, - } - - go func() { - if err := c.SaveTorrent(t); err != nil { - _logger.Debug().Err(err).Msgf("Failed to save torrent %s", t.Id) - } - }() - - return link.DownloadLink, nil -} diff --git a/pkg/debrid/debrid.go b/pkg/debrid/debrid.go index e3dc0df..02073f9 100644 --- a/pkg/debrid/debrid.go +++ b/pkg/debrid/debrid.go @@ -1,13 +1,12 @@ package debrid import ( - "cmp" "fmt" - 
"github.com/sirrobot01/debrid-blackhole/internal/cache" "github.com/sirrobot01/debrid-blackhole/internal/config" "github.com/sirrobot01/debrid-blackhole/internal/utils" "github.com/sirrobot01/debrid-blackhole/pkg/arr" "github.com/sirrobot01/debrid-blackhole/pkg/debrid/alldebrid" + "github.com/sirrobot01/debrid-blackhole/pkg/debrid/debrid" "github.com/sirrobot01/debrid-blackhole/pkg/debrid/debrid_link" "github.com/sirrobot01/debrid-blackhole/pkg/debrid/engine" "github.com/sirrobot01/debrid-blackhole/pkg/debrid/realdebrid" @@ -17,33 +16,33 @@ import ( func New() *engine.Engine { cfg := config.GetConfig() - maxCachedSize := cmp.Or(cfg.MaxCacheSize, 1000) - debrids := make([]engine.Service, 0) - // Divide the cache size by the number of debrids - maxCacheSize := maxCachedSize / len(cfg.Debrids) + debrids := make([]debrid.Client, 0) for _, dc := range cfg.Debrids { - d := createDebrid(dc, cache.New(maxCacheSize)) - logger := d.GetLogger() + client := createDebridClient(dc) + logger := client.GetLogger() logger.Info().Msg("Debrid Service started") - debrids = append(debrids, d) + debrids = append(debrids, client) + } + d := &engine.Engine{ + Debrids: debrids, + LastUsed: 0, } - d := &engine.Engine{Debrids: debrids, LastUsed: 0} return d } -func createDebrid(dc config.Debrid, cache *cache.Cache) engine.Service { +func createDebridClient(dc config.Debrid) debrid.Client { switch dc.Name { case "realdebrid": - return realdebrid.New(dc, cache) + return realdebrid.New(dc) case "torbox": - return torbox.New(dc, cache) + return torbox.New(dc) case "debridlink": - return debrid_link.New(dc, cache) + return debrid_link.New(dc) case "alldebrid": - return alldebrid.New(dc, cache) + return alldebrid.New(dc) default: - return realdebrid.New(dc, cache) + return realdebrid.New(dc) } } @@ -55,6 +54,7 @@ func ProcessTorrent(d *engine.Engine, magnet *utils.Magnet, a *arr.Arr, isSymlin Name: magnet.Name, Arr: a, Size: magnet.Size, + Files: make(map[string]torrent.File), } errs := 
make([]error, 0) diff --git a/pkg/debrid/engine/service.go b/pkg/debrid/debrid/debrid.go similarity index 68% rename from pkg/debrid/engine/service.go rename to pkg/debrid/debrid/debrid.go index 30dcf25..393abf1 100644 --- a/pkg/debrid/engine/service.go +++ b/pkg/debrid/debrid/debrid.go @@ -1,22 +1,24 @@ -package engine +package debrid import ( "github.com/rs/zerolog" "github.com/sirrobot01/debrid-blackhole/pkg/debrid/torrent" ) -type Service interface { +type Client interface { SubmitMagnet(tr *torrent.Torrent) (*torrent.Torrent, error) CheckStatus(tr *torrent.Torrent, isSymlink bool) (*torrent.Torrent, error) - GetDownloadLinks(tr *torrent.Torrent) error - GetDownloadLink(tr *torrent.Torrent, file *torrent.File) *torrent.DownloadLinks + GenerateDownloadLinks(tr *torrent.Torrent) error + GetDownloadLink(tr *torrent.Torrent, file *torrent.File) *torrent.File + ConvertLinksToFiles(links []string) []torrent.File DeleteTorrent(tr *torrent.Torrent) IsAvailable(infohashes []string) map[string]bool GetCheckCached() bool GetDownloadUncached() bool - GetTorrent(torrent *torrent.Torrent) (*torrent.Torrent, error) + UpdateTorrent(torrent *torrent.Torrent) error GetTorrents() ([]*torrent.Torrent, error) GetName() string GetLogger() zerolog.Logger GetDownloadingStatus() []string + GetDownloads() (map[string]torrent.DownloadLinks, error) } diff --git a/pkg/debrid/debrid_link/debrid_link.go b/pkg/debrid/debrid_link/debrid_link.go index eac1e09..f287a59 100644 --- a/pkg/debrid/debrid_link/debrid_link.go +++ b/pkg/debrid/debrid_link/debrid_link.go @@ -2,19 +2,18 @@ package debrid_link import ( "bytes" - "encoding/json" "fmt" + "github.com/goccy/go-json" "github.com/rs/zerolog" - "github.com/sirrobot01/debrid-blackhole/internal/cache" "github.com/sirrobot01/debrid-blackhole/internal/config" "github.com/sirrobot01/debrid-blackhole/internal/logger" "github.com/sirrobot01/debrid-blackhole/internal/request" "github.com/sirrobot01/debrid-blackhole/internal/utils" 
"github.com/sirrobot01/debrid-blackhole/pkg/debrid/torrent" "slices" + "time" "net/http" - "os" "strings" ) @@ -23,11 +22,11 @@ type DebridLink struct { Host string `json:"host"` APIKey string DownloadUncached bool - client *request.RLHTTPClient - cache *cache.Cache - MountPath string - logger zerolog.Logger - CheckCached bool + client *request.Client + + MountPath string + logger zerolog.Logger + CheckCached bool } func (dl *DebridLink) GetName() string { @@ -38,15 +37,9 @@ func (dl *DebridLink) GetLogger() zerolog.Logger { return dl.logger } -func (dl *DebridLink) IsAvailable(infohashes []string) map[string]bool { +func (dl *DebridLink) IsAvailable(hashes []string) map[string]bool { // Check if the infohashes are available in the local cache - hashes, result := torrent.GetLocalCache(infohashes, dl.cache) - - if len(hashes) == 0 { - // Either all the infohashes are locally cached or none are - dl.cache.AddMultiple(result) - return result - } + result := make(map[string]bool) // Divide hashes into groups of 100 for i := 0; i < len(hashes); i += 100 { @@ -93,32 +86,31 @@ func (dl *DebridLink) IsAvailable(infohashes []string) map[string]bool { } } } - dl.cache.AddMultiple(result) // Add the results to the cache return result } -func (dl *DebridLink) GetTorrent(t *torrent.Torrent) (*torrent.Torrent, error) { +func (dl *DebridLink) UpdateTorrent(t *torrent.Torrent) error { url := fmt.Sprintf("%s/seedbox/list?ids=%s", dl.Host, t.Id) req, _ := http.NewRequest(http.MethodGet, url, nil) resp, err := dl.client.MakeRequest(req) if err != nil { - return t, err + return err } var res TorrentInfo err = json.Unmarshal(resp, &res) if err != nil { - return t, err + return err } if !res.Success { - return t, fmt.Errorf("error getting torrent") + return fmt.Errorf("error getting torrent") } if res.Value == nil { - return t, fmt.Errorf("torrent not found") + return fmt.Errorf("torrent not found") } dt := *res.Value if len(dt) == 0 { - return t, fmt.Errorf("torrent not found") + 
return fmt.Errorf("torrent not found") } data := dt[0] status := "downloading" @@ -136,21 +128,22 @@ func (dl *DebridLink) GetTorrent(t *torrent.Torrent) (*torrent.Torrent, error) { t.Seeders = data.PeersConnected t.Filename = name t.OriginalFilename = name - files := make([]torrent.File, len(data.Files)) cfg := config.GetConfig() - for i, f := range data.Files { + for _, f := range data.Files { if !cfg.IsSizeAllowed(f.Size) { continue } - files[i] = torrent.File{ - Id: f.ID, - Name: f.Name, - Size: f.Size, - Path: f.Name, + file := torrent.File{ + Id: f.ID, + Name: f.Name, + Size: f.Size, + Path: f.Name, + DownloadLink: f.DownloadURL, + Link: f.DownloadURL, } + t.Files[f.Name] = file } - t.Files = files - return t, nil + return nil } func (dl *DebridLink) SubmitMagnet(t *torrent.Torrent) (*torrent.Torrent, error) { @@ -185,33 +178,32 @@ func (dl *DebridLink) SubmitMagnet(t *torrent.Torrent) (*torrent.Torrent, error) t.OriginalFilename = name t.MountPath = dl.MountPath t.Debrid = dl.Name - t.DownloadLinks = make(map[string]torrent.DownloadLinks) - files := make([]torrent.File, len(data.Files)) - for i, f := range data.Files { - files[i] = torrent.File{ - Id: f.ID, - Name: f.Name, - Size: f.Size, - Path: f.Name, - Link: f.DownloadURL, + for _, f := range data.Files { + file := torrent.File{ + Id: f.ID, + Name: f.Name, + Size: f.Size, + Path: f.Name, + Link: f.DownloadURL, + DownloadLink: f.DownloadURL, + Generated: time.Now(), } + t.Files[f.Name] = file } - t.Files = files return t, nil } func (dl *DebridLink) CheckStatus(torrent *torrent.Torrent, isSymlink bool) (*torrent.Torrent, error) { for { - t, err := dl.GetTorrent(torrent) - torrent = t + err := dl.UpdateTorrent(torrent) if err != nil || torrent == nil { return torrent, err } status := torrent.Status if status == "downloaded" { dl.logger.Info().Msgf("Torrent: %s downloaded", torrent.Name) - err = dl.GetDownloadLinks(torrent) + err = dl.GenerateDownloadLinks(torrent) if err != nil { return torrent, err } @@ 
-242,25 +234,16 @@ func (dl *DebridLink) DeleteTorrent(torrent *torrent.Torrent) { } } -func (dl *DebridLink) GetDownloadLinks(t *torrent.Torrent) error { - downloadLinks := make(map[string]torrent.DownloadLinks) - for _, f := range t.Files { - dl := torrent.DownloadLinks{ - Link: f.Link, - Filename: f.Name, - } - downloadLinks[f.Id] = dl - } - t.DownloadLinks = downloadLinks +func (dl *DebridLink) GenerateDownloadLinks(t *torrent.Torrent) error { return nil } -func (dl *DebridLink) GetDownloadLink(t *torrent.Torrent, file *torrent.File) *torrent.DownloadLinks { - dlLink, ok := t.DownloadLinks[file.Id] - if !ok { - return nil - } - return &dlLink +func (dl *DebridLink) GetDownloads() (map[string]torrent.DownloadLinks, error) { + return nil, nil +} + +func (dl *DebridLink) GetDownloadLink(t *torrent.Torrent, file *torrent.File) *torrent.File { + return file } func (dl *DebridLink) GetDownloadingStatus() []string { @@ -275,22 +258,24 @@ func (dl *DebridLink) GetDownloadUncached() bool { return dl.DownloadUncached } -func New(dc config.Debrid, cache *cache.Cache) *DebridLink { +func New(dc config.Debrid) *DebridLink { rl := request.ParseRateLimit(dc.RateLimit) headers := map[string]string{ "Authorization": fmt.Sprintf("Bearer %s", dc.APIKey), "Content-Type": "application/json", } - client := request.NewRLHTTPClient(rl, headers) + _log := logger.NewLogger(dc.Name, config.GetConfig().LogLevel) + client := request.New(). + WithHeaders(headers). 
+ WithRateLimiter(rl).WithLogger(_log) return &DebridLink{ Name: "debridlink", Host: dc.Host, APIKey: dc.APIKey, DownloadUncached: dc.DownloadUncached, client: client, - cache: cache, MountPath: dc.Folder, - logger: logger.NewLogger(dc.Name, config.GetConfig().LogLevel, os.Stdout), + logger: logger.NewLogger(dc.Name, config.GetConfig().LogLevel), CheckCached: dc.CheckCached, } } @@ -298,3 +283,7 @@ func New(dc config.Debrid, cache *cache.Cache) *DebridLink { func (dl *DebridLink) GetTorrents() ([]*torrent.Torrent, error) { return nil, nil } + +func (dl *DebridLink) ConvertLinksToFiles(links []string) []torrent.File { + return nil +} diff --git a/pkg/debrid/engine/engine.go b/pkg/debrid/engine/engine.go index 7d0d303..6662ff0 100644 --- a/pkg/debrid/engine/engine.go +++ b/pkg/debrid/engine/engine.go @@ -1,18 +1,22 @@ package engine +import ( + "github.com/sirrobot01/debrid-blackhole/pkg/debrid/debrid" +) + type Engine struct { - Debrids []Service + Debrids []debrid.Client LastUsed int } -func (d *Engine) Get() Service { +func (d *Engine) Get() debrid.Client { if d.LastUsed == 0 { return d.Debrids[0] } return d.Debrids[d.LastUsed] } -func (d *Engine) GetByName(name string) Service { +func (d *Engine) GetByName(name string) debrid.Client { for _, deb := range d.Debrids { if deb.GetName() == name { return deb @@ -21,6 +25,6 @@ func (d *Engine) GetByName(name string) Service { return nil } -func (d *Engine) GetDebrids() []Service { +func (d *Engine) GetDebrids() []debrid.Client { return d.Debrids } diff --git a/pkg/debrid/realdebrid/realdebrid.go b/pkg/debrid/realdebrid/realdebrid.go index 74d327b..a83682f 100644 --- a/pkg/debrid/realdebrid/realdebrid.go +++ b/pkg/debrid/realdebrid/realdebrid.go @@ -1,22 +1,23 @@ package realdebrid import ( - "encoding/json" "fmt" + "github.com/goccy/go-json" "github.com/rs/zerolog" - "github.com/sirrobot01/debrid-blackhole/internal/cache" "github.com/sirrobot01/debrid-blackhole/internal/config" 
"github.com/sirrobot01/debrid-blackhole/internal/logger" "github.com/sirrobot01/debrid-blackhole/internal/request" "github.com/sirrobot01/debrid-blackhole/internal/utils" "github.com/sirrobot01/debrid-blackhole/pkg/debrid/torrent" + "io" "net/http" gourl "net/url" - "os" "path/filepath" "slices" "strconv" "strings" + "sync" + "time" ) type RealDebrid struct { @@ -24,11 +25,11 @@ type RealDebrid struct { Host string `json:"host"` APIKey string DownloadUncached bool - client *request.RLHTTPClient - cache *cache.Cache - MountPath string - logger zerolog.Logger - CheckCached bool + client *request.Client + + MountPath string + logger zerolog.Logger + CheckCached bool } func (r *RealDebrid) GetName() string { @@ -39,11 +40,11 @@ func (r *RealDebrid) GetLogger() zerolog.Logger { return r.logger } -// GetTorrentFiles returns a list of torrent files from the torrent info +// getTorrentFiles returns a list of torrent files from the torrent info // validate is used to determine if the files should be validated // if validate is false, selected files will be returned -func GetTorrentFiles(data TorrentInfo, validate bool) []torrent.File { - files := make([]torrent.File, 0) +func getTorrentFiles(t *torrent.Torrent, data TorrentInfo, validate bool) map[string]torrent.File { + files := make(map[string]torrent.File) cfg := config.GetConfig() idx := 0 for _, f := range data.Files { @@ -72,6 +73,13 @@ func GetTorrentFiles(data TorrentInfo, validate bool) []torrent.File { if len(data.Links) > idx { _link = data.Links[idx] } + + if a, ok := t.Files[name]; ok { + a.Link = _link + files[name] = a + continue + } + file := torrent.File{ Name: name, Path: name, @@ -79,21 +87,15 @@ func GetTorrentFiles(data TorrentInfo, validate bool) []torrent.File { Id: strconv.Itoa(fileId), Link: _link, } - files = append(files, file) + files[name] = file idx++ } return files } -func (r *RealDebrid) IsAvailable(infohashes []string) map[string]bool { +func (r *RealDebrid) IsAvailable(hashes []string) 
map[string]bool { // Check if the infohashes are available in the local cache - hashes, result := torrent.GetLocalCache(infohashes, r.cache) - - if len(hashes) == 0 { - // Either all the infohashes are locally cached or none are - r.cache.AddMultiple(result) - return result - } + result := make(map[string]bool) // Divide hashes into groups of 100 for i := 0; i < len(hashes); i += 200 { @@ -136,7 +138,6 @@ func (r *RealDebrid) IsAvailable(infohashes []string) map[string]bool { } } } - r.cache.AddMultiple(result) // Add the results to the cache return result } @@ -160,17 +161,17 @@ func (r *RealDebrid) SubmitMagnet(t *torrent.Torrent) (*torrent.Torrent, error) return t, nil } -func (r *RealDebrid) GetTorrent(t *torrent.Torrent) (*torrent.Torrent, error) { +func (r *RealDebrid) UpdateTorrent(t *torrent.Torrent) error { url := fmt.Sprintf("%s/torrents/info/%s", r.Host, t.Id) req, _ := http.NewRequest(http.MethodGet, url, nil) resp, err := r.client.MakeRequest(req) if err != nil { - return t, err + return err } var data TorrentInfo err = json.Unmarshal(resp, &data) if err != nil { - return t, err + return err } name := utils.RemoveInvalidChars(data.OriginalFilename) t.Name = name @@ -185,10 +186,8 @@ func (r *RealDebrid) GetTorrent(t *torrent.Torrent) (*torrent.Torrent, error) { t.Links = data.Links t.MountPath = r.MountPath t.Debrid = r.Name - t.DownloadLinks = make(map[string]torrent.DownloadLinks) - files := GetTorrentFiles(data, false) // Get selected files - t.Files = files - return t, nil + t.Files = getTorrentFiles(t, data, false) // Get selected files + return nil } func (r *RealDebrid) CheckStatus(t *torrent.Torrent, isSymlink bool) (*torrent.Torrent, error) { @@ -219,13 +218,12 @@ func (r *RealDebrid) CheckStatus(t *torrent.Torrent, isSymlink bool) (*torrent.T t.Debrid = r.Name t.MountPath = r.MountPath if status == "waiting_files_selection" { - files := GetTorrentFiles(data, true) // Validate files to be selected - t.Files = files - if len(files) == 0 { + 
t.Files = getTorrentFiles(t, data, true) + if len(t.Files) == 0 { return t, fmt.Errorf("no video files found") } filesId := make([]string, 0) - for _, f := range files { + for _, f := range t.Files { filesId = append(filesId, f.Id) } p := gourl.Values{ @@ -238,11 +236,10 @@ func (r *RealDebrid) CheckStatus(t *torrent.Torrent, isSymlink bool) (*torrent.T return t, err } } else if status == "downloaded" { - files := GetTorrentFiles(data, false) // Get selected files - t.Files = files + t.Files = getTorrentFiles(t, data, false) // Get selected files r.logger.Info().Msgf("Torrent: %s downloaded to RD", t.Name) if !isSymlink { - err = r.GetDownloadLinks(t) + err = r.GenerateDownloadLinks(t) if err != nil { return t, err } @@ -271,12 +268,11 @@ func (r *RealDebrid) DeleteTorrent(torrent *torrent.Torrent) { } } -func (r *RealDebrid) GetDownloadLinks(t *torrent.Torrent) error { +func (r *RealDebrid) GenerateDownloadLinks(t *torrent.Torrent) error { url := fmt.Sprintf("%s/unrestrict/link/", r.Host) - downloadLinks := make(map[string]torrent.DownloadLinks) for _, f := range t.Files { - dlLink := t.DownloadLinks[f.Id] - if f.Link == "" || dlLink.DownloadLink != "" { + if f.DownloadLink != "" { + // Or check the generated link continue } payload := gourl.Values{ @@ -291,18 +287,41 @@ func (r *RealDebrid) GetDownloadLinks(t *torrent.Torrent) error { if err = json.Unmarshal(resp, &data); err != nil { return err } - download := torrent.DownloadLinks{ - Link: data.Link, - Filename: data.Filename, - DownloadLink: data.Download, - } - downloadLinks[f.Id] = download + f.DownloadLink = data.Download + f.Generated = time.Now() + t.Files[f.Name] = f } - t.DownloadLinks = downloadLinks return nil } -func (r *RealDebrid) GetDownloadLink(t *torrent.Torrent, file *torrent.File) *torrent.DownloadLinks { +func (r *RealDebrid) ConvertLinksToFiles(links []string) []torrent.File { + files := make([]torrent.File, 0) + for _, l := range links { + url := fmt.Sprintf("%s/unrestrict/link/", r.Host) + 
payload := gourl.Values{ + "link": {l}, + } + req, _ := http.NewRequest(http.MethodPost, url, strings.NewReader(payload.Encode())) + resp, err := r.client.MakeRequest(req) + if err != nil { + continue + } + var data UnrestrictResponse + if err = json.Unmarshal(resp, &data); err != nil { + continue + } + files = append(files, torrent.File{ + Name: data.Filename, + Size: data.Filesize, + Link: l, + DownloadLink: data.Download, + Generated: time.Now(), + }) + } + return files +} + +func (r *RealDebrid) GetDownloadLink(t *torrent.Torrent, file *torrent.File) *torrent.File { url := fmt.Sprintf("%s/unrestrict/link/", r.Host) payload := gourl.Values{ "link": {file.Link}, @@ -316,32 +335,43 @@ func (r *RealDebrid) GetDownloadLink(t *torrent.Torrent, file *torrent.File) *to if err = json.Unmarshal(resp, &data); err != nil { return nil } - return &torrent.DownloadLinks{ - Link: data.Link, - Filename: data.Filename, - DownloadLink: data.Download, - } + file.DownloadLink = data.Download + file.Generated = time.Now() + return file } func (r *RealDebrid) GetCheckCached() bool { return r.CheckCached } -func (r *RealDebrid) getTorrents(offset int, limit int) ([]*torrent.Torrent, error) { +func (r *RealDebrid) getTorrents(offset int, limit int) (int, []*torrent.Torrent, error) { url := fmt.Sprintf("%s/torrents?limit=%d", r.Host, limit) + torrents := make([]*torrent.Torrent, 0) if offset > 0 { url = fmt.Sprintf("%s&offset=%d", url, offset) } req, _ := http.NewRequest(http.MethodGet, url, nil) - resp, err := r.client.MakeRequest(req) + resp, err := r.client.Do(req) + if err != nil { - return nil, err + return 0, torrents, err } + + if resp.StatusCode != http.StatusOK { + resp.Body.Close() + return 0, torrents, fmt.Errorf("realdebrid API error: %d", resp.StatusCode) + } + + defer resp.Body.Close() + body, err := io.ReadAll(resp.Body) + if err != nil { + return 0, torrents, err + } + totalItems, _ := strconv.Atoi(resp.Header.Get("X-Total-Count")) var data []TorrentsResponse - if err = 
json.Unmarshal(resp, &data); err != nil { - return nil, err + if err = json.Unmarshal(body, &data); err != nil { + return 0, nil, err } - torrents := make([]*torrent.Torrent, 0) filenames := map[string]bool{} for _, t := range data { if _, exists := filenames[t.Filename]; exists { @@ -356,20 +386,122 @@ func (r *RealDebrid) getTorrents(offset int, limit int) ([]*torrent.Torrent, err Filename: t.Filename, OriginalFilename: t.Filename, Links: t.Links, + Files: make(map[string]torrent.File), + InfoHash: t.Hash, + Debrid: r.Name, + MountPath: r.MountPath, }) } - return torrents, nil + return totalItems, torrents, nil } func (r *RealDebrid) GetTorrents() ([]*torrent.Torrent, error) { - torrents := make([]*torrent.Torrent, 0) - offset := 0 - limit := 1000 - ts, _ := r.getTorrents(offset, limit) - torrents = append(torrents, ts...) - offset = len(torrents) - return torrents, nil + limit := 5000 + // Get first batch and total count + totalItems, firstBatch, err := r.getTorrents(0, limit) + if err != nil { + return nil, err + } + + allTorrents := firstBatch + + // Calculate remaining requests + remaining := totalItems - len(firstBatch) + if remaining <= 0 { + return allTorrents, nil + } + + // Prepare for concurrent fetching + var wg sync.WaitGroup + var mu sync.Mutex + var fetchError error + + // Calculate how many more requests we need + batchCount := (remaining + limit - 1) / limit // ceiling division + + for i := 1; i <= batchCount; i++ { + wg.Add(1) + go func(batchOffset int) { + defer wg.Done() + + _, batch, err := r.getTorrents(batchOffset, limit) + if err != nil { + mu.Lock() + fetchError = err + mu.Unlock() + return + } + + mu.Lock() + allTorrents = append(allTorrents, batch...) 
+ mu.Unlock() + }(i * limit) + } + + // Wait for all fetches to complete + wg.Wait() + + if fetchError != nil { + return nil, fetchError + } + + return allTorrents, nil +} + +func (r *RealDebrid) GetDownloads() (map[string]torrent.DownloadLinks, error) { + links := make(map[string]torrent.DownloadLinks) + offset := 0 + limit := 5000 + for { + dl, err := r._getDownloads(offset, limit) + if err != nil { + break + } + if len(dl) == 0 { + break + } + + for _, d := range dl { + if _, exists := links[d.Link]; exists { + // This is ordered by date, so we can skip the rest + continue + } + links[d.Link] = d + } + + offset += len(dl) + } + return links, nil +} + +func (r *RealDebrid) _getDownloads(offset int, limit int) ([]torrent.DownloadLinks, error) { + url := fmt.Sprintf("%s/downloads?limit=%d", r.Host, limit) + if offset > 0 { + url = fmt.Sprintf("%s&offset=%d", url, offset) + } + req, _ := http.NewRequest(http.MethodGet, url, nil) + resp, err := r.client.MakeRequest(req) + if err != nil { + return nil, err + } + var data []DownloadsResponse + if err = json.Unmarshal(resp, &data); err != nil { + return nil, err + } + links := make([]torrent.DownloadLinks, 0) + for _, d := range data { + links = append(links, torrent.DownloadLinks{ + Filename: d.Filename, + Size: d.Filesize, + Link: d.Link, + DownloadLink: d.Download, + Generated: d.Generated, + Id: d.Id, + }) + + } + return links, nil } func (r *RealDebrid) GetDownloadingStatus() []string { @@ -380,21 +512,23 @@ func (r *RealDebrid) GetDownloadUncached() bool { return r.DownloadUncached } -func New(dc config.Debrid, cache *cache.Cache) *RealDebrid { +func New(dc config.Debrid) *RealDebrid { rl := request.ParseRateLimit(dc.RateLimit) headers := map[string]string{ "Authorization": fmt.Sprintf("Bearer %s", dc.APIKey), } - client := request.NewRLHTTPClient(rl, headers) + _log := logger.NewLogger(dc.Name, config.GetConfig().LogLevel) + client := request.New(). + WithHeaders(headers). 
+ WithRateLimiter(rl).WithLogger(_log) return &RealDebrid{ Name: "realdebrid", Host: dc.Host, APIKey: dc.APIKey, DownloadUncached: dc.DownloadUncached, client: client, - cache: cache, MountPath: dc.Folder, - logger: logger.NewLogger(dc.Name, config.GetConfig().LogLevel, os.Stdout), + logger: logger.NewLogger(dc.Name, config.GetConfig().LogLevel), CheckCached: dc.CheckCached, } } diff --git a/pkg/debrid/realdebrid/types.go b/pkg/debrid/realdebrid/types.go index 48fdc6c..3e9b563 100644 --- a/pkg/debrid/realdebrid/types.go +++ b/pkg/debrid/realdebrid/types.go @@ -1,8 +1,8 @@ package realdebrid import ( - "encoding/json" "fmt" + "github.com/goccy/go-json" "time" ) @@ -98,7 +98,7 @@ type UnrestrictResponse struct { Id string `json:"id"` Filename string `json:"filename"` MimeType string `json:"mimeType"` - Filesize int `json:"filesize"` + Filesize int64 `json:"filesize"` Link string `json:"link"` Host string `json:"host"` Chunks int `json:"chunks"` @@ -120,3 +120,17 @@ type TorrentsResponse struct { Links []string `json:"links"` Ended time.Time `json:"ended"` } + +type DownloadsResponse struct { + Id string `json:"id"` + Filename string `json:"filename"` + MimeType string `json:"mimeType"` + Filesize int64 `json:"filesize"` + Link string `json:"link"` + Host string `json:"host"` + HostIcon string `json:"host_icon"` + Chunks int64 `json:"chunks"` + Download string `json:"download"` + Streamable int `json:"streamable"` + Generated time.Time `json:"generated"` +} diff --git a/pkg/debrid/torbox/torbox.go b/pkg/debrid/torbox/torbox.go index f6f9be3..688296c 100644 --- a/pkg/debrid/torbox/torbox.go +++ b/pkg/debrid/torbox/torbox.go @@ -2,20 +2,19 @@ package torbox import ( "bytes" - "encoding/json" "fmt" + "github.com/goccy/go-json" "github.com/rs/zerolog" - "github.com/sirrobot01/debrid-blackhole/internal/cache" "github.com/sirrobot01/debrid-blackhole/internal/config" "github.com/sirrobot01/debrid-blackhole/internal/logger" 
"github.com/sirrobot01/debrid-blackhole/internal/request" "github.com/sirrobot01/debrid-blackhole/internal/utils" "github.com/sirrobot01/debrid-blackhole/pkg/debrid/torrent" + "time" "mime/multipart" "net/http" gourl "net/url" - "os" "path" "path/filepath" "slices" @@ -28,11 +27,11 @@ type Torbox struct { Host string `json:"host"` APIKey string DownloadUncached bool - client *request.RLHTTPClient - cache *cache.Cache - MountPath string - logger zerolog.Logger - CheckCached bool + client *request.Client + + MountPath string + logger zerolog.Logger + CheckCached bool } func (tb *Torbox) GetName() string { @@ -43,15 +42,9 @@ func (tb *Torbox) GetLogger() zerolog.Logger { return tb.logger } -func (tb *Torbox) IsAvailable(infohashes []string) map[string]bool { +func (tb *Torbox) IsAvailable(hashes []string) map[string]bool { // Check if the infohashes are available in the local cache - hashes, result := torrent.GetLocalCache(infohashes, tb.cache) - - if len(hashes) == 0 { - // Either all the infohashes are locally cached or none are - tb.cache.AddMultiple(result) - return result - } + result := make(map[string]bool) // Divide hashes into groups of 100 for i := 0; i < len(hashes); i += 100 { @@ -91,13 +84,12 @@ func (tb *Torbox) IsAvailable(infohashes []string) map[string]bool { return result } - for h, cache := range *res.Data { - if cache.Size > 0 { + for h, c := range *res.Data { + if c.Size > 0 { result[strings.ToUpper(h)] = true } } } - tb.cache.AddMultiple(result) // Add the results to the cache return result } @@ -149,17 +141,17 @@ func getTorboxStatus(status string, finished bool) string { } } -func (tb *Torbox) GetTorrent(t *torrent.Torrent) (*torrent.Torrent, error) { +func (tb *Torbox) UpdateTorrent(t *torrent.Torrent) error { url := fmt.Sprintf("%s/api/torrents/mylist/?id=%s", tb.Host, t.Id) req, _ := http.NewRequest(http.MethodGet, url, nil) resp, err := tb.client.MakeRequest(req) if err != nil { - return t, err + return err } var res InfoResponse err = 
json.Unmarshal(resp, &res) if err != nil { - return t, err + return err } data := res.Data name := data.Name @@ -174,8 +166,6 @@ func (tb *Torbox) GetTorrent(t *torrent.Torrent) (*torrent.Torrent, error) { t.OriginalFilename = name t.MountPath = tb.MountPath t.Debrid = tb.Name - t.DownloadLinks = make(map[string]torrent.DownloadLinks) - files := make([]torrent.File, 0) cfg := config.GetConfig() for _, f := range data.Files { fileName := filepath.Base(f.Name) @@ -196,35 +186,32 @@ func (tb *Torbox) GetTorrent(t *torrent.Torrent) (*torrent.Torrent, error) { Size: f.Size, Path: fileName, } - files = append(files, file) + t.Files[fileName] = file } var cleanPath string - if len(files) > 0 { + if len(t.Files) > 0 { cleanPath = path.Clean(data.Files[0].Name) } else { cleanPath = path.Clean(data.Name) } t.OriginalFilename = strings.Split(cleanPath, "/")[0] - t.Files = files - //t.Debrid = tb - return t, nil + t.Debrid = tb.Name + return nil } func (tb *Torbox) CheckStatus(torrent *torrent.Torrent, isSymlink bool) (*torrent.Torrent, error) { for { - t, err := tb.GetTorrent(torrent) + err := tb.UpdateTorrent(torrent) - torrent = t - - if err != nil || t == nil { - return t, err + if err != nil || torrent == nil { + return torrent, err } status := torrent.Status if status == "downloaded" { tb.logger.Info().Msgf("Torrent: %s downloaded", torrent.Name) if !isSymlink { - err = tb.GetDownloadLinks(torrent) + err = tb.GenerateDownloadLinks(torrent) if err != nil { return torrent, err } @@ -258,8 +245,7 @@ func (tb *Torbox) DeleteTorrent(torrent *torrent.Torrent) { } } -func (tb *Torbox) GetDownloadLinks(t *torrent.Torrent) error { - downloadLinks := make(map[string]torrent.DownloadLinks) +func (tb *Torbox) GenerateDownloadLinks(t *torrent.Torrent) error { for _, file := range t.Files { url := fmt.Sprintf("%s/api/torrents/requestdl/", tb.Host) query := gourl.Values{} @@ -279,21 +265,15 @@ func (tb *Torbox) GetDownloadLinks(t *torrent.Torrent) error { if data.Data == nil { return 
fmt.Errorf("error getting download links") } - idx := 0 link := *data.Data - - dl := torrent.DownloadLinks{ - Link: link, - Filename: t.Files[idx].Name, - DownloadLink: link, - } - downloadLinks[file.Id] = dl + file.DownloadLink = link + file.Generated = time.Now() + t.Files[file.Name] = file } - t.DownloadLinks = downloadLinks return nil } -func (tb *Torbox) GetDownloadLink(t *torrent.Torrent, file *torrent.File) *torrent.DownloadLinks { +func (tb *Torbox) GetDownloadLink(t *torrent.Torrent, file *torrent.File) *torrent.File { url := fmt.Sprintf("%s/api/torrents/requestdl/", tb.Host) query := gourl.Values{} query.Add("torrent_id", t.Id) @@ -313,11 +293,9 @@ func (tb *Torbox) GetDownloadLink(t *torrent.Torrent, file *torrent.File) *torre return nil } link := *data.Data - return &torrent.DownloadLinks{ - Link: file.Link, - Filename: file.Name, - DownloadLink: link, - } + file.DownloadLink = link + file.Generated = time.Now() + return file } func (tb *Torbox) GetDownloadingStatus() []string { @@ -336,21 +314,32 @@ func (tb *Torbox) GetDownloadUncached() bool { return tb.DownloadUncached } -func New(dc config.Debrid, cache *cache.Cache) *Torbox { +func New(dc config.Debrid) *Torbox { rl := request.ParseRateLimit(dc.RateLimit) headers := map[string]string{ "Authorization": fmt.Sprintf("Bearer %s", dc.APIKey), } - client := request.NewRLHTTPClient(rl, headers) + _log := logger.NewLogger(dc.Name, config.GetConfig().LogLevel) + client := request.New(). + WithHeaders(headers). 
+ WithRateLimiter(rl).WithLogger(_log) + return &Torbox{ Name: "torbox", Host: dc.Host, APIKey: dc.APIKey, DownloadUncached: dc.DownloadUncached, client: client, - cache: cache, MountPath: dc.Folder, - logger: logger.NewLogger(dc.Name, config.GetConfig().LogLevel, os.Stdout), + logger: _log, CheckCached: dc.CheckCached, } } + +func (tb *Torbox) ConvertLinksToFiles(links []string) []torrent.File { + return nil +} + +func (tb *Torbox) GetDownloads() (map[string]torrent.DownloadLinks, error) { + return nil, nil +} diff --git a/pkg/debrid/torrent/torrent.go b/pkg/debrid/torrent/torrent.go index 9f48e87..2fbbdbb 100644 --- a/pkg/debrid/torrent/torrent.go +++ b/pkg/debrid/torrent/torrent.go @@ -2,34 +2,33 @@ package torrent import ( "fmt" - "github.com/sirrobot01/debrid-blackhole/internal/cache" "github.com/sirrobot01/debrid-blackhole/internal/logger" "github.com/sirrobot01/debrid-blackhole/internal/utils" "github.com/sirrobot01/debrid-blackhole/pkg/arr" "os" "path/filepath" "sync" + "time" ) type Torrent struct { - Id string `json:"id"` - InfoHash string `json:"info_hash"` - Name string `json:"name"` - Folder string `json:"folder"` - Filename string `json:"filename"` - OriginalFilename string `json:"original_filename"` - Size int64 `json:"size"` - Bytes int64 `json:"bytes"` // Size of only the files that are downloaded - Magnet *utils.Magnet `json:"magnet"` - Files []File `json:"files"` - Status string `json:"status"` - Added string `json:"added"` - Progress float64 `json:"progress"` - Speed int64 `json:"speed"` - Seeders int `json:"seeders"` - Links []string `json:"links"` - DownloadLinks map[string]DownloadLinks `json:"download_links"` - MountPath string `json:"mount_path"` + Id string `json:"id"` + InfoHash string `json:"info_hash"` + Name string `json:"name"` + Folder string `json:"folder"` + Filename string `json:"filename"` + OriginalFilename string `json:"original_filename"` + Size int64 `json:"size"` + Bytes int64 `json:"bytes"` // Size of only the files that 
are downloaded + Magnet *utils.Magnet `json:"magnet"` + Files map[string]File `json:"files"` + Status string `json:"status"` + Added string `json:"added"` + Progress float64 `json:"progress"` + Speed int64 `json:"speed"` + Seeders int `json:"seeders"` + Links []string `json:"links"` + MountPath string `json:"mount_path"` Debrid string `json:"debrid"` @@ -40,9 +39,12 @@ type Torrent struct { } type DownloadLinks struct { - Filename string `json:"filename"` - Link string `json:"link"` - DownloadLink string `json:"download_link"` + Filename string `json:"filename"` + Link string `json:"link"` + DownloadLink string `json:"download_link"` + Generated time.Time `json:"generated"` + Size int64 `json:"size"` + Id string `json:"id"` } func (t *Torrent) GetSymlinkFolder(parent string) string { @@ -69,11 +71,13 @@ func (t *Torrent) GetMountFolder(rClonePath string) (string, error) { } type File struct { - Id string `json:"id"` - Name string `json:"name"` - Size int64 `json:"size"` - Path string `json:"path"` - Link string `json:"link"` + Id string `json:"id"` + Name string `json:"name"` + Size int64 `json:"size"` + Path string `json:"path"` + Link string `json:"link"` + DownloadLink string `json:"download_link"` + Generated time.Time `json:"generated"` } func (t *Torrent) Cleanup(remove bool) { @@ -93,30 +97,3 @@ func (t *Torrent) GetFile(id string) *File { } return nil } - -func GetLocalCache(infohashes []string, cache *cache.Cache) ([]string, map[string]bool) { - result := make(map[string]bool) - hashes := make([]string, 0) - - if len(infohashes) == 0 { - return hashes, result - } - if len(infohashes) == 1 { - if cache.Exists(infohashes[0]) { - return hashes, map[string]bool{infohashes[0]: true} - } - return infohashes, result - } - - cachedHashes := cache.GetMultiple(infohashes) - for _, h := range infohashes { - _, exists := cachedHashes[h] - if !exists { - hashes = append(hashes, h) - } else { - result[h] = true - } - } - - return infohashes, result -} diff --git 
a/pkg/proxy/proxy.go b/pkg/proxy/proxy.go index e45a9d4..a2bd768 100644 --- a/pkg/proxy/proxy.go +++ b/pkg/proxy/proxy.go @@ -88,7 +88,7 @@ func NewProxy() *Proxy { username: cfg.Username, password: cfg.Password, cachedOnly: cfg.CachedOnly, - logger: logger.NewLogger("proxy", cfg.LogLevel, os.Stdout), + logger: logger.NewLogger("proxy", cfg.LogLevel), } } diff --git a/pkg/qbit/downloader.go b/pkg/qbit/downloader.go index d2ecb52..2a5ba52 100644 --- a/pkg/qbit/downloader.go +++ b/pkg/qbit/downloader.go @@ -52,7 +52,7 @@ Loop: func (q *QBit) ProcessManualFile(torrent *Torrent) (string, error) { debridTorrent := torrent.DebridTorrent - q.logger.Info().Msgf("Downloading %d files...", len(debridTorrent.DownloadLinks)) + q.logger.Info().Msgf("Downloading %d files...", len(debridTorrent.Files)) torrentPath := filepath.Join(q.DownloadFolder, debridTorrent.Arr.Name, utils.RemoveExtension(debridTorrent.OriginalFilename)) torrentPath = utils.RemoveInvalidChars(torrentPath) err := os.MkdirAll(torrentPath, os.ModePerm) @@ -103,21 +103,21 @@ func (q *QBit) downloadFiles(torrent *Torrent, parent string) { Transport: tr, }, } - for _, link := range debridTorrent.DownloadLinks { - if link.DownloadLink == "" { - q.logger.Info().Msgf("No download link found for %s", link.Filename) + for _, file := range debridTorrent.Files { + if file.DownloadLink == "" { + q.logger.Info().Msgf("No download link found for %s", file.Name) continue } wg.Add(1) semaphore <- struct{}{} - go func(link debrid.DownloadLinks) { + go func(file debrid.File) { defer wg.Done() defer func() { <-semaphore }() - filename := link.Filename + filename := file.Link err := Download( client, - link.DownloadLink, + file.DownloadLink, filepath.Join(parent, filename), progressCallback, ) @@ -127,7 +127,7 @@ func (q *QBit) downloadFiles(torrent *Torrent, parent string) { } else { q.logger.Info().Msgf("Downloaded %s", filename) } - }(link) + }(file) } wg.Wait() q.logger.Info().Msgf("Downloaded all files for %s", 
debridTorrent.Name) diff --git a/pkg/qbit/qbit.go b/pkg/qbit/qbit.go index 84726ea..f860c5e 100644 --- a/pkg/qbit/qbit.go +++ b/pkg/qbit/qbit.go @@ -34,7 +34,7 @@ func New() *QBit { DownloadFolder: cfg.DownloadFolder, Categories: cfg.Categories, Storage: NewTorrentStorage(filepath.Join(_cfg.Path, "torrents.json")), - logger: logger.NewLogger("qbit", _cfg.LogLevel, os.Stdout), + logger: logger.NewLogger("qbit", _cfg.LogLevel), RefreshInterval: refreshInterval, SkipPreCache: cfg.SkipPreCache, } diff --git a/pkg/qbit/storage.go b/pkg/qbit/storage.go index 844bf96..a8a6b0d 100644 --- a/pkg/qbit/storage.go +++ b/pkg/qbit/storage.go @@ -1,8 +1,8 @@ package qbit import ( - "encoding/json" "fmt" + "github.com/goccy/go-json" "os" "sort" "sync" diff --git a/pkg/qbit/torrent.go b/pkg/qbit/torrent.go index fd09410..95165e3 100644 --- a/pkg/qbit/torrent.go +++ b/pkg/qbit/torrent.go @@ -185,7 +185,7 @@ func (q *QBit) UpdateTorrent(t *Torrent, debridTorrent *debrid.Torrent) *Torrent } _db := service.GetDebrid().GetByName(debridTorrent.Debrid) if debridTorrent.Status != "downloaded" { - debridTorrent, _ = _db.GetTorrent(debridTorrent) + _ = _db.UpdateTorrent(debridTorrent) } t = q.UpdateTorrentMin(t, debridTorrent) t.ContentPath = t.TorrentPath + string(os.PathSeparator) diff --git a/pkg/repair/repair.go b/pkg/repair/repair.go index 725a076..af859f6 100644 --- a/pkg/repair/repair.go +++ b/pkg/repair/repair.go @@ -2,15 +2,15 @@ package repair import ( "context" - "encoding/json" "fmt" + "github.com/goccy/go-json" "github.com/google/uuid" "github.com/rs/zerolog" "github.com/sirrobot01/debrid-blackhole/internal/config" "github.com/sirrobot01/debrid-blackhole/internal/logger" "github.com/sirrobot01/debrid-blackhole/internal/request" "github.com/sirrobot01/debrid-blackhole/pkg/arr" - "github.com/sirrobot01/debrid-blackhole/pkg/debrid/engine" + "github.com/sirrobot01/debrid-blackhole/pkg/debrid/debrid" "golang.org/x/sync/errgroup" "net" "net/http" @@ -29,7 +29,7 @@ import ( type Repair 
struct { Jobs map[string]*Job arrs *arr.Storage - deb engine.Service + deb debrid.Client duration time.Duration runOnStart bool ZurgURL string @@ -47,7 +47,7 @@ func New(arrs *arr.Storage) *Repair { } r := &Repair{ arrs: arrs, - logger: logger.NewLogger("repair", cfg.LogLevel, os.Stdout), + logger: logger.NewLogger("repair", cfg.LogLevel), duration: duration, runOnStart: cfg.Repair.RunOnStart, ZurgURL: cfg.Repair.ZurgURL, diff --git a/pkg/server/server.go b/pkg/server/server.go index 4a9f72a..88f036c 100644 --- a/pkg/server/server.go +++ b/pkg/server/server.go @@ -23,7 +23,7 @@ type Server struct { func New() *Server { cfg := config.GetConfig() - l := logger.NewLogger("http", cfg.LogLevel, os.Stdout) + l := logger.NewLogger("http", cfg.LogLevel) r := chi.NewRouter() r.Use(middleware.Recoverer) r.Handle("/static/*", http.StripPrefix("/static/", http.FileServer(http.Dir("static")))) diff --git a/pkg/server/webhook.go b/pkg/server/webhook.go index 3d1bf13..86ac786 100644 --- a/pkg/server/webhook.go +++ b/pkg/server/webhook.go @@ -2,7 +2,7 @@ package server import ( "cmp" - "encoding/json" + "github.com/goccy/go-json" "github.com/sirrobot01/debrid-blackhole/pkg/service" "net/http" ) diff --git a/pkg/service/service.go b/pkg/service/service.go index 1c9e216..104a7c6 100644 --- a/pkg/service/service.go +++ b/pkg/service/service.go @@ -3,17 +3,15 @@ package service import ( "github.com/sirrobot01/debrid-blackhole/pkg/arr" "github.com/sirrobot01/debrid-blackhole/pkg/debrid" - "github.com/sirrobot01/debrid-blackhole/pkg/debrid/cache" "github.com/sirrobot01/debrid-blackhole/pkg/debrid/engine" "github.com/sirrobot01/debrid-blackhole/pkg/repair" "sync" ) type Service struct { - Repair *repair.Repair - Arr *arr.Storage - Debrid *engine.Engine - DebridCache *cache.Manager + Repair *repair.Repair + Arr *arr.Storage + Debrid *engine.Engine } var ( @@ -26,10 +24,9 @@ func New() *Service { arrs := arr.NewStorage() deb := debrid.New() instance = &Service{ - Repair: repair.New(arrs), 
- Arr: arrs, - Debrid: deb, - DebridCache: cache.NewManager(deb), + Repair: repair.New(arrs), + Arr: arrs, + Debrid: deb, } }) return instance diff --git a/pkg/web/server.go b/pkg/web/server.go index 16dacdc..56027d0 100644 --- a/pkg/web/server.go +++ b/pkg/web/server.go @@ -2,8 +2,8 @@ package web import ( "embed" - "encoding/json" "fmt" + "github.com/goccy/go-json" "github.com/gorilla/sessions" "github.com/sirrobot01/debrid-blackhole/internal/config" "github.com/sirrobot01/debrid-blackhole/internal/logger" @@ -14,7 +14,6 @@ import ( "golang.org/x/crypto/bcrypt" "html/template" "net/http" - "os" "strings" "github.com/go-chi/chi/v5" @@ -64,7 +63,7 @@ func New(qbit *qbit.QBit) *Handler { cfg := config.GetConfig() return &Handler{ qbit: qbit, - logger: logger.NewLogger("ui", cfg.LogLevel, os.Stdout), + logger: logger.NewLogger("ui", cfg.LogLevel), } } diff --git a/pkg/webdav/cache.go b/pkg/webdav/cache.go new file mode 100644 index 0000000..50144fc --- /dev/null +++ b/pkg/webdav/cache.go @@ -0,0 +1,542 @@ +package webdav + +import ( + "bufio" + "context" + "fmt" + "github.com/dgraph-io/badger/v4" + "github.com/goccy/go-json" + "github.com/rs/zerolog" + "github.com/sirrobot01/debrid-blackhole/pkg/debrid/debrid" + "os" + "path/filepath" + "runtime" + "sync" + "sync/atomic" + "time" + + "github.com/sirrobot01/debrid-blackhole/internal/config" + "github.com/sirrobot01/debrid-blackhole/pkg/debrid/torrent" +) + +type DownloadLinkCache struct { + Link string `json:"download_link"` +} + +type CachedTorrent struct { + *torrent.Torrent + LastRead time.Time `json:"last_read"` + IsComplete bool `json:"is_complete"` +} + +type Cache struct { + dir string + client debrid.Client + db *badger.DB + logger zerolog.Logger + + torrents map[string]*CachedTorrent // key: torrent.Id, value: *CachedTorrent + torrentsNames map[string]*CachedTorrent // key: torrent.Name, value: torrent + listings atomic.Value + downloadLinks map[string]string // key: file.Link, value: download link + + 
workers int + + LastUpdated time.Time `json:"last_updated"` + + // refresh mutex + torrentsRefreshMutex sync.Mutex // for refreshing torrents + downloadLinksRefreshMutex sync.Mutex // for refreshing download links + + // Mutexes + torrentsMutex sync.RWMutex // for torrents and torrentsNames + downloadLinksMutex sync.Mutex +} + +func (c *Cache) setTorrent(t *CachedTorrent) { + c.torrentsMutex.Lock() + defer c.torrentsMutex.Unlock() + c.torrents[t.Id] = t + c.torrentsNames[t.Name] = t + + c.refreshListings() + + go func() { + if err := c.SaveTorrent(t); err != nil { + c.logger.Debug().Err(err).Msgf("Failed to save torrent %s", t.Id) + } + }() +} + +func (c *Cache) refreshListings() { + files := make([]os.FileInfo, 0, len(c.torrents)) + now := time.Now() + for _, t := range c.torrents { + if t != nil && t.Torrent != nil { + files = append(files, &FileInfo{ + name: t.Name, + size: 0, + mode: 0755 | os.ModeDir, + modTime: now, + isDir: true, + }) + } + } + // Atomic store of the complete ready-to-use slice + c.listings.Store(files) +} + +func (c *Cache) GetListing() []os.FileInfo { + return c.listings.Load().([]os.FileInfo) +} + +func (c *Cache) setTorrents(torrents []*CachedTorrent) { + c.torrentsMutex.Lock() + defer c.torrentsMutex.Unlock() + for _, t := range torrents { + c.torrents[t.Id] = t + c.torrentsNames[t.Name] = t + } + + go c.refreshListings() + + go func() { + if err := c.SaveTorrents(); err != nil { + c.logger.Debug().Err(err).Msgf("Failed to save torrents") + } + }() +} + +func (c *Cache) GetTorrents() map[string]*CachedTorrent { + c.torrentsMutex.RLock() + defer c.torrentsMutex.RUnlock() + result := make(map[string]*CachedTorrent, len(c.torrents)) + for k, v := range c.torrents { + result[k] = v + } + return result +} + +func (c *Cache) GetTorrentNames() map[string]*CachedTorrent { + c.torrentsMutex.RLock() + defer c.torrentsMutex.RUnlock() + return c.torrentsNames +} + +type Manager struct { + caches map[string]*Cache +} + +func NewCacheManager(clients 
[]debrid.Client) *Manager { + m := &Manager{ + caches: make(map[string]*Cache), + } + + for _, client := range clients { + m.caches[client.GetName()] = NewCache(client) + } + + return m +} + +func (m *Manager) GetCaches() map[string]*Cache { + return m.caches +} + +func (m *Manager) GetCache(debridName string) *Cache { + return m.caches[debridName] +} + +func NewCache(client debrid.Client) *Cache { + dbPath := filepath.Join(config.GetConfig().Path, "cache", client.GetName()) + return &Cache{ + dir: dbPath, + torrents: make(map[string]*CachedTorrent), + torrentsNames: make(map[string]*CachedTorrent), + client: client, + logger: client.GetLogger(), + workers: 200, + downloadLinks: make(map[string]string), + } +} + +func (c *Cache) Start() error { + if err := os.MkdirAll(c.dir, 0755); err != nil { + return fmt.Errorf("failed to create cache directory: %w", err) + } + + if err := c.Sync(); err != nil { + return fmt.Errorf("failed to sync cache: %w", err) + } + + // initial download links + go func() { + // lock download refresh mutex + c.downloadLinksRefreshMutex.Lock() + defer c.downloadLinksRefreshMutex.Unlock() + // This prevents the download links from being refreshed twice + c.refreshDownloadLinks() + }() + + go func() { + err := c.Refresh() + if err != nil { + c.logger.Error().Err(err).Msg("Failed to start cache refresh worker") + } + }() + + return nil +} + +func (c *Cache) Close() error { + if c.db != nil { + return c.db.Close() + } + return nil +} + +func (c *Cache) load() ([]*CachedTorrent, error) { + torrents := make([]*CachedTorrent, 0) + if err := os.MkdirAll(c.dir, 0755); err != nil { + return torrents, fmt.Errorf("failed to create cache directory: %w", err) + } + + files, err := os.ReadDir(c.dir) + if err != nil { + return torrents, fmt.Errorf("failed to read cache directory: %w", err) + } + + for _, file := range files { + if file.IsDir() || filepath.Ext(file.Name()) != ".json" { + continue + } + + filePath := filepath.Join(c.dir, file.Name()) + data, 
err := os.ReadFile(filePath) + if err != nil { + c.logger.Debug().Err(err).Msgf("Failed to read file: %s", filePath) + continue + } + + var ct CachedTorrent + if err := json.Unmarshal(data, &ct); err != nil { + c.logger.Debug().Err(err).Msgf("Failed to unmarshal file: %s", filePath) + continue + } + if len(ct.Files) != 0 { + // We can assume the torrent is complete + torrents = append(torrents, &ct) + } + } + + return torrents, nil +} + +func (c *Cache) GetTorrent(id string) *CachedTorrent { + c.torrentsMutex.RLock() + defer c.torrentsMutex.RUnlock() + if t, ok := c.torrents[id]; ok { + return t + } + return nil +} + +func (c *Cache) GetTorrentByName(name string) *CachedTorrent { + if t, ok := c.GetTorrentNames()[name]; ok { + return t + } + return nil +} + +func (c *Cache) SaveTorrents() error { + for _, ct := range c.GetTorrents() { + if err := c.SaveTorrent(ct); err != nil { + return err + } + } + return nil +} + +func (c *Cache) SaveTorrent(ct *CachedTorrent) error { + data, err := json.MarshalIndent(ct, "", " ") + if err != nil { + return fmt.Errorf("failed to marshal torrent: %w", err) + } + + fileName := ct.Torrent.Id + ".json" + filePath := filepath.Join(c.dir, fileName) + tmpFile := filePath + ".tmp" + + f, err := os.Create(tmpFile) + if err != nil { + return fmt.Errorf("failed to create temp file: %w", err) + } + defer f.Close() + + w := bufio.NewWriter(f) + if _, err := w.Write(data); err != nil { + return fmt.Errorf("failed to write data: %w", err) + } + + if err := w.Flush(); err != nil { + return fmt.Errorf("failed to flush data: %w", err) + } + + return os.Rename(tmpFile, filePath) +} + +func (c *Cache) Sync() error { + cachedTorrents, err := c.load() + if err != nil { + c.logger.Debug().Err(err).Msg("Failed to load cache") + } + // Write these torrents to the cache + c.setTorrents(cachedTorrents) + c.logger.Info().Msgf("Loaded %d torrents from cache", len(cachedTorrents)) + + torrents, err := c.client.GetTorrents() + + c.logger.Info().Msgf("Got %d 
torrents from %s", len(torrents), c.client.GetName()) + if err != nil { + return fmt.Errorf("failed to sync torrents: %v", err) + } + + mewTorrents := make([]*torrent.Torrent, 0) + for _, t := range torrents { + if _, ok := c.torrents[t.Id]; !ok { + mewTorrents = append(mewTorrents, t) + } + } + c.logger.Info().Msgf("Found %d new torrents", len(mewTorrents)) + + if len(mewTorrents) > 0 { + if err := c.sync(mewTorrents); err != nil { + return fmt.Errorf("failed to sync torrents: %v", err) + } + } + + return nil +} + +func (c *Cache) sync(torrents []*torrent.Torrent) error { + // Calculate optimal workers - balance between CPU and IO + workers := runtime.NumCPU() * 50 // A more balanced multiplier for BadgerDB + + // Create channels with appropriate buffering + workChan := make(chan *torrent.Torrent, workers*2) + + // Use an atomic counter for progress tracking + var processed int64 + var errorCount int64 + + // Create a context with cancellation in case of critical errors + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // Create a wait group for workers + var wg sync.WaitGroup + + // Start workers + for i := 0; i < workers; i++ { + wg.Add(1) + go func() { + defer wg.Done() + for { + select { + case t, ok := <-workChan: + if !ok { + return // Channel closed, exit goroutine + } + + if err := c.processTorrent(t); err != nil { + c.logger.Error().Err(err).Str("torrent", t.Name).Msg("sync error") + atomic.AddInt64(&errorCount, 1) + } + + count := atomic.AddInt64(&processed, 1) + if count%1000 == 0 { + c.logger.Info().Msgf("Progress: %d/%d torrents processed", count, len(torrents)) + } + + case <-ctx.Done(): + return // Context cancelled, exit goroutine + } + } + }() + } + + // Feed work to workers + for _, t := range torrents { + select { + case workChan <- t: + // Work sent successfully + case <-ctx.Done(): + break // Context cancelled + } + } + + // Signal workers that no more work is coming + close(workChan) + + // Wait for all workers to 
complete + wg.Wait() + + c.logger.Info().Msgf("Sync complete: %d torrents processed, %d errors", len(torrents), errorCount) + return nil +} + +func (c *Cache) processTorrent(t *torrent.Torrent) error { + var err error + err = c.client.UpdateTorrent(t) + if err != nil { + return fmt.Errorf("failed to get torrent files: %v", err) + } + + ct := &CachedTorrent{ + Torrent: t, + LastRead: time.Now(), + IsComplete: len(t.Files) > 0, + } + c.setTorrent(ct) + return nil +} + +func (c *Cache) GetDownloadLink(torrentId, filename, fileLink string) string { + + // Check link cache + if dl := c.checkDownloadLink(fileLink); dl != "" { + return dl + } + + ct := c.GetTorrent(torrentId) + if ct == nil { + return "" + } + file := ct.Files[filename] + + if file.Link == "" { + // file link is empty, refresh the torrent to get restricted links + if ct.IsComplete { + return "" + } + ct = c.refreshTorrent(ct) // Refresh the torrent from the debrid service + if ct == nil { + return "" + } else { + file = ct.Files[filename] + } + } + + c.logger.Debug().Msgf("Getting download link for %s", ct.Name) + f := c.client.GetDownloadLink(ct.Torrent, &file) + if f == nil { + return "" + } + file.DownloadLink = f.DownloadLink + ct.Files[filename] = file + + go c.updateDownloadLink(f) + go c.setTorrent(ct) + return f.DownloadLink +} + +func (c *Cache) updateDownloadLink(file *torrent.File) { + c.downloadLinksMutex.Lock() + defer c.downloadLinksMutex.Unlock() + c.downloadLinks[file.Link] = file.DownloadLink +} + +func (c *Cache) checkDownloadLink(link string) string { + if dl, ok := c.downloadLinks[link]; ok { + return dl + } + return "" +} + +func (c *Cache) refreshTorrent(t *CachedTorrent) *CachedTorrent { + _torrent := t.Torrent + err := c.client.UpdateTorrent(_torrent) + if err != nil { + c.logger.Debug().Msgf("Failed to get torrent files for %s: %v", t.Id, err) + return nil + } + if len(t.Files) == 0 { + return nil + } + + ct := &CachedTorrent{ + Torrent: _torrent, + LastRead: time.Now(), + 
IsComplete: len(t.Files) > 0, + } + c.setTorrent(ct) + + return ct +} + +func (c *Cache) refreshListingWorker() { + c.logger.Info().Msg("WebDAV Background Refresh Worker started") + refreshTicker := time.NewTicker(10 * time.Second) + defer refreshTicker.Stop() + + for { + select { + case <-refreshTicker.C: + if c.torrentsRefreshMutex.TryLock() { + func() { + defer c.torrentsRefreshMutex.Unlock() + c.refreshListings() + }() + } else { + c.logger.Debug().Msg("Refresh already in progress") + } + } + } +} + +func (c *Cache) refreshDownloadLinksWorker() { + c.logger.Info().Msg("WebDAV Background Refresh Download Worker started") + refreshTicker := time.NewTicker(40 * time.Minute) + defer refreshTicker.Stop() + + for { + select { + case <-refreshTicker.C: + if c.downloadLinksRefreshMutex.TryLock() { + func() { + defer c.downloadLinksRefreshMutex.Unlock() + c.refreshDownloadLinks() + }() + } else { + c.logger.Debug().Msg("Refresh already in progress") + } + } + } +} + +func (c *Cache) refreshDownloadLinks() map[string]string { + c.downloadLinksMutex.Lock() + defer c.downloadLinksMutex.Unlock() + + downloadLinks, err := c.client.GetDownloads() + if err != nil { + c.logger.Debug().Err(err).Msg("Failed to get download links") + return nil + } + for k, v := range downloadLinks { + c.downloadLinks[k] = v.DownloadLink + } + c.logger.Info().Msgf("Refreshed %d download links", len(downloadLinks)) + return c.downloadLinks +} + +func (c *Cache) GetClient() debrid.Client { + return c.client +} + +func (c *Cache) Refresh() error { + // For now, we just want to refresh the listing + go c.refreshListingWorker() + go c.refreshDownloadLinksWorker() + return nil +} diff --git a/pkg/webdav/file.go b/pkg/webdav/file.go index 72ef509..9df3409 100644 --- a/pkg/webdav/file.go +++ b/pkg/webdav/file.go @@ -2,60 +2,101 @@ package webdav import ( "fmt" - "github.com/sirrobot01/debrid-blackhole/pkg/debrid/cache" - "github.com/sirrobot01/debrid-blackhole/pkg/debrid/torrent" "io" "net/http" "os" 
"time" ) +var sharedClient = &http.Client{ + Transport: &http.Transport{ + // These settings help maintain persistent connections. + MaxIdleConns: 100, + IdleConnTimeout: 90 * time.Second, + DisableCompression: false, + DisableKeepAlives: false, + }, + Timeout: 0, +} + type File struct { - cache *cache.Cache - cachedTorrent *cache.CachedTorrent - file *torrent.File - offset int64 - isDir bool - children []os.FileInfo - reader io.ReadCloser + cache *Cache + fileId string + torrentId string + + size int64 + offset int64 + isDir bool + children []os.FileInfo + reader io.ReadCloser + seekPending bool + content []byte + name string + + downloadLink string + link string } // File interface implementations for File func (f *File) Close() error { + if f.reader != nil { + f.reader.Close() + f.reader = nil + } return nil } func (f *File) GetDownloadLink() string { - file := f.file - link, err := f.cache.GetFileDownloadLink(f.cachedTorrent, file) - if err != nil { - return "" + // Check if we already have a final URL cached + if f.downloadLink != "" { + return f.downloadLink } - return link + downloadLink := f.cache.GetDownloadLink(f.torrentId, f.name, f.link) + if downloadLink != "" { + f.downloadLink = downloadLink + return downloadLink + } + + return "" } func (f *File) Read(p []byte) (n int, err error) { - // Directories cannot be read as a byte stream. if f.isDir { return 0, os.ErrInvalid } - // If we haven't started streaming the file yet, open the HTTP connection. - if f.reader == nil { - // Create an HTTP GET request to the file's URL. + // If file content is preloaded, read from memory. + if f.content != nil { + if f.offset >= int64(len(f.content)) { + return 0, io.EOF + } + n = copy(p, f.content[f.offset:]) + f.offset += int64(n) + return n, nil + } + + // If we haven't started streaming or a seek was requested, + // close the existing stream and start a new HTTP GET request. 
+ if f.reader == nil || f.seekPending { + if f.reader != nil && f.seekPending { + f.reader.Close() + f.reader = nil + } + + // Create a new HTTP GET request for the file's URL. req, err := http.NewRequest("GET", f.GetDownloadLink(), nil) if err != nil { return 0, fmt.Errorf("failed to create HTTP request: %w", err) } - // If we've already read some data (f.offset > 0), request only the remaining bytes. + // If we've already read some data, request only the remaining bytes. if f.offset > 0 { req.Header.Set("Range", fmt.Sprintf("bytes=%d-", f.offset)) } // Execute the HTTP request. - resp, err := http.DefaultClient.Do(req) + resp, err := sharedClient.Do(req) if err != nil { return 0, fmt.Errorf("HTTP request error: %w", err) } @@ -68,6 +109,8 @@ func (f *File) Read(p []byte) (n int, err error) { // Store the response body as our reader. f.reader = resp.Body + // Reset the seek pending flag now that we've reinitialized the reader. + f.seekPending = false } // Read data from the HTTP stream. @@ -88,27 +131,57 @@ func (f *File) Seek(offset int64, whence int) (int64, error) { return 0, os.ErrInvalid } + var newOffset int64 switch whence { case io.SeekStart: - f.offset = offset + newOffset = offset case io.SeekCurrent: - f.offset += offset + newOffset = f.offset + offset case io.SeekEnd: - f.offset = f.file.Size - offset + newOffset = f.size - offset default: return 0, os.ErrInvalid } - if f.offset < 0 { - f.offset = 0 + if newOffset < 0 { + newOffset = 0 } - if f.offset > f.file.Size { - f.offset = f.file.Size + if newOffset > f.size { + newOffset = f.size } + // If we're seeking to a new position, mark the reader for reset. 
+ if newOffset != f.offset { + f.offset = newOffset + f.seekPending = true + } return f.offset, nil } +func (f *File) Stat() (os.FileInfo, error) { + if f.isDir { + return &FileInfo{ + name: f.name, + size: 0, + mode: 0755 | os.ModeDir, + modTime: time.Now(), + isDir: true, + }, nil + } + + return &FileInfo{ + name: f.name, + size: f.size, + mode: 0644, + modTime: time.Now(), + isDir: false, + }, nil +} + +func (f *File) Write(p []byte) (n int, err error) { + return 0, os.ErrPermission +} + func (f *File) Readdir(count int) ([]os.FileInfo, error) { if !f.isDir { return nil, os.ErrInvalid @@ -130,31 +203,3 @@ func (f *File) Readdir(count int) ([]os.FileInfo, error) { f.children = f.children[count:] return files, nil } - -func (f *File) Stat() (os.FileInfo, error) { - if f.isDir { - name := "/" - if f.cachedTorrent != nil { - name = f.cachedTorrent.Name - } - return &FileInfo{ - name: name, - size: 0, - mode: 0755 | os.ModeDir, - modTime: time.Now(), - isDir: true, - }, nil - } - - return &FileInfo{ - name: f.file.Name, - size: f.file.Size, - mode: 0644, - modTime: time.Now(), - isDir: false, - }, nil -} - -func (f *File) Write(p []byte) (n int, err error) { - return 0, os.ErrPermission -} diff --git a/pkg/webdav/handler.go b/pkg/webdav/handler.go index 170f7dc..fa9b220 100644 --- a/pkg/webdav/handler.go +++ b/pkg/webdav/handler.go @@ -1,100 +1,48 @@ package webdav import ( + "bytes" "context" + "errors" "fmt" "github.com/rs/zerolog" - "github.com/sirrobot01/debrid-blackhole/pkg/debrid/cache" "github.com/sirrobot01/debrid-blackhole/pkg/debrid/torrent" "golang.org/x/net/webdav" "html/template" + "io" + "net" "net/http" + "net/http/httptest" "os" "path" - "sort" "strings" "sync" - "sync/atomic" "time" ) type Handler struct { - Name string - logger zerolog.Logger - cache *cache.Cache - rootListing atomic.Value - lastRefresh time.Time - refreshMutex sync.Mutex - RootPath string + Name string + logger zerolog.Logger + cache *Cache + lastRefresh time.Time + refreshMutex 
sync.Mutex + RootPath string + responseCache sync.Map + cacheTTL time.Duration + ctx context.Context } -func NewHandler(name string, cache *cache.Cache, logger zerolog.Logger) *Handler { +func NewHandler(name string, cache *Cache, logger zerolog.Logger) *Handler { h := &Handler{ Name: name, cache: cache, logger: logger, RootPath: fmt.Sprintf("/%s", name), + ctx: context.Background(), } - - h.refreshRootListing() - - // Start background refresh - go h.backgroundRefresh() - return h } -func (h *Handler) backgroundRefresh() { - ticker := time.NewTicker(5 * time.Minute) - defer ticker.Stop() - - for range ticker.C { - h.refreshRootListing() - } -} - -func (h *Handler) refreshRootListing() { - h.refreshMutex.Lock() - defer h.refreshMutex.Unlock() - - if time.Since(h.lastRefresh) < time.Minute { - return - } - - torrents := h.cache.GetTorrentNames() - files := make([]os.FileInfo, 0, len(torrents)) - - for name, cachedTorrent := range torrents { - if cachedTorrent != nil && cachedTorrent.Torrent != nil { - files = append(files, &FileInfo{ - name: name, - size: 0, - mode: 0755 | os.ModeDir, - modTime: time.Now(), - isDir: true, - }) - } - } - - sort.Slice(files, func(i, j int) bool { - return files[i].Name() < files[j].Name() - }) - - h.rootListing.Store(files) - h.lastRefresh = time.Now() -} - -func (h *Handler) getParentRootPath() string { - return fmt.Sprintf("/webdav/%s", h.Name) -} - -func (h *Handler) getRootFileInfos() []os.FileInfo { - if listing := h.rootListing.Load(); listing != nil { - return listing.([]os.FileInfo) - } - return []os.FileInfo{} -} - // Mkdir implements webdav.FileSystem func (h *Handler) Mkdir(ctx context.Context, name string, perm os.FileMode) error { return os.ErrPermission // Read-only filesystem @@ -102,7 +50,27 @@ func (h *Handler) Mkdir(ctx context.Context, name string, perm os.FileMode) erro // RemoveAll implements webdav.FileSystem func (h *Handler) RemoveAll(ctx context.Context, name string) error { - return os.ErrPermission // 
Read-only filesystem + name = path.Clean("/" + name) + + rootDir := h.getParentRootPath() + + if name == rootDir { + return os.ErrPermission + } + + torrentName, filename := getName(rootDir, name) + cachedTorrent := h.cache.GetTorrentByName(torrentName) + if cachedTorrent == nil { + return os.ErrNotExist + } + + if filename == "" { + h.cache.GetClient().DeleteTorrent(cachedTorrent.Torrent) + go h.cache.refreshListings() + return nil + } + + return os.ErrPermission } // Rename implements webdav.FileSystem @@ -110,55 +78,124 @@ func (h *Handler) Rename(ctx context.Context, oldName, newName string) error { return os.ErrPermission // Read-only filesystem } +func (h *Handler) getParentRootPath() string { + return fmt.Sprintf("/webdav/%s", h.Name) +} + +func (h *Handler) getTorrentsFolders() []os.FileInfo { + return h.cache.GetListing() +} + +func (h *Handler) getParentFiles() []os.FileInfo { + now := time.Now() + rootFiles := []os.FileInfo{ + &FileInfo{ + name: "__all__", + size: 0, + mode: 0755 | os.ModeDir, + modTime: now, + isDir: true, + }, + &FileInfo{ + name: "torrents", + size: 0, + mode: 0755 | os.ModeDir, + modTime: now, + isDir: true, + }, + &FileInfo{ + name: "version.txt", + size: int64(len("v1.0.0")), + mode: 0644, + modTime: now, + isDir: false, + }, + } + return rootFiles +} + func (h *Handler) OpenFile(ctx context.Context, name string, flag int, perm os.FileMode) (webdav.File, error) { name = path.Clean("/" + name) + rootDir := h.getParentRootPath() - // Fast path for root directory - if name == h.getParentRootPath() { + // Fast path optimization with a map lookup instead of string comparisons + switch name { + case rootDir: return &File{ cache: h.cache, isDir: true, - children: h.getRootFileInfos(), + children: h.getParentFiles(), + name: "/", }, nil - } - - // Remove root directory from path - name = strings.TrimPrefix(name, h.getParentRootPath()) - name = strings.TrimPrefix(name, "/") - parts := strings.SplitN(name, "/", 2) - - // Get torrent from 
cache using sync.Map - cachedTorrent := h.cache.GetTorrentByName(parts[0]) - if cachedTorrent == nil { - h.logger.Debug().Msgf("Torrent not found: %s", parts[0]) - return nil, os.ErrNotExist - } - - if len(parts) == 1 { + case path.Join(rootDir, "version.txt"): return &File{ - cache: h.cache, - cachedTorrent: cachedTorrent, - isDir: true, - children: h.getTorrentFileInfos(cachedTorrent.Torrent), + cache: h.cache, + isDir: false, + content: []byte("v1.0.0"), + name: "version.txt", + size: int64(len("v1.0.0")), }, nil } - // Use a map for faster file lookup - fileMap := make(map[string]*torrent.File, len(cachedTorrent.Torrent.Files)) - for i := range cachedTorrent.Torrent.Files { - fileMap[cachedTorrent.Torrent.Files[i].Name] = &cachedTorrent.Torrent.Files[i] - } + // Single check for top-level folders + if name == path.Join(rootDir, "__all__") || name == path.Join(rootDir, "torrents") { + folderName := strings.TrimPrefix(name, rootDir) + folderName = strings.TrimPrefix(folderName, "/") + + // Only fetch the torrent folders once + children := h.getTorrentsFolders() - if file, ok := fileMap[parts[1]]; ok { return &File{ - cache: h.cache, - cachedTorrent: cachedTorrent, - file: file, - isDir: false, + cache: h.cache, + isDir: true, + children: children, + name: folderName, + size: 0, }, nil } - h.logger.Debug().Msgf("File not found: %s", name) + _path := strings.TrimPrefix(name, rootDir) + parts := strings.Split(strings.TrimPrefix(_path, "/"), "/") + + if len(parts) >= 2 && (parts[0] == "__all__" || parts[0] == "torrents") { + + torrentName := parts[1] + cachedTorrent := h.cache.GetTorrentByName(torrentName) + if cachedTorrent == nil { + h.logger.Debug().Msgf("Torrent not found: %s", torrentName) + return nil, os.ErrNotExist + } + + if len(parts) == 2 { + // Torrent folder level + return &File{ + cache: h.cache, + torrentId: cachedTorrent.Id, + isDir: true, + children: h.getFileInfos(cachedTorrent.Torrent), + name: cachedTorrent.Name, + size: cachedTorrent.Size, + }, 
nil + } + + // Torrent file level + filename := strings.Join(parts[2:], "/") + if file, ok := cachedTorrent.Files[filename]; ok { + fi := &File{ + cache: h.cache, + torrentId: cachedTorrent.Id, + fileId: file.Id, + isDir: false, + name: file.Name, + size: file.Size, + link: file.Link, + downloadLink: file.DownloadLink, + } + return fi, nil + } + } + + h.logger.Info().Msgf("File not found: %s", name) return nil, os.ErrNotExist } @@ -171,14 +208,15 @@ func (h *Handler) Stat(ctx context.Context, name string) (os.FileInfo, error) { return f.Stat() } -func (h *Handler) getTorrentFileInfos(torrent *torrent.Torrent) []os.FileInfo { +func (h *Handler) getFileInfos(torrent *torrent.Torrent) []os.FileInfo { files := make([]os.FileInfo, 0, len(torrent.Files)) + now := time.Now() for _, file := range torrent.Files { files = append(files, &FileInfo{ name: file.Name, size: file.Size, mode: 0644, - modTime: time.Now(), + modTime: now, isDir: false, }) } @@ -186,13 +224,124 @@ func (h *Handler) getTorrentFileInfos(torrent *torrent.Torrent) []os.FileInfo { } func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + // Handle OPTIONS if r.Method == "OPTIONS" { w.WriteHeader(http.StatusOK) return } - // Create WebDAV handler + //Add specific PROPFIND optimization + if r.Method == "PROPFIND" { + propfindStart := time.Now() + + // Check if this is the slow path we identified + if strings.Contains(r.URL.Path, "__all__") { + // Fast path for this specific directory + depth := r.Header.Get("Depth") + if depth == "1" || depth == "" { + // This is a listing request + + // Use a cached response if available + cachedKey := "propfind_" + r.URL.Path + if cachedResponse, ok := h.responseCache.Load(cachedKey); ok { + responseData := cachedResponse.([]byte) + w.Header().Set("Content-Type", "application/xml; charset=utf-8") + w.Header().Set("Content-Length", fmt.Sprintf("%d", len(responseData))) + w.Write(responseData) + return + } + + // Otherwise process normally but cache the 
result + responseRecorder := httptest.NewRecorder() + + // Process the request with the standard handler + handler := &webdav.Handler{ + FileSystem: h, + LockSystem: webdav.NewMemLS(), + Logger: func(r *http.Request, err error) { + if err != nil { + h.logger.Error().Err(err).Msg("WebDAV error") + } + }, + } + handler.ServeHTTP(responseRecorder, r) + + // Cache the response for future requests + responseData := responseRecorder.Body.Bytes() + h.responseCache.Store(cachedKey, responseData) + + // Send to the real client + for k, v := range responseRecorder.Header() { + w.Header()[k] = v + } + w.WriteHeader(responseRecorder.Code) + w.Write(responseData) + return + } + } + + h.logger.Debug(). + Dur("propfind_prepare", time.Since(propfindStart)). + Msg("Proceeding with standard PROPFIND") + } + + // Check if this is a GET request for a file + if r.Method == "GET" { + openStart := time.Now() + f, err := h.OpenFile(r.Context(), r.URL.Path, os.O_RDONLY, 0) + if err != nil { + h.logger.Debug().Err(err).Str("path", r.URL.Path).Msg("Failed to open file") + http.NotFound(w, r) + return + } + defer f.Close() + + fi, err := f.Stat() + if err != nil { + h.logger.Error().Err(err).Msg("Failed to stat file") + http.Error(w, "Server Error", http.StatusInternalServerError) + return + } + + if fi.IsDir() { + dirStart := time.Now() + h.serveDirectory(w, r, f) + h.logger.Info(). + Dur("directory_time", time.Since(dirStart)). + Msg("Directory served") + return + } + + // For file requests, use http.ServeContent. + // Ensure f implements io.ReadSeeker. + rs, ok := f.(io.ReadSeeker) + if !ok { + // If not, read the entire file into memory as a fallback. + buf, err := io.ReadAll(f) + if err != nil { + h.logger.Error().Err(err).Msg("Failed to read file content") + http.Error(w, "Server Error", http.StatusInternalServerError) + return + } + rs = bytes.NewReader(buf) + } + + // Set Content-Type based on file name. 
// getContentType maps a file name to a MIME content type based on its
// extension. Matching is case-insensitive ("Movie.MKV" is treated like
// "movie.mkv"), which the previous suffix comparison missed. Unknown
// extensions fall back to "application/octet-stream".
func getContentType(fileName string) string {
	// path.Ext returns the suffix starting at the final dot ("" if none),
	// so this is equivalent to the old strings.HasSuffix chain for every
	// extension listed here, plus case-insensitivity.
	switch strings.ToLower(path.Ext(fileName)) {
	case ".mp4":
		return "video/mp4"
	case ".mkv":
		return "video/x-matroska"
	case ".avi":
		return "video/x-msvideo"
	case ".mov":
		return "video/quicktime"
	case ".m4v":
		return "video/x-m4v"
	case ".ts":
		return "video/mp2t"
	case ".srt":
		return "application/x-subrip"
	case ".vtt":
		return "text/vtt"
	default:
		// Generic binary fallback for anything unrecognized.
		return "application/octet-stream"
	}
}
+func (h *Handler) ioCopy(reader io.Reader, w io.Writer) (int64, error) { + // Start with a smaller initial buffer for faster first byte time + buffer := make([]byte, 8*1024) // 8KB initial buffer + written := int64(0) + + // First chunk needs to be delivered ASAP + firstChunk := true + + for { + n, err := reader.Read(buffer) + if n > 0 { + nw, ew := w.Write(buffer[:n]) + if ew != nil { + var opErr *net.OpError + if errors.As(ew, &opErr) && opErr.Err.Error() == "write: broken pipe" { + h.logger.Debug().Msg("Client closed connection (normal for streaming)") + } + break + } + written += int64(nw) + + // Flush immediately after first chunk, then less frequently + if firstChunk { + if flusher, ok := w.(http.Flusher); ok { + flusher.Flush() + } + firstChunk = false + + // Increase buffer size after first chunk + buffer = make([]byte, 64*1024) // 512KB for subsequent reads + } else if written%(2*1024*1024) < int64(n) { // Flush every 2MB + if flusher, ok := w.(http.Flusher); ok { + flusher.Flush() + } + } + } + + if err != nil { + if err != io.EOF { + h.logger.Error().Err(err).Msg("Error reading from file") + } + break + } + } + + return written, nil +} diff --git a/pkg/webdav/misc.go b/pkg/webdav/misc.go new file mode 100644 index 0000000..ebbaa7f --- /dev/null +++ b/pkg/webdav/misc.go @@ -0,0 +1,14 @@ +package webdav + +import "strings" + +// getName: Returns the torrent name and filename from the path +// /webdav/alldebrid/__all__/TorrentName +func getName(rootDir, path string) (string, string) { + path = strings.TrimPrefix(path, rootDir) + parts := strings.Split(strings.TrimPrefix(path, "/"), "/") + if len(parts) < 2 { + return "", "" + } + return parts[0], strings.Join(parts[1:], "/") +} diff --git a/pkg/webdav/webdav.go b/pkg/webdav/webdav.go index 6457974..ffa4394 100644 --- a/pkg/webdav/webdav.go +++ b/pkg/webdav/webdav.go @@ -9,7 +9,6 @@ import ( "github.com/sirrobot01/debrid-blackhole/pkg/service" "html/template" "net/http" - "os" "sync" ) @@ -23,8 +22,10 @@ 
func New() *WebDav { w := &WebDav{ Handlers: make([]*Handler, 0), } - for name, c := range svc.DebridCache.GetCaches() { - h := NewHandler(name, c, logger.NewLogger(fmt.Sprintf("%s-webdav", name), cfg.LogLevel, os.Stdout)) + debrids := svc.Debrid.GetDebrids() + cacheManager := NewCacheManager(debrids) + for name, c := range cacheManager.GetCaches() { + h := NewHandler(name, c, logger.NewLogger(fmt.Sprintf("%s-webdav", name), cfg.LogLevel)) w.Handlers = append(w.Handlers, h) } return w @@ -33,7 +34,7 @@ func New() *WebDav { func (wd *WebDav) Routes() http.Handler { chi.RegisterMethod("PROPFIND") chi.RegisterMethod("PROPPATCH") - chi.RegisterMethod("MKCOL") // Note: it was "MKOL" in your example, should be "MKCOL" + chi.RegisterMethod("MKCOL") chi.RegisterMethod("COPY") chi.RegisterMethod("MOVE") chi.RegisterMethod("LOCK") @@ -97,6 +98,7 @@ func (wd *WebDav) setupRootHandler(r chi.Router) { func (wd *WebDav) commonMiddleware(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Header().Set("DAV", "1, 2") + w.Header().Set("Cache-Control", "max-age=3600") w.Header().Set("Allow", "OPTIONS, PROPFIND, GET, HEAD, POST, PUT, DELETE, MKCOL, PROPPATCH, COPY, MOVE, LOCK, UNLOCK") w.Header().Set("Access-Control-Allow-Origin", "*") w.Header().Set("Access-Control-Allow-Methods", "OPTIONS, PROPFIND, GET, HEAD, POST, PUT, DELETE, MKCOL, PROPPATCH, COPY, MOVE, LOCK, UNLOCK") diff --git a/pkg/worker/worker.go b/pkg/worker/worker.go index 1eb5be1..05dbeb7 100644 --- a/pkg/worker/worker.go +++ b/pkg/worker/worker.go @@ -6,7 +6,6 @@ import ( "github.com/sirrobot01/debrid-blackhole/internal/config" "github.com/sirrobot01/debrid-blackhole/internal/logger" "github.com/sirrobot01/debrid-blackhole/pkg/service" - "os" "sync" "time" ) @@ -20,7 +19,7 @@ func getLogger() zerolog.Logger { once.Do(func() { cfg := config.GetConfig() - _logInstance = logger.NewLogger("worker", cfg.LogLevel, os.Stdout) + _logInstance = logger.NewLogger("worker", 
cfg.LogLevel) }) return _logInstance } From 0d178992ef951dd45aa9c49b34c01a0fa1f5d766 Mon Sep 17 00:00:00 2001 From: Mukhtar Akere Date: Wed, 19 Mar 2025 03:08:22 +0100 Subject: [PATCH 03/39] Improve webdav; add workers for refreshes --- internal/logger/logger.go | 7 +- internal/request/request.go | 3 +- main.go | 14 +- pkg/debrid/alldebrid/alldebrid.go | 4 +- pkg/debrid/debrid_link/debrid_link.go | 4 +- pkg/debrid/realdebrid/realdebrid.go | 4 +- pkg/debrid/torbox/torbox.go | 2 +- pkg/proxy/proxy.go | 2 +- pkg/qbit/qbit.go | 2 +- pkg/repair/repair.go | 2 +- pkg/server/server.go | 3 +- pkg/web/server.go | 3 +- pkg/webdav/cache.go | 269 ++++++++++++++++++-------- pkg/webdav/file.go | 2 +- pkg/webdav/handler.go | 251 ++++++++++++++---------- pkg/webdav/webdav.go | 4 +- pkg/webdav/workers.go | 69 +++++++ pkg/worker/worker.go | 3 +- 18 files changed, 438 insertions(+), 210 deletions(-) create mode 100644 pkg/webdav/workers.go diff --git a/internal/logger/logger.go b/internal/logger/logger.go index 2ced7a0..5ad3229 100644 --- a/internal/logger/logger.go +++ b/internal/logger/logger.go @@ -29,7 +29,9 @@ func GetLogPath() string { return filepath.Join(logsDir, "decypharr.log") } -func NewLogger(prefix string, level string) zerolog.Logger { +func NewLogger(prefix string) zerolog.Logger { + + level := config.GetConfig().LogLevel rotatingLogFile := &lumberjack.Logger{ Filename: GetLogPath(), @@ -86,8 +88,7 @@ func NewLogger(prefix string, level string) zerolog.Logger { func GetDefaultLogger() zerolog.Logger { once.Do(func() { - cfg := config.GetConfig() - logger = NewLogger("decypharr", cfg.LogLevel) + logger = NewLogger("decypharr") }) return logger } diff --git a/internal/request/request.go b/internal/request/request.go index bd6ae30..d59cdac 100644 --- a/internal/request/request.go +++ b/internal/request/request.go @@ -7,7 +7,6 @@ import ( "fmt" "github.com/goccy/go-json" "github.com/rs/zerolog" - "github.com/sirrobot01/debrid-blackhole/internal/config" 
"github.com/sirrobot01/debrid-blackhole/internal/logger" "golang.org/x/time/rate" "io" @@ -227,7 +226,7 @@ func New(options ...ClientOption) *Client { http.StatusServiceUnavailable: true, http.StatusGatewayTimeout: true, }, - logger: logger.NewLogger("request", config.GetConfig().LogLevel), + logger: logger.NewLogger("request"), } // Apply options diff --git a/main.go b/main.go index 95ca8f4..3e3cfd0 100644 --- a/main.go +++ b/main.go @@ -5,6 +5,7 @@ import ( "flag" "github.com/sirrobot01/debrid-blackhole/cmd/decypharr" "github.com/sirrobot01/debrid-blackhole/internal/config" + "github.com/sirrobot01/debrid-blackhole/pkg/version" "log" "net/http" _ "net/http/pprof" // registers pprof handlers @@ -19,11 +20,14 @@ func main() { } }() - go func() { - if err := http.ListenAndServe(":6060", nil); err != nil { - log.Fatalf("pprof server failed: %v", err) - } - }() + if version.GetInfo().Channel == "dev" { + log.Println("Running in dev mode") + go func() { + if err := http.ListenAndServe(":6060", nil); err != nil { + log.Fatalf("pprof server failed: %v", err) + } + }() + } var configPath string flag.StringVar(&configPath, "config", "/data", "path to the data folder") flag.Parse() diff --git a/pkg/debrid/alldebrid/alldebrid.go b/pkg/debrid/alldebrid/alldebrid.go index 252f1d6..ae82df8 100644 --- a/pkg/debrid/alldebrid/alldebrid.go +++ b/pkg/debrid/alldebrid/alldebrid.go @@ -288,7 +288,7 @@ func New(dc config.Debrid) *AllDebrid { headers := map[string]string{ "Authorization": fmt.Sprintf("Bearer %s", dc.APIKey), } - _log := logger.NewLogger(dc.Name, config.GetConfig().LogLevel) + _log := logger.NewLogger(dc.Name) client := request.New(). WithHeaders(headers). 
WithRateLimiter(rl).WithLogger(_log) @@ -299,7 +299,7 @@ func New(dc config.Debrid) *AllDebrid { DownloadUncached: dc.DownloadUncached, client: client, MountPath: dc.Folder, - logger: logger.NewLogger(dc.Name, config.GetConfig().LogLevel), + logger: logger.NewLogger(dc.Name), CheckCached: dc.CheckCached, } } diff --git a/pkg/debrid/debrid_link/debrid_link.go b/pkg/debrid/debrid_link/debrid_link.go index f287a59..a65b624 100644 --- a/pkg/debrid/debrid_link/debrid_link.go +++ b/pkg/debrid/debrid_link/debrid_link.go @@ -264,7 +264,7 @@ func New(dc config.Debrid) *DebridLink { "Authorization": fmt.Sprintf("Bearer %s", dc.APIKey), "Content-Type": "application/json", } - _log := logger.NewLogger(dc.Name, config.GetConfig().LogLevel) + _log := logger.NewLogger(dc.Name) client := request.New(). WithHeaders(headers). WithRateLimiter(rl).WithLogger(_log) @@ -275,7 +275,7 @@ func New(dc config.Debrid) *DebridLink { DownloadUncached: dc.DownloadUncached, client: client, MountPath: dc.Folder, - logger: logger.NewLogger(dc.Name, config.GetConfig().LogLevel), + logger: logger.NewLogger(dc.Name), CheckCached: dc.CheckCached, } } diff --git a/pkg/debrid/realdebrid/realdebrid.go b/pkg/debrid/realdebrid/realdebrid.go index a83682f..adea14b 100644 --- a/pkg/debrid/realdebrid/realdebrid.go +++ b/pkg/debrid/realdebrid/realdebrid.go @@ -517,7 +517,7 @@ func New(dc config.Debrid) *RealDebrid { headers := map[string]string{ "Authorization": fmt.Sprintf("Bearer %s", dc.APIKey), } - _log := logger.NewLogger(dc.Name, config.GetConfig().LogLevel) + _log := logger.NewLogger(dc.Name) client := request.New(). WithHeaders(headers). 
WithRateLimiter(rl).WithLogger(_log) @@ -528,7 +528,7 @@ func New(dc config.Debrid) *RealDebrid { DownloadUncached: dc.DownloadUncached, client: client, MountPath: dc.Folder, - logger: logger.NewLogger(dc.Name, config.GetConfig().LogLevel), + logger: logger.NewLogger(dc.Name), CheckCached: dc.CheckCached, } } diff --git a/pkg/debrid/torbox/torbox.go b/pkg/debrid/torbox/torbox.go index 688296c..4769c08 100644 --- a/pkg/debrid/torbox/torbox.go +++ b/pkg/debrid/torbox/torbox.go @@ -319,7 +319,7 @@ func New(dc config.Debrid) *Torbox { headers := map[string]string{ "Authorization": fmt.Sprintf("Bearer %s", dc.APIKey), } - _log := logger.NewLogger(dc.Name, config.GetConfig().LogLevel) + _log := logger.NewLogger(dc.Name) client := request.New(). WithHeaders(headers). WithRateLimiter(rl).WithLogger(_log) diff --git a/pkg/proxy/proxy.go b/pkg/proxy/proxy.go index a2bd768..ccc5f0a 100644 --- a/pkg/proxy/proxy.go +++ b/pkg/proxy/proxy.go @@ -88,7 +88,7 @@ func NewProxy() *Proxy { username: cfg.Username, password: cfg.Password, cachedOnly: cfg.CachedOnly, - logger: logger.NewLogger("proxy", cfg.LogLevel), + logger: logger.NewLogger("proxy"), } } diff --git a/pkg/qbit/qbit.go b/pkg/qbit/qbit.go index f860c5e..d0fce74 100644 --- a/pkg/qbit/qbit.go +++ b/pkg/qbit/qbit.go @@ -34,7 +34,7 @@ func New() *QBit { DownloadFolder: cfg.DownloadFolder, Categories: cfg.Categories, Storage: NewTorrentStorage(filepath.Join(_cfg.Path, "torrents.json")), - logger: logger.NewLogger("qbit", _cfg.LogLevel), + logger: logger.NewLogger("qbit"), RefreshInterval: refreshInterval, SkipPreCache: cfg.SkipPreCache, } diff --git a/pkg/repair/repair.go b/pkg/repair/repair.go index af859f6..3365030 100644 --- a/pkg/repair/repair.go +++ b/pkg/repair/repair.go @@ -47,7 +47,7 @@ func New(arrs *arr.Storage) *Repair { } r := &Repair{ arrs: arrs, - logger: logger.NewLogger("repair", cfg.LogLevel), + logger: logger.NewLogger("repair"), duration: duration, runOnStart: cfg.Repair.RunOnStart, ZurgURL: 
cfg.Repair.ZurgURL, diff --git a/pkg/server/server.go b/pkg/server/server.go index 88f036c..47ef225 100644 --- a/pkg/server/server.go +++ b/pkg/server/server.go @@ -22,8 +22,7 @@ type Server struct { } func New() *Server { - cfg := config.GetConfig() - l := logger.NewLogger("http", cfg.LogLevel) + l := logger.NewLogger("http") r := chi.NewRouter() r.Use(middleware.Recoverer) r.Handle("/static/*", http.StripPrefix("/static/", http.FileServer(http.Dir("static")))) diff --git a/pkg/web/server.go b/pkg/web/server.go index 56027d0..ba87d09 100644 --- a/pkg/web/server.go +++ b/pkg/web/server.go @@ -60,10 +60,9 @@ type Handler struct { } func New(qbit *qbit.QBit) *Handler { - cfg := config.GetConfig() return &Handler{ qbit: qbit, - logger: logger.NewLogger("ui", cfg.LogLevel), + logger: logger.NewLogger("ui"), } } diff --git a/pkg/webdav/cache.go b/pkg/webdav/cache.go index 50144fc..43a300b 100644 --- a/pkg/webdav/cache.go +++ b/pkg/webdav/cache.go @@ -7,10 +7,12 @@ import ( "github.com/dgraph-io/badger/v4" "github.com/goccy/go-json" "github.com/rs/zerolog" + "github.com/sirrobot01/debrid-blackhole/internal/logger" "github.com/sirrobot01/debrid-blackhole/pkg/debrid/debrid" "os" "path/filepath" "runtime" + "sort" "sync" "sync/atomic" "time" @@ -23,6 +25,11 @@ type DownloadLinkCache struct { Link string `json:"download_link"` } +type propfindResponse struct { + data []byte + ts time.Time +} + type CachedTorrent struct { *torrent.Torrent LastRead time.Time `json:"last_read"` @@ -39,27 +46,29 @@ type Cache struct { torrentsNames map[string]*CachedTorrent // key: torrent.Name, value: torrent listings atomic.Value downloadLinks map[string]string // key: file.Link, value: download link + propfindResp sync.Map workers int LastUpdated time.Time `json:"last_updated"` // refresh mutex - torrentsRefreshMutex sync.Mutex // for refreshing torrents - downloadLinksRefreshMutex sync.Mutex // for refreshing download links + listingRefreshMu sync.Mutex // for refreshing torrents + 
downloadLinksRefreshMu sync.Mutex // for refreshing download links + torrentsRefreshMu sync.Mutex // for refreshing torrents - // Mutexes + // Data Mutexes torrentsMutex sync.RWMutex // for torrents and torrentsNames - downloadLinksMutex sync.Mutex + downloadLinksMutex sync.Mutex // for downloadLinks } func (c *Cache) setTorrent(t *CachedTorrent) { c.torrentsMutex.Lock() - defer c.torrentsMutex.Unlock() c.torrents[t.Id] = t c.torrentsNames[t.Name] = t + c.torrentsMutex.Unlock() - c.refreshListings() + go c.refreshListings() // This is concurrent safe go func() { if err := c.SaveTorrent(t); err != nil { @@ -69,19 +78,31 @@ func (c *Cache) setTorrent(t *CachedTorrent) { } func (c *Cache) refreshListings() { - files := make([]os.FileInfo, 0, len(c.torrents)) - now := time.Now() + // Copy the current torrents to avoid concurrent issues + c.torrentsMutex.RLock() + torrents := make([]string, 0, len(c.torrents)) for _, t := range c.torrents { if t != nil && t.Torrent != nil { - files = append(files, &FileInfo{ - name: t.Name, - size: 0, - mode: 0755 | os.ModeDir, - modTime: now, - isDir: true, - }) + torrents = append(torrents, t.Name) } } + c.torrentsMutex.RUnlock() + + sort.Slice(torrents, func(i, j int) bool { + return torrents[i] < torrents[j] + }) + + files := make([]os.FileInfo, 0, len(torrents)) + now := time.Now() + for _, t := range torrents { + files = append(files, &FileInfo{ + name: t, + size: 0, + mode: 0755 | os.ModeDir, + modTime: now, + isDir: true, + }) + } // Atomic store of the complete ready-to-use slice c.listings.Store(files) } @@ -90,15 +111,16 @@ func (c *Cache) GetListing() []os.FileInfo { return c.listings.Load().([]os.FileInfo) } -func (c *Cache) setTorrents(torrents []*CachedTorrent) { +func (c *Cache) setTorrents(torrents map[string]*CachedTorrent) { c.torrentsMutex.Lock() - defer c.torrentsMutex.Unlock() for _, t := range torrents { c.torrents[t.Id] = t c.torrentsNames[t.Name] = t } - go c.refreshListings() + c.torrentsMutex.Unlock() + + go 
c.refreshListings() // This is concurrent safe go func() { if err := c.SaveTorrents(); err != nil { @@ -148,13 +170,14 @@ func (m *Manager) GetCache(debridName string) *Cache { } func NewCache(client debrid.Client) *Cache { - dbPath := filepath.Join(config.GetConfig().Path, "cache", client.GetName()) + cfg := config.GetConfig() + dbPath := filepath.Join(cfg.Path, "cache", client.GetName()) return &Cache{ dir: dbPath, torrents: make(map[string]*CachedTorrent), torrentsNames: make(map[string]*CachedTorrent), client: client, - logger: client.GetLogger(), + logger: logger.NewLogger(fmt.Sprintf("%s-cache", client.GetName())), workers: 200, downloadLinks: make(map[string]string), } @@ -172,8 +195,8 @@ func (c *Cache) Start() error { // initial download links go func() { // lock download refresh mutex - c.downloadLinksRefreshMutex.Lock() - defer c.downloadLinksRefreshMutex.Unlock() + c.downloadLinksRefreshMu.Lock() + defer c.downloadLinksRefreshMu.Unlock() // This prevents the download links from being refreshed twice c.refreshDownloadLinks() }() @@ -195,8 +218,8 @@ func (c *Cache) Close() error { return nil } -func (c *Cache) load() ([]*CachedTorrent, error) { - torrents := make([]*CachedTorrent, 0) +func (c *Cache) load() (map[string]*CachedTorrent, error) { + torrents := make(map[string]*CachedTorrent) if err := os.MkdirAll(c.dir, 0755); err != nil { return torrents, fmt.Errorf("failed to create cache directory: %w", err) } @@ -225,7 +248,8 @@ func (c *Cache) load() ([]*CachedTorrent, error) { } if len(ct.Files) != 0 { // We can assume the torrent is complete - torrents = append(torrents, &ct) + ct.IsComplete = true + torrents[ct.Id] = &ct } } @@ -290,27 +314,48 @@ func (c *Cache) Sync() error { if err != nil { c.logger.Debug().Err(err).Msg("Failed to load cache") } - // Write these torrents to the cache - c.setTorrents(cachedTorrents) - c.logger.Info().Msgf("Loaded %d torrents from cache", len(cachedTorrents)) torrents, err := c.client.GetTorrents() - - 
c.logger.Info().Msgf("Got %d torrents from %s", len(torrents), c.client.GetName()) if err != nil { return fmt.Errorf("failed to sync torrents: %v", err) } - mewTorrents := make([]*torrent.Torrent, 0) + c.logger.Info().Msgf("Got %d torrents from %s", len(torrents), c.client.GetName()) + + newTorrents := make([]*torrent.Torrent, 0) + idStore := make(map[string]bool, len(torrents)) for _, t := range torrents { - if _, ok := c.torrents[t.Id]; !ok { - mewTorrents = append(mewTorrents, t) + idStore[t.Id] = true + if _, ok := cachedTorrents[t.Id]; !ok { + newTorrents = append(newTorrents, t) } } - c.logger.Info().Msgf("Found %d new torrents", len(mewTorrents)) - if len(mewTorrents) > 0 { - if err := c.sync(mewTorrents); err != nil { + // Check for deleted torrents + deletedTorrents := make([]string, 0) + for _, t := range cachedTorrents { + if _, ok := idStore[t.Id]; !ok { + deletedTorrents = append(deletedTorrents, t.Id) + } + } + + if len(deletedTorrents) > 0 { + c.logger.Info().Msgf("Found %d deleted torrents", len(deletedTorrents)) + for _, id := range deletedTorrents { + if _, ok := cachedTorrents[id]; ok { + delete(cachedTorrents, id) + c.removeFromDB(id) + } + } + } + + // Write these torrents to the cache + c.setTorrents(cachedTorrents) + c.logger.Info().Msgf("Loaded %d torrents from cache", len(cachedTorrents)) + + if len(newTorrents) > 0 { + c.logger.Info().Msgf("Found %d new torrents", len(newTorrents)) + if err := c.sync(newTorrents); err != nil { return fmt.Errorf("failed to sync torrents: %v", err) } } @@ -474,46 +519,6 @@ func (c *Cache) refreshTorrent(t *CachedTorrent) *CachedTorrent { return ct } -func (c *Cache) refreshListingWorker() { - c.logger.Info().Msg("WebDAV Background Refresh Worker started") - refreshTicker := time.NewTicker(10 * time.Second) - defer refreshTicker.Stop() - - for { - select { - case <-refreshTicker.C: - if c.torrentsRefreshMutex.TryLock() { - func() { - defer c.torrentsRefreshMutex.Unlock() - c.refreshListings() - }() - } else { 
- c.logger.Debug().Msg("Refresh already in progress") - } - } - } -} - -func (c *Cache) refreshDownloadLinksWorker() { - c.logger.Info().Msg("WebDAV Background Refresh Download Worker started") - refreshTicker := time.NewTicker(40 * time.Minute) - defer refreshTicker.Stop() - - for { - select { - case <-refreshTicker.C: - if c.downloadLinksRefreshMutex.TryLock() { - func() { - defer c.downloadLinksRefreshMutex.Unlock() - c.refreshDownloadLinks() - }() - } else { - c.logger.Debug().Msg("Refresh already in progress") - } - } - } -} - func (c *Cache) refreshDownloadLinks() map[string]string { c.downloadLinksMutex.Lock() defer c.downloadLinksMutex.Unlock() @@ -526,7 +531,6 @@ func (c *Cache) refreshDownloadLinks() map[string]string { for k, v := range downloadLinks { c.downloadLinks[k] = v.DownloadLink } - c.logger.Info().Msgf("Refreshed %d download links", len(downloadLinks)) return c.downloadLinks } @@ -534,9 +538,110 @@ func (c *Cache) GetClient() debrid.Client { return c.client } -func (c *Cache) Refresh() error { - // For now, we just want to refresh the listing - go c.refreshListingWorker() - go c.refreshDownloadLinksWorker() - return nil +func (c *Cache) refreshTorrents() { + c.torrentsMutex.RLock() + currentTorrents := c.torrents // + // Create a copy of the current torrents to avoid concurrent issues + torrents := make(map[string]string, len(currentTorrents)) // a mpa of id and name + for _, v := range currentTorrents { + torrents[v.Id] = v.Name + } + c.torrentsMutex.RUnlock() + + // Get new torrents from the debrid service + debTorrents, err := c.client.GetTorrents() + if err != nil { + c.logger.Debug().Err(err).Msg("Failed to get torrents") + return + } + + if len(debTorrents) == 0 { + // Maybe an error occurred + return + } + + // Get the newly added torrents only + newTorrents := make([]*torrent.Torrent, 0) + idStore := make(map[string]bool, len(debTorrents)) + for _, t := range debTorrents { + idStore[t.Id] = true + if _, ok := torrents[t.Id]; !ok { + 
newTorrents = append(newTorrents, t) + } + } + + // Check for deleted torrents + deletedTorrents := make([]string, 0) + for id, _ := range torrents { + if _, ok := idStore[id]; !ok { + deletedTorrents = append(deletedTorrents, id) + } + } + + if len(deletedTorrents) > 0 { + c.DeleteTorrent(deletedTorrents) + } + + if len(newTorrents) == 0 { + return + } + c.logger.Info().Msgf("Found %d new torrents", len(newTorrents)) + + // No need for a complex sync process, just add the new torrents + wg := sync.WaitGroup{} + wg.Add(len(newTorrents)) + for _, t := range newTorrents { + // processTorrent is concurrent safe + go func() { + defer wg.Done() + if err := c.processTorrent(t); err != nil { + c.logger.Info().Err(err).Msg("Failed to process torrent") + } + + }() + } + wg.Wait() +} + +func (c *Cache) DeleteTorrent(ids []string) { + c.logger.Info().Msgf("Deleting %d torrents", len(ids)) + c.torrentsMutex.Lock() + defer c.torrentsMutex.Unlock() + for _, id := range ids { + if t, ok := c.torrents[id]; ok { + delete(c.torrents, id) + delete(c.torrentsNames, t.Name) + c.removeFromDB(id) + } + } +} + +func (c *Cache) removeFromDB(torrentId string) { + filePath := filepath.Join(c.dir, torrentId+".json") + if err := os.Remove(filePath); err != nil { + c.logger.Debug().Err(err).Msgf("Failed to remove file: %s", filePath) + } +} + +func (c *Cache) resetPropfindResponse() { + // Right now, parents are hardcoded + parents := []string{"__all__", "torrents"} + // Reset only the parent directories + // Convert the parents to a keys + // This is a bit hacky, but it works + // Instead of deleting all the keys, we only delete the parent keys, e.g __all__/ or torrents/ + keys := make([]string, 0, len(parents)) + for _, p := range parents { + // Construct the key + // construct url + url := filepath.Join("/webdav/%s/%s", c.client.GetName(), p) + key0 := fmt.Sprintf("propfind:%s:0", url) + key1 := fmt.Sprintf("propfind:%s:1", url) + keys = append(keys, key0, key1) + } + + // Delete the keys + 
for _, k := range keys { + c.propfindResp.Delete(k) + } } diff --git a/pkg/webdav/file.go b/pkg/webdav/file.go index 9df3409..6fdc586 100644 --- a/pkg/webdav/file.go +++ b/pkg/webdav/file.go @@ -138,7 +138,7 @@ func (f *File) Seek(offset int64, whence int) (int64, error) { case io.SeekCurrent: newOffset = f.offset + offset case io.SeekEnd: - newOffset = f.size - offset + newOffset = f.size + offset default: return 0, os.ErrInvalid } diff --git a/pkg/webdav/handler.go b/pkg/webdav/handler.go index fa9b220..94d6656 100644 --- a/pkg/webdav/handler.go +++ b/pkg/webdav/handler.go @@ -15,6 +15,7 @@ import ( "net/http/httptest" "os" "path" + "slices" "strings" "sync" "time" @@ -52,7 +53,7 @@ func (h *Handler) Mkdir(ctx context.Context, name string, perm os.FileMode) erro func (h *Handler) RemoveAll(ctx context.Context, name string) error { name = path.Clean("/" + name) - rootDir := h.getParentRootPath() + rootDir := h.getRootPath() if name == rootDir { return os.ErrPermission @@ -67,6 +68,8 @@ func (h *Handler) RemoveAll(ctx context.Context, name string) error { if filename == "" { h.cache.GetClient().DeleteTorrent(cachedTorrent.Torrent) go h.cache.refreshListings() + go h.cache.refreshTorrents() + go h.cache.resetPropfindResponse() return nil } @@ -78,7 +81,7 @@ func (h *Handler) Rename(ctx context.Context, oldName, newName string) error { return os.ErrPermission // Read-only filesystem } -func (h *Handler) getParentRootPath() string { +func (h *Handler) getRootPath() string { return fmt.Sprintf("/webdav/%s", h.Name) } @@ -86,37 +89,33 @@ func (h *Handler) getTorrentsFolders() []os.FileInfo { return h.cache.GetListing() } +func (h *Handler) getParentItems() []string { + return []string{"__all__", "torrents", "version.txt"} +} + func (h *Handler) getParentFiles() []os.FileInfo { now := time.Now() - rootFiles := []os.FileInfo{ - &FileInfo{ - name: "__all__", + rootFiles := make([]os.FileInfo, 0, len(h.getParentItems())) + for _, item := range h.getParentItems() { + f := 
&FileInfo{ + name: item, size: 0, mode: 0755 | os.ModeDir, modTime: now, isDir: true, - }, - &FileInfo{ - name: "torrents", - size: 0, - mode: 0755 | os.ModeDir, - modTime: now, - isDir: true, - }, - &FileInfo{ - name: "version.txt", - size: int64(len("v1.0.0")), - mode: 0644, - modTime: now, - isDir: false, - }, + } + if item == "version.txt" { + f.isDir = false + f.size = int64(len("v1.0.0")) + } + rootFiles = append(rootFiles, f) } return rootFiles } func (h *Handler) OpenFile(ctx context.Context, name string, flag int, perm os.FileMode) (webdav.File, error) { name = path.Clean("/" + name) - rootDir := h.getParentRootPath() + rootDir := h.getRootPath() // Fast path optimization with a map lookup instead of string comparisons switch name { @@ -138,7 +137,7 @@ func (h *Handler) OpenFile(ctx context.Context, name string, flag int, perm os.F } // Single check for top-level folders - if name == path.Join(rootDir, "__all__") || name == path.Join(rootDir, "torrents") { + if h.isParentPath(name) { folderName := strings.TrimPrefix(name, rootDir) folderName = strings.TrimPrefix(folderName, "/") @@ -157,7 +156,7 @@ func (h *Handler) OpenFile(ctx context.Context, name string, flag int, perm os.F _path := strings.TrimPrefix(name, rootDir) parts := strings.Split(strings.TrimPrefix(_path, "/"), "/") - if len(parts) >= 2 && (parts[0] == "__all__" || parts[0] == "torrents") { + if len(parts) >= 2 && (slices.Contains(h.getParentItems(), parts[0])) { torrentName := parts[1] cachedTorrent := h.cache.GetTorrentByName(torrentName) @@ -224,71 +223,76 @@ func (h *Handler) getFileInfos(torrent *torrent.Torrent) []os.FileInfo { } func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - // Handle OPTIONS if r.Method == "OPTIONS" { w.WriteHeader(http.StatusOK) return } - //Add specific PROPFIND optimization + // Cache PROPFIND responses for a short time to reduce load. 
if r.Method == "PROPFIND" { - propfindStart := time.Now() + // Determine the Depth; default to "1" if not provided. + depth := r.Header.Get("Depth") + if depth == "" { + depth = "1" + } + // Use both path and Depth header to form the cache key. + cacheKey := fmt.Sprintf("propfind:%s:%s", r.URL.Path, depth) - // Check if this is the slow path we identified - if strings.Contains(r.URL.Path, "__all__") { - // Fast path for this specific directory - depth := r.Header.Get("Depth") - if depth == "1" || depth == "" { - // This is a listing request + // Determine TTL based on the requested folder: + // - If the path is exactly the parent folder (which changes frequently), + // use a short TTL. + // - Otherwise, for deeper (torrent folder) paths, use a longer TTL. + var ttl time.Duration + if h.isParentPath(r.URL.Path) { + ttl = 10 * time.Second + } else { + ttl = 1 * time.Minute + } - // Use a cached response if available - cachedKey := "propfind_" + r.URL.Path - if cachedResponse, ok := h.responseCache.Load(cachedKey); ok { - responseData := cachedResponse.([]byte) + // Check if we have a cached response that hasn't expired. 
+ if cached, ok := h.cache.propfindResp.Load(cacheKey); ok { + if respCache, ok := cached.(propfindResponse); ok { + if time.Since(respCache.ts) < ttl { w.Header().Set("Content-Type", "application/xml; charset=utf-8") - w.Header().Set("Content-Length", fmt.Sprintf("%d", len(responseData))) - w.Write(responseData) + w.Header().Set("Content-Length", fmt.Sprintf("%d", len(respCache.data))) + w.Write(respCache.data) return } - - // Otherwise process normally but cache the result - responseRecorder := httptest.NewRecorder() - - // Process the request with the standard handler - handler := &webdav.Handler{ - FileSystem: h, - LockSystem: webdav.NewMemLS(), - Logger: func(r *http.Request, err error) { - if err != nil { - h.logger.Error().Err(err).Msg("WebDAV error") - } - }, - } - handler.ServeHTTP(responseRecorder, r) - - // Cache the response for future requests - responseData := responseRecorder.Body.Bytes() - h.responseCache.Store(cachedKey, responseData) - - // Send to the real client - for k, v := range responseRecorder.Header() { - w.Header()[k] = v - } - w.WriteHeader(responseRecorder.Code) - w.Write(responseData) - return } } - h.logger.Debug(). - Dur("propfind_prepare", time.Since(propfindStart)). - Msg("Proceeding with standard PROPFIND") + // No valid cache entry; process the PROPFIND request. + responseRecorder := httptest.NewRecorder() + handler := &webdav.Handler{ + FileSystem: h, + LockSystem: webdav.NewMemLS(), + Logger: func(r *http.Request, err error) { + if err != nil { + h.logger.Error().Err(err).Msg("WebDAV error") + } + }, + } + handler.ServeHTTP(responseRecorder, r) + responseData := responseRecorder.Body.Bytes() + + // Store the new response in the cache. + h.cache.propfindResp.Store(cacheKey, propfindResponse{ + data: responseData, + ts: time.Now(), + }) + + // Forward the captured response to the client. 
+ for k, v := range responseRecorder.Header() { + w.Header()[k] = v + } + w.WriteHeader(responseRecorder.Code) + w.Write(responseData) + return } - // Check if this is a GET request for a file + // Handle GET requests for file/directory content if r.Method == "GET" { - openStart := time.Now() f, err := h.OpenFile(r.Context(), r.URL.Path, os.O_RDONLY, 0) if err != nil { h.logger.Debug().Err(err).Str("path", r.URL.Path).Msg("Failed to open file") @@ -304,17 +308,12 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { return } + // If the target is a directory, use your directory listing logic. if fi.IsDir() { - dirStart := time.Now() h.serveDirectory(w, r, f) - h.logger.Info(). - Dur("directory_time", time.Since(dirStart)). - Msg("Directory served") return } - // For file requests, use http.ServeContent. - // Ensure f implements io.ReadSeeker. rs, ok := f.(io.ReadSeeker) if !ok { // If not, read the entire file into memory as a fallback. @@ -326,8 +325,6 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } rs = bytes.NewReader(buf) } - - // Set Content-Type based on file name. fileName := fi.Name() contentType := getContentType(fileName) w.Header().Set("Content-Type", contentType) @@ -335,13 +332,62 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { // Serve the file with the correct modification time. // http.ServeContent automatically handles Range requests. http.ServeContent(w, r, fileName, fi.ModTime(), rs) - h.logger.Info(). - Dur("open_attempt_time", time.Since(openStart)). - Msg("Served file using ServeContent") + + // Set headers to indicate support for range requests and content type. + //fileName := fi.Name() + //w.Header().Set("Accept-Ranges", "bytes") + //w.Header().Set("Content-Type", getContentType(fileName)) + // + //// If a Range header is provided, parse and handle partial content. 
+ //rangeHeader := r.Header.Get("Range") + //if rangeHeader != "" { + // parts := strings.Split(strings.TrimPrefix(rangeHeader, "bytes="), "-") + // if len(parts) == 2 { + // start, startErr := strconv.ParseInt(parts[0], 10, 64) + // end := fi.Size() - 1 + // if parts[1] != "" { + // var endErr error + // end, endErr = strconv.ParseInt(parts[1], 10, 64) + // if endErr != nil { + // end = fi.Size() - 1 + // } + // } + // + // if startErr == nil && start < fi.Size() { + // if start > end { + // start, end = end, start + // } + // if end >= fi.Size() { + // end = fi.Size() - 1 + // } + // + // contentLength := end - start + 1 + // w.Header().Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", start, end, fi.Size())) + // w.Header().Set("Content-Length", fmt.Sprintf("%d", contentLength)) + // w.WriteHeader(http.StatusPartialContent) + // + // // Attempt to cast to your concrete File type to call Seek. + // if file, ok := f.(*File); ok { + // _, err = file.Seek(start, io.SeekStart) + // if err != nil { + // h.logger.Error().Err(err).Msg("Failed to seek in file") + // http.Error(w, "Server Error", http.StatusInternalServerError) + // return + // } + // + // limitedReader := io.LimitReader(f, contentLength) + // h.ioCopy(limitedReader, w) + // return + // } + // } + // } + //} + //w.Header().Set("Content-Length", fmt.Sprintf("%d", fi.Size())) + //h.ioCopy(f, w) return } - // Default to standard WebDAV handler for other requests + // Fallback: for other methods, use the standard WebDAV handler. 
handler := &webdav.Handler{ FileSystem: h, LockSystem: webdav.NewMemLS(), @@ -355,7 +401,6 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } }, } - handler.ServeHTTP(w, r) } @@ -384,6 +429,17 @@ func getContentType(fileName string) string { return contentType } +func (h *Handler) isParentPath(_path string) bool { + rootPath := h.getRootPath() + parents := h.getParentItems() + for _, p := range parents { + if _path == path.Join(rootPath, p) { + return true + } + } + return false +} + func (h *Handler) serveDirectory(w http.ResponseWriter, r *http.Request, file webdav.File) { var children []os.FileInfo if f, ok := file.(*File); ok { @@ -432,36 +488,35 @@ func (h *Handler) serveDirectory(w http.ResponseWriter, r *http.Request, file we } func (h *Handler) ioCopy(reader io.Reader, w io.Writer) (int64, error) { - // Start with a smaller initial buffer for faster first byte time - buffer := make([]byte, 8*1024) // 8KB initial buffer - written := int64(0) - - // First chunk needs to be delivered ASAP + // Start with a smaller buffer for faster first byte delivery. + buf := make([]byte, 4*1024) // 8KB initial buffer + totalWritten := int64(0) firstChunk := true for { - n, err := reader.Read(buffer) + n, err := reader.Read(buf) if n > 0 { - nw, ew := w.Write(buffer[:n]) + nw, ew := w.Write(buf[:n]) if ew != nil { var opErr *net.OpError if errors.As(ew, &opErr) && opErr.Err.Error() == "write: broken pipe" { h.logger.Debug().Msg("Client closed connection (normal for streaming)") + return totalWritten, ew } - break + return totalWritten, ew } - written += int64(nw) + totalWritten += int64(nw) - // Flush immediately after first chunk, then less frequently + // Flush immediately after the first chunk. 
if firstChunk { if flusher, ok := w.(http.Flusher); ok { flusher.Flush() } firstChunk = false - - // Increase buffer size after first chunk - buffer = make([]byte, 64*1024) // 512KB for subsequent reads - } else if written%(2*1024*1024) < int64(n) { // Flush every 2MB + // Increase buffer size for subsequent reads. + buf = make([]byte, 512*1024) // 64KB buffer after first chunk + } else if totalWritten%(2*1024*1024) < int64(n) { + // Flush roughly every 2MB of data transferred. if flusher, ok := w.(http.Flusher); ok { flusher.Flush() } @@ -476,5 +531,5 @@ func (h *Handler) ioCopy(reader io.Reader, w io.Writer) (int64, error) { } } - return written, nil + return totalWritten, nil } diff --git a/pkg/webdav/webdav.go b/pkg/webdav/webdav.go index ffa4394..1d108ae 100644 --- a/pkg/webdav/webdav.go +++ b/pkg/webdav/webdav.go @@ -4,7 +4,6 @@ import ( "context" "fmt" "github.com/go-chi/chi/v5" - "github.com/sirrobot01/debrid-blackhole/internal/config" "github.com/sirrobot01/debrid-blackhole/internal/logger" "github.com/sirrobot01/debrid-blackhole/pkg/service" "html/template" @@ -18,14 +17,13 @@ type WebDav struct { func New() *WebDav { svc := service.GetService() - cfg := config.GetConfig() w := &WebDav{ Handlers: make([]*Handler, 0), } debrids := svc.Debrid.GetDebrids() cacheManager := NewCacheManager(debrids) for name, c := range cacheManager.GetCaches() { - h := NewHandler(name, c, logger.NewLogger(fmt.Sprintf("%s-webdav", name), cfg.LogLevel)) + h := NewHandler(name, c, logger.NewLogger(fmt.Sprintf("%s-webdav", name))) w.Handlers = append(w.Handlers, h) } return w diff --git a/pkg/webdav/workers.go b/pkg/webdav/workers.go new file mode 100644 index 0000000..376e1e9 --- /dev/null +++ b/pkg/webdav/workers.go @@ -0,0 +1,69 @@ +package webdav + +import "time" + +func (c *Cache) Refresh() error { + // For now, we just want to refresh the listing and download links + c.logger.Info().Msg("Starting cache refresh workers") + go c.refreshListingWorker() + go 
c.refreshDownloadLinksWorker() + go c.refreshTorrentsWorker() + return nil +} + +func (c *Cache) refreshListingWorker() { + refreshTicker := time.NewTicker(10 * time.Second) + defer refreshTicker.Stop() + + for { + select { + case <-refreshTicker.C: + if c.listingRefreshMu.TryLock() { + func() { + defer c.listingRefreshMu.Unlock() + c.refreshListings() + }() + } else { + c.logger.Debug().Msg("Refresh already in progress") + } + } + } +} + +func (c *Cache) refreshDownloadLinksWorker() { + refreshTicker := time.NewTicker(40 * time.Minute) + defer refreshTicker.Stop() + + for { + select { + case <-refreshTicker.C: + if c.downloadLinksRefreshMu.TryLock() { + func() { + defer c.downloadLinksRefreshMu.Unlock() + c.refreshDownloadLinks() + }() + } else { + c.logger.Debug().Msg("Refresh already in progress") + } + } + } +} + +func (c *Cache) refreshTorrentsWorker() { + refreshTicker := time.NewTicker(5 * time.Second) + defer refreshTicker.Stop() + + for { + select { + case <-refreshTicker.C: + if c.listingRefreshMu.TryLock() { + func() { + defer c.listingRefreshMu.Unlock() + c.refreshTorrents() + }() + } else { + c.logger.Debug().Msg("Refresh already in progress") + } + } + } +} diff --git a/pkg/worker/worker.go b/pkg/worker/worker.go index 05dbeb7..884b82c 100644 --- a/pkg/worker/worker.go +++ b/pkg/worker/worker.go @@ -18,8 +18,7 @@ var ( func getLogger() zerolog.Logger { once.Do(func() { - cfg := config.GetConfig() - _logInstance = logger.NewLogger("worker", cfg.LogLevel) + _logInstance = logger.NewLogger("worker") }) return _logInstance } From 50c775ca747129c1b5ac6498f210ce9ca34aa2eb Mon Sep 17 00:00:00 2001 From: Mukhtar Akere Date: Wed, 19 Mar 2025 05:31:36 +0100 Subject: [PATCH 04/39] Fix naming to accurately depict zurg --- internal/utils/regex.go | 1 + pkg/debrid/realdebrid/realdebrid.go | 4 ++-- pkg/webdav/cache.go | 7 ++++++- 3 files changed, 9 insertions(+), 3 deletions(-) diff --git a/internal/utils/regex.go b/internal/utils/regex.go index 9b7d03a..e9cce5e 
100644 --- a/internal/utils/regex.go +++ b/internal/utils/regex.go @@ -37,6 +37,7 @@ func RemoveInvalidChars(value string) string { } func RemoveExtension(value string) string { + value = RemoveInvalidChars(value) re := regexp.MustCompile(VIDEOMATCH + "|" + SAMPLEMATCH + "|" + MUSICMATCH) // Find the last index of the matched extension diff --git a/pkg/debrid/realdebrid/realdebrid.go b/pkg/debrid/realdebrid/realdebrid.go index adea14b..c018ca6 100644 --- a/pkg/debrid/realdebrid/realdebrid.go +++ b/pkg/debrid/realdebrid/realdebrid.go @@ -173,7 +173,7 @@ func (r *RealDebrid) UpdateTorrent(t *torrent.Torrent) error { if err != nil { return err } - name := utils.RemoveInvalidChars(data.OriginalFilename) + name := utils.RemoveExtension(data.OriginalFilename) t.Name = name t.Bytes = data.Bytes t.Folder = name @@ -379,7 +379,7 @@ func (r *RealDebrid) getTorrents(offset int, limit int) (int, []*torrent.Torrent } torrents = append(torrents, &torrent.Torrent{ Id: t.Id, - Name: t.Filename, + Name: utils.RemoveInvalidChars(t.Filename), Bytes: t.Bytes, Progress: t.Progress, Status: t.Status, diff --git a/pkg/webdav/cache.go b/pkg/webdav/cache.go index 43a300b..035637b 100644 --- a/pkg/webdav/cache.go +++ b/pkg/webdav/cache.go @@ -8,6 +8,7 @@ import ( "github.com/goccy/go-json" "github.com/rs/zerolog" "github.com/sirrobot01/debrid-blackhole/internal/logger" + "github.com/sirrobot01/debrid-blackhole/internal/utils" "github.com/sirrobot01/debrid-blackhole/pkg/debrid/debrid" "os" "path/filepath" @@ -108,7 +109,10 @@ func (c *Cache) refreshListings() { } func (c *Cache) GetListing() []os.FileInfo { - return c.listings.Load().([]os.FileInfo) + if v, ok := c.listings.Load().([]os.FileInfo); ok { + return v + } + return nil } func (c *Cache) setTorrents(torrents map[string]*CachedTorrent) { @@ -249,6 +253,7 @@ func (c *Cache) load() (map[string]*CachedTorrent, error) { if len(ct.Files) != 0 { // We can assume the torrent is complete ct.IsComplete = true + ct.Torrent.Name = 
utils.RemoveExtension(ct.Torrent.Filename) // Update the name torrents[ct.Id] = &ct } } From 0c68364a6a95d385a9d0f0d4c670d571edfd121d Mon Sep 17 00:00:00 2001 From: Mukhtar Akere Date: Thu, 20 Mar 2025 10:42:51 +0100 Subject: [PATCH 05/39] Improvements: - An improvised caching for stats; using metadata on ls - Integrated into the downloading system - Fix minor bugs noticed - Still experiemental, sike --- internal/config/config.go | 3 +- pkg/debrid/alldebrid/alldebrid.go | 26 +-- pkg/debrid/debrid.go | 105 ---------- pkg/{webdav => debrid/debrid}/cache.go | 255 +++++------------------ pkg/debrid/debrid/debrid.go | 98 +++++++-- pkg/debrid/debrid/engine.go | 51 +++++ pkg/debrid/debrid/misc.go | 10 + pkg/debrid/debrid/refresh.go | 205 ++++++++++++++++++ pkg/debrid/debrid/workers.go | 35 ++++ pkg/debrid/debrid_link/debrid_link.go | 24 +-- pkg/debrid/engine/engine.go | 30 --- pkg/debrid/realdebrid/realdebrid.go | 53 ++--- pkg/debrid/torbox/torbox.go | 22 +- pkg/debrid/types/debrid.go | 23 ++ pkg/debrid/{torrent => types}/torrent.go | 2 +- pkg/qbit/downloader.go | 17 +- pkg/qbit/import.go | 2 +- pkg/qbit/torrent.go | 54 ++++- pkg/qbit/types.go | 10 +- pkg/repair/repair.go | 4 +- pkg/service/service.go | 11 +- pkg/webdav/file.go | 28 ++- pkg/webdav/handler.go | 194 +++++++++-------- pkg/webdav/misc.go | 16 +- pkg/webdav/webdav.go | 4 +- pkg/webdav/workers.go | 69 ------ 26 files changed, 715 insertions(+), 636 deletions(-) delete mode 100644 pkg/debrid/debrid.go rename pkg/{webdav => debrid/debrid}/cache.go (65%) create mode 100644 pkg/debrid/debrid/engine.go create mode 100644 pkg/debrid/debrid/misc.go create mode 100644 pkg/debrid/debrid/refresh.go create mode 100644 pkg/debrid/debrid/workers.go delete mode 100644 pkg/debrid/engine/engine.go create mode 100644 pkg/debrid/types/debrid.go rename pkg/debrid/{torrent => types}/torrent.go (99%) delete mode 100644 pkg/webdav/workers.go diff --git a/internal/config/config.go b/internal/config/config.go index 30a2090..dd6b7bf 
100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -23,6 +23,7 @@ type Debrid struct { DownloadUncached bool `json:"download_uncached"` CheckCached bool `json:"check_cached"` RateLimit string `json:"rate_limit"` // 200/minute or 10/second + EnableWebDav bool `json:"enable_webdav"` } type Proxy struct { @@ -174,7 +175,7 @@ func validateQbitTorrent(config *QBitTorrent) error { return errors.New("qbittorent download folder is required") } if _, err := os.Stat(config.DownloadFolder); os.IsNotExist(err) { - return errors.New("qbittorent download folder does not exist") + return fmt.Errorf("qbittorent download folder(%s) does not exist", config.DownloadFolder) } return nil } diff --git a/pkg/debrid/alldebrid/alldebrid.go b/pkg/debrid/alldebrid/alldebrid.go index ae82df8..c1f55bb 100644 --- a/pkg/debrid/alldebrid/alldebrid.go +++ b/pkg/debrid/alldebrid/alldebrid.go @@ -8,7 +8,7 @@ import ( "github.com/sirrobot01/debrid-blackhole/internal/logger" "github.com/sirrobot01/debrid-blackhole/internal/request" "github.com/sirrobot01/debrid-blackhole/internal/utils" - "github.com/sirrobot01/debrid-blackhole/pkg/debrid/torrent" + "github.com/sirrobot01/debrid-blackhole/pkg/debrid/types" "slices" "time" @@ -47,7 +47,7 @@ func (ad *AllDebrid) IsAvailable(hashes []string) map[string]bool { return result } -func (ad *AllDebrid) SubmitMagnet(torrent *torrent.Torrent) (*torrent.Torrent, error) { +func (ad *AllDebrid) SubmitMagnet(torrent *types.Torrent) (*types.Torrent, error) { url := fmt.Sprintf("%s/magnet/upload", ad.Host) query := gourl.Values{} query.Add("magnets[]", torrent.Magnet.Link) @@ -84,8 +84,8 @@ func getAlldebridStatus(statusCode int) string { } } -func flattenFiles(files []MagnetFile, parentPath string, index *int) map[string]torrent.File { - result := make(map[string]torrent.File) +func flattenFiles(files []MagnetFile, parentPath string, index *int) map[string]types.File { + result := make(map[string]types.File) cfg := config.GetConfig() @@ -123,7 
+123,7 @@ func flattenFiles(files []MagnetFile, parentPath string, index *int) map[string] } *index++ - file := torrent.File{ + file := types.File{ Id: strconv.Itoa(*index), Name: fileName, Size: f.Size, @@ -136,7 +136,7 @@ func flattenFiles(files []MagnetFile, parentPath string, index *int) map[string] return result } -func (ad *AllDebrid) UpdateTorrent(t *torrent.Torrent) error { +func (ad *AllDebrid) UpdateTorrent(t *types.Torrent) error { url := fmt.Sprintf("%s/magnet/status?id=%s", ad.Host, t.Id) req, _ := http.NewRequest(http.MethodGet, url, nil) resp, err := ad.client.MakeRequest(req) @@ -172,7 +172,7 @@ func (ad *AllDebrid) UpdateTorrent(t *torrent.Torrent) error { return nil } -func (ad *AllDebrid) CheckStatus(torrent *torrent.Torrent, isSymlink bool) (*torrent.Torrent, error) { +func (ad *AllDebrid) CheckStatus(torrent *types.Torrent, isSymlink bool) (*types.Torrent, error) { for { err := ad.UpdateTorrent(torrent) @@ -204,7 +204,7 @@ func (ad *AllDebrid) CheckStatus(torrent *torrent.Torrent, isSymlink bool) (*tor return torrent, nil } -func (ad *AllDebrid) DeleteTorrent(torrent *torrent.Torrent) { +func (ad *AllDebrid) DeleteTorrent(torrent *types.Torrent) { url := fmt.Sprintf("%s/magnet/delete?id=%s", ad.Host, torrent.Id) req, _ := http.NewRequest(http.MethodGet, url, nil) _, err := ad.client.MakeRequest(req) @@ -215,7 +215,7 @@ func (ad *AllDebrid) DeleteTorrent(torrent *torrent.Torrent) { } } -func (ad *AllDebrid) GenerateDownloadLinks(t *torrent.Torrent) error { +func (ad *AllDebrid) GenerateDownloadLinks(t *types.Torrent) error { for _, file := range t.Files { url := fmt.Sprintf("%s/link/unlock", ad.Host) query := gourl.Values{} @@ -239,7 +239,7 @@ func (ad *AllDebrid) GenerateDownloadLinks(t *torrent.Torrent) error { return nil } -func (ad *AllDebrid) GetDownloadLink(t *torrent.Torrent, file *torrent.File) *torrent.File { +func (ad *AllDebrid) GetDownloadLink(t *types.Torrent, file *types.File) *types.File { url := fmt.Sprintf("%s/link/unlock", 
ad.Host) query := gourl.Values{} query.Add("link", file.Link) @@ -263,11 +263,11 @@ func (ad *AllDebrid) GetCheckCached() bool { return ad.CheckCached } -func (ad *AllDebrid) GetTorrents() ([]*torrent.Torrent, error) { +func (ad *AllDebrid) GetTorrents() ([]*types.Torrent, error) { return nil, nil } -func (ad *AllDebrid) GetDownloads() (map[string]torrent.DownloadLinks, error) { +func (ad *AllDebrid) GetDownloads() (map[string]types.DownloadLinks, error) { return nil, nil } @@ -279,7 +279,7 @@ func (ad *AllDebrid) GetDownloadUncached() bool { return ad.DownloadUncached } -func (ad *AllDebrid) ConvertLinksToFiles(links []string) []torrent.File { +func (ad *AllDebrid) ConvertLinksToFiles(links []string) []types.File { return nil } diff --git a/pkg/debrid/debrid.go b/pkg/debrid/debrid.go deleted file mode 100644 index 02073f9..0000000 --- a/pkg/debrid/debrid.go +++ /dev/null @@ -1,105 +0,0 @@ -package debrid - -import ( - "fmt" - "github.com/sirrobot01/debrid-blackhole/internal/config" - "github.com/sirrobot01/debrid-blackhole/internal/utils" - "github.com/sirrobot01/debrid-blackhole/pkg/arr" - "github.com/sirrobot01/debrid-blackhole/pkg/debrid/alldebrid" - "github.com/sirrobot01/debrid-blackhole/pkg/debrid/debrid" - "github.com/sirrobot01/debrid-blackhole/pkg/debrid/debrid_link" - "github.com/sirrobot01/debrid-blackhole/pkg/debrid/engine" - "github.com/sirrobot01/debrid-blackhole/pkg/debrid/realdebrid" - "github.com/sirrobot01/debrid-blackhole/pkg/debrid/torbox" - "github.com/sirrobot01/debrid-blackhole/pkg/debrid/torrent" -) - -func New() *engine.Engine { - cfg := config.GetConfig() - debrids := make([]debrid.Client, 0) - - for _, dc := range cfg.Debrids { - client := createDebridClient(dc) - logger := client.GetLogger() - logger.Info().Msg("Debrid Service started") - debrids = append(debrids, client) - } - d := &engine.Engine{ - Debrids: debrids, - LastUsed: 0, - } - return d -} - -func createDebridClient(dc config.Debrid) debrid.Client { - switch dc.Name { - case 
"realdebrid": - return realdebrid.New(dc) - case "torbox": - return torbox.New(dc) - case "debridlink": - return debrid_link.New(dc) - case "alldebrid": - return alldebrid.New(dc) - default: - return realdebrid.New(dc) - } -} - -func ProcessTorrent(d *engine.Engine, magnet *utils.Magnet, a *arr.Arr, isSymlink, overrideDownloadUncached bool) (*torrent.Torrent, error) { - - debridTorrent := &torrent.Torrent{ - InfoHash: magnet.InfoHash, - Magnet: magnet, - Name: magnet.Name, - Arr: a, - Size: magnet.Size, - Files: make(map[string]torrent.File), - } - - errs := make([]error, 0) - - for index, db := range d.Debrids { - logger := db.GetLogger() - logger.Info().Msgf("Processing debrid: %s", db.GetName()) - - // Override first, arr second, debrid third - - if overrideDownloadUncached { - debridTorrent.DownloadUncached = true - } else if a.DownloadUncached != nil { - // Arr cached is set - debridTorrent.DownloadUncached = *a.DownloadUncached - } else { - debridTorrent.DownloadUncached = db.GetDownloadUncached() - } - - logger.Info().Msgf("Torrent Hash: %s", debridTorrent.InfoHash) - if db.GetCheckCached() { - hash, exists := db.IsAvailable([]string{debridTorrent.InfoHash})[debridTorrent.InfoHash] - if !exists || !hash { - logger.Info().Msgf("Torrent: %s is not cached", debridTorrent.Name) - continue - } else { - logger.Info().Msgf("Torrent: %s is cached(or downloading)", debridTorrent.Name) - } - } - - dbt, err := db.SubmitMagnet(debridTorrent) - if dbt != nil { - dbt.Arr = a - } - if err != nil || dbt == nil || dbt.Id == "" { - errs = append(errs, err) - continue - } - logger.Info().Msgf("Torrent: %s(id=%s) submitted to %s", dbt.Name, dbt.Id, db.GetName()) - d.LastUsed = index - return db.CheckStatus(dbt, isSymlink) - } - err := fmt.Errorf("failed to process torrent") - for _, e := range errs { - err = fmt.Errorf("%w\n%w", err, e) - } - return nil, err -} diff --git a/pkg/webdav/cache.go b/pkg/debrid/debrid/cache.go similarity index 65% rename from pkg/webdav/cache.go 
rename to pkg/debrid/debrid/cache.go index 035637b..abcc804 100644 --- a/pkg/webdav/cache.go +++ b/pkg/debrid/debrid/cache.go @@ -1,53 +1,50 @@ -package webdav +package debrid import ( "bufio" "context" "fmt" - "github.com/dgraph-io/badger/v4" "github.com/goccy/go-json" "github.com/rs/zerolog" "github.com/sirrobot01/debrid-blackhole/internal/logger" "github.com/sirrobot01/debrid-blackhole/internal/utils" - "github.com/sirrobot01/debrid-blackhole/pkg/debrid/debrid" + "github.com/sirrobot01/debrid-blackhole/pkg/debrid/types" "os" "path/filepath" "runtime" - "sort" "sync" "sync/atomic" "time" "github.com/sirrobot01/debrid-blackhole/internal/config" - "github.com/sirrobot01/debrid-blackhole/pkg/debrid/torrent" ) type DownloadLinkCache struct { Link string `json:"download_link"` } -type propfindResponse struct { - data []byte - ts time.Time +type PropfindResponse struct { + Data []byte + GzippedData []byte + Ts time.Time } type CachedTorrent struct { - *torrent.Torrent + *types.Torrent LastRead time.Time `json:"last_read"` IsComplete bool `json:"is_complete"` } type Cache struct { dir string - client debrid.Client - db *badger.DB + client types.Client logger zerolog.Logger torrents map[string]*CachedTorrent // key: torrent.Id, value: *CachedTorrent torrentsNames map[string]*CachedTorrent // key: torrent.Name, value: torrent listings atomic.Value downloadLinks map[string]string // key: file.Link, value: download link - propfindResp sync.Map + PropfindResp sync.Map workers int @@ -63,13 +60,28 @@ type Cache struct { downloadLinksMutex sync.Mutex // for downloadLinks } +type fileInfo struct { + name string + size int64 + mode os.FileMode + modTime time.Time + isDir bool +} + +func (fi *fileInfo) Name() string { return fi.name } +func (fi *fileInfo) Size() int64 { return fi.size } +func (fi *fileInfo) Mode() os.FileMode { return fi.mode } +func (fi *fileInfo) ModTime() time.Time { return fi.modTime } +func (fi *fileInfo) IsDir() bool { return fi.isDir } +func (fi *fileInfo) 
Sys() interface{} { return nil } + func (c *Cache) setTorrent(t *CachedTorrent) { c.torrentsMutex.Lock() c.torrents[t.Id] = t c.torrentsNames[t.Name] = t c.torrentsMutex.Unlock() - go c.refreshListings() // This is concurrent safe + tryLock(&c.listingRefreshMu, c.refreshListings) go func() { if err := c.SaveTorrent(t); err != nil { @@ -78,36 +90,6 @@ func (c *Cache) setTorrent(t *CachedTorrent) { }() } -func (c *Cache) refreshListings() { - // Copy the current torrents to avoid concurrent issues - c.torrentsMutex.RLock() - torrents := make([]string, 0, len(c.torrents)) - for _, t := range c.torrents { - if t != nil && t.Torrent != nil { - torrents = append(torrents, t.Name) - } - } - c.torrentsMutex.RUnlock() - - sort.Slice(torrents, func(i, j int) bool { - return torrents[i] < torrents[j] - }) - - files := make([]os.FileInfo, 0, len(torrents)) - now := time.Now() - for _, t := range torrents { - files = append(files, &FileInfo{ - name: t, - size: 0, - mode: 0755 | os.ModeDir, - modTime: now, - isDir: true, - }) - } - // Atomic store of the complete ready-to-use slice - c.listings.Store(files) -} - func (c *Cache) GetListing() []os.FileInfo { if v, ok := c.listings.Load().([]os.FileInfo); ok { return v @@ -124,7 +106,7 @@ func (c *Cache) setTorrents(torrents map[string]*CachedTorrent) { c.torrentsMutex.Unlock() - go c.refreshListings() // This is concurrent safe + tryLock(&c.listingRefreshMu, c.refreshListings) go func() { if err := c.SaveTorrents(); err != nil { @@ -149,31 +131,7 @@ func (c *Cache) GetTorrentNames() map[string]*CachedTorrent { return c.torrentsNames } -type Manager struct { - caches map[string]*Cache -} - -func NewCacheManager(clients []debrid.Client) *Manager { - m := &Manager{ - caches: make(map[string]*Cache), - } - - for _, client := range clients { - m.caches[client.GetName()] = NewCache(client) - } - - return m -} - -func (m *Manager) GetCaches() map[string]*Cache { - return m.caches -} - -func (m *Manager) GetCache(debridName string) *Cache 
{ - return m.caches[debridName] -} - -func NewCache(client debrid.Client) *Cache { +func NewCache(client types.Client) *Cache { cfg := config.GetConfig() dbPath := filepath.Join(cfg.Path, "cache", client.GetName()) return &Cache{ @@ -202,7 +160,7 @@ func (c *Cache) Start() error { c.downloadLinksRefreshMu.Lock() defer c.downloadLinksRefreshMu.Unlock() // This prevents the download links from being refreshed twice - c.refreshDownloadLinks() + tryLock(&c.downloadLinksRefreshMu, c.refreshDownloadLinks) }() go func() { @@ -216,9 +174,6 @@ func (c *Cache) Start() error { } func (c *Cache) Close() error { - if c.db != nil { - return c.db.Close() - } return nil } @@ -327,7 +282,7 @@ func (c *Cache) Sync() error { c.logger.Info().Msgf("Got %d torrents from %s", len(torrents), c.client.GetName()) - newTorrents := make([]*torrent.Torrent, 0) + newTorrents := make([]*types.Torrent, 0) idStore := make(map[string]bool, len(torrents)) for _, t := range torrents { idStore[t.Id] = true @@ -368,12 +323,12 @@ func (c *Cache) Sync() error { return nil } -func (c *Cache) sync(torrents []*torrent.Torrent) error { +func (c *Cache) sync(torrents []*types.Torrent) error { // Calculate optimal workers - balance between CPU and IO workers := runtime.NumCPU() * 50 // A more balanced multiplier for BadgerDB // Create channels with appropriate buffering - workChan := make(chan *torrent.Torrent, workers*2) + workChan := make(chan *types.Torrent, workers*2) // Use an atomic counter for progress tracking var processed int64 @@ -398,7 +353,7 @@ func (c *Cache) sync(torrents []*torrent.Torrent) error { return // Channel closed, exit goroutine } - if err := c.processTorrent(t); err != nil { + if err := c.ProcessTorrent(t, true); err != nil { c.logger.Error().Err(err).Str("torrent", t.Name).Msg("sync error") atomic.AddInt64(&errorCount, 1) } @@ -435,11 +390,11 @@ func (c *Cache) sync(torrents []*torrent.Torrent) error { return nil } -func (c *Cache) processTorrent(t *torrent.Torrent) error { - var 
err error - err = c.client.UpdateTorrent(t) - if err != nil { - return fmt.Errorf("failed to get torrent files: %v", err) +func (c *Cache) ProcessTorrent(t *types.Torrent, refreshRclone bool) error { + if len(t.Files) == 0 { + if err := c.client.UpdateTorrent(t); err != nil { + return fmt.Errorf("failed to update torrent: %w", err) + } } ct := &CachedTorrent{ @@ -448,6 +403,9 @@ func (c *Cache) processTorrent(t *torrent.Torrent) error { IsComplete: len(t.Files) > 0, } c.setTorrent(ct) + if err := c.RefreshRclone(); err != nil { + c.logger.Debug().Err(err).Msg("Failed to refresh rclone") + } return nil } @@ -469,7 +427,7 @@ func (c *Cache) GetDownloadLink(torrentId, filename, fileLink string) string { if ct.IsComplete { return "" } - ct = c.refreshTorrent(ct) // Refresh the torrent from the debrid service + ct = c.refreshTorrent(ct) // Refresh the torrent from the debrid if ct == nil { return "" } else { @@ -477,7 +435,7 @@ func (c *Cache) GetDownloadLink(torrentId, filename, fileLink string) string { } } - c.logger.Debug().Msgf("Getting download link for %s", ct.Name) + c.logger.Trace().Msgf("Getting download link for %s", ct.Name) f := c.client.GetDownloadLink(ct.Torrent, &file) if f == nil { return "" @@ -490,7 +448,7 @@ func (c *Cache) GetDownloadLink(torrentId, filename, fileLink string) string { return f.DownloadLink } -func (c *Cache) updateDownloadLink(file *torrent.File) { +func (c *Cache) updateDownloadLink(file *types.File) { c.downloadLinksMutex.Lock() defer c.downloadLinksMutex.Unlock() c.downloadLinks[file.Link] = file.DownloadLink @@ -503,111 +461,10 @@ func (c *Cache) checkDownloadLink(link string) string { return "" } -func (c *Cache) refreshTorrent(t *CachedTorrent) *CachedTorrent { - _torrent := t.Torrent - err := c.client.UpdateTorrent(_torrent) - if err != nil { - c.logger.Debug().Msgf("Failed to get torrent files for %s: %v", t.Id, err) - return nil - } - if len(t.Files) == 0 { - return nil - } - - ct := &CachedTorrent{ - Torrent: _torrent, - 
LastRead: time.Now(), - IsComplete: len(t.Files) > 0, - } - c.setTorrent(ct) - - return ct -} - -func (c *Cache) refreshDownloadLinks() map[string]string { - c.downloadLinksMutex.Lock() - defer c.downloadLinksMutex.Unlock() - - downloadLinks, err := c.client.GetDownloads() - if err != nil { - c.logger.Debug().Err(err).Msg("Failed to get download links") - return nil - } - for k, v := range downloadLinks { - c.downloadLinks[k] = v.DownloadLink - } - return c.downloadLinks -} - -func (c *Cache) GetClient() debrid.Client { +func (c *Cache) GetClient() types.Client { return c.client } -func (c *Cache) refreshTorrents() { - c.torrentsMutex.RLock() - currentTorrents := c.torrents // - // Create a copy of the current torrents to avoid concurrent issues - torrents := make(map[string]string, len(currentTorrents)) // a mpa of id and name - for _, v := range currentTorrents { - torrents[v.Id] = v.Name - } - c.torrentsMutex.RUnlock() - - // Get new torrents from the debrid service - debTorrents, err := c.client.GetTorrents() - if err != nil { - c.logger.Debug().Err(err).Msg("Failed to get torrents") - return - } - - if len(debTorrents) == 0 { - // Maybe an error occurred - return - } - - // Get the newly added torrents only - newTorrents := make([]*torrent.Torrent, 0) - idStore := make(map[string]bool, len(debTorrents)) - for _, t := range debTorrents { - idStore[t.Id] = true - if _, ok := torrents[t.Id]; !ok { - newTorrents = append(newTorrents, t) - } - } - - // Check for deleted torrents - deletedTorrents := make([]string, 0) - for id, _ := range torrents { - if _, ok := idStore[id]; !ok { - deletedTorrents = append(deletedTorrents, id) - } - } - - if len(deletedTorrents) > 0 { - c.DeleteTorrent(deletedTorrents) - } - - if len(newTorrents) == 0 { - return - } - c.logger.Info().Msgf("Found %d new torrents", len(newTorrents)) - - // No need for a complex sync process, just add the new torrents - wg := sync.WaitGroup{} - wg.Add(len(newTorrents)) - for _, t := range newTorrents 
{ - // processTorrent is concurrent safe - go func() { - defer wg.Done() - if err := c.processTorrent(t); err != nil { - c.logger.Info().Err(err).Msg("Failed to process torrent") - } - - }() - } - wg.Wait() -} - func (c *Cache) DeleteTorrent(ids []string) { c.logger.Info().Msgf("Deleting %d torrents", len(ids)) c.torrentsMutex.Lock() @@ -628,25 +485,7 @@ func (c *Cache) removeFromDB(torrentId string) { } } -func (c *Cache) resetPropfindResponse() { - // Right now, parents are hardcoded - parents := []string{"__all__", "torrents"} - // Reset only the parent directories - // Convert the parents to a keys - // This is a bit hacky, but it works - // Instead of deleting all the keys, we only delete the parent keys, e.g __all__/ or torrents/ - keys := make([]string, 0, len(parents)) - for _, p := range parents { - // Construct the key - // construct url - url := filepath.Join("/webdav/%s/%s", c.client.GetName(), p) - key0 := fmt.Sprintf("propfind:%s:0", url) - key1 := fmt.Sprintf("propfind:%s:1", url) - keys = append(keys, key0, key1) - } - - // Delete the keys - for _, k := range keys { - c.propfindResp.Delete(k) - } +func (c *Cache) OnRemove(torrentId string) { + go c.DeleteTorrent([]string{torrentId}) + go tryLock(&c.listingRefreshMu, c.refreshListings) } diff --git a/pkg/debrid/debrid/debrid.go b/pkg/debrid/debrid/debrid.go index 393abf1..f26bbb0 100644 --- a/pkg/debrid/debrid/debrid.go +++ b/pkg/debrid/debrid/debrid.go @@ -1,24 +1,86 @@ package debrid import ( - "github.com/rs/zerolog" - "github.com/sirrobot01/debrid-blackhole/pkg/debrid/torrent" + "fmt" + "github.com/sirrobot01/debrid-blackhole/internal/config" + "github.com/sirrobot01/debrid-blackhole/internal/utils" + "github.com/sirrobot01/debrid-blackhole/pkg/arr" + "github.com/sirrobot01/debrid-blackhole/pkg/debrid/alldebrid" + "github.com/sirrobot01/debrid-blackhole/pkg/debrid/debrid_link" + "github.com/sirrobot01/debrid-blackhole/pkg/debrid/realdebrid" + 
"github.com/sirrobot01/debrid-blackhole/pkg/debrid/torbox" + "github.com/sirrobot01/debrid-blackhole/pkg/debrid/types" ) -type Client interface { - SubmitMagnet(tr *torrent.Torrent) (*torrent.Torrent, error) - CheckStatus(tr *torrent.Torrent, isSymlink bool) (*torrent.Torrent, error) - GenerateDownloadLinks(tr *torrent.Torrent) error - GetDownloadLink(tr *torrent.Torrent, file *torrent.File) *torrent.File - ConvertLinksToFiles(links []string) []torrent.File - DeleteTorrent(tr *torrent.Torrent) - IsAvailable(infohashes []string) map[string]bool - GetCheckCached() bool - GetDownloadUncached() bool - UpdateTorrent(torrent *torrent.Torrent) error - GetTorrents() ([]*torrent.Torrent, error) - GetName() string - GetLogger() zerolog.Logger - GetDownloadingStatus() []string - GetDownloads() (map[string]torrent.DownloadLinks, error) +func createDebridClient(dc config.Debrid) types.Client { + switch dc.Name { + case "realdebrid": + return realdebrid.New(dc) + case "torbox": + return torbox.New(dc) + case "debridlink": + return debrid_link.New(dc) + case "alldebrid": + return alldebrid.New(dc) + default: + return realdebrid.New(dc) + } +} + +func ProcessTorrent(d *Engine, magnet *utils.Magnet, a *arr.Arr, isSymlink, overrideDownloadUncached bool) (*types.Torrent, error) { + + debridTorrent := &types.Torrent{ + InfoHash: magnet.InfoHash, + Magnet: magnet, + Name: magnet.Name, + Arr: a, + Size: magnet.Size, + Files: make(map[string]types.File), + } + + errs := make([]error, 0) + + for index, db := range d.Clients { + logger := db.GetLogger() + logger.Info().Msgf("Processing debrid: %s", db.GetName()) + + // Override first, arr second, debrid third + + if overrideDownloadUncached { + debridTorrent.DownloadUncached = true + } else if a.DownloadUncached != nil { + // Arr cached is set + debridTorrent.DownloadUncached = *a.DownloadUncached + } else { + debridTorrent.DownloadUncached = db.GetDownloadUncached() + } + + logger.Info().Msgf("Torrent Hash: %s", debridTorrent.InfoHash) + 
if db.GetCheckCached() { + hash, exists := db.IsAvailable([]string{debridTorrent.InfoHash})[debridTorrent.InfoHash] + if !exists || !hash { + logger.Info().Msgf("Torrent: %s is not cached", debridTorrent.Name) + continue + } else { + logger.Info().Msgf("Torrent: %s is cached(or downloading)", debridTorrent.Name) + } + } + + dbt, err := db.SubmitMagnet(debridTorrent) + if dbt != nil { + dbt.Arr = a + } + if err != nil || dbt == nil || dbt.Id == "" { + errs = append(errs, err) + continue + } + logger.Info().Msgf("Torrent: %s(id=%s) submitted to %s", dbt.Name, dbt.Id, db.GetName()) + d.LastUsed = index + return db.CheckStatus(dbt, isSymlink) + } + err := fmt.Errorf("failed to process torrent") + for _, e := range errs { + err = fmt.Errorf("%w\n%w", err, e) + } + return nil, err } diff --git a/pkg/debrid/debrid/engine.go b/pkg/debrid/debrid/engine.go new file mode 100644 index 0000000..61ded2d --- /dev/null +++ b/pkg/debrid/debrid/engine.go @@ -0,0 +1,51 @@ +package debrid + +import ( + "github.com/sirrobot01/debrid-blackhole/internal/config" + "github.com/sirrobot01/debrid-blackhole/pkg/debrid/types" +) + +type Engine struct { + Clients map[string]types.Client + Caches map[string]*Cache + LastUsed string +} + +func NewEngine() *Engine { + cfg := config.GetConfig() + clients := make(map[string]types.Client) + + caches := make(map[string]*Cache) + + for _, dc := range cfg.Debrids { + client := createDebridClient(dc) + logger := client.GetLogger() + logger.Info().Msg("Debrid Service started") + clients[dc.Name] = client + caches[dc.Name] = NewCache(client) + } + + d := &Engine{ + Clients: clients, + LastUsed: "", + Caches: caches, + } + return d +} + +func (d *Engine) Get() types.Client { + if d.LastUsed == "" { + for _, c := range d.Clients { + return c + } + } + return d.Clients[d.LastUsed] +} + +func (d *Engine) GetByName(name string) types.Client { + return d.Clients[name] +} + +func (d *Engine) GetDebrids() map[string]types.Client { + return d.Clients +} diff --git 
a/pkg/debrid/debrid/misc.go b/pkg/debrid/debrid/misc.go new file mode 100644 index 0000000..beb6f04 --- /dev/null +++ b/pkg/debrid/debrid/misc.go @@ -0,0 +1,10 @@ +package debrid + +import "sync" + +func tryLock(mu *sync.Mutex, f func()) { + if mu.TryLock() { + defer mu.Unlock() + f() + } +} diff --git a/pkg/debrid/debrid/refresh.go b/pkg/debrid/debrid/refresh.go new file mode 100644 index 0000000..abe0b39 --- /dev/null +++ b/pkg/debrid/debrid/refresh.go @@ -0,0 +1,205 @@ +package debrid + +import ( + "bytes" + "fmt" + "github.com/goccy/go-json" + "github.com/sirrobot01/debrid-blackhole/pkg/debrid/types" + "net/http" + "os" + "path" + "path/filepath" + "sort" + "sync" + "time" +) + +func (c *Cache) refreshListings() { + // Copy the current torrents to avoid concurrent issues + c.torrentsMutex.RLock() + torrents := make([]string, 0, len(c.torrents)) + for _, t := range c.torrents { + if t != nil && t.Torrent != nil { + torrents = append(torrents, t.Name) + } + } + c.torrentsMutex.RUnlock() + + sort.Slice(torrents, func(i, j int) bool { + return torrents[i] < torrents[j] + }) + + files := make([]os.FileInfo, 0, len(torrents)) + now := time.Now() + for _, t := range torrents { + files = append(files, &fileInfo{ + name: t, + size: 0, + mode: 0755 | os.ModeDir, + modTime: now, + isDir: true, + }) + } + // Atomic store of the complete ready-to-use slice + c.listings.Store(files) + c.resetPropfindResponse() + if err := c.RefreshRclone(); err != nil { + c.logger.Debug().Err(err).Msg("Failed to refresh rclone") + } +} + +func (c *Cache) refreshTorrents() { + c.torrentsMutex.RLock() + currentTorrents := c.torrents // + // Create a copy of the current torrents to avoid concurrent issues + torrents := make(map[string]string, len(currentTorrents)) // a mpa of id and name + for _, v := range currentTorrents { + torrents[v.Id] = v.Name + } + c.torrentsMutex.RUnlock() + + // Get new torrents from the debrid service + debTorrents, err := c.client.GetTorrents() + if err != nil { + 
c.logger.Debug().Err(err).Msg("Failed to get torrents") + return + } + + if len(debTorrents) == 0 { + // Maybe an error occurred + return + } + + // Get the newly added torrents only + newTorrents := make([]*types.Torrent, 0) + idStore := make(map[string]bool, len(debTorrents)) + for _, t := range debTorrents { + idStore[t.Id] = true + if _, ok := torrents[t.Id]; !ok { + newTorrents = append(newTorrents, t) + } + } + + // Check for deleted torrents + deletedTorrents := make([]string, 0) + for id, _ := range torrents { + if _, ok := idStore[id]; !ok { + deletedTorrents = append(deletedTorrents, id) + } + } + + if len(deletedTorrents) > 0 { + c.DeleteTorrent(deletedTorrents) + } + + if len(newTorrents) == 0 { + return + } + c.logger.Info().Msgf("Found %d new torrents", len(newTorrents)) + + // No need for a complex sync process, just add the new torrents + wg := sync.WaitGroup{} + wg.Add(len(newTorrents)) + for _, t := range newTorrents { + // ProcessTorrent is concurrent safe + go func() { + defer wg.Done() + if err := c.ProcessTorrent(t, true); err != nil { + c.logger.Info().Err(err).Msg("Failed to process torrent") + } + + }() + } + wg.Wait() +} + +func (c *Cache) RefreshRclone() error { + params := map[string]interface{}{ + "recursive": "false", + } + + // Convert parameters to JSON + jsonParams, err := json.Marshal(params) + if err != nil { + return err + } + + // Create HTTP request + url := "http://192.168.0.219:9990/vfs/refresh" // Switch to config + req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonParams)) + if err != nil { + return err + } + + // Set the appropriate headers + req.Header.Set("Content-Type", "application/json") + + // Send the request + client := &http.Client{} + resp, err := client.Do(req) + if err != nil { + return err + } + if resp.StatusCode != 200 { + return fmt.Errorf("failed to refresh rclone: %s", resp.Status) + } + return nil +} + +func (c *Cache) refreshTorrent(t *CachedTorrent) *CachedTorrent { + _torrent := t.Torrent + 
err := c.client.UpdateTorrent(_torrent) + if err != nil { + c.logger.Debug().Msgf("Failed to get torrent files for %s: %v", t.Id, err) + return nil + } + if len(t.Files) == 0 { + return nil + } + + ct := &CachedTorrent{ + Torrent: _torrent, + LastRead: time.Now(), + IsComplete: len(t.Files) > 0, + } + c.setTorrent(ct) + + return ct +} + +func (c *Cache) refreshDownloadLinks() { + c.downloadLinksMutex.Lock() + defer c.downloadLinksMutex.Unlock() + + downloadLinks, err := c.client.GetDownloads() + if err != nil { + c.logger.Debug().Err(err).Msg("Failed to get download links") + } + for k, v := range downloadLinks { + c.downloadLinks[k] = v.DownloadLink + } +} + +func (c *Cache) resetPropfindResponse() { + // Right now, parents are hardcoded + parents := []string{"__all__", "torrents"} + // Reset only the parent directories + // Convert the parents to a keys + // This is a bit hacky, but it works + // Instead of deleting all the keys, we only delete the parent keys, e.g __all__/ or torrents/ + keys := make([]string, 0, len(parents)) + for _, p := range parents { + // Construct the key + // construct url + url := filepath.Join("/webdav", c.client.GetName(), p) + url = path.Clean(url) + key0 := fmt.Sprintf("propfind:%s:0", url) + key1 := fmt.Sprintf("propfind:%s:1", url) + keys = append(keys, key0, key1) + } + + // Delete the keys + for _, k := range keys { + c.PropfindResp.Delete(k) + } +} diff --git a/pkg/debrid/debrid/workers.go b/pkg/debrid/debrid/workers.go new file mode 100644 index 0000000..a0de397 --- /dev/null +++ b/pkg/debrid/debrid/workers.go @@ -0,0 +1,35 @@ +package debrid + +import "time" + +func (c *Cache) Refresh() error { + // For now, we just want to refresh the listing and download links + c.logger.Info().Msg("Starting cache refresh workers") + go c.refreshDownloadLinksWorker() + go c.refreshTorrentsWorker() + return nil +} + +func (c *Cache) refreshDownloadLinksWorker() { + refreshTicker := time.NewTicker(40 * time.Minute) + defer 
refreshTicker.Stop() + + for { + select { + case <-refreshTicker.C: + tryLock(&c.downloadLinksRefreshMu, c.refreshDownloadLinks) + } + } +} + +func (c *Cache) refreshTorrentsWorker() { + refreshTicker := time.NewTicker(5 * time.Second) + defer refreshTicker.Stop() + + for { + select { + case <-refreshTicker.C: + tryLock(&c.torrentsRefreshMu, c.refreshTorrents) + } + } +} diff --git a/pkg/debrid/debrid_link/debrid_link.go b/pkg/debrid/debrid_link/debrid_link.go index a65b624..fd19172 100644 --- a/pkg/debrid/debrid_link/debrid_link.go +++ b/pkg/debrid/debrid_link/debrid_link.go @@ -9,7 +9,7 @@ import ( "github.com/sirrobot01/debrid-blackhole/internal/logger" "github.com/sirrobot01/debrid-blackhole/internal/request" "github.com/sirrobot01/debrid-blackhole/internal/utils" - "github.com/sirrobot01/debrid-blackhole/pkg/debrid/torrent" + "github.com/sirrobot01/debrid-blackhole/pkg/debrid/types" "slices" "time" @@ -89,7 +89,7 @@ func (dl *DebridLink) IsAvailable(hashes []string) map[string]bool { return result } -func (dl *DebridLink) UpdateTorrent(t *torrent.Torrent) error { +func (dl *DebridLink) UpdateTorrent(t *types.Torrent) error { url := fmt.Sprintf("%s/seedbox/list?ids=%s", dl.Host, t.Id) req, _ := http.NewRequest(http.MethodGet, url, nil) resp, err := dl.client.MakeRequest(req) @@ -133,7 +133,7 @@ func (dl *DebridLink) UpdateTorrent(t *torrent.Torrent) error { if !cfg.IsSizeAllowed(f.Size) { continue } - file := torrent.File{ + file := types.File{ Id: f.ID, Name: f.Name, Size: f.Size, @@ -146,7 +146,7 @@ func (dl *DebridLink) UpdateTorrent(t *torrent.Torrent) error { return nil } -func (dl *DebridLink) SubmitMagnet(t *torrent.Torrent) (*torrent.Torrent, error) { +func (dl *DebridLink) SubmitMagnet(t *types.Torrent) (*types.Torrent, error) { url := fmt.Sprintf("%s/seedbox/add", dl.Host) payload := map[string]string{"url": t.Magnet.Link} jsonPayload, _ := json.Marshal(payload) @@ -179,7 +179,7 @@ func (dl *DebridLink) SubmitMagnet(t *torrent.Torrent) 
(*torrent.Torrent, error) t.MountPath = dl.MountPath t.Debrid = dl.Name for _, f := range data.Files { - file := torrent.File{ + file := types.File{ Id: f.ID, Name: f.Name, Size: f.Size, @@ -194,7 +194,7 @@ func (dl *DebridLink) SubmitMagnet(t *torrent.Torrent) (*torrent.Torrent, error) return t, nil } -func (dl *DebridLink) CheckStatus(torrent *torrent.Torrent, isSymlink bool) (*torrent.Torrent, error) { +func (dl *DebridLink) CheckStatus(torrent *types.Torrent, isSymlink bool) (*types.Torrent, error) { for { err := dl.UpdateTorrent(torrent) if err != nil || torrent == nil { @@ -223,7 +223,7 @@ func (dl *DebridLink) CheckStatus(torrent *torrent.Torrent, isSymlink bool) (*to return torrent, nil } -func (dl *DebridLink) DeleteTorrent(torrent *torrent.Torrent) { +func (dl *DebridLink) DeleteTorrent(torrent *types.Torrent) { url := fmt.Sprintf("%s/seedbox/%s/remove", dl.Host, torrent.Id) req, _ := http.NewRequest(http.MethodDelete, url, nil) _, err := dl.client.MakeRequest(req) @@ -234,15 +234,15 @@ func (dl *DebridLink) DeleteTorrent(torrent *torrent.Torrent) { } } -func (dl *DebridLink) GenerateDownloadLinks(t *torrent.Torrent) error { +func (dl *DebridLink) GenerateDownloadLinks(t *types.Torrent) error { return nil } -func (dl *DebridLink) GetDownloads() (map[string]torrent.DownloadLinks, error) { +func (dl *DebridLink) GetDownloads() (map[string]types.DownloadLinks, error) { return nil, nil } -func (dl *DebridLink) GetDownloadLink(t *torrent.Torrent, file *torrent.File) *torrent.File { +func (dl *DebridLink) GetDownloadLink(t *types.Torrent, file *types.File) *types.File { return file } @@ -280,10 +280,10 @@ func New(dc config.Debrid) *DebridLink { } } -func (dl *DebridLink) GetTorrents() ([]*torrent.Torrent, error) { +func (dl *DebridLink) GetTorrents() ([]*types.Torrent, error) { return nil, nil } -func (dl *DebridLink) ConvertLinksToFiles(links []string) []torrent.File { +func (dl *DebridLink) ConvertLinksToFiles(links []string) []types.File { return nil } diff 
--git a/pkg/debrid/engine/engine.go b/pkg/debrid/engine/engine.go deleted file mode 100644 index 6662ff0..0000000 --- a/pkg/debrid/engine/engine.go +++ /dev/null @@ -1,30 +0,0 @@ -package engine - -import ( - "github.com/sirrobot01/debrid-blackhole/pkg/debrid/debrid" -) - -type Engine struct { - Debrids []debrid.Client - LastUsed int -} - -func (d *Engine) Get() debrid.Client { - if d.LastUsed == 0 { - return d.Debrids[0] - } - return d.Debrids[d.LastUsed] -} - -func (d *Engine) GetByName(name string) debrid.Client { - for _, deb := range d.Debrids { - if deb.GetName() == name { - return deb - } - } - return nil -} - -func (d *Engine) GetDebrids() []debrid.Client { - return d.Debrids -} diff --git a/pkg/debrid/realdebrid/realdebrid.go b/pkg/debrid/realdebrid/realdebrid.go index c018ca6..fa94275 100644 --- a/pkg/debrid/realdebrid/realdebrid.go +++ b/pkg/debrid/realdebrid/realdebrid.go @@ -8,7 +8,7 @@ import ( "github.com/sirrobot01/debrid-blackhole/internal/logger" "github.com/sirrobot01/debrid-blackhole/internal/request" "github.com/sirrobot01/debrid-blackhole/internal/utils" - "github.com/sirrobot01/debrid-blackhole/pkg/debrid/torrent" + "github.com/sirrobot01/debrid-blackhole/pkg/debrid/types" "io" "net/http" gourl "net/url" @@ -43,8 +43,8 @@ func (r *RealDebrid) GetLogger() zerolog.Logger { // getTorrentFiles returns a list of torrent files from the torrent info // validate is used to determine if the files should be validated // if validate is false, selected files will be returned -func getTorrentFiles(t *torrent.Torrent, data TorrentInfo, validate bool) map[string]torrent.File { - files := make(map[string]torrent.File) +func getTorrentFiles(t *types.Torrent, data TorrentInfo, validate bool) map[string]types.File { + files := make(map[string]types.File) cfg := config.GetConfig() idx := 0 for _, f := range data.Files { @@ -80,7 +80,7 @@ func getTorrentFiles(t *torrent.Torrent, data TorrentInfo, validate bool) map[st continue } - file := torrent.File{ + file := 
types.File{ Name: name, Path: name, Size: f.Bytes, @@ -141,7 +141,7 @@ func (r *RealDebrid) IsAvailable(hashes []string) map[string]bool { return result } -func (r *RealDebrid) SubmitMagnet(t *torrent.Torrent) (*torrent.Torrent, error) { +func (r *RealDebrid) SubmitMagnet(t *types.Torrent) (*types.Torrent, error) { url := fmt.Sprintf("%s/torrents/addMagnet", r.Host) payload := gourl.Values{ "magnet": {t.Magnet.Link}, @@ -161,7 +161,7 @@ func (r *RealDebrid) SubmitMagnet(t *torrent.Torrent) (*torrent.Torrent, error) return t, nil } -func (r *RealDebrid) UpdateTorrent(t *torrent.Torrent) error { +func (r *RealDebrid) UpdateTorrent(t *types.Torrent) error { url := fmt.Sprintf("%s/torrents/info/%s", r.Host, t.Id) req, _ := http.NewRequest(http.MethodGet, url, nil) resp, err := r.client.MakeRequest(req) @@ -173,7 +173,7 @@ func (r *RealDebrid) UpdateTorrent(t *torrent.Torrent) error { if err != nil { return err } - name := utils.RemoveExtension(data.OriginalFilename) + name := utils.RemoveInvalidChars(data.OriginalFilename) t.Name = name t.Bytes = data.Bytes t.Folder = name @@ -190,7 +190,7 @@ func (r *RealDebrid) UpdateTorrent(t *torrent.Torrent) error { return nil } -func (r *RealDebrid) CheckStatus(t *torrent.Torrent, isSymlink bool) (*torrent.Torrent, error) { +func (r *RealDebrid) CheckStatus(t *types.Torrent, isSymlink bool) (*types.Torrent, error) { url := fmt.Sprintf("%s/torrents/info/%s", r.Host, t.Id) req, _ := http.NewRequest(http.MethodGet, url, nil) for { @@ -204,7 +204,7 @@ func (r *RealDebrid) CheckStatus(t *torrent.Torrent, isSymlink bool) (*torrent.T return t, err } status := data.Status - name := utils.RemoveInvalidChars(data.OriginalFilename) + name := utils.RemoveExtension(data.OriginalFilename) t.Name = name // Important because some magnet changes the name t.Folder = name t.Filename = data.Filename @@ -257,7 +257,7 @@ func (r *RealDebrid) CheckStatus(t *torrent.Torrent, isSymlink bool) (*torrent.T return t, nil } -func (r *RealDebrid) 
DeleteTorrent(torrent *torrent.Torrent) { +func (r *RealDebrid) DeleteTorrent(torrent *types.Torrent) { url := fmt.Sprintf("%s/torrents/delete/%s", r.Host, torrent.Id) req, _ := http.NewRequest(http.MethodDelete, url, nil) _, err := r.client.MakeRequest(req) @@ -268,7 +268,7 @@ func (r *RealDebrid) DeleteTorrent(torrent *torrent.Torrent) { } } -func (r *RealDebrid) GenerateDownloadLinks(t *torrent.Torrent) error { +func (r *RealDebrid) GenerateDownloadLinks(t *types.Torrent) error { url := fmt.Sprintf("%s/unrestrict/link/", r.Host) for _, f := range t.Files { if f.DownloadLink != "" { @@ -294,8 +294,8 @@ func (r *RealDebrid) GenerateDownloadLinks(t *torrent.Torrent) error { return nil } -func (r *RealDebrid) ConvertLinksToFiles(links []string) []torrent.File { - files := make([]torrent.File, 0) +func (r *RealDebrid) ConvertLinksToFiles(links []string) []types.File { + files := make([]types.File, 0) for _, l := range links { url := fmt.Sprintf("%s/unrestrict/link/", r.Host) payload := gourl.Values{ @@ -310,7 +310,7 @@ func (r *RealDebrid) ConvertLinksToFiles(links []string) []torrent.File { if err = json.Unmarshal(resp, &data); err != nil { continue } - files = append(files, torrent.File{ + files = append(files, types.File{ Name: data.Filename, Size: data.Filesize, Link: l, @@ -321,7 +321,7 @@ func (r *RealDebrid) ConvertLinksToFiles(links []string) []torrent.File { return files } -func (r *RealDebrid) GetDownloadLink(t *torrent.Torrent, file *torrent.File) *torrent.File { +func (r *RealDebrid) GetDownloadLink(t *types.Torrent, file *types.File) *types.File { url := fmt.Sprintf("%s/unrestrict/link/", r.Host) payload := gourl.Values{ "link": {file.Link}, @@ -344,9 +344,9 @@ func (r *RealDebrid) GetCheckCached() bool { return r.CheckCached } -func (r *RealDebrid) getTorrents(offset int, limit int) (int, []*torrent.Torrent, error) { +func (r *RealDebrid) getTorrents(offset int, limit int) (int, []*types.Torrent, error) { url := fmt.Sprintf("%s/torrents?limit=%d", 
r.Host, limit) - torrents := make([]*torrent.Torrent, 0) + torrents := make([]*types.Torrent, 0) if offset > 0 { url = fmt.Sprintf("%s&offset=%d", url, offset) } @@ -374,10 +374,13 @@ func (r *RealDebrid) getTorrents(offset int, limit int) (int, []*torrent.Torrent } filenames := map[string]bool{} for _, t := range data { + if t.Status != "downloaded" { + continue + } if _, exists := filenames[t.Filename]; exists { continue } - torrents = append(torrents, &torrent.Torrent{ + torrents = append(torrents, &types.Torrent{ Id: t.Id, Name: utils.RemoveInvalidChars(t.Filename), Bytes: t.Bytes, @@ -386,7 +389,7 @@ func (r *RealDebrid) getTorrents(offset int, limit int) (int, []*torrent.Torrent Filename: t.Filename, OriginalFilename: t.Filename, Links: t.Links, - Files: make(map[string]torrent.File), + Files: make(map[string]types.File), InfoHash: t.Hash, Debrid: r.Name, MountPath: r.MountPath, @@ -395,7 +398,7 @@ func (r *RealDebrid) getTorrents(offset int, limit int) (int, []*torrent.Torrent return totalItems, torrents, nil } -func (r *RealDebrid) GetTorrents() ([]*torrent.Torrent, error) { +func (r *RealDebrid) GetTorrents() ([]*types.Torrent, error) { limit := 5000 // Get first batch and total count @@ -449,8 +452,8 @@ func (r *RealDebrid) GetTorrents() ([]*torrent.Torrent, error) { return allTorrents, nil } -func (r *RealDebrid) GetDownloads() (map[string]torrent.DownloadLinks, error) { - links := make(map[string]torrent.DownloadLinks) +func (r *RealDebrid) GetDownloads() (map[string]types.DownloadLinks, error) { + links := make(map[string]types.DownloadLinks) offset := 0 limit := 5000 for { @@ -475,7 +478,7 @@ func (r *RealDebrid) GetDownloads() (map[string]torrent.DownloadLinks, error) { return links, nil } -func (r *RealDebrid) _getDownloads(offset int, limit int) ([]torrent.DownloadLinks, error) { +func (r *RealDebrid) _getDownloads(offset int, limit int) ([]types.DownloadLinks, error) { url := fmt.Sprintf("%s/downloads?limit=%d", r.Host, limit) if offset > 0 { url 
= fmt.Sprintf("%s&offset=%d", url, offset) @@ -489,9 +492,9 @@ func (r *RealDebrid) _getDownloads(offset int, limit int) ([]torrent.DownloadLin if err = json.Unmarshal(resp, &data); err != nil { return nil, err } - links := make([]torrent.DownloadLinks, 0) + links := make([]types.DownloadLinks, 0) for _, d := range data { - links = append(links, torrent.DownloadLinks{ + links = append(links, types.DownloadLinks{ Filename: d.Filename, Size: d.Filesize, Link: d.Link, diff --git a/pkg/debrid/torbox/torbox.go b/pkg/debrid/torbox/torbox.go index 4769c08..ce83f2d 100644 --- a/pkg/debrid/torbox/torbox.go +++ b/pkg/debrid/torbox/torbox.go @@ -9,7 +9,7 @@ import ( "github.com/sirrobot01/debrid-blackhole/internal/logger" "github.com/sirrobot01/debrid-blackhole/internal/request" "github.com/sirrobot01/debrid-blackhole/internal/utils" - "github.com/sirrobot01/debrid-blackhole/pkg/debrid/torrent" + "github.com/sirrobot01/debrid-blackhole/pkg/debrid/types" "time" "mime/multipart" @@ -93,7 +93,7 @@ func (tb *Torbox) IsAvailable(hashes []string) map[string]bool { return result } -func (tb *Torbox) SubmitMagnet(torrent *torrent.Torrent) (*torrent.Torrent, error) { +func (tb *Torbox) SubmitMagnet(torrent *types.Torrent) (*types.Torrent, error) { url := fmt.Sprintf("%s/api/torrents/createtorrent", tb.Host) payload := &bytes.Buffer{} writer := multipart.NewWriter(payload) @@ -141,7 +141,7 @@ func getTorboxStatus(status string, finished bool) string { } } -func (tb *Torbox) UpdateTorrent(t *torrent.Torrent) error { +func (tb *Torbox) UpdateTorrent(t *types.Torrent) error { url := fmt.Sprintf("%s/api/torrents/mylist/?id=%s", tb.Host, t.Id) req, _ := http.NewRequest(http.MethodGet, url, nil) resp, err := tb.client.MakeRequest(req) @@ -180,7 +180,7 @@ func (tb *Torbox) UpdateTorrent(t *torrent.Torrent) error { if !cfg.IsSizeAllowed(f.Size) { continue } - file := torrent.File{ + file := types.File{ Id: strconv.Itoa(f.Id), Name: fileName, Size: f.Size, @@ -200,7 +200,7 @@ func (tb *Torbox) 
UpdateTorrent(t *torrent.Torrent) error { return nil } -func (tb *Torbox) CheckStatus(torrent *torrent.Torrent, isSymlink bool) (*torrent.Torrent, error) { +func (tb *Torbox) CheckStatus(torrent *types.Torrent, isSymlink bool) (*types.Torrent, error) { for { err := tb.UpdateTorrent(torrent) @@ -232,7 +232,7 @@ func (tb *Torbox) CheckStatus(torrent *torrent.Torrent, isSymlink bool) (*torren return torrent, nil } -func (tb *Torbox) DeleteTorrent(torrent *torrent.Torrent) { +func (tb *Torbox) DeleteTorrent(torrent *types.Torrent) { url := fmt.Sprintf("%s/api/torrents/controltorrent/%s", tb.Host, torrent.Id) payload := map[string]string{"torrent_id": torrent.Id, "action": "Delete"} jsonPayload, _ := json.Marshal(payload) @@ -245,7 +245,7 @@ func (tb *Torbox) DeleteTorrent(torrent *torrent.Torrent) { } } -func (tb *Torbox) GenerateDownloadLinks(t *torrent.Torrent) error { +func (tb *Torbox) GenerateDownloadLinks(t *types.Torrent) error { for _, file := range t.Files { url := fmt.Sprintf("%s/api/torrents/requestdl/", tb.Host) query := gourl.Values{} @@ -273,7 +273,7 @@ func (tb *Torbox) GenerateDownloadLinks(t *torrent.Torrent) error { return nil } -func (tb *Torbox) GetDownloadLink(t *torrent.Torrent, file *torrent.File) *torrent.File { +func (tb *Torbox) GetDownloadLink(t *types.Torrent, file *types.File) *types.File { url := fmt.Sprintf("%s/api/torrents/requestdl/", tb.Host) query := gourl.Values{} query.Add("torrent_id", t.Id) @@ -306,7 +306,7 @@ func (tb *Torbox) GetCheckCached() bool { return tb.CheckCached } -func (tb *Torbox) GetTorrents() ([]*torrent.Torrent, error) { +func (tb *Torbox) GetTorrents() ([]*types.Torrent, error) { return nil, nil } @@ -336,10 +336,10 @@ func New(dc config.Debrid) *Torbox { } } -func (tb *Torbox) ConvertLinksToFiles(links []string) []torrent.File { +func (tb *Torbox) ConvertLinksToFiles(links []string) []types.File { return nil } -func (tb *Torbox) GetDownloads() (map[string]torrent.DownloadLinks, error) { +func (tb *Torbox) 
GetDownloads() (map[string]types.DownloadLinks, error) { return nil, nil } diff --git a/pkg/debrid/types/debrid.go b/pkg/debrid/types/debrid.go new file mode 100644 index 0000000..58b599a --- /dev/null +++ b/pkg/debrid/types/debrid.go @@ -0,0 +1,23 @@ +package types + +import ( + "github.com/rs/zerolog" +) + +type Client interface { + SubmitMagnet(tr *Torrent) (*Torrent, error) + CheckStatus(tr *Torrent, isSymlink bool) (*Torrent, error) + GenerateDownloadLinks(tr *Torrent) error + GetDownloadLink(tr *Torrent, file *File) *File + ConvertLinksToFiles(links []string) []File + DeleteTorrent(tr *Torrent) + IsAvailable(infohashes []string) map[string]bool + GetCheckCached() bool + GetDownloadUncached() bool + UpdateTorrent(torrent *Torrent) error + GetTorrents() ([]*Torrent, error) + GetName() string + GetLogger() zerolog.Logger + GetDownloadingStatus() []string + GetDownloads() (map[string]DownloadLinks, error) +} diff --git a/pkg/debrid/torrent/torrent.go b/pkg/debrid/types/torrent.go similarity index 99% rename from pkg/debrid/torrent/torrent.go rename to pkg/debrid/types/torrent.go index 2fbbdbb..39bb86f 100644 --- a/pkg/debrid/torrent/torrent.go +++ b/pkg/debrid/types/torrent.go @@ -1,4 +1,4 @@ -package torrent +package types import ( "fmt" diff --git a/pkg/qbit/downloader.go b/pkg/qbit/downloader.go index 2a5ba52..7f752c0 100644 --- a/pkg/qbit/downloader.go +++ b/pkg/qbit/downloader.go @@ -5,7 +5,7 @@ import ( "fmt" "github.com/cavaliergopher/grab/v3" "github.com/sirrobot01/debrid-blackhole/internal/utils" - debrid "github.com/sirrobot01/debrid-blackhole/pkg/debrid/torrent" + debrid "github.com/sirrobot01/debrid-blackhole/pkg/debrid/types" "io" "net/http" "os" @@ -154,8 +154,13 @@ func (q *QBit) ProcessSymlink(torrent *Torrent) (string, error) { torrentFolder = utils.RemoveExtension(torrentFolder) torrentRclonePath = rCloneBase // /mnt/rclone/magnets/ // Remove the filename since it's in the root folder } - torrentSymlinkPath := filepath.Join(q.DownloadFolder, 
debridTorrent.Arr.Name, torrentFolder) // /mnt/symlinks/{category}/MyTVShow/ - err = os.MkdirAll(torrentSymlinkPath, os.ModePerm) + return q.createSymlinks(debridTorrent, torrentRclonePath, torrentFolder) // verify cos we're using external webdav +} + +func (q *QBit) createSymlinks(debridTorrent *debrid.Torrent, rclonePath, torrentFolder string) (string, error) { + files := debridTorrent.Files + torrentSymlinkPath := filepath.Join(q.DownloadFolder, debridTorrent.Arr.Name, torrentFolder) + err := os.MkdirAll(torrentSymlinkPath, os.ModePerm) if err != nil { return "", fmt.Errorf("failed to create directory: %s: %v", torrentSymlinkPath, err) } @@ -164,16 +169,16 @@ func (q *QBit) ProcessSymlink(torrent *Torrent) (string, error) { for _, file := range files { pending[file.Path] = file } - ticker := time.NewTicker(200 * time.Millisecond) + ticker := time.NewTicker(100 * time.Millisecond) defer ticker.Stop() for len(pending) > 0 { <-ticker.C for path, file := range pending { - fullFilePath := filepath.Join(torrentRclonePath, file.Path) + fullFilePath := filepath.Join(rclonePath, file.Path) if _, err := os.Stat(fullFilePath); !os.IsNotExist(err) { q.logger.Info().Msgf("File is ready: %s", file.Path) - q.createSymLink(torrentSymlinkPath, torrentRclonePath, file) + q.createSymLink(torrentSymlinkPath, rclonePath, file) delete(pending, path) } } diff --git a/pkg/qbit/import.go b/pkg/qbit/import.go index a46f21f..243a99e 100644 --- a/pkg/qbit/import.go +++ b/pkg/qbit/import.go @@ -3,12 +3,12 @@ package qbit import ( "fmt" "github.com/sirrobot01/debrid-blackhole/internal/utils" + "github.com/sirrobot01/debrid-blackhole/pkg/debrid/debrid" "github.com/sirrobot01/debrid-blackhole/pkg/service" "time" "github.com/google/uuid" "github.com/sirrobot01/debrid-blackhole/pkg/arr" - "github.com/sirrobot01/debrid-blackhole/pkg/debrid" ) type ImportRequest struct { diff --git a/pkg/qbit/torrent.go b/pkg/qbit/torrent.go index 95165e3..67c855e 100644 --- a/pkg/qbit/torrent.go +++ 
b/pkg/qbit/torrent.go @@ -7,8 +7,8 @@ import ( "github.com/sirrobot01/debrid-blackhole/internal/request" "github.com/sirrobot01/debrid-blackhole/internal/utils" "github.com/sirrobot01/debrid-blackhole/pkg/arr" - db "github.com/sirrobot01/debrid-blackhole/pkg/debrid" - debrid "github.com/sirrobot01/debrid-blackhole/pkg/debrid/torrent" + db "github.com/sirrobot01/debrid-blackhole/pkg/debrid/debrid" + debrid "github.com/sirrobot01/debrid-blackhole/pkg/debrid/types" "github.com/sirrobot01/debrid-blackhole/pkg/service" "io" "mime/multipart" @@ -74,13 +74,14 @@ func (q *QBit) Process(ctx context.Context, magnet *utils.Magnet, category strin } func (q *QBit) ProcessFiles(torrent *Torrent, debridTorrent *debrid.Torrent, arr *arr.Arr, isSymlink bool) { - debridClient := service.GetDebrid().GetByName(debridTorrent.Debrid) + svc := service.GetService() + client := svc.Debrid.GetByName(debridTorrent.Debrid) for debridTorrent.Status != "downloaded" { q.logger.Debug().Msgf("%s <- (%s) Download Progress: %.2f%%", debridTorrent.Debrid, debridTorrent.Name, debridTorrent.Progress) - dbT, err := debridClient.CheckStatus(debridTorrent, isSymlink) + dbT, err := client.CheckStatus(debridTorrent, isSymlink) if err != nil { q.logger.Error().Msgf("Error checking status: %v", err) - go debridClient.DeleteTorrent(debridTorrent) + go client.DeleteTorrent(debridTorrent) q.MarkAsFailed(torrent) if err := arr.Refresh(); err != nil { q.logger.Error().Msgf("Error refreshing arr: %v", err) @@ -92,7 +93,7 @@ func (q *QBit) ProcessFiles(torrent *Torrent, debridTorrent *debrid.Torrent, arr torrent = q.UpdateTorrentMin(torrent, debridTorrent) // Exit the loop for downloading statuses to prevent memory buildup - if !slices.Contains(debridClient.GetDownloadingStatus(), debridTorrent.Status) { + if !slices.Contains(client.GetDownloadingStatus(), debridTorrent.Status) { break } time.Sleep(time.Duration(q.RefreshInterval) * time.Second) @@ -102,14 +103,51 @@ func (q *QBit) ProcessFiles(torrent *Torrent, 
debridTorrent *debrid.Torrent, arr err error ) debridTorrent.Arr = arr + + // File is done downloading at this stage + + // Check if debrid supports webdav by checking cache if isSymlink { - torrentSymlinkPath, err = q.ProcessSymlink(torrent) // /mnt/symlinks/{category}/MyTVShow/ + cache, ok := svc.Debrid.Caches[debridTorrent.Debrid] + if ok { + q.logger.Info().Msgf("Using internal webdav for %s", debridTorrent.Debrid) + // Use webdav to download the file + err := cache.ProcessTorrent(debridTorrent, true) + if err != nil { + return + } + rclonePath := filepath.Join(debridTorrent.MountPath, debridTorrent.Name) + + // Check if folder exists here + if _, err := os.Stat(rclonePath); os.IsNotExist(err) { + q.logger.Debug().Msgf("Folder does not exist: %s", rclonePath) + + // Check if torrent is in the listing + listing := cache.GetListing() + for _, t := range listing { + if t.Name() == debridTorrent.Name { + q.logger.Debug().Msgf("Torrent found in listing: %s", debridTorrent.Name) + } + } + + // Check if torrent is in the webdav + if t := cache.GetTorrentByName(debridTorrent.Name); t == nil { + q.logger.Debug().Msgf("Torrent not found in webdav: %s", debridTorrent.Name) + } + } + + torrentSymlinkPath, err = q.createSymlinks(debridTorrent, rclonePath, debridTorrent.Name) + + } else { + // User is using either zurg or debrid webdav + torrentSymlinkPath, err = q.ProcessSymlink(torrent) // /mnt/symlinks/{category}/MyTVShow/ + } } else { torrentSymlinkPath, err = q.ProcessManualFile(torrent) } if err != nil { q.MarkAsFailed(torrent) - go debridClient.DeleteTorrent(debridTorrent) + go client.DeleteTorrent(debridTorrent) q.logger.Info().Msgf("Error: %v", err) return } diff --git a/pkg/qbit/types.go b/pkg/qbit/types.go index eaa159c..5355ff6 100644 --- a/pkg/qbit/types.go +++ b/pkg/qbit/types.go @@ -2,7 +2,7 @@ package qbit import ( "fmt" - "github.com/sirrobot01/debrid-blackhole/pkg/debrid/torrent" + "github.com/sirrobot01/debrid-blackhole/pkg/debrid/types" "sync" ) @@ 
-173,10 +173,10 @@ type TorrentCategory struct { } type Torrent struct { - ID string `json:"id"` - DebridTorrent *torrent.Torrent `json:"-"` - Debrid string `json:"debrid"` - TorrentPath string `json:"-"` + ID string `json:"id"` + DebridTorrent *types.Torrent `json:"-"` + Debrid string `json:"debrid"` + TorrentPath string `json:"-"` AddedOn int64 `json:"added_on,omitempty"` AmountLeft int64 `json:"amount_left"` diff --git a/pkg/repair/repair.go b/pkg/repair/repair.go index 3365030..9e6f0f6 100644 --- a/pkg/repair/repair.go +++ b/pkg/repair/repair.go @@ -10,7 +10,7 @@ import ( "github.com/sirrobot01/debrid-blackhole/internal/logger" "github.com/sirrobot01/debrid-blackhole/internal/request" "github.com/sirrobot01/debrid-blackhole/pkg/arr" - "github.com/sirrobot01/debrid-blackhole/pkg/debrid/debrid" + "github.com/sirrobot01/debrid-blackhole/pkg/debrid/types" "golang.org/x/sync/errgroup" "net" "net/http" @@ -29,7 +29,7 @@ import ( type Repair struct { Jobs map[string]*Job arrs *arr.Storage - deb debrid.Client + deb types.Client duration time.Duration runOnStart bool ZurgURL string diff --git a/pkg/service/service.go b/pkg/service/service.go index 104a7c6..a5e0c12 100644 --- a/pkg/service/service.go +++ b/pkg/service/service.go @@ -2,8 +2,7 @@ package service import ( "github.com/sirrobot01/debrid-blackhole/pkg/arr" - "github.com/sirrobot01/debrid-blackhole/pkg/debrid" - "github.com/sirrobot01/debrid-blackhole/pkg/debrid/engine" + "github.com/sirrobot01/debrid-blackhole/pkg/debrid/debrid" "github.com/sirrobot01/debrid-blackhole/pkg/repair" "sync" ) @@ -11,7 +10,7 @@ import ( type Service struct { Repair *repair.Repair Arr *arr.Storage - Debrid *engine.Engine + Debrid *debrid.Engine } var ( @@ -22,7 +21,7 @@ var ( func New() *Service { once.Do(func() { arrs := arr.NewStorage() - deb := debrid.New() + deb := debrid.NewEngine() instance = &Service{ Repair: repair.New(arrs), Arr: arrs, @@ -42,7 +41,7 @@ func GetService() *Service { func Update() *Service { arrs := 
arr.NewStorage() - deb := debrid.New() + deb := debrid.NewEngine() instance = &Service{ Repair: repair.New(arrs), Arr: arrs, @@ -51,6 +50,6 @@ func Update() *Service { return instance } -func GetDebrid() *engine.Engine { +func GetDebrid() *debrid.Engine { return GetService().Debrid } diff --git a/pkg/webdav/file.go b/pkg/webdav/file.go index 6fdc586..6c8b84a 100644 --- a/pkg/webdav/file.go +++ b/pkg/webdav/file.go @@ -2,6 +2,7 @@ package webdav import ( "fmt" + "github.com/sirrobot01/debrid-blackhole/pkg/debrid/debrid" "io" "net/http" "os" @@ -20,18 +21,19 @@ var sharedClient = &http.Client{ } type File struct { - cache *Cache + cache *debrid.Cache fileId string torrentId string - size int64 - offset int64 - isDir bool - children []os.FileInfo - reader io.ReadCloser - seekPending bool - content []byte - name string + size int64 + offset int64 + isDir bool + children []os.FileInfo + reader io.ReadCloser + seekPending bool + content []byte + name string + metadataOnly bool downloadLink string link string @@ -49,11 +51,12 @@ func (f *File) Close() error { func (f *File) GetDownloadLink() string { // Check if we already have a final URL cached - if f.downloadLink != "" { + + if f.downloadLink != "" && isValidURL(f.downloadLink) { return f.downloadLink } downloadLink := f.cache.GetDownloadLink(f.torrentId, f.name, f.link) - if downloadLink != "" { + if downloadLink != "" && isValidURL(downloadLink) { f.downloadLink = downloadLink return downloadLink } @@ -65,6 +68,9 @@ func (f *File) Read(p []byte) (n int, err error) { if f.isDir { return 0, os.ErrInvalid } + if f.metadataOnly { + return 0, io.EOF + } // If file content is preloaded, read from memory. 
if f.content != nil { diff --git a/pkg/webdav/handler.go b/pkg/webdav/handler.go index 94d6656..b17acb2 100644 --- a/pkg/webdav/handler.go +++ b/pkg/webdav/handler.go @@ -2,11 +2,13 @@ package webdav import ( "bytes" + "compress/gzip" "context" "errors" "fmt" "github.com/rs/zerolog" - "github.com/sirrobot01/debrid-blackhole/pkg/debrid/torrent" + "github.com/sirrobot01/debrid-blackhole/pkg/debrid/debrid" + "github.com/sirrobot01/debrid-blackhole/pkg/debrid/types" "golang.org/x/net/webdav" "html/template" "io" @@ -14,7 +16,7 @@ import ( "net/http" "net/http/httptest" "os" - "path" + path "path/filepath" "slices" "strings" "sync" @@ -24,7 +26,7 @@ import ( type Handler struct { Name string logger zerolog.Logger - cache *Cache + cache *debrid.Cache lastRefresh time.Time refreshMutex sync.Mutex RootPath string @@ -33,7 +35,7 @@ type Handler struct { ctx context.Context } -func NewHandler(name string, cache *Cache, logger zerolog.Logger) *Handler { +func NewHandler(name string, cache *debrid.Cache, logger zerolog.Logger) *Handler { h := &Handler{ Name: name, cache: cache, @@ -67,9 +69,7 @@ func (h *Handler) RemoveAll(ctx context.Context, name string) error { if filename == "" { h.cache.GetClient().DeleteTorrent(cachedTorrent.Torrent) - go h.cache.refreshListings() - go h.cache.refreshTorrents() - go h.cache.resetPropfindResponse() + h.cache.OnRemove(cachedTorrent.Id) return nil } @@ -117,22 +117,29 @@ func (h *Handler) OpenFile(ctx context.Context, name string, flag int, perm os.F name = path.Clean("/" + name) rootDir := h.getRootPath() + metadataOnly := false + if ctx.Value("metadataOnly") != nil { + metadataOnly = true + } + // Fast path optimization with a map lookup instead of string comparisons switch name { case rootDir: return &File{ - cache: h.cache, - isDir: true, - children: h.getParentFiles(), - name: "/", + cache: h.cache, + isDir: true, + children: h.getParentFiles(), + name: "/", + metadataOnly: metadataOnly, }, nil case path.Join(rootDir, "version.txt"): 
return &File{ - cache: h.cache, - isDir: false, - content: []byte("v1.0.0"), - name: "version.txt", - size: int64(len("v1.0.0")), + cache: h.cache, + isDir: false, + content: []byte("v1.0.0"), + name: "version.txt", + size: int64(len("v1.0.0")), + metadataOnly: metadataOnly, }, nil } @@ -145,11 +152,12 @@ func (h *Handler) OpenFile(ctx context.Context, name string, flag int, perm os.F children := h.getTorrentsFolders() return &File{ - cache: h.cache, - isDir: true, - children: children, - name: folderName, - size: 0, + cache: h.cache, + isDir: true, + children: children, + name: folderName, + size: 0, + metadataOnly: metadataOnly, }, nil } @@ -168,12 +176,13 @@ func (h *Handler) OpenFile(ctx context.Context, name string, flag int, perm os.F if len(parts) == 2 { // Torrent folder level return &File{ - cache: h.cache, - torrentId: cachedTorrent.Id, - isDir: true, - children: h.getFileInfos(cachedTorrent.Torrent), - name: cachedTorrent.Name, - size: cachedTorrent.Size, + cache: h.cache, + torrentId: cachedTorrent.Id, + isDir: true, + children: h.getFileInfos(cachedTorrent.Torrent), + name: cachedTorrent.Name, + size: cachedTorrent.Size, + metadataOnly: metadataOnly, }, nil } @@ -189,6 +198,7 @@ func (h *Handler) OpenFile(ctx context.Context, name string, flag int, perm os.F size: file.Size, link: file.Link, downloadLink: file.DownloadLink, + metadataOnly: metadataOnly, } return fi, nil } @@ -207,7 +217,7 @@ func (h *Handler) Stat(ctx context.Context, name string) (os.FileInfo, error) { return f.Stat() } -func (h *Handler) getFileInfos(torrent *torrent.Torrent) []os.FileInfo { +func (h *Handler) getFileInfos(torrent *types.Torrent) []os.FileInfo { files := make([]os.FileInfo, 0, len(torrent.Files)) now := time.Now() for _, file := range torrent.Files { @@ -232,34 +242,28 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { // Cache PROPFIND responses for a short time to reduce load. 
if r.Method == "PROPFIND" { // Determine the Depth; default to "1" if not provided. + // Set metadata only + ctx := context.WithValue(r.Context(), "metadataOnly", true) + r = r.WithContext(ctx) + cleanPath := path.Clean(r.URL.Path) depth := r.Header.Get("Depth") if depth == "" { depth = "1" } // Use both path and Depth header to form the cache key. - cacheKey := fmt.Sprintf("propfind:%s:%s", r.URL.Path, depth) + cacheKey := fmt.Sprintf("propfind:%s:%s", cleanPath, depth) // Determine TTL based on the requested folder: // - If the path is exactly the parent folder (which changes frequently), // use a short TTL. // - Otherwise, for deeper (torrent folder) paths, use a longer TTL. - var ttl time.Duration + ttl := 30 * time.Minute if h.isParentPath(r.URL.Path) { - ttl = 10 * time.Second - } else { - ttl = 1 * time.Minute + ttl = 20 * time.Second } - // Check if we have a cached response that hasn't expired. - if cached, ok := h.cache.propfindResp.Load(cacheKey); ok { - if respCache, ok := cached.(propfindResponse); ok { - if time.Since(respCache.ts) < ttl { - w.Header().Set("Content-Type", "application/xml; charset=utf-8") - w.Header().Set("Content-Length", fmt.Sprintf("%d", len(respCache.data))) - w.Write(respCache.data) - return - } - } + if served := h.serveFromCacheIfValid(w, r, cacheKey, ttl); served { + return } // No valid cache entry; process the PROPFIND request. @@ -276,10 +280,22 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { handler.ServeHTTP(responseRecorder, r) responseData := responseRecorder.Body.Bytes() - // Store the new response in the cache. 
- h.cache.propfindResp.Store(cacheKey, propfindResponse{ - data: responseData, - ts: time.Now(), + // Create compressed version + var gzippedData []byte + if len(responseData) > 0 { + var buf bytes.Buffer + gzw := gzip.NewWriter(&buf) + if _, err := gzw.Write(responseData); err == nil { + if err := gzw.Close(); err == nil { + gzippedData = buf.Bytes() + } + } + } + + h.cache.PropfindResp.Store(cacheKey, debrid.PropfindResponse{ + Data: responseData, + GzippedData: gzippedData, + Ts: time.Now(), }) // Forward the captured response to the client. @@ -332,58 +348,6 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { // Serve the file with the correct modification time. // http.ServeContent automatically handles Range requests. http.ServeContent(w, r, fileName, fi.ModTime(), rs) - - // Set headers to indicate support for range requests and content type. - //fileName := fi.Name() - //w.Header().Set("Accept-Ranges", "bytes") - //w.Header().Set("Content-Type", getContentType(fileName)) - // - //// If a Range header is provided, parse and handle partial content. 
- //rangeHeader := r.Header.Get("Range") - //if rangeHeader != "" { - // parts := strings.Split(strings.TrimPrefix(rangeHeader, "bytes="), "-") - // if len(parts) == 2 { - // start, startErr := strconv.ParseInt(parts[0], 10, 64) - // end := fi.Size() - 1 - // if parts[1] != "" { - // var endErr error - // end, endErr = strconv.ParseInt(parts[1], 10, 64) - // if endErr != nil { - // end = fi.Size() - 1 - // } - // } - // - // if startErr == nil && start < fi.Size() { - // if start > end { - // start, end = end, start - // } - // if end >= fi.Size() { - // end = fi.Size() - 1 - // } - // - // contentLength := end - start + 1 - // w.Header().Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", start, end, fi.Size())) - // w.Header().Set("Content-Length", fmt.Sprintf("%d", contentLength)) - // w.WriteHeader(http.StatusPartialContent) - // - // // Attempt to cast to your concrete File type to call Seek. - // if file, ok := f.(*File); ok { - // _, err = file.Seek(start, io.SeekStart) - // if err != nil { - // h.logger.Error().Err(err).Msg("Failed to seek in file") - // http.Error(w, "Server Error", http.StatusInternalServerError) - // return - // } - // - // limitedReader := io.LimitReader(f, contentLength) - // h.ioCopy(limitedReader, w) - // return - // } - // } - // } - //} - //w.Header().Set("Content-Length", fmt.Sprintf("%d", fi.Size())) - //h.ioCopy(f, w) return } @@ -433,13 +397,43 @@ func (h *Handler) isParentPath(_path string) bool { rootPath := h.getRootPath() parents := h.getParentItems() for _, p := range parents { - if _path == path.Join(rootPath, p) { + if path.Clean(_path) == path.Clean(path.Join(rootPath, p)) { return true } } return false } +func (h *Handler) serveFromCacheIfValid(w http.ResponseWriter, r *http.Request, cacheKey string, ttl time.Duration) bool { + cached, ok := h.cache.PropfindResp.Load(cacheKey) + if !ok { + return false + } + + respCache, ok := cached.(debrid.PropfindResponse) + if !ok { + return false + } + + if time.Since(respCache.Ts) 
>= ttl { + // Remove expired cache entry + h.cache.PropfindResp.Delete(cacheKey) + return false + } + w.Header().Set("Content-Type", "application/xml; charset=utf-8") + + if acceptsGzip(r) && len(respCache.GzippedData) > 0 { + w.Header().Set("Content-Encoding", "gzip") + w.Header().Set("Vary", "Accept-Encoding") + w.Header().Set("Content-Length", fmt.Sprintf("%d", len(respCache.GzippedData))) + w.Write(respCache.GzippedData) + } else { + w.Header().Set("Content-Length", fmt.Sprintf("%d", len(respCache.Data))) + w.Write(respCache.Data) + } + return true +} + func (h *Handler) serveDirectory(w http.ResponseWriter, r *http.Request, file webdav.File) { var children []os.FileInfo if f, ok := file.(*File); ok { diff --git a/pkg/webdav/misc.go b/pkg/webdav/misc.go index ebbaa7f..2de354d 100644 --- a/pkg/webdav/misc.go +++ b/pkg/webdav/misc.go @@ -1,6 +1,10 @@ package webdav -import "strings" +import ( + "net/http" + "net/url" + "strings" +) // getName: Returns the torrent name and filename from the path // /webdav/alldebrid/__all__/TorrentName @@ -12,3 +16,13 @@ func getName(rootDir, path string) (string, string) { } return parts[0], strings.Join(parts[1:], "/") } + +func acceptsGzip(r *http.Request) bool { + return strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") +} + +func isValidURL(str string) bool { + u, err := url.Parse(str) + // A valid URL should parse without error, and have a non-empty scheme and host. 
+ return err == nil && u.Scheme != "" && u.Host != "" +} diff --git a/pkg/webdav/webdav.go b/pkg/webdav/webdav.go index 1d108ae..3617943 100644 --- a/pkg/webdav/webdav.go +++ b/pkg/webdav/webdav.go @@ -20,9 +20,7 @@ func New() *WebDav { w := &WebDav{ Handlers: make([]*Handler, 0), } - debrids := svc.Debrid.GetDebrids() - cacheManager := NewCacheManager(debrids) - for name, c := range cacheManager.GetCaches() { + for name, c := range svc.Debrid.Caches { h := NewHandler(name, c, logger.NewLogger(fmt.Sprintf("%s-webdav", name))) w.Handlers = append(w.Handlers, h) } diff --git a/pkg/webdav/workers.go b/pkg/webdav/workers.go deleted file mode 100644 index 376e1e9..0000000 --- a/pkg/webdav/workers.go +++ /dev/null @@ -1,69 +0,0 @@ -package webdav - -import "time" - -func (c *Cache) Refresh() error { - // For now, we just want to refresh the listing and download links - c.logger.Info().Msg("Starting cache refresh workers") - go c.refreshListingWorker() - go c.refreshDownloadLinksWorker() - go c.refreshTorrentsWorker() - return nil -} - -func (c *Cache) refreshListingWorker() { - refreshTicker := time.NewTicker(10 * time.Second) - defer refreshTicker.Stop() - - for { - select { - case <-refreshTicker.C: - if c.listingRefreshMu.TryLock() { - func() { - defer c.listingRefreshMu.Unlock() - c.refreshListings() - }() - } else { - c.logger.Debug().Msg("Refresh already in progress") - } - } - } -} - -func (c *Cache) refreshDownloadLinksWorker() { - refreshTicker := time.NewTicker(40 * time.Minute) - defer refreshTicker.Stop() - - for { - select { - case <-refreshTicker.C: - if c.downloadLinksRefreshMu.TryLock() { - func() { - defer c.downloadLinksRefreshMu.Unlock() - c.refreshDownloadLinks() - }() - } else { - c.logger.Debug().Msg("Refresh already in progress") - } - } - } -} - -func (c *Cache) refreshTorrentsWorker() { - refreshTicker := time.NewTicker(5 * time.Second) - defer refreshTicker.Stop() - - for { - select { - case <-refreshTicker.C: - if c.listingRefreshMu.TryLock() { 
- func() { - defer c.listingRefreshMu.Unlock() - c.refreshTorrents() - }() - } else { - c.logger.Debug().Msg("Refresh already in progress") - } - } - } -} From 8d494fc27740d45531f42ade1ceb0488a910b54c Mon Sep 17 00:00:00 2001 From: Mukhtar Akere Date: Fri, 21 Mar 2025 04:10:16 +0100 Subject: [PATCH 06/39] Update repair; fix minor bugs with namings --- go.mod | 1 + go.sum | 2 + internal/logger/logger.go | 3 + internal/request/request.go | 19 ++ pkg/debrid/alldebrid/alldebrid.go | 6 +- pkg/debrid/debrid/cache.go | 5 +- pkg/debrid/debrid/refresh.go | 28 +-- pkg/debrid/debrid/{workers.go => worker.go} | 0 pkg/debrid/debrid/xml.go | 118 +++++++++++ pkg/debrid/debrid_link/debrid_link.go | 6 +- pkg/debrid/realdebrid/realdebrid.go | 14 +- pkg/debrid/torbox/torbox.go | 8 +- pkg/debrid/types/debrid.go | 2 +- pkg/qbit/import.go | 2 +- pkg/qbit/torrent.go | 28 +-- pkg/repair/repair.go | 218 ++++++++++++++++++-- pkg/service/service.go | 4 +- pkg/web/server.go | 25 ++- pkg/web/web/repair.html | 14 +- pkg/webdav/file.go | 77 +++++-- pkg/webdav/handler.go | 26 +-- 21 files changed, 455 insertions(+), 151 deletions(-) rename pkg/debrid/debrid/{workers.go => worker.go} (100%) create mode 100644 pkg/debrid/debrid/xml.go diff --git a/go.mod b/go.mod index 852c34c..b8852ee 100644 --- a/go.mod +++ b/go.mod @@ -24,6 +24,7 @@ require ( require ( github.com/anacrolix/missinggo v1.3.0 // indirect github.com/anacrolix/missinggo/v2 v2.7.3 // indirect + github.com/beevik/etree v1.5.0 // indirect github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect diff --git a/go.sum b/go.sum index 32e6b7a..72b0544 100644 --- a/go.sum +++ b/go.sum @@ -36,6 +36,8 @@ github.com/anacrolix/tagflag v1.1.0/go.mod h1:Scxs9CV10NQatSmbyjqmqmeQNwGzlNe0CM github.com/anacrolix/torrent v1.55.0 h1:s9yh/YGdPmbN9dTa+0Inh2dLdrLQRvEAj1jdFW/Hdd8= github.com/anacrolix/torrent 
v1.55.0/go.mod h1:sBdZHBSZNj4de0m+EbYg7vvs/G/STubxu/GzzNbojsE= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/beevik/etree v1.5.0 h1:iaQZFSDS+3kYZiGoc9uKeOkUY3nYMXOKLl6KIJxiJWs= +github.com/beevik/etree v1.5.0/go.mod h1:gPNJNaBGVZ9AwsidazFZyygnd+0pAU38N4D+WemwKNs= github.com/benbjohnson/immutable v0.2.0/go.mod h1:uc6OHo6PN2++n98KHLxW8ef4W42ylHiQSENghE1ezxI= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= diff --git a/internal/logger/logger.go b/internal/logger/logger.go index 5ad3229..0a6b18a 100644 --- a/internal/logger/logger.go +++ b/internal/logger/logger.go @@ -73,6 +73,7 @@ func NewLogger(prefix string) zerolog.Logger { Level(zerolog.InfoLevel) // Set the log level + level = strings.ToLower(level) switch level { case "debug": logger = logger.Level(zerolog.DebugLevel) @@ -82,6 +83,8 @@ func NewLogger(prefix string) zerolog.Logger { logger = logger.Level(zerolog.WarnLevel) case "error": logger = logger.Level(zerolog.ErrorLevel) + case "trace": + logger = logger.Level(zerolog.TraceLevel) } return logger } diff --git a/internal/request/request.go b/internal/request/request.go index d59cdac..2643d94 100644 --- a/internal/request/request.go +++ b/internal/request/request.go @@ -2,6 +2,7 @@ package request import ( "bytes" + "compress/gzip" "context" "crypto/tls" "fmt" @@ -288,3 +289,21 @@ func JSONResponse(w http.ResponseWriter, data interface{}, code int) { return } } + +func Gzip(body []byte) []byte { + + var b bytes.Buffer + if len(body) == 0 { + return nil + } + gz := gzip.NewWriter(&b) + _, err := gz.Write(body) + if err != nil { + return nil + } + err = gz.Close() + if err != nil { + return nil + } + return b.Bytes() +} diff --git a/pkg/debrid/alldebrid/alldebrid.go b/pkg/debrid/alldebrid/alldebrid.go index c1f55bb..0ee2708 100644 --- 
a/pkg/debrid/alldebrid/alldebrid.go +++ b/pkg/debrid/alldebrid/alldebrid.go @@ -204,12 +204,12 @@ func (ad *AllDebrid) CheckStatus(torrent *types.Torrent, isSymlink bool) (*types return torrent, nil } -func (ad *AllDebrid) DeleteTorrent(torrent *types.Torrent) { - url := fmt.Sprintf("%s/magnet/delete?id=%s", ad.Host, torrent.Id) +func (ad *AllDebrid) DeleteTorrent(torrentId string) { + url := fmt.Sprintf("%s/magnet/delete?id=%s", ad.Host, torrentId) req, _ := http.NewRequest(http.MethodGet, url, nil) _, err := ad.client.MakeRequest(req) if err == nil { - ad.logger.Info().Msgf("Torrent: %s deleted", torrent.Name) + ad.logger.Info().Msgf("Torrent: %s deleted", torrentId) } else { ad.logger.Info().Msgf("Error deleting torrent: %s", err) } diff --git a/pkg/debrid/debrid/cache.go b/pkg/debrid/debrid/cache.go index abcc804..be51e9d 100644 --- a/pkg/debrid/debrid/cache.go +++ b/pkg/debrid/debrid/cache.go @@ -208,7 +208,7 @@ func (c *Cache) load() (map[string]*CachedTorrent, error) { if len(ct.Files) != 0 { // We can assume the torrent is complete ct.IsComplete = true - ct.Torrent.Name = utils.RemoveExtension(ct.Torrent.Filename) // Update the name + ct.Torrent.Name = utils.RemoveExtension(ct.Torrent.OriginalFilename) // Update the name torrents[ct.Id] = &ct } } @@ -403,9 +403,6 @@ func (c *Cache) ProcessTorrent(t *types.Torrent, refreshRclone bool) error { IsComplete: len(t.Files) > 0, } c.setTorrent(ct) - if err := c.RefreshRclone(); err != nil { - c.logger.Debug().Err(err).Msg("Failed to refresh rclone") - } return nil } diff --git a/pkg/debrid/debrid/refresh.go b/pkg/debrid/debrid/refresh.go index abe0b39..07d9752 100644 --- a/pkg/debrid/debrid/refresh.go +++ b/pkg/debrid/debrid/refresh.go @@ -7,8 +7,6 @@ import ( "github.com/sirrobot01/debrid-blackhole/pkg/debrid/types" "net/http" "os" - "path" - "path/filepath" "sort" "sync" "time" @@ -42,7 +40,7 @@ func (c *Cache) refreshListings() { } // Atomic store of the complete ready-to-use slice c.listings.Store(files) - 
c.resetPropfindResponse() + _ = c.RefreshXml() if err := c.RefreshRclone(); err != nil { c.logger.Debug().Err(err).Msg("Failed to refresh rclone") } @@ -179,27 +177,3 @@ func (c *Cache) refreshDownloadLinks() { c.downloadLinks[k] = v.DownloadLink } } - -func (c *Cache) resetPropfindResponse() { - // Right now, parents are hardcoded - parents := []string{"__all__", "torrents"} - // Reset only the parent directories - // Convert the parents to a keys - // This is a bit hacky, but it works - // Instead of deleting all the keys, we only delete the parent keys, e.g __all__/ or torrents/ - keys := make([]string, 0, len(parents)) - for _, p := range parents { - // Construct the key - // construct url - url := filepath.Join("/webdav", c.client.GetName(), p) - url = path.Clean(url) - key0 := fmt.Sprintf("propfind:%s:0", url) - key1 := fmt.Sprintf("propfind:%s:1", url) - keys = append(keys, key0, key1) - } - - // Delete the keys - for _, k := range keys { - c.PropfindResp.Delete(k) - } -} diff --git a/pkg/debrid/debrid/workers.go b/pkg/debrid/debrid/worker.go similarity index 100% rename from pkg/debrid/debrid/workers.go rename to pkg/debrid/debrid/worker.go diff --git a/pkg/debrid/debrid/xml.go b/pkg/debrid/debrid/xml.go new file mode 100644 index 0000000..be3938b --- /dev/null +++ b/pkg/debrid/debrid/xml.go @@ -0,0 +1,118 @@ +package debrid + +import ( + "fmt" + "github.com/beevik/etree" + "github.com/sirrobot01/debrid-blackhole/internal/request" + "net/http" + "net/url" + path "path/filepath" + "time" +) + +func (c *Cache) RefreshXml() error { + parents := []string{"__all__", "torrents"} + for _, parent := range parents { + if err := c.refreshParentXml(parent); err != nil { + return fmt.Errorf("failed to refresh XML for %s: %v", parent, err) + } + } + + c.logger.Debug().Msgf("Refreshed XML cache for %s", c.client.GetName()) + return nil +} + +func (c *Cache) refreshParentXml(parent string) error { + // Define the WebDAV namespace + davNS := "DAV:" + + // Create the root 
multistatus element + doc := etree.NewDocument() + doc.CreateProcInst("xml", `version="1.0" encoding="UTF-8"`) + + multistatus := doc.CreateElement("D:multistatus") + multistatus.CreateAttr("xmlns:D", davNS) + + // Get the current timestamp in RFC1123 format (WebDAV format) + currentTime := time.Now().UTC().Format(http.TimeFormat) + + // Add the parent directory + parentPath := fmt.Sprintf("/webdav/%s/%s/", c.client.GetName(), parent) + addDirectoryResponse(multistatus, parentPath, parent, currentTime) + + // Add torrents to the XML + torrents := c.GetListing() + for _, torrent := range torrents { + torrentName := torrent.Name() + torrentPath := fmt.Sprintf("/webdav/%s/%s/%s/", + c.client.GetName(), + url.PathEscape(torrentName), + parent, + ) + + addDirectoryResponse(multistatus, torrentPath, torrentName, currentTime) + } + + // Convert to XML string + xmlData, err := doc.WriteToBytes() + if err != nil { + return fmt.Errorf("failed to generate XML: %v", err) + } + + // Store in cache + // Construct the keys + baseUrl := path.Clean(fmt.Sprintf("/webdav/%s/%s", c.client.GetName())) + key0 := fmt.Sprintf("propfind:%s:0", baseUrl) + key1 := fmt.Sprintf("propfind:%s:1", baseUrl) + + res := PropfindResponse{ + Data: xmlData, + GzippedData: request.Gzip(xmlData), + Ts: time.Now(), + } + c.PropfindResp.Store(key0, res) + c.PropfindResp.Store(key1, res) + return nil +} + +func addDirectoryResponse(multistatus *etree.Element, href, displayName, modTime string) *etree.Element { + responseElem := multistatus.CreateElement("D:response") + + // Add href + hrefElem := responseElem.CreateElement("D:href") + hrefElem.SetText(href) + + // Add propstat + propstatElem := responseElem.CreateElement("D:propstat") + + // Add prop + propElem := propstatElem.CreateElement("D:prop") + + // Add resource type (collection = directory) + resourceTypeElem := propElem.CreateElement("D:resourcetype") + resourceTypeElem.CreateElement("D:collection") + + // Add display name + displayNameElem := 
propElem.CreateElement("D:displayname") + displayNameElem.SetText(displayName) + + // Add last modified time + lastModElem := propElem.CreateElement("D:getlastmodified") + lastModElem.SetText(modTime) + + // Add supported lock + lockElem := propElem.CreateElement("D:supportedlock") + lockEntryElem := lockElem.CreateElement("D:lockentry") + + lockScopeElem := lockEntryElem.CreateElement("D:lockscope") + lockScopeElem.CreateElement("D:exclusive") + + lockTypeElem := lockEntryElem.CreateElement("D:locktype") + lockTypeElem.CreateElement("D:write") + + // Add status + statusElem := propstatElem.CreateElement("D:status") + statusElem.SetText("HTTP/1.1 200 OK") + + return responseElem +} diff --git a/pkg/debrid/debrid_link/debrid_link.go b/pkg/debrid/debrid_link/debrid_link.go index fd19172..8027be4 100644 --- a/pkg/debrid/debrid_link/debrid_link.go +++ b/pkg/debrid/debrid_link/debrid_link.go @@ -223,12 +223,12 @@ func (dl *DebridLink) CheckStatus(torrent *types.Torrent, isSymlink bool) (*type return torrent, nil } -func (dl *DebridLink) DeleteTorrent(torrent *types.Torrent) { - url := fmt.Sprintf("%s/seedbox/%s/remove", dl.Host, torrent.Id) +func (dl *DebridLink) DeleteTorrent(torrentId string) { + url := fmt.Sprintf("%s/seedbox/%s/remove", dl.Host, torrentId) req, _ := http.NewRequest(http.MethodDelete, url, nil) _, err := dl.client.MakeRequest(req) if err == nil { - dl.logger.Info().Msgf("Torrent: %s deleted", torrent.Name) + dl.logger.Info().Msgf("Torrent: %s deleted", torrentId) } else { dl.logger.Info().Msgf("Error deleting torrent: %s", err) } diff --git a/pkg/debrid/realdebrid/realdebrid.go b/pkg/debrid/realdebrid/realdebrid.go index fa94275..8838ab9 100644 --- a/pkg/debrid/realdebrid/realdebrid.go +++ b/pkg/debrid/realdebrid/realdebrid.go @@ -173,7 +173,7 @@ func (r *RealDebrid) UpdateTorrent(t *types.Torrent) error { if err != nil { return err } - name := utils.RemoveInvalidChars(data.OriginalFilename) + name := utils.RemoveExtension(data.OriginalFilename) 
t.Name = name t.Bytes = data.Bytes t.Folder = name @@ -182,7 +182,7 @@ func (r *RealDebrid) UpdateTorrent(t *types.Torrent) error { t.Speed = data.Speed t.Seeders = data.Seeders t.Filename = data.Filename - t.OriginalFilename = data.OriginalFilename + t.OriginalFilename = name t.Links = data.Links t.MountPath = r.MountPath t.Debrid = r.Name @@ -208,7 +208,7 @@ func (r *RealDebrid) CheckStatus(t *types.Torrent, isSymlink bool) (*types.Torre t.Name = name // Important because some magnet changes the name t.Folder = name t.Filename = data.Filename - t.OriginalFilename = data.OriginalFilename + t.OriginalFilename = name t.Bytes = data.Bytes t.Progress = data.Progress t.Speed = data.Speed @@ -257,12 +257,12 @@ func (r *RealDebrid) CheckStatus(t *types.Torrent, isSymlink bool) (*types.Torre return t, nil } -func (r *RealDebrid) DeleteTorrent(torrent *types.Torrent) { - url := fmt.Sprintf("%s/torrents/delete/%s", r.Host, torrent.Id) +func (r *RealDebrid) DeleteTorrent(torrentId string) { + url := fmt.Sprintf("%s/torrents/delete/%s", r.Host, torrentId) req, _ := http.NewRequest(http.MethodDelete, url, nil) _, err := r.client.MakeRequest(req) if err == nil { - r.logger.Info().Msgf("Torrent: %s deleted", torrent.Name) + r.logger.Info().Msgf("Torrent: %s deleted", torrentId) } else { r.logger.Info().Msgf("Error deleting torrent: %s", err) } @@ -382,7 +382,7 @@ func (r *RealDebrid) getTorrents(offset int, limit int) (int, []*types.Torrent, } torrents = append(torrents, &types.Torrent{ Id: t.Id, - Name: utils.RemoveInvalidChars(t.Filename), + Name: utils.RemoveInvalidChars(t.Filename), // This changes when we get the files Bytes: t.Bytes, Progress: t.Progress, Status: t.Status, diff --git a/pkg/debrid/torbox/torbox.go b/pkg/debrid/torbox/torbox.go index ce83f2d..9c5e31e 100644 --- a/pkg/debrid/torbox/torbox.go +++ b/pkg/debrid/torbox/torbox.go @@ -232,14 +232,14 @@ func (tb *Torbox) CheckStatus(torrent *types.Torrent, isSymlink bool) (*types.To return torrent, nil } -func (tb 
*Torbox) DeleteTorrent(torrent *types.Torrent) { - url := fmt.Sprintf("%s/api/torrents/controltorrent/%s", tb.Host, torrent.Id) - payload := map[string]string{"torrent_id": torrent.Id, "action": "Delete"} +func (tb *Torbox) DeleteTorrent(torrentId string) { + url := fmt.Sprintf("%s/api/torrents/controltorrent/%s", tb.Host, torrentId) + payload := map[string]string{"torrent_id": torrentId, "action": "Delete"} jsonPayload, _ := json.Marshal(payload) req, _ := http.NewRequest(http.MethodDelete, url, bytes.NewBuffer(jsonPayload)) _, err := tb.client.MakeRequest(req) if err == nil { - tb.logger.Info().Msgf("Torrent: %s deleted", torrent.Name) + tb.logger.Info().Msgf("Torrent: %s deleted", torrentId) } else { tb.logger.Info().Msgf("Error deleting torrent: %s", err) } diff --git a/pkg/debrid/types/debrid.go b/pkg/debrid/types/debrid.go index 58b599a..e1adf4d 100644 --- a/pkg/debrid/types/debrid.go +++ b/pkg/debrid/types/debrid.go @@ -10,7 +10,7 @@ type Client interface { GenerateDownloadLinks(tr *Torrent) error GetDownloadLink(tr *Torrent, file *File) *File ConvertLinksToFiles(links []string) []File - DeleteTorrent(tr *Torrent) + DeleteTorrent(torrentId string) IsAvailable(infohashes []string) map[string]bool GetCheckCached() bool GetDownloadUncached() bool diff --git a/pkg/qbit/import.go b/pkg/qbit/import.go index 243a99e..724220e 100644 --- a/pkg/qbit/import.go +++ b/pkg/qbit/import.go @@ -78,7 +78,7 @@ func (i *ImportRequest) Process(q *QBit) (err error) { if err != nil || debridTorrent == nil { if debridTorrent != nil { dbClient := service.GetDebrid().GetByName(debridTorrent.Debrid) - go dbClient.DeleteTorrent(debridTorrent) + go dbClient.DeleteTorrent(debridTorrent.Id) } if err == nil { err = fmt.Errorf("failed to process torrent") diff --git a/pkg/qbit/torrent.go b/pkg/qbit/torrent.go index 67c855e..b1cdc8c 100644 --- a/pkg/qbit/torrent.go +++ b/pkg/qbit/torrent.go @@ -60,7 +60,7 @@ func (q *QBit) Process(ctx context.Context, magnet *utils.Magnet, category strin if 
err != nil || debridTorrent == nil { if debridTorrent != nil { dbClient := service.GetDebrid().GetByName(debridTorrent.Debrid) - go dbClient.DeleteTorrent(debridTorrent) + go dbClient.DeleteTorrent(debridTorrent.Id) } if err == nil { err = fmt.Errorf("failed to process torrent") @@ -81,7 +81,7 @@ func (q *QBit) ProcessFiles(torrent *Torrent, debridTorrent *debrid.Torrent, arr dbT, err := client.CheckStatus(debridTorrent, isSymlink) if err != nil { q.logger.Error().Msgf("Error checking status: %v", err) - go client.DeleteTorrent(debridTorrent) + go client.DeleteTorrent(debridTorrent.Id) q.MarkAsFailed(torrent) if err := arr.Refresh(); err != nil { q.logger.Error().Msgf("Error refreshing arr: %v", err) @@ -116,26 +116,10 @@ func (q *QBit) ProcessFiles(torrent *Torrent, debridTorrent *debrid.Torrent, arr if err != nil { return } - rclonePath := filepath.Join(debridTorrent.MountPath, debridTorrent.Name) - - // Check if folder exists here - if _, err := os.Stat(rclonePath); os.IsNotExist(err) { - q.logger.Debug().Msgf("Folder does not exist: %s", rclonePath) - - // Check if torrent is in the listing - listing := cache.GetListing() - for _, t := range listing { - if t.Name() == debridTorrent.Name { - q.logger.Debug().Msgf("Torrent found in listing: %s", debridTorrent.Name) - } - } - - // Check if torrent is in the webdav - if t := cache.GetTorrentByName(debridTorrent.Name); t == nil { - q.logger.Debug().Msgf("Torrent not found in webdav: %s", debridTorrent.Name) - } + if err := cache.RefreshRclone(); err != nil { + q.logger.Trace().Msgf("Error refreshing rclone: %v", err) } - + rclonePath := filepath.Join(debridTorrent.MountPath, debridTorrent.Name) torrentSymlinkPath, err = q.createSymlinks(debridTorrent, rclonePath, debridTorrent.Name) } else { @@ -147,7 +131,7 @@ func (q *QBit) ProcessFiles(torrent *Torrent, debridTorrent *debrid.Torrent, arr } if err != nil { q.MarkAsFailed(torrent) - go client.DeleteTorrent(debridTorrent) + go client.DeleteTorrent(debridTorrent.Id) 
q.logger.Info().Msgf("Error: %v", err) return } diff --git a/pkg/repair/repair.go b/pkg/repair/repair.go index 9e6f0f6..6c38cbf 100644 --- a/pkg/repair/repair.go +++ b/pkg/repair/repair.go @@ -10,7 +10,7 @@ import ( "github.com/sirrobot01/debrid-blackhole/internal/logger" "github.com/sirrobot01/debrid-blackhole/internal/request" "github.com/sirrobot01/debrid-blackhole/pkg/arr" - "github.com/sirrobot01/debrid-blackhole/pkg/debrid/types" + "github.com/sirrobot01/debrid-blackhole/pkg/debrid/debrid" "golang.org/x/sync/errgroup" "net" "net/http" @@ -29,7 +29,7 @@ import ( type Repair struct { Jobs map[string]*Job arrs *arr.Storage - deb types.Client + deb *debrid.Engine duration time.Duration runOnStart bool ZurgURL string @@ -39,7 +39,7 @@ type Repair struct { filename string } -func New(arrs *arr.Storage) *Repair { +func New(arrs *arr.Storage, engine *debrid.Engine) *Repair { cfg := config.GetConfig() duration, err := parseSchedule(cfg.Repair.Interval) if err != nil { @@ -53,6 +53,7 @@ func New(arrs *arr.Storage) *Repair { ZurgURL: cfg.Repair.ZurgURL, autoProcess: cfg.Repair.AutoProcess, filename: filepath.Join(cfg.Path, "repair.json"), + deb: engine, } if r.ZurgURL != "" { r.IsZurg = true @@ -66,10 +67,11 @@ func New(arrs *arr.Storage) *Repair { type JobStatus string const ( - JobStarted JobStatus = "started" - JobPending JobStatus = "pending" - JobFailed JobStatus = "failed" - JobCompleted JobStatus = "completed" + JobStarted JobStatus = "started" + JobPending JobStatus = "pending" + JobFailed JobStatus = "failed" + JobCompleted JobStatus = "completed" + JobProcessing JobStatus = "processing" ) type Job struct { @@ -185,12 +187,21 @@ func (r *Repair) AddJob(arrsNames []string, mediaIDs []string, autoProcess, recu r.reset(job) r.Jobs[key] = job go r.saveToFile() - err := r.repair(job) - go r.saveToFile() - return err + go func() { + if err := r.repair(job); err != nil { + r.logger.Error().Err(err).Msg("Error running repair") + r.logger.Error().Err(err).Msg("Error 
running repair") + job.FailedAt = time.Now() + job.Error = err.Error() + job.Status = JobFailed + job.CompletedAt = time.Now() + } + }() + return nil } func (r *Repair) repair(job *Job) error { + defer r.saveToFile() if err := r.preRunChecks(); err != nil { return err } @@ -331,6 +342,161 @@ func (r *Repair) Start(ctx context.Context) error { } } +func (r *Repair) getUniquePaths(media arr.Content) map[string]string { + // Use zurg setup to check file availability with zurg + // This reduces bandwidth usage significantly + + uniqueParents := make(map[string]string) + files := media.Files + for _, file := range files { + target := getSymlinkTarget(file.Path) + if target != "" { + file.IsSymlink = true + dir, f := filepath.Split(target) + parent := filepath.Base(filepath.Clean(dir)) + // Set target path folder/file.mkv + file.TargetPath = f + uniqueParents[parent] = target + } + } + return uniqueParents +} + +func (r *Repair) clean(job *Job) error { + // Create a new error group + g, ctx := errgroup.WithContext(context.Background()) + + uniqueItems := make(map[string]string) + mu := sync.Mutex{} + + // Limit concurrent goroutines + g.SetLimit(runtime.NumCPU() * 4) + + for _, a := range job.Arrs { + a := a // Capture range variable + g.Go(func() error { + // Check if context was canceled + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + items, err := r.cleanArr(job, a, "") + if err != nil { + r.logger.Error().Err(err).Msgf("Error cleaning %s", a) + return err + } + + // Safely append the found items to the shared slice + if len(items) > 0 { + mu.Lock() + for k, v := range items { + uniqueItems[k] = v + } + mu.Unlock() + } + + return nil + }) + } + + if err := g.Wait(); err != nil { + return err + } + + if len(uniqueItems) == 0 { + job.CompletedAt = time.Now() + job.Status = JobCompleted + + go func() { + if err := request.SendDiscordMessage("repair_clean_complete", "success", job.discordContext()); err != nil { + r.logger.Error().Msgf("Error sending 
discord message: %v", err) + } + }() + + return nil + } + + cache := r.deb.Caches["realdebrid"] + if cache == nil { + return fmt.Errorf("cache not found") + } + torrents := cache.GetTorrents() + + dangling := make([]string, 0) + for _, t := range torrents { + if _, ok := uniqueItems[t.Name]; !ok { + dangling = append(dangling, t.Id) + } + } + + r.logger.Info().Msgf("Found %d delapitated items", len(dangling)) + + if len(dangling) == 0 { + job.CompletedAt = time.Now() + job.Status = JobCompleted + return nil + } + + client := r.deb.Clients["realdebrid"] + if client == nil { + return fmt.Errorf("client not found") + } + for _, id := range dangling { + client.DeleteTorrent(id) + } + + return nil +} + +func (r *Repair) cleanArr(j *Job, _arr string, tmdbId string) (map[string]string, error) { + uniqueItems := make(map[string]string) + a := r.arrs.Get(_arr) + + r.logger.Info().Msgf("Starting repair for %s", a.Name) + media, err := a.GetMedia(tmdbId) + if err != nil { + r.logger.Info().Msgf("Failed to get %s media: %v", a.Name, err) + return uniqueItems, err + } + + // Create a new error group + g, ctx := errgroup.WithContext(context.Background()) + + mu := sync.Mutex{} + + // Limit concurrent goroutines + g.SetLimit(runtime.NumCPU() * 4) + + for _, m := range media { + m := m // Create a new variable scoped to the loop iteration + g.Go(func() error { + // Check if context was canceled + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + u := r.getUniquePaths(m) + for k, v := range u { + mu.Lock() + uniqueItems[k] = v + mu.Unlock() + } + return nil + }) + } + + if err := g.Wait(); err != nil { + return uniqueItems, err + } + + r.logger.Info().Msgf("Repair completed for %s. 
%d unique items", a.Name, len(uniqueItems)) + return uniqueItems, nil +} + func (r *Repair) repairArr(j *Job, _arr string, tmdbId string) ([]arr.ContentFile, error) { brokenItems := make([]arr.ContentFile, 0) a := r.arrs.Get(_arr) @@ -575,6 +741,7 @@ func (r *Repair) ProcessJob(id string) error { if job == nil { return fmt.Errorf("job %s not found", id) } + // All validation checks remain the same if job.Status != JobPending { return fmt.Errorf("job %s not pending", id) } @@ -598,6 +765,7 @@ func (r *Repair) ProcessJob(id string) error { // Create a new error group g := new(errgroup.Group) + g.SetLimit(runtime.NumCPU() * 4) for arrName, items := range brokenItems { items := items @@ -612,7 +780,6 @@ func (r *Repair) ProcessJob(id string) error { if err := a.DeleteFiles(items); err != nil { r.logger.Error().Err(err).Msgf("Failed to delete broken items for %s", arrName) return nil - } // Search for missing items if err := a.SearchMissing(items); err != nil { @@ -620,20 +787,29 @@ func (r *Repair) ProcessJob(id string) error { return nil } return nil - }) } - if err := g.Wait(); err != nil { - job.FailedAt = time.Now() - job.Error = err.Error() - job.CompletedAt = time.Now() - job.Status = JobFailed - return err - } + // Update job status to in-progress + job.Status = JobProcessing + r.saveToFile() - job.CompletedAt = time.Now() - job.Status = JobCompleted + // Launch a goroutine to wait for completion and update the job + go func() { + if err := g.Wait(); err != nil { + job.FailedAt = time.Now() + job.Error = err.Error() + job.CompletedAt = time.Now() + job.Status = JobFailed + r.logger.Error().Err(err).Msgf("Job %s failed", id) + } else { + job.CompletedAt = time.Now() + job.Status = JobCompleted + r.logger.Info().Msgf("Job %s completed successfully", id) + } + + r.saveToFile() + }() return nil } diff --git a/pkg/service/service.go b/pkg/service/service.go index a5e0c12..7574f06 100644 --- a/pkg/service/service.go +++ b/pkg/service/service.go @@ -23,7 +23,7 @@ func 
New() *Service { arrs := arr.NewStorage() deb := debrid.NewEngine() instance = &Service{ - Repair: repair.New(arrs), + Repair: repair.New(arrs, deb), Arr: arrs, Debrid: deb, } @@ -43,7 +43,7 @@ func Update() *Service { arrs := arr.NewStorage() deb := debrid.NewEngine() instance = &Service{ - Repair: repair.New(arrs), + Repair: repair.New(arrs, deb), Arr: arrs, Debrid: deb, } diff --git a/pkg/web/server.go b/pkg/web/server.go index ba87d09..ddb2bc9 100644 --- a/pkg/web/server.go +++ b/pkg/web/server.go @@ -375,15 +375,20 @@ func (ui *Handler) handleRepairMedia(w http.ResponseWriter, r *http.Request) { svc := service.GetService() - _arr := svc.Arr.Get(req.ArrName) - if _arr == nil { - http.Error(w, "No Arrs found to repair", http.StatusNotFound) - return + var arrs []string + + if req.ArrName != "" { + _arr := svc.Arr.Get(req.ArrName) + if _arr == nil { + http.Error(w, "No Arrs found to repair", http.StatusNotFound) + return + } + arrs = append(arrs, req.ArrName) } if req.Async { go func() { - if err := svc.Repair.AddJob([]string{req.ArrName}, req.MediaIds, req.AutoProcess, false); err != nil { + if err := svc.Repair.AddJob(arrs, req.MediaIds, req.AutoProcess, false); err != nil { ui.logger.Error().Err(err).Msg("Failed to repair media") } }() @@ -459,12 +464,10 @@ func (ui *Handler) handleProcessRepairJob(w http.ResponseWriter, r *http.Request http.Error(w, "No job ID provided", http.StatusBadRequest) return } - go func() { - svc := service.GetService() - if err := svc.Repair.ProcessJob(id); err != nil { - ui.logger.Error().Err(err).Msg("Failed to process repair job") - } - }() + svc := service.GetService() + if err := svc.Repair.ProcessJob(id); err != nil { + ui.logger.Error().Err(err).Msg("Failed to process repair job") + } w.WriteHeader(http.StatusOK) } diff --git a/pkg/web/web/repair.html b/pkg/web/web/repair.html index f99a45d..68f9e86 100644 --- a/pkg/web/web/repair.html +++ b/pkg/web/web/repair.html @@ -8,7 +8,7 @@
-
@@ -174,12 +174,6 @@ submitBtn.innerHTML = 'Repairing...'; let mediaIds = document.getElementById('mediaIds').value.split(',').map(id => id.trim()); let arr = document.getElementById('arrSelect').value; - if (!arr) { - createToast('Please select an Arr instance', 'warning'); - submitBtn.disabled = false; - submitBtn.innerHTML = originalText; - return; - } try { const response = await fetch('/internal/repair', { method: 'POST', @@ -187,7 +181,7 @@ 'Content-Type': 'application/json' }, body: JSON.stringify({ - arr: document.getElementById('arrSelect').value, + arr: arr, mediaIds: mediaIds, async: document.getElementById('isAsync').checked, autoProcess: document.getElementById('autoProcess').checked, @@ -262,17 +256,15 @@ // Determine status let status = 'In Progress'; let statusClass = 'text-primary'; - let canDelete = false; + let canDelete = job.status !== "started"; let totalItems = job.broken_items ? Object.values(job.broken_items).reduce((sum, arr) => sum + arr.length, 0) : 0; if (job.status === 'failed') { status = 'Failed'; statusClass = 'text-danger'; - canDelete = true; } else if (job.status === 'completed') { status = 'Completed'; statusClass = 'text-success'; - canDelete = true; } else if (job.status === 'pending') { status = 'Pending'; statusClass = 'text-warning'; diff --git a/pkg/webdav/file.go b/pkg/webdav/file.go index 6c8b84a..ca5d596 100644 --- a/pkg/webdav/file.go +++ b/pkg/webdav/file.go @@ -1,6 +1,7 @@ package webdav import ( + "bufio" "fmt" "github.com/sirrobot01/debrid-blackhole/pkg/debrid/debrid" "io" @@ -11,11 +12,11 @@ import ( var sharedClient = &http.Client{ Transport: &http.Transport{ - // These settings help maintain persistent connections. 
MaxIdleConns: 100, IdleConnTimeout: 90 * time.Second, DisableCompression: false, DisableKeepAlives: false, + Proxy: http.ProxyFromEnvironment, }, Timeout: 0, } @@ -39,6 +40,24 @@ type File struct { link string } +type bufferedReadCloser struct { + *bufio.Reader + closer io.Closer +} + +// Create a new bufferedReadCloser with a larger buffer +func newBufferedReadCloser(rc io.ReadCloser) *bufferedReadCloser { + return &bufferedReadCloser{ + Reader: bufio.NewReaderSize(rc, 64*1024), // Increase to 1MB buffer + closer: rc, + } +} + +// Close implements ReadCloser interface +func (brc *bufferedReadCloser) Close() error { + return brc.closer.Close() +} + // File interface implementations for File func (f *File) Close() error { @@ -82,40 +101,48 @@ func (f *File) Read(p []byte) (n int, err error) { return n, nil } - // If we haven't started streaming or a seek was requested, - // close the existing stream and start a new HTTP GET request. + // If we haven't started streaming the file yet or need to reposition if f.reader == nil || f.seekPending { + // Close existing reader if we're repositioning if f.reader != nil && f.seekPending { f.reader.Close() f.reader = nil } - // Create a new HTTP GET request for the file's URL. - req, err := http.NewRequest("GET", f.GetDownloadLink(), nil) + downloadLink := f.GetDownloadLink() + if downloadLink == "" { + return 0, fmt.Errorf("failed to get download link for file") + } + + // Create an HTTP GET request to the file's URL. + req, err := http.NewRequest("GET", downloadLink, nil) if err != nil { return 0, fmt.Errorf("failed to create HTTP request: %w", err) } - // If we've already read some data, request only the remaining bytes. + // Request only the bytes starting from our current offset if f.offset > 0 { req.Header.Set("Range", fmt.Sprintf("bytes=%d-", f.offset)) } - // Execute the HTTP request. 
+ // Add important headers for streaming + req.Header.Set("Connection", "keep-alive") + req.Header.Set("Accept", "*/*") + req.Header.Set("User-Agent", "Infuse/7.0.2 (iOS)") + req.Header.Set("Accept-Encoding", "gzip, deflate, br") + resp, err := sharedClient.Do(req) if err != nil { return 0, fmt.Errorf("HTTP request error: %w", err) } - // Accept a 200 (OK) or 206 (Partial Content) status. + // Check response codes more carefully if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent { resp.Body.Close() return 0, fmt.Errorf("unexpected HTTP status: %d", resp.StatusCode) } - // Store the response body as our reader. - f.reader = resp.Body - // Reset the seek pending flag now that we've reinitialized the reader. + f.reader = newBufferedReadCloser(resp.Body) f.seekPending = false } @@ -123,10 +150,12 @@ func (f *File) Read(p []byte) (n int, err error) { n, err = f.reader.Read(p) f.offset += int64(n) - // When we reach the end of the stream, close the reader. if err == io.EOF { f.reader.Close() f.reader = nil + } else if err != nil { + f.reader.Close() + f.reader = nil } return n, err @@ -137,12 +166,12 @@ func (f *File) Seek(offset int64, whence int) (int64, error) { return 0, os.ErrInvalid } - var newOffset int64 + newOffset := f.offset switch whence { case io.SeekStart: newOffset = offset case io.SeekCurrent: - newOffset = f.offset + offset + newOffset += offset case io.SeekEnd: newOffset = f.size + offset default: @@ -156,7 +185,7 @@ func (f *File) Seek(offset int64, whence int) (int64, error) { newOffset = f.size } - // If we're seeking to a new position, mark the reader for reset. 
+ // Only mark seek as pending if position actually changed if newOffset != f.offset { f.offset = newOffset f.seekPending = true @@ -184,6 +213,24 @@ func (f *File) Stat() (os.FileInfo, error) { }, nil } +func (f *File) ReadAt(p []byte, off int64) (n int, err error) { + // Save current position + + // Seek to requested position + _, err = f.Seek(off, io.SeekStart) + if err != nil { + return 0, err + } + + // Read the data + n, err = f.Read(p) + + // Don't restore position for Infuse compatibility + // Infuse expects sequential reads after the initial seek + + return n, err +} + func (f *File) Write(p []byte) (n int, err error) { return 0, os.ErrPermission } diff --git a/pkg/webdav/handler.go b/pkg/webdav/handler.go index b17acb2..c4c3954 100644 --- a/pkg/webdav/handler.go +++ b/pkg/webdav/handler.go @@ -2,7 +2,6 @@ package webdav import ( "bytes" - "compress/gzip" "context" "errors" "fmt" @@ -68,7 +67,7 @@ func (h *Handler) RemoveAll(ctx context.Context, name string) error { } if filename == "" { - h.cache.GetClient().DeleteTorrent(cachedTorrent.Torrent) + h.cache.GetClient().DeleteTorrent(cachedTorrent.Torrent.Id) h.cache.OnRemove(cachedTorrent.Id) return nil } @@ -259,7 +258,7 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { // - Otherwise, for deeper (torrent folder) paths, use a longer TTL. 
ttl := 30 * time.Minute if h.isParentPath(r.URL.Path) { - ttl = 20 * time.Second + ttl = 30 * time.Second } if served := h.serveFromCacheIfValid(w, r, cacheKey, ttl); served { @@ -281,22 +280,12 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { responseData := responseRecorder.Body.Bytes() // Create compressed version - var gzippedData []byte - if len(responseData) > 0 { - var buf bytes.Buffer - gzw := gzip.NewWriter(&buf) - if _, err := gzw.Write(responseData); err == nil { - if err := gzw.Close(); err == nil { - gzippedData = buf.Bytes() - } - } - } - h.cache.PropfindResp.Store(cacheKey, debrid.PropfindResponse{ - Data: responseData, - GzippedData: gzippedData, - Ts: time.Now(), - }) + //h.cache.PropfindResp.Store(cacheKey, debrid.PropfindResponse{ + // Data: responseData, + // GzippedData: request.Gzip(responseData), + // Ts: time.Now(), + //}) // Forward the captured response to the client. for k, v := range responseRecorder.Header() { @@ -417,7 +406,6 @@ func (h *Handler) serveFromCacheIfValid(w http.ResponseWriter, r *http.Request, if time.Since(respCache.Ts) >= ttl { // Remove expired cache entry - h.cache.PropfindResp.Delete(cacheKey) return false } w.Header().Set("Content-Type", "application/xml; charset=utf-8") From f93d489956cc69431ec2dd8b0226ff964b651c92 Mon Sep 17 00:00:00 2001 From: Mukhtar Akere Date: Fri, 21 Mar 2025 17:55:19 +0100 Subject: [PATCH 07/39] Fix regex --- internal/config/misc.go | 2 +- internal/utils/regex.go | 6 +++--- pkg/arr/refresh.go | 34 ---------------------------------- 3 files changed, 4 insertions(+), 38 deletions(-) diff --git a/internal/config/misc.go b/internal/config/misc.go index 8af24c1..575df87 100644 --- a/internal/config/misc.go +++ b/internal/config/misc.go @@ -25,7 +25,7 @@ func (c *Config) IsAllowedFile(filename string) bool { func getDefaultExtensions() []string { videoExts := 
strings.Split("YUV,WMV,WEBM,VOB,VIV,SVI,ROQ,RMVB,RM,OGV,OGG,NSV,MXF,MPG,MPEG,M2V,MP2,MPE,MPV,MP4,M4P,M4V,MOV,QT,MNG,MKV,FLV,DRC,AVI,ASF,AMV,MKA,F4V,3GP,3G2,DIVX,X264,X265", ",") - musicExts := strings.Split("MP3,WAV,FLAC,AAC,OGG,WMA,AIFF,ALAC,M4A,APE,AC3,DTS,M4P,MID,MIDI,MKA,MP2,MPA,RA,VOC,WV,AMR", ",") + musicExts := strings.Split("MP3,WAV,FLAC,OGG,WMA,AIFF,ALAC,M4A,APE,AC3,DTS,M4P,MID,MIDI,MKA,MP2,MPA,RA,VOC,WV,AMR", ",") // Combine both slices allExts := append(videoExts, musicExts...) diff --git a/internal/utils/regex.go b/internal/utils/regex.go index e9cce5e..2ac0722 100644 --- a/internal/utils/regex.go +++ b/internal/utils/regex.go @@ -7,8 +7,8 @@ import ( ) var ( - VIDEOMATCH = "(?i)(\\.)(YUV|WMV|WEBM|VOB|VIV|SVI|ROQ|RMVB|RM|OGV|OGG|NSV|MXF|MPG|MPEG|M2V|MP2|MPE|MPV|MP4|M4P|M4V|MOV|QT|MNG|MKV|FLV|DRC|AVI|ASF|AMV|MKA|F4V|3GP|3G2|DIVX|X264|X265)$" - MUSICMATCH = "(?i)(\\.)(?:MP3|WAV|FLAC|AAC|OGG|WMA|AIFF|ALAC|M4A|APE|AC3|DTS|M4P|MID|MIDI|MKA|MP2|MPA|RA|VOC|WV|AMR)$" + VIDEOMATCH = "(?i)(\\.)(webm|m4v|3gp|nsv|ty|strm|rm|rmvb|m3u|ifo|mov|qt|divx|xvid|bivx|nrg|pva|wmv|asf|asx|ogm|ogv|m2v|avi|bin|dat|dvr-ms|mpg|mpeg|mp4|avc|vp3|svq3|nuv|viv|dv|fli|flv|wpl|img|iso|vob|mkv|mk3d|ts|wtv|m2ts)$" + MUSICMATCH = "(?i)(\\.)(mp2|mp3|m4a|m4b|m4p|ogg|oga|opus|wma|wav|wv|flac|ape|aif|aiff|aifc)$" ) var SAMPLEMATCH = `(?i)(^|[\\/]|[._-])(sample|trailer|thumb)s?([._-]|$)` @@ -38,7 +38,7 @@ func RemoveInvalidChars(value string) string { func RemoveExtension(value string) string { value = RemoveInvalidChars(value) - re := regexp.MustCompile(VIDEOMATCH + "|" + SAMPLEMATCH + "|" + MUSICMATCH) + re := regexp.MustCompile(VIDEOMATCH + "|" + MUSICMATCH) // Find the last index of the matched extension loc := re.FindStringIndex(value) diff --git a/pkg/arr/refresh.go b/pkg/arr/refresh.go index aab0a71..fd7e8d0 100644 --- a/pkg/arr/refresh.go +++ b/pkg/arr/refresh.go @@ -1,12 +1,9 @@ package arr import ( - "cmp" "fmt" - "github.com/sirrobot01/debrid-blackhole/internal/request" "net/http" 
"strconv" - "strings" ) func (a *Arr) Refresh() error { @@ -26,34 +23,3 @@ func (a *Arr) Refresh() error { return fmt.Errorf("failed to refresh: %v", err) } - -func (a *Arr) Blacklist(infoHash string) error { - downloadId := strings.ToUpper(infoHash) - history := a.GetHistory(downloadId, "grabbed") - if history == nil { - return nil - } - torrentId := 0 - for _, record := range history.Records { - if strings.EqualFold(record.DownloadID, downloadId) { - torrentId = record.ID - break - } - } - if torrentId != 0 { - url, err := request.JoinURL(a.Host, "history/failed/", strconv.Itoa(torrentId)) - if err != nil { - return err - } - req, err := http.NewRequest(http.MethodPost, url, nil) - if err != nil { - return err - } - client := &http.Client{} - _, err = client.Do(req) - if err == nil { - return fmt.Errorf("failed to mark %s as failed: %v", cmp.Or(a.Name, a.Host), err) - } - } - return nil -} From d10b67958458b362291f6b2b92d83fd8d8123851 Mon Sep 17 00:00:00 2001 From: Mukhtar Akere Date: Fri, 21 Mar 2025 17:58:06 +0100 Subject: [PATCH 08/39] Fix regex --- internal/config/misc.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/config/misc.go b/internal/config/misc.go index 575df87..c18e6bf 100644 --- a/internal/config/misc.go +++ b/internal/config/misc.go @@ -24,7 +24,7 @@ func (c *Config) IsAllowedFile(filename string) bool { } func getDefaultExtensions() []string { - videoExts := strings.Split("YUV,WMV,WEBM,VOB,VIV,SVI,ROQ,RMVB,RM,OGV,OGG,NSV,MXF,MPG,MPEG,M2V,MP2,MPE,MPV,MP4,M4P,M4V,MOV,QT,MNG,MKV,FLV,DRC,AVI,ASF,AMV,MKA,F4V,3GP,3G2,DIVX,X264,X265", ",") + videoExts := strings.Split("webm,m4v,3gp,nsv,ty,strm,rm,rmvb,m3u,ifo,mov,qt,divx,xvid,bivx,nrg,pva,wmv,asf,asx,ogm,ogv,m2v,avi,bin,dat,dvr-ms,mpg,mpeg,mp4,avc,vp3,svq3,nuv,viv,dv,fli,flv,wpl,img,iso,vob,mkv,mk3d,ts,wtv,m2ts'", ",") musicExts := strings.Split("MP3,WAV,FLAC,OGG,WMA,AIFF,ALAC,M4A,APE,AC3,DTS,M4P,MID,MIDI,MKA,MP2,MPA,RA,VOC,WV,AMR", ",") // Combine both slices From 
738474be16faabd3a3be5c1c409c637d08084372 Mon Sep 17 00:00:00 2001 From: Mukhtar Akere Date: Sat, 22 Mar 2025 00:17:07 +0100 Subject: [PATCH 09/39] Experimental usability stage --- doc/config.full.json | 7 +++- go.mod | 19 ++-------- go.sum | 36 ++---------------- internal/config/config.go | 32 +++++++++++++++- internal/request/request.go | 18 +++++++++ pkg/debrid/debrid/cache.go | 68 +++++++++++++++++++++++----------- pkg/debrid/debrid/engine.go | 9 ++++- pkg/debrid/debrid/misc.go | 10 ----- pkg/debrid/debrid/refresh.go | 71 +++++++++++++++++++++++++----------- pkg/debrid/debrid/worker.go | 8 ++-- pkg/debrid/debrid/xml.go | 29 ++++++++++----- pkg/qbit/torrent.go | 3 -- pkg/web/web/download.html | 4 +- pkg/webdav/handler.go | 46 ++++++++++++----------- 14 files changed, 212 insertions(+), 148 deletions(-) delete mode 100644 pkg/debrid/debrid/misc.go diff --git a/doc/config.full.json b/doc/config.full.json index dda1890..f280936 100644 --- a/doc/config.full.json +++ b/doc/config.full.json @@ -16,7 +16,8 @@ "folder": "/mnt/remote/realdebrid/__all__/", "rate_limit": "250/minute", "download_uncached": false, - "check_cached": false + "check_cached": false, + "use_webdav": true }, { "name": "debridlink", @@ -91,4 +92,8 @@ "allowed_file_types": [], "use_auth": false, "discord_webhook_url": "https://discord.com/api/webhooks/...", + "webdav": { + "torrents_refresh_interval": "5m", + "download_links_refresh_interval": "1h" + } } \ No newline at end of file diff --git a/go.mod b/go.mod index b8852ee..d14ecfa 100644 --- a/go.mod +++ b/go.mod @@ -6,12 +6,15 @@ toolchain go1.23.2 require ( github.com/anacrolix/torrent v1.55.0 + github.com/beevik/etree v1.5.0 github.com/cavaliergopher/grab/v3 v3.0.1 github.com/elazarl/goproxy v0.0.0-20240726154733-8b0c20506380 github.com/elazarl/goproxy/ext v0.0.0-20190711103511-473e67f1d7d2 github.com/go-chi/chi/v5 v5.1.0 + github.com/goccy/go-json v0.10.5 github.com/google/uuid v1.6.0 github.com/gorilla/sessions v1.4.0 + 
github.com/puzpuzpuz/xsync/v3 v3.5.1 github.com/rs/zerolog v1.33.0 github.com/valyala/fastjson v1.6.4 golang.org/x/crypto v0.33.0 @@ -24,32 +27,16 @@ require ( require ( github.com/anacrolix/missinggo v1.3.0 // indirect github.com/anacrolix/missinggo/v2 v2.7.3 // indirect - github.com/beevik/etree v1.5.0 // indirect github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8 // indirect - github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect - github.com/dgraph-io/badger/v4 v4.6.0 // indirect - github.com/dgraph-io/ristretto/v2 v2.1.0 // indirect - github.com/dustin/go-humanize v1.0.1 // indirect - github.com/go-logr/logr v1.4.2 // indirect - github.com/go-logr/stdr v1.2.2 // indirect - github.com/goccy/go-json v0.10.5 // indirect - github.com/google/flatbuffers v25.2.10+incompatible // indirect github.com/google/go-cmp v0.6.0 // indirect github.com/gorilla/securecookie v1.1.2 // indirect github.com/huandu/xstrings v1.3.2 // indirect - github.com/klauspost/compress v1.18.0 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect - github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/rogpeppe/go-internal v1.13.1 // indirect github.com/stretchr/testify v1.10.0 // indirect - go.opentelemetry.io/auto/sdk v1.1.0 // indirect - go.opentelemetry.io/otel v1.34.0 // indirect - go.opentelemetry.io/otel/metric v1.34.0 // indirect - go.opentelemetry.io/otel/trace v1.34.0 // indirect golang.org/x/sys v0.30.0 // indirect golang.org/x/text v0.22.0 // indirect - google.golang.org/protobuf v1.36.5 // indirect ) diff --git a/go.sum b/go.sum index 72b0544..f90f30b 100644 --- a/go.sum +++ b/go.sum @@ -48,25 +48,16 @@ github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8 h1:GKTyiRCL6zVf5wWaq github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8/go.mod 
h1:spo1JLcs67NmW1aVLEgtA8Yy1elc+X8y5SRW1sFW4Og= github.com/cavaliergopher/grab/v3 v3.0.1 h1:4z7TkBfmPjmLAAmkkAZNX/6QJ1nNFdv3SdIHXju0Fr4= github.com/cavaliergopher/grab/v3 v3.0.1/go.mod h1:1U/KNnD+Ft6JJiYoYBAimKH2XrYptb8Kl3DFGmsjpq4= -github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= -github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dgraph-io/badger/v4 v4.6.0 h1:acOwfOOZ4p1dPRnYzvkVm7rUk2Y21TgPVepCy5dJdFQ= -github.com/dgraph-io/badger/v4 v4.6.0/go.mod h1:KSJ5VTuZNC3Sd+YhvVjk2nYua9UZnnTr/SkXvdtiPgI= -github.com/dgraph-io/ristretto/v2 v2.1.0 h1:59LjpOJLNDULHh8MC4UaegN52lC4JnO2dITsie/Pa8I= -github.com/dgraph-io/ristretto/v2 v2.1.0/go.mod h1:uejeqfYXpUomfse0+lO+13ATz4TypQYLJZzBSAemuB4= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dustin/go-humanize v0.0.0-20180421182945-02af3965c54e/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= -github.com/dustin/go-humanize 
v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= @@ -89,11 +80,6 @@ github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2 github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= -github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= -github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= -github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/goccy/go-json v0.10.5 h1:Fq85nIqj+gXn/S5ahsiTlK3TmC85qgirsdTP/+DeaC4= github.com/goccy/go-json v0.10.5/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= @@ -116,8 +102,6 @@ github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8l github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180124185431-e89373fe6b4a/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/flatbuffers v25.2.10+incompatible h1:F3vclr7C3HpB1k9mxCGRMXq6FdUalZ6H/pNX4FP1v0Q= -github.com/google/flatbuffers v25.2.10+incompatible/go.mod 
h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -152,8 +136,6 @@ github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVY github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= -github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= @@ -185,7 +167,6 @@ github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= @@ -206,10 +187,11 @@ github.com/prometheus/procfs 
v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/puzpuzpuz/xsync/v3 v3.5.1 h1:GJYJZwO6IdxN/IKbneznS6yPkVC+c3zyY/j19c++5Fg= +github.com/puzpuzpuz/xsync/v3 v3.5.1/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rogpeppe/go-charset v0.0.0-20180617210344-2471d30d28b4/go.mod h1:qgYeAmZ5ZIpBWTGllZSQnw97Dj+woV0toclVaRGI8pc= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= github.com/rs/zerolog v1.33.0 h1:1cU2KZkvPxNyfgEmhHAz/1A9Bz+llsdYzklWFzgp0r8= @@ -239,14 +221,6 @@ github.com/willf/bitset v1.1.10/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPy go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= -go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/otel v1.34.0 h1:zRLXxLCgL1WyKsPVrgbSdMN4c0FMkDAskSTQP+0hdUY= -go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI= -go.opentelemetry.io/otel/metric 
v1.34.0 h1:+eTR3U0MyfWjRDhmFMxe2SsW64QrZ84AOhvqS7Y+PoQ= -go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE= -go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k= -go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.33.0 h1:IOBPskki6Lysi0lo9qQvbxiQ+FvsCC/YWOecCHAixus= @@ -266,8 +240,6 @@ golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= -golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= golang.org/x/net v0.35.0 h1:T5GQRQb2y08kTAByq9L4/bz8cipCdA8FbRTXewonqY8= golang.org/x/net v0.35.0/go.mod h1:EglIi67kWsHKlRzzVMUD93VMSWGFOMSZgxFjparz1Qk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -322,8 +294,6 @@ google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM= 
-google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/internal/config/config.go b/internal/config/config.go index dd6b7bf..375b43e 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -1,6 +1,7 @@ package config import ( + "cmp" "errors" "fmt" "github.com/goccy/go-json" @@ -23,7 +24,12 @@ type Debrid struct { DownloadUncached bool `json:"download_uncached"` CheckCached bool `json:"check_cached"` RateLimit string `json:"rate_limit"` // 200/minute or 10/second - EnableWebDav bool `json:"enable_webdav"` + + // Webdav + UseWebdav bool `json:"use_webdav"` + TorrentRefreshInterval string `json:"torrent_refresh_interval"` + DownloadLinksRefreshInterval string `json:"downloads_refresh_interval"` + TorrentRefreshWorkers int `json:"torrent_refresh_workers"` } type Proxy struct { @@ -67,6 +73,16 @@ type Auth struct { Password string `json:"password"` } +type WebDav struct { + TorrentsRefreshInterval string `json:"torrents_refresh_interval"` + DownloadLinksRefreshInterval string `json:"download_links_refresh_interval"` + Workers int `json:"workers"` + + RcUrl string `json:"rc_url"` + RcUser string `json:"rc_user"` + RcPass string `json:"rc_pass"` +} + type Config struct { LogLevel string `json:"log_level"` Debrid Debrid `json:"debrid"` @@ -76,6 +92,7 @@ type Config struct { QBitTorrent QBitTorrent `json:"qbittorrent"` Arrs []Arr `json:"arrs"` Repair Repair `json:"repair"` + WebDav WebDav `json:"webdav"` AllowedExt []string `json:"allowed_file_types"` MinFileSize string `json:"min_file_size"` // Minimum file size to download, 10MB, 1GB, etc MaxFileSize string `json:"max_file_size"` // Maximum file size to 
download (0 means no limit) @@ -286,3 +303,16 @@ func (c *Config) NeedsSetup() bool { } return false } + +func (c *Config) GetDebridWebDav(d Debrid) Debrid { + if d.TorrentRefreshInterval == "" { + d.TorrentRefreshInterval = cmp.Or(c.WebDav.TorrentsRefreshInterval, "15s") // 15 seconds + } + if d.DownloadLinksRefreshInterval == "" { + d.DownloadLinksRefreshInterval = cmp.Or(c.WebDav.DownloadLinksRefreshInterval, "40m") // 40 minutes + } + if d.TorrentRefreshWorkers == 0 { + d.TorrentRefreshWorkers = cmp.Or(c.WebDav.Workers, 30) // 30 workers + } + return d +} diff --git a/internal/request/request.go b/internal/request/request.go index 2643d94..8659169 100644 --- a/internal/request/request.go +++ b/internal/request/request.go @@ -18,6 +18,7 @@ import ( "regexp" "strconv" "strings" + "sync" "time" ) @@ -40,6 +41,11 @@ func JoinURL(base string, paths ...string) (string, error) { return joined, nil } +var ( + once sync.Once + instance *Client +) + type ClientOption func(*Client) // Client represents an HTTP client with additional capabilities @@ -83,6 +89,11 @@ func (c *Client) WithLogger(logger zerolog.Logger) *Client { return c } +func (c *Client) WithTransport(transport *http.Transport) *Client { + c.client.Transport = transport + return c +} + // WithRetryableStatus adds status codes that should trigger a retry func (c *Client) WithRetryableStatus(statusCodes ...int) *Client { for _, code := range statusCodes { @@ -307,3 +318,10 @@ func Gzip(body []byte) []byte { } return b.Bytes() } + +func Default() *Client { + once.Do(func() { + instance = New() + }) + return instance +} diff --git a/pkg/debrid/debrid/cache.go b/pkg/debrid/debrid/cache.go index be51e9d..10c9a11 100644 --- a/pkg/debrid/debrid/cache.go +++ b/pkg/debrid/debrid/cache.go @@ -5,6 +5,7 @@ import ( "context" "fmt" "github.com/goccy/go-json" + "github.com/puzpuzpuz/xsync/v3" "github.com/rs/zerolog" "github.com/sirrobot01/debrid-blackhole/internal/logger" 
"github.com/sirrobot01/debrid-blackhole/internal/utils" @@ -44,16 +45,17 @@ type Cache struct { torrentsNames map[string]*CachedTorrent // key: torrent.Name, value: torrent listings atomic.Value downloadLinks map[string]string // key: file.Link, value: download link - PropfindResp sync.Map + PropfindResp *xsync.MapOf[string, PropfindResponse] - workers int - - LastUpdated time.Time `json:"last_updated"` + // config + workers int + torrentRefreshInterval time.Duration + downloadLinksRefreshInterval time.Duration // refresh mutex - listingRefreshMu sync.Mutex // for refreshing torrents - downloadLinksRefreshMu sync.Mutex // for refreshing download links - torrentsRefreshMu sync.Mutex // for refreshing torrents + listingRefreshMu sync.RWMutex // for refreshing torrents + downloadLinksRefreshMu sync.RWMutex // for refreshing download links + torrentsRefreshMu sync.RWMutex // for refreshing torrents // Data Mutexes torrentsMutex sync.RWMutex // for torrents and torrentsNames @@ -81,7 +83,7 @@ func (c *Cache) setTorrent(t *CachedTorrent) { c.torrentsNames[t.Name] = t c.torrentsMutex.Unlock() - tryLock(&c.listingRefreshMu, c.refreshListings) + c.refreshListings() go func() { if err := c.SaveTorrent(t); err != nil { @@ -106,7 +108,7 @@ func (c *Cache) setTorrents(torrents map[string]*CachedTorrent) { c.torrentsMutex.Unlock() - tryLock(&c.listingRefreshMu, c.refreshListings) + c.refreshListings() go func() { if err := c.SaveTorrents(); err != nil { @@ -131,17 +133,27 @@ func (c *Cache) GetTorrentNames() map[string]*CachedTorrent { return c.torrentsNames } -func NewCache(client types.Client) *Cache { +func NewCache(dc config.Debrid, client types.Client) *Cache { cfg := config.GetConfig() - dbPath := filepath.Join(cfg.Path, "cache", client.GetName()) + torrentRefreshInterval, err := time.ParseDuration(dc.TorrentRefreshInterval) + if err != nil { + torrentRefreshInterval = time.Second * 15 + } + downloadLinksRefreshInterval, err := 
time.ParseDuration(dc.DownloadLinksRefreshInterval) + if err != nil { + downloadLinksRefreshInterval = time.Minute * 40 + } return &Cache{ - dir: dbPath, - torrents: make(map[string]*CachedTorrent), - torrentsNames: make(map[string]*CachedTorrent), - client: client, - logger: logger.NewLogger(fmt.Sprintf("%s-cache", client.GetName())), - workers: 200, - downloadLinks: make(map[string]string), + dir: filepath.Join(cfg.Path, "cache", dc.Name), // path to save cache files + torrents: make(map[string]*CachedTorrent), + torrentsNames: make(map[string]*CachedTorrent), + client: client, + logger: logger.NewLogger(fmt.Sprintf("%s-cache", client.GetName())), + workers: 200, + downloadLinks: make(map[string]string), + torrentRefreshInterval: torrentRefreshInterval, + downloadLinksRefreshInterval: downloadLinksRefreshInterval, + PropfindResp: xsync.NewMapOf[string, PropfindResponse](), } } @@ -160,7 +172,7 @@ func (c *Cache) Start() error { c.downloadLinksRefreshMu.Lock() defer c.downloadLinksRefreshMu.Unlock() // This prevents the download links from being refreshed twice - tryLock(&c.downloadLinksRefreshMu, c.refreshDownloadLinks) + c.refreshDownloadLinks() }() go func() { @@ -462,7 +474,19 @@ func (c *Cache) GetClient() types.Client { return c.client } -func (c *Cache) DeleteTorrent(ids []string) { +func (c *Cache) DeleteTorrent(id string) { + c.logger.Info().Msgf("Deleting torrent %s", id) + c.torrentsMutex.Lock() + defer c.torrentsMutex.Unlock() + if t, ok := c.torrents[id]; ok { + delete(c.torrents, id) + delete(c.torrentsNames, t.Name) + + c.removeFromDB(id) + } +} + +func (c *Cache) DeleteTorrents(ids []string) { c.logger.Info().Msgf("Deleting %d torrents", len(ids)) c.torrentsMutex.Lock() defer c.torrentsMutex.Unlock() @@ -483,6 +507,6 @@ func (c *Cache) removeFromDB(torrentId string) { } func (c *Cache) OnRemove(torrentId string) { - go c.DeleteTorrent([]string{torrentId}) - go tryLock(&c.listingRefreshMu, c.refreshListings) + go c.DeleteTorrent(torrentId) + go 
c.refreshListings() } diff --git a/pkg/debrid/debrid/engine.go b/pkg/debrid/debrid/engine.go index 61ded2d..10f3f37 100644 --- a/pkg/debrid/debrid/engine.go +++ b/pkg/debrid/debrid/engine.go @@ -18,11 +18,16 @@ func NewEngine() *Engine { caches := make(map[string]*Cache) for _, dc := range cfg.Debrids { + dc = cfg.GetDebridWebDav(dc) client := createDebridClient(dc) logger := client.GetLogger() - logger.Info().Msg("Debrid Service started") + if dc.UseWebdav { + caches[dc.Name] = NewCache(dc, client) + logger.Info().Msg("Debrid Service started with WebDAV") + } else { + logger.Info().Msg("Debrid Service started") + } clients[dc.Name] = client - caches[dc.Name] = NewCache(client) } d := &Engine{ diff --git a/pkg/debrid/debrid/misc.go b/pkg/debrid/debrid/misc.go deleted file mode 100644 index beb6f04..0000000 --- a/pkg/debrid/debrid/misc.go +++ /dev/null @@ -1,10 +0,0 @@ -package debrid - -import "sync" - -func tryLock(mu *sync.Mutex, f func()) { - if mu.TryLock() { - defer mu.Unlock() - f() - } -} diff --git a/pkg/debrid/debrid/refresh.go b/pkg/debrid/debrid/refresh.go index 07d9752..9bfaefb 100644 --- a/pkg/debrid/debrid/refresh.go +++ b/pkg/debrid/debrid/refresh.go @@ -1,18 +1,26 @@ package debrid import ( - "bytes" "fmt" - "github.com/goccy/go-json" + "github.com/sirrobot01/debrid-blackhole/internal/config" + "github.com/sirrobot01/debrid-blackhole/internal/request" "github.com/sirrobot01/debrid-blackhole/pkg/debrid/types" + "io" "net/http" "os" + "slices" "sort" + "strings" "sync" "time" ) func (c *Cache) refreshListings() { + if c.listingRefreshMu.TryLock() { + defer c.listingRefreshMu.Unlock() + } else { + return + } // Copy the current torrents to avoid concurrent issues c.torrentsMutex.RLock() torrents := make([]string, 0, len(c.torrents)) @@ -47,6 +55,11 @@ func (c *Cache) refreshListings() { } func (c *Cache) refreshTorrents() { + if c.torrentsRefreshMu.TryLock() { + defer c.torrentsRefreshMu.Unlock() + } else { + return + } c.torrentsMutex.RLock() 
currentTorrents := c.torrents // // Create a copy of the current torrents to avoid concurrent issues @@ -69,12 +82,12 @@ func (c *Cache) refreshTorrents() { } // Get the newly added torrents only - newTorrents := make([]*types.Torrent, 0) + _newTorrents := make([]*types.Torrent, 0) idStore := make(map[string]bool, len(debTorrents)) for _, t := range debTorrents { idStore[t.Id] = true if _, ok := torrents[t.Id]; !ok { - newTorrents = append(newTorrents, t) + _newTorrents = append(_newTorrents, t) } } @@ -85,9 +98,15 @@ func (c *Cache) refreshTorrents() { deletedTorrents = append(deletedTorrents, id) } } + newTorrents := make([]*types.Torrent, 0) + for _, t := range _newTorrents { + if !slices.Contains(deletedTorrents, t.Id) { + _newTorrents = append(_newTorrents, t) + } + } if len(deletedTorrents) > 0 { - c.DeleteTorrent(deletedTorrents) + c.DeleteTorrents(deletedTorrents) } if len(newTorrents) == 0 { @@ -112,34 +131,37 @@ func (c *Cache) refreshTorrents() { } func (c *Cache) RefreshRclone() error { - params := map[string]interface{}{ - "recursive": "false", - } + client := request.Default() + cfg := config.GetConfig().WebDav - // Convert parameters to JSON - jsonParams, err := json.Marshal(params) + if cfg.RcUrl == "" { + return nil + } + // Create form data + data := "dir=__all__&dir2=torrents" + + // Create a POST request with form URL-encoded content + forgetReq, err := http.NewRequest("POST", fmt.Sprintf("%s/vfs/forget", cfg.RcUrl), strings.NewReader(data)) if err != nil { return err } - - // Create HTTP request - url := "http://192.168.0.219:9990/vfs/refresh" // Switch to config - req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonParams)) - if err != nil { - return err + if cfg.RcUser != "" && cfg.RcPass != "" { + forgetReq.SetBasicAuth(cfg.RcUser, cfg.RcPass) } - // Set the appropriate headers - req.Header.Set("Content-Type", "application/json") + // Set the appropriate content type for form data + forgetReq.Header.Set("Content-Type", 
"application/x-www-form-urlencoded") // Send the request - client := &http.Client{} - resp, err := client.Do(req) + forgetResp, err := client.Do(forgetReq) if err != nil { return err } - if resp.StatusCode != 200 { - return fmt.Errorf("failed to refresh rclone: %s", resp.Status) + defer forgetResp.Body.Close() + + if forgetResp.StatusCode != 200 { + body, _ := io.ReadAll(forgetResp.Body) + return fmt.Errorf("failed to forget rclone: %s - %s", forgetResp.Status, string(body)) } return nil } @@ -166,6 +188,11 @@ func (c *Cache) refreshTorrent(t *CachedTorrent) *CachedTorrent { } func (c *Cache) refreshDownloadLinks() { + if c.downloadLinksRefreshMu.TryLock() { + defer c.downloadLinksRefreshMu.Unlock() + } else { + return + } c.downloadLinksMutex.Lock() defer c.downloadLinksMutex.Unlock() diff --git a/pkg/debrid/debrid/worker.go b/pkg/debrid/debrid/worker.go index a0de397..297ff65 100644 --- a/pkg/debrid/debrid/worker.go +++ b/pkg/debrid/debrid/worker.go @@ -11,25 +11,25 @@ func (c *Cache) Refresh() error { } func (c *Cache) refreshDownloadLinksWorker() { - refreshTicker := time.NewTicker(40 * time.Minute) + refreshTicker := time.NewTicker(c.downloadLinksRefreshInterval) defer refreshTicker.Stop() for { select { case <-refreshTicker.C: - tryLock(&c.downloadLinksRefreshMu, c.refreshDownloadLinks) + c.refreshDownloadLinks() } } } func (c *Cache) refreshTorrentsWorker() { - refreshTicker := time.NewTicker(5 * time.Second) + refreshTicker := time.NewTicker(c.torrentRefreshInterval) defer refreshTicker.Stop() for { select { case <-refreshTicker.C: - tryLock(&c.torrentsRefreshMu, c.refreshTorrents) + c.refreshTorrents() } } } diff --git a/pkg/debrid/debrid/xml.go b/pkg/debrid/debrid/xml.go index be3938b..749b72a 100644 --- a/pkg/debrid/debrid/xml.go +++ b/pkg/debrid/debrid/xml.go @@ -6,14 +6,16 @@ import ( "github.com/sirrobot01/debrid-blackhole/internal/request" "net/http" "net/url" + "os" path "path/filepath" "time" ) func (c *Cache) RefreshXml() error { parents := 
[]string{"__all__", "torrents"} + torrents := c.GetListing() for _, parent := range parents { - if err := c.refreshParentXml(parent); err != nil { + if err := c.refreshParentXml(torrents, parent); err != nil { return fmt.Errorf("failed to refresh XML for %s: %v", parent, err) } } @@ -22,7 +24,7 @@ func (c *Cache) RefreshXml() error { return nil } -func (c *Cache) refreshParentXml(parent string) error { +func (c *Cache) refreshParentXml(torrents []os.FileInfo, parent string) error { // Define the WebDAV namespace davNS := "DAV:" @@ -37,20 +39,21 @@ func (c *Cache) refreshParentXml(parent string) error { currentTime := time.Now().UTC().Format(http.TimeFormat) // Add the parent directory - parentPath := fmt.Sprintf("/webdav/%s/%s/", c.client.GetName(), parent) + baseUrl := path.Clean(fmt.Sprintf("/webdav/%s/%s", c.client.GetName(), parent)) + parentPath := fmt.Sprintf("%s/", baseUrl) addDirectoryResponse(multistatus, parentPath, parent, currentTime) // Add torrents to the XML - torrents := c.GetListing() for _, torrent := range torrents { - torrentName := torrent.Name() + name := torrent.Name() + // Note the path structure change - parent first, then torrent name torrentPath := fmt.Sprintf("/webdav/%s/%s/%s/", c.client.GetName(), - url.PathEscape(torrentName), parent, + url.PathEscape(name), ) - addDirectoryResponse(multistatus, torrentPath, torrentName, currentTime) + addDirectoryResponse(multistatus, torrentPath, name, currentTime) } // Convert to XML string @@ -60,8 +63,6 @@ func (c *Cache) refreshParentXml(parent string) error { } // Store in cache - // Construct the keys - baseUrl := path.Clean(fmt.Sprintf("/webdav/%s/%s", c.client.GetName())) key0 := fmt.Sprintf("propfind:%s:0", baseUrl) key1 := fmt.Sprintf("propfind:%s:1", baseUrl) @@ -78,7 +79,7 @@ func (c *Cache) refreshParentXml(parent string) error { func addDirectoryResponse(multistatus *etree.Element, href, displayName, modTime string) *etree.Element { responseElem := 
multistatus.CreateElement("D:response") - // Add href + // Add href - ensure it's properly formatted hrefElem := responseElem.CreateElement("D:href") hrefElem.SetText(href) @@ -100,6 +101,14 @@ func addDirectoryResponse(multistatus *etree.Element, href, displayName, modTime lastModElem := propElem.CreateElement("D:getlastmodified") lastModElem.SetText(modTime) + // Add content type for directories + contentTypeElem := propElem.CreateElement("D:getcontenttype") + contentTypeElem.SetText("httpd/unix-directory") + + // Add length (size) - directories typically have zero size + contentLengthElem := propElem.CreateElement("D:getcontentlength") + contentLengthElem.SetText("0") + // Add supported lock lockElem := propElem.CreateElement("D:supportedlock") lockEntryElem := lockElem.CreateElement("D:lockentry") diff --git a/pkg/qbit/torrent.go b/pkg/qbit/torrent.go index b1cdc8c..2193ec3 100644 --- a/pkg/qbit/torrent.go +++ b/pkg/qbit/torrent.go @@ -116,9 +116,6 @@ func (q *QBit) ProcessFiles(torrent *Torrent, debridTorrent *debrid.Torrent, arr if err != nil { return } - if err := cache.RefreshRclone(); err != nil { - q.logger.Trace().Msgf("Error refreshing rclone: %v", err) - } rclonePath := filepath.Join(debridTorrent.MountPath, debridTorrent.Name) torrentSymlinkPath, err = q.createSymlinks(debridTorrent, rclonePath, debridTorrent.Name) diff --git a/pkg/web/web/download.html b/pkg/web/web/download.html index 62dadf2..d2a2c1d 100644 --- a/pkg/web/web/download.html +++ b/pkg/web/web/download.html @@ -127,8 +127,8 @@ } } else { createToast(`Successfully added ${result.results.length} torrents!`); - document.getElementById('magnetURI').value = ''; - document.getElementById('torrentFiles').value = ''; + //document.getElementById('magnetURI').value = ''; + //document.getElementById('torrentFiles').value = ''; } } catch (error) { createToast(`Error adding downloads: ${error.message}`, 'error'); diff --git a/pkg/webdav/handler.go b/pkg/webdav/handler.go index c4c3954..ed14b41 
100644 --- a/pkg/webdav/handler.go +++ b/pkg/webdav/handler.go @@ -6,6 +6,7 @@ import ( "errors" "fmt" "github.com/rs/zerolog" + "github.com/sirrobot01/debrid-blackhole/internal/request" "github.com/sirrobot01/debrid-blackhole/pkg/debrid/debrid" "github.com/sirrobot01/debrid-blackhole/pkg/debrid/types" "golang.org/x/net/webdav" @@ -23,15 +24,12 @@ import ( ) type Handler struct { - Name string - logger zerolog.Logger - cache *debrid.Cache - lastRefresh time.Time - refreshMutex sync.Mutex - RootPath string - responseCache sync.Map - cacheTTL time.Duration - ctx context.Context + Name string + logger zerolog.Logger + cache *debrid.Cache + lastRefresh time.Time + refreshMutex sync.Mutex + RootPath string } func NewHandler(name string, cache *debrid.Cache, logger zerolog.Logger) *Handler { @@ -40,7 +38,6 @@ func NewHandler(name string, cache *debrid.Cache, logger zerolog.Logger) *Handle cache: cache, logger: logger, RootPath: fmt.Sprintf("/%s", name), - ctx: context.Background(), } return h } @@ -278,21 +275,31 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } handler.ServeHTTP(responseRecorder, r) responseData := responseRecorder.Body.Bytes() + gzippedData := request.Gzip(responseData) // Create compressed version - //h.cache.PropfindResp.Store(cacheKey, debrid.PropfindResponse{ - // Data: responseData, - // GzippedData: request.Gzip(responseData), - // Ts: time.Now(), - //}) + h.cache.PropfindResp.Store(cacheKey, debrid.PropfindResponse{ + Data: responseData, + GzippedData: gzippedData, + Ts: time.Now(), + }) // Forward the captured response to the client. 
for k, v := range responseRecorder.Header() { w.Header()[k] = v } w.WriteHeader(responseRecorder.Code) - w.Write(responseData) + + if acceptsGzip(r) { + w.Header().Set("Content-Encoding", "gzip") + w.Header().Set("Vary", "Accept-Encoding") + w.Header().Set("Content-Length", fmt.Sprintf("%d", len(gzippedData))) + w.Write(gzippedData) + } else { + w.Header().Set("Content-Length", fmt.Sprintf("%d", len(responseData))) + w.Write(responseData) + } return } @@ -394,12 +401,7 @@ func (h *Handler) isParentPath(_path string) bool { } func (h *Handler) serveFromCacheIfValid(w http.ResponseWriter, r *http.Request, cacheKey string, ttl time.Duration) bool { - cached, ok := h.cache.PropfindResp.Load(cacheKey) - if !ok { - return false - } - - respCache, ok := cached.(debrid.PropfindResponse) + respCache, ok := h.cache.PropfindResp.Load(cacheKey) if !ok { return false } From 49875446b44a293d1e0c652d20210ccab49261af Mon Sep 17 00:00:00 2001 From: Mukhtar Akere Date: Sat, 22 Mar 2025 00:30:00 +0100 Subject: [PATCH 10/39] Fix header writing --- doc/config.full.json | 3 ++- pkg/webdav/handler.go | 5 ++++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/doc/config.full.json b/doc/config.full.json index f280936..8b91ffa 100644 --- a/doc/config.full.json +++ b/doc/config.full.json @@ -94,6 +94,7 @@ "discord_webhook_url": "https://discord.com/api/webhooks/...", "webdav": { "torrents_refresh_interval": "5m", - "download_links_refresh_interval": "1h" + "download_links_refresh_interval": "1h", + "rc_url": "http://192.168.0.219:9990" } } \ No newline at end of file diff --git a/pkg/webdav/handler.go b/pkg/webdav/handler.go index ed14b41..fafcfc5 100644 --- a/pkg/webdav/handler.go +++ b/pkg/webdav/handler.go @@ -289,15 +289,16 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { for k, v := range responseRecorder.Header() { w.Header()[k] = v } - w.WriteHeader(responseRecorder.Code) if acceptsGzip(r) { w.Header().Set("Content-Encoding", "gzip") 
w.Header().Set("Vary", "Accept-Encoding") w.Header().Set("Content-Length", fmt.Sprintf("%d", len(gzippedData))) + w.WriteHeader(responseRecorder.Code) w.Write(gzippedData) } else { w.Header().Set("Content-Length", fmt.Sprintf("%d", len(responseData))) + w.WriteHeader(responseRecorder.Code) w.Write(responseData) } return @@ -416,9 +417,11 @@ func (h *Handler) serveFromCacheIfValid(w http.ResponseWriter, r *http.Request, w.Header().Set("Content-Encoding", "gzip") w.Header().Set("Vary", "Accept-Encoding") w.Header().Set("Content-Length", fmt.Sprintf("%d", len(respCache.GzippedData))) + w.WriteHeader(http.StatusOK) w.Write(respCache.GzippedData) } else { w.Header().Set("Content-Length", fmt.Sprintf("%d", len(respCache.Data))) + w.WriteHeader(http.StatusOK) w.Write(respCache.Data) } return true From e2f792d5ab9b7a9ce386065d408fa8c75a87877e Mon Sep 17 00:00:00 2001 From: Mukhtar Akere Date: Sat, 22 Mar 2025 06:05:53 +0100 Subject: [PATCH 11/39] hotfix xml --- doc/config.full.json | 6 ++++-- pkg/debrid/debrid/xml.go | 1 - 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/doc/config.full.json b/doc/config.full.json index 8b91ffa..8ef74a3 100644 --- a/doc/config.full.json +++ b/doc/config.full.json @@ -93,8 +93,10 @@ "use_auth": false, "discord_webhook_url": "https://discord.com/api/webhooks/...", "webdav": { - "torrents_refresh_interval": "5m", + "torrents_refresh_interval": "15s", "download_links_refresh_interval": "1h", - "rc_url": "http://192.168.0.219:9990" + "rc_url": "http://192.168.0.219:9990", + "rc_user": "your_rclone_rc_user", + "rc_pass": "your_rclone_rc_pass" } } \ No newline at end of file diff --git a/pkg/debrid/debrid/xml.go b/pkg/debrid/debrid/xml.go index 749b72a..80dfcce 100644 --- a/pkg/debrid/debrid/xml.go +++ b/pkg/debrid/debrid/xml.go @@ -19,7 +19,6 @@ func (c *Cache) RefreshXml() error { return fmt.Errorf("failed to refresh XML for %s: %v", parent, err) } } - c.logger.Debug().Msgf("Refreshed XML cache for %s", c.client.GetName()) return 
nil } From 8c13da5d308a7e52a29c096c773726c91453ff22 Mon Sep 17 00:00:00 2001 From: Mukhtar Akere Date: Sun, 23 Mar 2025 09:32:19 +0100 Subject: [PATCH 12/39] Improve streaming --- pkg/debrid/realdebrid/realdebrid.go | 4 +++ pkg/webdav/file.go | 44 ++++++----------------------- 2 files changed, 13 insertions(+), 35 deletions(-) diff --git a/pkg/debrid/realdebrid/realdebrid.go b/pkg/debrid/realdebrid/realdebrid.go index 8838ab9..0f268e7 100644 --- a/pkg/debrid/realdebrid/realdebrid.go +++ b/pkg/debrid/realdebrid/realdebrid.go @@ -357,6 +357,10 @@ func (r *RealDebrid) getTorrents(offset int, limit int) (int, []*types.Torrent, return 0, torrents, err } + if resp.StatusCode == http.StatusNoContent { + return 0, torrents, nil + } + if resp.StatusCode != http.StatusOK { resp.Body.Close() return 0, torrents, fmt.Errorf("realdebrid API error: %d", resp.StatusCode) diff --git a/pkg/webdav/file.go b/pkg/webdav/file.go index ca5d596..ba170cd 100644 --- a/pkg/webdav/file.go +++ b/pkg/webdav/file.go @@ -1,7 +1,6 @@ package webdav import ( - "bufio" "fmt" "github.com/sirrobot01/debrid-blackhole/pkg/debrid/debrid" "io" @@ -12,11 +11,13 @@ import ( var sharedClient = &http.Client{ Transport: &http.Transport{ - MaxIdleConns: 100, - IdleConnTimeout: 90 * time.Second, - DisableCompression: false, - DisableKeepAlives: false, - Proxy: http.ProxyFromEnvironment, + MaxIdleConns: 100, + IdleConnTimeout: 90 * time.Second, + ResponseHeaderTimeout: 30 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + DisableCompression: false, // Enable compression for faster transfers + DisableKeepAlives: false, + Proxy: http.ProxyFromEnvironment, }, Timeout: 0, } @@ -40,24 +41,6 @@ type File struct { link string } -type bufferedReadCloser struct { - *bufio.Reader - closer io.Closer -} - -// Create a new bufferedReadCloser with a larger buffer -func newBufferedReadCloser(rc io.ReadCloser) *bufferedReadCloser { - return &bufferedReadCloser{ - Reader: bufio.NewReaderSize(rc, 64*1024), // Increase to 
1MB buffer - closer: rc, - } -} - -// Close implements ReadCloser interface -func (brc *bufferedReadCloser) Close() error { - return brc.closer.Close() -} - // File interface implementations for File func (f *File) Close() error { @@ -103,7 +86,6 @@ func (f *File) Read(p []byte) (n int, err error) { // If we haven't started streaming the file yet or need to reposition if f.reader == nil || f.seekPending { - // Close existing reader if we're repositioning if f.reader != nil && f.seekPending { f.reader.Close() f.reader = nil @@ -114,39 +96,31 @@ func (f *File) Read(p []byte) (n int, err error) { return 0, fmt.Errorf("failed to get download link for file") } - // Create an HTTP GET request to the file's URL. req, err := http.NewRequest("GET", downloadLink, nil) if err != nil { return 0, fmt.Errorf("failed to create HTTP request: %w", err) } - // Request only the bytes starting from our current offset if f.offset > 0 { req.Header.Set("Range", fmt.Sprintf("bytes=%d-", f.offset)) } - // Add important headers for streaming + // Set headers as needed req.Header.Set("Connection", "keep-alive") req.Header.Set("Accept", "*/*") - req.Header.Set("User-Agent", "Infuse/7.0.2 (iOS)") - req.Header.Set("Accept-Encoding", "gzip, deflate, br") resp, err := sharedClient.Do(req) if err != nil { return 0, fmt.Errorf("HTTP request error: %w", err) } - - // Check response codes more carefully if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent { resp.Body.Close() return 0, fmt.Errorf("unexpected HTTP status: %d", resp.StatusCode) } - - f.reader = newBufferedReadCloser(resp.Body) + f.reader = resp.Body f.seekPending = false } - // Read data from the HTTP stream. 
n, err = f.reader.Read(p) f.offset += int64(n) From 9469c98df73a2f72d66de1fefb7f7751a6cba573 Mon Sep 17 00:00:00 2001 From: Mukhtar Akere Date: Mon, 24 Mar 2025 12:12:38 +0100 Subject: [PATCH 13/39] Add support for different folder naming; minor bug fixes --- internal/config/config.go | 8 ++ internal/utils/regex.go | 1 - pkg/debrid/alldebrid/alldebrid.go | 48 ++++++-- pkg/debrid/alldebrid/types.go | 8 ++ pkg/debrid/debrid/cache.go | 167 ++++++++++++++------------ pkg/debrid/debrid/refresh.go | 23 +++- pkg/debrid/debrid/xml.go | 5 +- pkg/debrid/debrid_link/debrid_link.go | 76 +++++++++++- pkg/debrid/realdebrid/realdebrid.go | 51 ++------ pkg/debrid/torbox/torbox.go | 16 +-- pkg/debrid/types/debrid.go | 3 +- pkg/qbit/torrent.go | 2 +- pkg/webdav/file.go | 1 - pkg/webdav/handler.go | 50 +------- 14 files changed, 249 insertions(+), 210 deletions(-) diff --git a/internal/config/config.go b/internal/config/config.go index 375b43e..042877a 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -30,6 +30,7 @@ type Debrid struct { TorrentRefreshInterval string `json:"torrent_refresh_interval"` DownloadLinksRefreshInterval string `json:"downloads_refresh_interval"` TorrentRefreshWorkers int `json:"torrent_refresh_workers"` + WebDavFolderNaming string `json:"webdav_folder_naming"` } type Proxy struct { @@ -78,6 +79,10 @@ type WebDav struct { DownloadLinksRefreshInterval string `json:"download_links_refresh_interval"` Workers int `json:"workers"` + // Folder + FolderNaming string `json:"folder_naming"` + + // Rclone RcUrl string `json:"rc_url"` RcUser string `json:"rc_user"` RcPass string `json:"rc_pass"` @@ -314,5 +319,8 @@ func (c *Config) GetDebridWebDav(d Debrid) Debrid { if d.TorrentRefreshWorkers == 0 { d.TorrentRefreshWorkers = cmp.Or(c.WebDav.Workers, 30) // 30 workers } + if d.WebDavFolderNaming == "" { + d.WebDavFolderNaming = cmp.Or(c.WebDav.FolderNaming, "original_no_ext") + } return d } diff --git a/internal/utils/regex.go 
b/internal/utils/regex.go index 2ac0722..ae4fd8d 100644 --- a/internal/utils/regex.go +++ b/internal/utils/regex.go @@ -37,7 +37,6 @@ func RemoveInvalidChars(value string) string { } func RemoveExtension(value string) string { - value = RemoveInvalidChars(value) re := regexp.MustCompile(VIDEOMATCH + "|" + MUSICMATCH) // Find the last index of the matched extension diff --git a/pkg/debrid/alldebrid/alldebrid.go b/pkg/debrid/alldebrid/alldebrid.go index 0ee2708..a24b63d 100644 --- a/pkg/debrid/alldebrid/alldebrid.go +++ b/pkg/debrid/alldebrid/alldebrid.go @@ -128,6 +128,7 @@ func flattenFiles(files []MagnetFile, parentPath string, index *int) map[string] Name: fileName, Size: f.Size, Path: currentPath, + Link: f.Link, } result[file.Name] = file } @@ -239,7 +240,7 @@ func (ad *AllDebrid) GenerateDownloadLinks(t *types.Torrent) error { return nil } -func (ad *AllDebrid) GetDownloadLink(t *types.Torrent, file *types.File) *types.File { +func (ad *AllDebrid) GetDownloadLink(t *types.Torrent, file *types.File) (string, error) { url := fmt.Sprintf("%s/link/unlock", ad.Host) query := gourl.Values{} query.Add("link", file.Link) @@ -247,16 +248,17 @@ func (ad *AllDebrid) GetDownloadLink(t *types.Torrent, file *types.File) *types. 
req, _ := http.NewRequest(http.MethodGet, url, nil) resp, err := ad.client.MakeRequest(req) if err != nil { - return nil + return "", err } var data DownloadLink if err = json.Unmarshal(resp, &data); err != nil { - return nil + return "", err } link := data.Data.Link - file.DownloadLink = link - file.Generated = time.Now() - return file + if link == "" { + return "", fmt.Errorf("error getting download links %s", data.Error.Message) + } + return link, nil } func (ad *AllDebrid) GetCheckCached() bool { @@ -264,7 +266,35 @@ func (ad *AllDebrid) GetCheckCached() bool { } func (ad *AllDebrid) GetTorrents() ([]*types.Torrent, error) { - return nil, nil + url := fmt.Sprintf("%s/magnet/status?status=ready", ad.Host) + req, _ := http.NewRequest(http.MethodGet, url, nil) + resp, err := ad.client.MakeRequest(req) + torrents := make([]*types.Torrent, 0) + if err != nil { + return torrents, err + } + var res TorrentsListResponse + err = json.Unmarshal(resp, &res) + if err != nil { + ad.logger.Info().Msgf("Error unmarshalling torrent info: %s", err) + return torrents, err + } + for _, magnet := range res.Data.Magnets { + torrents = append(torrents, &types.Torrent{ + Id: strconv.Itoa(magnet.Id), + Name: magnet.Filename, + Bytes: magnet.Size, + Status: getAlldebridStatus(magnet.StatusCode), + Filename: magnet.Filename, + OriginalFilename: magnet.Filename, + Files: make(map[string]types.File), + InfoHash: magnet.Hash, + Debrid: ad.Name, + MountPath: ad.MountPath, + }) + } + + return torrents, nil } func (ad *AllDebrid) GetDownloads() (map[string]types.DownloadLinks, error) { @@ -279,10 +309,6 @@ func (ad *AllDebrid) GetDownloadUncached() bool { return ad.DownloadUncached } -func (ad *AllDebrid) ConvertLinksToFiles(links []string) []types.File { - return nil -} - func New(dc config.Debrid) *AllDebrid { rl := request.ParseRateLimit(dc.RateLimit) headers := map[string]string{ diff --git a/pkg/debrid/alldebrid/types.go b/pkg/debrid/alldebrid/types.go index aa489c0..4d88af5 100644 --- 
a/pkg/debrid/alldebrid/types.go +++ b/pkg/debrid/alldebrid/types.go @@ -40,6 +40,14 @@ type TorrentInfoResponse struct { Error *errorResponse `json:"error"` } +type TorrentsListResponse struct { + Status string `json:"status"` + Data struct { + Magnets []magnetInfo `json:"magnets"` + } `json:"data"` + Error *errorResponse `json:"error"` +} + type UploadMagnetResponse struct { Status string `json:"status"` Data struct { diff --git a/pkg/debrid/debrid/cache.go b/pkg/debrid/debrid/cache.go index 10c9a11..7c52fa6 100644 --- a/pkg/debrid/debrid/cache.go +++ b/pkg/debrid/debrid/cache.go @@ -7,6 +7,7 @@ import ( "github.com/goccy/go-json" "github.com/puzpuzpuz/xsync/v3" "github.com/rs/zerolog" + "github.com/sirrobot01/debrid-blackhole/internal/config" "github.com/sirrobot01/debrid-blackhole/internal/logger" "github.com/sirrobot01/debrid-blackhole/internal/utils" "github.com/sirrobot01/debrid-blackhole/pkg/debrid/types" @@ -16,8 +17,14 @@ import ( "sync" "sync/atomic" "time" +) - "github.com/sirrobot01/debrid-blackhole/internal/config" +type WebDavFolderNaming string + +const ( + WebDavUseOriginalName WebDavFolderNaming = "original" + WebDavUseID WebDavFolderNaming = "use_id" + WebDavUseOriginalNameNoExt WebDavFolderNaming = "original_no_ext" ) type DownloadLinkCache struct { @@ -46,6 +53,7 @@ type Cache struct { listings atomic.Value downloadLinks map[string]string // key: file.Link, value: download link PropfindResp *xsync.MapOf[string, PropfindResponse] + folderNaming WebDavFolderNaming // config workers int @@ -62,77 +70,6 @@ type Cache struct { downloadLinksMutex sync.Mutex // for downloadLinks } -type fileInfo struct { - name string - size int64 - mode os.FileMode - modTime time.Time - isDir bool -} - -func (fi *fileInfo) Name() string { return fi.name } -func (fi *fileInfo) Size() int64 { return fi.size } -func (fi *fileInfo) Mode() os.FileMode { return fi.mode } -func (fi *fileInfo) ModTime() time.Time { return fi.modTime } -func (fi *fileInfo) IsDir() bool { 
return fi.isDir } -func (fi *fileInfo) Sys() interface{} { return nil } - -func (c *Cache) setTorrent(t *CachedTorrent) { - c.torrentsMutex.Lock() - c.torrents[t.Id] = t - c.torrentsNames[t.Name] = t - c.torrentsMutex.Unlock() - - c.refreshListings() - - go func() { - if err := c.SaveTorrent(t); err != nil { - c.logger.Debug().Err(err).Msgf("Failed to save torrent %s", t.Id) - } - }() -} - -func (c *Cache) GetListing() []os.FileInfo { - if v, ok := c.listings.Load().([]os.FileInfo); ok { - return v - } - return nil -} - -func (c *Cache) setTorrents(torrents map[string]*CachedTorrent) { - c.torrentsMutex.Lock() - for _, t := range torrents { - c.torrents[t.Id] = t - c.torrentsNames[t.Name] = t - } - - c.torrentsMutex.Unlock() - - c.refreshListings() - - go func() { - if err := c.SaveTorrents(); err != nil { - c.logger.Debug().Err(err).Msgf("Failed to save torrents") - } - }() -} - -func (c *Cache) GetTorrents() map[string]*CachedTorrent { - c.torrentsMutex.RLock() - defer c.torrentsMutex.RUnlock() - result := make(map[string]*CachedTorrent, len(c.torrents)) - for k, v := range c.torrents { - result[k] = v - } - return result -} - -func (c *Cache) GetTorrentNames() map[string]*CachedTorrent { - c.torrentsMutex.RLock() - defer c.torrentsMutex.RUnlock() - return c.torrentsNames -} - func NewCache(dc config.Debrid, client types.Client) *Cache { cfg := config.GetConfig() torrentRefreshInterval, err := time.ParseDuration(dc.TorrentRefreshInterval) @@ -154,9 +91,77 @@ func NewCache(dc config.Debrid, client types.Client) *Cache { torrentRefreshInterval: torrentRefreshInterval, downloadLinksRefreshInterval: downloadLinksRefreshInterval, PropfindResp: xsync.NewMapOf[string, PropfindResponse](), + folderNaming: WebDavFolderNaming(dc.WebDavFolderNaming), } } +func (c *Cache) GetTorrentFolder(torrent *types.Torrent) string { + folderName := torrent.Name + if c.folderNaming == WebDavUseID { + folderName = torrent.Id + } else if c.folderNaming == WebDavUseOriginalNameNoExt { + 
folderName = utils.RemoveExtension(torrent.Name) + } + return folderName +} + +func (c *Cache) setTorrent(t *CachedTorrent) { + c.torrentsMutex.Lock() + c.torrents[t.Id] = t + + c.torrentsNames[c.GetTorrentFolder(t.Torrent)] = t + c.torrentsMutex.Unlock() + + c.refreshListings() + + go func() { + if err := c.SaveTorrent(t); err != nil { + c.logger.Debug().Err(err).Msgf("Failed to save torrent %s", t.Id) + } + }() +} + +func (c *Cache) setTorrents(torrents map[string]*CachedTorrent) { + c.torrentsMutex.Lock() + for _, t := range torrents { + c.torrents[t.Id] = t + c.torrentsNames[c.GetTorrentFolder(t.Torrent)] = t + } + + c.torrentsMutex.Unlock() + + c.refreshListings() + + go func() { + if err := c.SaveTorrents(); err != nil { + c.logger.Debug().Err(err).Msgf("Failed to save torrents") + } + }() +} + +func (c *Cache) GetListing() []os.FileInfo { + if v, ok := c.listings.Load().([]os.FileInfo); ok { + return v + } + return nil +} + +func (c *Cache) GetTorrents() map[string]*CachedTorrent { + c.torrentsMutex.RLock() + defer c.torrentsMutex.RUnlock() + result := make(map[string]*CachedTorrent, len(c.torrents)) + for k, v := range c.torrents { + result[k] = v + } + return result +} + +func (c *Cache) GetTorrentNames() map[string]*CachedTorrent { + c.torrentsMutex.RLock() + defer c.torrentsMutex.RUnlock() + return c.torrentsNames +} + func (c *Cache) Start() error { if err := os.MkdirAll(c.dir, 0755); err != nil { return fmt.Errorf("failed to create cache directory: %w", err) @@ -220,7 +225,6 @@ func (c *Cache) load() (map[string]*CachedTorrent, error) { if len(ct.Files) != 0 { // We can assume the torrent is complete ct.IsComplete = true - ct.Torrent.Name = utils.RemoveExtension(ct.Torrent.OriginalFilename) // Update the name torrents[ct.Id] = &ct } } @@ -445,19 +449,21 @@ func (c *Cache) GetDownloadLink(torrentId, filename, fileLink string) string { } c.logger.Trace().Msgf("Getting download link for %s", ct.Name) - f := c.client.GetDownloadLink(ct.Torrent, &file) - if 
f == nil { + link, err := c.client.GetDownloadLink(ct.Torrent, &file) + if err != nil { + c.logger.Error().Err(err).Msg("Failed to get download link") return "" } - file.DownloadLink = f.DownloadLink + file.DownloadLink = link + file.Generated = time.Now() ct.Files[filename] = file - go c.updateDownloadLink(f) + go c.updateDownloadLink(file) go c.setTorrent(ct) - return f.DownloadLink + return file.DownloadLink } -func (c *Cache) updateDownloadLink(file *types.File) { +func (c *Cache) updateDownloadLink(file types.File) { c.downloadLinksMutex.Lock() defer c.downloadLinksMutex.Unlock() c.downloadLinks[file.Link] = file.DownloadLink @@ -493,7 +499,7 @@ func (c *Cache) DeleteTorrents(ids []string) { for _, id := range ids { if t, ok := c.torrents[id]; ok { delete(c.torrents, id) - delete(c.torrentsNames, t.Name) + delete(c.torrentsNames, c.GetTorrentFolder(t.Torrent)) c.removeFromDB(id) } } @@ -507,6 +513,7 @@ func (c *Cache) removeFromDB(torrentId string) { } func (c *Cache) OnRemove(torrentId string) { + c.logger.Debug().Msgf("OnRemove triggered for %s", torrentId) go c.DeleteTorrent(torrentId) go c.refreshListings() } diff --git a/pkg/debrid/debrid/refresh.go b/pkg/debrid/debrid/refresh.go index 9bfaefb..66d3db1 100644 --- a/pkg/debrid/debrid/refresh.go +++ b/pkg/debrid/debrid/refresh.go @@ -15,6 +15,21 @@ import ( "time" ) +type fileInfo struct { + name string + size int64 + mode os.FileMode + modTime time.Time + isDir bool +} + +func (fi *fileInfo) Name() string { return fi.name } +func (fi *fileInfo) Size() int64 { return fi.size } +func (fi *fileInfo) Mode() os.FileMode { return fi.mode } +func (fi *fileInfo) ModTime() time.Time { return fi.modTime } +func (fi *fileInfo) IsDir() bool { return fi.isDir } +func (fi *fileInfo) Sys() interface{} { return nil } + func (c *Cache) refreshListings() { if c.listingRefreshMu.TryLock() { defer c.listingRefreshMu.Unlock() @@ -23,11 +38,9 @@ func (c *Cache) refreshListings() { } // Copy the current torrents to avoid 
concurrent issues c.torrentsMutex.RLock() - torrents := make([]string, 0, len(c.torrents)) - for _, t := range c.torrents { - if t != nil && t.Torrent != nil { - torrents = append(torrents, t.Name) - } + torrents := make([]string, 0, len(c.torrentsNames)) + for k, _ := range c.torrentsNames { + torrents = append(torrents, k) } c.torrentsMutex.RUnlock() diff --git a/pkg/debrid/debrid/xml.go b/pkg/debrid/debrid/xml.go index 80dfcce..c1fde10 100644 --- a/pkg/debrid/debrid/xml.go +++ b/pkg/debrid/debrid/xml.go @@ -5,7 +5,6 @@ import ( "github.com/beevik/etree" "github.com/sirrobot01/debrid-blackhole/internal/request" "net/http" - "net/url" "os" path "path/filepath" "time" @@ -19,7 +18,7 @@ func (c *Cache) RefreshXml() error { return fmt.Errorf("failed to refresh XML for %s: %v", parent, err) } } - c.logger.Debug().Msgf("Refreshed XML cache for %s", c.client.GetName()) + c.logger.Trace().Msgf("Refreshed XML cache for %s", c.client.GetName()) return nil } @@ -49,7 +48,7 @@ func (c *Cache) refreshParentXml(torrents []os.FileInfo, parent string) error { torrentPath := fmt.Sprintf("/webdav/%s/%s/%s/", c.client.GetName(), parent, - url.PathEscape(name), + name, ) addDirectoryResponse(multistatus, torrentPath, name, currentTime) diff --git a/pkg/debrid/debrid_link/debrid_link.go b/pkg/debrid/debrid_link/debrid_link.go index 8027be4..ae5bb3c 100644 --- a/pkg/debrid/debrid_link/debrid_link.go +++ b/pkg/debrid/debrid_link/debrid_link.go @@ -242,8 +242,8 @@ func (dl *DebridLink) GetDownloads() (map[string]types.DownloadLinks, error) { return nil, nil } -func (dl *DebridLink) GetDownloadLink(t *types.Torrent, file *types.File) *types.File { - return file +func (dl *DebridLink) GetDownloadLink(t *types.Torrent, file *types.File) (string, error) { + return file.DownloadLink, nil } func (dl *DebridLink) GetDownloadingStatus() []string { @@ -281,9 +281,75 @@ func New(dc config.Debrid) *DebridLink { } func (dl *DebridLink) GetTorrents() ([]*types.Torrent, error) { - return nil, nil + 
page := 0 + perPage := 100 + torrents := make([]*types.Torrent, 0) + for { + t, err := dl.getTorrents(page, perPage) + if err != nil { + break + } + if len(t) == 0 { + break + } + torrents = append(torrents, t...) + page++ + } + return torrents, nil } -func (dl *DebridLink) ConvertLinksToFiles(links []string) []types.File { - return nil +func (dl *DebridLink) getTorrents(page, perPage int) ([]*types.Torrent, error) { + url := fmt.Sprintf("%s/seedbox/list?page=%d&perPage=%d", dl.Host, page, perPage) + req, _ := http.NewRequest(http.MethodGet, url, nil) + resp, err := dl.client.MakeRequest(req) + torrents := make([]*types.Torrent, 0) + if err != nil { + return torrents, err + } + var res TorrentInfo + err = json.Unmarshal(resp, &res) + if err != nil { + dl.logger.Info().Msgf("Error unmarshalling torrent info: %s", err) + return torrents, err + } + + data := *res.Value + + if len(data) == 0 { + return torrents, nil + } + for _, t := range data { + if t.Status != 100 { + continue + } + torrent := &types.Torrent{ + Id: t.ID, + Name: t.Name, + Bytes: t.TotalSize, + Status: "downloaded", + Filename: t.Name, + OriginalFilename: t.Name, + InfoHash: t.HashString, + Files: make(map[string]types.File), + Debrid: dl.Name, + MountPath: dl.MountPath, + } + cfg := config.GetConfig() + for _, f := range t.Files { + if !cfg.IsSizeAllowed(f.Size) { + continue + } + file := types.File{ + Id: f.ID, + Name: f.Name, + Size: f.Size, + Path: f.Name, + DownloadLink: f.DownloadURL, + Link: f.DownloadURL, + } + torrent.Files[f.Name] = file + } + torrents = append(torrents, torrent) + } + return torrents, nil } diff --git a/pkg/debrid/realdebrid/realdebrid.go b/pkg/debrid/realdebrid/realdebrid.go index 0f268e7..d4027ec 100644 --- a/pkg/debrid/realdebrid/realdebrid.go +++ b/pkg/debrid/realdebrid/realdebrid.go @@ -173,16 +173,15 @@ func (r *RealDebrid) UpdateTorrent(t *types.Torrent) error { if err != nil { return err } - name := utils.RemoveExtension(data.OriginalFilename) - t.Name = name + 
t.Name = data.OriginalFilename t.Bytes = data.Bytes - t.Folder = name + t.Folder = data.OriginalFilename t.Progress = data.Progress t.Status = data.Status t.Speed = data.Speed t.Seeders = data.Seeders t.Filename = data.Filename - t.OriginalFilename = name + t.OriginalFilename = data.OriginalFilename t.Links = data.Links t.MountPath = r.MountPath t.Debrid = r.Name @@ -204,11 +203,10 @@ func (r *RealDebrid) CheckStatus(t *types.Torrent, isSymlink bool) (*types.Torre return t, err } status := data.Status - name := utils.RemoveExtension(data.OriginalFilename) - t.Name = name // Important because some magnet changes the name - t.Folder = name + t.Name = data.Filename // Important because some magnet changes the name + t.Folder = data.OriginalFilename t.Filename = data.Filename - t.OriginalFilename = name + t.OriginalFilename = data.OriginalFilename t.Bytes = data.Bytes t.Progress = data.Progress t.Speed = data.Speed @@ -294,34 +292,7 @@ func (r *RealDebrid) GenerateDownloadLinks(t *types.Torrent) error { return nil } -func (r *RealDebrid) ConvertLinksToFiles(links []string) []types.File { - files := make([]types.File, 0) - for _, l := range links { - url := fmt.Sprintf("%s/unrestrict/link/", r.Host) - payload := gourl.Values{ - "link": {l}, - } - req, _ := http.NewRequest(http.MethodPost, url, strings.NewReader(payload.Encode())) - resp, err := r.client.MakeRequest(req) - if err != nil { - continue - } - var data UnrestrictResponse - if err = json.Unmarshal(resp, &data); err != nil { - continue - } - files = append(files, types.File{ - Name: data.Filename, - Size: data.Filesize, - Link: l, - DownloadLink: data.Download, - Generated: time.Now(), - }) - } - return files -} - -func (r *RealDebrid) GetDownloadLink(t *types.Torrent, file *types.File) *types.File { +func (r *RealDebrid) GetDownloadLink(t *types.Torrent, file *types.File) (string, error) { url := fmt.Sprintf("%s/unrestrict/link/", r.Host) payload := gourl.Values{ "link": {file.Link}, @@ -329,15 +300,13 @@ func 
(r *RealDebrid) GetDownloadLink(t *types.Torrent, file *types.File) *types. req, _ := http.NewRequest(http.MethodPost, url, strings.NewReader(payload.Encode())) resp, err := r.client.MakeRequest(req) if err != nil { - return nil + return "", err } var data UnrestrictResponse if err = json.Unmarshal(resp, &data); err != nil { - return nil + return "", err } - file.DownloadLink = data.Download - file.Generated = time.Now() - return file + return data.Download, nil } func (r *RealDebrid) GetCheckCached() bool { diff --git a/pkg/debrid/torbox/torbox.go b/pkg/debrid/torbox/torbox.go index 9c5e31e..91ff0ba 100644 --- a/pkg/debrid/torbox/torbox.go +++ b/pkg/debrid/torbox/torbox.go @@ -273,7 +273,7 @@ func (tb *Torbox) GenerateDownloadLinks(t *types.Torrent) error { return nil } -func (tb *Torbox) GetDownloadLink(t *types.Torrent, file *types.File) *types.File { +func (tb *Torbox) GetDownloadLink(t *types.Torrent, file *types.File) (string, error) { url := fmt.Sprintf("%s/api/torrents/requestdl/", tb.Host) query := gourl.Values{} query.Add("torrent_id", t.Id) @@ -283,19 +283,17 @@ func (tb *Torbox) GetDownloadLink(t *types.Torrent, file *types.File) *types.Fil req, _ := http.NewRequest(http.MethodGet, url, nil) resp, err := tb.client.MakeRequest(req) if err != nil { - return nil + return "", err } var data DownloadLinksResponse if err = json.Unmarshal(resp, &data); err != nil { - return nil + return "", err } if data.Data == nil { - return nil + return "", fmt.Errorf("error getting download links") } link := *data.Data - file.DownloadLink = link - file.Generated = time.Now() - return file + return link, nil } func (tb *Torbox) GetDownloadingStatus() []string { @@ -336,10 +334,6 @@ func New(dc config.Debrid) *Torbox { } } -func (tb *Torbox) ConvertLinksToFiles(links []string) []types.File { - return nil -} - func (tb *Torbox) GetDownloads() (map[string]types.DownloadLinks, error) { return nil, nil } diff --git a/pkg/debrid/types/debrid.go b/pkg/debrid/types/debrid.go index 
e1adf4d..7bfb873 100644 --- a/pkg/debrid/types/debrid.go +++ b/pkg/debrid/types/debrid.go @@ -8,8 +8,7 @@ type Client interface { SubmitMagnet(tr *Torrent) (*Torrent, error) CheckStatus(tr *Torrent, isSymlink bool) (*Torrent, error) GenerateDownloadLinks(tr *Torrent) error - GetDownloadLink(tr *Torrent, file *File) *File - ConvertLinksToFiles(links []string) []File + GetDownloadLink(tr *Torrent, file *File) (string, error) DeleteTorrent(torrentId string) IsAvailable(infohashes []string) map[string]bool GetCheckCached() bool diff --git a/pkg/qbit/torrent.go b/pkg/qbit/torrent.go index 2193ec3..56c5b11 100644 --- a/pkg/qbit/torrent.go +++ b/pkg/qbit/torrent.go @@ -116,7 +116,7 @@ func (q *QBit) ProcessFiles(torrent *Torrent, debridTorrent *debrid.Torrent, arr if err != nil { return } - rclonePath := filepath.Join(debridTorrent.MountPath, debridTorrent.Name) + rclonePath := filepath.Join(debridTorrent.MountPath, cache.GetTorrentFolder(debridTorrent)) torrentSymlinkPath, err = q.createSymlinks(debridTorrent, rclonePath, debridTorrent.Name) } else { diff --git a/pkg/webdav/file.go b/pkg/webdav/file.go index ba170cd..20b775c 100644 --- a/pkg/webdav/file.go +++ b/pkg/webdav/file.go @@ -73,7 +73,6 @@ func (f *File) Read(p []byte) (n int, err error) { if f.metadataOnly { return 0, io.EOF } - // If file content is preloaded, read from memory. 
if f.content != nil { if f.offset >= int64(len(f.content)) { diff --git a/pkg/webdav/handler.go b/pkg/webdav/handler.go index fafcfc5..939d9af 100644 --- a/pkg/webdav/handler.go +++ b/pkg/webdav/handler.go @@ -3,7 +3,6 @@ package webdav import ( "bytes" "context" - "errors" "fmt" "github.com/rs/zerolog" "github.com/sirrobot01/debrid-blackhole/internal/request" @@ -12,7 +11,6 @@ import ( "golang.org/x/net/webdav" "html/template" "io" - "net" "net/http" "net/http/httptest" "os" @@ -229,6 +227,7 @@ func (h *Handler) getFileInfos(torrent *types.Torrent) []os.FileInfo { } func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + // Handle OPTIONS if r.Method == "OPTIONS" { w.WriteHeader(http.StatusOK) @@ -473,50 +472,3 @@ func (h *Handler) serveDirectory(w http.ResponseWriter, r *http.Request, file we return } } - -func (h *Handler) ioCopy(reader io.Reader, w io.Writer) (int64, error) { - // Start with a smaller buffer for faster first byte delivery. - buf := make([]byte, 4*1024) // 8KB initial buffer - totalWritten := int64(0) - firstChunk := true - - for { - n, err := reader.Read(buf) - if n > 0 { - nw, ew := w.Write(buf[:n]) - if ew != nil { - var opErr *net.OpError - if errors.As(ew, &opErr) && opErr.Err.Error() == "write: broken pipe" { - h.logger.Debug().Msg("Client closed connection (normal for streaming)") - return totalWritten, ew - } - return totalWritten, ew - } - totalWritten += int64(nw) - - // Flush immediately after the first chunk. - if firstChunk { - if flusher, ok := w.(http.Flusher); ok { - flusher.Flush() - } - firstChunk = false - // Increase buffer size for subsequent reads. - buf = make([]byte, 512*1024) // 64KB buffer after first chunk - } else if totalWritten%(2*1024*1024) < int64(n) { - // Flush roughly every 2MB of data transferred. 
- if flusher, ok := w.(http.Flusher); ok { - flusher.Flush() - } - } - } - - if err != nil { - if err != io.EOF { - h.logger.Error().Err(err).Msg("Error reading from file") - } - break - } - } - - return totalWritten, nil -} From 56bca562f4cd19560d4b4cf42068e370ca25d22e Mon Sep 17 00:00:00 2001 From: Mukhtar Akere Date: Mon, 24 Mar 2025 20:39:35 +0100 Subject: [PATCH 14/39] Fix duplicate links for files --- pkg/debrid/debrid/cache.go | 66 ++++++++++++++++++++++++----- pkg/debrid/debrid/xml.go | 2 +- pkg/debrid/realdebrid/realdebrid.go | 15 ++----- pkg/qbit/torrent.go | 2 +- pkg/webdav/webdav.go | 21 +++++++++ 5 files changed, 83 insertions(+), 23 deletions(-) diff --git a/pkg/debrid/debrid/cache.go b/pkg/debrid/debrid/cache.go index 7c52fa6..fdabe2d 100644 --- a/pkg/debrid/debrid/cache.go +++ b/pkg/debrid/debrid/cache.go @@ -27,10 +27,6 @@ const ( WebDavUseOriginalNameNoExt WebDavFolderNaming = "original_no_ext" ) -type DownloadLinkCache struct { - Link string `json:"download_link"` -} - type PropfindResponse struct { Data []byte GzippedData []byte @@ -112,8 +108,6 @@ func (c *Cache) setTorrent(t *CachedTorrent) { c.torrentsNames[c.GetTorrentFolder(t.Torrent)] = t c.torrentsMutex.Unlock() - c.refreshListings() - go func() { if err := c.SaveTorrent(t); err != nil { c.logger.Debug().Err(err).Msgf("Failed to save torrent %s", t.Id) @@ -224,6 +218,19 @@ func (c *Cache) load() (map[string]*CachedTorrent, error) { } if len(ct.Files) != 0 { // We can assume the torrent is complete + + // Make sure no file has a duplicate link + linkStore := make(map[string]bool) + for _, f := range ct.Files { + if _, ok := linkStore[f.Link]; ok { + // Duplicate link, refresh the torrent + ct = *c.refreshTorrent(&ct) + break + } else { + linkStore[f.Link] = true + } + } + ct.IsComplete = true torrents[ct.Id] = &ct } @@ -369,7 +376,7 @@ func (c *Cache) sync(torrents []*types.Torrent) error { return // Channel closed, exit goroutine } - if err := c.ProcessTorrent(t, true); err != nil { + if 
err := c.ProcessTorrent(t, false); err != nil { c.logger.Error().Err(err).Str("torrent", t.Name).Msg("sync error") atomic.AddInt64(&errorCount, 1) } @@ -402,6 +409,7 @@ func (c *Cache) sync(torrents []*types.Torrent) error { // Wait for all workers to complete wg.Wait() + c.refreshListings() c.logger.Info().Msgf("Sync complete: %d torrents processed, %d errors", len(torrents), errorCount) return nil } @@ -412,13 +420,16 @@ func (c *Cache) ProcessTorrent(t *types.Torrent, refreshRclone bool) error { return fmt.Errorf("failed to update torrent: %w", err) } } - ct := &CachedTorrent{ Torrent: t, LastRead: time.Now(), IsComplete: len(t.Files) > 0, } c.setTorrent(ct) + + if refreshRclone { + c.refreshListings() + } return nil } @@ -447,7 +458,6 @@ func (c *Cache) GetDownloadLink(torrentId, filename, fileLink string) string { file = ct.Files[filename] } } - c.logger.Trace().Msgf("Getting download link for %s", ct.Name) link, err := c.client.GetDownloadLink(ct.Torrent, &file) if err != nil { @@ -463,6 +473,39 @@ func (c *Cache) GetDownloadLink(torrentId, filename, fileLink string) string { return file.DownloadLink } +func (c *Cache) GenerateDownloadLinks(t *CachedTorrent) { + if err := c.client.GenerateDownloadLinks(t.Torrent); err != nil { + c.logger.Error().Err(err).Msg("Failed to generate download links") + } + for _, file := range t.Files { + c.updateDownloadLink(file) + } + + go func() { + if err := c.SaveTorrent(t); err != nil { + c.logger.Debug().Err(err).Msgf("Failed to save torrent %s", t.Id) + } + }() +} + +func (c *Cache) AddTorrent(t *types.Torrent) error { + if len(t.Files) == 0 { + if err := c.client.UpdateTorrent(t); err != nil { + return fmt.Errorf("failed to update torrent: %w", err) + } + } + ct := &CachedTorrent{ + Torrent: t, + LastRead: time.Now(), + IsComplete: len(t.Files) > 0, + } + c.setTorrent(ct) + c.refreshListings() + go c.GenerateDownloadLinks(ct) + return nil + +} + func (c *Cache) updateDownloadLink(file types.File) { 
c.downloadLinksMutex.Lock() defer c.downloadLinksMutex.Unlock() @@ -489,6 +532,8 @@ func (c *Cache) DeleteTorrent(id string) { delete(c.torrentsNames, t.Name) c.removeFromDB(id) + + c.refreshListings() } } @@ -500,9 +545,10 @@ func (c *Cache) DeleteTorrents(ids []string) { if t, ok := c.torrents[id]; ok { delete(c.torrents, id) delete(c.torrentsNames, c.GetTorrentFolder(t.Torrent)) - c.removeFromDB(id) + go c.removeFromDB(id) } } + c.refreshListings() } func (c *Cache) removeFromDB(torrentId string) { diff --git a/pkg/debrid/debrid/xml.go b/pkg/debrid/debrid/xml.go index c1fde10..23d20c1 100644 --- a/pkg/debrid/debrid/xml.go +++ b/pkg/debrid/debrid/xml.go @@ -18,7 +18,7 @@ func (c *Cache) RefreshXml() error { return fmt.Errorf("failed to refresh XML for %s: %v", parent, err) } } - c.logger.Trace().Msgf("Refreshed XML cache for %s", c.client.GetName()) + c.logger.Debug().Msgf("Refreshed XML cache for %s", c.client.GetName()) return nil } diff --git a/pkg/debrid/realdebrid/realdebrid.go b/pkg/debrid/realdebrid/realdebrid.go index d4027ec..7467ef0 100644 --- a/pkg/debrid/realdebrid/realdebrid.go +++ b/pkg/debrid/realdebrid/realdebrid.go @@ -74,12 +74,6 @@ func getTorrentFiles(t *types.Torrent, data TorrentInfo, validate bool) map[stri _link = data.Links[idx] } - if a, ok := t.Files[name]; ok { - a.Link = _link - files[name] = a - continue - } - file := types.File{ Name: name, Path: name, @@ -268,17 +262,15 @@ func (r *RealDebrid) DeleteTorrent(torrentId string) { func (r *RealDebrid) GenerateDownloadLinks(t *types.Torrent) error { url := fmt.Sprintf("%s/unrestrict/link/", r.Host) + files := make(map[string]types.File) for _, f := range t.Files { - if f.DownloadLink != "" { - // Or check the generated link - continue - } payload := gourl.Values{ "link": {f.Link}, } req, _ := http.NewRequest(http.MethodPost, url, strings.NewReader(payload.Encode())) resp, err := r.client.MakeRequest(req) if err != nil { + fmt.Println(err) return err } var data UnrestrictResponse @@ 
-287,8 +279,9 @@ func (r *RealDebrid) GenerateDownloadLinks(t *types.Torrent) error { } f.DownloadLink = data.Download f.Generated = time.Now() - t.Files[f.Name] = f + files[f.Name] = f } + t.Files = files return nil } diff --git a/pkg/qbit/torrent.go b/pkg/qbit/torrent.go index 56c5b11..57ab409 100644 --- a/pkg/qbit/torrent.go +++ b/pkg/qbit/torrent.go @@ -112,7 +112,7 @@ func (q *QBit) ProcessFiles(torrent *Torrent, debridTorrent *debrid.Torrent, arr if ok { q.logger.Info().Msgf("Using internal webdav for %s", debridTorrent.Debrid) // Use webdav to download the file - err := cache.ProcessTorrent(debridTorrent, true) + err := cache.AddTorrent(debridTorrent) if err != nil { return } diff --git a/pkg/webdav/webdav.go b/pkg/webdav/webdav.go index 3617943..f3469a4 100644 --- a/pkg/webdav/webdav.go +++ b/pkg/webdav/webdav.go @@ -13,12 +13,14 @@ import ( type WebDav struct { Handlers []*Handler + ready chan struct{} } func New() *WebDav { svc := service.GetService() w := &WebDav{ Handlers: make([]*Handler, 0), + ready: make(chan struct{}), } for name, c := range svc.Debrid.Caches { h := NewHandler(name, c, logger.NewLogger(fmt.Sprintf("%s-webdav", name))) @@ -38,6 +40,22 @@ func (wd *WebDav) Routes() http.Handler { wr := chi.NewRouter() wr.Use(wd.commonMiddleware) + // Create a readiness check middleware + readinessMiddleware := func(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + select { + case <-wd.ready: + // WebDAV is ready, proceed + next.ServeHTTP(w, r) + default: + // WebDAV is still initializing + w.Header().Set("Retry-After", "10") + http.Error(w, "WebDAV service is initializing, please try again shortly", http.StatusServiceUnavailable) + } + }) + } + wr.Use(readinessMiddleware) + wd.setupRootHandler(wr) wd.mountHandlers(wr) @@ -65,6 +83,9 @@ func (wd *WebDav) Start(ctx context.Context) error { go func() { wg.Wait() close(errChan) + + // Signal that WebDAV is ready + close(wd.ready) }() // Collect 
all errors From 7bd38736b1e4f5070dd8818d0efb4a3a0719bd27 Mon Sep 17 00:00:00 2001 From: Mukhtar Akere Date: Wed, 26 Mar 2025 21:12:01 +0100 Subject: [PATCH 15/39] Fix for file namings --- doc/config.full.json | 2 + internal/config/config.go | 6 + internal/request/errors.go | 23 +++ internal/utils/magnet.go | 12 ++ pkg/debrid/alldebrid/alldebrid.go | 8 + pkg/debrid/debrid/cache.go | 177 ++++++++++-------- pkg/debrid/debrid/refresh.go | 66 +++++-- pkg/debrid/debrid/repair.go | 166 +++++++++++++++++ pkg/debrid/debrid_link/debrid_link.go | 8 + pkg/debrid/realdebrid/realdebrid.go | 115 +++++++++--- pkg/debrid/torbox/torbox.go | 8 + pkg/debrid/types/debrid.go | 2 + pkg/repair/clean.go | 146 +++++++++++++++ pkg/repair/misc.go | 18 ++ pkg/repair/repair.go | 248 +++++++++----------------- pkg/webdav/file.go | 12 +- pkg/webdav/handler.go | 1 - pkg/webdav/webdav.go | 1 - 18 files changed, 731 insertions(+), 288 deletions(-) create mode 100644 internal/request/errors.go create mode 100644 pkg/debrid/debrid/repair.go create mode 100644 pkg/repair/clean.go diff --git a/doc/config.full.json b/doc/config.full.json index 8ef74a3..f3eddeb 100644 --- a/doc/config.full.json +++ b/doc/config.full.json @@ -95,6 +95,8 @@ "webdav": { "torrents_refresh_interval": "15s", "download_links_refresh_interval": "1h", + "folder_naming": "original", + "auto_expire_links_after": "24h", "rc_url": "http://192.168.0.219:9990", "rc_user": "your_rclone_rc_user", "rc_pass": "your_rclone_rc_pass" diff --git a/internal/config/config.go b/internal/config/config.go index 042877a..d8a8554 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -31,6 +31,7 @@ type Debrid struct { DownloadLinksRefreshInterval string `json:"downloads_refresh_interval"` TorrentRefreshWorkers int `json:"torrent_refresh_workers"` WebDavFolderNaming string `json:"webdav_folder_naming"` + AutoExpireLinksAfter string `json:"auto_expire_links_after"` } type Proxy struct { @@ -67,6 +68,7 @@ type Repair struct { 
RunOnStart bool `json:"run_on_start"` ZurgURL string `json:"zurg_url"` AutoProcess bool `json:"auto_process"` + UseWebDav bool `json:"use_webdav"` } type Auth struct { @@ -78,6 +80,7 @@ type WebDav struct { TorrentsRefreshInterval string `json:"torrents_refresh_interval"` DownloadLinksRefreshInterval string `json:"download_links_refresh_interval"` Workers int `json:"workers"` + AutoExpireLinksAfter string `json:"auto_expire_links_after"` // Folder FolderNaming string `json:"folder_naming"` @@ -322,5 +325,8 @@ func (c *Config) GetDebridWebDav(d Debrid) Debrid { if d.WebDavFolderNaming == "" { d.WebDavFolderNaming = cmp.Or(c.WebDav.FolderNaming, "original_no_ext") } + if d.AutoExpireLinksAfter == "" { + d.AutoExpireLinksAfter = cmp.Or(c.WebDav.AutoExpireLinksAfter, "24h") + } return d } diff --git a/internal/request/errors.go b/internal/request/errors.go new file mode 100644 index 0000000..37ad6e4 --- /dev/null +++ b/internal/request/errors.go @@ -0,0 +1,23 @@ +package request + +type HTTPError struct { + StatusCode int + Message string + Code string +} + +func (e *HTTPError) Error() string { + return e.Message +} + +var HosterUnavailableError = &HTTPError{ + StatusCode: 503, + Message: "Hoster is unavailable", + Code: "hoster_unavailable", +} + +var ErrLinkBroken = &HTTPError{ + StatusCode: 404, + Message: "File is unavailable", + Code: "file_unavailable", +} diff --git a/internal/utils/magnet.go b/internal/utils/magnet.go index accad90..17d14bc 100644 --- a/internal/utils/magnet.go +++ b/internal/utils/magnet.go @@ -233,3 +233,15 @@ func GetInfohashFromURL(url string) (string, error) { infoHash := hash.HexString() return infoHash, nil } + +func ConstructMagnet(infoHash, name string) *Magnet { + // Create a magnet link from the infohash and name + name = url.QueryEscape(strings.TrimSpace(name)) + magnetUri := fmt.Sprintf("magnet:?xt=urn:btih:%s&dn=%s", infoHash, name) + return &Magnet{ + InfoHash: infoHash, + Name: name, + Size: 0, + Link: magnetUri, + } +} diff 
--git a/pkg/debrid/alldebrid/alldebrid.go b/pkg/debrid/alldebrid/alldebrid.go index a24b63d..5ff8339 100644 --- a/pkg/debrid/alldebrid/alldebrid.go +++ b/pkg/debrid/alldebrid/alldebrid.go @@ -309,6 +309,14 @@ func (ad *AllDebrid) GetDownloadUncached() bool { return ad.DownloadUncached } +func (ad *AllDebrid) CheckLink(link string) error { + return nil +} + +func (ad *AllDebrid) GetMountPath() string { + return ad.MountPath +} + func New(dc config.Debrid) *AllDebrid { rl := request.ParseRateLimit(dc.RateLimit) headers := map[string]string{ diff --git a/pkg/debrid/debrid/cache.go b/pkg/debrid/debrid/cache.go index fdabe2d..7f5c207 100644 --- a/pkg/debrid/debrid/cache.go +++ b/pkg/debrid/debrid/cache.go @@ -3,12 +3,14 @@ package debrid import ( "bufio" "context" + "errors" "fmt" "github.com/goccy/go-json" "github.com/puzpuzpuz/xsync/v3" "github.com/rs/zerolog" "github.com/sirrobot01/debrid-blackhole/internal/config" "github.com/sirrobot01/debrid-blackhole/internal/logger" + "github.com/sirrobot01/debrid-blackhole/internal/request" "github.com/sirrobot01/debrid-blackhole/internal/utils" "github.com/sirrobot01/debrid-blackhole/pkg/debrid/types" "os" @@ -39,31 +41,43 @@ type CachedTorrent struct { IsComplete bool `json:"is_complete"` } +type downloadLinkCache struct { + Link string + ExpiresAt time.Time +} + +type RepairRequest struct { + TorrentID string + Priority int + FileName string +} + type Cache struct { dir string client types.Client logger zerolog.Logger - torrents map[string]*CachedTorrent // key: torrent.Id, value: *CachedTorrent - torrentsNames map[string]*CachedTorrent // key: torrent.Name, value: torrent + torrents *xsync.MapOf[string, *CachedTorrent] // key: torrent.Id, value: *CachedTorrent + torrentsNames *xsync.MapOf[string, *CachedTorrent] // key: torrent.Name, value: torrent listings atomic.Value - downloadLinks map[string]string // key: file.Link, value: download link + downloadLinks *xsync.MapOf[string, downloadLinkCache] PropfindResp 
*xsync.MapOf[string, PropfindResponse] folderNaming WebDavFolderNaming + // repair + repairChan chan RepairRequest + repairsInProgress *xsync.MapOf[string, bool] + // config workers int torrentRefreshInterval time.Duration downloadLinksRefreshInterval time.Duration + autoExpiresLinksAfter time.Duration // refresh mutex listingRefreshMu sync.RWMutex // for refreshing torrents downloadLinksRefreshMu sync.RWMutex // for refreshing download links torrentsRefreshMu sync.RWMutex // for refreshing torrents - - // Data Mutexes - torrentsMutex sync.RWMutex // for torrents and torrentsNames - downloadLinksMutex sync.Mutex // for downloadLinks } func NewCache(dc config.Debrid, client types.Client) *Cache { @@ -76,37 +90,41 @@ func NewCache(dc config.Debrid, client types.Client) *Cache { if err != nil { downloadLinksRefreshInterval = time.Minute * 40 } + autoExpiresLinksAfter, err := time.ParseDuration(dc.AutoExpireLinksAfter) + if err != nil { + autoExpiresLinksAfter = time.Hour * 24 + } return &Cache{ dir: filepath.Join(cfg.Path, "cache", dc.Name), // path to save cache files - torrents: make(map[string]*CachedTorrent), - torrentsNames: make(map[string]*CachedTorrent), + torrents: xsync.NewMapOf[string, *CachedTorrent](), + torrentsNames: xsync.NewMapOf[string, *CachedTorrent](), client: client, logger: logger.NewLogger(fmt.Sprintf("%s-cache", client.GetName())), workers: 200, - downloadLinks: make(map[string]string), + downloadLinks: xsync.NewMapOf[string, downloadLinkCache](), torrentRefreshInterval: torrentRefreshInterval, downloadLinksRefreshInterval: downloadLinksRefreshInterval, PropfindResp: xsync.NewMapOf[string, PropfindResponse](), folderNaming: WebDavFolderNaming(dc.WebDavFolderNaming), + autoExpiresLinksAfter: autoExpiresLinksAfter, + repairsInProgress: xsync.NewMapOf[string, bool](), } } func (c *Cache) GetTorrentFolder(torrent *types.Torrent) string { - folderName := torrent.Name + folderName := torrent.Filename if c.folderNaming == WebDavUseID { folderName = 
torrent.Id } else if c.folderNaming == WebDavUseOriginalNameNoExt { - folderName = utils.RemoveExtension(torrent.Name) + folderName = utils.RemoveExtension(folderName) } return folderName } func (c *Cache) setTorrent(t *CachedTorrent) { - c.torrentsMutex.Lock() - c.torrents[t.Id] = t + c.torrents.Store(t.Id, t) - c.torrentsNames[c.GetTorrentFolder(t.Torrent)] = t - c.torrentsMutex.Unlock() + c.torrentsNames.Store(c.GetTorrentFolder(t.Torrent), t) go func() { if err := c.SaveTorrent(t); err != nil { @@ -116,14 +134,11 @@ func (c *Cache) setTorrent(t *CachedTorrent) { } func (c *Cache) setTorrents(torrents map[string]*CachedTorrent) { - c.torrentsMutex.Lock() for _, t := range torrents { - c.torrents[t.Id] = t - c.torrentsNames[c.GetTorrentFolder(t.Torrent)] = t + c.torrents.Store(t.Id, t) + c.torrentsNames.Store(c.GetTorrentFolder(t.Torrent), t) } - c.torrentsMutex.Unlock() - c.refreshListings() go func() { @@ -140,22 +155,6 @@ func (c *Cache) GetListing() []os.FileInfo { return nil } -func (c *Cache) GetTorrents() map[string]*CachedTorrent { - c.torrentsMutex.RLock() - defer c.torrentsMutex.RUnlock() - result := make(map[string]*CachedTorrent, len(c.torrents)) - for k, v := range c.torrents { - result[k] = v - } - return result -} - -func (c *Cache) GetTorrentNames() map[string]*CachedTorrent { - c.torrentsMutex.RLock() - defer c.torrentsMutex.RUnlock() - return c.torrentsNames -} - func (c *Cache) Start() error { if err := os.MkdirAll(c.dir, 0755); err != nil { return fmt.Errorf("failed to create cache directory: %w", err) @@ -167,10 +166,6 @@ func (c *Cache) Start() error { // initial download links go func() { - // lock download refresh mutex - c.downloadLinksRefreshMu.Lock() - defer c.downloadLinksRefreshMu.Unlock() - // This prevents the download links from being refreshed twice c.refreshDownloadLinks() }() @@ -181,6 +176,9 @@ func (c *Cache) Start() error { } }() + c.repairChan = make(chan RepairRequest, 100) + go c.repairWorker() + return nil } @@ -239,28 
+237,36 @@ func (c *Cache) load() (map[string]*CachedTorrent, error) { return torrents, nil } +func (c *Cache) GetTorrents() map[string]*CachedTorrent { + torrents := make(map[string]*CachedTorrent) + c.torrents.Range(func(key string, value *CachedTorrent) bool { + torrents[key] = value + return true + }) + return torrents +} + func (c *Cache) GetTorrent(id string) *CachedTorrent { - c.torrentsMutex.RLock() - defer c.torrentsMutex.RUnlock() - if t, ok := c.torrents[id]; ok { + if t, ok := c.torrents.Load(id); ok { return t } return nil } func (c *Cache) GetTorrentByName(name string) *CachedTorrent { - if t, ok := c.GetTorrentNames()[name]; ok { + if t, ok := c.torrentsNames.Load(name); ok { return t } return nil } func (c *Cache) SaveTorrents() error { - for _, ct := range c.GetTorrents() { - if err := c.SaveTorrent(ct); err != nil { - return err + c.torrents.Range(func(key string, value *CachedTorrent) bool { + if err := c.SaveTorrent(value); err != nil { + c.logger.Debug().Err(err).Msgf("Failed to save torrent %s", key) } - } + return true + }) return nil } @@ -383,6 +389,7 @@ func (c *Cache) sync(torrents []*types.Torrent) error { count := atomic.AddInt64(&processed, 1) if count%1000 == 0 { + c.refreshListings() c.logger.Info().Msgf("Progress: %d/%d torrents processed", count, len(torrents)) } @@ -448,9 +455,6 @@ func (c *Cache) GetDownloadLink(torrentId, filename, fileLink string) string { if file.Link == "" { // file link is empty, refresh the torrent to get restricted links - if ct.IsComplete { - return "" - } ct = c.refreshTorrent(ct) // Refresh the torrent from the debrid if ct == nil { return "" @@ -458,17 +462,40 @@ func (c *Cache) GetDownloadLink(torrentId, filename, fileLink string) string { file = ct.Files[filename] } } - c.logger.Trace().Msgf("Getting download link for %s", ct.Name) - link, err := c.client.GetDownloadLink(ct.Torrent, &file) + + c.logger.Trace().Msgf("Getting download link for %s", filename) + downloadLink, err := 
c.client.GetDownloadLink(ct.Torrent, &file) if err != nil { - c.logger.Error().Err(err).Msg("Failed to get download link") + if errors.Is(err, request.HosterUnavailableError) { + // Check link here?? + c.logger.Debug().Err(err).Msgf("Hoster is unavailable. Triggering repair for %s", ct.Name) + if err := c.repairTorrent(ct); err != nil { + c.logger.Error().Err(err).Msgf("Failed to trigger repair for %s", ct.Name) + return "" + } + // Generate download link for the file then + f := ct.Files[filename] + downloadLink, _ = c.client.GetDownloadLink(ct.Torrent, &f) + f.DownloadLink = downloadLink + file.Generated = time.Now() + ct.Files[filename] = f + c.updateDownloadLink(file.Link, downloadLink) + + go func() { + go c.setTorrent(ct) + }() + + return downloadLink // Gets download link in the next pass + } + + c.logger.Debug().Err(err).Msgf("Failed to get download link for :%s", file.Link) return "" } - file.DownloadLink = link + file.DownloadLink = downloadLink file.Generated = time.Now() ct.Files[filename] = file - go c.updateDownloadLink(file) + go c.updateDownloadLink(file.Link, downloadLink) go c.setTorrent(ct) return file.DownloadLink } @@ -478,7 +505,7 @@ func (c *Cache) GenerateDownloadLinks(t *CachedTorrent) { c.logger.Error().Err(err).Msg("Failed to generate download links") } for _, file := range t.Files { - c.updateDownloadLink(file) + c.updateDownloadLink(file.Link, file.DownloadLink) } go func() { @@ -506,15 +533,18 @@ func (c *Cache) AddTorrent(t *types.Torrent) error { } -func (c *Cache) updateDownloadLink(file types.File) { - c.downloadLinksMutex.Lock() - defer c.downloadLinksMutex.Unlock() - c.downloadLinks[file.Link] = file.DownloadLink +func (c *Cache) updateDownloadLink(link, downloadLink string) { + c.downloadLinks.Store(link, downloadLinkCache{ + Link: downloadLink, + ExpiresAt: time.Now().Add(c.autoExpiresLinksAfter), // Expires in 24 hours + }) } func (c *Cache) checkDownloadLink(link string) string { - if dl, ok := c.downloadLinks[link]; ok { - 
return dl + if dl, ok := c.downloadLinks.Load(link); ok { + if dl.ExpiresAt.After(time.Now()) { + return dl.Link + } } return "" } @@ -525,26 +555,21 @@ func (c *Cache) GetClient() types.Client { func (c *Cache) DeleteTorrent(id string) { c.logger.Info().Msgf("Deleting torrent %s", id) - c.torrentsMutex.Lock() - defer c.torrentsMutex.Unlock() - if t, ok := c.torrents[id]; ok { - delete(c.torrents, id) - delete(c.torrentsNames, t.Name) - - c.removeFromDB(id) + if t, ok := c.torrents.Load(id); ok { + c.torrents.Delete(id) + c.torrentsNames.Delete(c.GetTorrentFolder(t.Torrent)) + go c.removeFromDB(id) c.refreshListings() } } func (c *Cache) DeleteTorrents(ids []string) { c.logger.Info().Msgf("Deleting %d torrents", len(ids)) - c.torrentsMutex.Lock() - defer c.torrentsMutex.Unlock() for _, id := range ids { - if t, ok := c.torrents[id]; ok { - delete(c.torrents, id) - delete(c.torrentsNames, c.GetTorrentFolder(t.Torrent)) + if t, ok := c.torrents.Load(id); ok { + c.torrents.Delete(id) + c.torrentsNames.Delete(c.GetTorrentFolder(t.Torrent)) go c.removeFromDB(id) } } diff --git a/pkg/debrid/debrid/refresh.go b/pkg/debrid/debrid/refresh.go index 66d3db1..34c498e 100644 --- a/pkg/debrid/debrid/refresh.go +++ b/pkg/debrid/debrid/refresh.go @@ -8,6 +8,7 @@ import ( "io" "net/http" "os" + "path/filepath" "slices" "sort" "strings" @@ -37,12 +38,11 @@ func (c *Cache) refreshListings() { return } // Copy the current torrents to avoid concurrent issues - c.torrentsMutex.RLock() - torrents := make([]string, 0, len(c.torrentsNames)) - for k, _ := range c.torrentsNames { - torrents = append(torrents, k) - } - c.torrentsMutex.RUnlock() + torrents := make([]string, 0, c.torrentsNames.Size()) + c.torrentsNames.Range(func(key string, value *CachedTorrent) bool { + torrents = append(torrents, key) + return true + }) sort.Slice(torrents, func(i, j int) bool { return torrents[i] < torrents[j] @@ -61,26 +61,47 @@ func (c *Cache) refreshListings() { } // Atomic store of the complete 
ready-to-use slice c.listings.Store(files) - _ = c.RefreshXml() + c.resetPropfindResponse() if err := c.RefreshRclone(); err != nil { c.logger.Debug().Err(err).Msg("Failed to refresh rclone") } } +func (c *Cache) resetPropfindResponse() { + // Right now, parents are hardcoded + parents := []string{"__all__", "torrents"} + // Reset only the parent directories + // Convert the parents to a keys + // This is a bit hacky, but it works + // Instead of deleting all the keys, we only delete the parent keys, e.g __all__/ or torrents/ + keys := make([]string, 0, len(parents)) + for _, p := range parents { + // Construct the key + // construct url + url := filepath.Clean(filepath.Join("/webdav", c.client.GetName(), p)) + key0 := fmt.Sprintf("propfind:%s:0", url) + key1 := fmt.Sprintf("propfind:%s:1", url) + keys = append(keys, key0, key1) + } + + // Delete the keys + for _, k := range keys { + c.PropfindResp.Delete(k) + } +} + func (c *Cache) refreshTorrents() { if c.torrentsRefreshMu.TryLock() { defer c.torrentsRefreshMu.Unlock() } else { return } - c.torrentsMutex.RLock() - currentTorrents := c.torrents // // Create a copy of the current torrents to avoid concurrent issues - torrents := make(map[string]string, len(currentTorrents)) // a mpa of id and name - for _, v := range currentTorrents { - torrents[v.Id] = v.Name - } - c.torrentsMutex.RUnlock() + torrents := make(map[string]string, c.torrents.Size()) // a mpa of id and name + c.torrents.Range(func(key string, t *CachedTorrent) bool { + torrents[t.Id] = t.Name + return true + }) // Get new torrents from the debrid service debTorrents, err := c.client.GetTorrents() @@ -206,14 +227,25 @@ func (c *Cache) refreshDownloadLinks() { } else { return } - c.downloadLinksMutex.Lock() - defer c.downloadLinksMutex.Unlock() downloadLinks, err := c.client.GetDownloads() if err != nil { c.logger.Debug().Err(err).Msg("Failed to get download links") } for k, v := range downloadLinks { - c.downloadLinks[k] = v.DownloadLink + // if link 
is generated in the last 24 hours, add it to cache + timeSince := time.Since(v.Generated) + if timeSince < c.autoExpiresLinksAfter { + c.downloadLinks.Store(k, downloadLinkCache{ + Link: v.DownloadLink, + ExpiresAt: v.Generated.Add(c.autoExpiresLinksAfter - timeSince), + }) + } else { + //c.downloadLinks.Delete(k) don't delete, just log + c.logger.Trace().Msgf("Download link for %s expired", k) + } } + + c.logger.Debug().Msgf("Refreshed %d download links", len(downloadLinks)) + } diff --git a/pkg/debrid/debrid/repair.go b/pkg/debrid/debrid/repair.go new file mode 100644 index 0000000..be1085b --- /dev/null +++ b/pkg/debrid/debrid/repair.go @@ -0,0 +1,166 @@ +package debrid + +import ( + "errors" + "fmt" + "github.com/sirrobot01/debrid-blackhole/internal/request" + "github.com/sirrobot01/debrid-blackhole/internal/utils" + "github.com/sirrobot01/debrid-blackhole/pkg/debrid/types" + "slices" +) + +func (c *Cache) IsTorrentBroken(t *CachedTorrent, filenames []string) bool { + // Check torrent files + + isBroken := false + files := make(map[string]types.File) + if len(filenames) > 0 { + for name, f := range t.Files { + if slices.Contains(filenames, name) { + files[name] = f + } + } + } else { + files = t.Files + } + + // Check empty links + for _, f := range files { + // Check if file is missing + if f.Link == "" { + // refresh torrent and then break + t = c.refreshTorrent(t) + break + } + } + + for _, f := range files { + // Check if file link is still missing + if f.Link == "" { + isBroken = true + break + } else { + // Check if file.Link not in the downloadLink Cache + if _, ok := c.downloadLinks.Load(f.Link); !ok { + // File not in cache + // Check link + if err := c.client.CheckLink(f.Link); err != nil { + if errors.Is(err, request.ErrLinkBroken) { + isBroken = true + break + } else { + // This might just be a temporary error + } + } else { + // Generate a new download link? 
+ } + } else { + // Link is in cache + // We might skip checking for now, it seems rd removes uncached links + } + } + } + return isBroken +} + +func (c *Cache) repairWorker() { + // This watches a channel for torrents to repair + c.logger.Info().Msg("Starting repair worker") + + for { + select { + case req := <-c.repairChan: + torrentId := req.TorrentID + if _, inProgress := c.repairsInProgress.Load(torrentId); inProgress { + c.logger.Debug().Str("torrentId", torrentId).Msg("Skipping duplicate repair request") + continue + } + + // Mark as in progress + c.repairsInProgress.Store(torrentId, true) + c.logger.Debug().Str("torrentId", req.TorrentID).Msg("Received repair request") + + // Get the torrent from the cache + cachedTorrent, ok := c.torrents.Load(torrentId) + if !ok || cachedTorrent == nil { + c.logger.Warn().Str("torrentId", torrentId).Msg("Torrent not found in cache") + continue + } + + // Check if torrent is broken + if c.IsTorrentBroken(cachedTorrent, nil) { + c.logger.Info().Str("torrentId", torrentId).Msg("Repairing broken torrent") + // Repair torrent + if err := c.repairTorrent(cachedTorrent); err != nil { + c.logger.Error().Err(err).Str("torrentId", torrentId).Msg("Failed to repair torrent") + } else { + c.logger.Info().Str("torrentId", torrentId).Msg("Torrent repaired") + } + } else { + c.logger.Debug().Str("torrentId", torrentId).Msg("Torrent is not broken") + } + c.repairsInProgress.Delete(torrentId) + } + } +} + +func (c *Cache) SubmitForRepair(torrentId, fileName string) { + // Submitting a torrent for repair.Not used yet + + // Check if already in progress before even submitting + if _, inProgress := c.repairsInProgress.Load(torrentId); inProgress { + c.logger.Debug().Str("torrentID", torrentId).Msg("Repair already in progress") + return + } + + select { + case c.repairChan <- RepairRequest{TorrentID: torrentId, FileName: fileName}: + c.logger.Debug().Str("torrentID", torrentId).Msg("Submitted for repair") + default: + 
c.logger.Warn().Str("torrentID", torrentId).Msg("Repair channel full, skipping repair request") + } +} + +func (c *Cache) repairTorrent(t *CachedTorrent) error { + // Check if Magnet is not empty, if empty, reconstruct the magnet + + if _, inProgress := c.repairsInProgress.Load(t.Id); inProgress { + c.logger.Debug().Str("torrentID", t.Id).Msg("Repair already in progress") + return nil + } + + torrent := t.Torrent + if torrent.Magnet == nil { + torrent.Magnet = utils.ConstructMagnet(t.InfoHash, t.Name) + } + + oldID := torrent.Id + + // Submit the magnet to the debrid service + torrent.Id = "" + var err error + torrent, err = c.client.SubmitMagnet(torrent) + if err != nil { + return fmt.Errorf("failed to submit magnet: %w", err) + } + + // Check if the torrent was submitted + if torrent == nil || torrent.Id == "" { + return fmt.Errorf("failed to submit magnet: empty torrent") + } + torrent, err = c.client.CheckStatus(torrent, true) + if err != nil { + return fmt.Errorf("failed to check status: %w", err) + } + + c.client.DeleteTorrent(oldID) // delete the old torrent + c.DeleteTorrent(oldID) // Remove from listings + + // Update the torrent in the cache + t.Torrent = torrent + c.setTorrent(t) + c.refreshListings() + + c.repairsInProgress.Delete(oldID) + return nil +} diff --git a/pkg/debrid/debrid_link/debrid_link.go b/pkg/debrid/debrid_link/debrid_link.go index ae5bb3c..1dc5797 100644 --- a/pkg/debrid/debrid_link/debrid_link.go +++ b/pkg/debrid/debrid_link/debrid_link.go @@ -353,3 +353,11 @@ func (dl *DebridLink) getTorrents(page, perPage int) ([]*types.Torrent, error) { } return torrents, nil } + +func (dl *DebridLink) CheckLink(link string) error { + return nil +} + +func (dl *DebridLink) GetMountPath() string { + return dl.MountPath +} diff --git a/pkg/debrid/realdebrid/realdebrid.go b/pkg/debrid/realdebrid/realdebrid.go index 7467ef0..d57b076 100644 --- a/pkg/debrid/realdebrid/realdebrid.go +++ b/pkg/debrid/realdebrid/realdebrid.go @@ -17,7 +17,6 @@ import ( 
"strconv" "strings" "sync" - "time" ) type RealDebrid struct { @@ -167,7 +166,7 @@ func (r *RealDebrid) UpdateTorrent(t *types.Torrent) error { if err != nil { return err } - t.Name = data.OriginalFilename + t.Name = data.Filename t.Bytes = data.Bytes t.Folder = data.OriginalFilename t.Progress = data.Progress @@ -262,41 +261,105 @@ func (r *RealDebrid) DeleteTorrent(torrentId string) { func (r *RealDebrid) GenerateDownloadLinks(t *types.Torrent) error { url := fmt.Sprintf("%s/unrestrict/link/", r.Host) - files := make(map[string]types.File) + filesCh := make(chan types.File, len(t.Files)) + errCh := make(chan error, len(t.Files)) + + var wg sync.WaitGroup + for _, f := range t.Files { - payload := gourl.Values{ - "link": {f.Link}, - } - req, _ := http.NewRequest(http.MethodPost, url, strings.NewReader(payload.Encode())) - resp, err := r.client.MakeRequest(req) - if err != nil { - fmt.Println(err) - return err - } - var data UnrestrictResponse - if err = json.Unmarshal(resp, &data); err != nil { - return err - } - f.DownloadLink = data.Download - f.Generated = time.Now() - files[f.Name] = f + wg.Add(1) + go func(file types.File) { + defer wg.Done() + + payload := gourl.Values{"link": {file.Link}} + req, err := http.NewRequest(http.MethodPost, url, strings.NewReader(payload.Encode())) + if err != nil { + errCh <- err + return + } + + resp, err := r.client.Do(req) + if err != nil { + errCh <- err + return + } + if resp.StatusCode == http.StatusServiceUnavailable { + errCh <- request.HosterUnavailableError + return + } + defer resp.Body.Close() + b, err := io.ReadAll(resp.Body) + + var data UnrestrictResponse + if err = json.Unmarshal(b, &data); err != nil { + errCh <- err + return + } + + file.DownloadLink = data.Download + filesCh <- file + }(f) } + + go func() { + wg.Wait() + close(filesCh) + close(errCh) + }() + + // Collect results + files := make(map[string]types.File, len(t.Files)) + for file := range filesCh { + files[file.Name] = file + } + + // Check for 
errors + for err := range errCh { + if err != nil { + return err // Return the first error encountered + } + } + t.Files = files return nil } +func (r *RealDebrid) CheckLink(link string) error { + url := fmt.Sprintf("%s/unrestrict/check", r.Host) + payload := gourl.Values{ + "link": {link}, + } + req, _ := http.NewRequest(http.MethodPost, url, strings.NewReader(payload.Encode())) + resp, err := r.client.Do(req) + if err != nil { + return err + } + if resp.StatusCode == http.StatusNotFound { + return request.ErrLinkBroken // File has been removed + } + return nil +} + func (r *RealDebrid) GetDownloadLink(t *types.Torrent, file *types.File) (string, error) { url := fmt.Sprintf("%s/unrestrict/link/", r.Host) payload := gourl.Values{ "link": {file.Link}, } req, _ := http.NewRequest(http.MethodPost, url, strings.NewReader(payload.Encode())) - resp, err := r.client.MakeRequest(req) + resp, err := r.client.Do(req) + if err != nil { + return "", err + } + if resp.StatusCode == http.StatusServiceUnavailable { + return "", request.HosterUnavailableError + } + defer resp.Body.Close() + b, err := io.ReadAll(resp.Body) if err != nil { return "", err } var data UnrestrictResponse - if err = json.Unmarshal(resp, &data); err != nil { + if err = json.Unmarshal(b, &data); err != nil { return "", err } return data.Download, nil @@ -348,7 +411,7 @@ func (r *RealDebrid) getTorrents(offset int, limit int) (int, []*types.Torrent, } torrents = append(torrents, &types.Torrent{ Id: t.Id, - Name: utils.RemoveInvalidChars(t.Filename), // This changes when we get the files + Name: t.Filename, Bytes: t.Bytes, Progress: t.Progress, Status: t.Status, @@ -481,6 +544,10 @@ func (r *RealDebrid) GetDownloadUncached() bool { return r.DownloadUncached } +func (r *RealDebrid) GetMountPath() string { + return r.MountPath +} + func New(dc config.Debrid) *RealDebrid { rl := request.ParseRateLimit(dc.RateLimit) headers := map[string]string{ @@ -489,7 +556,9 @@ func New(dc config.Debrid) *RealDebrid { _log 
:= logger.NewLogger(dc.Name) client := request.New(). WithHeaders(headers). - WithRateLimiter(rl).WithLogger(_log) + WithRateLimiter(rl).WithLogger(_log). + WithMaxRetries(5). + WithRetryableStatus(429) return &RealDebrid{ Name: "realdebrid", Host: dc.Host, diff --git a/pkg/debrid/torbox/torbox.go b/pkg/debrid/torbox/torbox.go index 91ff0ba..ba8c81e 100644 --- a/pkg/debrid/torbox/torbox.go +++ b/pkg/debrid/torbox/torbox.go @@ -337,3 +337,11 @@ func New(dc config.Debrid) *Torbox { func (tb *Torbox) GetDownloads() (map[string]types.DownloadLinks, error) { return nil, nil } + +func (tb *Torbox) CheckLink(link string) error { + return nil +} + +func (tb *Torbox) GetMountPath() string { + return tb.MountPath +} diff --git a/pkg/debrid/types/debrid.go b/pkg/debrid/types/debrid.go index 7bfb873..a734cb1 100644 --- a/pkg/debrid/types/debrid.go +++ b/pkg/debrid/types/debrid.go @@ -19,4 +19,6 @@ type Client interface { GetLogger() zerolog.Logger GetDownloadingStatus() []string GetDownloads() (map[string]DownloadLinks, error) + CheckLink(link string) error + GetMountPath() string } diff --git a/pkg/repair/clean.go b/pkg/repair/clean.go new file mode 100644 index 0000000..bd40968 --- /dev/null +++ b/pkg/repair/clean.go @@ -0,0 +1,146 @@ +package repair + +import ( + "context" + "fmt" + "github.com/sirrobot01/debrid-blackhole/internal/request" + "golang.org/x/sync/errgroup" + "runtime" + "sync" + "time" +) + +func (r *Repair) clean(job *Job) error { + // Create a new error group + g, ctx := errgroup.WithContext(context.Background()) + + uniqueItems := make(map[string]string) + mu := sync.Mutex{} + + // Limit concurrent goroutines + g.SetLimit(runtime.NumCPU() * 4) + + for _, a := range job.Arrs { + a := a // Capture range variable + g.Go(func() error { + // Check if context was canceled + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + items, err := r.cleanArr(job, a, "") + if err != nil { + r.logger.Error().Err(err).Msgf("Error cleaning %s", a) + return err 
+ } + + // Safely append the found items to the shared slice + if len(items) > 0 { + mu.Lock() + for k, v := range items { + uniqueItems[k] = v + } + mu.Unlock() + } + + return nil + }) + } + + if err := g.Wait(); err != nil { + return err + } + + if len(uniqueItems) == 0 { + job.CompletedAt = time.Now() + job.Status = JobCompleted + + go func() { + if err := request.SendDiscordMessage("repair_clean_complete", "success", job.discordContext()); err != nil { + r.logger.Error().Msgf("Error sending discord message: %v", err) + } + }() + + return nil + } + + cache := r.deb.Caches["realdebrid"] + if cache == nil { + return fmt.Errorf("cache not found") + } + torrents := cache.GetTorrents() + + dangling := make([]string, 0) + for _, t := range torrents { + if _, ok := uniqueItems[t.Name]; !ok { + dangling = append(dangling, t.Id) + } + } + + r.logger.Info().Msgf("Found %d delapitated items", len(dangling)) + + if len(dangling) == 0 { + job.CompletedAt = time.Now() + job.Status = JobCompleted + return nil + } + + client := r.deb.Clients["realdebrid"] + if client == nil { + return fmt.Errorf("client not found") + } + for _, id := range dangling { + client.DeleteTorrent(id) + } + + return nil +} + +func (r *Repair) cleanArr(j *Job, _arr string, tmdbId string) (map[string]string, error) { + uniqueItems := make(map[string]string) + a := r.arrs.Get(_arr) + + r.logger.Info().Msgf("Starting repair for %s", a.Name) + media, err := a.GetMedia(tmdbId) + if err != nil { + r.logger.Info().Msgf("Failed to get %s media: %v", a.Name, err) + return uniqueItems, err + } + + // Create a new error group + g, ctx := errgroup.WithContext(context.Background()) + + mu := sync.Mutex{} + + // Limit concurrent goroutines + g.SetLimit(runtime.NumCPU() * 4) + + for _, m := range media { + m := m // Create a new variable scoped to the loop iteration + g.Go(func() error { + // Check if context was canceled + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + u := r.getUniquePaths(m) + 
for k, v := range u { + mu.Lock() + uniqueItems[k] = v + mu.Unlock() + } + return nil + }) + } + + if err := g.Wait(); err != nil { + return uniqueItems, err + } + + r.logger.Info().Msgf("Repair completed for %s. %d unique items", a.Name, len(uniqueItems)) + return uniqueItems, nil +} diff --git a/pkg/repair/misc.go b/pkg/repair/misc.go index 31b281d..6cd0137 100644 --- a/pkg/repair/misc.go +++ b/pkg/repair/misc.go @@ -2,6 +2,7 @@ package repair import ( "fmt" + "github.com/sirrobot01/debrid-blackhole/pkg/arr" "os" "path/filepath" "strconv" @@ -129,3 +130,20 @@ func checkFileStart(filePath string) error { } return nil } + +func collectFiles(media arr.Content) map[string][]arr.ContentFile { + uniqueParents := make(map[string][]arr.ContentFile) + files := media.Files + for _, file := range files { + target := getSymlinkTarget(file.Path) + if target != "" { + file.IsSymlink = true + dir, f := filepath.Split(target) + torrentNamePath := filepath.Clean(dir) + // Set target path folder/file.mkv + file.TargetPath = f + uniqueParents[torrentNamePath] = append(uniqueParents[torrentNamePath], file) + } + } + return uniqueParents +} diff --git a/pkg/repair/repair.go b/pkg/repair/repair.go index 6c38cbf..7f1e7d2 100644 --- a/pkg/repair/repair.go +++ b/pkg/repair/repair.go @@ -34,6 +34,7 @@ type Repair struct { runOnStart bool ZurgURL string IsZurg bool + useWebdav bool autoProcess bool logger zerolog.Logger filename string @@ -51,6 +52,7 @@ func New(arrs *arr.Storage, engine *debrid.Engine) *Repair { duration: duration, runOnStart: cfg.Repair.RunOnStart, ZurgURL: cfg.Repair.ZurgURL, + useWebdav: cfg.Repair.UseWebDav, autoProcess: cfg.Repair.AutoProcess, filename: filepath.Join(cfg.Path, "repair.json"), deb: engine, @@ -157,6 +159,13 @@ func (r *Repair) newJob(arrsNames []string, mediaIDs []string) *Job { } func (r *Repair) preRunChecks() error { + + if r.useWebdav { + if len(r.deb.Caches) == 0 { + return fmt.Errorf("no caches found") + } + } + // Check if zurg url is reachable 
if !r.IsZurg { return nil @@ -362,141 +371,6 @@ func (r *Repair) getUniquePaths(media arr.Content) map[string]string { return uniqueParents } -func (r *Repair) clean(job *Job) error { - // Create a new error group - g, ctx := errgroup.WithContext(context.Background()) - - uniqueItems := make(map[string]string) - mu := sync.Mutex{} - - // Limit concurrent goroutines - g.SetLimit(runtime.NumCPU() * 4) - - for _, a := range job.Arrs { - a := a // Capture range variable - g.Go(func() error { - // Check if context was canceled - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - items, err := r.cleanArr(job, a, "") - if err != nil { - r.logger.Error().Err(err).Msgf("Error cleaning %s", a) - return err - } - - // Safely append the found items to the shared slice - if len(items) > 0 { - mu.Lock() - for k, v := range items { - uniqueItems[k] = v - } - mu.Unlock() - } - - return nil - }) - } - - if err := g.Wait(); err != nil { - return err - } - - if len(uniqueItems) == 0 { - job.CompletedAt = time.Now() - job.Status = JobCompleted - - go func() { - if err := request.SendDiscordMessage("repair_clean_complete", "success", job.discordContext()); err != nil { - r.logger.Error().Msgf("Error sending discord message: %v", err) - } - }() - - return nil - } - - cache := r.deb.Caches["realdebrid"] - if cache == nil { - return fmt.Errorf("cache not found") - } - torrents := cache.GetTorrents() - - dangling := make([]string, 0) - for _, t := range torrents { - if _, ok := uniqueItems[t.Name]; !ok { - dangling = append(dangling, t.Id) - } - } - - r.logger.Info().Msgf("Found %d delapitated items", len(dangling)) - - if len(dangling) == 0 { - job.CompletedAt = time.Now() - job.Status = JobCompleted - return nil - } - - client := r.deb.Clients["realdebrid"] - if client == nil { - return fmt.Errorf("client not found") - } - for _, id := range dangling { - client.DeleteTorrent(id) - } - - return nil -} - -func (r *Repair) cleanArr(j *Job, _arr string, tmdbId string) 
(map[string]string, error) { - uniqueItems := make(map[string]string) - a := r.arrs.Get(_arr) - - r.logger.Info().Msgf("Starting repair for %s", a.Name) - media, err := a.GetMedia(tmdbId) - if err != nil { - r.logger.Info().Msgf("Failed to get %s media: %v", a.Name, err) - return uniqueItems, err - } - - // Create a new error group - g, ctx := errgroup.WithContext(context.Background()) - - mu := sync.Mutex{} - - // Limit concurrent goroutines - g.SetLimit(runtime.NumCPU() * 4) - - for _, m := range media { - m := m // Create a new variable scoped to the loop iteration - g.Go(func() error { - // Check if context was canceled - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - u := r.getUniquePaths(m) - for k, v := range u { - mu.Lock() - uniqueItems[k] = v - mu.Unlock() - } - return nil - }) - } - - if err := g.Wait(); err != nil { - return uniqueItems, err - } - - r.logger.Info().Msgf("Repair completed for %s. %d unique items", a.Name, len(uniqueItems)) - return uniqueItems, nil -} - func (r *Repair) repairArr(j *Job, _arr string, tmdbId string) ([]arr.ContentFile, error) { brokenItems := make([]arr.ContentFile, 0) a := r.arrs.Get(_arr) @@ -598,7 +472,9 @@ func (r *Repair) isMediaAccessible(m arr.Content) bool { func (r *Repair) getBrokenFiles(media arr.Content) []arr.ContentFile { - if r.IsZurg { + if r.useWebdav { + return r.getWebdavBrokenFiles(media) + } else if r.IsZurg { return r.getZurgBrokenFiles(media) } else { return r.getFileBrokenFiles(media) @@ -610,17 +486,7 @@ func (r *Repair) getFileBrokenFiles(media arr.Content) []arr.ContentFile { brokenFiles := make([]arr.ContentFile, 0) - uniqueParents := make(map[string][]arr.ContentFile) - files := media.Files - for _, file := range files { - target := getSymlinkTarget(file.Path) - if target != "" { - file.IsSymlink = true - dir, _ := filepath.Split(target) - parent := filepath.Base(filepath.Clean(dir)) - uniqueParents[parent] = append(uniqueParents[parent], file) - } - } + uniqueParents := 
collectFiles(media) for parent, f := range uniqueParents { // Check stat @@ -646,19 +512,7 @@ func (r *Repair) getZurgBrokenFiles(media arr.Content) []arr.ContentFile { // This reduces bandwidth usage significantly brokenFiles := make([]arr.ContentFile, 0) - uniqueParents := make(map[string][]arr.ContentFile) - files := media.Files - for _, file := range files { - target := getSymlinkTarget(file.Path) - if target != "" { - file.IsSymlink = true - dir, f := filepath.Split(target) - parent := filepath.Base(filepath.Clean(dir)) - // Set target path folder/file.mkv - file.TargetPath = f - uniqueParents[parent] = append(uniqueParents[parent], file) - } - } + uniqueParents := collectFiles(media) client := &http.Client{ Timeout: 0, Transport: &http.Transport{ @@ -672,9 +526,9 @@ func (r *Repair) getZurgBrokenFiles(media arr.Content) []arr.ContentFile { // Access zurg url + symlink folder + first file(encoded) for parent, f := range uniqueParents { r.logger.Debug().Msgf("Checking %s", parent) - encodedParent := url.PathEscape(parent) + torrentName := url.PathEscape(filepath.Base(parent)) encodedFile := url.PathEscape(f[0].TargetPath) - fullURL := fmt.Sprintf("%s/http/__all__/%s/%s", r.ZurgURL, encodedParent, encodedFile) + fullURL := fmt.Sprintf("%s/http/__all__/%s/%s", r.ZurgURL, torrentName, encodedFile) // Check file stat first if _, err := os.Stat(f[0].Path); os.IsNotExist(err) { r.logger.Debug().Msgf("Broken symlink found: %s", fullURL) @@ -715,6 +569,76 @@ func (r *Repair) getZurgBrokenFiles(media arr.Content) []arr.ContentFile { return brokenFiles } +func (r *Repair) getWebdavBrokenFiles(media arr.Content) []arr.ContentFile { + // Use internal webdav setup to check file availability + + caches := r.deb.Caches + if len(caches) == 0 { + r.logger.Info().Msg("No caches found. Can't use webdav") + return nil + } + + clients := r.deb.Clients + if len(clients) == 0 { + r.logger.Info().Msg("No clients found. 
Can't use webdav") + return nil + } + + brokenFiles := make([]arr.ContentFile, 0) + uniqueParents := collectFiles(media) + // Access zurg url + symlink folder + first file(encoded) + for torrentPath, f := range uniqueParents { + r.logger.Debug().Msgf("Checking %s", torrentPath) + // Get the debrid first + dir := filepath.Dir(torrentPath) + debridName := "" + for _, client := range clients { + mountPath := client.GetMountPath() + if mountPath == "" { + continue + } + if filepath.Clean(mountPath) == filepath.Clean(dir) { + debridName = client.GetName() + break + } + } + if debridName == "" { + r.logger.Debug().Msgf("No debrid found for %s. Skipping", torrentPath) + continue + } + cache, ok := caches[debridName] + if !ok { + r.logger.Debug().Msgf("No cache found for %s. Skipping", debridName) + continue + } + // Check if torrent exists + torrentName := filepath.Clean(filepath.Base(torrentPath)) + torrent := cache.GetTorrentByName(torrentName) + if torrent == nil { + r.logger.Debug().Msgf("No torrent found for %s. Skipping", torrentName) + continue + } + files := make([]string, 0) + for _, file := range f { + files = append(files, file.TargetPath) + } + + if cache.IsTorrentBroken(torrent, files) { + r.logger.Debug().Msgf("[webdav] Broken symlink found: %s", torrentPath) + // Delete the torrent? + brokenFiles = append(brokenFiles, f...) 
+ continue + } + + } + if len(brokenFiles) == 0 { + r.logger.Debug().Msgf("No broken files found for %s", media.Title) + return nil + } + r.logger.Debug().Msgf("%d broken files found for %s", len(brokenFiles), media.Title) + return brokenFiles +} + func (r *Repair) GetJob(id string) *Job { for _, job := range r.Jobs { if job.ID == id { diff --git a/pkg/webdav/file.go b/pkg/webdav/file.go index 20b775c..66fc4ab 100644 --- a/pkg/webdav/file.go +++ b/pkg/webdav/file.go @@ -1,6 +1,7 @@ package webdav import ( + "crypto/tls" "fmt" "github.com/sirrobot01/debrid-blackhole/pkg/debrid/debrid" "io" @@ -11,13 +12,8 @@ import ( var sharedClient = &http.Client{ Transport: &http.Transport{ - MaxIdleConns: 100, - IdleConnTimeout: 90 * time.Second, - ResponseHeaderTimeout: 30 * time.Second, - ExpectContinueTimeout: 1 * time.Second, - DisableCompression: false, // Enable compression for faster transfers - DisableKeepAlives: false, - Proxy: http.ProxyFromEnvironment, + TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, + Proxy: http.ProxyFromEnvironment, }, Timeout: 0, } @@ -92,7 +88,7 @@ func (f *File) Read(p []byte) (n int, err error) { downloadLink := f.GetDownloadLink() if downloadLink == "" { - return 0, fmt.Errorf("failed to get download link for file") + return 0, io.EOF } req, err := http.NewRequest("GET", downloadLink, nil) diff --git a/pkg/webdav/handler.go b/pkg/webdav/handler.go index 939d9af..9e5ed09 100644 --- a/pkg/webdav/handler.go +++ b/pkg/webdav/handler.go @@ -191,7 +191,6 @@ func (h *Handler) OpenFile(ctx context.Context, name string, flag int, perm os.F name: file.Name, size: file.Size, link: file.Link, - downloadLink: file.DownloadLink, metadataOnly: metadataOnly, } return fi, nil diff --git a/pkg/webdav/webdav.go b/pkg/webdav/webdav.go index f3469a4..c815955 100644 --- a/pkg/webdav/webdav.go +++ b/pkg/webdav/webdav.go @@ -115,7 +115,6 @@ func (wd *WebDav) setupRootHandler(r chi.Router) { func (wd *WebDav) commonMiddleware(next http.Handler) http.Handler { 
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Header().Set("DAV", "1, 2") - w.Header().Set("Cache-Control", "max-age=3600") w.Header().Set("Allow", "OPTIONS, PROPFIND, GET, HEAD, POST, PUT, DELETE, MKCOL, PROPPATCH, COPY, MOVE, LOCK, UNLOCK") w.Header().Set("Access-Control-Allow-Origin", "*") w.Header().Set("Access-Control-Allow-Methods", "OPTIONS, PROPFIND, GET, HEAD, POST, PUT, DELETE, MKCOL, PROPPATCH, COPY, MOVE, LOCK, UNLOCK") From d49fbea60f98025c774c98ed4df9dfa06675d0c2 Mon Sep 17 00:00:00 2001 From: Mukhtar Akere Date: Thu, 27 Mar 2025 08:24:40 +0100 Subject: [PATCH 16/39] - Add more limit to number of gorutines - Add gorutine stats to logs - Fix issues with repair --- cmd/decypharr/main.go | 20 ++++++ pkg/arr/content.go | 40 +++++++----- pkg/debrid/debrid/cache.go | 96 +++++++++++++++++------------ pkg/debrid/debrid/refresh.go | 31 ++++++---- pkg/debrid/debrid/repair.go | 19 ++---- pkg/debrid/realdebrid/realdebrid.go | 27 ++------ pkg/repair/repair.go | 17 ++--- pkg/web/web/repair.html | 6 ++ pkg/webdav/file.go | 14 ++++- pkg/webdav/webdav.go | 3 +- pkg/worker/worker.go | 29 --------- 11 files changed, 163 insertions(+), 139 deletions(-) diff --git a/cmd/decypharr/main.go b/cmd/decypharr/main.go index ed7c9bc..0238692 100644 --- a/cmd/decypharr/main.go +++ b/cmd/decypharr/main.go @@ -3,6 +3,7 @@ package decypharr import ( "context" "fmt" + "github.com/rs/zerolog" "github.com/sirrobot01/debrid-blackhole/internal/config" "github.com/sirrobot01/debrid-blackhole/internal/logger" "github.com/sirrobot01/debrid-blackhole/pkg/proxy" @@ -14,12 +15,26 @@ import ( "github.com/sirrobot01/debrid-blackhole/pkg/webdav" "github.com/sirrobot01/debrid-blackhole/pkg/worker" "os" + "runtime" "runtime/debug" "strconv" "sync" "syscall" + "time" ) +func monitorGoroutines(interval time.Duration, _log zerolog.Logger) { + ticker := time.NewTicker(interval) + defer ticker.Stop() + + for { + select { + case <-ticker.C: + _log.Debug().Msgf("Current 
goroutines: %d", runtime.NumGoroutine()) + } + } +} + func Start(ctx context.Context) error { if umaskStr := os.Getenv("UMASK"); umaskStr != "" { @@ -106,6 +121,11 @@ func Start(ctx context.Context) error { }) } + safeGo(func() error { + monitorGoroutines(1*time.Minute, _log) + return nil + }) + go func() { wg.Wait() close(errChan) diff --git a/pkg/arr/content.go b/pkg/arr/content.go index 50f4192..74158c8 100644 --- a/pkg/arr/content.go +++ b/pkg/arr/content.go @@ -1,8 +1,10 @@ package arr import ( + "context" "fmt" "github.com/goccy/go-json" + "golang.org/x/sync/errgroup" "net/http" "strconv" "strings" @@ -155,20 +157,32 @@ func (a *Arr) searchSonarr(files []ContentFile) error { id := fmt.Sprintf("%d-%d", f.Id, f.SeasonNumber) ids[id] = nil } - errs := make(chan error, len(ids)) + + g, ctx := errgroup.WithContext(context.Background()) + + // Limit concurrent goroutines + g.SetLimit(10) for id := range ids { - go func() { + id := id + g.Go(func() error { + + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + parts := strings.Split(id, "-") if len(parts) != 2 { - return + return fmt.Errorf("invalid id: %s", id) } seriesId, err := strconv.Atoi(parts[0]) if err != nil { - return + return err } seasonNumber, err := strconv.Atoi(parts[1]) if err != nil { - return + return err } payload := sonarrSearch{ Name: "SeasonSearch", @@ -177,20 +191,16 @@ func (a *Arr) searchSonarr(files []ContentFile) error { } resp, err := a.Request(http.MethodPost, "api/v3/command", payload) if err != nil { - errs <- fmt.Errorf("failed to automatic search: %v", err) - return + return fmt.Errorf("failed to automatic search: %v", err) } if resp.StatusCode >= 300 || resp.StatusCode < 200 { - errs <- fmt.Errorf("failed to automatic search. Status Code: %s", resp.Status) - return + return fmt.Errorf("failed to automatic search. 
Status Code: %s", resp.Status) } - }() + return nil + }) } - for range ids { - err := <-errs - if err != nil { - return err - } + if err := g.Wait(); err != nil { + return err } return nil } diff --git a/pkg/debrid/debrid/cache.go b/pkg/debrid/debrid/cache.go index 7f5c207..10c20b6 100644 --- a/pkg/debrid/debrid/cache.go +++ b/pkg/debrid/debrid/cache.go @@ -24,9 +24,11 @@ import ( type WebDavFolderNaming string const ( + WebDavUseFileName WebDavFolderNaming = "filename" WebDavUseOriginalName WebDavFolderNaming = "original" - WebDavUseID WebDavFolderNaming = "use_id" + WebDavUseFileNameNoExt WebDavFolderNaming = "filename_no_ext" WebDavUseOriginalNameNoExt WebDavFolderNaming = "original_no_ext" + WebDavUseID WebDavFolderNaming = "id" ) type PropfindResponse struct { @@ -78,6 +80,8 @@ type Cache struct { listingRefreshMu sync.RWMutex // for refreshing torrents downloadLinksRefreshMu sync.RWMutex // for refreshing download links torrentsRefreshMu sync.RWMutex // for refreshing torrents + + saveSemaphore chan struct{} } func NewCache(dc config.Debrid, client types.Client) *Cache { @@ -99,7 +103,7 @@ func NewCache(dc config.Debrid, client types.Client) *Cache { torrents: xsync.NewMapOf[string, *CachedTorrent](), torrentsNames: xsync.NewMapOf[string, *CachedTorrent](), client: client, - logger: logger.NewLogger(fmt.Sprintf("%s-cache", client.GetName())), + logger: logger.NewLogger(fmt.Sprintf("%s-webdav", client.GetName())), workers: 200, downloadLinks: xsync.NewMapOf[string, downloadLinkCache](), torrentRefreshInterval: torrentRefreshInterval, @@ -108,17 +112,25 @@ func NewCache(dc config.Debrid, client types.Client) *Cache { folderNaming: WebDavFolderNaming(dc.WebDavFolderNaming), autoExpiresLinksAfter: autoExpiresLinksAfter, repairsInProgress: xsync.NewMapOf[string, bool](), + saveSemaphore: make(chan struct{}, 10), } } func (c *Cache) GetTorrentFolder(torrent *types.Torrent) string { - folderName := torrent.Filename - if c.folderNaming == WebDavUseID { - folderName = 
torrent.Id - } else if c.folderNaming == WebDavUseOriginalNameNoExt { - folderName = utils.RemoveExtension(folderName) + switch c.folderNaming { + case WebDavUseFileName: + return torrent.Filename + case WebDavUseOriginalName: + return torrent.OriginalFilename + case WebDavUseFileNameNoExt: + return utils.RemoveExtension(torrent.Filename) + case WebDavUseOriginalNameNoExt: + return utils.RemoveExtension(torrent.OriginalFilename) + case WebDavUseID: + return torrent.Id + default: + return torrent.Filename } - return folderName } func (c *Cache) setTorrent(t *CachedTorrent) { @@ -126,11 +138,7 @@ func (c *Cache) setTorrent(t *CachedTorrent) { c.torrentsNames.Store(c.GetTorrentFolder(t.Torrent), t) - go func() { - if err := c.SaveTorrent(t); err != nil { - c.logger.Debug().Err(err).Msgf("Failed to save torrent %s", t.Id) - } - }() + c.SaveTorrent(t) } func (c *Cache) setTorrents(torrents map[string]*CachedTorrent) { @@ -141,11 +149,7 @@ func (c *Cache) setTorrents(torrents map[string]*CachedTorrent) { c.refreshListings() - go func() { - if err := c.SaveTorrents(); err != nil { - c.logger.Debug().Err(err).Msgf("Failed to save torrents") - } - }() + c.SaveTorrents() } func (c *Cache) GetListing() []os.FileInfo { @@ -260,20 +264,31 @@ func (c *Cache) GetTorrentByName(name string) *CachedTorrent { return nil } -func (c *Cache) SaveTorrents() error { +func (c *Cache) SaveTorrents() { c.torrents.Range(func(key string, value *CachedTorrent) bool { - if err := c.SaveTorrent(value); err != nil { - c.logger.Debug().Err(err).Msgf("Failed to save torrent %s", key) - } + c.SaveTorrent(value) return true }) - return nil } -func (c *Cache) SaveTorrent(ct *CachedTorrent) error { +func (c *Cache) SaveTorrent(ct *CachedTorrent) { + // Try to acquire semaphore without blocking + select { + case c.saveSemaphore <- struct{}{}: + go func() { + defer func() { <-c.saveSemaphore }() + c.saveTorrent(ct) + }() + default: + c.saveTorrent(ct) + } +} + +func (c *Cache) saveTorrent(ct 
*CachedTorrent) { data, err := json.MarshalIndent(ct, "", " ") if err != nil { - return fmt.Errorf("failed to marshal torrent: %w", err) + c.logger.Debug().Err(err).Msgf("Failed to marshal torrent: %s", ct.Id) + return } fileName := ct.Torrent.Id + ".json" @@ -282,20 +297,25 @@ func (c *Cache) SaveTorrent(ct *CachedTorrent) error { f, err := os.Create(tmpFile) if err != nil { - return fmt.Errorf("failed to create temp file: %w", err) + c.logger.Debug().Err(err).Msgf("Failed to create file: %s", tmpFile) + return } defer f.Close() w := bufio.NewWriter(f) if _, err := w.Write(data); err != nil { - return fmt.Errorf("failed to write data: %w", err) + c.logger.Debug().Err(err).Msgf("Failed to write data: %s", tmpFile) + return } if err := w.Flush(); err != nil { - return fmt.Errorf("failed to flush data: %w", err) + c.logger.Debug().Err(err).Msgf("Failed to flush data: %s", tmpFile) } - return os.Rename(tmpFile, filePath) + if err := os.Rename(tmpFile, filePath); err != nil { + c.logger.Debug().Err(err).Msgf("Failed to rename file: %s", tmpFile) + } + return } func (c *Cache) Sync() error { @@ -508,11 +528,7 @@ func (c *Cache) GenerateDownloadLinks(t *CachedTorrent) { c.updateDownloadLink(file.Link, file.DownloadLink) } - go func() { - if err := c.SaveTorrent(t); err != nil { - c.logger.Debug().Err(err).Msgf("Failed to save torrent %s", t.Id) - } - }() + c.SaveTorrent(t) } func (c *Cache) AddTorrent(t *types.Torrent) error { @@ -559,7 +575,7 @@ func (c *Cache) DeleteTorrent(id string) { if t, ok := c.torrents.Load(id); ok { c.torrents.Delete(id) c.torrentsNames.Delete(c.GetTorrentFolder(t.Torrent)) - go c.removeFromDB(id) + c.removeFromDB(id) c.refreshListings() } } @@ -570,7 +586,7 @@ func (c *Cache) DeleteTorrents(ids []string) { if t, ok := c.torrents.Load(id); ok { c.torrents.Delete(id) c.torrentsNames.Delete(c.GetTorrentFolder(t.Torrent)) - go c.removeFromDB(id) + c.removeFromDB(id) } } c.refreshListings() @@ -585,6 +601,10 @@ func (c *Cache) 
removeFromDB(torrentId string) { func (c *Cache) OnRemove(torrentId string) { c.logger.Debug().Msgf("OnRemove triggered for %s", torrentId) - go c.DeleteTorrent(torrentId) - go c.refreshListings() + c.DeleteTorrent(torrentId) + c.refreshListings() +} + +func (c *Cache) GetLogger() zerolog.Logger { + return c.logger } diff --git a/pkg/debrid/debrid/refresh.go b/pkg/debrid/debrid/refresh.go index 34c498e..a49808d 100644 --- a/pkg/debrid/debrid/refresh.go +++ b/pkg/debrid/debrid/refresh.go @@ -1,10 +1,12 @@ package debrid import ( + "context" "fmt" "github.com/sirrobot01/debrid-blackhole/internal/config" "github.com/sirrobot01/debrid-blackhole/internal/request" "github.com/sirrobot01/debrid-blackhole/pkg/debrid/types" + "golang.org/x/sync/errgroup" "io" "net/http" "os" @@ -12,7 +14,6 @@ import ( "slices" "sort" "strings" - "sync" "time" ) @@ -148,20 +149,28 @@ func (c *Cache) refreshTorrents() { } c.logger.Info().Msgf("Found %d new torrents", len(newTorrents)) - // No need for a complex sync process, just add the new torrents - wg := sync.WaitGroup{} - wg.Add(len(newTorrents)) + g, ctx := errgroup.WithContext(context.Background()) for _, t := range newTorrents { - // ProcessTorrent is concurrent safe - go func() { - defer wg.Done() - if err := c.ProcessTorrent(t, true); err != nil { - c.logger.Info().Err(err).Msg("Failed to process torrent") + t := t + g.Go(func() error { + + select { + case <-ctx.Done(): + return ctx.Err() + default: } - }() + if err := c.ProcessTorrent(t, true); err != nil { + return err + } + return nil + }) } - wg.Wait() + + if err := g.Wait(); err != nil { + c.logger.Debug().Err(err).Msg("Failed to process new torrents") + } + } func (c *Cache) RefreshRclone() error { diff --git a/pkg/debrid/debrid/repair.go b/pkg/debrid/debrid/repair.go index be1085b..3095bae 100644 --- a/pkg/debrid/debrid/repair.go +++ b/pkg/debrid/debrid/repair.go @@ -41,22 +41,15 @@ func (c *Cache) IsTorrentBroken(t *CachedTorrent, filenames []string) bool { break } else { // 
Check if file.Link not in the downloadLink Cache - if _, ok := c.downloadLinks.Load(f.Link); !ok { - // File not in cache - // Check link - if err := c.client.CheckLink(f.Link); err != nil { - if errors.Is(err, request.ErrLinkBroken) { - isBroken = true - break - } else { - // This might just be a temporary error - } + if err := c.client.CheckLink(f.Link); err != nil { + if errors.Is(err, request.ErrLinkBroken) { + isBroken = true + break } else { - // Generate a new download link? + // This might just be a temporary error } } else { - // Link is in cache - // We might skip checking for now, it seems rd removes uncached links + // Generate a new download link? } } } diff --git a/pkg/debrid/realdebrid/realdebrid.go b/pkg/debrid/realdebrid/realdebrid.go index d57b076..96b8087 100644 --- a/pkg/debrid/realdebrid/realdebrid.go +++ b/pkg/debrid/realdebrid/realdebrid.go @@ -445,35 +445,20 @@ func (r *RealDebrid) GetTorrents() ([]*types.Torrent, error) { } // Prepare for concurrent fetching - var wg sync.WaitGroup - var mu sync.Mutex var fetchError error // Calculate how many more requests we need batchCount := (remaining + limit - 1) / limit // ceiling division for i := 1; i <= batchCount; i++ { - wg.Add(1) - go func(batchOffset int) { - defer wg.Done() - - _, batch, err := r.getTorrents(batchOffset, limit) - if err != nil { - mu.Lock() - fetchError = err - mu.Unlock() - return - } - - mu.Lock() - allTorrents = append(allTorrents, batch...) - mu.Unlock() - }(i * limit) + _, batch, err := r.getTorrents(i*limit, limit) + if err != nil { + fetchError = err + continue + } + allTorrents = append(allTorrents, batch...) 
} - // Wait for all fetches to complete - wg.Wait() - if fetchError != nil { return nil, fetchError } diff --git a/pkg/repair/repair.go b/pkg/repair/repair.go index 7f1e7d2..e46a475 100644 --- a/pkg/repair/repair.go +++ b/pkg/repair/repair.go @@ -164,6 +164,7 @@ func (r *Repair) preRunChecks() error { if len(r.deb.Caches) == 0 { return fmt.Errorf("no caches found") } + return nil } // Check if zurg url is reachable @@ -195,10 +196,9 @@ func (r *Repair) AddJob(arrsNames []string, mediaIDs []string, autoProcess, recu job.Recurrent = recurrent r.reset(job) r.Jobs[key] = job - go r.saveToFile() + r.saveToFile() go func() { if err := r.repair(job); err != nil { - r.logger.Error().Err(err).Msg("Error running repair") r.logger.Error().Err(err).Msg("Error running repair") job.FailedAt = time.Now() job.Error = err.Error() @@ -453,9 +453,10 @@ func (r *Repair) isMediaAccessible(m arr.Content) bool { } firstFile := files[0] r.logger.Debug().Msgf("Checking parent directory for %s", firstFile.Path) - if _, err := os.Stat(firstFile.Path); os.IsNotExist(err) { - return false - } + //if _, err := os.Stat(firstFile.Path); os.IsNotExist(err) { + // r.logger.Debug().Msgf("Parent directory not accessible for %s", firstFile.Path) + // return false + //} // Check symlink parent directory symlinkPath := getSymlinkTarget(firstFile.Path) @@ -597,6 +598,7 @@ func (r *Repair) getWebdavBrokenFiles(media arr.Content) []arr.ContentFile { if mountPath == "" { continue } + if filepath.Clean(mountPath) == filepath.Clean(dir) { debridName = client.GetName() break @@ -615,7 +617,8 @@ func (r *Repair) getWebdavBrokenFiles(media arr.Content) []arr.ContentFile { torrentName := filepath.Clean(filepath.Base(torrentPath)) torrent := cache.GetTorrentByName(torrentName) if torrent == nil { - r.logger.Debug().Msgf("No torrent found for %s. Skipping", torrentName) + r.logger.Debug().Msgf("Torrent not found for %s. Marking as broken", torrentName) + brokenFiles = append(brokenFiles, f...) 
continue } files := make([]string, 0) @@ -774,5 +777,5 @@ func (r *Repair) DeleteJobs(ids []string) { } } } - go r.saveToFile() + r.saveToFile() } diff --git a/pkg/web/web/repair.html b/pkg/web/web/repair.html index 68f9e86..7885f15 100644 --- a/pkg/web/web/repair.html +++ b/pkg/web/web/repair.html @@ -268,6 +268,9 @@ } else if (job.status === 'pending') { status = 'Pending'; statusClass = 'text-warning'; + } else if (job.status === "processing") { + status = 'Processing'; + statusClass = 'text-info'; } row.innerHTML = ` @@ -486,6 +489,9 @@ } else if (job.status === 'pending') { status = 'Pending'; statusClass = 'text-warning'; + } else if (job.status === "processing") { + status = 'Processing'; + statusClass = 'text-info'; } document.getElementById('modalJobStatus').innerHTML = `${status}`; diff --git a/pkg/webdav/file.go b/pkg/webdav/file.go index 66fc4ab..3ce8a94 100644 --- a/pkg/webdav/file.go +++ b/pkg/webdav/file.go @@ -12,10 +12,18 @@ import ( var sharedClient = &http.Client{ Transport: &http.Transport{ - TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, - Proxy: http.ProxyFromEnvironment, + TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, + Proxy: http.ProxyFromEnvironment, + MaxIdleConns: 100, + MaxIdleConnsPerHost: 20, + MaxConnsPerHost: 50, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ResponseHeaderTimeout: 30 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + DisableKeepAlives: false, }, - Timeout: 0, + Timeout: 60 * time.Second, } type File struct { diff --git a/pkg/webdav/webdav.go b/pkg/webdav/webdav.go index c815955..f9f6f33 100644 --- a/pkg/webdav/webdav.go +++ b/pkg/webdav/webdav.go @@ -4,7 +4,6 @@ import ( "context" "fmt" "github.com/go-chi/chi/v5" - "github.com/sirrobot01/debrid-blackhole/internal/logger" "github.com/sirrobot01/debrid-blackhole/pkg/service" "html/template" "net/http" @@ -23,7 +22,7 @@ func New() *WebDav { ready: make(chan struct{}), } for name, c := range svc.Debrid.Caches 
{ - h := NewHandler(name, c, logger.NewLogger(fmt.Sprintf("%s-webdav", name))) + h := NewHandler(name, c, c.GetLogger()) w.Handlers = append(w.Handlers, h) } return w diff --git a/pkg/worker/worker.go b/pkg/worker/worker.go index 884b82c..156e9fb 100644 --- a/pkg/worker/worker.go +++ b/pkg/worker/worker.go @@ -37,24 +37,6 @@ func Start(ctx context.Context) error { return nil } -//func arrRefreshWorker(ctx context.Context, cfg *config.Config) { -// // Start Arr Refresh Worker -// _logger := getLogger() -// _logger.Debug().Msg("Refresh Worker started") -// refreshCtx := context.WithValue(ctx, "worker", "refresh") -// refreshTicker := time.NewTicker(time.Duration(cfg.QBitTorrent.RefreshInterval) * time.Second) -// -// for { -// select { -// case <-refreshCtx.Done(): -// _logger.Debug().Msg("Refresh Worker stopped") -// return -// case <-refreshTicker.C: -// refreshArrs() -// } -// } -//} - func cleanUpQueuesWorker(ctx context.Context, cfg *config.Config) { // Start Clean up Queues Worker _logger := getLogger() @@ -80,17 +62,6 @@ func cleanUpQueuesWorker(ctx context.Context, cfg *config.Config) { } } -//func refreshArrs() { -// for _, a := range service.GetService().Arr.GetAll() { -// err := a.Refresh() -// if err != nil { -// _logger := getLogger() -// _logger.Debug().Err(err).Msg("Error refreshing arr") -// return -// } -// } -//} - func cleanUpQueues() { // Clean up queues _logger := getLogger() From 4ae5de99e865da87421111b3a3096d901d7b754b Mon Sep 17 00:00:00 2001 From: Mukhtar Akere Date: Thu, 27 Mar 2025 09:01:33 +0100 Subject: [PATCH 17/39] Fix deleting torrent bug --- pkg/debrid/debrid/refresh.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/debrid/debrid/refresh.go b/pkg/debrid/debrid/refresh.go index a49808d..56f5d54 100644 --- a/pkg/debrid/debrid/refresh.go +++ b/pkg/debrid/debrid/refresh.go @@ -136,7 +136,7 @@ func (c *Cache) refreshTorrents() { newTorrents := make([]*types.Torrent, 0) for _, t := range _newTorrents { if 
!slices.Contains(deletedTorrents, t.Id) { - _newTorrents = append(_newTorrents, t) + newTorrents = append(newTorrents, t) // <-- FIXED: Use newTorrents } } From f9bc7ad91488315e36cbe7020756ecbc6f5078c9 Mon Sep 17 00:00:00 2001 From: Mukhtar Akere Date: Fri, 28 Mar 2025 00:25:02 +0100 Subject: [PATCH 18/39] Fixes - Be conservative about the number of goroutines - Minor fixes - Add Webdav to ui - Add more configs to UI --- cmd/decypharr/main.go | 20 ------ internal/config/config.go | 32 +++++----- pkg/debrid/debrid/cache.go | 64 ++++++++++++------- pkg/debrid/debrid/engine.go | 3 +- pkg/debrid/debrid/refresh.go | 21 ++++--- pkg/debrid/realdebrid/realdebrid.go | 3 + pkg/repair/clean.go | 2 +- pkg/repair/repair.go | 4 +- pkg/server/server.go | 29 +++++++++ pkg/web/web/config.html | 98 ++++++++++++++++++++++++----- pkg/web/web/layout.html | 19 +++++- pkg/webdav/file.go | 6 +- pkg/webdav/handler.go | 14 ++++- pkg/webdav/templates.go | 33 +++++++--- 14 files changed, 252 insertions(+), 96 deletions(-) diff --git a/cmd/decypharr/main.go b/cmd/decypharr/main.go index 0238692..ed7c9bc 100644 --- a/cmd/decypharr/main.go +++ b/cmd/decypharr/main.go @@ -3,7 +3,6 @@ package decypharr import ( "context" "fmt" - "github.com/rs/zerolog" "github.com/sirrobot01/debrid-blackhole/internal/config" "github.com/sirrobot01/debrid-blackhole/internal/logger" "github.com/sirrobot01/debrid-blackhole/pkg/proxy" @@ -15,26 +14,12 @@ import ( "github.com/sirrobot01/debrid-blackhole/pkg/webdav" "github.com/sirrobot01/debrid-blackhole/pkg/worker" "os" - "runtime" "runtime/debug" "strconv" "sync" "syscall" - "time" ) -func monitorGoroutines(interval time.Duration, _log zerolog.Logger) { - ticker := time.NewTicker(interval) - defer ticker.Stop() - - for { - select { - case <-ticker.C: - _log.Debug().Msgf("Current goroutines: %d", runtime.NumGoroutine()) - } - } -} - func Start(ctx context.Context) error { if umaskStr := os.Getenv("UMASK"); umaskStr != "" { @@ -121,11 +106,6 @@ func Start(ctx 
context.Context) error { }) } - safeGo(func() error { - monitorGoroutines(1*time.Minute, _log) - return nil - }) - go func() { wg.Wait() close(errChan) diff --git a/internal/config/config.go b/internal/config/config.go index d8a8554..76449d6 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -25,13 +25,8 @@ type Debrid struct { CheckCached bool `json:"check_cached"` RateLimit string `json:"rate_limit"` // 200/minute or 10/second - // Webdav - UseWebdav bool `json:"use_webdav"` - TorrentRefreshInterval string `json:"torrent_refresh_interval"` - DownloadLinksRefreshInterval string `json:"downloads_refresh_interval"` - TorrentRefreshWorkers int `json:"torrent_refresh_workers"` - WebDavFolderNaming string `json:"webdav_folder_naming"` - AutoExpireLinksAfter string `json:"auto_expire_links_after"` + UseWebDav bool `json:"use_webdav"` + WebDav } type Proxy struct { @@ -136,6 +131,10 @@ func (c *Config) loadConfig() error { c.Debrids = append(c.Debrids, c.Debrid) } + for i, debrid := range c.Debrids { + c.Debrids[i] = c.GetDebridWebDav(debrid) + } + if len(c.AllowedExt) == 0 { c.AllowedExt = getDefaultExtensions() } @@ -313,17 +312,22 @@ func (c *Config) NeedsSetup() bool { } func (c *Config) GetDebridWebDav(d Debrid) Debrid { - if d.TorrentRefreshInterval == "" { - d.TorrentRefreshInterval = cmp.Or(c.WebDav.TorrentsRefreshInterval, "15s") // 15 seconds + + if !d.UseWebDav { + return d } - if d.DownloadLinksRefreshInterval == "" { + + if d.TorrentsRefreshInterval == "" { + d.TorrentsRefreshInterval = cmp.Or(c.WebDav.TorrentsRefreshInterval, "15s") // 15 seconds + } + if d.WebDav.DownloadLinksRefreshInterval == "" { d.DownloadLinksRefreshInterval = cmp.Or(c.WebDav.DownloadLinksRefreshInterval, "40m") // 40 minutes } - if d.TorrentRefreshWorkers == 0 { - d.TorrentRefreshWorkers = cmp.Or(c.WebDav.Workers, 30) // 30 workers + if d.Workers == 0 { + d.Workers = cmp.Or(c.WebDav.Workers, 30) // 30 workers } - if d.WebDavFolderNaming == "" { - 
d.WebDavFolderNaming = cmp.Or(c.WebDav.FolderNaming, "original_no_ext") + if d.FolderNaming == "" { + d.FolderNaming = cmp.Or(c.WebDav.FolderNaming, "original_no_ext") } if d.AutoExpireLinksAfter == "" { d.AutoExpireLinksAfter = cmp.Or(c.WebDav.AutoExpireLinksAfter, "24h") diff --git a/pkg/debrid/debrid/cache.go b/pkg/debrid/debrid/cache.go index 10c20b6..87369f8 100644 --- a/pkg/debrid/debrid/cache.go +++ b/pkg/debrid/debrid/cache.go @@ -39,7 +39,7 @@ type PropfindResponse struct { type CachedTorrent struct { *types.Torrent - LastRead time.Time `json:"last_read"` + AddedOn time.Time `json:"added_on"` IsComplete bool `json:"is_complete"` } @@ -86,7 +86,7 @@ type Cache struct { func NewCache(dc config.Debrid, client types.Client) *Cache { cfg := config.GetConfig() - torrentRefreshInterval, err := time.ParseDuration(dc.TorrentRefreshInterval) + torrentRefreshInterval, err := time.ParseDuration(dc.TorrentsRefreshInterval) if err != nil { torrentRefreshInterval = time.Second * 15 } @@ -109,7 +109,7 @@ func NewCache(dc config.Debrid, client types.Client) *Cache { torrentRefreshInterval: torrentRefreshInterval, downloadLinksRefreshInterval: downloadLinksRefreshInterval, PropfindResp: xsync.NewMapOf[string, PropfindResponse](), - folderNaming: WebDavFolderNaming(dc.WebDavFolderNaming), + folderNaming: WebDavFolderNaming(dc.FolderNaming), autoExpiresLinksAfter: autoExpiresLinksAfter, repairsInProgress: xsync.NewMapOf[string, bool](), saveSemaphore: make(chan struct{}, 10), @@ -201,6 +201,7 @@ func (c *Cache) load() (map[string]*CachedTorrent, error) { return torrents, fmt.Errorf("failed to read cache directory: %w", err) } + now := time.Now() for _, file := range files { if file.IsDir() || filepath.Ext(file.Name()) != ".json" { continue @@ -232,7 +233,11 @@ func (c *Cache) load() (map[string]*CachedTorrent, error) { linkStore[f.Link] = true } } - + addedOn, err := time.Parse(time.RFC3339, ct.Added) + if err != nil { + addedOn = now + } + ct.AddedOn = addedOn ct.IsComplete 
= true torrents[ct.Id] = &ct } @@ -447,10 +452,15 @@ func (c *Cache) ProcessTorrent(t *types.Torrent, refreshRclone bool) error { return fmt.Errorf("failed to update torrent: %w", err) } } + + addedOn, err := time.Parse(time.RFC3339, t.Added) + if err != nil { + addedOn = time.Now() + } ct := &CachedTorrent{ Torrent: t, - LastRead: time.Now(), IsComplete: len(t.Files) > 0, + AddedOn: addedOn, } c.setTorrent(ct) @@ -487,25 +497,27 @@ func (c *Cache) GetDownloadLink(torrentId, filename, fileLink string) string { downloadLink, err := c.client.GetDownloadLink(ct.Torrent, &file) if err != nil { if errors.Is(err, request.HosterUnavailableError) { + // This code is commented iut due to the fact that if a torrent link is uncached, it's likely that we can't redownload it again + // Do not attempt to repair the torrent if the hoster is unavailable // Check link here?? - c.logger.Debug().Err(err).Msgf("Hoster is unavailable. Triggering repair for %s", ct.Name) - if err := c.repairTorrent(ct); err != nil { - c.logger.Error().Err(err).Msgf("Failed to trigger repair for %s", ct.Name) - return "" - } - // Generate download link for the file then - f := ct.Files[filename] - downloadLink, _ = c.client.GetDownloadLink(ct.Torrent, &f) - f.DownloadLink = downloadLink - file.Generated = time.Now() - ct.Files[filename] = f - c.updateDownloadLink(file.Link, downloadLink) - - go func() { - go c.setTorrent(ct) - }() - - return downloadLink // Gets download link in the next pass + //c.logger.Debug().Err(err).Msgf("Hoster is unavailable. 
Triggering repair for %s", ct.Name) + //if err := c.repairTorrent(ct); err != nil { + // c.logger.Error().Err(err).Msgf("Failed to trigger repair for %s", ct.Name) + // return "" + //} + //// Generate download link for the file then + //f := ct.Files[filename] + //downloadLink, _ = c.client.GetDownloadLink(ct.Torrent, &f) + //f.DownloadLink = downloadLink + //file.Generated = time.Now() + //ct.Files[filename] = f + //c.updateDownloadLink(file.Link, downloadLink) + // + //go func() { + // go c.setTorrent(ct) + //}() + // + //return downloadLink // Gets download link in the next pass } c.logger.Debug().Err(err).Msgf("Failed to get download link for :%s", file.Link) @@ -537,10 +549,14 @@ func (c *Cache) AddTorrent(t *types.Torrent) error { return fmt.Errorf("failed to update torrent: %w", err) } } + addedOn, err := time.Parse(time.RFC3339, t.Added) + if err != nil { + addedOn = time.Now() + } ct := &CachedTorrent{ Torrent: t, - LastRead: time.Now(), IsComplete: len(t.Files) > 0, + AddedOn: addedOn, } c.setTorrent(ct) c.refreshListings() diff --git a/pkg/debrid/debrid/engine.go b/pkg/debrid/debrid/engine.go index 10f3f37..03a75d0 100644 --- a/pkg/debrid/debrid/engine.go +++ b/pkg/debrid/debrid/engine.go @@ -18,10 +18,9 @@ func NewEngine() *Engine { caches := make(map[string]*Cache) for _, dc := range cfg.Debrids { - dc = cfg.GetDebridWebDav(dc) client := createDebridClient(dc) logger := client.GetLogger() - if dc.UseWebdav { + if dc.UseWebDav { caches[dc.Name] = NewCache(dc, client) logger.Info().Msg("Debrid Service started with WebDAV") } else { diff --git a/pkg/debrid/debrid/refresh.go b/pkg/debrid/debrid/refresh.go index 56f5d54..cead3c9 100644 --- a/pkg/debrid/debrid/refresh.go +++ b/pkg/debrid/debrid/refresh.go @@ -38,25 +38,25 @@ func (c *Cache) refreshListings() { } else { return } - // Copy the current torrents to avoid concurrent issues - torrents := make([]string, 0, c.torrentsNames.Size()) + // COpy the torrents to a string|time map + torrentsTime := 
make(map[string]time.Time, c.torrents.Size()) + torrents := make([]string, 0, c.torrents.Size()) c.torrentsNames.Range(func(key string, value *CachedTorrent) bool { + torrentsTime[key] = value.AddedOn torrents = append(torrents, key) return true }) - sort.Slice(torrents, func(i, j int) bool { - return torrents[i] < torrents[j] - }) + // Sort the torrents by name + sort.Strings(torrents) files := make([]os.FileInfo, 0, len(torrents)) - now := time.Now() for _, t := range torrents { files = append(files, &fileInfo{ name: t, size: 0, mode: 0755 | os.ModeDir, - modTime: now, + modTime: torrentsTime[t], isDir: true, }) } @@ -219,10 +219,13 @@ func (c *Cache) refreshTorrent(t *CachedTorrent) *CachedTorrent { if len(t.Files) == 0 { return nil } - + addedOn, err := time.Parse(time.RFC3339, _torrent.Added) + if err != nil { + addedOn = time.Now() + } ct := &CachedTorrent{ Torrent: _torrent, - LastRead: time.Now(), + AddedOn: addedOn, IsComplete: len(t.Files) > 0, } c.setTorrent(ct) diff --git a/pkg/debrid/realdebrid/realdebrid.go b/pkg/debrid/realdebrid/realdebrid.go index 96b8087..00846a7 100644 --- a/pkg/debrid/realdebrid/realdebrid.go +++ b/pkg/debrid/realdebrid/realdebrid.go @@ -17,6 +17,7 @@ import ( "strconv" "strings" "sync" + "time" ) type RealDebrid struct { @@ -178,6 +179,7 @@ func (r *RealDebrid) UpdateTorrent(t *types.Torrent) error { t.Links = data.Links t.MountPath = r.MountPath t.Debrid = r.Name + t.Added = data.Added t.Files = getTorrentFiles(t, data, false) // Get selected files return nil } @@ -422,6 +424,7 @@ func (r *RealDebrid) getTorrents(offset int, limit int) (int, []*types.Torrent, InfoHash: t.Hash, Debrid: r.Name, MountPath: r.MountPath, + Added: t.Added.Format(time.RFC3339), }) } return totalItems, torrents, nil diff --git a/pkg/repair/clean.go b/pkg/repair/clean.go index bd40968..d10274d 100644 --- a/pkg/repair/clean.go +++ b/pkg/repair/clean.go @@ -18,7 +18,7 @@ func (r *Repair) clean(job *Job) error { mu := sync.Mutex{} // Limit concurrent 
goroutines - g.SetLimit(runtime.NumCPU() * 4) + g.SetLimit(10) for _, a := range job.Arrs { a := a // Capture range variable diff --git a/pkg/repair/repair.go b/pkg/repair/repair.go index e46a475..19942c7 100644 --- a/pkg/repair/repair.go +++ b/pkg/repair/repair.go @@ -218,6 +218,8 @@ func (r *Repair) repair(job *Job) error { // Create a new error group with context g, ctx := errgroup.WithContext(context.Background()) + g.SetLimit(4) + // Use a mutex to protect concurrent access to brokenItems var mu sync.Mutex brokenItems := map[string][]arr.ContentFile{} @@ -397,7 +399,7 @@ func (r *Repair) repairArr(j *Job, _arr string, tmdbId string) ([]arr.ContentFil g, ctx := errgroup.WithContext(context.Background()) // Limit concurrent goroutines - g.SetLimit(runtime.NumCPU() * 4) + g.SetLimit(10) // Mutex for brokenItems var mu sync.Mutex diff --git a/pkg/server/server.go b/pkg/server/server.go index 47ef225..203d5dc 100644 --- a/pkg/server/server.go +++ b/pkg/server/server.go @@ -6,6 +6,7 @@ import ( "fmt" "github.com/go-chi/chi/v5" "github.com/go-chi/chi/v5/middleware" + "github.com/goccy/go-json" "github.com/rs/zerolog" "github.com/sirrobot01/debrid-blackhole/internal/config" "github.com/sirrobot01/debrid-blackhole/internal/logger" @@ -13,6 +14,7 @@ import ( "net/http" "os" "os/signal" + "runtime" "syscall" ) @@ -41,6 +43,7 @@ func (s *Server) Start(ctx context.Context) error { // Register logs s.router.Get("/logs", s.getLogs) + s.router.Get("/stats", s.getStats) port := fmt.Sprintf(":%s", cfg.QBitTorrent.Port) s.logger.Info().Msgf("Starting server on %s", port) srv := &http.Server{ @@ -102,3 +105,29 @@ func (s *Server) getLogs(w http.ResponseWriter, r *http.Request) { return } } + +func (s *Server) getStats(w http.ResponseWriter, r *http.Request) { + var memStats runtime.MemStats + runtime.ReadMemStats(&memStats) + + stats := map[string]interface{}{ + // Memory stats + "heap_alloc_mb": fmt.Sprintf("%.2fMB", float64(memStats.HeapAlloc)/1024/1024), + "total_alloc_mb": 
fmt.Sprintf("%.2fMB", float64(memStats.TotalAlloc)/1024/1024), + "sys_mb": fmt.Sprintf("%.2fMB", float64(memStats.Sys)/1024/1024), + + // GC stats + "gc_cycles": memStats.NumGC, + // Goroutine stats + "goroutines": runtime.NumGoroutine(), + + // System info + "num_cpu": runtime.NumCPU(), + } + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + if err := json.NewEncoder(w).Encode(stats); err != nil { + s.logger.Error().Err(err).Msg("Failed to encode stats") + } +} diff --git a/pkg/web/web/config.html b/pkg/web/web/config.html index 8627757..300d7fb 100644 --- a/pkg/web/web/config.html +++ b/pkg/web/web/config.html @@ -11,7 +11,7 @@
- +
+
+ + +
-
Arr Configurations
+
Arrs
@@ -141,6 +145,10 @@ +
+ + +
@@ -159,7 +167,7 @@ // Templates for dynamic elements const debridTemplate = (index) => `
-
+
@@ -191,6 +199,47 @@
+
+
Webdav
+
+ + +
+
+ + +
+
+ + +
+
+ + +
+
+ + +
+
+ + +
+
+ + +
+
+ + +
+
`; @@ -360,16 +409,37 @@ container.insertAdjacentHTML('beforeend', debridTemplate(debridCount)); if (data) { - Object.entries(data).forEach(([key, value]) => { - const input = container.querySelector(`[name="debrid[${debridCount}].${key}"]`); - if (input) { - if (input.type === 'checkbox') { - input.checked = value; - } else { - input.value = value; - } + + if (data.use_webdav) { + let _webCfg = container.querySelector(`.webdav-${debridCount}`); + if (_webCfg) { + _webCfg.classList.remove('d-none'); } - }); + } + + function setFieldValues(obj, prefix) { + Object.entries(obj).forEach(([key, value]) => { + const fieldName = prefix ? `${prefix}.${key}` : key; + + // If value is an object and not null, recursively process nested fields + if (value !== null && typeof value === 'object' && !Array.isArray(value)) { + setFieldValues(value, fieldName); + } else { + // Handle leaf values (actual form fields) + const input = container.querySelector(`[name="debrid[${debridCount}].${fieldName}"]`); + if (input) { + if (input.type === 'checkbox') { + input.checked = value; + } else { + input.value = value; + } + } + } + }); + } + + // Start processing with the root object + setFieldValues(data, ''); } debridCount++; diff --git a/pkg/web/web/layout.html b/pkg/web/web/layout.html index 1ebe9bf..3edee82 100644 --- a/pkg/web/web/layout.html +++ b/pkg/web/web/layout.html @@ -117,6 +117,18 @@ background-color: rgba(128, 128, 128, 0.2); } +
@@ -149,7 +161,12 @@ +