23 Commits

Author SHA1 Message Date
Mukhtar Akere
f9861e3b54 Implementing a streaming setup with Usenet 2025-08-01 15:27:24 +01:00
Mukhtar Akere
afe577bf2f - Fix repair bugs
- Minor html/js bugs from new template
- Other minor issues
2025-07-13 06:30:02 +01:00
Mukhtar Akere
604402250e hotfix login and registration 2025-07-12 00:57:48 +01:00
Mukhtar Akere
74615a80ff Fix config.js 2025-07-11 13:17:43 +01:00
Sadman Sakib
b901bd5175 Feature/torbox provider improvements (#100)
- Add Torbox WebDAV implementation
- Fix Issues with sample and extension checks
2025-07-11 13:17:03 +01:00
Mukhtar Akere
8c56e59107 Fix some UI bugs; colors etc 2025-07-11 06:03:11 +01:00
Mukhtar Akere
b8b9e76753 Add seeders, add Remove selected from debrid button 2025-07-10 15:15:02 +01:00
Mukhtar Akere
6fb54d322e Fix dockerignore 2025-07-10 02:31:30 +01:00
Mukhtar Akere
cf61546bec Move to tailwind-build instead of CDNs 2025-07-10 02:17:35 +01:00
Mukhtar Akere
c72867ff57 Testing a new UI 2025-07-09 20:08:09 +01:00
Mukhtar Akere
dba5604d79 fix refresh rclone http client 2025-07-07 00:08:48 +01:00
iPromKnight
f656b7e4e2 feat: Allow deleting all __bad__ with a single button (#98) 2025-07-04 20:13:12 +01:00
Mukhtar Akere
c7b07137c5 Fix repair bug 2025-07-03 23:36:30 +01:00
Mukhtar Akere
c0aa4eaeba Fix modtime bug 2025-07-02 01:17:31 +01:00
Mukhtar Akere
2c90e518aa fix playback issues 2025-07-01 16:10:23 +01:00
Mukhtar Akere
dec7d93272 fix streaming 2025-07-01 15:28:19 +01:00
Mukhtar Akere
8d092615db Update stream client; Add repair strategy 2025-07-01 04:42:33 +01:00
iPromKnight
a4ee0973cc fix: AllDebrid webdav compatibility, and uncached downloads (#97) 2025-07-01 04:10:21 +01:00
Mukhtar Akere
1d19be9013 hotfix repair html table 2025-06-26 07:31:12 +01:00
Mukhtar Akere
cee0e20fe1 hotfix repair and download rate limit 2025-06-26 06:08:50 +01:00
Mukhtar Akere
a3e698e04f Add repair and download rate limit 2025-06-26 05:45:20 +01:00
Mukhtar Akere
e123a2fd5e Hotfix issues with 1.0.3 2025-06-26 03:51:28 +01:00
Mukhtar Akere
817051589e Move to per-torrent repair; Fix issues issues with adding torrents 2025-06-23 18:54:52 +01:00
108 changed files with 19755 additions and 4579 deletions

View File

@@ -5,16 +5,16 @@ tmp_dir = "tmp"
[build]
args_bin = ["--config", "data/"]
bin = "./tmp/main"
cmd = "bash -c 'go build -ldflags \"-X github.com/sirrobot01/decypharr/pkg/version.Version=0.0.0 -X github.com/sirrobot01/decypharr/pkg/version.Channel=dev\" -o ./tmp/main .'"
cmd = "bash -c 'npm run build && go build -ldflags \"-X github.com/sirrobot01/decypharr/pkg/version.Version=0.0.0 -X github.com/sirrobot01/decypharr/pkg/version.Channel=dev\" -o ./tmp/main .'"
delay = 1000
exclude_dir = ["assets", "tmp", "vendor", "testdata", "data"]
exclude_dir = ["tmp", "vendor", "testdata", "data", "logs", "docs", "dist", "node_modules", ".ven"]
exclude_file = []
exclude_regex = ["_test.go"]
exclude_unchanged = false
follow_symlink = false
full_bin = ""
include_dir = []
include_ext = ["go", "tpl", "tmpl", "html", ".json"]
include_ext = ["go", "tpl", "tmpl", "html", ".json", ".js", ".css"]
include_file = []
kill_delay = "0s"
log = "build-errors.log"

View File

@@ -11,3 +11,19 @@ torrents.json
*.json
.ven/**
docs/**
# Don't copy node modules
node_modules/
# Don't copy development files
.git/
.gitignore
*.md
.env*
*.log
# Build artifacts
decypharr
healthcheck
*.exe
.venv/

View File

@@ -72,5 +72,5 @@ body:
label: Trace Logs have been provided as applicable
description: Trace logs are **generally required** for all bug reports and are not optional; they contain `trace`. Info logs are invalid for bug reports and contain neither `debug` nor `trace`
options:
- label: I have read and followed the steps in the wiki link above and provided the required trace logs - the logs contain `trace` - that are relevant and show this issue.
- label: I have read and followed the steps in the documentation link and provided the required trace logs - the logs contain `trace` - that are relevant and show this issue.
required: true

5
.gitignore vendored
View File

@@ -12,4 +12,7 @@ tmp/**
torrents.json
logs/**
auth.json
.ven/
.ven/
.env
.venv/
node_modules/

View File

@@ -6,8 +6,10 @@ import (
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/logger"
"github.com/sirrobot01/decypharr/pkg/qbit"
"github.com/sirrobot01/decypharr/pkg/sabnzbd"
"github.com/sirrobot01/decypharr/pkg/server"
"github.com/sirrobot01/decypharr/pkg/store"
"github.com/sirrobot01/decypharr/pkg/usenet"
"github.com/sirrobot01/decypharr/pkg/version"
"github.com/sirrobot01/decypharr/pkg/web"
"github.com/sirrobot01/decypharr/pkg/webdav"
@@ -58,20 +60,30 @@ func Start(ctx context.Context) error {
`, version.GetInfo(), cfg.LogLevel)
// Initialize services
qb := qbit.New()
wd := webdav.New()
_usenet := usenet.New()
debridCaches := store.Get().Debrid().Caches()
wd := webdav.New(debridCaches, _usenet)
var sb *sabnzbd.SABnzbd
ui := web.New().Routes()
ui := web.New(_usenet).Routes()
webdavRoutes := wd.Routes()
qbitRoutes := qb.Routes()
qb := qbit.New()
// Register routes
handlers := map[string]http.Handler{
"/": ui,
"/api/v2": qbitRoutes,
"/webdav": webdavRoutes,
}
srv := server.New(handlers)
if qb != nil {
handlers["/api/v2"] = qb.Routes()
}
if _usenet != nil {
sb = sabnzbd.New(_usenet)
sabRoutes := sb.Routes()
handlers["/sabnzbd"] = sabRoutes
}
srv := server.New(_usenet, handlers)
done := make(chan struct{})
go func(ctx context.Context) {
@@ -93,8 +105,13 @@ func Start(ctx context.Context) error {
cancelSvc() // tell existing services to shut down
_log.Info().Msg("Restarting Decypharr...")
<-done // wait for them to finish
qb.Reset()
if qb != nil {
qb.Reset()
}
store.Reset()
if _usenet != nil {
_usenet.Close()
}
// rebuild svcCtx off the original parent
svcCtx, cancelSvc = context.WithCancel(ctx)

View File

@@ -62,6 +62,11 @@ Create a `config.json` file in `/opt/decypharr/` with your Decypharr configurati
```
### Docker Compose Setup
- Check your current user and group IDs by running `id -u` and `id -g` in your terminal. You can use these values to set the `PUID` and `PGID` environment variables in the Docker Compose file.
- You should also set `user` to your user ID and group ID in the Docker Compose file to ensure proper file permissions.
Create a `docker-compose.yml` file with the following content:
```yaml
@@ -69,11 +74,14 @@ services:
decypharr:
image: cy01/blackhole:latest
container_name: decypharr
user: "${PUID:-1000}:${PGID:-1000}"
volumes:
- /mnt/:/mnt:rslave
- /opt/decypharr/:/app
environment:
- UMASK=002
- PUID=1000 # Replace with your user ID
- PGID=1000 # Replace with your group ID
ports:
- "8282:8282/tcp"
restart: unless-stopped

14
go.mod
View File

@@ -5,25 +5,32 @@ go 1.24.0
toolchain go1.24.3
require (
github.com/Tensai75/nzbparser v0.1.0
github.com/anacrolix/torrent v1.55.0
github.com/cavaliergopher/grab/v3 v3.0.1
github.com/chrisfarms/yenc v0.0.0-20140520125709-00bca2f8b3cb
github.com/go-chi/chi/v5 v5.1.0
github.com/go-co-op/gocron/v2 v2.16.1
github.com/google/uuid v1.6.0
github.com/gorilla/sessions v1.4.0
github.com/nwaples/rardecode/v2 v2.0.0-beta.4
github.com/puzpuzpuz/xsync/v4 v4.1.0
github.com/robfig/cron/v3 v3.0.1
github.com/rs/zerolog v1.33.0
github.com/sourcegraph/conc v0.3.0
github.com/stanNthe5/stringbuf v0.0.3
go.uber.org/ratelimit v0.3.1
golang.org/x/crypto v0.33.0
golang.org/x/net v0.35.0
golang.org/x/sync v0.12.0
golang.org/x/time v0.8.0
golang.org/x/sync v0.15.0
gopkg.in/natefinch/lumberjack.v2 v2.2.1
)
require (
github.com/Tensai75/subjectparser v0.1.0 // indirect
github.com/anacrolix/missinggo v1.3.0 // indirect
github.com/anacrolix/missinggo/v2 v2.7.3 // indirect
github.com/benbjohnson/clock v1.3.0 // indirect
github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/google/go-cmp v0.6.0 // indirect
@@ -34,5 +41,8 @@ require (
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/rogpeppe/go-internal v1.13.1 // indirect
go.uber.org/atomic v1.7.0 // indirect
go.uber.org/multierr v1.9.0 // indirect
golang.org/x/sys v0.30.0 // indirect
golang.org/x/text v0.26.0 // indirect
)

28
go.sum
View File

@@ -8,6 +8,10 @@ github.com/RoaringBitmap/roaring v0.4.17/go.mod h1:D3qVegWTmfCaX4Bl5CrBE9hfrSrrX
github.com/RoaringBitmap/roaring v0.4.23/go.mod h1:D0gp8kJQgE1A4LQ5wFLggQEyvDi06Mq5mKs52e1TwOo=
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
github.com/Tensai75/nzbparser v0.1.0 h1:6RppAuWFahqu/kKjWO5Br0xuEYcxGz+XBTxYc+qvPo4=
github.com/Tensai75/nzbparser v0.1.0/go.mod h1:IUIIaeGaYp2dLAAF29BWYeKTfI4COvXaeQAzQiTOfMY=
github.com/Tensai75/subjectparser v0.1.0 h1:6fEWnRov8lDHxJS2EWqY6VonwYfrIRN+k8h8H7fFwHA=
github.com/Tensai75/subjectparser v0.1.0/go.mod h1:PNBFBnkOGbVDfX+56ZmC4GKSpqoRMCF1Y44xYd7NLGI=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
@@ -36,6 +40,8 @@ github.com/anacrolix/tagflag v1.1.0/go.mod h1:Scxs9CV10NQatSmbyjqmqmeQNwGzlNe0CM
github.com/anacrolix/torrent v1.55.0 h1:s9yh/YGdPmbN9dTa+0Inh2dLdrLQRvEAj1jdFW/Hdd8=
github.com/anacrolix/torrent v1.55.0/go.mod h1:sBdZHBSZNj4de0m+EbYg7vvs/G/STubxu/GzzNbojsE=
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A=
github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/benbjohnson/immutable v0.2.0/go.mod h1:uc6OHo6PN2++n98KHLxW8ef4W42ylHiQSENghE1ezxI=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
@@ -47,6 +53,8 @@ github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8/go.mod h1:spo1JLcs67
github.com/cavaliergopher/grab/v3 v3.0.1 h1:4z7TkBfmPjmLAAmkkAZNX/6QJ1nNFdv3SdIHXju0Fr4=
github.com/cavaliergopher/grab/v3 v3.0.1/go.mod h1:1U/KNnD+Ft6JJiYoYBAimKH2XrYptb8Kl3DFGmsjpq4=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chrisfarms/yenc v0.0.0-20140520125709-00bca2f8b3cb h1:BK9pqCayfiXrcRypTPxDsunA6hPJtOyOTJYY2DJ429g=
github.com/chrisfarms/yenc v0.0.0-20140520125709-00bca2f8b3cb/go.mod h1:V4bkS2felTTOSIsYx9JivzrbdBOuksi02ZkzfbHUVAk=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -155,6 +163,8 @@ github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3Rllmb
github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg=
github.com/mschoch/smat v0.2.0/go.mod h1:kc9mz7DoBKqDyiRL7VZN8KvXQMWeTaVnttLRXOlotKw=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/nwaples/rardecode/v2 v2.0.0-beta.4 h1:sdiJxQdPjECn2lh9nLFFhgLCf+0ulDU5rODbtERTlUY=
github.com/nwaples/rardecode/v2 v2.0.0-beta.4/go.mod h1:yntwv/HfMc/Hbvtq9I19D1n58te3h6KsqCf3GxyfBGY=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
@@ -183,6 +193,8 @@ github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/puzpuzpuz/xsync/v4 v4.1.0 h1:x9eHRl4QhZFIPJ17yl4KKW9xLyVWbb3/Yq4SXpjF71U=
github.com/puzpuzpuz/xsync/v4 v4.1.0/go.mod h1:VJDmTCJMBt8igNxnkQd86r+8KUeN1quSfNKu5bLYFQo=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
@@ -198,6 +210,8 @@ github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1
github.com/smartystreets/assertions v0.0.0-20190215210624-980c5ac6f3ac/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v0.0.0-20181108003508-044398e4856c/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s=
github.com/smartystreets/goconvey v0.0.0-20190306220146-200a235640ff/go.mod h1:KSQcGKpxUMHk3nbYzs/tIBAM2iDooCn0BmttHOJEbLs=
github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
github.com/stanNthe5/stringbuf v0.0.3 h1:3ChRipDckEY6FykaQ1Dowy3B+ZQa72EDBCasvT5+D1w=
github.com/stanNthe5/stringbuf v0.0.3/go.mod h1:hii5Vr+mucoWkNJlIYQVp8YvuPtq45fFnJEAhcPf2cQ=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
@@ -216,8 +230,14 @@ github.com/willf/bitset v1.1.10/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPy
go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI=
go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ=
go.uber.org/ratelimit v0.3.1 h1:K4qVE+byfv/B3tC+4nYWP7v/6SimcO7HzHekoMNBma0=
go.uber.org/ratelimit v0.3.1/go.mod h1:6euWsTB6U/Nb3X++xEUXA8ciPJvr19Q/0h1+oDcJhRk=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.33.0 h1:IOBPskki6Lysi0lo9qQvbxiQ+FvsCC/YWOecCHAixus=
@@ -246,8 +266,8 @@ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8=
golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -266,8 +286,8 @@ golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg=
golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M=
golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA=
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=

View File

@@ -12,6 +12,13 @@ import (
"sync"
)
type RepairStrategy string
const (
RepairStrategyPerFile RepairStrategy = "per_file"
RepairStrategyPerTorrent RepairStrategy = "per_torrent"
)
var (
instance *Config
once sync.Once
@@ -19,17 +26,19 @@ var (
)
type Debrid struct {
Name string `json:"name,omitempty"`
APIKey string `json:"api_key,omitempty"`
DownloadAPIKeys []string `json:"download_api_keys,omitempty"`
Folder string `json:"folder,omitempty"`
DownloadUncached bool `json:"download_uncached,omitempty"`
CheckCached bool `json:"check_cached,omitempty"`
RateLimit string `json:"rate_limit,omitempty"` // 200/minute or 10/second
Proxy string `json:"proxy,omitempty"`
UnpackRar bool `json:"unpack_rar,omitempty"`
AddSamples bool `json:"add_samples,omitempty"`
MinimumFreeSlot int `json:"minimum_free_slot,omitempty"` // Minimum active pots to use this debrid
Name string `json:"name,omitempty"`
APIKey string `json:"api_key,omitempty"`
DownloadAPIKeys []string `json:"download_api_keys,omitempty"`
Folder string `json:"folder,omitempty"`
DownloadUncached bool `json:"download_uncached,omitempty"`
CheckCached bool `json:"check_cached,omitempty"`
RateLimit string `json:"rate_limit,omitempty"` // 200/minute or 10/second
RepairRateLimit string `json:"repair_rate_limit,omitempty"`
DownloadRateLimit string `json:"download_rate_limit,omitempty"`
Proxy string `json:"proxy,omitempty"`
UnpackRar bool `json:"unpack_rar,omitempty"`
AddSamples bool `json:"add_samples,omitempty"`
MinimumFreeSlot int `json:"minimum_free_slot,omitempty"` // Minimum active slots to use this debrid
UseWebDav bool `json:"use_webdav,omitempty"`
WebDav
@@ -38,7 +47,6 @@ type Debrid struct {
type QBitTorrent struct {
Username string `json:"username,omitempty"`
Password string `json:"password,omitempty"`
Port string `json:"port,omitempty"` // deprecated
DownloadFolder string `json:"download_folder,omitempty"`
Categories []string `json:"categories,omitempty"`
RefreshInterval int `json:"refresh_interval,omitempty"`
@@ -58,13 +66,14 @@ type Arr struct {
}
type Repair struct {
Enabled bool `json:"enabled,omitempty"`
Interval string `json:"interval,omitempty"`
ZurgURL string `json:"zurg_url,omitempty"`
AutoProcess bool `json:"auto_process,omitempty"`
UseWebDav bool `json:"use_webdav,omitempty"`
Workers int `json:"workers,omitempty"`
ReInsert bool `json:"reinsert,omitempty"`
Enabled bool `json:"enabled,omitempty"`
Interval string `json:"interval,omitempty"`
ZurgURL string `json:"zurg_url,omitempty"`
AutoProcess bool `json:"auto_process,omitempty"`
UseWebDav bool `json:"use_webdav,omitempty"`
Workers int `json:"workers,omitempty"`
ReInsert bool `json:"reinsert,omitempty"`
Strategy RepairStrategy `json:"strategy,omitempty"`
}
type Auth struct {
@@ -72,26 +81,55 @@ type Auth struct {
Password string `json:"password,omitempty"`
}
type SABnzbd struct {
DownloadFolder string `json:"download_folder,omitempty"`
RefreshInterval int `json:"refresh_interval,omitempty"`
Categories []string `json:"categories,omitempty"`
}
type Usenet struct {
Providers []UsenetProvider `json:"providers,omitempty"` // List of usenet providers
MountFolder string `json:"mount_folder,omitempty"` // Folder where usenet downloads are mounted
SkipPreCache bool `json:"skip_pre_cache,omitempty"`
Chunks int `json:"chunks,omitempty"` // Number of chunks to pre-cache
RcUrl string `json:"rc_url,omitempty"` // Rclone RC URL for the webdav
RcUser string `json:"rc_user,omitempty"` // Rclone RC username
RcPass string `json:"rc_pass,omitempty"` // Rclone RC password
}
type UsenetProvider struct {
Name string `json:"name,omitempty"`
Host string `json:"host,omitempty"` // Host of the usenet server
Port int `json:"port,omitempty"` // Port of the usenet server
Username string `json:"username,omitempty"`
Password string `json:"password,omitempty"`
Connections int `json:"connections,omitempty"` // Number of connections to use
SSL bool `json:"ssl,omitempty"` // Use SSL for the connection
UseTLS bool `json:"use_tls,omitempty"` // Use TLS for the connection
}
type Config struct {
// server
BindAddress string `json:"bind_address,omitempty"`
URLBase string `json:"url_base,omitempty"`
Port string `json:"port,omitempty"`
LogLevel string `json:"log_level,omitempty"`
Debrids []Debrid `json:"debrids,omitempty"`
QBitTorrent QBitTorrent `json:"qbittorrent,omitempty"`
Arrs []Arr `json:"arrs,omitempty"`
Repair Repair `json:"repair,omitempty"`
WebDav WebDav `json:"webdav,omitempty"`
AllowedExt []string `json:"allowed_file_types,omitempty"`
MinFileSize string `json:"min_file_size,omitempty"` // Minimum file size to download, 10MB, 1GB, etc
MaxFileSize string `json:"max_file_size,omitempty"` // Maximum file size to download (0 means no limit)
Path string `json:"-"` // Path to save the config file
UseAuth bool `json:"use_auth,omitempty"`
Auth *Auth `json:"-"`
DiscordWebhook string `json:"discord_webhook_url,omitempty"`
RemoveStalledAfter string `json:"remove_stalled_after,omitzero"`
LogLevel string `json:"log_level,omitempty"`
Debrids []Debrid `json:"debrids,omitempty"`
QBitTorrent *QBitTorrent `json:"qbittorrent,omitempty"`
SABnzbd *SABnzbd `json:"sabnzbd,omitempty"`
Usenet *Usenet `json:"usenet,omitempty"` // Usenet configuration
Arrs []Arr `json:"arrs,omitempty"`
Repair Repair `json:"repair,omitempty"`
WebDav WebDav `json:"webdav,omitempty"`
AllowedExt []string `json:"allowed_file_types,omitempty"`
MinFileSize string `json:"min_file_size,omitempty"` // Minimum file size to download, 10MB, 1GB, etc
MaxFileSize string `json:"max_file_size,omitempty"` // Maximum file size to download (0 means no limit)
Path string `json:"-"` // Path to save the config file
UseAuth bool `json:"use_auth,omitempty"`
Auth *Auth `json:"-"`
DiscordWebhook string `json:"discord_webhook_url,omitempty"`
RemoveStalledAfter string `json:"remove_stalled_after,omitzero"`
}
func (c *Config) JsonFile() string {
@@ -105,6 +143,10 @@ func (c *Config) TorrentsFile() string {
return filepath.Join(c.Path, "torrents.json")
}
func (c *Config) NZBsPath() string {
return filepath.Join(c.Path, "cache/nzbs")
}
func (c *Config) loadConfig() error {
// Load the config file
if configPath == "" {
@@ -132,9 +174,6 @@ func (c *Config) loadConfig() error {
}
func validateDebrids(debrids []Debrid) error {
if len(debrids) == 0 {
return errors.New("no debrids configured")
}
for _, debrid := range debrids {
// Basic field validation
@@ -149,17 +188,51 @@ func validateDebrids(debrids []Debrid) error {
return nil
}
func validateQbitTorrent(config *QBitTorrent) error {
if config.DownloadFolder == "" {
return errors.New("qbittorent download folder is required")
func validateUsenet(usenet *Usenet) error {
if usenet == nil {
return nil // No usenet configuration provided
}
if _, err := os.Stat(config.DownloadFolder); os.IsNotExist(err) {
return fmt.Errorf("qbittorent download folder(%s) does not exist", config.DownloadFolder)
for _, usenet := range usenet.Providers {
// Basic field validation
if usenet.Host == "" {
return errors.New("usenet host is required")
}
if usenet.Username == "" {
return errors.New("usenet username is required")
}
if usenet.Password == "" {
return errors.New("usenet password is required")
}
}
return nil
}
func validateSabznbd(config *SABnzbd) error {
if config == nil {
return nil // No SABnzbd configuration provided
}
if config.DownloadFolder != "" {
if _, err := os.Stat(config.DownloadFolder); os.IsNotExist(err) {
return fmt.Errorf("sabnzbd download folder(%s) does not exist", config.DownloadFolder)
}
}
return nil
}
func validateRepair(config *Repair) error {
func validateQbitTorrent(config *QBitTorrent) error {
if config == nil {
return nil // No qBittorrent configuration provided
}
if config.DownloadFolder != "" {
if _, err := os.Stat(config.DownloadFolder); os.IsNotExist(err) {
return fmt.Errorf("qbittorent download folder(%s) does not exist", config.DownloadFolder)
}
}
return nil
}
func validateRepair(config Repair) error {
if !config.Enabled {
return nil
}
@@ -171,19 +244,34 @@ func validateRepair(config *Repair) error {
func ValidateConfig(config *Config) error {
// Run validations concurrently
// Check if there's at least one debrid or usenet configured
hasUsenet := false
if config.Usenet != nil && len(config.Usenet.Providers) > 0 {
hasUsenet = true
}
if len(config.Debrids) == 0 && !hasUsenet {
return errors.New("at least one debrid or usenet provider must be configured")
}
if err := validateDebrids(config.Debrids); err != nil {
return err
}
if err := validateQbitTorrent(&config.QBitTorrent); err != nil {
if err := validateUsenet(config.Usenet); err != nil {
return err
}
if err := validateRepair(&config.Repair); err != nil {
if err := validateSabznbd(config.SABnzbd); err != nil {
return err
}
if err := validateQbitTorrent(config.QBitTorrent); err != nil {
return err
}
if err := validateRepair(config.Repair); err != nil {
return err
}
return nil
}
@@ -289,6 +377,10 @@ func (c *Config) updateDebrid(d Debrid) Debrid {
}
d.DownloadAPIKeys = downloadKeys
if d.Workers == 0 {
d.Workers = perDebrid
}
if !d.UseWebDav {
return d
}
@@ -299,9 +391,6 @@ func (c *Config) updateDebrid(d Debrid) Debrid {
if d.WebDav.DownloadLinksRefreshInterval == "" {
d.DownloadLinksRefreshInterval = cmp.Or(c.WebDav.DownloadLinksRefreshInterval, "40m") // 40 minutes
}
if d.Workers == 0 {
d.Workers = perDebrid
}
if d.FolderNaming == "" {
d.FolderNaming = cmp.Or(c.WebDav.FolderNaming, "original_no_ext")
}
@@ -328,17 +417,47 @@ func (c *Config) updateDebrid(d Debrid) Debrid {
return d
}
func (c *Config) updateUsenet(u UsenetProvider) UsenetProvider {
if u.Name == "" {
parts := strings.Split(u.Host, ".")
if len(parts) >= 2 {
u.Name = parts[len(parts)-2] // Gets "example" from "news.example.com"
} else {
u.Name = u.Host // Fallback to host if it doesn't look like a domain
}
}
if u.Port == 0 {
u.Port = 119 // Default port for usenet
}
if u.Connections == 0 {
u.Connections = 30 // Default connections
}
if u.SSL && !u.UseTLS {
u.UseTLS = true // Use TLS if SSL is enabled
}
return u
}
func (c *Config) setDefaults() {
for i, debrid := range c.Debrids {
c.Debrids[i] = c.updateDebrid(debrid)
}
if c.SABnzbd != nil {
c.SABnzbd.RefreshInterval = cmp.Or(c.SABnzbd.RefreshInterval, 10) // Default to 10 seconds
}
if c.Usenet != nil {
c.Usenet.Chunks = cmp.Or(c.Usenet.Chunks, 5)
for i, provider := range c.Usenet.Providers {
c.Usenet.Providers[i] = c.updateUsenet(provider)
}
}
if len(c.AllowedExt) == 0 {
c.AllowedExt = getDefaultExtensions()
}
c.Port = cmp.Or(c.Port, c.QBitTorrent.Port)
if c.URLBase == "" {
c.URLBase = "/"
}
@@ -350,6 +469,11 @@ func (c *Config) setDefaults() {
c.URLBase += "/"
}
// Set repair defaults
if c.Repair.Strategy == "" {
c.Repair.Strategy = RepairStrategyPerTorrent
}
// Load the auth file
c.Auth = c.GetAuth()
}
@@ -380,11 +504,6 @@ func (c *Config) createConfig(path string) error {
c.Port = "8282"
c.LogLevel = "info"
c.UseAuth = true
c.QBitTorrent = QBitTorrent{
DownloadFolder: filepath.Join(path, "downloads"),
Categories: []string{"sonarr", "radarr"},
RefreshInterval: 15,
}
return nil
}
@@ -393,7 +512,3 @@ func Reload() {
instance = nil
once = sync.Once{}
}
func DefaultFreeSlot() int {
return 10
}

View File

@@ -24,7 +24,7 @@ func (c *Config) IsAllowedFile(filename string) bool {
}
func getDefaultExtensions() []string {
videoExts := strings.Split("webm,m4v,3gp,nsv,ty,strm,rm,rmvb,m3u,ifo,mov,qt,divx,xvid,bivx,nrg,pva,wmv,asf,asx,ogm,ogv,m2v,avi,bin,dat,dvr-ms,mpg,mpeg,mp4,avc,vp3,svq3,nuv,viv,dv,fli,flv,wpl,img,iso,vob,mkv,mk3d,ts,wtv,m2ts'", ",")
videoExts := strings.Split("webm,m4v,3gp,nsv,ty,strm,rm,rmvb,m3u,ifo,mov,qt,divx,xvid,bivx,nrg,pva,wmv,asf,asx,ogm,ogv,m2v,avi,bin,dat,dvr-ms,mpg,mpeg,mp4,avc,vp3,svq3,nuv,viv,dv,fli,flv,wpl,img,iso,vob,mkv,mk3d,ts,wtv,m2ts", ",")
musicExts := strings.Split("MP3,WAV,FLAC,OGG,WMA,AIFF,ALAC,M4A,APE,AC3,DTS,M4P,MID,MIDI,MKA,MP2,MPA,RA,VOC,WV,AMR", ",")
// Combine both slices

178
internal/nntp/client.go Normal file
View File

@@ -0,0 +1,178 @@
package nntp
import (
"bytes"
"context"
"errors"
"fmt"
"github.com/puzpuzpuz/xsync/v4"
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/logger"
"sync/atomic"
"time"
)
// Client represents a failover NNTP client that manages multiple providers.
// GetConnection walks the providers in configuration order and falls through
// to the next one when a provider's pool cannot hand out a connection.
type Client struct {
providers []config.UsenetProvider // provider configs, in priority order
pools *xsync.Map[string, *Pool] // per-provider connection pools, keyed by provider name
logger zerolog.Logger
closed atomic.Bool // set by Close; guards against use after shutdown
minimumMaxConns int // Minimum number of max connections across all pools
}
// NewClient builds a failover NNTP client for the given providers.
// Providers are tried in the order given when acquiring connections.
// Pools are not opened here; call InitPools before first use.
//
// Returns an error when no providers are configured.
func NewClient(providers []config.UsenetProvider) (*Client, error) {
	// Validate before allocating anything.
	if len(providers) == 0 {
		return nil, errors.New("no NNTP providers configured")
	}
	return &Client{
		providers: providers,
		logger:    logger.New("nntp"),
		pools:     xsync.NewMap[string, *Pool](),
	}, nil
}
// InitPools opens a connection pool for every configured provider.
// Providers whose pool cannot be created are logged and skipped; the call
// only fails when no pool at all could be initialized. It also tracks the
// smallest per-pool connection limit in minimumMaxConns, which callers use
// to size worker counts conservatively across providers.
func (c *Client) InitPools() error {
	var initErrors []error
	successfulPools := 0

	for _, provider := range c.providers {
		serverPool, err := NewPool(provider, c.logger)
		if err != nil {
			c.logger.Error().
				Err(err).
				Str("server", provider.Host).
				Int("port", provider.Port).
				Msg("Failed to initialize server pool")
			initErrors = append(initErrors, err)
			continue
		}
		if c.minimumMaxConns == 0 {
			// First successful pool seeds the minimum.
			c.minimumMaxConns = serverPool.ConnectionCount()
		} else {
			c.minimumMaxConns = min(c.minimumMaxConns, serverPool.ConnectionCount())
		}
		c.pools.Store(provider.Name, serverPool)
		successfulPools++
	}

	if successfulPools == 0 {
		// Join keeps each underlying error inspectable via errors.Is/As
		// instead of flattening the slice into a string with %v.
		return fmt.Errorf("failed to initialize any server pools: %w", errors.Join(initErrors...))
	}

	c.logger.Info().
		Int("providers", len(c.providers)).
		Msg("NNTP client created")
	return nil
}
// Close shuts down every provider pool exactly once. An error from one
// pool is logged but does not stop the remaining pools from being closed.
func (c *Client) Close() {
	// CompareAndSwap makes Close idempotent and race-free: only the first
	// caller proceeds to tear the pools down. The original check-then-set
	// allowed two concurrent callers to both pass the check.
	if !c.closed.CompareAndSwap(false, true) {
		c.logger.Warn().Msg("NNTP client already closed")
		return
	}
	c.pools.Range(func(key string, value *Pool) bool {
		if value != nil {
			if err := value.Close(); err != nil {
				// Keep iterating: aborting here would leak the
				// connections of every pool not yet visited.
				c.logger.Error().Err(err).Str("pool", key).Msg("failed to close pool")
			}
		}
		return true
	})
	c.logger.Info().Msg("NNTP client closed")
}
// GetConnection acquires a connection from the first provider that can
// supply one, returning the connection plus a release func that must be
// called to return it to its pool. Providers are tried in configuration
// order; exhausted or unavailable providers are skipped. Returns
// ErrNoAvailableConnection when every provider is exhausted.
func (c *Client) GetConnection(ctx context.Context) (*Connection, func(), error) {
	if c.closed.Load() {
		return nil, nil, fmt.Errorf("nntp client is closed")
	}

	// Prevent workers from waiting too long for connections
	connCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
	defer cancel()

	providerCount := len(c.providers)
	for _, provider := range c.providers {
		pool, ok := c.pools.Load(provider.Name)
		if !ok {
			// InitPools skips providers whose pool failed to open, so a
			// missing pool is expected: fall through to the next provider
			// instead of failing the whole request.
			continue
		}
		// With multiple providers, skip a saturated pool early and let a
		// less-loaded provider serve the request.
		if !pool.IsFree() && providerCount > 1 {
			continue
		}
		conn, err := pool.Get(connCtx) // Use timeout context
		if err != nil {
			if errors.Is(err, ErrNoAvailableConnection) || errors.Is(err, context.DeadlineExceeded) {
				continue
			}
			return nil, nil, fmt.Errorf("error getting connection from provider %s: %w", provider.Name, err)
		}
		if conn == nil {
			continue
		}
		return conn, func() { pool.Put(conn) }, nil
	}

	return nil, nil, ErrNoAvailableConnection
}
// DownloadHeader fetches the article body for messageID and decodes only its
// yEnc headers (=ybegin/=ypart), returning the part metadata without
// decoding the payload.
//
// Fix: the original collapsed any decode failure into a bare
// "failed to decode segment", discarding the underlying error; the cause is
// now wrapped with %w and the message ID is included for diagnosis.
func (c *Client) DownloadHeader(ctx context.Context, messageID string) (*YencMetadata, error) {
	conn, cleanup, err := c.GetConnection(ctx)
	if err != nil {
		return nil, err
	}
	defer cleanup()
	data, err := conn.GetBody(messageID)
	if err != nil {
		return nil, err
	}
	// yEnc decode (headers only).
	part, err := DecodeYencHeaders(bytes.NewReader(data))
	if err != nil {
		return nil, fmt.Errorf("failed to decode segment %s: %w", messageID, err)
	}
	if part == nil {
		return nil, fmt.Errorf("failed to decode segment %s: empty yenc metadata", messageID)
	}
	return part, nil
}
// MinimumMaxConns returns the smallest connection count across all
// successfully initialized provider pools (computed during initialization).
func (c *Client) MinimumMaxConns() int {
	return c.minimumMaxConns
}
// TotalActiveConnections reports how many connections are currently checked
// out across every provider pool.
func (c *Client) TotalActiveConnections() int {
	var active int
	c.pools.Range(func(_ string, pool *Pool) bool {
		if pool == nil {
			return true
		}
		active += pool.ActiveConnections()
		return true
	})
	return active
}
// Pools exposes the provider-name -> pool map. The map is shared, not a copy.
func (c *Client) Pools() *xsync.Map[string, *Pool] {
	return c.pools
}
// GetProviders returns the configured usenet providers.
// NOTE(review): this returns the internal slice; callers could mutate it —
// confirm no caller relies on that, or clone at the boundary.
func (c *Client) GetProviders() []config.UsenetProvider {
	return c.providers
}

394
internal/nntp/conns.go Normal file
View File

@@ -0,0 +1,394 @@
package nntp
import (
"bufio"
"crypto/tls"
"fmt"
"github.com/chrisfarms/yenc"
"github.com/rs/zerolog"
"io"
"net"
"net/textproto"
"strconv"
"strings"
)
// Connection represents a single NNTP connection to one news server,
// optionally authenticated and optionally TLS-wrapped (see startTLS).
type Connection struct {
	username, password, address string
	port                        int
	conn                        net.Conn        // raw TCP or TLS stream
	text                        *textproto.Conn // line/dot-block reader over conn
	reader                      *bufio.Reader   // buffered reader over conn
	writer                      *bufio.Writer   // buffered writer; commands go through here
	logger                      zerolog.Logger
}
// authenticate performs the two-step AUTHINFO USER/PASS exchange (RFC 4643).
// The server must reply 381 (password required) to the username and 281
// (authentication accepted) to the password; any other code is classified
// via classifyNNTPError. Transport failures become connection errors.
func (c *Connection) authenticate() error {
	// Send AUTHINFO USER command
	if err := c.sendCommand(fmt.Sprintf("AUTHINFO USER %s", c.username)); err != nil {
		return NewConnectionError(fmt.Errorf("failed to send username: %w", err))
	}
	resp, err := c.readResponse()
	if err != nil {
		return NewConnectionError(fmt.Errorf("failed to read user response: %w", err))
	}
	if resp.Code != 381 {
		return classifyNNTPError(resp.Code, fmt.Sprintf("unexpected response to AUTHINFO USER: %s", resp.Message))
	}
	// Send AUTHINFO PASS command
	if err := c.sendCommand(fmt.Sprintf("AUTHINFO PASS %s", c.password)); err != nil {
		return NewConnectionError(fmt.Errorf("failed to send password: %w", err))
	}
	resp, err = c.readResponse()
	if err != nil {
		return NewConnectionError(fmt.Errorf("failed to read password response: %w", err))
	}
	if resp.Code != 281 {
		return classifyNNTPError(resp.Code, fmt.Sprintf("authentication failed: %s", resp.Message))
	}
	return nil
}
// startTLS upgrades the plain-text connection via the STARTTLS command
// (expected response 382), then rewraps reader, writer and the textproto
// connection around the TLS stream so all subsequent I/O is encrypted.
func (c *Connection) startTLS() error {
	if err := c.sendCommand("STARTTLS"); err != nil {
		return NewConnectionError(fmt.Errorf("failed to send STARTTLS: %w", err))
	}
	resp, err := c.readResponse()
	if err != nil {
		return NewConnectionError(fmt.Errorf("failed to read STARTTLS response: %w", err))
	}
	if resp.Code != 382 {
		return classifyNNTPError(resp.Code, fmt.Sprintf("STARTTLS not supported: %s", resp.Message))
	}
	// Upgrade connection to TLS. ServerName enables hostname verification of
	// the server certificate against the configured address.
	tlsConn := tls.Client(c.conn, &tls.Config{
		ServerName:         c.address,
		InsecureSkipVerify: false,
	})
	// Replace every buffered wrapper so no reads/writes bypass TLS.
	c.conn = tlsConn
	c.reader = bufio.NewReader(tlsConn)
	c.writer = bufio.NewWriter(tlsConn)
	c.text = textproto.NewConn(tlsConn)
	c.logger.Debug().Msg("TLS encryption enabled")
	return nil
}
// ping issues a cheap DATE command to verify the connection is still usable.
func (c *Connection) ping() error {
	if err := c.sendCommand("DATE"); err != nil {
		return NewConnectionError(err)
	}
	if _, err := c.readResponse(); err != nil {
		return NewConnectionError(err)
	}
	return nil
}
// sendCommand writes a single CRLF-terminated command line and flushes it.
func (c *Connection) sendCommand(command string) error {
	if _, err := fmt.Fprintf(c.writer, "%s\r\n", command); err != nil {
		return err
	}
	return c.writer.Flush()
}
// readResponse reads one status line from the server and splits it into its
// numeric code and optional human-readable message.
func (c *Connection) readResponse() (*Response, error) {
	line, err := c.text.ReadLine()
	if err != nil {
		return nil, err
	}
	codeStr, message, _ := strings.Cut(line, " ")
	code, err := strconv.Atoi(codeStr)
	if err != nil {
		return nil, fmt.Errorf("invalid response code: %s", codeStr)
	}
	return &Response{
		Code:    code,
		Message: message,
	}, nil
}
// readMultilineResponse reads a status line and, for 2xx success codes, the
// dot-terminated data block that follows it (dot-unstuffed by textproto).
// Non-2xx responses are returned as-is with no Lines.
func (c *Connection) readMultilineResponse() (*Response, error) {
	resp, err := c.readResponse()
	if err != nil {
		return nil, err
	}
	// Check if this is a multiline response.
	// NOTE(review): assumes every 2xx reply carries a data block, which holds
	// for the commands used here (ARTICLE/HEAD/BODY) — confirm before reuse.
	if resp.Code < 200 || resp.Code >= 300 {
		return resp, nil
	}
	lines, err := c.text.ReadDotLines()
	if err != nil {
		return nil, err
	}
	resp.Lines = lines
	return resp, nil
}
// GetArticle retrieves a full article (headers + body) by message ID via the
// ARTICLE command (expected response 220). Transport failures become
// connection errors; NNTP-level failures are classified by response code.
func (c *Connection) GetArticle(messageID string) (*Article, error) {
	messageID = FormatMessageID(messageID)
	if err := c.sendCommand(fmt.Sprintf("ARTICLE %s", messageID)); err != nil {
		return nil, NewConnectionError(fmt.Errorf("failed to send ARTICLE command: %w", err))
	}
	resp, err := c.readMultilineResponse()
	if err != nil {
		return nil, NewConnectionError(fmt.Errorf("failed to read article response: %w", err))
	}
	if resp.Code != 220 {
		return nil, classifyNNTPError(resp.Code, resp.Message)
	}
	return c.parseArticle(messageID, resp.Lines)
}
// GetBody retrieves the raw article body by message ID via the BODY command
// (expected response 222). The dot-terminated block is read via textproto
// (which strips dot-stuffing) and re-joined with CRLF line endings, plus a
// trailing CRLF, to restore the wire formatting that yEnc decoding expects.
func (c *Connection) GetBody(messageID string) ([]byte, error) {
	messageID = FormatMessageID(messageID)
	if err := c.sendCommand(fmt.Sprintf("BODY %s", messageID)); err != nil {
		return nil, NewConnectionError(fmt.Errorf("failed to send BODY command: %w", err))
	}
	// Read the initial response
	resp, err := c.readResponse()
	if err != nil {
		return nil, NewConnectionError(fmt.Errorf("failed to read body response: %w", err))
	}
	if resp.Code != 222 {
		return nil, classifyNNTPError(resp.Code, resp.Message)
	}
	// Read the raw body data directly using textproto to preserve exact formatting for yEnc
	lines, err := c.text.ReadDotLines()
	if err != nil {
		return nil, NewConnectionError(fmt.Errorf("failed to read body data: %w", err))
	}
	// Join with \r\n to preserve original line endings and add final \r\n
	body := strings.Join(lines, "\r\n")
	if len(lines) > 0 {
		body += "\r\n"
	}
	return []byte(body), nil
}
// GetHead retrieves the article headers by message ID via the HEAD command
// (expected response 221). Headers are re-joined with CRLF line endings plus
// a trailing CRLF, mirroring GetBody's formatting.
func (c *Connection) GetHead(messageID string) ([]byte, error) {
	messageID = FormatMessageID(messageID)
	if err := c.sendCommand(fmt.Sprintf("HEAD %s", messageID)); err != nil {
		return nil, NewConnectionError(fmt.Errorf("failed to send HEAD command: %w", err))
	}
	// Read the initial response
	resp, err := c.readResponse()
	if err != nil {
		return nil, NewConnectionError(fmt.Errorf("failed to read head response: %w", err))
	}
	if resp.Code != 221 {
		return nil, classifyNNTPError(resp.Code, resp.Message)
	}
	// Read the header data using textproto
	lines, err := c.text.ReadDotLines()
	if err != nil {
		return nil, NewConnectionError(fmt.Errorf("failed to read header data: %w", err))
	}
	// Join with \r\n to preserve original line endings and add final \r\n
	headers := strings.Join(lines, "\r\n")
	if len(lines) > 0 {
		headers += "\r\n"
	}
	return []byte(headers), nil
}
// GetSegment downloads the body for messageID and wraps it as a Segment with
// the given segment number.
func (c *Connection) GetSegment(messageID string, segmentNumber int) (*Segment, error) {
	formatted := FormatMessageID(messageID)
	data, err := c.GetBody(formatted)
	if err != nil {
		// GetBody already returns classified errors.
		return nil, err
	}
	seg := &Segment{
		MessageID: formatted,
		Number:    segmentNumber,
		Bytes:     int64(len(data)),
		Data:      data,
	}
	return seg, nil
}
// Stat issues the STAT command to check an article's existence without
// downloading it. A 223 reply carries "article-number <message-id>", which
// is parsed and returned; other codes are classified via classifyNNTPError.
func (c *Connection) Stat(messageID string) (articleNumber int, echoedID string, err error) {
	messageID = FormatMessageID(messageID)
	if err = c.sendCommand(fmt.Sprintf("STAT %s", messageID)); err != nil {
		return 0, "", NewConnectionError(fmt.Errorf("failed to send STAT: %w", err))
	}
	resp, err := c.readResponse()
	if err != nil {
		return 0, "", NewConnectionError(fmt.Errorf("failed to read STAT response: %w", err))
	}
	if resp.Code != 223 {
		return 0, "", classifyNNTPError(resp.Code, resp.Message)
	}
	fields := strings.Fields(resp.Message)
	if len(fields) < 2 {
		return 0, "", NewProtocolError(resp.Code, fmt.Sprintf("unexpected STAT response format: %q", resp.Message))
	}
	if articleNumber, err = strconv.Atoi(fields[0]); err != nil {
		return 0, "", NewProtocolError(resp.Code, fmt.Sprintf("invalid article number %q: %v", fields[0], err))
	}
	echoedID = fields[1]
	return articleNumber, echoedID, nil
}
// SelectGroup issues the GROUP command (expected response 211) and parses
// the "count low high name" summary into a GroupInfo. Numeric fields that
// fail to parse are silently left at their zero value.
func (c *Connection) SelectGroup(groupName string) (*GroupInfo, error) {
	if err := c.sendCommand(fmt.Sprintf("GROUP %s", groupName)); err != nil {
		return nil, NewConnectionError(fmt.Errorf("failed to send GROUP command: %w", err))
	}
	resp, err := c.readResponse()
	if err != nil {
		return nil, NewConnectionError(fmt.Errorf("failed to read GROUP response: %w", err))
	}
	if resp.Code != 211 {
		return nil, classifyNNTPError(resp.Code, resp.Message)
	}
	// Parse GROUP response: "211 number low high group-name"
	fields := strings.Fields(resp.Message)
	if len(fields) < 4 {
		return nil, NewProtocolError(resp.Code, fmt.Sprintf("unexpected GROUP response format: %q", resp.Message))
	}
	groupInfo := &GroupInfo{
		Name: groupName,
	}
	if count, err := strconv.Atoi(fields[0]); err == nil {
		groupInfo.Count = count
	}
	if low, err := strconv.Atoi(fields[1]); err == nil {
		groupInfo.Low = low
	}
	if high, err := strconv.Atoi(fields[2]); err == nil {
		groupInfo.High = high
	}
	return groupInfo, nil
}
// parseArticle splits response lines into headers and body at the first
// blank line, extracting Subject/From/Date/Newsgroups into the Article.
//
// Fix: header-name matching is now case-insensitive, as message header
// names are per RFC 3977/RFC 5536 ("subject:", "SUBJECT:" etc. were
// previously ignored).
func (c *Connection) parseArticle(messageID string, lines []string) (*Article, error) {
	article := &Article{
		MessageID: messageID,
		Groups:    []string{},
	}
	headerEnd := -1
	for i, line := range lines {
		if line == "" {
			// Blank line terminates the header section.
			headerEnd = i
			break
		}
		// Parse "Name: value" headers; lines without ": " (e.g. folded
		// continuations) are skipped, matching the original behavior.
		name, value, ok := strings.Cut(line, ": ")
		if !ok {
			continue
		}
		switch {
		case strings.EqualFold(name, "Subject"):
			article.Subject = value
		case strings.EqualFold(name, "From"):
			article.From = value
		case strings.EqualFold(name, "Date"):
			article.Date = value
		case strings.EqualFold(name, "Newsgroups"):
			article.Groups = strings.Split(value, ",")
			for i := range article.Groups {
				article.Groups[i] = strings.TrimSpace(article.Groups[i])
			}
		}
	}
	// Join body lines
	if headerEnd != -1 && headerEnd+1 < len(lines) {
		body := strings.Join(lines[headerEnd+1:], "\n")
		article.Body = []byte(body)
		article.Size = int64(len(article.Body))
	}
	return article, nil
}
// close shuts down the underlying TCP/TLS stream. Safe to call when the
// connection was never established (nil conn).
func (c *Connection) close() error {
	if c.conn != nil {
		return c.conn.Close()
	}
	return nil
}
// DecodeYenc fully decodes a yEnc-encoded part from reader.
//
// Fix: the previous error message claimed "failed to create yenc decoder",
// but yenc.Decode performs the decode itself — the message now describes
// the actual failure.
func DecodeYenc(reader io.Reader) (*yenc.Part, error) {
	part, err := yenc.Decode(reader)
	if err != nil {
		return nil, NewYencDecodeError(fmt.Errorf("failed to decode yenc data: %w", err))
	}
	return part, nil
}
// IsValidMessageID reports whether messageID looks plausible: at least three
// characters long and containing an "@" separator.
func IsValidMessageID(messageID string) bool {
	return len(messageID) >= 3 && strings.Contains(messageID, "@")
}
// FormatMessageID trims surrounding whitespace and guarantees the ID is
// wrapped in angle brackets, as NNTP commands require.
func FormatMessageID(messageID string) string {
	id := strings.TrimSpace(messageID)
	if !strings.HasPrefix(id, "<") {
		id = "<" + id
	}
	if strings.HasSuffix(id, ">") {
		return id
	}
	return id + ">"
}

116
internal/nntp/decoder.go Normal file
View File

@@ -0,0 +1,116 @@
package nntp
import (
"bufio"
"fmt"
"io"
"strconv"
"strings"
)
// YencMetadata contains just the yEnc header information, parsed from the
// =ybegin (and, for multipart files, =ypart) lines without decoding the body.
type YencMetadata struct {
	Name     string // filename from the =ybegin name= parameter
	Size     int64  // total file size (size=)
	Part     int    // part number (part=); 0 for single-part files
	Total    int    // total parts (total=)
	Begin    int64  // part start byte from =ypart (begin=)
	End      int64  // part end byte from =ypart (end=)
	LineSize int    // encoded line length (line=)
}
// DecodeYencHeaders extracts yEnc header metadata from reader without
// decoding the body. For multipart files (Part > 0) the =ypart header is
// parsed as well.
func DecodeYencHeaders(reader io.Reader) (*YencMetadata, error) {
	meta := &YencMetadata{}
	buf := bufio.NewReader(reader)
	if err := parseYBeginHeader(buf, meta); err != nil {
		return nil, NewYencDecodeError(fmt.Errorf("failed to parse ybegin header: %w", err))
	}
	if meta.Part <= 0 {
		// Single-part file: no =ypart header follows.
		return meta, nil
	}
	if err := parseYPartHeader(buf, meta); err != nil {
		return nil, NewYencDecodeError(fmt.Errorf("failed to parse ypart header: %w", err))
	}
	return meta, nil
}
// parseYBeginHeader scans buf for the "=ybegin" line and fills metadata with
// the size/line/part/total parameters and the trailing name value.
func parseYBeginHeader(buf *bufio.Reader, metadata *YencMetadata) error {
	var line string
	for {
		var err error
		line, err = buf.ReadString('\n')
		if err != nil {
			return err
		}
		if strings.HasPrefix(line, "=ybegin") {
			break
		}
	}
	// The name parameter is last and may contain spaces, so split it off
	// before tokenizing the rest.
	params, name, hasName := strings.Cut(line[7:], "name=")
	if hasName {
		metadata.Name = strings.TrimSpace(name)
	}
	for _, field := range strings.Split(params, " ") {
		key, value, ok := strings.Cut(strings.TrimSpace(field), "=")
		if !ok {
			continue
		}
		switch key {
		case "size":
			metadata.Size, _ = strconv.ParseInt(value, 10, 64)
		case "line":
			metadata.LineSize, _ = strconv.Atoi(value)
		case "part":
			metadata.Part, _ = strconv.Atoi(value)
		case "total":
			metadata.Total, _ = strconv.Atoi(value)
		}
	}
	return nil
}
// parseYPartHeader scans buf for the "=ypart" line and records the begin and
// end byte offsets of this part.
func parseYPartHeader(buf *bufio.Reader, metadata *YencMetadata) error {
	var line string
	for {
		var err error
		line, err = buf.ReadString('\n')
		if err != nil {
			return err
		}
		if strings.HasPrefix(line, "=ypart") {
			break
		}
	}
	for _, field := range strings.Split(line[6:], " ") {
		key, value, ok := strings.Cut(strings.TrimSpace(field), "=")
		if !ok {
			continue
		}
		switch key {
		case "begin":
			metadata.Begin, _ = strconv.ParseInt(value, 10, 64)
		case "end":
			metadata.End, _ = strconv.ParseInt(value, 10, 64)
		}
	}
	return nil
}

195
internal/nntp/errors.go Normal file
View File

@@ -0,0 +1,195 @@
package nntp
import (
"errors"
"fmt"
)
// ErrorType classifies NNTP operation failures.
type ErrorType int

const (
	ErrorTypeUnknown ErrorType = iota
	ErrorTypeConnection
	ErrorTypeAuthentication
	ErrorTypeTimeout
	ErrorTypeArticleNotFound
	ErrorTypeGroupNotFound
	ErrorTypePermissionDenied
	ErrorTypeServerBusy
	ErrorTypeInvalidCommand
	ErrorTypeProtocol
	ErrorTypeYencDecode
	ErrorTypeNoAvailableConnection
)

// Error represents an NNTP-specific error.
type Error struct {
	Type    ErrorType
	Code    int    // NNTP response code
	Message string // Error message
	Err     error  // Underlying error
}

// Predefined errors for common cases. These act as sentinels for errors.Is:
// Error.Is matches by Type only, so wrapped errors of the same type compare
// equal to these regardless of Code or Message.
var (
	ErrArticleNotFound       = &Error{Type: ErrorTypeArticleNotFound, Code: 430, Message: "article not found"}
	ErrGroupNotFound         = &Error{Type: ErrorTypeGroupNotFound, Code: 411, Message: "group not found"}
	ErrPermissionDenied      = &Error{Type: ErrorTypePermissionDenied, Code: 502, Message: "permission denied"}
	ErrAuthenticationFail    = &Error{Type: ErrorTypeAuthentication, Code: 482, Message: "authentication failed"}
	ErrServerBusy            = &Error{Type: ErrorTypeServerBusy, Code: 400, Message: "server busy"}
	ErrPoolNotFound          = &Error{Type: ErrorTypeUnknown, Code: 0, Message: "NNTP pool not found", Err: nil}
	ErrNoAvailableConnection = &Error{Type: ErrorTypeNoAvailableConnection, Code: 0, Message: "no available connection in pool", Err: nil}
)
// Error implements the error interface, appending the underlying cause when
// one is present.
func (e *Error) Error() string {
	base := fmt.Sprintf("NNTP %s (code %d): %s", e.Type.String(), e.Code, e.Message)
	if e.Err == nil {
		return base
	}
	return fmt.Sprintf("%s - %v", base, e.Err)
}
// Unwrap returns the wrapped cause, enabling errors.Is/errors.As chains.
func (e *Error) Unwrap() error {
	return e.Err
}
// Is matches two *Error values by Type only — Code and Message are ignored.
// This lets callers use errors.Is(err, ErrNoAvailableConnection) against the
// predefined sentinel values.
func (e *Error) Is(target error) bool {
	if t, ok := target.(*Error); ok {
		return e.Type == t.Type
	}
	return false
}
// IsRetryable reports whether the error is transient — connection drops,
// timeouts and busy servers — and therefore worth retrying. All other error
// types (not-found, permission, auth, etc.) are permanent.
func (e *Error) IsRetryable() bool {
	return e.Type == ErrorTypeConnection ||
		e.Type == ErrorTypeTimeout ||
		e.Type == ErrorTypeServerBusy
}
// ShouldStopParsing reports whether this error is critical enough to abort
// the entire parsing run. Only authentication and permission failures
// qualify; connection drops, missing articles and busy servers are all
// recoverable by continuing with other connections or articles.
func (e *Error) ShouldStopParsing() bool {
	return e.Type == ErrorTypeAuthentication || e.Type == ErrorTypePermissionDenied
}
// String returns the log-friendly, upper-snake-case name of the error type.
// Unrecognized values (including ErrorTypeUnknown and
// ErrorTypeNoAvailableConnection, which has no dedicated case) map to
// "UNKNOWN".
func (et ErrorType) String() string {
	switch et {
	case ErrorTypeConnection:
		return "CONNECTION"
	case ErrorTypeAuthentication:
		return "AUTHENTICATION"
	case ErrorTypeTimeout:
		return "TIMEOUT"
	case ErrorTypeArticleNotFound:
		return "ARTICLE_NOT_FOUND"
	case ErrorTypeGroupNotFound:
		return "GROUP_NOT_FOUND"
	case ErrorTypePermissionDenied:
		return "PERMISSION_DENIED"
	case ErrorTypeServerBusy:
		return "SERVER_BUSY"
	case ErrorTypeInvalidCommand:
		return "INVALID_COMMAND"
	case ErrorTypeProtocol:
		return "PROTOCOL"
	case ErrorTypeYencDecode:
		return "YENC_DECODE"
	default:
		return "UNKNOWN"
	}
}
// NewConnectionError wraps err as a retryable connection failure (no NNTP code).
func NewConnectionError(err error) *Error {
	return &Error{
		Type:    ErrorTypeConnection,
		Message: "connection failed",
		Err:     err,
	}
}
// NewTimeoutError wraps err as a retryable timeout (no NNTP code).
func NewTimeoutError(err error) *Error {
	return &Error{
		Type:    ErrorTypeTimeout,
		Message: "operation timed out",
		Err:     err,
	}
}
// NewProtocolError builds a non-retryable protocol error carrying the raw
// NNTP response code and message.
func NewProtocolError(code int, message string) *Error {
	return &Error{
		Type:    ErrorTypeProtocol,
		Code:    code,
		Message: message,
	}
}
// NewYencDecodeError wraps err as a yEnc decoding failure.
func NewYencDecodeError(err error) *Error {
	return &Error{
		Type:    ErrorTypeYencDecode,
		Message: "yEnc decode failed",
		Err:     err,
	}
}
// classifyNNTPError maps an NNTP status code to a typed *Error, following
// RFC 3977 / RFC 4643 semantics: 430/423 article not found, 411 no such
// group, 502/503 permission/service denied, 480/481/482 authentication,
// 400 server busy, 500/501 bad command, other >=400 generic protocol.
//
// Fix: 480 ("authentication required", RFC 4643) previously fell through to
// the generic protocol bucket and so was not treated as an auth failure.
func classifyNNTPError(code int, message string) *Error {
	switch {
	case code == 430 || code == 423:
		return &Error{Type: ErrorTypeArticleNotFound, Code: code, Message: message}
	case code == 411:
		return &Error{Type: ErrorTypeGroupNotFound, Code: code, Message: message}
	case code == 502 || code == 503:
		return &Error{Type: ErrorTypePermissionDenied, Code: code, Message: message}
	case code == 480 || code == 481 || code == 482:
		return &Error{Type: ErrorTypeAuthentication, Code: code, Message: message}
	case code == 400:
		return &Error{Type: ErrorTypeServerBusy, Code: code, Message: message}
	case code == 500 || code == 501:
		return &Error{Type: ErrorTypeInvalidCommand, Code: code, Message: message}
	case code >= 400:
		return &Error{Type: ErrorTypeProtocol, Code: code, Message: message}
	default:
		return &Error{Type: ErrorTypeUnknown, Code: code, Message: message}
	}
}
// IsArticleNotFoundError reports whether err is (or wraps) an NNTP
// article-not-found error.
func IsArticleNotFoundError(err error) bool {
	var e *Error
	return errors.As(err, &e) && e.Type == ErrorTypeArticleNotFound
}
// IsAuthenticationError reports whether err is (or wraps) an NNTP
// authentication error.
func IsAuthenticationError(err error) bool {
	var e *Error
	return errors.As(err, &e) && e.Type == ErrorTypeAuthentication
}
// IsRetryableError reports whether err is (or wraps) a transient NNTP error
// worth retrying.
func IsRetryableError(err error) bool {
	var e *Error
	return errors.As(err, &e) && e.IsRetryable()
}

299
internal/nntp/pool.go Normal file
View File

@@ -0,0 +1,299 @@
package nntp
import (
"bufio"
"context"
"crypto/tls"
"fmt"
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/config"
"net"
"net/textproto"
"sync"
"sync/atomic"
"time"
)
// Pool manages a fixed-size pool of NNTP connections to a single provider.
// Idle connections live in the buffered connections channel; the atomic
// counters track total created vs. currently checked-out connections.
type Pool struct {
	address, username, password string
	maxConns, port              int
	ssl                         bool // implicit TLS: dial with TLS from the start
	useTLS                      bool // explicit TLS: upgrade via STARTTLS after connect
	connections                 chan *Connection
	logger                      zerolog.Logger
	closed                      atomic.Bool
	totalConnections            atomic.Int32 // connections successfully created at init
	activeConnections           atomic.Int32 // connections currently checked out
}
// Segment represents a usenet segment (one article body of a multipart post).
type Segment struct {
	MessageID string
	Number    int
	Bytes     int64
	Data      []byte
}
// Article represents a complete usenet article (parsed headers plus body).
type Article struct {
	MessageID string
	Subject   string
	From      string
	Date      string
	Groups    []string
	Body      []byte
	Size      int64
}
// Response represents an NNTP server response: status code, status message,
// and (for multiline responses) the dot-terminated data block.
type Response struct {
	Code    int
	Message string
	Lines   []string
}
// GroupInfo represents information about a newsgroup.
type GroupInfo struct {
	Name  string
	Count int // Number of articles in the group
	Low   int // Lowest article number
	High  int // Highest article number
}
// NewPool builds a connection pool for the given usenet provider and dials
// all of its connections up front. It fails only when no connection at all
// could be established.
func NewPool(provider config.UsenetProvider, logger zerolog.Logger) (*Pool, error) {
	// Guarantee at least one connection even for zero/negative configs.
	connLimit := provider.Connections
	if connLimit <= 0 {
		connLimit = 1
	}
	p := &Pool{
		address:     provider.Host,
		username:    provider.Username,
		password:    provider.Password,
		port:        provider.Port,
		maxConns:    connLimit,
		ssl:         provider.SSL,
		useTLS:      provider.UseTLS,
		connections: make(chan *Connection, connLimit),
		logger:      logger,
	}
	return p.initializeConnections()
}
// initializeConnections dials all p.maxConns connections concurrently and
// fills the pool with whichever succeed. It fails only when every dial
// fails; partial success is logged as a warning and the pool proceeds with
// reduced capacity.
//
// Cleanup: the goroutine's unused connIndex parameter has been removed.
func (p *Pool) initializeConnections() (*Pool, error) {
	var (
		wg         sync.WaitGroup
		mu         sync.Mutex // guards successful and errs
		successful []*Connection
		errs       []error
	)
	// Create connections concurrently.
	for i := 0; i < p.maxConns; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			conn, err := p.createConnection()
			mu.Lock()
			defer mu.Unlock()
			if err != nil {
				errs = append(errs, err)
				return
			}
			successful = append(successful, conn)
		}()
	}
	// Wait for all connection attempts to complete.
	wg.Wait()
	// Add successful connections to the pool channel; its capacity is
	// maxConns, so these sends never block.
	for _, conn := range successful {
		p.connections <- conn
	}
	p.totalConnections.Store(int32(len(successful)))
	if len(successful) == 0 {
		return nil, fmt.Errorf("failed to create any connections: %v", errs)
	}
	// Log results.
	p.logger.Info().
		Str("server", p.address).
		Int("port", p.port).
		Int("requested_connections", p.maxConns).
		Int("successful_connections", len(successful)).
		Int("failed_connections", len(errs)).
		Msg("NNTP connection pool created")
	// If some connections failed, log a warning but continue.
	if len(errs) > 0 {
		p.logger.Warn().
			Int("failed_count", len(errs)).
			Msg("Some connections failed during pool initialization")
	}
	return p, nil
}
// Get retrieves a connection from the pool, validating it with a ping and
// transparently replacing stale connections. Blocks until a connection is
// available or ctx is done.
//
// Fixes: (1) an error from closing a stale connection no longer aborts the
// replacement attempt — it is logged and replacement proceeds; (2) when
// creating a replacement fails, totalConnections is decremented so the
// pool's reported capacity reflects the permanently lost slot.
func (p *Pool) Get(ctx context.Context) (*Connection, error) {
	if p.closed.Load() {
		return nil, NewConnectionError(fmt.Errorf("connection pool is closed"))
	}
	select {
	case conn := <-p.connections:
		if conn == nil {
			return nil, NewConnectionError(fmt.Errorf("received nil connection from pool"))
		}
		p.activeConnections.Add(1)
		if err := conn.ping(); err != nil {
			// Stale connection: drop it and create a replacement so the pool
			// keeps its capacity.
			p.activeConnections.Add(-1)
			if closeErr := conn.close(); closeErr != nil {
				p.logger.Debug().Err(closeErr).Msg("error closing stale connection")
			}
			newConn, err := p.createConnection()
			if err != nil {
				// Replacement failed: the pool has permanently lost one slot.
				p.totalConnections.Add(-1)
				return nil, NewConnectionError(fmt.Errorf("failed to create replacement connection: %w", err))
			}
			p.activeConnections.Add(1)
			return newConn, nil
		}
		return conn, nil
	case <-ctx.Done():
		return nil, NewTimeoutError(ctx.Err())
	}
}
// Put returns a connection to the pool and releases its "active" slot.
// If the pool is closed the connection is simply closed. If the channel is
// unexpectedly full, the oldest pooled connection is evicted to make room;
// if that still stalls for 1s, the returned connection is dropped instead.
func (p *Pool) Put(conn *Connection) {
	if conn == nil {
		return
	}
	defer p.activeConnections.Add(-1)
	if p.closed.Load() {
		conn.close()
		return
	}
	// Try non-blocking first
	select {
	case p.connections <- conn:
		return
	default:
	}
	// If pool is full, this usually means we have too many connections
	// Force return by making space (close oldest connection)
	// NOTE(review): if Close() runs concurrently after the closed check
	// above, the sends below could hit a closed channel and panic — confirm
	// Put is never raced with Close.
	select {
	case oldConn := <-p.connections:
		oldConn.close() // Close the old connection
		p.connections <- conn // Put the new one back
	case <-time.After(1 * time.Second):
		// Still can't return - close this connection
		conn.close()
	}
}
// Close marks the pool closed and closes every pooled connection. It is
// idempotent and returns the first close error encountered, if any.
//
// Fixes: (1) the closed flag is flipped atomically (CompareAndSwap) so two
// concurrent Close calls cannot both drain the channel; (2) a connection
// close error no longer aborts the loop — all remaining connections are
// still closed instead of being leaked, and the first error is returned.
func (p *Pool) Close() error {
	if !p.closed.CompareAndSwap(false, true) {
		return nil
	}
	close(p.connections)
	var firstErr error
	for conn := range p.connections {
		if err := conn.close(); err != nil && firstErr == nil {
			firstErr = err
		}
	}
	p.logger.Info().Msg("NNTP connection pool closed")
	return firstErr
}
// createConnection dials the provider (implicit TLS when p.ssl), reads the
// server welcome banner, authenticates when credentials are configured, and
// finally upgrades via STARTTLS when p.useTLS is set on a non-SSL dial.
//
// NOTE(review): authentication happens BEFORE the STARTTLS upgrade, so on a
// useTLS (non-ssl) connection the credentials travel in cleartext — confirm
// this ordering is intentional.
func (p *Pool) createConnection() (*Connection, error) {
	addr := fmt.Sprintf("%s:%d", p.address, p.port)
	var conn net.Conn
	var err error
	if p.ssl {
		conn, err = tls.DialWithDialer(&net.Dialer{}, "tcp", addr, &tls.Config{
			InsecureSkipVerify: false,
		})
	} else {
		conn, err = net.Dial("tcp", addr)
	}
	if err != nil {
		return nil, NewConnectionError(fmt.Errorf("failed to connect to %s: %w", addr, err))
	}
	reader := bufio.NewReaderSize(conn, 256*1024) // 256KB buffer for better performance
	writer := bufio.NewWriterSize(conn, 256*1024) // 256KB buffer for better performance
	text := textproto.NewConn(conn)
	nntpConn := &Connection{
		username: p.username,
		password: p.password,
		address:  p.address,
		port:     p.port,
		conn:     conn,
		text:     text,
		reader:   reader,
		writer:   writer,
		logger:   p.logger,
	}
	// Read welcome message
	_, err = nntpConn.readResponse()
	if err != nil {
		conn.Close()
		return nil, NewConnectionError(fmt.Errorf("failed to read welcome message: %w", err))
	}
	// Authenticate if credentials are provided
	if p.username != "" && p.password != "" {
		if err := nntpConn.authenticate(); err != nil {
			conn.Close()
			return nil, err // authenticate() already returns NNTPError
		}
	}
	// Enable TLS if requested (STARTTLS)
	if p.useTLS && !p.ssl {
		if err := nntpConn.startTLS(); err != nil {
			conn.Close()
			return nil, err // startTLS() already returns NNTPError
		}
	}
	return nntpConn, nil
}
// ConnectionCount returns the number of connections successfully created
// during pool initialization.
func (p *Pool) ConnectionCount() int {
	return int(p.totalConnections.Load())
}
// ActiveConnections returns the number of connections currently checked out.
func (p *Pool) ActiveConnections() int {
	return int(p.activeConnections.Load())
}
// IsFree reports whether the pool has spare capacity (fewer active
// connections than its configured maximum).
func (p *Pool) IsFree() bool {
	return p.ActiveConnections() < p.maxConns
}

View File

@@ -5,14 +5,12 @@ import (
"context"
"crypto/tls"
"encoding/json"
"errors"
"fmt"
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/logger"
"go.uber.org/ratelimit"
"golang.org/x/net/proxy"
"golang.org/x/time/rate"
"io"
"math"
"math/rand"
"net"
"net/http"
@@ -52,7 +50,7 @@ type ClientOption func(*Client)
// Client represents an HTTP client with additional capabilities
type Client struct {
client *http.Client
rateLimiter *rate.Limiter
rateLimiter ratelimit.Limiter
headers map[string]string
headersMu sync.RWMutex
maxRetries int
@@ -84,7 +82,7 @@ func WithRedirectPolicy(policy func(req *http.Request, via []*http.Request) erro
}
// WithRateLimiter sets a rate limiter
func WithRateLimiter(rl *rate.Limiter) ClientOption {
func WithRateLimiter(rl ratelimit.Limiter) ClientOption {
return func(c *Client) {
c.rateLimiter = rl
}
@@ -136,9 +134,11 @@ func WithProxy(proxyURL string) ClientOption {
// doRequest performs a single HTTP request with rate limiting
func (c *Client) doRequest(req *http.Request) (*http.Response, error) {
if c.rateLimiter != nil {
err := c.rateLimiter.Wait(req.Context())
if err != nil {
return nil, fmt.Errorf("rate limiter wait: %w", err)
select {
case <-req.Context().Done():
return nil, req.Context().Err()
default:
c.rateLimiter.Take()
}
}
@@ -179,8 +179,7 @@ func (c *Client) Do(req *http.Request) (*http.Response, error) {
resp, err = c.doRequest(req)
if err != nil {
// Check if this is a network error that might be worth retrying
if isRetryableError(err) && attempt < c.maxRetries {
if attempt < c.maxRetries {
// Apply backoff with jitter
jitter := time.Duration(rand.Int63n(int64(backoff / 4)))
sleepTime := backoff + jitter
@@ -339,7 +338,10 @@ func New(options ...ClientOption) *Client {
return client
}
func ParseRateLimit(rateStr string) *rate.Limiter {
func ParseRateLimit(rateStr string) ratelimit.Limiter {
if rateStr == "" {
return nil
}
parts := strings.SplitN(rateStr, "/", 2)
if len(parts) != 2 {
return nil
@@ -351,23 +353,21 @@ func ParseRateLimit(rateStr string) *rate.Limiter {
return nil
}
// Set slack size to 10%
slackSize := count / 10
// normalize unit
unit := strings.ToLower(strings.TrimSpace(parts[1]))
unit = strings.TrimSuffix(unit, "s")
burstSize := int(math.Ceil(float64(count) * 0.1))
if burstSize < 1 {
burstSize = 1
}
if burstSize > count {
burstSize = count
}
switch unit {
case "minute", "min":
return rate.NewLimiter(rate.Limit(float64(count)/60.0), burstSize)
return ratelimit.New(count, ratelimit.Per(time.Minute), ratelimit.WithSlack(slackSize))
case "second", "sec":
return rate.NewLimiter(rate.Limit(float64(count)), burstSize)
return ratelimit.New(count, ratelimit.Per(time.Second), ratelimit.WithSlack(slackSize))
case "hour", "hr":
return rate.NewLimiter(rate.Limit(float64(count)/3600.0), burstSize)
return ratelimit.New(count, ratelimit.Per(time.Hour), ratelimit.WithSlack(slackSize))
case "day", "d":
return ratelimit.New(count, ratelimit.Per(24*time.Hour), ratelimit.WithSlack(slackSize))
default:
return nil
}
@@ -388,30 +388,3 @@ func Default() *Client {
})
return instance
}
func isRetryableError(err error) bool {
errString := err.Error()
// Connection reset and other network errors
if strings.Contains(errString, "connection reset by peer") ||
strings.Contains(errString, "read: connection reset") ||
strings.Contains(errString, "connection refused") ||
strings.Contains(errString, "network is unreachable") ||
strings.Contains(errString, "connection timed out") ||
strings.Contains(errString, "no such host") ||
strings.Contains(errString, "i/o timeout") ||
strings.Contains(errString, "unexpected EOF") ||
strings.Contains(errString, "TLS handshake timeout") {
return true
}
// Check for net.Error type which can provide more information
var netErr net.Error
if errors.As(err, &netErr) {
// Retry on timeout errors and temporary errors
return netErr.Timeout()
}
// Not a retryable error
return false
}

View File

@@ -1,5 +1,16 @@
package utils
import (
	"fmt"
	"io"
	"mime"
	"net/http"
	"net/url"
	"path"
	"path/filepath"
	"strconv"
	"strings"
	"time"
)
func RemoveItem[S ~[]E, E comparable](s S, values ...E) S {
result := make(S, 0, len(s))
outer:
@@ -22,3 +33,131 @@ func Contains(slice []string, value string) bool {
}
return false
}
// GenerateHash returns a short deterministic hash of data as a decimal
// string, computed with a 31-based polynomial rolling hash modulo 1000003.
//
// Fix: the previous version returned string(rune(hash)) — a single, often
// unprintable or invalid, Unicode code point — instead of a usable string.
// The numeric value is now rendered with strconv.Itoa.
func GenerateHash(data string) string {
	h := 0
	for _, char := range data {
		h = (h*31 + int(char)) % 1000003
	}
	return strconv.Itoa(h)
}
// DownloadFile fetches url and returns the server-provided filename plus the
// response body. The filename is derived from the Content-Disposition header
// when present, falling back to the URL path.
//
// Fix: uses a client with a timeout instead of http.Get (which has none), so
// a stalled server cannot hang the caller indefinitely.
func DownloadFile(url string) (string, []byte, error) {
	client := &http.Client{Timeout: 5 * time.Minute}
	resp, err := client.Get(url)
	if err != nil {
		return "", nil, fmt.Errorf("failed to download file: %w", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return "", nil, fmt.Errorf("failed to download file: status code %d", resp.StatusCode)
	}
	filename := getFilenameFromResponse(resp, url)
	data, err := io.ReadAll(resp.Body)
	if err != nil {
		return "", nil, fmt.Errorf("failed to read response body: %w", err)
	}
	return filename, data, nil
}
func getFilenameFromResponse(resp *http.Response, originalURL string) string {
// 1. Try Content-Disposition header
if cd := resp.Header.Get("Content-Disposition"); cd != "" {
if _, params, err := mime.ParseMediaType(cd); err == nil {
if filename := params["filename"]; filename != "" {
return filename
}
}
}
// 2. Try to decode URL-encoded filename from Content-Disposition
if cd := resp.Header.Get("Content-Disposition"); cd != "" {
if strings.Contains(cd, "filename*=") {
// Handle RFC 5987 encoded filenames
parts := strings.Split(cd, "filename*=")
if len(parts) > 1 {
encoded := strings.Trim(parts[1], `"`)
if strings.HasPrefix(encoded, "UTF-8''") {
if decoded, err := url.QueryUnescape(encoded[7:]); err == nil {
return decoded
}
}
}
}
}
// 3. Fall back to URL path
if parsedURL, err := url.Parse(originalURL); err == nil {
if filename := filepath.Base(parsedURL.Path); filename != "." && filename != "/" {
// URL decode the filename
if decoded, err := url.QueryUnescape(filename); err == nil {
return decoded
}
return filename
}
}
// 4. Default filename
return "downloaded_file"
}
// ValidateServiceURL checks that urlStr is either a full http(s) URL or a
// bare host:port pair, returning a descriptive error otherwise.
func ValidateServiceURL(urlStr string) error {
	if urlStr == "" {
		return fmt.Errorf("URL cannot be empty")
	}
	// Full URL with scheme and host?
	if parsed, err := url.Parse(urlStr); err == nil && parsed.Scheme != "" && parsed.Host != "" {
		switch parsed.Scheme {
		case "http", "https":
			return nil
		default:
			return fmt.Errorf("URL scheme must be http or https")
		}
	}
	// Bare host:port (no scheme)?
	if strings.Contains(urlStr, ":") && !strings.Contains(urlStr, "://") {
		// Prefix a scheme so url.Parse interprets the host portion correctly.
		parsed, err := url.Parse("http://" + urlStr)
		if err != nil {
			return fmt.Errorf("invalid host:port format: %w", err)
		}
		if parsed.Host == "" {
			return fmt.Errorf("host is required in host:port format")
		}
		if parsed.Port() == "" {
			return fmt.Errorf("port is required in host:port format")
		}
		return nil
	}
	return fmt.Errorf("invalid URL format: %s", urlStr)
}
// ExtractFilenameFromURL returns the final path segment of rawURL, or the
// empty string when the URL is unparseable or has no usable segment.
func ExtractFilenameFromURL(rawURL string) string {
	parsed, err := url.Parse(rawURL)
	if err != nil {
		return ""
	}
	switch name := path.Base(parsed.Path); name {
	case "/", ".", "":
		return ""
	default:
		return name
	}
}

View File

@@ -40,12 +40,10 @@ func RemoveInvalidChars(value string) string {
}
func RemoveExtension(value string) string {
loc := mediaRegex.FindStringIndex(value)
if loc != nil {
if loc := mediaRegex.FindStringIndex(value); loc != nil {
return value[:loc[0]]
} else {
return value
}
return value
}
func IsMediaFile(path string) bool {
@@ -53,8 +51,21 @@ func IsMediaFile(path string) bool {
}
func IsSampleFile(path string) bool {
if strings.HasSuffix(strings.ToLower(path), "sample.mkv") {
filename := filepath.Base(path)
if strings.HasSuffix(strings.ToLower(filename), "sample.mkv") {
return true
}
return RegexMatch(sampleRegex, path)
}
// IsParFile reports whether path names a PAR/PAR2 recovery file,
// matching the .par and .par2 extensions case-insensitively.
func IsParFile(path string) bool {
	switch strings.ToLower(filepath.Ext(path)) {
	case ".par", ".par2":
		return true
	default:
		return false
	}
}
// IsRarFile reports whether path names a RAR archive (.rar) or one of
// its split volumes (.r00, .r01, ...). The previous implementation only
// matched .r00 through .r04, which misses later volumes of a split
// archive; split sets commonly run through .r99, so any ".r" extension
// followed by exactly two digits is now accepted. Matching is
// case-insensitive, consistent with the .rar check.
func IsRarFile(path string) bool {
	ext := filepath.Ext(path)
	if strings.EqualFold(ext, ".rar") {
		return true
	}
	// Split-volume pattern: "." + 'r'/'R' + two ASCII digits (len 4 total).
	if len(ext) == 4 && (ext[1] == 'r' || ext[1] == 'R') &&
		ext[2] >= '0' && ext[2] <= '9' &&
		ext[3] >= '0' && ext[3] <= '9' {
		return true
	}
	return false
}

1624
package-lock.json generated Normal file

File diff suppressed because it is too large Load Diff

19
package.json Normal file
View File

@@ -0,0 +1,19 @@
{
"name": "decypharr",
"version": "1.0.0",
"description": "Media management tool",
"scripts": {
"build-css": "tailwindcss -i ./pkg/web/assets/styles.css -o ./pkg/web/assets/build/css/styles.css --minify",
"minify-js": "node scripts/minify-js.js",
"download-assets": "node scripts/download-assets.js",
"build": "npm run build-css && npm run minify-js",
"build-all": "npm run download-assets && npm run build",
"dev": "npm run build && air"
},
"devDependencies": {
"tailwindcss": "^3.4.0",
"daisyui": "^4.12.10",
"terser": "^5.24.0",
"clean-css": "^5.3.3"
}
}

View File

@@ -115,8 +115,10 @@ func (a *Arr) Validate() error {
if err != nil {
return err
}
if resp.StatusCode != http.StatusOK {
return fmt.Errorf("arr test failed: %s", resp.Status)
defer resp.Body.Close()
// If response is not 200 or 404(this is the case for Lidarr, etc), return an error
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNotFound {
return fmt.Errorf("failed to validate arr %s: %s", a.Name, resp.Status)
}
return nil
}

View File

@@ -309,7 +309,7 @@ func (ad *AllDebrid) GetFileDownloadLinks(t *types.Torrent) error {
errCh <- err
return
}
if link != nil {
if link == nil {
errCh <- fmt.Errorf("download link is empty")
return
}

View File

@@ -1,5 +1,10 @@
package alldebrid
import (
"encoding/json"
"fmt"
)
type errorResponse struct {
Code string `json:"code"`
Message string `json:"message"`
@@ -32,6 +37,8 @@ type magnetInfo struct {
Files []MagnetFile `json:"files"`
}
type Magnets []magnetInfo
type TorrentInfoResponse struct {
Status string `json:"status"`
Data struct {
@@ -43,7 +50,7 @@ type TorrentInfoResponse struct {
type TorrentsListResponse struct {
Status string `json:"status"`
Data struct {
Magnets []magnetInfo `json:"magnets"`
Magnets Magnets `json:"magnets"`
} `json:"data"`
Error *errorResponse `json:"error"`
}
@@ -81,3 +88,27 @@ type DownloadLink struct {
} `json:"data"`
Error *errorResponse `json:"error"`
}
// UnmarshalJSON decodes the AllDebrid "magnets" payload, which the API
// returns either as a JSON array of magnet objects or as a JSON object
// with string keys mapping to magnet objects. An array replaces the
// slice; an object's values are appended to it. Any other JSON shape
// produces an error.
func (m *Magnets) UnmarshalJSON(data []byte) error {
	// Attempt the array form first.
	var asList []magnetInfo
	if json.Unmarshal(data, &asList) == nil {
		*m = asList
		return nil
	}
	// Fall back to the keyed-object form.
	var asMap map[string]magnetInfo
	if json.Unmarshal(data, &asMap) == nil {
		for _, info := range asMap {
			*m = append(*m, info)
		}
		return nil
	}
	return fmt.Errorf("magnets: unsupported JSON format")
}

View File

@@ -2,6 +2,7 @@ package realdebrid
import (
"bytes"
"cmp"
"encoding/json"
"errors"
"fmt"
@@ -33,6 +34,7 @@ type RealDebrid struct {
DownloadUncached bool
client *request.Client
downloadClient *request.Client
repairClient *request.Client
autoExpiresLinksAfter time.Duration
MountPath string
@@ -49,6 +51,8 @@ type RealDebrid struct {
func New(dc config.Debrid) (*RealDebrid, error) {
rl := request.ParseRateLimit(dc.RateLimit)
repairRl := request.ParseRateLimit(cmp.Or(dc.RepairRateLimit, dc.RateLimit))
downloadRl := request.ParseRateLimit(cmp.Or(dc.DownloadRateLimit, dc.RateLimit))
headers := map[string]string{
"Authorization": fmt.Sprintf("Bearer %s", dc.APIKey),
@@ -77,11 +81,20 @@ func New(dc config.Debrid) (*RealDebrid, error) {
request.WithProxy(dc.Proxy),
),
downloadClient: request.New(
request.WithRateLimiter(downloadRl),
request.WithLogger(_log),
request.WithMaxRetries(10),
request.WithRetryableStatus(429, 447, 502),
request.WithProxy(dc.Proxy),
),
repairClient: request.New(
request.WithRateLimiter(repairRl),
request.WithHeaders(headers),
request.WithLogger(_log),
request.WithMaxRetries(4),
request.WithRetryableStatus(429, 502),
request.WithProxy(dc.Proxy),
),
MountPath: dc.Folder,
logger: logger.New(dc.Name),
rarSemaphore: make(chan struct{}, 2),
@@ -608,7 +621,7 @@ func (r *RealDebrid) CheckLink(link string) error {
"link": {link},
}
req, _ := http.NewRequest(http.MethodPost, url, strings.NewReader(payload.Encode()))
resp, err := r.client.Do(req)
resp, err := r.repairClient.Do(req)
if err != nil {
return err
}
@@ -621,7 +634,7 @@ func (r *RealDebrid) CheckLink(link string) error {
func (r *RealDebrid) _getDownloadLink(file *types.File) (*types.DownloadLink, error) {
url := fmt.Sprintf("%s/unrestrict/link/", r.Host)
_link := file.Link
if strings.HasPrefix(_link, "https://real-debrid.com/d/") {
if strings.HasPrefix(file.Link, "https://real-debrid.com/d/") && len(file.Link) > 39 {
_link = file.Link[0:39]
}
payload := gourl.Values{

View File

@@ -4,13 +4,6 @@ import (
"bytes"
"encoding/json"
"fmt"
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/logger"
"github.com/sirrobot01/decypharr/internal/request"
"github.com/sirrobot01/decypharr/internal/utils"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
"github.com/sirrobot01/decypharr/pkg/version"
"mime/multipart"
"net/http"
gourl "net/url"
@@ -21,6 +14,14 @@ import (
"strings"
"sync"
"time"
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/logger"
"github.com/sirrobot01/decypharr/internal/request"
"github.com/sirrobot01/decypharr/internal/utils"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
"github.com/sirrobot01/decypharr/pkg/version"
)
type Torbox struct {
@@ -168,7 +169,7 @@ func (tb *Torbox) SubmitMagnet(torrent *types.Torrent) (*types.Torrent, error) {
return torrent, nil
}
func getTorboxStatus(status string, finished bool) string {
func (tb *Torbox) getTorboxStatus(status string, finished bool) string {
if finished {
return "downloaded"
}
@@ -176,12 +177,16 @@ func getTorboxStatus(status string, finished bool) string {
"checkingResumeData", "metaDL", "pausedUP", "queuedUP", "checkingUP",
"forcedUP", "allocating", "downloading", "metaDL", "pausedDL",
"queuedDL", "checkingDL", "forcedDL", "checkingResumeData", "moving"}
var determinedStatus string
switch {
case utils.Contains(downloading, status):
return "downloading"
determinedStatus = "downloading"
default:
return "error"
determinedStatus = "error"
}
return determinedStatus
}
func (tb *Torbox) GetTorrent(torrentId string) (*types.Torrent, error) {
@@ -206,7 +211,7 @@ func (tb *Torbox) GetTorrent(torrentId string) (*types.Torrent, error) {
Bytes: data.Size,
Folder: data.Name,
Progress: data.Progress * 100,
Status: getTorboxStatus(data.DownloadState, data.DownloadFinished),
Status: tb.getTorboxStatus(data.DownloadState, data.DownloadFinished),
Speed: data.DownloadSpeed,
Seeders: data.Seeds,
Filename: data.Name,
@@ -217,19 +222,33 @@ func (tb *Torbox) GetTorrent(torrentId string) (*types.Torrent, error) {
Added: data.CreatedAt.Format(time.RFC3339),
}
cfg := config.Get()
totalFiles := 0
skippedSamples := 0
skippedFileType := 0
skippedSize := 0
validFiles := 0
filesWithLinks := 0
for _, f := range data.Files {
totalFiles++
fileName := filepath.Base(f.Name)
if !tb.addSamples && utils.IsSampleFile(f.AbsolutePath) {
// Skip sample files
skippedSamples++
continue
}
if !cfg.IsAllowedFile(fileName) {
skippedFileType++
continue
}
if !cfg.IsSizeAllowed(f.Size) {
skippedSize++
continue
}
validFiles++
file := types.File{
TorrentId: t.Id,
Id: strconv.Itoa(f.Id),
@@ -237,8 +256,26 @@ func (tb *Torbox) GetTorrent(torrentId string) (*types.Torrent, error) {
Size: f.Size,
Path: f.Name,
}
// For downloaded torrents, set a placeholder link to indicate file is available
if data.DownloadFinished {
file.Link = fmt.Sprintf("torbox://%s/%d", t.Id, f.Id)
filesWithLinks++
}
t.Files[fileName] = file
}
// Log summary only if there are issues or for debugging
tb.logger.Debug().
Str("torrent_id", t.Id).
Str("torrent_name", t.Name).
Bool("download_finished", data.DownloadFinished).
Str("status", t.Status).
Int("total_files", totalFiles).
Int("valid_files", validFiles).
Int("final_file_count", len(t.Files)).
Msg("Torrent file processing completed")
var cleanPath string
if len(t.Files) > 0 {
cleanPath = path.Clean(data.Files[0].Name)
@@ -266,24 +303,33 @@ func (tb *Torbox) UpdateTorrent(t *types.Torrent) error {
}
data := res.Data
name := data.Name
t.Name = name
t.Bytes = data.Size
t.Folder = name
t.Progress = data.Progress * 100
t.Status = getTorboxStatus(data.DownloadState, data.DownloadFinished)
t.Status = tb.getTorboxStatus(data.DownloadState, data.DownloadFinished)
t.Speed = data.DownloadSpeed
t.Seeders = data.Seeds
t.Filename = name
t.OriginalFilename = name
t.MountPath = tb.MountPath
t.Debrid = tb.name
// Clear existing files map to rebuild it
t.Files = make(map[string]types.File)
cfg := config.Get()
validFiles := 0
filesWithLinks := 0
for _, f := range data.Files {
fileName := filepath.Base(f.Name)
if !tb.addSamples && utils.IsSampleFile(f.AbsolutePath) {
// Skip sample files
continue
}
if !cfg.IsAllowedFile(fileName) {
continue
}
@@ -291,6 +337,8 @@ func (tb *Torbox) UpdateTorrent(t *types.Torrent) error {
if !cfg.IsSizeAllowed(f.Size) {
continue
}
validFiles++
file := types.File{
TorrentId: t.Id,
Id: strconv.Itoa(f.Id),
@@ -298,8 +346,16 @@ func (tb *Torbox) UpdateTorrent(t *types.Torrent) error {
Size: f.Size,
Path: fileName,
}
// For downloaded torrents, set a placeholder link to indicate file is available
if data.DownloadFinished {
file.Link = fmt.Sprintf("torbox://%s/%s", t.Id, strconv.Itoa(f.Id))
filesWithLinks++
}
t.Files[fileName] = file
}
var cleanPath string
if len(t.Files) > 0 {
cleanPath = path.Clean(data.Files[0].Name)
@@ -409,30 +465,58 @@ func (tb *Torbox) GetDownloadLink(t *types.Torrent, file *types.File) (*types.Do
query.Add("token", tb.APIKey)
query.Add("file_id", file.Id)
url += "?" + query.Encode()
req, _ := http.NewRequest(http.MethodGet, url, nil)
resp, err := tb.client.MakeRequest(req)
if err != nil {
tb.logger.Error().
Err(err).
Str("torrent_id", t.Id).
Str("file_id", file.Id).
Msg("Failed to make request to Torbox API")
return nil, err
}
var data DownloadLinksResponse
if err = json.Unmarshal(resp, &data); err != nil {
tb.logger.Error().
Err(err).
Str("torrent_id", t.Id).
Str("file_id", file.Id).
Msg("Failed to unmarshal Torbox API response")
return nil, err
}
if data.Data == nil {
tb.logger.Error().
Str("torrent_id", t.Id).
Str("file_id", file.Id).
Bool("success", data.Success).
Interface("error", data.Error).
Str("detail", data.Detail).
Msg("Torbox API returned no data")
return nil, fmt.Errorf("error getting download links")
}
link := *data.Data
if link == "" {
tb.logger.Error().
Str("torrent_id", t.Id).
Str("file_id", file.Id).
Msg("Torbox API returned empty download link")
return nil, fmt.Errorf("error getting download links")
}
now := time.Now()
return &types.DownloadLink{
downloadLink := &types.DownloadLink{
Link: file.Link,
DownloadLink: link,
Id: file.Id,
Generated: now,
ExpiresAt: now.Add(tb.autoExpiresLinksAfter),
}, nil
}
return downloadLink, nil
}
func (tb *Torbox) GetDownloadingStatus() []string {
@@ -440,7 +524,87 @@ func (tb *Torbox) GetDownloadingStatus() []string {
}
func (tb *Torbox) GetTorrents() ([]*types.Torrent, error) {
return nil, nil
url := fmt.Sprintf("%s/api/torrents/mylist", tb.Host)
req, _ := http.NewRequest(http.MethodGet, url, nil)
resp, err := tb.client.MakeRequest(req)
if err != nil {
return nil, err
}
var res TorrentsListResponse
err = json.Unmarshal(resp, &res)
if err != nil {
return nil, err
}
if !res.Success || res.Data == nil {
return nil, fmt.Errorf("torbox API error: %v", res.Error)
}
torrents := make([]*types.Torrent, 0, len(*res.Data))
cfg := config.Get()
for _, data := range *res.Data {
t := &types.Torrent{
Id: strconv.Itoa(data.Id),
Name: data.Name,
Bytes: data.Size,
Folder: data.Name,
Progress: data.Progress * 100,
Status: tb.getTorboxStatus(data.DownloadState, data.DownloadFinished),
Speed: data.DownloadSpeed,
Seeders: data.Seeds,
Filename: data.Name,
OriginalFilename: data.Name,
MountPath: tb.MountPath,
Debrid: tb.name,
Files: make(map[string]types.File),
Added: data.CreatedAt.Format(time.RFC3339),
InfoHash: data.Hash,
}
// Process files
for _, f := range data.Files {
fileName := filepath.Base(f.Name)
if !tb.addSamples && utils.IsSampleFile(f.AbsolutePath) {
// Skip sample files
continue
}
if !cfg.IsAllowedFile(fileName) {
continue
}
if !cfg.IsSizeAllowed(f.Size) {
continue
}
file := types.File{
TorrentId: t.Id,
Id: strconv.Itoa(f.Id),
Name: fileName,
Size: f.Size,
Path: f.Name,
}
// For downloaded torrents, set a placeholder link to indicate file is available
if data.DownloadFinished {
file.Link = fmt.Sprintf("torbox://%s/%d", t.Id, f.Id)
}
t.Files[fileName] = file
}
// Set original filename based on first file or torrent name
var cleanPath string
if len(t.Files) > 0 {
cleanPath = path.Clean(data.Files[0].Name)
} else {
cleanPath = path.Clean(data.Name)
}
t.OriginalFilename = strings.Split(cleanPath, "/")[0]
torrents = append(torrents, t)
}
return torrents, nil
}
func (tb *Torbox) GetDownloadUncached() bool {

View File

@@ -57,7 +57,7 @@ type torboxInfo struct {
} `json:"files"`
DownloadPath string `json:"download_path"`
InactiveCheck int `json:"inactive_check"`
Availability int `json:"availability"`
Availability float64 `json:"availability"`
DownloadFinished bool `json:"download_finished"`
Tracker interface{} `json:"tracker"`
TotalUploaded int `json:"total_uploaded"`
@@ -73,3 +73,5 @@ type torboxInfo struct {
type InfoResponse APIResponse[torboxInfo]
type DownloadLinksResponse APIResponse[string]
type TorrentsListResponse APIResponse[[]torboxInfo]

View File

@@ -6,7 +6,6 @@ import (
"context"
"errors"
"fmt"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
"os"
"path"
"path/filepath"
@@ -17,13 +16,16 @@ import (
"sync/atomic"
"time"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
"encoding/json"
_ "time/tzdata"
"github.com/go-co-op/gocron/v2"
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/logger"
"github.com/sirrobot01/decypharr/internal/utils"
_ "time/tzdata"
)
type WebDavFolderNaming string
@@ -514,9 +516,9 @@ func (c *Cache) setTorrent(t CachedTorrent, callback func(torrent CachedTorrent)
updatedTorrent.Files = mergedFiles
}
c.torrents.set(torrentName, t, updatedTorrent)
c.SaveTorrent(t)
go c.SaveTorrent(t)
if callback != nil {
callback(updatedTorrent)
go callback(updatedTorrent)
}
}
@@ -682,8 +684,13 @@ func (c *Cache) ProcessTorrent(t *types.Torrent) error {
}
if !isComplete(t.Files) {
c.logger.Debug().Msgf("Torrent %s is still not complete. Triggering a reinsert(disabled)", t.Id)
c.logger.Debug().
Str("torrent_id", t.Id).
Str("torrent_name", t.Name).
Int("total_files", len(t.Files)).
Msg("Torrent still not complete after refresh")
} else {
addedOn, err := time.Parse(time.RFC3339, t.Added)
if err != nil {
addedOn = time.Now()
@@ -702,6 +709,7 @@ func (c *Cache) ProcessTorrent(t *types.Torrent) error {
func (c *Cache) Add(t *types.Torrent) error {
if len(t.Files) == 0 {
c.logger.Warn().Msgf("Torrent %s has no files to add. Refreshing", t.Id)
if err := c.client.UpdateTorrent(t); err != nil {
return fmt.Errorf("failed to update torrent: %w", err)
}

View File

@@ -3,6 +3,7 @@ package store
import (
"errors"
"fmt"
"github.com/sirrobot01/decypharr/internal/utils"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
)
@@ -102,9 +103,16 @@ func (c *Cache) fetchDownloadLink(torrentName, filename, fileLink string) (*type
}
c.logger.Trace().Msgf("Getting download link for %s(%s)", filename, file.Link)
downloadLink, err := c.client.GetDownloadLink(ct.Torrent, &file)
if err != nil {
if errors.Is(err, utils.HosterUnavailableError) {
c.logger.Trace().
Str("filename", filename).
Str("torrent_id", ct.Id).
Msg("Hoster unavailable, attempting to reinsert torrent")
newCt, err := c.reInsertTorrent(ct)
if err != nil {
return nil, fmt.Errorf("failed to reinsert torrent: %w", err)
@@ -117,12 +125,11 @@ func (c *Cache) fetchDownloadLink(torrentName, filename, fileLink string) (*type
// Retry getting the download link
downloadLink, err = c.client.GetDownloadLink(ct.Torrent, &file)
if err != nil {
return nil, err
return nil, fmt.Errorf("retry failed to get download link: %w", err)
}
if downloadLink == nil {
return nil, fmt.Errorf("download link is empty for")
return nil, fmt.Errorf("download link is empty after retry")
}
return nil, nil
} else if errors.Is(err, utils.TrafficExceededError) {
// This is likely a fair usage limit error
return nil, err

View File

@@ -136,67 +136,67 @@ func (c *Cache) refreshRclone() error {
return nil
}
client := &http.Client{
Timeout: 60 * time.Second,
Transport: &http.Transport{
MaxIdleConns: 10,
IdleConnTimeout: 60 * time.Second,
DisableCompression: false,
MaxIdleConnsPerHost: 5,
},
}
client := http.DefaultClient
// Create form data
data := ""
data := c.buildRcloneRequestData()
if err := c.sendRcloneRequest(client, "vfs/forget", data); err != nil {
c.logger.Error().Err(err).Msg("Failed to send rclone vfs/forget request")
}
if err := c.sendRcloneRequest(client, "vfs/refresh", data); err != nil {
c.logger.Error().Err(err).Msg("Failed to send rclone vfs/refresh request")
}
return nil
}
func (c *Cache) buildRcloneRequestData() string {
cfg := c.config
dirs := strings.FieldsFunc(cfg.RcRefreshDirs, func(r rune) bool {
return r == ',' || r == '&'
})
if len(dirs) == 0 {
data = "dir=__all__"
} else {
for index, dir := range dirs {
if dir != "" {
if index == 0 {
data += "dir=" + dir
} else {
data += "&dir" + fmt.Sprint(index+1) + "=" + dir
}
return "dir=__all__"
}
var data strings.Builder
for index, dir := range dirs {
if dir != "" {
if index == 0 {
data.WriteString("dir=" + dir)
} else {
data.WriteString("&dir" + fmt.Sprint(index+1) + "=" + dir)
}
}
}
return data.String()
}
sendRequest := func(endpoint string) error {
req, err := http.NewRequest("POST", fmt.Sprintf("%s/%s", cfg.RcUrl, endpoint), strings.NewReader(data))
if err != nil {
return err
}
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
if cfg.RcUser != "" && cfg.RcPass != "" {
req.SetBasicAuth(cfg.RcUser, cfg.RcPass)
}
resp, err := client.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
if resp.StatusCode != 200 {
body, _ := io.ReadAll(io.LimitReader(resp.Body, 1024))
return fmt.Errorf("failed to perform %s: %s - %s", endpoint, resp.Status, string(body))
}
_, _ = io.Copy(io.Discard, resp.Body)
return nil
}
if err := sendRequest("vfs/forget"); err != nil {
return err
}
if err := sendRequest("vfs/refresh"); err != nil {
// sendRcloneRequest POSTs url-encoded form data to the given rclone
// remote-control endpoint, applying basic auth when both RcUser and
// RcPass are configured. A non-200 response is turned into an error
// containing up to 1KB of the response body.
func (c *Cache) sendRcloneRequest(client *http.Client, endpoint, data string) error {
	target := fmt.Sprintf("%s/%s", c.config.RcUrl, endpoint)
	req, err := http.NewRequest("POST", target, strings.NewReader(data))
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
	if c.config.RcUser != "" && c.config.RcPass != "" {
		req.SetBasicAuth(c.config.RcUser, c.config.RcPass)
	}
	resp, err := client.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		snippet, _ := io.ReadAll(io.LimitReader(resp.Body, 1024))
		return fmt.Errorf("failed to perform %s: %s - %s", endpoint, resp.Status, string(snippet))
	}
	// Drain the body so the transport can reuse the connection.
	_, _ = io.Copy(io.Discard, resp.Body)
	return nil
}

View File

@@ -4,6 +4,7 @@ import (
"context"
"errors"
"fmt"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/utils"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
"sync"
@@ -60,6 +61,7 @@ func (c *Cache) markAsSuccessfullyReinserted(torrentId string) {
func (c *Cache) GetBrokenFiles(t *CachedTorrent, filenames []string) []string {
files := make(map[string]types.File)
repairStrategy := config.Get().Repair.Strategy
brokenFiles := make([]string, 0)
if len(filenames) > 0 {
for name, f := range t.Files {
@@ -89,23 +91,54 @@ func (c *Cache) GetBrokenFiles(t *CachedTorrent, filenames []string) []string {
}
files = t.Files
var wg sync.WaitGroup
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// Use a mutex to protect brokenFiles slice and torrent-wide failure flag
var mu sync.Mutex
torrentWideFailed := false
wg.Add(len(files))
for _, f := range files {
// Check if file link is still missing
go func(f types.File) {
defer wg.Done()
select {
case <-ctx.Done():
return
default:
}
if f.Link == "" {
brokenFiles = append(brokenFiles, f.Name)
} else {
// Check if file.Link not in the downloadLink Cache
if err := c.client.CheckLink(f.Link); err != nil {
if errors.Is(err, utils.HosterUnavailableError) {
mu.Lock()
if repairStrategy == config.RepairStrategyPerTorrent {
torrentWideFailed = true
mu.Unlock()
cancel() // Signal all other goroutines to stop
return
} else {
// per_file strategy - only mark this file as broken
brokenFiles = append(brokenFiles, f.Name)
}
mu.Unlock()
return
}
if err := c.client.CheckLink(f.Link); err != nil {
if errors.Is(err, utils.HosterUnavailableError) {
mu.Lock()
if repairStrategy == config.RepairStrategyPerTorrent {
torrentWideFailed = true
mu.Unlock()
cancel() // Signal all other goroutines to stop
return
} else {
// per_file strategy - only mark this file as broken
brokenFiles = append(brokenFiles, f.Name)
}
mu.Unlock()
}
}
}(f)
@@ -113,6 +146,15 @@ func (c *Cache) GetBrokenFiles(t *CachedTorrent, filenames []string) []string {
wg.Wait()
// Handle the result based on strategy
if repairStrategy == config.RepairStrategyPerTorrent && torrentWideFailed {
// Mark all files as broken for per_torrent strategy
for _, f := range files {
brokenFiles = append(brokenFiles, f.Name)
}
}
// For per_file strategy, brokenFiles already contains only the broken ones
// Try to reinsert the torrent if it's broken
if len(brokenFiles) > 0 && t.Torrent != nil {
// Check if the torrent is already in progress

View File

@@ -171,17 +171,18 @@ func (tc *torrentCache) refreshListing() {
wg.Add(1) // for all listing
go func() {
defer wg.Done()
listing := make([]os.FileInfo, len(all))
for i, sf := range all {
listing[i] = &fileInfo{sf.id, sf.name, sf.size, 0755 | os.ModeDir, sf.modTime, true}
}
tc.listing.Store(listing)
}()
wg.Done()
wg.Add(1)
// For __bad__
go func() {
defer wg.Done()
listing := make([]os.FileInfo, 0)
for _, sf := range all {
if sf.bad {
@@ -203,7 +204,6 @@ func (tc *torrentCache) refreshListing() {
}
tc.folders.Unlock()
}()
wg.Done()
now := time.Now()
wg.Add(len(tc.directoriesFilters)) // for each directory filter

View File

@@ -18,7 +18,7 @@ func NewAccounts(debridConf config.Debrid) *Accounts {
if token == "" {
continue
}
account := newAccount(token, idx)
account := newAccount(debridConf.Name, token, idx)
accounts = append(accounts, account)
}
@@ -33,6 +33,7 @@ func NewAccounts(debridConf config.Debrid) *Accounts {
}
type Account struct {
Debrid string // e.g., "realdebrid", "torbox", etc.
Order int
Disabled bool
Token string
@@ -176,30 +177,31 @@ func (a *Accounts) SetDownloadLinks(links map[string]*DownloadLink) {
a.Current().setLinks(links)
}
func newAccount(token string, index int) *Account {
func newAccount(debridName, token string, index int) *Account {
return &Account{
Token: token,
Order: index,
links: make(map[string]*DownloadLink),
Debrid: debridName,
Token: token,
Order: index,
links: make(map[string]*DownloadLink),
}
}
func (a *Account) getLink(fileLink string) (*DownloadLink, bool) {
a.mu.RLock()
defer a.mu.RUnlock()
dl, ok := a.links[fileLink[0:39]]
dl, ok := a.links[a.sliceFileLink(fileLink)]
return dl, ok
}
func (a *Account) setLink(fileLink string, dl *DownloadLink) {
a.mu.Lock()
defer a.mu.Unlock()
a.links[fileLink[0:39]] = dl
a.links[a.sliceFileLink(fileLink)] = dl
}
func (a *Account) deleteLink(fileLink string) {
a.mu.Lock()
defer a.mu.Unlock()
delete(a.links, fileLink[0:39])
delete(a.links, a.sliceFileLink(fileLink))
}
func (a *Account) resetDownloadLinks() {
a.mu.Lock()
@@ -225,6 +227,17 @@ func (a *Account) setLinks(links map[string]*DownloadLink) {
// Expired, continue
continue
}
a.links[dl.Link[0:39]] = dl
a.links[a.sliceFileLink(dl.Link)] = dl
}
}
// slice download link
// sliceFileLink returns the key used for the account's download-link
// map. RealDebrid links are truncated to their first 39 characters —
// the prefix this file keys the links map by; links from any other
// provider, and links too short to slice, are returned unchanged.
func (a *Account) sliceFileLink(fileLink string) string {
	if a.Debrid != "realdebrid" || len(fileLink) < 39 {
		return fileLink
	}
	return fileLink[:39]
}

View File

@@ -3,12 +3,11 @@ package qbit
import (
"context"
"encoding/base64"
"fmt"
"github.com/go-chi/chi/v5"
"github.com/sirrobot01/decypharr/internal/utils"
"github.com/sirrobot01/decypharr/pkg/arr"
"github.com/sirrobot01/decypharr/pkg/store"
"net/http"
"net/url"
"strings"
)
@@ -20,45 +19,6 @@ const (
arrKey contextKey = "arr"
)
func validateServiceURL(urlStr string) error {
if urlStr == "" {
return fmt.Errorf("URL cannot be empty")
}
// Try parsing as full URL first
u, err := url.Parse(urlStr)
if err == nil && u.Scheme != "" && u.Host != "" {
// It's a full URL, validate scheme
if u.Scheme != "http" && u.Scheme != "https" {
return fmt.Errorf("URL scheme must be http or https")
}
return nil
}
// Check if it's a host:port format (no scheme)
if strings.Contains(urlStr, ":") && !strings.Contains(urlStr, "://") {
// Try parsing with http:// prefix
testURL := "http://" + urlStr
u, err := url.Parse(testURL)
if err != nil {
return fmt.Errorf("invalid host:port format: %w", err)
}
if u.Host == "" {
return fmt.Errorf("host is required in host:port format")
}
// Validate port number
if u.Port() == "" {
return fmt.Errorf("port is required in host:port format")
}
return nil
}
return fmt.Errorf("invalid URL format: %s", urlStr)
}
func getCategory(ctx context.Context) string {
if category, ok := ctx.Value(categoryKey).(string); ok {
return category
@@ -146,7 +106,7 @@ func (q *QBit) authContext(next http.Handler) http.Handler {
}
}
a.Source = "auto"
if err := validateServiceURL(a.Host); err != nil {
if err := utils.ValidateServiceURL(a.Host); err != nil {
// Return silently, no need to raise a problem. Just do not add the Arr to the context/config.json
next.ServeHTTP(w, r)
return

View File

@@ -18,13 +18,16 @@ type QBit struct {
}
func New() *QBit {
_cfg := config.Get()
cfg := _cfg.QBitTorrent
cfg := config.Get()
qbitCfg := cfg.QBitTorrent
if qbitCfg == nil {
return nil
}
return &QBit{
Username: cfg.Username,
Password: cfg.Password,
DownloadFolder: cfg.DownloadFolder,
Categories: cfg.Categories,
Username: qbitCfg.Username,
Password: qbitCfg.Password,
DownloadFolder: qbitCfg.DownloadFolder,
Categories: qbitCfg.Categories,
storage: store.Get().Torrents(),
logger: logger.New("qbit"),
}

View File

@@ -684,18 +684,3 @@ func (r *Reader) ExtractFile(file *File) ([]byte, error) {
return r.readBytes(file.DataOffset, int(file.CompressedSize))
}
// Helper functions
func min(a, b int) int {
if a < b {
return a
}
return b
}
func max(a, b int) int {
if a > b {
return a
}
return b
}

View File

@@ -88,6 +88,8 @@ func collectFiles(media arr.Content) map[string][]arr.ContentFile {
func (r *Repair) checkTorrentFiles(torrentPath string, files []arr.ContentFile, clients map[string]types.Client, caches map[string]*store.Cache) []arr.ContentFile {
brokenFiles := make([]arr.ContentFile, 0)
emptyFiles := make([]arr.ContentFile, 0)
r.logger.Debug().Msgf("Checking %s", torrentPath)
// Get the debrid client
@@ -95,17 +97,18 @@ func (r *Repair) checkTorrentFiles(torrentPath string, files []arr.ContentFile,
debridName := r.findDebridForPath(dir, clients)
if debridName == "" {
r.logger.Debug().Msgf("No debrid found for %s. Skipping", torrentPath)
return files // Return all files as broken if no debrid found
return emptyFiles
}
cache, ok := caches[debridName]
if !ok {
r.logger.Debug().Msgf("No cache found for %s. Skipping", debridName)
return files // Return all files as broken if no cache found
return emptyFiles
}
tor, ok := r.torrentsMap.Load(debridName)
if !ok {
r.logger.Debug().Msgf("Could not find torrents for %s. Skipping", debridName)
return emptyFiles
}
torrentsMap := tor.(map[string]store.CachedTorrent)
@@ -114,8 +117,9 @@ func (r *Repair) checkTorrentFiles(torrentPath string, files []arr.ContentFile,
torrentName := filepath.Clean(filepath.Base(torrentPath))
torrent, ok := torrentsMap[torrentName]
if !ok {
r.logger.Debug().Msgf("No torrent found for %s. Skipping", torrentName)
return files // Return all files as broken if torrent not found
r.logger.Debug().Msgf("Can't find torrent %s in %s. Marking as broken", torrentName, debridName)
// Return all files as broken
return files
}
// Batch check files

View File

@@ -75,26 +75,6 @@ type Job struct {
ctx context.Context
}
func (j *Job) getUnprocessedBrokenItems() map[string][]arr.ContentFile {
items := make(map[string][]arr.ContentFile)
for arrName, files := range j.BrokenItems {
if len(files) == 0 {
continue // Skip empty file lists
}
items[arrName] = make([]arr.ContentFile, 0, len(files))
for _, file := range files {
if file.Path != "" && file.TargetPath != "" && !file.Processed {
items[arrName] = append(items[arrName], file)
}
}
}
if len(items) == 0 {
return nil // Return nil if no unprocessed items found
}
return items
}
func New(arrs *arr.Storage, engine *debrid.Storage) *Repair {
cfg := config.Get()
workers := runtime.NumCPU() * 20
@@ -234,7 +214,6 @@ func (r *Repair) newJob(arrsNames []string, mediaIDs []string) *Job {
}
}
// initRun initializes the repair run, setting up necessary configurations, checks and caches
func (r *Repair) initRun(ctx context.Context) {
if r.useWebdav {
// Webdav use is enabled, initialize debrid torrent caches
@@ -554,17 +533,21 @@ func (r *Repair) checkMountUp(media []arr.Content) error {
if len(files) == 0 {
return fmt.Errorf("no files found in media %s", firstMedia.Title)
}
firstFile := files[0]
symlinkPath := getSymlinkTarget(firstFile.Path)
if symlinkPath == "" {
return fmt.Errorf("no symlink target found for %s", firstFile.Path)
}
r.logger.Debug().Msgf("Checking symlink parent directory for %s", symlinkPath)
parentSymlink := filepath.Dir(filepath.Dir(symlinkPath)) // /mnt/zurg/torrents/movie/movie.mkv -> /mnt/zurg/torrents
if _, err := os.Stat(parentSymlink); os.IsNotExist(err) {
return fmt.Errorf("parent directory %s not accessible for %s", parentSymlink, firstFile.Path)
for _, file := range files {
if _, err := os.Stat(file.Path); os.IsNotExist(err) {
// If the file does not exist, we can't check the symlink target
r.logger.Debug().Msgf("File %s does not exist, skipping repair", file.Path)
return fmt.Errorf("file %s does not exist, skipping repair", file.Path)
}
// Get the symlink target
symlinkPath := getSymlinkTarget(file.Path)
if symlinkPath != "" {
r.logger.Trace().Msgf("Found symlink target for %s: %s", file.Path, symlinkPath)
if _, err := os.Stat(symlinkPath); os.IsNotExist(err) {
r.logger.Debug().Msgf("Symlink target %s does not exist, skipping repair", symlinkPath)
return fmt.Errorf("symlink target %s does not exist for %s. skipping repair", symlinkPath, file.Path)
}
}
}
return nil
}
@@ -690,35 +673,17 @@ func (r *Repair) getWebdavBrokenFiles(job *Job, media arr.Content) []arr.Content
brokenFiles := make([]arr.ContentFile, 0)
uniqueParents := collectFiles(media)
var brokenFilesMutex sync.Mutex
var wg sync.WaitGroup
// Limit concurrent torrent checks
semaphore := make(chan struct{}, min(len(uniqueParents), 30)) // Limit to 5 concurrent checks
for torrentPath, files := range uniqueParents {
wg.Add(1)
go func(torrentPath string, files []arr.ContentFile) {
defer wg.Done()
semaphore <- struct{}{} // Acquire
defer func() { <-semaphore }() // Release
select {
case <-job.ctx.Done():
return
default:
}
brokenFilesForTorrent := r.checkTorrentFiles(torrentPath, files, clients, caches)
if len(brokenFilesForTorrent) > 0 {
brokenFilesMutex.Lock()
brokenFiles = append(brokenFiles, brokenFilesForTorrent...)
brokenFilesMutex.Unlock()
}
}(torrentPath, files)
select {
case <-job.ctx.Done():
return brokenFiles
default:
}
brokenFilesForTorrent := r.checkTorrentFiles(torrentPath, files, clients, caches)
if len(brokenFilesForTorrent) > 0 {
brokenFiles = append(brokenFiles, brokenFilesForTorrent...)
}
}
wg.Wait()
if len(brokenFiles) == 0 {
return nil
}
@@ -765,7 +730,7 @@ func (r *Repair) ProcessJob(id string) error {
return fmt.Errorf("job %s already failed", id)
}
brokenItems := job.getUnprocessedBrokenItems()
brokenItems := job.BrokenItems
if len(brokenItems) == 0 {
r.logger.Info().Msgf("No broken items found for job %s", id)
job.CompletedAt = time.Now()
@@ -773,144 +738,63 @@ func (r *Repair) ProcessJob(id string) error {
return nil
}
r.logger.Info().Msgf("Processing job %s with %d broken items", id, len(brokenItems))
go r.processJob(job, brokenItems)
return nil
}
func (r *Repair) processJob(job *Job, brokenItems map[string][]arr.ContentFile) {
if job.ctx == nil || job.ctx.Err() != nil {
job.ctx, job.cancelFunc = context.WithCancel(r.ctx)
}
errs := make([]error, 0)
processedCount := 0
g, ctx := errgroup.WithContext(job.ctx)
g.SetLimit(r.workers)
for arrName, items := range brokenItems {
select {
case <-job.ctx.Done():
r.logger.Info().Msgf("Job %s cancelled", job.ID)
job.Status = JobCancelled
job.CompletedAt = time.Now()
job.Error = "Job was cancelled"
return
default:
// Continue processing
}
items := items
arrName := arrName
g.Go(func() error {
a := r.arrs.Get(arrName)
if a == nil {
errs = append(errs, fmt.Errorf("arr %s not found", arrName))
continue
}
select {
case <-ctx.Done():
return ctx.Err()
default:
}
if err := a.DeleteFiles(items); err != nil {
errs = append(errs, fmt.Errorf("failed to delete broken items for %s: %w", arrName, err))
continue
}
// Search for missing items
if err := a.SearchMissing(items); err != nil {
errs = append(errs, fmt.Errorf("failed to search missing items for %s: %w", arrName, err))
continue
}
processedCount += len(items)
// Mark this item as processed
for i := range items {
items[i].Processed = true
}
job.BrokenItems[arrName] = items
a := r.arrs.Get(arrName)
if a == nil {
r.logger.Error().Msgf("Arr %s not found", arrName)
return nil
}
if err := a.DeleteFiles(items); err != nil {
r.logger.Error().Err(err).Msgf("Failed to delete broken items for %s", arrName)
return nil
}
// Search for missing items
if err := a.SearchMissing(items); err != nil {
r.logger.Error().Err(err).Msgf("Failed to search missing items for %s", arrName)
return nil
}
return nil
})
}
// Update job status to in-progress
job.Status = JobProcessing
r.saveToFile()
if len(errs) > 0 {
errMsg := fmt.Sprintf("Job %s encountered errors: %v", job.ID, errs)
job.Error = errMsg
job.FailedAt = time.Now()
job.Status = JobFailed
r.logger.Error().Msg(errMsg)
go func() {
if err := request.SendDiscordMessage("repair_failed", "error", job.discordContext()); err != nil {
r.logger.Error().Msgf("Error sending discord message: %v", err)
}
}()
return
}
remainingItems := job.getUnprocessedBrokenItems()
if len(remainingItems) == 0 {
// All items processed, mark job as completed
job.CompletedAt = time.Now()
job.Status = JobCompleted
r.logger.Info().Msgf("Job %s completed successfully (all items processed)", job.ID)
go func() {
if err := request.SendDiscordMessage("repair_complete", "success", job.discordContext()); err != nil {
r.logger.Error().Msgf("Error sending discord message: %v", err)
}
}()
} else {
// Some items still remain, keep job as pending
job.Status = JobPending
r.logger.Info().Msgf("Job %s: processed %d selected items successfully, %d items remaining", job.ID, processedCount, len(remainingItems))
go func() {
if err := request.SendDiscordMessage("repair_partial_complete", "info", job.discordContext()); err != nil {
r.logger.Error().Msgf("Error sending discord message: %v", err)
}
}()
}
r.saveToFile()
}
// ProcessJobItems processes the selected items for a job
// selectedItems is the map of arr names to the list of file IDs to process
func (r *Repair) ProcessJobItems(id string, selectedItems map[string][]int) error {
job := r.GetJob(id)
if job == nil {
return fmt.Errorf("job %s not found", id)
}
if job.Status != JobPending {
return fmt.Errorf("job %s not pending", id)
}
if job.StartedAt.IsZero() {
return fmt.Errorf("job %s not started", id)
}
if !job.CompletedAt.IsZero() {
return fmt.Errorf("job %s already completed", id)
}
if !job.FailedAt.IsZero() {
return fmt.Errorf("job %s already failed", id)
}
brokenItems := job.getUnprocessedBrokenItems()
validatedItems := make(map[string][]arr.ContentFile)
for arrName, selectedItemsList := range selectedItems {
if jobItems, exists := brokenItems[arrName]; exists {
validItems := make([]arr.ContentFile, 0, len(selectedItemsList))
for _, item := range selectedItemsList {
// Find the item in the job items
for _, jobItem := range jobItems {
if jobItem.FileId == item {
validItems = append(validItems, jobItem)
break
}
}
}
if len(validItems) > 0 {
validatedItems[arrName] = validItems
}
// Launch a goroutine to wait for completion and update the job
go func() {
if err := g.Wait(); err != nil {
job.FailedAt = time.Now()
job.Error = err.Error()
job.CompletedAt = time.Now()
job.Status = JobFailed
r.logger.Error().Err(err).Msgf("Job %s failed", id)
} else {
job.CompletedAt = time.Now()
job.Status = JobCompleted
r.logger.Info().Msgf("Job %s completed successfully", id)
}
}
if len(validatedItems) == 0 {
return fmt.Errorf("no valid items found for job %s", id)
}
job.Status = JobProcessing
r.saveToFile()
go r.processJob(job, validatedItems)
r.saveToFile()
}()
return nil
}

171
pkg/sabnzbd/config.go Normal file
View File

@@ -0,0 +1,171 @@
package sabnzbd
// ConfigResponse wraps the SABnzbd configuration tree for the
// "config"/"get_config" API modes (SABnzbd's JSON envelope).
type ConfigResponse struct {
	Config *Config `json:"config"`
}
// ConfigNewzbin holds legacy Newzbin account settings, mirrored for
// SABnzbd config compatibility.
// NOTE(review): not referenced by any visible handler — confirm a client
// actually reads this before extending it.
type ConfigNewzbin struct {
	Username     string `json:"username"`
	BookmarkRate int    `json:"bookmark_rate"`
	Url          string `json:"url"`
	Bookmarks    int    `json:"bookmarks"`
	Password     string `json:"password"`
	Unbookmark   int    `json:"unbookmark"`
}
// Category represents a SABnzbd category as returned by "get_cats" and
// inside the configuration; one is built per Arr instance plus any
// configured defaults (see SABnzbd.getCategories).
type Category struct {
	Name     string `json:"name"`
	Order    int    `json:"order"`
	Pp       string `json:"pp"`       // post-processing option — presumably SABnzbd's 0-3 scale; confirm
	Script   string `json:"script"`   // post-processing script; this shim always uses "None"
	Dir      string `json:"dir"`      // completed-download directory for the category
	NewzBin  string `json:"newzbin"`
	Priority string `json:"priority"` // one of the Priority* constants
}
// Server represents a usenet server entry in the SABnzbd configuration;
// populated from the application's usenet provider config (see
// SABnzbd.SetConfig).
type Server struct {
	Name        string `json:"name"`
	Host        string `json:"host"`
	Port        int    `json:"port"`
	Username    string `json:"username"`
	Password    string `json:"password"`
	Connections int    `json:"connections"` // max simultaneous connections to this server
	Retention   int    `json:"retention"`   // retention in days — not set by SetConfig
	Priority    int    `json:"priority"`    // not set by SetConfig
	SSL         bool   `json:"ssl"`
	Optional    bool   `json:"optional"`    // not set by SetConfig
}
// Config mirrors SABnzbd's configuration tree: miscellaneous settings,
// categories, and usenet servers. Serialized inside ConfigResponse.
type Config struct {
	Misc       MiscConfig `json:"misc"`
	Categories []Category `json:"categories"`
	Servers    []Server   `json:"servers"`
}
// MiscConfig mirrors SABnzbd's "misc" configuration section. Most fields
// are carried only for API-shape compatibility; SetConfig populates just
// a handful (directories, WebPort, Language, RefreshRate, QueueComplete,
// ConfigLock, Autobrowser, CheckNewRel) and the rest default to "".
// Values are strings because SABnzbd's INI-backed config serializes
// numbers and booleans ("0"/"1") as strings.
type MiscConfig struct {
	// Directory Configuration
	CompleteDir  string `json:"complete_dir"`
	DownloadDir  string `json:"download_dir"`
	AdminDir     string `json:"admin_dir"`
	NzbBackupDir string `json:"nzb_backup_dir"`
	ScriptDir    string `json:"script_dir"`
	EmailDir     string `json:"email_dir"`
	WebDir       string `json:"web_dir"`
	// Processing Options
	ParOption             string `json:"par_option"`
	ParOptionConvert      string `json:"par_option_convert"`
	ParOptionDuplicate    string `json:"par_option_duplicate"`
	DirectUnpack          string `json:"direct_unpack"`
	FlatUnpack            string `json:"flat_unpack"`
	EnableRecursiveUnpack string `json:"enable_recursive_unpack"`
	OverwriteFiles        string `json:"overwrite_files"`
	IgnoreWrongUnrar      string `json:"ignore_wrong_unrar"`
	IgnoreUnrarDates      string `json:"ignore_unrar_dates"`
	PreCheck              string `json:"pre_check"`
	// File Handling
	Permissions               string   `json:"permissions"`
	FolderRename              string   `json:"folder_rename"`
	FileRename                string   `json:"file_rename"`
	ReplaceIllegal            string   `json:"replace_illegal"`
	ReplaceDots               string   `json:"replace_dots"`
	ReplaceSpaces             string   `json:"replace_spaces"`
	SanitizeSafe              string   `json:"sanitize_safe"`
	IgnoreSamples             string   `json:"ignore_samples"`
	UnwantedExtensions        []string `json:"unwanted_extensions"`
	ActionOnUnwanted          string   `json:"action_on_unwanted"`
	ActionOnDuplicate         string   `json:"action_on_duplicate"`
	BackupForDuplicates       string   `json:"backup_for_duplicates"`
	CleanupList               []string `json:"cleanup_list"`
	DeobfuscateFinalFilenames string   `json:"deobfuscate_final_filenames"`
	// Scripts and Processing
	PreScript             string `json:"pre_script"`
	PostScript            string `json:"post_script"`
	EmptyPostproc         string `json:"empty_postproc"`
	PauseOnPostProcessing string `json:"pause_on_post_processing"`
	// System Resources
	Nice       string `json:"nice"`
	NiceUnpack string `json:"nice_unpack"`
	Ionice     string `json:"ionice"`
	Fsync      string `json:"fsync"`
	// Bandwidth and Performance
	BandwidthMax     string `json:"bandwidth_max"`
	BandwidthPerc    string `json:"bandwidth_perc"`
	RefreshRate      string `json:"refresh_rate"`
	DirscanSpeed     string `json:"dirscan_speed"`
	FolderMaxLength  string `json:"folder_max_length"`
	PropagationDelay string `json:"propagation_delay"`
	// Storage Management
	DownloadFree string `json:"download_free"`
	CompleteFree string `json:"complete_free"`
	// Queue Management
	QueueComplete     string `json:"queue_complete"`
	QueueCompletePers string `json:"queue_complete_pers"`
	AutoSort          string `json:"auto_sort"`
	NewNzbOnFailure   string `json:"new_nzb_on_failure"`
	PauseOnPwrar      string `json:"pause_on_pwrar"`
	WarnedOldQueue    string `json:"warned_old_queue"`
	// Web Interface
	WebHost     string `json:"web_host"`
	WebPort     string `json:"web_port"`
	WebUsername string `json:"web_username"`
	WebPassword string `json:"web_password"`
	WebColor    string `json:"web_color"`
	WebColor2   string `json:"web_color2"`
	AutoBrowser string `json:"auto_browser"`
	// Autobrowser duplicates AutoBrowser under a different JSON key;
	// SetConfig writes Autobrowser only. NOTE(review): confirm which key
	// clients actually read before removing either field.
	Autobrowser string `json:"autobrowser"`
	// HTTPS Configuration
	EnableHTTPS             string `json:"enable_https"`
	EnableHTTPSVerification string `json:"enable_https_verification"`
	HTTPSPort               string `json:"https_port"`
	HTTPSCert               string `json:"https_cert"`
	HTTPSKey                string `json:"https_key"`
	HTTPSChain              string `json:"https_chain"`
	// Security and API
	APIKey        string   `json:"api_key"`
	NzbKey        string   `json:"nzb_key"`
	HostWhitelist string   `json:"host_whitelist"`
	LocalRanges   []string `json:"local_ranges"`
	InetExposure  string   `json:"inet_exposure"`
	APILogging    string   `json:"api_logging"`
	APIWarnings   string   `json:"api_warnings"`
	// Logging
	LogLevel   string `json:"log_level"`
	LogSize    string `json:"log_size"`
	MaxLogSize string `json:"max_log_size"`
	LogBackups string `json:"log_backups"`
	LogNew     string `json:"log_new"`
	// Notifications
	MatrixUsername string `json:"matrix_username"`
	MatrixPassword string `json:"matrix_password"`
	MatrixServer   string `json:"matrix_server"`
	MatrixRoom     string `json:"matrix_room"`
	// Miscellaneous
	ConfigLock      string `json:"config_lock"`
	Language        string `json:"language"`
	CheckNewRel     string `json:"check_new_rel"`
	RSSFilenames    string `json:"rss_filenames"`
	IPv6Hosting     string `json:"ipv6_hosting"`
	EnableBonjour   string `json:"enable_bonjour"`
	Cherryhost      string `json:"cherryhost"`
	WinMenu         string `json:"win_menu"`
	AMPM            string `json:"ampm"`
	NotifiedNewSkin string `json:"notified_new_skin"`
	HelpURI         string `json:"helpuri"`
	SSDURI          string `json:"ssduri"`
}

121
pkg/sabnzbd/context.go Normal file
View File

@@ -0,0 +1,121 @@
package sabnzbd
import (
"context"
"github.com/sirrobot01/decypharr/internal/utils"
"github.com/sirrobot01/decypharr/pkg/store"
"net/http"
"strings"
"github.com/sirrobot01/decypharr/pkg/arr"
)
// contextKey is a private type for request-context keys so values set by
// this package cannot collide with keys from other packages.
type contextKey string

// Context keys used by the SABnzbd middleware chain (see modeContext,
// categoryContext, authContext).
const (
	apiKeyKey   contextKey = "apikey"   // reserved — not read by the visible handlers
	modeKey     contextKey = "mode"     // SABnzbd API "mode" parameter
	arrKey      contextKey = "arr"      // *arr.Arr resolved for the request
	categoryKey contextKey = "category" // download category for the request
)
// getMode returns the SABnzbd API "mode" previously stashed in the
// request context by modeContext, or "" when none was set.
func getMode(ctx context.Context) string {
	mode, ok := ctx.Value(modeKey).(string)
	if !ok {
		return ""
	}
	return mode
}
// categoryContext is middleware that extracts the "category" parameter
// from the query string or form body, trims it, and stores it in the
// request context under categoryKey for downstream handlers.
func (s *SABnzbd) categoryContext(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// FormValue consults the URL query first and then the parsed
		// form body (parsing it on demand), which covers all three
		// lookups the previous version performed by hand.
		category := r.FormValue("category")
		ctx := context.WithValue(r.Context(), categoryKey, strings.TrimSpace(category))
		next.ServeHTTP(w, r.WithContext(ctx))
	})
}
// getArrFromContext returns the *arr.Arr stored under arrKey by the
// middleware chain, or nil when the request carries none.
func getArrFromContext(ctx context.Context) *arr.Arr {
	a, ok := ctx.Value(arrKey).(*arr.Arr)
	if !ok {
		return nil
	}
	return a
}
// getCategory returns the download category stored under categoryKey,
// or "" when the middleware did not set one.
func getCategory(ctx context.Context) string {
	cat, ok := ctx.Value(categoryKey).(string)
	if !ok {
		return ""
	}
	return cat
}
// modeContext is middleware that extracts the SABnzbd "mode" parameter
// and the "cat" category, builds a default Arr for the category, and
// stores both in the request context (modeKey, arrKey).
func (s *SABnzbd) modeContext(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Parse the form unconditionally so both "mode" and "cat" can be
		// read from either the query string or a POST body. Previously
		// the form was parsed only when "mode" was absent from the
		// query, so a form-only "cat" was silently lost whenever "mode"
		// arrived via the query string (the common case).
		_ = r.ParseForm()
		mode := r.FormValue("mode")
		category := r.FormValue("cat")
		// Create a default Arr instance for the category
		downloadUncached := false
		a := arr.New(category, "", "", false, false, &downloadUncached, "", "auto")
		ctx := context.WithValue(r.Context(), modeKey, strings.TrimSpace(mode))
		ctx = context.WithValue(ctx, arrKey, a)
		next.ServeHTTP(w, r.WithContext(ctx))
	})
}
// authContext creates a middleware that extracts the Arr host and token
// from SABnzbd's "ma_username"/"ma_password" form fields and adds the
// resolved Arr to the request context. This lets a Sonarr/Radarr
// instance identify itself through the SABnzbd client settings.
//
// Side effect: when the provided host validates, the Arr is persisted
// via arrs.AddOrUpdate — this middleware mutates global configuration,
// not just the request context. Only a valid host/token pair is added;
// otherwise the request proceeds without an Arr in context.
func (s *SABnzbd) authContext(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		host := r.FormValue("ma_username")
		token := r.FormValue("ma_password")
		// Category must already be in context: categoryContext runs
		// before this middleware (see Routes).
		category := getCategory(r.Context())
		arrs := store.Get().Arr()
		// Check if arr exists
		a := arrs.Get(category)
		if a == nil {
			// Arr is not configured, create a new one
			downloadUncached := false
			a = arr.New(category, "", "", false, false, &downloadUncached, "", "auto")
		}
		// Only overwrite host/token when the client actually sent them.
		host = strings.TrimSpace(host)
		if host != "" {
			a.Host = host
		}
		token = strings.TrimSpace(token)
		if token != "" {
			a.Token = token
		}
		a.Source = "auto"
		if err := utils.ValidateServiceURL(a.Host); err != nil {
			// Return silently, no need to raise a problem. Just do not add the Arr to the context/config.json
			next.ServeHTTP(w, r)
			return
		}
		arrs.AddOrUpdate(a)
		ctx := context.WithValue(r.Context(), arrKey, a)
		next.ServeHTTP(w, r.WithContext(ctx))
	})
}

476
pkg/sabnzbd/handlers.go Normal file
View File

@@ -0,0 +1,476 @@
package sabnzbd
import (
"context"
"fmt"
"github.com/sirrobot01/decypharr/internal/request"
"github.com/sirrobot01/decypharr/internal/utils"
"github.com/sirrobot01/decypharr/pkg/arr"
"github.com/sirrobot01/decypharr/pkg/usenet"
"io"
"net/http"
"strconv"
"strings"
"time"
)
// handleAPI is the main handler for all SABnzbd API requests. It
// dispatches on the "mode" parameter (placed in the request context by
// modeContext) and returns 404 for modes this shim does not implement.
func (s *SABnzbd) handleAPI(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	mode := getMode(ctx)
	switch mode {
	case ModeQueue:
		s.handleQueue(w, r)
	case ModeHistory:
		s.handleHistory(w, r)
	case ModeConfig:
		s.handleConfig(w, r)
	case ModeStatus, ModeFullStatus:
		s.handleStatus(w, r)
	case ModeGetConfig:
		// "get_config" and "config" share one handler.
		s.handleConfig(w, r)
	case ModeAddURL:
		s.handleAddURL(w, r)
	case ModeAddFile:
		s.handleAddFile(w, r)
	case ModeVersion:
		s.handleVersion(w, r)
	case ModeGetCats:
		s.handleGetCategories(w, r)
	case ModeGetScripts:
		s.handleGetScripts(w, r)
	case ModeGetFiles:
		s.handleGetFiles(w, r)
	default:
		// Unknown or unimplemented mode (including pause/resume/retry,
		// which are declared as constants but not wired up here).
		s.logger.Warn().Str("mode", mode).Msg("Unknown API mode, returning 404")
		http.Error(w, "Not Found", http.StatusNotFound)
	}
}
// handleQueue serves "mode=queue". Without a "name" sub-operation it
// lists the queue; with one it dispatches to delete/pause/resume.
// Unrecognized operations fall through and return an empty 200.
func (s *SABnzbd) handleQueue(w http.ResponseWriter, r *http.Request) {
	raw := r.FormValue("name")
	if raw == "" {
		s.handleListQueue(w, r)
		return
	}
	switch strings.ToLower(strings.TrimSpace(raw)) {
	case "delete":
		s.handleQueueDelete(w, r)
	case "pause":
		s.handleQueuePause(w, r)
	case "resume":
		s.handleQueueResume(w, r)
	}
}
// handleQueueResume acknowledges a resume request. The queue has no
// real paused state, so this unconditionally reports success.
func (s *SABnzbd) handleQueueResume(w http.ResponseWriter, r *http.Request) {
	request.JSONResponse(w, StatusResponse{Status: true}, http.StatusOK)
}
// handleQueueDelete handles queue delete operations. "value" carries a
// comma-separated list of nzo ids; each is deleted via the store's
// atomic delete. The response is all-or-nothing only when every delete
// fails; partial failures are logged but still return Status: true.
func (s *SABnzbd) handleQueueDelete(w http.ResponseWriter, r *http.Request) {
	nzoIDs := r.FormValue("value")
	if nzoIDs == "" {
		s.writeError(w, "No NZB IDs provided", http.StatusBadRequest)
		return
	}
	var successCount int
	var errors []string
	for _, nzoID := range strings.Split(nzoIDs, ",") {
		nzoID = strings.TrimSpace(nzoID)
		if nzoID == "" {
			continue // Skip empty IDs
		}
		s.logger.Info().Str("nzo_id", nzoID).Msg("Deleting NZB")
		// Use atomic delete operation
		if err := s.usenet.Store().AtomicDelete(nzoID); err != nil {
			s.logger.Error().
				Err(err).
				Str("nzo_id", nzoID).
				Msg("Failed to delete NZB")
			errors = append(errors, fmt.Sprintf("Failed to delete %s: %v", nzoID, err))
		} else {
			successCount++
		}
	}
	// Return response with success/error information
	if len(errors) > 0 {
		if successCount == 0 {
			// All deletions failed
			s.writeError(w, fmt.Sprintf("All deletions failed: %s", strings.Join(errors, "; ")), http.StatusInternalServerError)
			return
		} else {
			// Partial success: log the failures but report overall success
			// to the client below.
			s.logger.Warn().
				Int("success_count", successCount).
				Int("error_count", len(errors)).
				Strs("errors", errors).
				Msg("Partial success in queue deletion")
		}
	}
	response := StatusResponse{
		Status: true,
		Error:  "", // Could add error details here if needed
	}
	request.JSONResponse(w, response, http.StatusOK)
}
// handleQueuePause acknowledges a pause request. Pausing is not
// supported, so this unconditionally reports success.
func (s *SABnzbd) handleQueuePause(w http.ResponseWriter, r *http.Request) {
	request.JSONResponse(w, StatusResponse{Status: true}, http.StatusOK)
}
// handleListQueue renders the current usenet queue in SABnzbd's queue
// JSON shape so polling clients can track download progress.
func (s *SABnzbd) handleListQueue(w http.ResponseWriter, r *http.Request) {
	nzbs := s.usenet.Store().GetQueue()
	queue := Queue{
		Version: Version,
		Slots:   []QueueSlot{},
	}
	// Convert NZBs to queue slots
	for _, nzb := range nzbs {
		if nzb.ETA <= 0 {
			nzb.ETA = 0 // Ensure ETA is non-negative
		}
		// SABnzbd reports timeleft as "HH:MM:SS". The previous code used
		// time.Duration.String(), which yields "1h1m1s" — a format the
		// documented consumers of this field cannot parse — so the ETA
		// (seconds) is formatted manually. ETA 0 yields "00:00:00".
		secs := int64(nzb.ETA)
		timeLeft := fmt.Sprintf("%02d:%02d:%02d", secs/3600, (secs%3600)/60, secs%60)
		slot := QueueSlot{
			Status:     s.mapNZBStatus(nzb.Status),
			Mb:         nzb.TotalSize,
			Filename:   nzb.Name,
			Cat:        nzb.Category,
			MBLeft:     0,
			Percentage: nzb.Percentage,
			NzoId:      nzb.ID,
			Size:       nzb.TotalSize,
			TimeLeft:   timeLeft, // "HH:MM:SS"
		}
		queue.Slots = append(queue.Slots, slot)
	}
	response := QueueResponse{
		Queue:   queue,
		Status:  true,
		Version: Version,
	}
	request.JSONResponse(w, response, http.StatusOK)
}
// handleHistory serves the SABnzbd "history" mode. An optional "limit"
// form value caps the number of entries; absent or negative values mean
// "no limit" (0). A non-numeric limit is rejected with 400.
func (s *SABnzbd) handleHistory(w http.ResponseWriter, r *http.Request) {
	limit := 0
	if raw := r.FormValue("limit"); raw != "" {
		parsed, err := strconv.Atoi(raw)
		if err != nil {
			s.logger.Error().Err(err).Msg("Invalid limit parameter for history")
			s.writeError(w, "Invalid limit parameter", http.StatusBadRequest)
			return
		}
		limit = parsed
	}
	if limit < 0 {
		limit = 0
	}
	request.JSONResponse(w, HistoryResponse{History: s.getHistory(r.Context(), limit)}, http.StatusOK)
}
// handleConfig serves the current SABnzbd-style configuration for the
// "config" and "get_config" API modes.
func (s *SABnzbd) handleConfig(w http.ResponseWriter, r *http.Request) {
	request.JSONResponse(w, ConfigResponse{Config: s.config}, http.StatusOK)
}
// handleAddURL handles "mode=addurl": downloads the NZB at the URL in
// the "name" field and queues it for processing, responding with the
// new nzo id. POST only.
func (s *SABnzbd) handleAddURL(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	_arr := getArrFromContext(ctx)
	cat := getCategory(ctx)
	if _arr == nil {
		// If Arr is not in context, create a new one with default values
		_arr = arr.New(cat, "", "", false, false, nil, "", "")
	}
	if r.Method != http.MethodPost {
		s.logger.Warn().Str("method", r.Method).Msg("Invalid method")
		s.writeError(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}
	// In SABnzbd's addurl API, "name" carries the NZB URL.
	url := r.FormValue("name")
	action := r.FormValue("action")
	downloadDir := r.FormValue("download_dir")
	if action == "" {
		action = "symlink" // default import strategy
	}
	if downloadDir == "" {
		downloadDir = s.config.Misc.DownloadDir
	}
	if url == "" {
		s.writeError(w, "URL is required", http.StatusBadRequest)
		return
	}
	nzoID, err := s.addNZBURL(ctx, url, _arr, action, downloadDir)
	if err != nil {
		s.writeError(w, err.Error(), http.StatusInternalServerError)
		return
	}
	if nzoID == "" {
		s.writeError(w, "Failed to add NZB", http.StatusInternalServerError)
		return
	}
	response := AddNZBResponse{
		Status: true,
		NzoIds: []string{nzoID},
	}
	request.JSONResponse(w, response, http.StatusOK)
}
// handleAddFile handles "mode=addfile": accepts an NZB uploaded as a
// multipart form file under the "name" field and queues it for
// processing, responding with the new nzo id. POST only.
func (s *SABnzbd) handleAddFile(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	_arr := getArrFromContext(ctx)
	cat := getCategory(ctx)
	if _arr == nil {
		// If Arr is not in context, create a new one with default values
		_arr = arr.New(cat, "", "", false, false, nil, "", "")
	}
	if r.Method != http.MethodPost {
		s.writeError(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}
	// Parse multipart form
	err := r.ParseMultipartForm(32 << 20) // 32 MB limit
	if err != nil {
		s.writeError(w, "Failed to parse multipart form", http.StatusBadRequest)
		return
	}
	file, header, err := r.FormFile("name")
	if err != nil {
		s.writeError(w, "No file uploaded", http.StatusBadRequest)
		return
	}
	defer file.Close()
	// Read file content
	content, err := io.ReadAll(file)
	if err != nil {
		s.writeError(w, "Failed to read file", http.StatusInternalServerError)
		return
	}
	action := r.FormValue("action")
	downloadDir := r.FormValue("download_dir")
	if action == "" {
		action = "symlink" // default import strategy
	}
	if downloadDir == "" {
		downloadDir = s.config.Misc.DownloadDir
	}
	// Process NZB file
	nzbID, err := s.addNZBFile(ctx, content, header.Filename, _arr, action, downloadDir)
	if err != nil {
		s.writeError(w, fmt.Sprintf("Failed to add NZB file: %s", err.Error()), http.StatusInternalServerError)
		return
	}
	if nzbID == "" {
		s.writeError(w, "Failed to add NZB file", http.StatusInternalServerError)
		return
	}
	response := AddNZBResponse{
		Status: true,
		NzoIds: []string{nzbID},
	}
	request.JSONResponse(w, response, http.StatusOK)
}
// handleVersion reports the emulated SABnzbd version ("mode=version").
func (s *SABnzbd) handleVersion(w http.ResponseWriter, r *http.Request) {
	request.JSONResponse(w, VersionResponse{Version: Version}, http.StatusOK)
}
// handleGetCategories serves the category list for "mode=get_cats".
func (s *SABnzbd) handleGetCategories(w http.ResponseWriter, r *http.Request) {
	request.JSONResponse(w, s.getCategories(), http.StatusOK)
}
// handleGetScripts serves "mode=get_scripts". Post-processing scripts
// are not supported, so only the "None" placeholder is advertised.
func (s *SABnzbd) handleGetScripts(w http.ResponseWriter, r *http.Request) {
	request.JSONResponse(w, []string{"None"}, http.StatusOK)
}
// handleGetFiles serves "mode=get_files": the file names contained in
// the NZB identified by the "value" form field. Unknown or missing ids
// yield an empty (null) list.
func (s *SABnzbd) handleGetFiles(w http.ResponseWriter, r *http.Request) {
	var files []string
	if nzoID := r.FormValue("value"); nzoID != "" {
		if nzb := s.usenet.Store().Get(nzoID); nzb != nil {
			for _, f := range nzb.Files {
				files = append(files, f.Name)
			}
		}
	}
	request.JSONResponse(w, files, http.StatusOK)
}
// handleStatus serves "mode=status"/"mode=fullstatus" with the minimal
// field set clients look for (the completed-downloads directory).
func (s *SABnzbd) handleStatus(w http.ResponseWriter, r *http.Request) {
	type status struct {
		CompletedDir string `json:"completed_dir"`
	}
	response := struct {
		Status status `json:"status"`
	}{
		Status: status{
			// Use CompleteDir (finished downloads) rather than
			// DownloadDir (in-progress). SetConfig currently points both
			// at the same folder, so behavior is unchanged, but
			// CompleteDir is the semantically correct source for
			// "completed_dir".
			CompletedDir: s.config.Misc.CompleteDir,
		},
	}
	request.JSONResponse(w, response, http.StatusOK)
}
// Helper methods
// getHistory assembles the SABnzbd history payload for the category
// carried in ctx, limited to at most limit entries.
func (s *SABnzbd) getHistory(ctx context.Context, limit int) History {
	items := s.usenet.Store().GetHistory(getCategory(ctx), limit)
	slots := make([]HistorySlot, 0, len(items))
	for _, it := range items {
		slots = append(slots, HistorySlot{
			Status:      s.mapNZBStatus(it.Status),
			Name:        it.Name,
			NZBName:     it.Name,
			NzoId:       it.ID,
			Category:    it.Category,
			FailMessage: it.FailMessage,
			Bytes:       it.TotalSize,
			Storage:     it.Storage,
		})
	}
	return History{
		Version: Version,
		Paused:  false,
		Slots:   slots,
	}
}
// writeError sends a SABnzbd-style error envelope ({"status": false,
// "error": ...}) with the given HTTP status code.
func (s *SABnzbd) writeError(w http.ResponseWriter, message string, status int) {
	request.JSONResponse(w, StatusResponse{Status: false, Error: message}, status)
}
func (s *SABnzbd) mapNZBStatus(status string) string {
switch status {
case "downloading":
return StatusDownloading
case "completed":
return StatusCompleted
case "paused":
return StatusPaused
case "error", "failed":
return StatusFailed
case "processing":
return StatusProcessing
case "verifying":
return StatusVerifying
case "repairing":
return StatusRepairing
case "extracting":
return StatusExtracting
case "moving":
return StatusMoving
case "running":
return StatusRunning
default:
return StatusQueued
}
}
// addNZBURL downloads the NZB at url and hands its contents to
// addNZBFile for processing, returning the new nzo id. Empty downloads
// are rejected rather than queued.
func (s *SABnzbd) addNZBURL(ctx context.Context, url string, arr *arr.Arr, action, downloadDir string) (string, error) {
	if url == "" {
		return "", fmt.Errorf("URL is required")
	}
	// Download NZB content; the remote filename is used as the NZB name.
	filename, content, err := utils.DownloadFile(url)
	if err != nil {
		s.logger.Error().Err(err).Str("url", url).Msg("Failed to download NZB from URL")
		return "", fmt.Errorf("failed to download NZB from URL: %w", err)
	}
	if len(content) == 0 {
		s.logger.Warn().Str("url", url).Msg("Downloaded content is empty")
		return "", fmt.Errorf("downloaded content is empty")
	}
	return s.addNZBFile(ctx, content, filename, arr, action, downloadDir)
}
// addNZBFile submits raw NZB content to the usenet engine for
// processing and returns the resulting nzo id. action and downloadDir
// are passed through to the processor unchanged.
func (s *SABnzbd) addNZBFile(ctx context.Context, content []byte, filename string, arr *arr.Arr, action, downloadDir string) (string, error) {
	if s.usenet == nil {
		return "", fmt.Errorf("store not initialized")
	}
	req := &usenet.ProcessRequest{
		NZBContent:  content,
		Name:        filename,
		Arr:         arr,
		Action:      action,
		DownloadDir: downloadDir,
	}
	nzb, err := s.usenet.ProcessNZB(ctx, req)
	if err != nil {
		return "", fmt.Errorf("failed to process NZB: %w", err)
	}
	return nzb.ID, nil
}

24
pkg/sabnzbd/routes.go Normal file
View File

@@ -0,0 +1,24 @@
package sabnzbd
import (
"net/http"
"github.com/go-chi/chi/v5"
)
// Routes builds the chi router for the SABnzbd-compatible API.
// Middleware order matters: categoryContext must run before authContext
// (which reads the category from the context), and modeContext applies
// only to the /api subtree.
func (s *SABnzbd) Routes() http.Handler {
	r := chi.NewRouter()
	r.Use(s.categoryContext)
	r.Use(s.authContext)
	// SABnzbd API endpoints - all under /api with mode parameter
	r.Route("/api", func(r chi.Router) {
		r.Use(s.modeContext)
		// Both GET and POST land on the same mode-dispatching handler.
		r.Get("/", s.handleAPI)
		r.Post("/", s.handleAPI)
	})
	return r
}

116
pkg/sabnzbd/sabnzbd.go Normal file
View File

@@ -0,0 +1,116 @@
package sabnzbd
import (
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/logger"
"github.com/sirrobot01/decypharr/pkg/store"
"github.com/sirrobot01/decypharr/pkg/usenet"
"path/filepath"
)
// SABnzbd emulates a subset of the SABnzbd HTTP API on top of the
// internal usenet downloader, so download clients that speak SABnzbd
// can use this application as a drop-in backend.
type SABnzbd struct {
	downloadFolder    string  // root folder downloads are placed in
	config            *Config // SABnzbd-shaped view of the app config (built by SetConfig)
	refreshInterval   int     // queue refresh interval — presumably seconds; confirm against config docs
	logger            zerolog.Logger
	usenet            usenet.Usenet // backing usenet engine
	defaultCategories []string      // extra categories from config (non-empty names only)
}
// New builds a SABnzbd API shim backed by the given usenet client,
// seeding the default category list from the application config
// (blank category names are dropped).
func New(usenetClient usenet.Usenet) *SABnzbd {
	appCfg := config.Get()
	sabCfg := appCfg.SABnzbd
	var defaultCategories []string
	for _, name := range sabCfg.Categories {
		if name != "" {
			defaultCategories = append(defaultCategories, name)
		}
	}
	sb := &SABnzbd{
		downloadFolder:    sabCfg.DownloadFolder,
		refreshInterval:   sabCfg.RefreshInterval,
		logger:            logger.New("sabnzbd"),
		usenet:            usenetClient,
		defaultCategories: defaultCategories,
	}
	sb.SetConfig(appCfg)
	return sb
}
// SetConfig rebuilds the SABnzbd-compatible configuration view from the
// application config: directory fields derived from the download
// folder, the category list, and one SABnzbd "server" entry per usable
// usenet provider.
func (s *SABnzbd) SetConfig(cfg *config.Config) {
	sabnzbdConfig := &Config{
		Misc: MiscConfig{
			CompleteDir:   s.downloadFolder,
			DownloadDir:   s.downloadFolder,
			AdminDir:      s.downloadFolder,
			WebPort:       cfg.Port,
			Language:      "en",
			RefreshRate:   "1",
			QueueComplete: "0",
			ConfigLock:    "0",
			Autobrowser:   "1",
			CheckNewRel:   "1",
		},
		Categories: s.getCategories(),
	}
	// Export configured providers as SABnzbd servers. The previous
	// condition (`cfg.Usenet != nil || len(cfg.Usenet.Providers) == 0`)
	// dereferenced a nil Usenet config on its right-hand side and would
	// panic when usenet was not configured; a plain nil check is all
	// that is needed (ranging over an empty provider slice is a no-op).
	if cfg.Usenet != nil {
		for _, provider := range cfg.Usenet.Providers {
			// Skip entries that cannot form a usable server address.
			if provider.Host == "" || provider.Port == 0 {
				continue
			}
			sabnzbdConfig.Servers = append(sabnzbdConfig.Servers, Server{
				Name:        provider.Name,
				Host:        provider.Host,
				Port:        provider.Port,
				Username:    provider.Username,
				Password:    provider.Password,
				Connections: provider.Connections,
				SSL:         provider.SSL,
			})
		}
	}
	s.config = sabnzbdConfig
}
// getCategories returns the SABnzbd category list: one category per
// configured Arr instance, plus any default categories from the config,
// de-duplicated by name.
func (s *SABnzbd) getCategories() []Category {
	_store := store.Get()
	arrs := _store.Arr().GetAll()
	categories := make([]Category, 0, len(arrs)+len(s.defaultCategories))
	added := map[string]struct{}{}
	for i, a := range arrs {
		if _, ok := added[a.Name]; ok {
			continue // Skip if category already added
		}
		categories = append(categories, Category{
			Name:     a.Name,
			Order:    i + 1,
			Pp:       "3",
			Script:   "None",
			Dir:      filepath.Join(s.downloadFolder, a.Name),
			Priority: PriorityNormal,
		})
		// Record the name so duplicate Arr entries and overlapping
		// default categories are skipped. This assignment was missing,
		// which made every `added` lookup a no-op and allowed duplicate
		// category names in the result.
		added[a.Name] = struct{}{}
	}
	// Add default categories if not already present
	for _, defaultCat := range s.defaultCategories {
		if _, ok := added[defaultCat]; ok {
			continue // Skip if default category already added
		}
		categories = append(categories, Category{
			Name:     defaultCat,
			Order:    len(categories) + 1,
			Pp:       "3",
			Script:   "None",
			Dir:      filepath.Join(s.downloadFolder, defaultCat),
			Priority: PriorityNormal,
		})
		added[defaultCat] = struct{}{}
	}
	return categories
}
// Reset is currently a no-op: the SABnzbd shim keeps no cached state
// that needs clearing on reload. Kept so the type satisfies whatever
// lifecycle interface the caller expects — TODO confirm against callers.
func (s *SABnzbd) Reset() {
}

150
pkg/sabnzbd/types.go Normal file
View File

@@ -0,0 +1,150 @@
package sabnzbd
// SABnzbd API response types based on official documentation
// Version is the SABnzbd version string this shim reports in API
// responses (see QueueResponse, VersionResponse, Queue, History).
var (
	Version = "4.5.0"
)
// QueueResponse is the top-level payload for "mode=queue".
type QueueResponse struct {
	Queue   Queue  `json:"queue"`
	Status  bool   `json:"status"`
	Version string `json:"version"`
}
// Queue represents the download queue embedded in QueueResponse.
type Queue struct {
	Version string      `json:"version"`
	Slots   []QueueSlot `json:"slots"`
}
// QueueSlot represents a single in-progress download in the queue.
type QueueSlot struct {
	Status     string  `json:"status"`   // one of the Status* constants
	TimeLeft   string  `json:"timeleft"` // remaining time, "HH:MM:SS"
	Mb         int64   `json:"mb"`
	Filename   string  `json:"filename"`
	Priority   string  `json:"priority"` // one of the Priority* constants
	Cat        string  `json:"cat"`      // download category
	MBLeft     int64   `json:"mbleft"`
	Percentage float64 `json:"percentage"`
	NzoId      string  `json:"nzo_id"` // SABnzbd job identifier
	Size       int64   `json:"size"`
}
// HistoryResponse is the top-level payload for "mode=history".
type HistoryResponse struct {
	History History `json:"history"`
}
// History represents the download history embedded in HistoryResponse.
type History struct {
	Version string        `json:"version"`
	Paused  bool          `json:"paused"` // always false in this shim
	Slots   []HistorySlot `json:"slots"`
}
// HistorySlot represents one finished (or failed) download in history.
type HistorySlot struct {
	Status      string `json:"status"` // one of the Status* constants
	Name        string `json:"name"`
	NZBName     string `json:"nzb_name"`
	NzoId       string `json:"nzo_id"`
	Category    string `json:"category"`
	FailMessage string `json:"fail_message"` // empty unless the download failed
	Bytes       int64  `json:"bytes"`
	Storage     string `json:"storage"` // final storage path of the download
}
// StageLog represents SABnzbd's per-stage processing log.
// NOTE(review): not referenced by the visible handlers — kept for API
// shape compatibility only.
type StageLog struct {
	Name    string   `json:"name"`
	Actions []string `json:"actions"`
}
// VersionResponse is the payload for "mode=version".
type VersionResponse struct {
	Version string `json:"version"`
}
// StatusResponse is the generic success/failure envelope used by
// delete/pause/resume handlers and writeError.
type StatusResponse struct {
	Status bool   `json:"status"`
	Error  string `json:"error,omitempty"`
}
// FullStatusResponse combines queue and history in one payload.
// NOTE(review): handleStatus serves "fullstatus" with a smaller ad-hoc
// struct instead of this type — confirm whether this is still needed.
type FullStatusResponse struct {
	Queue   Queue   `json:"queue"`
	History History `json:"history"`
	Status  bool    `json:"status"`
	Version string  `json:"version"`
}
// AddNZBRequest models an add-NZB request body.
// NOTE(review): the addurl/addfile handlers read form fields directly
// and do not decode this type — kept for API shape compatibility.
type AddNZBRequest struct {
	Name     string `json:"name"`
	Cat      string `json:"cat"`
	Script   string `json:"script"`
	Priority string `json:"priority"`
	PP       string `json:"pp"`
	Password string `json:"password"`
	NZBData  []byte `json:"nzb_data"`
	URL      string `json:"url"`
}
// AddNZBResponse is returned by the addurl/addfile handlers; NzoIds
// carries the identifiers of the newly queued jobs.
type AddNZBResponse struct {
	Status bool     `json:"status"`
	NzoIds []string `json:"nzo_ids"`
	Error  string   `json:"error,omitempty"`
}
// API Mode constants
// Values accepted in the SABnzbd-compatible API "mode" parameter.
const (
	ModeQueue      = "queue"
	ModeHistory    = "history"
	ModeConfig     = "config"
	ModeGetConfig  = "get_config"
	ModeAddURL     = "addurl"
	ModeAddFile    = "addfile"
	ModeVersion    = "version"
	ModePause      = "pause"
	ModeResume     = "resume"
	ModeDelete     = "delete"
	ModeShutdown   = "shutdown"
	ModeRestart    = "restart"
	ModeGetCats    = "get_cats"
	ModeGetScripts = "get_scripts"
	ModeGetFiles   = "get_files"
	ModeRetry      = "retry"
	ModeStatus     = "status"
	ModeFullStatus = "fullstatus"
)
// Status constants
// Job states reported in queue and history slots.
const (
	StatusQueued = "Queued"
	StatusPaused = "Paused"
	// NOTE(review): lowercase is inconsistent with the other states; real
	// SABnzbd reports "Downloading" — confirm consumers compare case-insensitively
	// before changing this value.
	StatusDownloading = "downloading"
	StatusProcessing  = "Processing"
	StatusCompleted   = "Completed"
	StatusFailed      = "Failed"
	StatusGrabbing    = "Grabbing"
	StatusPropagating = "Propagating"
	StatusVerifying   = "Verifying"
	StatusRepairing   = "Repairing"
	StatusExtracting  = "Extracting"
	StatusMoving      = "Moving"
	StatusRunning     = "Running"
)
// Priority constants
// String-encoded SABnzbd priority levels, highest ("2") to stop ("-2").
const (
	PriorityForced = "2"
	PriorityHigh   = "1"
	PriorityNormal = "0"
	PriorityLow    = "-1"
	PriorityStop   = "-2"
)

View File

@@ -3,6 +3,7 @@ package server
import (
"fmt"
"github.com/go-chi/chi/v5"
"github.com/sirrobot01/decypharr/internal/nntp"
"github.com/sirrobot01/decypharr/internal/request"
debridTypes "github.com/sirrobot01/decypharr/pkg/debrid/types"
"github.com/sirrobot01/decypharr/pkg/store"
@@ -118,5 +119,23 @@ func (s *Server) handleStats(w http.ResponseWriter, r *http.Request) {
profiles = append(profiles, profile)
}
stats["debrids"] = profiles
if s.usenet != nil {
if client := s.usenet.Client(); client != nil {
usenetsData := make([]map[string]interface{}, 0)
client.Pools().Range(func(key string, value *nntp.Pool) bool {
if value != nil {
providerData := make(map[string]interface{})
providerData["name"] = key
providerData["active_connections"] = value.ActiveConnections()
providerData["total_connections"] = value.ConnectionCount()
usenetsData = append(usenetsData, providerData)
}
return true
})
stats["usenet"] = usenetsData
}
}
request.JSONResponse(w, stats, http.StatusOK)
}

View File

@@ -9,18 +9,19 @@ import (
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/logger"
"github.com/sirrobot01/decypharr/pkg/usenet"
"io"
"net/http"
"net/url"
"os"
)
type Server struct {
router *chi.Mux
logger zerolog.Logger
usenet usenet.Usenet
}
func New(handlers map[string]http.Handler) *Server {
func New(usenet usenet.Usenet, handlers map[string]http.Handler) *Server {
l := logger.New("http")
r := chi.NewRouter()
r.Use(middleware.Recoverer)
@@ -29,11 +30,8 @@ func New(handlers map[string]http.Handler) *Server {
s := &Server{
logger: l,
usenet: usenet,
}
staticPath, _ := url.JoinPath(cfg.URLBase, "static")
r.Handle(staticPath+"/*",
http.StripPrefix(staticPath, http.FileServer(http.Dir("static"))),
)
r.Route(cfg.URLBase, func(r chi.Router) {
for pattern, handler := range handlers {
@@ -103,10 +101,5 @@ func (s *Server) getLogs(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Expires", "0")
// Stream the file
_, err = io.Copy(w, file)
if err != nil {
s.logger.Error().Err(err).Msg("Error streaming log file")
http.Error(w, "Error streaming log file", http.StatusInternalServerError)
return
}
_, _ = io.Copy(w, file)
}

View File

@@ -2,13 +2,14 @@ package store
import (
"fmt"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
"net/http"
"os"
"path/filepath"
"sync"
"time"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
"github.com/cavaliergopher/grab/v3"
"github.com/sirrobot01/decypharr/internal/utils"
)
@@ -212,7 +213,7 @@ func (s *Store) processSymlink(torrent *Torrent, debridTorrent *types.Torrent) (
if _, err := os.Stat(fullFilePath); !os.IsNotExist(err) {
fileSymlinkPath := filepath.Join(torrentSymlinkPath, file.Name)
if err := os.Symlink(fullFilePath, fileSymlinkPath); err != nil && !os.IsExist(err) {
s.logger.Debug().Msgf("Failed to create symlink: %s: %v", fileSymlinkPath, err)
s.logger.Warn().Msgf("Failed to create symlink: %s: %v", fileSymlinkPath, err)
} else {
filePaths = append(filePaths, fileSymlinkPath)
delete(pending, path)

View File

@@ -96,9 +96,7 @@ func (s *Store) trackAvailableSlots(ctx context.Context) {
return
}
for name, slots := range availableSlots {
s.logger.Debug().Msgf("Available slots for %s: %d", name, slots)
for _, slots := range availableSlots {
// If slots are available, process the next import request from the queue
for slots > 0 {
select {

View File

@@ -1,7 +1,6 @@
package store
import (
"cmp"
"context"
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/config"
@@ -37,18 +36,21 @@ func Get() *Store {
arrs := arr.NewStorage()
deb := debrid.NewStorage()
cfg := config.Get()
qbitCfg := cfg.QBitTorrent
instance = &Store{
repair: repair.New(arrs, deb),
arr: arrs,
debrid: deb,
torrents: newTorrentStorage(cfg.TorrentsFile()),
logger: logger.Default(), // Use default logger [decypharr]
refreshInterval: time.Duration(cmp.Or(qbitCfg.RefreshInterval, 10)) * time.Minute,
skipPreCache: qbitCfg.SkipPreCache,
downloadSemaphore: make(chan struct{}, cmp.Or(qbitCfg.MaxDownloads, 5)),
importsQueue: NewImportQueue(context.Background(), 1000),
refreshInterval: 10 * time.Minute, // Default refresh interval
skipPreCache: false, // Default skip pre-cache
downloadSemaphore: make(chan struct{}, 5), // Default max concurrent downloads
}
if cfg.QBitTorrent != nil {
instance.refreshInterval = time.Duration(cfg.QBitTorrent.RefreshInterval) * time.Minute
instance.skipPreCache = cfg.QBitTorrent.SkipPreCache
instance.downloadSemaphore = make(chan struct{}, cfg.QBitTorrent.MaxDownloads)
}
if cfg.RemoveStalledAfter != "" {
removeStalledAfter, err := time.ParseDuration(cfg.RemoveStalledAfter)

View File

@@ -5,13 +5,15 @@ import (
"context"
"errors"
"fmt"
"math"
"os"
"path/filepath"
"time"
"github.com/sirrobot01/decypharr/internal/request"
"github.com/sirrobot01/decypharr/internal/utils"
debridTypes "github.com/sirrobot01/decypharr/pkg/debrid"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
"os"
"path/filepath"
"time"
)
func (s *Store) AddTorrent(ctx context.Context, importReq *ImportRequest) error {
@@ -57,10 +59,18 @@ func (s *Store) processFiles(torrent *Torrent, debridTorrent *types.Torrent, imp
client := deb.Client()
downloadingStatuses := client.GetDownloadingStatus()
_arr := importReq.Arr
backoff := time.NewTimer(s.refreshInterval)
defer backoff.Stop()
for debridTorrent.Status != "downloaded" {
s.logger.Debug().Msgf("%s <- (%s) Download Progress: %.2f%%", debridTorrent.Debrid, debridTorrent.Name, debridTorrent.Progress)
dbT, err := client.CheckStatus(debridTorrent)
if err != nil {
s.logger.Error().
Str("torrent_id", debridTorrent.Id).
Str("torrent_name", debridTorrent.Name).
Err(err).
Msg("Error checking torrent status")
if dbT != nil && dbT.Id != "" {
// Delete the torrent if it was not downloaded
go func() {
@@ -80,13 +90,16 @@ func (s *Store) processFiles(torrent *Torrent, debridTorrent *types.Torrent, imp
torrent = s.partialTorrentUpdate(torrent, debridTorrent)
// Exit the loop for downloading statuses to prevent memory buildup
if debridTorrent.Status == "downloaded" || !utils.Contains(downloadingStatuses, debridTorrent.Status) {
exitCondition1 := debridTorrent.Status == "downloaded"
exitCondition2 := !utils.Contains(downloadingStatuses, debridTorrent.Status)
if exitCondition1 || exitCondition2 {
break
}
if !utils.Contains(client.GetDownloadingStatus(), debridTorrent.Status) {
break
}
time.Sleep(s.refreshInterval)
<-backoff.C
// Increase interval gradually, cap at max
nextInterval := min(s.refreshInterval*2, 30*time.Second)
backoff.Reset(nextInterval)
}
var torrentSymlinkPath string
var err error
@@ -96,15 +109,14 @@ func (s *Store) processFiles(torrent *Torrent, debridTorrent *types.Torrent, imp
timer := time.Now()
onFailed := func(err error) {
if err != nil {
s.markTorrentAsFailed(torrent)
go func() {
_ = client.DeleteTorrent(debridTorrent.Id)
}()
s.logger.Error().Err(err).Msgf("Error occured while processing torrent %s", debridTorrent.Name)
importReq.markAsFailed(err, torrent, debridTorrent)
return
}
s.markTorrentAsFailed(torrent)
go func() {
if deleteErr := client.DeleteTorrent(debridTorrent.Id); deleteErr != nil {
s.logger.Warn().Err(deleteErr).Msgf("Failed to delete torrent %s", debridTorrent.Id)
}
}()
s.logger.Error().Err(err).Msgf("Error occured while processing torrent %s", debridTorrent.Name)
importReq.markAsFailed(err, torrent, debridTorrent)
}
onSuccess := func(torrentSymlinkPath string) {
@@ -118,7 +130,9 @@ func (s *Store) processFiles(torrent *Torrent, debridTorrent *types.Torrent, imp
s.logger.Error().Msgf("Error sending discord message: %v", err)
}
}()
_arr.Refresh()
go func() {
_arr.Refresh()
}()
}
switch importReq.Action {
@@ -137,7 +151,6 @@ func (s *Store) processFiles(torrent *Torrent, debridTorrent *types.Torrent, imp
rclonePath := filepath.Join(debridTorrent.MountPath, cache.GetTorrentFolder(debridTorrent)) // /mnt/remote/realdebrid/MyTVShow
torrentFolderNoExt := utils.RemoveExtension(debridTorrent.Name)
torrentSymlinkPath, err = s.createSymlinksWebdav(torrent, debridTorrent, rclonePath, torrentFolderNoExt) // /mnt/symlinks/{category}/MyTVShow/
} else {
// User is using either zurg or debrid webdav
torrentSymlinkPath, err = s.processSymlink(torrent, debridTorrent) // /mnt/symlinks/{category}/MyTVShow/
@@ -202,6 +215,9 @@ func (s *Store) partialTorrentUpdate(t *Torrent, debridTorrent *types.Torrent) *
}
totalSize := debridTorrent.Bytes
progress := (cmp.Or(debridTorrent.Progress, 0.0)) / 100.0
if math.IsNaN(progress) || math.IsInf(progress, 0) {
progress = 0
}
sizeCompleted := int64(float64(totalSize) * progress)
var speed int64

141
pkg/usenet/cache.go Normal file
View File

@@ -0,0 +1,141 @@
package usenet
import (
"github.com/chrisfarms/yenc"
"github.com/puzpuzpuz/xsync/v4"
"github.com/rs/zerolog"
"sync/atomic"
"time"
)
// SegmentCache provides intelligent caching for NNTP segments,
// bounded by a byte budget with LRU-style eviction.
type SegmentCache struct {
	cache       *xsync.Map[string, *CachedSegment] // messageID -> cached segment
	logger      zerolog.Logger
	maxSize     int64        // cache budget in bytes
	currentSize atomic.Int64 // bytes currently accounted for in the cache
}
// CachedSegment represents a cached segment with metadata.
// AccessCount/LastAccess feed the LRU eviction heuristic.
type CachedSegment struct {
	MessageID    string    `json:"message_id"`
	Data         []byte    `json:"data"`          // decoded segment bytes
	DecodedSize  int64     `json:"decoded_size"`  // Actual size after yEnc decoding
	DeclaredSize int64     `json:"declared_size"` // Size declared in NZB
	CachedAt     time.Time `json:"cached_at"`
	AccessCount  int64     `json:"access_count"`
	LastAccess   time.Time `json:"last_access"`
	FileBegin    int64     `json:"file_begin"` // Start byte offset in the file
	FileEnd      int64     `json:"file_end"`   // End byte offset in the file
}
// NewSegmentCache creates a new segment cache with a fixed in-memory budget.
func NewSegmentCache(logger zerolog.Logger) *SegmentCache {
	sc := &SegmentCache{
		cache:  xsync.NewMap[string, *CachedSegment](),
		logger: logger.With().Str("component", "segment_cache").Logger(),
		// Default max size 50MB (previous comment incorrectly said 100MB).
		maxSize: 50 * 1024 * 1024,
	}
	return sc
}
// Get retrieves a segment from cache and touches its LRU metadata.
//
// The access counter is bumped atomically (the previous bare `++` raced when
// several readers hit the same segment). NOTE(review): LastAccess is still a
// plain time.Time write and can race; it is only an eviction heuristic, but
// it will trip the race detector — confirm whether that is acceptable.
func (sc *SegmentCache) Get(messageID string) (*CachedSegment, bool) {
	segment, found := sc.cache.Load(messageID)
	if !found {
		return nil, false
	}
	atomic.AddInt64(&segment.AccessCount, 1)
	segment.LastAccess = time.Now()
	return segment, true
}
// Put stores a segment in cache with intelligent size management.
//
// Fixes over the previous version:
//   - Replacing an existing messageID first releases the old entry's bytes;
//     previously the counter leaked on overwrite.
//   - The stored copy is sized by len(data.Body) — the bytes we actually
//     have — instead of the declared data.Size, which could disagree and
//     leave a zero-padded or truncated buffer. DecodedSize now reflects the
//     real decoded byte count for the same reason.
func (sc *SegmentCache) Put(messageID string, data *yenc.Part, declaredSize int64) {
	body := data.Body
	dataSize := int64(len(body))
	// Release accounting for an entry we are about to overwrite.
	if old, ok := sc.cache.LoadAndDelete(messageID); ok {
		sc.currentSize.Add(-int64(len(old.Data)))
	}
	// Make room if this segment would push us past the budget.
	if sc.currentSize.Load()+dataSize > sc.maxSize {
		sc.evictLRU(dataSize)
	}
	now := time.Now()
	stored := make([]byte, dataSize)
	copy(stored, body)
	sc.cache.Store(messageID, &CachedSegment{
		MessageID:    messageID,
		Data:         stored,
		DecodedSize:  dataSize,
		DeclaredSize: declaredSize,
		CachedAt:     now,
		AccessCount:  1,
		LastAccess:   now,
	})
	sc.currentSize.Add(dataSize)
}
// evictLRU evicts least-recently-used segments until at least neededSpace
// bytes have been released.
//
// BUG FIX: the previous version never decremented sc.currentSize after
// deleting entries, so the counter grew monotonically and eviction ran on
// every Put once the phantom limit was reached. It also used an O(n^2)
// bubble sort; an insertion sort keeps the no-extra-imports property while
// behaving well on the small candidate lists seen here.
func (sc *SegmentCache) evictLRU(neededSpace int64) {
	if neededSpace <= 0 || sc.cache.Size() == 0 {
		return
	}
	type segmentInfo struct {
		key        string
		lastAccess time.Time
	}
	candidates := make([]segmentInfo, 0, sc.cache.Size())
	sc.cache.Range(func(key string, value *CachedSegment) bool {
		candidates = append(candidates, segmentInfo{key: key, lastAccess: value.LastAccess})
		return true // continue iteration
	})
	// Insertion sort by last access time (oldest first).
	for i := 1; i < len(candidates); i++ {
		for j := i; j > 0 && candidates[j].lastAccess.Before(candidates[j-1].lastAccess); j-- {
			candidates[j], candidates[j-1] = candidates[j-1], candidates[j]
		}
	}
	// Evict oldest entries until enough space has been freed. LoadAndDelete
	// ensures a concurrent Delete of the same key is not double-counted.
	freed := int64(0)
	for _, c := range candidates {
		if freed >= neededSpace {
			break
		}
		if seg, ok := sc.cache.LoadAndDelete(c.key); ok {
			freed += int64(len(seg.Data))
		}
	}
	sc.currentSize.Add(-freed)
}
// Clear removes all cached segments and resets the size accounting.
func (sc *SegmentCache) Clear() {
	sc.cache.Clear()
	sc.currentSize.Store(0)
}
// Delete removes a specific segment from cache, releasing its accounted
// bytes. BUG FIX: the previous version left sc.currentSize untouched, so
// deleted segments kept consuming the cache budget forever.
func (sc *SegmentCache) Delete(messageID string) {
	if seg, ok := sc.cache.LoadAndDelete(messageID); ok {
		sc.currentSize.Add(-int64(len(seg.Data)))
	}
}

281
pkg/usenet/downloader.go Normal file
View File

@@ -0,0 +1,281 @@
package usenet
import (
"context"
"errors"
"fmt"
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/logger"
"github.com/sirrobot01/decypharr/internal/nntp"
"github.com/sirrobot01/decypharr/internal/utils"
"golang.org/x/sync/errgroup"
"os"
"path/filepath"
"time"
)
// DownloadWorker manages concurrent NZB downloads and symlink creation.
type DownloadWorker struct {
	client       *nntp.Client // pooled NNTP connections
	processor    *Processor   // owning processor; provides the NZB store
	logger       zerolog.Logger
	skipPreCache bool   // Skip pre-caching for faster processing
	mountFolder  string // Folder where downloads are mounted
}
// DownloadJob represents a download job for an NZB.
type DownloadJob struct {
	NZB      *NZB   // parsed NZB to process
	Action   string // "download", "symlink", or "none"; anything else falls back to symlink
	Priority int    // NOTE(review): not consulted in this file — confirm the queue uses it
	Callback func(*NZB, error) // optional completion callback
	DownloadDir string         // destination directory for downloads/symlinks
}
// NewDownloadWorker wires a download worker from the usenet configuration,
// an NNTP client, and the owning processor.
func NewDownloadWorker(config *config.Usenet, client *nntp.Client, processor *Processor) *DownloadWorker {
	return &DownloadWorker{
		client:       client,
		processor:    processor,
		logger:       logger.New("usenet-download-worker"),
		skipPreCache: config.SkipPreCache,
		mountFolder:  config.MountFolder,
	}
}
// CheckAvailability probes a sample of the NZB's first-file segments on the
// NNTP server(s) to decide whether the post is retrievable. Returns an error
// when any probed segment is missing or unreachable.
func (dw *DownloadWorker) CheckAvailability(ctx context.Context, job *DownloadJob) error {
	// BUG FIX: guard before indexing Files[0] (and before logging through a
	// possibly-nil NZB); previously an empty NZB panicked here.
	if job == nil || job.NZB == nil || len(job.NZB.Files) == 0 {
		return fmt.Errorf("no files found in NZB")
	}
	dw.logger.Debug().
		Str("nzb_id", job.NZB.ID).
		Msg("Checking NZB availability")
	firstFile := job.NZB.Files[0]
	if len(firstFile.Segments) == 0 {
		return fmt.Errorf("no segments found in first file of NZB")
	}
	// Smart sampling: first and last segments of the first file.
	samplesToCheck := dw.getSampleSegments(firstFile.Segments)
	g, gCtx := errgroup.WithContext(ctx)
	// Cap concurrency at the smallest provider connection limit so we do not
	// overwhelm the NNTP server.
	maxConcurrency := len(samplesToCheck)
	if maxConns := dw.client.MinimumMaxConns(); maxConns < maxConcurrency {
		maxConcurrency = maxConns
	}
	g.SetLimit(maxConcurrency)
	for i, segment := range samplesToCheck {
		segment := segment // capture loop variable (pre-Go1.22 semantics)
		segmentNum := i + 1
		g.Go(func() error {
			select {
			case <-gCtx.Done():
				return gCtx.Err() // another probe already failed / caller canceled
			default:
			}
			conn, cleanup, err := dw.client.GetConnection(gCtx)
			if err != nil {
				return fmt.Errorf("failed to get NNTP connection: %w", err)
			}
			defer cleanup() // Ensure connection is returned to the pool
			seg, err := conn.GetSegment(segment.MessageID, segmentNum)
			if err != nil {
				return fmt.Errorf("failed to check segment %d availability: %w", segmentNum, err)
			}
			if seg == nil {
				return fmt.Errorf("segment %d not found", segmentNum)
			}
			return nil
		})
	}
	if err := g.Wait(); err != nil {
		return fmt.Errorf("availability check failed: %w", err)
	}
	// Persist availability info; a store failure is non-fatal.
	if err := dw.processor.store.Update(job.NZB); err != nil {
		dw.logger.Warn().Err(err).Msg("Failed to update NZB with availability info")
	}
	return nil
}
// Process executes a download job according to job.Action and reports the
// outcome through the job's optional callback. Returns the processing error.
func (dw *DownloadWorker) Process(ctx context.Context, job *DownloadJob) error {
	var (
		finalPath string
		err       error
	)
	// BUG FIX: the original `defer func(err error){...}(err)` evaluated err
	// at defer time — always nil — so callbacks never saw failures. Capture
	// err by reference via a closure instead.
	defer func() {
		if job.Callback != nil {
			job.Callback(job.NZB, err)
		}
	}()
	switch job.Action {
	case "download":
		finalPath, err = dw.downloadNZB(ctx, job)
	case "symlink":
		finalPath, err = dw.symlinkNZB(ctx, job)
	case "none":
		return nil
	default:
		// Use symlink as the default action.
		finalPath, err = dw.symlinkNZB(ctx, job)
	}
	if err != nil {
		return err
	}
	if finalPath == "" {
		err = fmt.Errorf("final path is empty after processing job: %s", job.Action)
		return err
	}
	// Atomic transition to the completed state; assign to err so the
	// callback above also observes a failed store update.
	err = dw.processor.store.MarkAsCompleted(job.NZB.ID, finalPath)
	return err
}
// downloadNZB downloads an NZB to the specified directory.
// NOTE(review): currently a stub — it logs and returns job.DownloadDir
// without fetching anything (see TODO below).
func (dw *DownloadWorker) downloadNZB(ctx context.Context, job *DownloadJob) (string, error) {
	dw.logger.Info().
		Str("nzb_id", job.NZB.ID).
		Str("download_dir", job.DownloadDir).
		Msg("Starting NZB download")
	// TODO: implement download logic
	return job.DownloadDir, nil
}
// getSampleSegments picks a small, representative subset of segments to
// probe for availability: the first and the last segment. NZBs with at most
// two segments are returned unchanged (checked in full).
func (dw *DownloadWorker) getSampleSegments(segments []NZBSegment) []NZBSegment {
	if len(segments) <= 2 {
		return segments
	}
	return []NZBSegment{segments[0], segments[len(segments)-1]}
}
// symlinkNZB prepares the symlink target directory and delegates to
// createSymlinksWebdav, which waits for the files to appear on the mount.
func (dw *DownloadWorker) symlinkNZB(ctx context.Context, job *DownloadJob) (string, error) {
	// BUG FIX: the nil check must run before job.NZB is dereferenced; the
	// previous order logged job.NZB.ID first and would panic on a nil NZB.
	if job.NZB == nil {
		return "", fmt.Errorf("NZB is nil")
	}
	dw.logger.Info().
		Str("nzb_id", job.NZB.ID).
		Str("symlink_dir", job.DownloadDir).
		Msg("Creating symlinks for NZB")
	// BUG FIX: validate the configured mount folder itself; the previous
	// check ran after filepath.Join had already appended the NZB name, so it
	// could never fire for a misconfigured empty mount folder.
	if dw.mountFolder == "" {
		return "", fmt.Errorf("mount folder is empty")
	}
	mountFolder := filepath.Join(dw.mountFolder, job.NZB.Name)  // e.g. /mnt/rclone/usenet/__all__/TV_SHOW
	symlinkPath := filepath.Join(job.DownloadDir, job.NZB.Name) // e.g. /mnt/symlinks/usenet/sonarr/TV_SHOW
	if err := os.MkdirAll(symlinkPath, 0755); err != nil {
		return "", fmt.Errorf("failed to create symlink directory: %w", err)
	}
	return dw.createSymlinksWebdav(job.NZB, mountFolder, symlinkPath)
}
// createSymlinksWebdav waits (up to 30 minutes, polling every 100ms) for the
// NZB's files to appear under mountPath — e.g. an rclone/WebDAV mount — and
// symlinks each into symlinkPath as it shows up. Returns symlinkPath, plus a
// non-nil error on timeout. Unless skipPreCache is set, the linked files are
// pre-cached in a background goroutine after everything is linked.
func (dw *DownloadWorker) createSymlinksWebdav(nzb *NZB, mountPath, symlinkPath string) (string, error) {
	files := nzb.GetFiles()
	// Index pending files by name so mount-dir entries can be matched in O(1).
	remainingFiles := make(map[string]NZBFile)
	for _, file := range files {
		remainingFiles[file.Name] = file
	}
	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop()
	timeout := time.After(30 * time.Minute)
	filePaths := make([]string, 0, len(files))
	maxLogCount := 10 // Limit the number of log messages to avoid flooding
	for len(remainingFiles) > 0 {
		select {
		case <-ticker.C:
			entries, err := os.ReadDir(mountPath)
			if err != nil {
				if maxLogCount > 0 && !errors.Is(err, os.ErrNotExist) {
					// Only log if it's not a "not found" error
					// This is due to the fact the mount path may not exist YET
					dw.logger.Warn().
						Err(err).
						Str("mount_path", mountPath).
						Msg("Failed to read directory, retrying")
					maxLogCount--
				}
				continue
			}
			// Check which files exist in this batch
			for _, entry := range entries {
				filename := entry.Name()
				// NOTE(review): this logs at Info for every directory entry on
				// every 100ms tick — likely very noisy; consider Debug level.
				dw.logger.Info().
					Str("filename", filename).
					Msg("Checking file existence in mount path")
				if file, exists := remainingFiles[filename]; exists {
					fullFilePath := filepath.Join(mountPath, filename)
					fileSymlinkPath := filepath.Join(symlinkPath, file.Name)
					if err := os.Symlink(fullFilePath, fileSymlinkPath); err != nil && !os.IsExist(err) {
						// Symlink failed: the file stays in remainingFiles and is
						// retried on the next tick.
						dw.logger.Debug().Msgf("Failed to create symlink: %s: %v", fileSymlinkPath, err)
					} else {
						filePaths = append(filePaths, fileSymlinkPath)
						delete(remainingFiles, filename)
						dw.logger.Info().Msgf("File is ready: %s", file.Name)
					}
				}
			}
		case <-timeout:
			dw.logger.Warn().Msgf("Timeout waiting for files, %d files still pending", len(remainingFiles))
			return symlinkPath, fmt.Errorf("timeout waiting for files")
		}
	}
	if dw.skipPreCache {
		return symlinkPath, nil
	}
	// Pre-cache the files in the background (first 256KB and 1MB of each).
	go func() {
		defer func() {
			if r := recover(); r != nil {
				dw.logger.Error().
					Interface("panic", r).
					Str("nzbName", nzb.Name).
					Msg("Recovered from panic in pre-cache goroutine")
			}
		}()
		if err := utils.PreCacheFile(filePaths); err != nil {
			dw.logger.Error().Msgf("Failed to pre-cache file: %s", err)
		} else {
			dw.logger.Debug().Msgf("Pre-cached %d files", len(filePaths))
		}
	}()
	return symlinkPath, nil
}

353
pkg/usenet/errors.go Normal file
View File

@@ -0,0 +1,353 @@
package usenet
import (
"errors"
"fmt"
"net"
"strings"
"sync"
"time"
)
// Sentinel errors for transient usenet failures; intended for use with
// errors.Is and the retry helpers in this file.
var (
	ErrConnectionFailed  = errors.New("failed to connect to NNTP server")
	ErrServerUnavailable = errors.New("NNTP server unavailable")
	ErrRateLimitExceeded = errors.New("rate limit exceeded")
	ErrDownloadTimeout   = errors.New("download timeout")
)
// ErrInvalidNZBf creates a formatted error for NZB validation failures,
// prefixing the message with "invalid NZB: ". Because the caller's format
// string is passed straight to fmt.Errorf, %w wrapping is preserved.
func ErrInvalidNZBf(format string, args ...interface{}) error {
	return fmt.Errorf("invalid NZB: "+format, args...)
}
// Error is a structured usenet error carrying a machine-readable code, an
// optional server address, and a retryability flag.
type Error struct {
	Code       string
	Message    string
	Err        error
	ServerAddr string
	Timestamp  time.Time
	Retryable  bool
}

// Error renders "usenet error [code] on addr: message", omitting the
// " on addr" part when no server address is set.
func (e *Error) Error() string {
	msg := fmt.Sprintf("usenet error [%s]", e.Code)
	if e.ServerAddr != "" {
		msg += " on " + e.ServerAddr
	}
	return msg + ": " + e.Message
}

// Unwrap exposes the wrapped cause for errors.Is / errors.As chains.
func (e *Error) Unwrap() error {
	return e.Err
}

// Is reports whether the wrapped cause matches target.
func (e *Error) Is(target error) bool {
	if target == nil || e.Err == nil {
		return false
	}
	return errors.Is(e.Err, target)
}
// NewUsenetError builds an *Error without server context; retryability is
// derived from the wrapped error.
func NewUsenetError(code, message string, err error) *Error {
	e := &Error{Code: code, Message: message, Err: err}
	e.Timestamp = time.Now()
	e.Retryable = isRetryableError(err)
	return e
}
// NewServerError builds an *Error tagged with the originating server
// address; retryability is derived from the wrapped error.
func NewServerError(code, message, serverAddr string, err error) *Error {
	e := &Error{Code: code, Message: message, Err: err, ServerAddr: serverAddr}
	e.Timestamp = time.Now()
	e.Retryable = isRetryableError(err)
	return e
}
// isRetryableError reports whether err looks transient (network timeouts,
// temporary DNS failures, rate limits, overloaded servers, ...).
func isRetryableError(err error) bool {
	if err == nil {
		return false
	}
	// Typed network errors: retry only on timeouts.
	var netErr net.Error
	if errors.As(err, &netErr) {
		return netErr.Timeout()
	}
	// Temporary DNS failures are retryable.
	var dnsErr *net.DNSError
	if errors.As(err, &dnsErr) {
		return dnsErr.Temporary()
	}
	if errors.Is(err, net.ErrClosed) {
		return true
	}
	// Fall back to message sniffing for conditions that carry no type.
	msg := strings.ToLower(err.Error())
	for _, hint := range []string{
		"connection refused",
		"connection reset",
		"connection timed out",
		"network is unreachable",
		"host is unreachable",
		"temporary failure",
		"service unavailable",
		"server overloaded",
		"rate limit",
		"too many connections",
	} {
		if strings.Contains(msg, hint) {
			return true
		}
	}
	return false
}
// RetryConfig defines retry behavior for RetryWithBackoff.
type RetryConfig struct {
	MaxRetries      int           // maximum number of retries after the first attempt
	InitialDelay    time.Duration // delay before the first retry
	MaxDelay        time.Duration // upper bound on any computed delay
	BackoffFactor   float64       // multiplier applied per attempt (see GetDelay)
	RetryableErrors []error       // sentinel errors always considered retryable
}
// DefaultRetryConfig returns sane defaults: 3 retries, 1s initial delay,
// 30s cap, factor 2, retrying this package's transient sentinel errors.
func DefaultRetryConfig() *RetryConfig {
	cfg := &RetryConfig{
		MaxRetries:    3,
		InitialDelay:  1 * time.Second,
		MaxDelay:      30 * time.Second,
		BackoffFactor: 2.0,
	}
	cfg.RetryableErrors = []error{
		ErrConnectionFailed,
		ErrServerUnavailable,
		ErrRateLimitExceeded,
		ErrDownloadTimeout,
	}
	return cfg
}
// ShouldRetry reports whether err warrants another attempt, given how many
// attempts have already been made.
func (rc *RetryConfig) ShouldRetry(err error, attempt int) bool {
	if attempt >= rc.MaxRetries {
		return false
	}
	// Structured usenet errors carry their own retryability flag.
	var ue *Error
	if errors.As(err, &ue) {
		return ue.Retryable
	}
	// Configured sentinel errors are always retryable.
	for _, candidate := range rc.RetryableErrors {
		if errors.Is(err, candidate) {
			return true
		}
	}
	return isRetryableError(err)
}
// GetDelay calculates the delay before retry number `attempt` using true
// exponential backoff — InitialDelay * BackoffFactor^attempt — capped at
// MaxDelay.
//
// BUG FIX: the previous formula (InitialDelay * attempt * BackoffFactor) was
// linear, not exponential, despite the BackoffFactor name; delays for
// attempts 1 and 2 are unchanged with the default factor of 2, later ones
// now grow geometrically.
func (rc *RetryConfig) GetDelay(attempt int) time.Duration {
	if attempt <= 0 {
		return rc.InitialDelay
	}
	factor := 1.0
	for i := 0; i < attempt; i++ {
		factor *= rc.BackoffFactor
		// Bail out early once the cap is reached to avoid float overflow on
		// large attempt counts.
		if time.Duration(float64(rc.InitialDelay)*factor) >= rc.MaxDelay {
			return rc.MaxDelay
		}
	}
	return time.Duration(float64(rc.InitialDelay) * factor)
}
// RetryWithBackoff runs operation until it succeeds, is deemed
// non-retryable, or the retry budget is exhausted. Returns nil on success,
// otherwise the last error observed.
func RetryWithBackoff(config *RetryConfig, operation func() error) error {
	var lastErr error
	for attempt := 0; attempt <= config.MaxRetries; attempt++ {
		if attempt > 0 {
			// Sleep before each retry (never before the first attempt).
			time.Sleep(config.GetDelay(attempt))
		}
		lastErr = operation()
		if lastErr == nil {
			return nil
		}
		if !config.ShouldRetry(lastErr, attempt) {
			break
		}
	}
	return lastErr
}
// CircuitBreakerConfig defines circuit breaker behavior.
type CircuitBreakerConfig struct {
	MaxFailures     int           // consecutive failures before the breaker opens
	ResetTimeout    time.Duration // open duration before a half-open probe is allowed
	CheckInterval   time.Duration // NOTE(review): not read anywhere in this file — confirm it is still needed
	FailureCallback func(error)   // optional observer, invoked asynchronously on failure
}
// CircuitBreaker implements a circuit breaker pattern for NNTP connections.
type CircuitBreaker struct {
	config      *CircuitBreakerConfig
	failures    int       // consecutive failure count, guarded by mu
	lastFailure time.Time // time of the most recent failure, guarded by mu
	state       string    // "closed", "open", "half-open"; guarded by mu
	mu          *sync.RWMutex
}
// NewCircuitBreaker creates a circuit breaker in the closed state. A nil
// config gets defaults: 5 failures, 60s reset timeout, 10s check interval.
func NewCircuitBreaker(config *CircuitBreakerConfig) *CircuitBreaker {
	cfg := config
	if cfg == nil {
		cfg = &CircuitBreakerConfig{
			MaxFailures:   5,
			ResetTimeout:  60 * time.Second,
			CheckInterval: 10 * time.Second,
		}
	}
	return &CircuitBreaker{
		config: cfg,
		state:  "closed",
		mu:     &sync.RWMutex{},
	}
}
// Execute runs operation through the circuit breaker: fail fast while the
// breaker is open, allow a probe once ResetTimeout has elapsed (half-open),
// and close again after a successful probe.
func (cb *CircuitBreaker) Execute(operation func() error) error {
	// Snapshot state under the read lock; the operation itself runs unlocked.
	cb.mu.RLock()
	state := cb.state
	failures := cb.failures
	lastFailure := cb.lastFailure
	cb.mu.RUnlock()
	// Check if we should attempt reset.
	// NOTE(review): between RUnlock above and Lock below, several goroutines
	// may all transition to half-open and probe concurrently — confirm that
	// multiple simultaneous probes are acceptable.
	if state == "open" && time.Since(lastFailure) > cb.config.ResetTimeout {
		cb.mu.Lock()
		cb.state = "half-open"
		cb.mu.Unlock()
		state = "half-open"
	}
	if state == "open" {
		return NewUsenetError("circuit_breaker_open",
			fmt.Sprintf("circuit breaker is open (failures: %d)", failures),
			ErrServerUnavailable)
	}
	err := operation()
	cb.mu.Lock()
	defer cb.mu.Unlock()
	if err != nil {
		cb.failures++
		cb.lastFailure = time.Now()
		if cb.failures >= cb.config.MaxFailures {
			cb.state = "open"
		}
		if cb.config.FailureCallback != nil {
			// Run the callback asynchronously so a slow observer cannot hold
			// the breaker's lock.
			go func() {
				cb.config.FailureCallback(err)
			}()
		}
		return err
	}
	// Success - reset if we were in half-open state
	if cb.state == "half-open" {
		cb.state = "closed"
		cb.failures = 0
	}
	return nil
}
// GetState returns the current circuit breaker state
// ("closed", "open", or "half-open").
func (cb *CircuitBreaker) GetState() string {
	cb.mu.RLock()
	defer cb.mu.RUnlock()
	return cb.state
}
// Reset manually resets the circuit breaker to closed and clears the
// failure count.
func (cb *CircuitBreaker) Reset() {
	cb.mu.Lock()
	defer cb.mu.Unlock()
	cb.state = "closed"
	cb.failures = 0
}
// ValidationError represents a validation failure for a named field.
type ValidationError struct {
	Field   string      // field that failed validation
	Value   interface{} // offending value, or a short summary of it
	Message string      // human-readable reason
}

func (e *ValidationError) Error() string {
	return fmt.Sprintf("validation error for field '%s': %s", e.Field, e.Message)
}

// ValidateNZBContent validates raw NZB content: it must be non-empty, within
// the 100MB size limit, and contain an "<nzb" opening tag. Returns a
// *ValidationError describing the first violation, or nil.
func ValidateNZBContent(content []byte) error {
	if len(content) == 0 {
		return &ValidationError{
			Field:   "content",
			Value:   len(content),
			Message: "NZB content cannot be empty",
		}
	}
	if len(content) > 100*1024*1024 { // 100MB limit
		return &ValidationError{
			Field:   "content",
			Value:   len(content),
			Message: "NZB content exceeds maximum size limit (100MB)",
		}
	}
	contentStr := string(content)
	if !strings.Contains(contentStr, "<nzb") {
		// Include only a short prefix of the content in the error value;
		// the manual clamp was replaced with the min builtin (the file
		// already relies on Go 1.21 min/max elsewhere).
		return &ValidationError{
			Field:   "content",
			Value:   contentStr[:min(100, len(contentStr))],
			Message: "content does not appear to be valid NZB format",
		}
	}
	return nil
}

83
pkg/usenet/misc.go Normal file
View File

@@ -0,0 +1,83 @@
package usenet
import (
"io"
"strings"
)
// isSkippableError reports whether a streaming error is expected and benign
// (client went away, pipe closed, EOF, canceled context) and can be ignored
// rather than treated as a failure.
func (s *Streamer) isSkippableError(err error) bool {
	if err == nil {
		return false
	}
	// EOF is the normal end-of-stream signal.
	if err == io.EOF {
		return true
	}
	msg := strings.ToLower(err.Error())
	skippable := []string{
		"client disconnected",
		"broken pipe",
		"connection reset",
		"write failed",
		"writer is nil",
		"closed pipe",
		"context canceled",
		"operation timed out",
		"eof",
	}
	for _, fragment := range skippable {
		if strings.Contains(msg, fragment) {
			return true
		}
	}
	return false
}
// RecalculateSegmentBoundaries rebuilds Start/EndOffset for each segment so
// the offsets form a contiguous run from zero, preferring measured decoded
// sizes from actualSizes (keyed by message ID) over the sizes implied by the
// parser (EndOffset - StartOffset). The input slice is not modified.
func RecalculateSegmentBoundaries(
	segments []NZBSegment,
	actualSizes map[string]int64,
) []NZBSegment {
	if len(segments) == 0 {
		return segments
	}
	out := make([]NZBSegment, len(segments))
	offset := int64(0)
	for i := range segments {
		seg := segments[i]
		size, ok := actualSizes[seg.MessageID]
		if !ok {
			// Fall back to the parser-derived decoded size.
			size = seg.EndOffset - seg.StartOffset
		}
		seg.StartOffset = offset
		offset += size
		seg.EndOffset = offset
		out[i] = seg
	}
	return out
}
// GetSegmentActualSizes extracts actual decoded sizes from cache, keyed by
// message ID; segments not present in the cache are simply omitted. A nil
// cache yields an empty map.
// NOTE(review): cache.Get bumps AccessCount/LastAccess, so this size probe
// also refreshes each segment's LRU position — confirm that is intended.
func GetSegmentActualSizes(segments []NZBSegment, cache *SegmentCache) map[string]int64 {
	actualSizes := make(map[string]int64)
	if cache == nil {
		return actualSizes
	}
	for _, segment := range segments {
		if cached, found := cache.Get(segment.MessageID); found {
			actualSizes[segment.MessageID] = int64(len(cached.Data))
		}
	}
	return actualSizes
}

152
pkg/usenet/nzb.go Normal file
View File

@@ -0,0 +1,152 @@
package usenet
import (
"fmt"
"strings"
)
// SegmentRange describes the intersection of a requested byte range with a
// single segment, in both segment-relative and absolute file coordinates.
type SegmentRange struct {
	Segment    NZBSegment // Reference to the segment
	ByteStart  int64      // Start offset within this segment
	ByteEnd    int64      // End offset within this segment
	TotalStart int64      // Absolute start position in file
	TotalEnd   int64      // Absolute end position in file
}
// GetFileByName returns a pointer to the first non-deleted file with the
// given name, or nil when no such file exists.
func (nzb *NZB) GetFileByName(name string) *NZBFile {
	for i := range nzb.Files {
		file := &nzb.Files[i]
		if file.IsDeleted || file.Name != name {
			continue
		}
		return file
	}
	return nil
}
// MarkFileAsRemoved soft-deletes the named file (sets IsDeleted); it returns
// an error if the NZB contains no file with that name.
func (nzb *NZB) MarkFileAsRemoved(fileName string) error {
	for i := range nzb.Files {
		if nzb.Files[i].Name != fileName {
			continue
		}
		nzb.Files[i].IsDeleted = true
		return nil
	}
	return fmt.Errorf("file %s not found in NZB %s", fileName, nzb.ID)
}
// GetSegmentsInRange maps the byte range [start, end] of this file onto the
// segments that contain it, returning each segment's intersection with the
// range in both segment-relative and absolute coordinates. end == -1 means
// "to end of file". segmentSize is the uniform per-segment size; when <= 0
// the fallback is the actual byte count of the first segment reached.
func (nf *NZBFile) GetSegmentsInRange(segmentSize int64, start, end int64) []SegmentRange {
	if end == -1 {
		end = nf.Size - 1
	}
	var segmentRanges []SegmentRange
	var cumulativeSize int64
	for i, segment := range nf.Segments {
		// Use the file's segment size (uniform)
		if segmentSize <= 0 {
			// NOTE(review): once set from the first segment this fallback
			// sticks for all following segments — confirm uniformity holds.
			segmentSize = segment.Bytes // Fallback to actual segment size if not set
		}
		// Handle last segment which might be smaller
		if i == len(nf.Segments)-1 {
			segmentSize = segment.Bytes // Last segment uses actual size
		}
		cumulativeSize += segmentSize
		// Skip segments that end before our start position
		if cumulativeSize <= start {
			continue
		}
		// Calculate this segment's boundaries
		segmentStart := cumulativeSize - segmentSize
		segmentEnd := cumulativeSize - 1
		// Calculate intersection with requested range
		rangeStart := max(start, segmentStart)
		rangeEnd := min(end, segmentEnd)
		segmentRange := SegmentRange{
			Segment:    segment,
			ByteStart:  rangeStart - segmentStart, // Offset within segment
			ByteEnd:    rangeEnd - segmentStart,   // End offset within segment
			TotalStart: rangeStart,                // Absolute position
			TotalEnd:   rangeEnd,                  // Absolute position
		}
		segmentRanges = append(segmentRanges, segmentRange)
		// Stop if we've covered the entire requested range
		if cumulativeSize >= end+1 {
			break
		}
	}
	return segmentRanges
}
// ConvertToSegmentRanges maps segments to absolute byte ranges assuming the
// file's uniform SegmentSize; the trailing segment uses its actual byte
// count, since it may be shorter.
func (nf *NZBFile) ConvertToSegmentRanges(segments []NZBSegment) []SegmentRange {
	var ranges []SegmentRange
	offset := int64(0)
	last := len(segments) - 1
	for i, segment := range segments {
		size := nf.SegmentSize
		if i == last {
			size = segment.Bytes // trailing segment may be short
		}
		ranges = append(ranges, SegmentRange{
			Segment:    segment,
			ByteStart:  0,                 // full segment: starts at 0 within it
			ByteEnd:    size - 1,          // ...and ends at its final byte
			TotalStart: offset,            // absolute start position
			TotalEnd:   offset + size - 1, // absolute end position
		})
		offset += size
	}
	return ranges
}
// GetCacheKey returns the cache key used for RAR extraction results,
// derived from the file's name and size.
func (nf *NZBFile) GetCacheKey() string {
	key := fmt.Sprintf("rar_%s_%d", nf.Name, nf.Size)
	return key
}
// GetFiles returns copies of all files in the NZB that have not been
// soft-deleted.
func (nzb *NZB) GetFiles() []NZBFile {
	files := make([]NZBFile, 0, len(nzb.Files))
	for i := range nzb.Files {
		if !nzb.Files[i].IsDeleted {
			files = append(files, nzb.Files[i])
		}
	}
	// files is freshly allocated above and holds element copies, so the
	// former capacity-limiting full-slice expression ("avoid aliasing") was
	// a no-op and has been dropped.
	return files
}
// ValidateNZB performs basic validation on NZB content: it checks that the
// content is non-empty and contains the minimal XML markers (<nzb> and at
// least one <file> element) expected of an NZB document. It does not perform
// full XML parsing.
func ValidateNZB(content []byte) error {
	if len(content) == 0 {
		return fmt.Errorf("empty NZB content")
	}
	// Convert once instead of once per check to avoid a duplicate allocation.
	text := string(content)
	if !strings.Contains(text, "<nzb") {
		return fmt.Errorf("invalid NZB format: missing <nzb> tag")
	}
	if !strings.Contains(text, "<file") {
		return fmt.Errorf("invalid NZB format: no files found")
	}
	return nil
}

863
pkg/usenet/parser.go Normal file
View File

@@ -0,0 +1,863 @@
package usenet
import (
"bytes"
"context"
"crypto/sha256"
"encoding/hex"
"fmt"
"github.com/Tensai75/nzbparser"
"github.com/chrisfarms/yenc"
"github.com/nwaples/rardecode/v2"
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/nntp"
"github.com/sirrobot01/decypharr/internal/utils"
"github.com/sourcegraph/conc/pool"
"io"
"path"
"path/filepath"
"regexp"
"sort"
"strings"
"sync"
)
// NZBParser provides a simplified, robust NZB parser
type NZBParser struct {
	logger zerolog.Logger // component-scoped logger ("nzb_parser")
	client *nntp.Client   // NNTP client used for header/body probes
	cache  *SegmentCache  // segment cache; may be nil (NewProcessor passes nil)
}
// FileGroup bundles all NZB entries that belong to one logical on-disk file
// (e.g. every .partNN.rar volume of a single archive).
type FileGroup struct {
	BaseName       string              // group key: filename with part/extension suffixes stripped
	ActualFilename string              // filename detected from segment content, if any
	Type           FileType            // detected type shared by the whole group
	Files          []nzbparser.NzbFile // member files; callers sort as needed
	Groups         map[string]struct{} // union of usenet groups across member files
}
// FileInfo holds size information probed from NNTP article headers
// (see NZBParser.getFileInfo).
type FileInfo struct {
	Size      int64  // total file size extrapolated from first/last segment headers
	ChunkSize int64  // size of one (assumed uniform) segment
	Name      string // filename reported by the first article's header
}
// NewNZBParser creates a new simplified NZB parser backed by the given NNTP
// client and segment cache.
func NewNZBParser(client *nntp.Client, cache *SegmentCache, logger zerolog.Logger) *NZBParser {
	parser := &NZBParser{
		client: client,
		cache:  cache,
		logger: logger.With().Str("component", "nzb_parser").Logger(),
	}
	return parser
}
// FileType classifies a file found inside an NZB.
type FileType int

const (
	FileTypeMedia   FileType = iota // Direct media files (.mkv, .mp4, etc.) // Check internal/utils.IsMediaFile
	FileTypeRar                     // RAR archives (.rar, .r00, .r01, etc.)
	FileTypeArchive                 // Other archives (.7z, .zip, etc.)
	FileTypeIgnore                  // Files to ignore (.nfo, .txt, par2 etc.)
	FileTypeUnknown                 // Undetermined from the name; content probing may resolve it
)
var (
	// RAR file patterns - simplified and more accurate
	rarMainPattern   = regexp.MustCompile(`\.rar$`)          // also matches .partNN.rar, so it subsumes rarVolumePattern
	rarPartPattern   = regexp.MustCompile(`\.r\d{2}$`)       // .r00, .r01, etc.
	rarVolumePattern = regexp.MustCompile(`\.part\d+\.rar$`) // multi-part RAR volumes
	// Extensions carrying no playable content (repair data, metadata, images, subs).
	ignoreExtensions  = []string{".par2", ".sfv", ".nfo", ".jpg", ".png", ".txt", ".srt", ".idx", ".sub"}
	sevenZMainPattern = regexp.MustCompile(`\.7z$`)
	sevenZPartPattern = regexp.MustCompile(`\.7z\.\d{3}$`) // .7z.001 split archives
	// Suffix patterns used by getBaseFilename to reduce a filename to its group key.
	extWithNumberPattern = regexp.MustCompile(`\.[^ "\.]*\.\d+$`)          // e.g. .7z.001
	volPar2Pattern       = regexp.MustCompile(`(?i)\.vol\d+\+\d+\.par2?$`) // PAR2 recovery volumes
	partPattern          = regexp.MustCompile(`(?i)\.part\d+\.[^ "\.]*$`)  // e.g. .part01.rar
	regularExtPattern    = regexp.MustCompile(`\.[^ "\.]*$`)               // plain extension
)
// PositionTracker wraps an io.Reader and counts the total number of bytes
// read through it, so callers can learn the absolute stream offset reached.
type PositionTracker struct {
	reader   io.Reader // underlying source
	position int64     // cumulative bytes read so far
}
// Read forwards to the wrapped reader and advances the byte counter by the
// number of bytes actually read, even when an error accompanies the read.
func (pt *PositionTracker) Read(p []byte) (int, error) {
	count, err := pt.reader.Read(p)
	pt.position += int64(count)
	return count, err
}
// Position returns the total number of bytes read through the tracker so far.
func (pt *PositionTracker) Position() int64 {
	return pt.position
}
// Parse parses raw NZB XML content into the internal NZB representation.
// Files are grouped by base name, classified by type (media, RAR, archive,
// ignore) and assembled into NZBFile entries. The supplied filename takes
// precedence over NZB metadata when naming the result.
func (p *NZBParser) Parse(ctx context.Context, filename string, category string, content []byte) (*NZB, error) {
	// Parse raw XML
	raw, err := nzbparser.Parse(bytes.NewReader(content))
	if err != nil {
		return nil, fmt.Errorf("failed to parse NZB content: %w", err)
	}
	// Create base NZB structure
	nzb := &NZB{
		Files:    []NZBFile{},
		Status:   "parsed",
		Category: category,
		Name:     determineNZBName(filename, raw.Meta),
		Title:    raw.Meta["title"],
		Password: raw.Meta["password"],
	}
	// Group files by base name and type, then build the final file list.
	fileGroups := p.groupFiles(ctx, raw.Files)
	files := p.processFileGroups(ctx, fileGroups, nzb.Password)
	if len(files) == 0 {
		return nil, fmt.Errorf("no valid files found in NZB")
	}
	// Accumulate the total size BEFORE generating the ID: generateID hashes
	// nzb.TotalSize, so computing the ID first (as before) always hashed a
	// zero size and the size never contributed to the ID.
	for i := range files {
		nzb.TotalSize += files[i].Size
	}
	nzb.ID = generateID(nzb)
	for i := range files {
		files[i].NzbID = nzb.ID
		nzb.Files = append(nzb.Files, files[i])
	}
	return nzb, nil
}
// groupFiles classifies every file in the NZB by type and buckets the result
// into FileGroups keyed by base filename. Files whose type cannot be
// determined from the name alone are probed by content (first-segment
// download) in a parallel batch before grouping. Files with no segments are
// dropped since they cannot be downloaded.
func (p *NZBParser) groupFiles(ctx context.Context, files nzbparser.NzbFiles) map[string]*FileGroup {
	var unknownFiles []nzbparser.NzbFile
	var knownFiles []struct {
		file     nzbparser.NzbFile
		fileType FileType
	}
	for _, file := range files {
		// A file without segments cannot be fetched at all.
		if len(file.Segments) == 0 {
			continue
		}
		fileType := p.detectFileType(file.Filename)
		if fileType == FileTypeUnknown {
			unknownFiles = append(unknownFiles, file)
		} else {
			knownFiles = append(knownFiles, struct {
				file     nzbparser.NzbFile
				fileType FileType
			}{file, fileType})
		}
	}
	p.logger.Info().
		Int("known_files", len(knownFiles)).
		Int("unknown_files", len(unknownFiles)).
		Msg("File type detection")
	// Resolve unknown files by downloading and sniffing their first segment.
	unknownResults := p.batchDetectContentTypes(ctx, unknownFiles)
	// Merge known and content-detected files into one list. The anonymous
	// struct type must stay structurally identical to the one returned by
	// batchDetectContentTypes and consumed by groupProcessedFiles.
	allFiles := make([]struct {
		file           nzbparser.NzbFile
		fileType       FileType
		actualFilename string
	}, 0, len(knownFiles)+len(unknownResults))
	// Add known files
	for _, known := range knownFiles {
		allFiles = append(allFiles, struct {
			file           nzbparser.NzbFile
			fileType       FileType
			actualFilename string
		}{known.file, known.fileType, known.file.Filename})
	}
	// Add unknown results
	allFiles = append(allFiles, unknownResults...)
	return p.groupProcessedFiles(allFiles)
}
// batchDetectContentTypes processes unknown files in parallel: each file's
// first segment is downloaded and inspected to detect its real type and
// filename. Files that remain FileTypeUnknown after inspection are dropped
// from the result.
func (p *NZBParser) batchDetectContentTypes(ctx context.Context, unknownFiles []nzbparser.NzbFile) []struct {
	file           nzbparser.NzbFile
	fileType       FileType
	actualFilename string
} {
	if len(unknownFiles) == 0 {
		return nil
	}
	// Use worker pool for parallel processing
	workers := min(len(unknownFiles), 10) // Max 10 concurrent downloads
	workerPool := pool.New().WithMaxGoroutines(workers).WithContext(ctx)
	type result struct {
		file           nzbparser.NzbFile
		fileType       FileType
		actualFilename string
	}
	results := make([]result, len(unknownFiles))
	// No mutex needed: every goroutine writes only its own slot results[i]
	// (disjoint indices), and workerPool.Wait() establishes the
	// happens-before edge for the reads below. The previous mutex and the
	// never-read `index` field were redundant.
	for i, file := range unknownFiles {
		i, file := i, file // Capture loop variables
		workerPool.Go(func(ctx context.Context) error {
			detectedType, actualFilename := p.detectFileTypeByContent(ctx, file)
			results[i] = result{
				file:           file,
				fileType:       detectedType,
				actualFilename: actualFilename,
			}
			return nil // Don't fail the entire batch for one file
		})
	}
	// The pool only returns an error on context cancellation; log it instead
	// of silently discarding the partial results' reason.
	if err := workerPool.Wait(); err != nil {
		p.logger.Warn().Err(err).Msg("Content type detection batch aborted")
		return nil
	}
	// Keep only files whose type could be determined.
	processedFiles := make([]struct {
		file           nzbparser.NzbFile
		fileType       FileType
		actualFilename string
	}, 0, len(results))
	for _, result := range results {
		if result.fileType != FileTypeUnknown {
			processedFiles = append(processedFiles, struct {
				file           nzbparser.NzbFile
				fileType       FileType
				actualFilename string
			}{result.file, result.fileType, result.actualFilename})
		}
	}
	return processedFiles
}
// groupProcessedFiles buckets already-classified files into FileGroups keyed
// by base filename (fast; no I/O). Ignored files and non-RAR archives are
// dropped here.
func (p *NZBParser) groupProcessedFiles(allFiles []struct {
	file           nzbparser.NzbFile
	fileType       FileType
	actualFilename string
}) map[string]*FileGroup {
	groups := make(map[string]*FileGroup)
	for _, item := range allFiles {
		// Skip unwanted files
		if item.fileType == FileTypeIgnore || item.fileType == FileTypeArchive {
			continue
		}
		// Prefer the content-detected filename over the subject-derived one
		// when they differ.
		var groupKey string
		if item.actualFilename != "" && item.actualFilename != item.file.Filename {
			groupKey = p.getBaseFilename(item.actualFilename)
		} else {
			groupKey = item.file.Basefilename
		}
		group, exists := groups[groupKey]
		if !exists {
			group = &FileGroup{
				ActualFilename: item.actualFilename,
				BaseName:       groupKey,
				Type:           item.fileType,
				Files:          []nzbparser.NzbFile{},
				Groups:         make(map[string]struct{}),
			}
			groups[groupKey] = group
		}
		// Update the filename only when content detection produced one; the
		// previous unconditional assignment erased the original filename for
		// files whose detection returned an empty name.
		if item.actualFilename != "" {
			item.file.Filename = item.actualFilename
		}
		group.Files = append(group.Files, item.file)
		for _, g := range item.file.Groups {
			group.Groups[g] = struct{}{}
		}
	}
	return groups
}
// getBaseFilename strips surrounding quotes plus any recognized suffix
// (PAR2 volume, .partNN.*, numbered split extension, or a plain extension)
// to produce the grouping key for a filename. Empty input yields "".
func (p *NZBParser) getBaseFilename(filename string) string {
	if filename == "" {
		return ""
	}
	// Remove surrounding quotes, spaces and dashes first.
	cleaned := strings.Trim(filename, `" -`)
	// Try the suffix patterns from most to least specific; the first match
	// decides what gets stripped.
	for _, pattern := range []*regexp.Regexp{
		volPar2Pattern,       // .volNN+MM.par2 recovery volumes
		partPattern,          // .partNN.<ext> split parts
		extWithNumberPattern, // .<ext>.NNN numbered splits
		regularExtPattern,    // plain extension
	} {
		if pattern.MatchString(cleaned) {
			return pattern.ReplaceAllString(cleaned, "")
		}
	}
	return cleaned
}
// detectFileType performs simplified, name-based file type detection.
// Returns FileTypeUnknown when no known pattern matches, in which case the
// caller may fall back to content sniffing.
func (p *NZBParser) detectFileType(filename string) FileType {
	lower := strings.ToLower(filename)
	switch {
	case p.isMediaFile(lower):
		// Media wins over everything else.
		return FileTypeMedia
	case p.isRarFile(lower):
		return FileTypeRar
	case sevenZMainPattern.MatchString(lower), sevenZPartPattern.MatchString(lower):
		// 7z archives (main and split volumes).
		return FileTypeArchive
	case strings.HasSuffix(lower, ".zip"), strings.HasSuffix(lower, ".tar"),
		strings.HasSuffix(lower, ".gz"), strings.HasSuffix(lower, ".bz2"):
		return FileTypeArchive
	}
	// Check for ignored file types (par2, nfo, subtitles, images, ...).
	for _, ext := range ignoreExtensions {
		if strings.HasSuffix(lower, ext) {
			return FileTypeIgnore
		}
	}
	// Default to unknown type
	return FileTypeUnknown
}
// isRarFile reports whether the (lowercased) filename looks like a RAR
// archive: .rar, .rNN split volumes or .partNN.rar multi-part volumes.
func (p *NZBParser) isRarFile(filename string) bool {
	for _, pattern := range []*regexp.Regexp{rarMainPattern, rarPartPattern, rarVolumePattern} {
		if pattern.MatchString(filename) {
			return true
		}
	}
	return false
}
// isMediaFile reports whether the filename has a recognized media extension;
// it delegates to internal/utils.IsMediaFile.
func (p *NZBParser) isMediaFile(filename string) bool {
	return utils.IsMediaFile(filename)
}
// processFileGroups processes every file group concurrently and returns the
// resulting NZBFile entries. Groups that produce nil (e.g. plain archives)
// are filtered out of the result.
func (p *NZBParser) processFileGroups(ctx context.Context, groups map[string]*FileGroup, password string) []NZBFile {
	if len(groups) == 0 {
		return nil
	}
	// Buffered so workers never block on send.
	results := make(chan *NZBFile, len(groups))
	var wg sync.WaitGroup
	wg.Add(len(groups))
	for _, group := range groups {
		go func(g *FileGroup) {
			defer wg.Done()
			// nil results are filtered out below.
			results <- p.processFileGroup(ctx, g, password)
		}(group)
	}
	// Close the channel once every worker has delivered its result.
	go func() {
		wg.Wait()
		close(results)
	}()
	var files []NZBFile
	for file := range results {
		if file != nil {
			files = append(files, *file)
		}
	}
	return files
}
// processFileGroup dispatches a single group on its detected type. Plain
// (non-RAR) archives are dropped; unknown types are treated as media files
// with conservative size estimation.
func (p *NZBParser) processFileGroup(ctx context.Context, group *FileGroup, password string) *NZBFile {
	switch group.Type {
	case FileTypeMedia:
		return p.processMediaFile(group, password)
	case FileTypeRar:
		return p.processRarArchive(ctx, group, password)
	case FileTypeArchive:
		// Non-RAR archives are not streamable here; skip them.
		return nil
	default:
		// Treat unknown files as media files with conservative estimation
		return p.processMediaFile(group, password)
	}
}
// processMediaFile builds one NZBFile from a group of plain media files: the
// segments of every file (ordered by file number, then segment number) are
// concatenated with estimated decoded offsets, then size and segment size
// are refined via NNTP header probing when possible.
func (p *NZBParser) processMediaFile(group *FileGroup, password string) *NZBFile {
	if len(group.Files) == 0 {
		return nil
	}
	// Sort files for consistent ordering
	sort.Slice(group.Files, func(i, j int) bool {
		return group.Files[i].Number < group.Files[j].Number
	})
	// Determine extension
	ext := p.determineExtension(group)
	file := &NZBFile{
		Name:         group.BaseName + ext,
		Groups:       p.getGroupsList(group.Groups),
		Segments:     []NZBSegment{},
		Password:     password,
		IsRarArchive: false,
	}
	// Guard against an empty group list: indexing file.Groups[0] directly
	// (as before) panics when no usenet groups were collected.
	primaryGroup := ""
	if len(file.Groups) > 0 {
		primaryGroup = file.Groups[0]
	}
	currentOffset := int64(0)
	// Approximate yEnc overhead: decoded payload ~96.8% of encoded bytes.
	ratio := 0.968
	for _, nzbFile := range group.Files {
		sort.Slice(nzbFile.Segments, func(i, j int) bool {
			return nzbFile.Segments[i].Number < nzbFile.Segments[j].Number
		})
		for _, segment := range nzbFile.Segments {
			decodedSize := int64(float64(segment.Bytes) * ratio)
			seg := NZBSegment{
				Number:      segment.Number,
				MessageID:   segment.Id,
				Bytes:       int64(segment.Bytes),
				StartOffset: currentOffset,
				EndOffset:   currentOffset + decodedSize,
				Group:       primaryGroup,
			}
			file.Segments = append(file.Segments, seg)
			currentOffset += decodedSize
		}
	}
	fileInfo, err := p.getFileInfo(context.Background(), group)
	if err != nil {
		p.logger.Warn().Err(err).Msg("Failed to get file info, using fallback")
		file.Size = currentOffset
		// Average segment size; guard the division against zero segments.
		if len(file.Segments) > 0 {
			file.SegmentSize = currentOffset / int64(len(file.Segments))
		}
	} else {
		file.Size = fileInfo.Size
		file.SegmentSize = fileInfo.ChunkSize
	}
	return file
}
// processRarArchive builds one NZBFile for a multi-volume RAR group: the
// segments of every volume (sorted by filename, then segment number) are
// concatenated, and when possible the archive headers are probed for the
// inner media file's real name, size and start offset.
func (p *NZBParser) processRarArchive(ctx context.Context, group *FileGroup, password string) *NZBFile {
	if len(group.Files) == 0 {
		return nil
	}
	// Sort RAR files by part number
	sort.Slice(group.Files, func(i, j int) bool {
		return group.Files[i].Filename < group.Files[j].Filename
	})
	// Try to extract RAR info during parsing for better accuracy
	extractedInfo := p.extractRarInfo(ctx, group, password)
	filename := group.BaseName + ".mkv" // Default extension
	if extractedInfo != nil && extractedInfo.FileName != "" {
		filename = extractedInfo.FileName
	}
	filename = utils.RemoveInvalidChars(path.Base(filename))
	file := &NZBFile{
		Name:         filename,
		Groups:       p.getGroupsList(group.Groups),
		Segments:     []NZBSegment{},
		Password:     password,
		IsRarArchive: true,
	}
	// Guard against an empty group list: indexing file.Groups[0] directly
	// (as before) panics when no usenet groups were collected.
	primaryGroup := ""
	if len(file.Groups) > 0 {
		primaryGroup = file.Groups[0]
	}
	// Build segments using the ~96.8% yEnc decoded-size approximation.
	ratio := 0.968
	currentOffset := int64(0)
	for _, nzbFile := range group.Files {
		sort.Slice(nzbFile.Segments, func(i, j int) bool {
			return nzbFile.Segments[i].Number < nzbFile.Segments[j].Number
		})
		for _, segment := range nzbFile.Segments {
			decodedSize := int64(float64(segment.Bytes) * ratio)
			seg := NZBSegment{
				Number:      segment.Number,
				MessageID:   segment.Id,
				Bytes:       int64(segment.Bytes),
				StartOffset: currentOffset,
				EndOffset:   currentOffset + decodedSize,
				Group:       primaryGroup,
			}
			file.Segments = append(file.Segments, seg)
			currentOffset += decodedSize
		}
	}
	if extractedInfo != nil {
		// Accurate values recovered from the RAR headers.
		file.Size = extractedInfo.FileSize
		file.SegmentSize = extractedInfo.SegmentSize
		file.StartOffset = extractedInfo.EstimatedStartOffset
	} else {
		file.Size = currentOffset
		// Average segment size; guard the division against zero segments.
		if len(file.Segments) > 0 {
			file.SegmentSize = currentOffset / int64(len(file.Segments))
		}
		file.StartOffset = 0 // No accurate start offset available
	}
	return file
}
// getFileInfo probes NNTP article headers to derive accurate sizing for a
// group: it fetches the header of the first file's first segment and the
// last file's last segment, then extrapolates the total size assuming every
// non-final file matches the first file's reported size.
//
// NOTE(review): this assumes DownloadHeader's Size field is a per-file size
// and that files are uniform except the last — confirm against the nntp
// client's semantics.
func (p *NZBParser) getFileInfo(ctx context.Context, group *FileGroup) (*FileInfo, error) {
	if len(group.Files) == 0 {
		return nil, fmt.Errorf("no files in group %s", group.BaseName)
	}
	// Sort files
	sort.Slice(group.Files, func(i, j int) bool {
		return group.Files[i].Filename < group.Files[j].Filename
	})
	firstFile := group.Files[0]
	lastFile := group.Files[len(group.Files)-1]
	firstInfo, err := p.client.DownloadHeader(ctx, firstFile.Segments[0].Id)
	if err != nil {
		return nil, err
	}
	lastInfo, err := p.client.DownloadHeader(ctx, lastFile.Segments[len(lastFile.Segments)-1].Id)
	if err != nil {
		p.logger.Warn().Err(err).Msg("Failed to download last segment header")
		return nil, err
	}
	// Inclusive span reported by the header: End - Begin + 1.
	chunkSize := firstInfo.End - (firstInfo.Begin - 1)
	totalFileSize := (int64(len(group.Files)-1) * firstInfo.Size) + lastInfo.Size
	return &FileInfo{
		Size:      totalFileSize,
		ChunkSize: chunkSize,
		Name:      firstInfo.Name,
	}, nil
}
// extractRarInfo downloads the first few segments of the group's first RAR
// volume, probes sizes via NNTP headers, and analyzes the RAR structure to
// recover the inner media file's name, size and data offset. Returns nil on
// any failure so callers fall back to estimates.
func (p *NZBParser) extractRarInfo(ctx context.Context, group *FileGroup, password string) *ExtractedFileInfo {
	if len(group.Files) == 0 || len(group.Files[0].Segments) == 0 {
		return nil
	}
	firstRarFile := group.Files[0]
	// A handful of leading segments is enough to cover the archive headers.
	segmentsToDownload := min(5, len(firstRarFile.Segments))
	headerBuffer, err := p.downloadRarHeaders(ctx, firstRarFile.Segments[:segmentsToDownload])
	if err != nil {
		p.logger.Warn().Err(err).Msg("Failed to download RAR headers")
		return nil
	}
	fileInfo, err := p.getFileInfo(ctx, group)
	if err != nil {
		p.logger.Warn().Err(err).Msg("Failed to get file info for RAR group")
		return nil
	}
	// Pass the actual RAR size to the analysis function
	return p.analyzeRarStructure(headerBuffer, password, fileInfo)
}
// analyzeRarStructure walks the downloaded header bytes with a RAR decoder
// wrapped in a PositionTracker. When the first media entry looks stored
// (ratio of archive size to unpacked size > 0.95) the tracker's position
// gives an accurate data offset for direct range mapping. Returns nil when
// no such entry is found or the headers cannot be parsed.
func (p *NZBParser) analyzeRarStructure(headerData []byte, password string, fileInfo *FileInfo) *ExtractedFileInfo {
	reader := bytes.NewReader(headerData)
	tracker := &PositionTracker{reader: reader, position: 0}
	rarReader, err := rardecode.NewReader(tracker, rardecode.Password(password))
	if err != nil {
		return nil
	}
	for {
		header, err := rarReader.Next()
		if err != nil {
			// EOF, or a parse failure on the truncated header buffer: stop.
			break
		}
		if !header.IsDir && p.isMediaFile(header.Name) {
			// Ratio near 1 suggests "store" (uncompressed) RAR, where byte
			// offsets inside the archive map directly onto the media file.
			compressionRatio := float64(fileInfo.Size) / float64(header.UnPackedSize)
			if compressionRatio > 0.95 {
				fileDataOffset := tracker.Position()
				p.logger.Info().
					Str("file", header.Name).
					Int64("accurate_offset", fileDataOffset).
					Float64("compression_ratio", compressionRatio).
					Msg("Found accurate store RAR offset using position tracking")
				return &ExtractedFileInfo{
					FileName:             header.Name,
					FileSize:             header.UnPackedSize,
					SegmentSize:          fileInfo.ChunkSize,
					EstimatedStartOffset: fileDataOffset,
				}
			}
			// First media entry is compressed; offset mapping won't work.
			break
		}
		// Skip file content - this advances the tracker position. The copy
		// error is deliberately ignored (best-effort): a failure simply ends
		// the scan on the next Next().
		io.Copy(io.Discard, rarReader)
	}
	return nil
}
// determineExtension picks the first non-empty filename extension found
// among the group's files, defaulting to ".mkv" when none is present.
func (p *NZBParser) determineExtension(group *FileGroup) string {
	for i := range group.Files {
		if ext := filepath.Ext(group.Files[i].Filename); ext != "" {
			return ext
		}
	}
	return ".mkv" // Default
}
// getGroupsList converts the group set into a sorted slice. Sorting makes
// the output deterministic: Go map iteration order is random, and callers
// use the first entry (file.Groups[0]) as each segment's download group, so
// the unsorted version picked a different group on every parse.
func (p *NZBParser) getGroupsList(groups map[string]struct{}) []string {
	result := make([]string, 0, len(groups))
	for g := range groups {
		result = append(result, g)
	}
	sort.Strings(result)
	return result
}
// downloadRarHeaders fetches and yEnc-decodes segments one by one until
// roughly 32KB of decoded data is buffered — enough for RAR headers.
// Individual segment failures are tolerated (skipped) unless the NNTP error
// is non-retryable; an entirely empty result is an error.
func (p *NZBParser) downloadRarHeaders(ctx context.Context, segments []nzbparser.NzbSegment) ([]byte, error) {
	var headerBuffer bytes.Buffer
	for _, segment := range segments {
		conn, cleanup, err := p.client.GetConnection(ctx)
		if err != nil {
			// No connection available for this segment; try the next one.
			continue
		}
		data, err := conn.GetBody(segment.Id)
		cleanup()
		if err != nil {
			if !nntp.IsRetryableError(err) {
				return nil, err
			}
			continue
		}
		if len(data) == 0 {
			continue
		}
		// yEnc decode
		part, err := nntp.DecodeYenc(bytes.NewReader(data))
		if err != nil || part == nil || len(part.Body) == 0 {
			p.logger.Warn().Err(err).Str("segment_id", segment.Id).Msg("Failed to decode RAR header segment")
			continue
		}
		headerBuffer.Write(part.Body)
		// Stop if we have enough data (typically first segment is enough for headers)
		if headerBuffer.Len() > 32768 { // 32KB should be plenty for RAR headers
			break
		}
	}
	if headerBuffer.Len() == 0 {
		return nil, fmt.Errorf("no valid header data downloaded")
	}
	return headerBuffer.Bytes(), nil
}
// detectFileTypeByContent downloads and decodes a file's first segment to
// determine its type. The filename embedded in the yEnc header is preferred
// when it resolves to a known type; otherwise magic-byte sniffing of the
// decoded body decides. Returns the detected type and the embedded filename
// (which may be empty).
func (p *NZBParser) detectFileTypeByContent(ctx context.Context, file nzbparser.NzbFile) (FileType, string) {
	if len(file.Segments) == 0 {
		return FileTypeUnknown, ""
	}
	// Download first segment to check file signature
	firstSegment := file.Segments[0]
	data, err := p.downloadFirstSegment(ctx, firstSegment)
	if err != nil {
		p.logger.Warn().Err(err).Msg("Failed to download first segment for content detection")
		return FileTypeUnknown, ""
	}
	// Prefer the filename carried in the yEnc header when it is decisive.
	if data.Name != "" {
		fileType := p.detectFileType(data.Name)
		if fileType != FileTypeUnknown {
			return fileType, data.Name
		}
	}
	return p.detectFileTypeFromContent(data.Body), data.Name
}
// detectFileTypeFromContent classifies decoded segment data by magic bytes:
// RAR 4.x/5.x, ZIP and 7z archives, plus Matroska, MP4/MOV, AVI, MPEG PS/ES
// and MPEG-TS media signatures are recognized.
func (p *NZBParser) detectFileTypeFromContent(data []byte) FileType {
	if len(data) == 0 {
		return FileTypeUnknown
	}
	// Check for RAR signatures (both RAR 4.x and 5.x)
	if len(data) >= 7 && bytes.Equal(data[:7], []byte("Rar!\x1A\x07\x00")) {
		return FileTypeRar
	}
	if len(data) >= 8 && bytes.Equal(data[:8], []byte("Rar!\x1A\x07\x01\x00")) {
		return FileTypeRar
	}
	// Check for ZIP signature (local file header)
	if len(data) >= 4 && bytes.Equal(data[:4], []byte{0x50, 0x4B, 0x03, 0x04}) {
		return FileTypeArchive
	}
	// Check for 7z signature
	if len(data) >= 6 && bytes.Equal(data[:6], []byte{0x37, 0x7A, 0xBC, 0xAF, 0x27, 0x1C}) {
		return FileTypeArchive
	}
	// Check for common media file signatures
	if len(data) >= 4 {
		// Matroska (MKV/WebM) EBML header
		if bytes.Equal(data[:4], []byte{0x1A, 0x45, 0xDF, 0xA3}) {
			return FileTypeMedia
		}
		// MP4/MOV (check for 'ftyp' at offset 4)
		if len(data) >= 8 && bytes.Equal(data[4:8], []byte("ftyp")) {
			return FileTypeMedia
		}
		// AVI (RIFF container with "AVI " form type)
		if len(data) >= 12 && bytes.Equal(data[:4], []byte("RIFF")) &&
			bytes.Equal(data[8:12], []byte("AVI ")) {
			return FileTypeMedia
		}
		// MPEG-1/2 Program Stream pack header
		if bytes.Equal(data[:4], []byte{0x00, 0x00, 0x01, 0xBA}) {
			return FileTypeMedia
		}
		// MPEG-1/2 Video sequence header
		if bytes.Equal(data[:4], []byte{0x00, 0x00, 0x01, 0xB3}) {
			return FileTypeMedia
		}
	}
	// MPEG-TS: sync byte 0x47 repeating every 188 bytes. Reading data[188]
	// requires len(data) > 188 — the previous `>= 188` bound was off by one
	// and panicked on a buffer of exactly 188 bytes.
	if data[0] == 0x47 && len(data) > 188 && data[188] == 0x47 {
		return FileTypeMedia
	}
	return FileTypeUnknown
}
// downloadFirstSegment fetches a single segment body and yEnc-decodes it,
// returning the decoded part (which carries both the embedded filename and
// the payload bytes).
func (p *NZBParser) downloadFirstSegment(ctx context.Context, segment nzbparser.NzbSegment) (*yenc.Part, error) {
	conn, cleanup, err := p.client.GetConnection(ctx)
	if err != nil {
		return nil, err
	}
	defer cleanup()
	data, err := conn.GetBody(segment.Id)
	if err != nil {
		return nil, err
	}
	// yEnc decode. Wrap instead of masking the decode error so the caller's
	// log shows the real cause (the old code collapsed every failure into a
	// bare "failed to decode segment").
	part, err := nntp.DecodeYenc(bytes.NewReader(data))
	if err != nil {
		return nil, fmt.Errorf("failed to decode segment: %w", err)
	}
	if part == nil {
		return nil, fmt.Errorf("failed to decode segment: empty part")
	}
	// Return both the filename and decoded data
	return part, nil
}
// calculateTotalArchiveSize sums the encoded byte counts of every segment of
// every RAR part in the group.
func (p *NZBParser) calculateTotalArchiveSize(group *FileGroup) int64 {
	total := int64(0)
	for i := range group.Files {
		for j := range group.Files[i].Segments {
			total += int64(group.Files[i].Segments[j].Bytes)
		}
	}
	return total
}
// determineNZBName derives the NZB's display name: a non-empty filename wins
// (with its trailing extension stripped); otherwise the metadata "name" and
// then "title" entries are used. Invalid filesystem characters are removed.
func determineNZBName(filename string, meta map[string]string) string {
	// Prefer filename if it exists
	if filename != "" {
		// TrimSuffix strips only the trailing extension; the previous
		// strings.Replace(..., 1) removed the FIRST occurrence of the
		// extension anywhere in the name (e.g. "my.nzb.backup.nzb" became
		// "my.backup.nzb" instead of "my.nzb.backup").
		filename = strings.TrimSuffix(filename, filepath.Ext(filename))
	} else if name := meta["name"]; name != "" {
		filename = name
	} else if title := meta["title"]; title != "" {
		filename = title
	}
	return utils.RemoveInvalidChars(filename)
}
// generateID derives a stable 16-hex-character identifier for an NZB from
// its name, total size, category and password.
func generateID(nzb *NZB) string {
	hasher := sha256.New()
	for _, part := range []string{
		nzb.Name,
		fmt.Sprintf("%d", nzb.TotalSize),
		nzb.Category,
		nzb.Password,
	} {
		hasher.Write([]byte(part))
	}
	return hex.EncodeToString(hasher.Sum(nil))[:16]
}

145
pkg/usenet/processor.go Normal file
View File

@@ -0,0 +1,145 @@
package usenet
import (
"context"
"fmt"
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/nntp"
"github.com/sirrobot01/decypharr/pkg/arr"
"path/filepath"
"time"
)
// Processor handles NZB processing and download orchestration
type Processor struct {
	store          Store           // NZB store and download queue
	parser         *NZBParser      // parses raw NZB XML into NZB structures
	downloadWorker *DownloadWorker // executes queued download jobs
	logger         zerolog.Logger  // component-scoped logger
	client         *nntp.Client    // shared NNTP client
}
// ProcessRequest represents a request to process an NZB
type ProcessRequest struct {
	NZBContent  []byte   // raw NZB XML
	Name        string   // original NZB filename, used to name the result
	Arr         *arr.Arr // originating *arr instance; its Name is used as category/subdirectory
	Action      string   // "download", "symlink", "none"
	DownloadDir string   // base download directory
}
// NewProcessor creates a usenet processor wired to the given store and NNTP
// client, and initializes its download worker and NZB parser. The returned
// error is currently always nil but kept for future initialization steps.
func NewProcessor(cfg *config.Usenet, logger zerolog.Logger, store Store, client *nntp.Client) (*Processor, error) {
	// Parameter renamed from `config` to `cfg`: the old name shadowed the
	// imported `config` package inside the function body.
	processor := &Processor{
		store:  store,
		logger: logger.With().Str("component", "usenet-processor").Logger(),
		client: client,
	}
	// Initialize download worker and parser (no segment cache yet).
	processor.downloadWorker = NewDownloadWorker(cfg, client, processor)
	processor.parser = NewNZBParser(client, nil, processor.logger)
	return processor, nil
}
// Process validates and processes an NZB for download/streaming. It returns
// the parsed (or already-existing) NZB, or an error when the content is
// empty, structurally invalid, or processing fails.
func (p *Processor) Process(ctx context.Context, req *ProcessRequest) (*NZB, error) {
	if len(req.NZBContent) == 0 {
		return nil, fmt.Errorf("NZB content is empty")
	}
	// Cheap structural validation before the full parse.
	if err := ValidateNZB(req.NZBContent); err != nil {
		return nil, fmt.Errorf("invalid NZB content: %w", err)
	}
	nzb, err := p.process(ctx, req)
	if err != nil {
		p.logger.Error().Err(err).Msg("Failed to process NZB content")
		return nil, fmt.Errorf("failed to process NZB content: %w", err)
	}
	return nzb, nil
}
// process parses the NZB content, deduplicates it against the store,
// registers it in the queue and launches the download job in the background.
func (p *Processor) process(ctx context.Context, req *ProcessRequest) (*NZB, error) {
	// Guard req.Arr before dereferencing: the DownloadDir logic below treats
	// req.Arr as optional (nil-checked), so the previous unconditional
	// req.Arr.Name here would panic for a nil Arr.
	category := ""
	if req.Arr != nil {
		category = req.Arr.Name
	}
	nzb, err := p.parser.Parse(ctx, req.Name, category, req.NZBContent)
	if err != nil {
		p.logger.Error().Err(err).Msg("Failed to parse NZB content")
		return nil, fmt.Errorf("failed to parse NZB content: %w", err)
	}
	if nzb == nil {
		p.logger.Error().Msg("Parsed NZB is nil")
		return nil, fmt.Errorf("parsed NZB is nil")
	}
	p.logger.Info().Str("nzb_id", nzb.ID).Msg("Successfully parsed NZB content")
	// Deduplicate: identical NZBs hash to the same ID.
	if existing := p.store.Get(nzb.ID); existing != nil {
		p.logger.Info().Str("nzb_id", nzb.ID).Msg("NZB already exists")
		return existing, nil
	}
	p.logger.Info().Str("nzb_id", nzb.ID).Msg("Creating new NZB download job")
	downloadDir := req.DownloadDir
	if req.Arr != nil {
		downloadDir = filepath.Join(downloadDir, req.Arr.Name)
	}
	job := &DownloadJob{
		NZB:         nzb,
		Action:      req.Action,
		DownloadDir: downloadDir,
		// The callback only logs the outcome; state updates happen elsewhere.
		Callback: func(completedNZB *NZB, err error) {
			if err != nil {
				p.logger.Error().Err(err).Str("nzb_id", completedNZB.ID).Msg("Download job failed")
				return
			}
			p.logger.Info().Str("nzb_id", completedNZB.ID).Msg("Download job completed successfully")
		},
	}
	// Availability pre-check is currently disabled:
	//if err := p.downloadWorker.CheckAvailability(ctx, job); err != nil {
	//	return nil, fmt.Errorf("availability check failed for NZB %s: %w", nzb.ID, err)
	//}
	// Mark NZB as downloaded (accepted) but not yet completed.
	nzb.Downloaded = true
	nzb.AddedOn = time.Now()
	p.store.AddToQueue(nzb)
	if err := p.store.Add(nzb); err != nil {
		return nil, err
	}
	p.logger.Info().Str("nzb_id", nzb.ID).Msg("NZB added to queue")
	// NOTE(review): the job runs on the request-scoped ctx; if the caller
	// cancels it after this function returns, the background download is
	// cancelled too — confirm this is intended.
	go func() {
		if err := p.downloadWorker.Process(ctx, job); err != nil {
			p.logger.Error().Err(err).Str("nzb_id", nzb.ID).Msg("Failed to submit download job")
		}
	}()
	return nzb, nil
}

336
pkg/usenet/rar.go Normal file
View File

@@ -0,0 +1,336 @@
package usenet
import (
"bytes"
"context"
"fmt"
"github.com/nwaples/rardecode/v2"
"github.com/sirrobot01/decypharr/internal/utils"
"io"
"strings"
"time"
)
// RarParser extracts byte ranges of media files from RAR archives whose
// volumes are streamed segment-by-segment from usenet.
type RarParser struct {
	streamer *Streamer // streams NZB segment ranges into the RAR decoder
}
// NewRarParser creates a RarParser that reads archive data through the given
// Streamer.
func NewRarParser(s *Streamer) *RarParser {
	return &RarParser{streamer: s}
}
// ExtractFileRange writes bytes [start, end] of the media file inside the
// RAR archive described by file to writer. It first probes the archive
// headers for the target file's name and size, then streams only the
// segment window estimated to contain the requested range.
func (p *RarParser) ExtractFileRange(ctx context.Context, file *NZBFile, password string, start, end int64, writer io.Writer) error {
	info, err := p.getFileInfo(ctx, file, password)
	if err != nil {
		return fmt.Errorf("failed to get file info: %w", err)
	}
	requiredSegments := p.calculateSmartSegmentRanges(file, info, start, end)
	return p.extract(ctx, requiredSegments, password, info.FileName, start, end, writer)
}
// calculateSmartSegmentRanges estimates which window of the archive's
// segments must be streamed to decode bytes [start, end] of the inner file.
// File offsets are mapped to archive offsets with a fixed 10% overhead
// factor, and both ends are padded generously to cover RAR headers/footers
// and boundary slop.
//
// NOTE(review): assumes fileInfo.ArchiveSize > 0; a zero value makes the
// float divisions produce Inf before index clamping — confirm callers always
// supply a probed size.
func (p *RarParser) calculateSmartSegmentRanges(file *NZBFile, fileInfo *ExtractedFileInfo, start, end int64) []SegmentRange {
	totalSegments := len(file.Segments)
	// For store compression, be more conservative with seeking
	compressionOverhead := 1.1 // Increase to 10% overhead
	estimatedArchiveStart := int64(float64(start) * compressionOverhead)
	estimatedArchiveEnd := int64(float64(end) * compressionOverhead)
	// Proportionally map archive offsets onto segment indices.
	startSegmentIndex := int(float64(estimatedArchiveStart) / float64(fileInfo.ArchiveSize) * float64(totalSegments))
	endSegmentIndex := int(float64(estimatedArchiveEnd) / float64(fileInfo.ArchiveSize) * float64(totalSegments))
	// More conservative buffers for seeking
	if start > 0 {
		// For seeking, always include more context
		headerBuffer := min(10, startSegmentIndex) // Up to 10 segments back
		startSegmentIndex = max(0, startSegmentIndex-headerBuffer)
	} else {
		startSegmentIndex = 0
	}
	// Larger end buffer for segment boundaries and RAR footer
	endBuffer := 10 + int(float64(totalSegments)*0.02) // 2% of total segments as buffer
	endSegmentIndex = min(totalSegments-1, endSegmentIndex+endBuffer)
	// Ensure minimum segment count for seeking
	minSegmentsForSeek := 20
	if endSegmentIndex-startSegmentIndex < minSegmentsForSeek {
		endSegmentIndex = min(totalSegments-1, startSegmentIndex+minSegmentsForSeek)
	}
	return convertSegmentIndicesToRanges(file, startSegmentIndex, endSegmentIndex)
}
// extract streams the given segment ranges through an io.Pipe into a RAR
// decoder running in one goroutine, while a second goroutine feeds the pipe.
// The [start, end] byte window of the target file is copied to writer. When
// the exact target name is not found, the first media file encountered is
// accepted instead.
func (p *RarParser) extract(ctx context.Context, segmentRanges []SegmentRange, password, targetFileName string, start, end int64, writer io.Writer) error {
	pipeReader, pipeWriter := io.Pipe()
	extractionErr := make(chan error, 1)
	streamingErr := make(chan error, 1)
	// RAR extraction goroutine
	go func() {
		defer func() {
			pipeReader.Close()
			if r := recover(); r != nil {
				extractionErr <- fmt.Errorf("extraction panic: %v", r)
			}
		}()
		rarReader, err := rardecode.NewReader(pipeReader, rardecode.Password(password))
		if err != nil {
			extractionErr <- fmt.Errorf("failed to create RAR reader: %w", err)
			return
		}
		found := false
		for {
			// Bail out promptly when the caller cancels.
			select {
			case <-ctx.Done():
				extractionErr <- ctx.Err()
				return
			default:
			}
			header, err := rarReader.Next()
			if err == io.EOF {
				if !found {
					extractionErr <- fmt.Errorf("target file %s not found in downloaded segments", targetFileName)
				} else {
					extractionErr <- fmt.Errorf("reached EOF before completing range extraction")
				}
				return
			}
			if err != nil {
				extractionErr <- fmt.Errorf("failed to read RAR header: %w", err)
				return
			}
			if header.Name == targetFileName || utils.IsMediaFile(header.Name) {
				found = true
				err = p.extractRangeFromReader(ctx, rarReader, start, end, writer)
				extractionErr <- err
				return
			} else if !header.IsDir {
				// Skip non-target file content to reach the next header.
				err = p.skipFileEfficiently(ctx, rarReader)
				if err != nil && ctx.Err() == nil {
					extractionErr <- fmt.Errorf("failed to skip file %s: %w", header.Name, err)
					return
				}
			}
		}
	}()
	// Streaming goroutine
	go func() {
		defer pipeWriter.Close()
		err := p.streamer.stream(ctx, segmentRanges, pipeWriter)
		streamingErr <- err
	}()
	// Wait with longer timeout for seeking operations
	select {
	case err := <-extractionErr:
		return err
	case err := <-streamingErr:
		if err != nil && !p.isSkippableError(err) {
			return fmt.Errorf("segment streaming failed: %w", err)
		}
		// Streaming finished first; give the extractor time to drain the pipe.
		select {
		case err := <-extractionErr:
			return err
		case <-time.After(30 * time.Second): // Increased from 5 seconds
			return fmt.Errorf("extraction timeout after 30 seconds")
		}
	case <-ctx.Done():
		return ctx.Err()
	}
}
// extractRangeFromReader skips to the start offset within the decoded file
// stream and copies the [start, end] window (inclusive) to writer. EOF while
// copying is tolerated: a short range is not an error.
func (p *RarParser) extractRangeFromReader(ctx context.Context, reader io.Reader, start, end int64, writer io.Writer) error {
	// Skip to the start position efficiently.
	if start > 0 {
		skipped, err := p.smartSkip(ctx, reader, start)
		if err != nil {
			return fmt.Errorf("failed to skip to position %d (skipped %d): %w", start, skipped, err)
		}
	}
	// Copy the requested (inclusive) range.
	total := end - start + 1
	written, err := p.smartCopy(ctx, writer, reader, total)
	if err != nil && err != io.EOF {
		return fmt.Errorf("failed to copy range (copied %d/%d): %w", written, total, err)
	}
	return nil
}
// smartSkip discards up to bytesToSkip bytes from reader using a 64 KiB
// scratch buffer, honoring context cancellation. It returns the number of
// bytes actually skipped; hitting EOF early is not reported as an error.
func (p *RarParser) smartSkip(ctx context.Context, reader io.Reader, bytesToSkip int64) (int64, error) {
	const skipBufferSize = 64 * 1024 // Larger buffer for skipping
	scratch := make([]byte, skipBufferSize)
	skipped := int64(0)
	for skipped < bytesToSkip {
		if err := ctx.Err(); err != nil {
			return skipped, err
		}
		// Never read past the requested amount.
		chunk := int64(skipBufferSize)
		if remaining := bytesToSkip - skipped; remaining < chunk {
			chunk = remaining
		}
		n, err := reader.Read(scratch[:chunk])
		skipped += int64(n)
		if err == io.EOF {
			break
		}
		if err != nil {
			return skipped, err
		}
	}
	return skipped, nil
}
// smartCopy copies up to bytesToCopy bytes from src to dst in 32 KiB chunks,
// honoring context cancellation. It returns the number of bytes written to
// dst; a short source (EOF) ends the copy without an error.
func (p *RarParser) smartCopy(ctx context.Context, dst io.Writer, src io.Reader, bytesToCopy int64) (int64, error) {
	const copyBufferSize = 32 * 1024
	scratch := make([]byte, copyBufferSize)
	copied := int64(0)
	for copied < bytesToCopy {
		if err := ctx.Err(); err != nil {
			return copied, err
		}
		// Never read past the requested amount.
		chunk := int64(copyBufferSize)
		if remaining := bytesToCopy - copied; remaining < chunk {
			chunk = remaining
		}
		n, readErr := src.Read(scratch[:chunk])
		if n > 0 {
			written, writeErr := dst.Write(scratch[:n])
			if writeErr != nil {
				return copied, writeErr
			}
			copied += int64(written)
		}
		if readErr == io.EOF {
			break
		}
		if readErr != nil {
			return copied, readErr
		}
	}
	return copied, nil
}
// skipFileEfficiently drains the current RAR entry by skipping a practically
// unbounded number of bytes; hitting the entry's end is the expected outcome.
//
// NOTE(review): smartSkip converts EOF into a nil error before returning, so
// the io.EOF check below appears to be dead code — confirm and simplify.
func (p *RarParser) skipFileEfficiently(ctx context.Context, reader io.Reader) error {
	_, err := p.smartSkip(ctx, reader, 1<<62) // Very large number
	if err == io.EOF {
		return nil // EOF is expected when skipping
	}
	return err
}
// getFileInfo downloads just enough leading segments of the archive to read
// the RAR headers, then scans the entries for the first media file and
// returns its name, unpacked size and the estimated total archive size.
// Returns an error when the headers cannot be fetched, the RAR reader
// cannot be created (e.g. wrong password), or no media file is present.
func (p *RarParser) getFileInfo(ctx context.Context, file *NZBFile, password string) (*ExtractedFileInfo, error) {
	headerSegments := p.getMinimalHeaders(file)
	var headerBuffer bytes.Buffer
	if err := p.streamer.stream(ctx, headerSegments, &headerBuffer); err != nil {
		return nil, fmt.Errorf("failed to download headers: %w", err)
	}
	reader := bytes.NewReader(headerBuffer.Bytes())
	rarReader, err := rardecode.NewReader(reader, rardecode.Password(password))
	if err != nil {
		return nil, fmt.Errorf("failed to create RAR reader (check password): %w", err)
	}
	totalArchiveSize := p.calculateTotalSize(file.SegmentSize, file.Segments)
	for {
		header, err := rarReader.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			// Fix: a persistent decode error previously hit "continue",
			// looping forever because Next keeps returning the same error.
			// Stop scanning instead; truncated data is expected here since
			// only a few leading segments were downloaded.
			break
		}
		if !header.IsDir && utils.IsMediaFile(header.Name) {
			return &ExtractedFileInfo{
				FileName:    header.Name,
				FileSize:    header.UnPackedSize,
				ArchiveSize: totalArchiveSize,
			}, nil
		}
	}
	return nil, fmt.Errorf("no media file found in RAR archive")
}
// getMinimalHeaders returns segment ranges covering only the first few
// segments of the file — enough for rardecode to read the archive headers
// (and verify the password) without downloading the whole archive.
func (p *RarParser) getMinimalHeaders(file *NZBFile) []SegmentRange {
	const headerSegmentCount = 4 // minimal for password+headers
	n := len(file.Segments)
	if n > headerSegmentCount {
		n = headerSegmentCount
	}
	return file.ConvertToSegmentRanges(file.Segments[:n])
}
// calculateTotalSize estimates the total archive size from its segments.
// When segmentSize is positive, every segment except the last is assumed to
// have that uniform size; the last segment always contributes its actual
// byte count. When segmentSize is not set, each segment's actual size is
// used.
func (p *RarParser) calculateTotalSize(segmentSize int64, segments []NZBSegment) int64 {
	var total int64
	last := len(segments) - 1
	for i, seg := range segments {
		size := segmentSize
		// Fix: fall back to the per-segment actual size. The old code
		// assigned the FIRST segment's size to the loop-carried variable
		// once, silently applying it to every later segment.
		if size <= 0 || i == last {
			size = seg.Bytes
		}
		total += size
	}
	return total
}
// isSkippableError reports whether err represents a client-side disconnect
// that should end streaming quietly rather than be treated as a failure.
// A nil error is considered skippable.
func (p *RarParser) isSkippableError(err error) bool {
	if err == nil {
		return true
	}
	msg := err.Error()
	for _, marker := range []string{"client disconnected", "broken pipe", "connection reset"} {
		if strings.Contains(msg, marker) {
			return true
		}
	}
	return false
}
// convertSegmentIndicesToRanges builds whole-segment ranges for the
// segments at indices [startIndex, endIndex] (inclusive, clamped to the
// segment list). RAR extraction always consumes entire segments, so no
// partial byte ranges are produced. Invalid index ranges yield nil.
func convertSegmentIndicesToRanges(file *NZBFile, startIndex, endIndex int) []SegmentRange {
	if endIndex >= len(file.Segments) {
		endIndex = len(file.Segments) - 1
	}
	// Fix: a negative startIndex previously indexed file.Segments[-1] and
	// panicked; reject it (and empty ranges) up front.
	if startIndex < 0 || startIndex > endIndex {
		return nil
	}
	// Pre-size: the count is known up front.
	segmentRanges := make([]SegmentRange, 0, endIndex-startIndex+1)
	for i := startIndex; i <= endIndex; i++ {
		segment := file.Segments[i]
		segmentRanges = append(segmentRanges, SegmentRange{
			Segment:    segment,
			ByteStart:  0,                 // always start at beginning of segment
			ByteEnd:    segment.Bytes - 1, // always go to end of segment
			TotalStart: 0,                 // not used for this approach
			TotalEnd:   segment.Bytes - 1, // not used for this approach
		})
	}
	return segmentRanges
}

619
pkg/usenet/store.go Normal file
View File

@@ -0,0 +1,619 @@
package usenet
import (
"context"
"encoding/json"
"fmt"
"github.com/puzpuzpuz/xsync/v4"
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sourcegraph/conc/pool"
"io"
"io/fs"
"net/http"
"os"
"path/filepath"
"strings"
"sync"
"sync/atomic"
"time"
)
// fileInfo is a minimal os.FileInfo implementation used to expose stored
// NZBs as directory entries, carrying the NZB id alongside the usual file
// metadata.
type fileInfo struct {
	id      string
	name    string
	size    int64
	mode    os.FileMode
	modTime time.Time
	isDir   bool
}

// Name returns the entry's display name.
func (f *fileInfo) Name() string {
	return f.name
}

// Size returns the entry's total size in bytes.
func (f *fileInfo) Size() int64 {
	return f.size
}

// Mode returns the file mode bits.
func (f *fileInfo) Mode() os.FileMode {
	return f.mode
}

// ModTime returns the modification timestamp.
func (f *fileInfo) ModTime() time.Time {
	return f.modTime
}

// IsDir reports whether the entry is a directory.
func (f *fileInfo) IsDir() bool {
	return f.isDir
}

// ID returns the NZB identifier backing this entry.
func (f *fileInfo) ID() string {
	return f.id
}

// Sys always returns nil; there is no underlying system data source.
func (f *fileInfo) Sys() interface{} {
	return nil
}
// Store is the persistence layer for NZBs: each entry lives as a JSON file
// on disk, with in-memory indexes (download queue, name lookup, cached
// directory listings) layered on top.
type Store interface {
	Add(nzb *NZB) error                           // persist a new NZB and refresh listings
	Get(nzoID string) *NZB                        // load by id; nil if absent or unreadable
	GetByName(name string) *NZB                   // load via the title index
	Update(nzb *NZB) error                        // persist changes to an existing NZB
	UpdateFile(nzoID string, file *NZBFile) error // replace one file entry (matched by name)
	Delete(nzoID string) error                    // remove an entry (alias of AtomicDelete)
	Count() int                                   // number of persisted entries
	Filter(category string, limit int, status ...string) []*NZB // filtered scan; limit 0 = unlimited
	GetHistory(category string, limit int) []*NZB // finished (completed/failed/error) entries
	UpdateStatus(nzoID string, status string) error // transition status and persist
	Close() error                                 // drop all in-memory state
	GetListing(folder string) []os.FileInfo       // cached listing ("__bad__" = bad entries only)
	Load() error                                  // rebuild indexes and listings from disk
	// GetQueueItem Queue management
	GetQueueItem(nzoID string) *NZB
	AddToQueue(nzb *NZB)
	RemoveFromQueue(nzoID string)
	GetQueue() []*NZB
	AtomicDelete(nzoID string) error                    // remove from disk and every index
	RemoveFile(nzoID string, filename string) error     // mark one file inside an NZB as removed
	MarkAsCompleted(nzoID string, storage string) error // move a queued NZB to persistent storage
}
// store is the file-backed Store implementation. Listings are held in
// atomic.Values for lock-free reads; the queue and title index use
// concurrent maps.
type store struct {
	storePath string       // directory holding one "<id>.json" per NZB
	listing atomic.Value   // []os.FileInfo of all entries
	badListing atomic.Value // []os.FileInfo of entries flagged IsBad
	queue *xsync.Map[string, *NZB] // in-flight downloads, keyed by id
	titles *xsync.Map[string, string] // title -> nzoID
	config *config.Usenet
	logger zerolog.Logger
}
// NewStore creates a file-backed NZB store rooted at the configured NZBs
// path. It returns nil when the backing directory cannot be created, so
// callers must nil-check the result.
func NewStore(config *config.Config, logger zerolog.Logger) Store {
	if err := os.MkdirAll(config.NZBsPath(), 0755); err != nil {
		// Fix: surface the failure instead of returning nil silently.
		logger.Error().Err(err).Str("path", config.NZBsPath()).Msg("Failed to create NZB store directory")
		return nil
	}
	return &store{
		storePath: config.NZBsPath(),
		queue:     xsync.NewMap[string, *NZB](),
		titles:    xsync.NewMap[string, string](),
		config:    config.Usenet,
		logger:    logger,
	}
}
// Load scans the store directory and rebuilds the in-memory listings and
// the name -> id index. Entries whose JSON cannot be read are skipped.
func (ns *store) Load() error {
	ids, err := ns.getAllIDs()
	if err != nil {
		return err
	}
	all := make([]os.FileInfo, 0)
	bad := make([]os.FileInfo, 0)
	for _, id := range ids {
		nzb, loadErr := ns.loadFromFile(id)
		if loadErr != nil {
			// Skip unreadable entries rather than failing the whole load.
			continue
		}
		ns.titles.Store(nzb.Name, nzb.ID)
		entry := &fileInfo{
			id:      nzb.ID,
			name:    nzb.Name,
			size:    nzb.TotalSize,
			mode:    0644,
			modTime: nzb.AddedOn,
			isDir:   true,
		}
		all = append(all, entry)
		if nzb.IsBad {
			bad = append(bad, entry)
		}
	}
	ns.listing.Store(all)
	ns.badListing.Store(bad)
	return nil
}
// getFilePath returns the on-disk JSON path for an NZB id:
// "<storePath>/<id>.json".
func (ns *store) getFilePath(nzoID string) string {
	return filepath.Join(ns.storePath, nzoID+".json")
}
// loadFromFile reads and decodes the compact JSON representation of an NZB
// from disk, expanding it back to the full NZB form.
func (ns *store) loadFromFile(nzoID string) (*NZB, error) {
	raw, err := os.ReadFile(ns.getFilePath(nzoID))
	if err != nil {
		return nil, err
	}
	compact := &CompactNZB{}
	if err := json.Unmarshal(raw, compact); err != nil {
		return nil, err
	}
	return compact.toNZB(), nil
}
// saveToFile persists nzb to disk in its compact JSON form, creating the
// parent directory if needed.
func (ns *store) saveToFile(nzb *NZB) error {
	filePath := ns.getFilePath(nzb.ID)
	if err := os.MkdirAll(filepath.Dir(filePath), 0755); err != nil {
		return err
	}
	payload, err := json.Marshal(nzb.toCompact()) // compact JSON keeps files small
	if err != nil {
		return err
	}
	return os.WriteFile(filePath, payload, 0644)
}
// refreshListing rebuilds the cached listings from disk and kicks off an
// asynchronous rclone VFS refresh when one is configured.
func (ns *store) refreshListing() error {
	ids, err := ns.getAllIDs()
	if err != nil {
		return err
	}
	all := make([]os.FileInfo, 0, len(ids))
	bad := make([]os.FileInfo, 0, len(ids))
	for _, id := range ids {
		nzb, loadErr := ns.loadFromFile(id)
		if loadErr != nil {
			continue // skip unreadable entries
		}
		entry := &fileInfo{
			id:      nzb.ID,
			name:    nzb.Name,
			size:    nzb.TotalSize,
			mode:    0644,
			modTime: nzb.AddedOn,
			isDir:   true,
		}
		all = append(all, entry)
		ns.titles.Store(nzb.Name, nzb.ID)
		if nzb.IsBad {
			bad = append(bad, entry)
		}
	}
	// Swap both listings in; atomic.Value makes each store safe for
	// concurrent readers.
	ns.listing.Store(all)
	ns.badListing.Store(bad)
	// Refresh rclone in the background so callers are not blocked on HTTP.
	go func() {
		if err := ns.refreshRclone(); err != nil {
			ns.logger.Error().Err(err).Msg("Failed to refresh rclone")
		}
	}()
	return nil
}
// Add persists a new NZB, indexes it by name, and refreshes the cached
// listings asynchronously.
func (ns *store) Add(nzb *NZB) error {
	if nzb == nil {
		return fmt.Errorf("nzb cannot be nil")
	}
	if err := ns.saveToFile(nzb); err != nil {
		return err
	}
	ns.titles.Store(nzb.Name, nzb.ID)
	// Listing refresh is best-effort and must not block the caller.
	go func() { _ = ns.refreshListing() }()
	return nil
}
// GetByName resolves a display name to its NZB via the title index,
// returning nil when the name is unknown.
func (ns *store) GetByName(name string) *NZB {
	nzoID, ok := ns.titles.Load(name)
	if !ok {
		return nil
	}
	return ns.Get(nzoID)
}
// GetQueueItem returns the queued (in-progress) NZB by id, or nil.
func (ns *store) GetQueueItem(nzoID string) *NZB {
	if item, exists := ns.queue.Load(nzoID); exists {
		return item
	}
	return nil
}

// AddToQueue registers an in-progress NZB; nil values are ignored.
func (ns *store) AddToQueue(nzb *NZB) {
	if nzb == nil {
		return
	}
	ns.queue.Store(nzb.ID, nzb)
}

// RemoveFromQueue drops an NZB from the queue; empty ids are ignored.
func (ns *store) RemoveFromQueue(nzoID string) {
	if nzoID == "" {
		return
	}
	ns.queue.Delete(nzoID)
}

// GetQueue returns a snapshot of all queued NZBs, in unspecified order.
func (ns *store) GetQueue() []*NZB {
	var queueItems []*NZB
	ns.queue.Range(func(_ string, value *NZB) bool {
		queueItems = append(queueItems, value)
		return true // continue iteration
	})
	return queueItems
}
// Get loads the NZB with the given id from disk, returning nil when it
// does not exist or cannot be decoded.
func (ns *store) Get(nzoID string) *NZB {
	if nzb, err := ns.loadFromFile(nzoID); err == nil {
		return nzb
	}
	return nil
}
// Update persists the current state of nzb to disk.
func (ns *store) Update(nzb *NZB) error {
	return ns.saveToFile(nzb)
}

// Delete removes the NZB with the given id; it is an alias for
// AtomicDelete.
func (ns *store) Delete(nzoID string) error {
	return ns.AtomicDelete(nzoID)
}
// pruneListing returns listing without the entry whose id matches nzoID.
// Entries that are not *fileInfo cannot match and are kept as-is.
func pruneListing(listing []os.FileInfo, nzoID string) []os.FileInfo {
	pruned := make([]os.FileInfo, 0, len(listing))
	for _, entry := range listing {
		if info, ok := entry.(*fileInfo); ok && info.id == nzoID {
			continue
		}
		// Fix: entries failing the *fileInfo assertion were previously
		// dropped from the listing as a side effect of the filter.
		pruned = append(pruned, entry)
	}
	return pruned
}

// AtomicDelete removes an NZB from every in-memory structure (queue, title
// index, listings) and deletes its JSON file from disk. Deleting an
// already-absent entry is a no-op.
func (ns *store) AtomicDelete(nzoID string) error {
	if nzoID == "" {
		return fmt.Errorf("nzoID cannot be empty")
	}
	filePath := ns.getFilePath(nzoID)
	// Look the NZB up first so its name can be removed from the title index.
	nzb := ns.Get(nzoID)
	if nzb == nil {
		// Check if the file exists on disk even if it is not loadable.
		if _, err := os.Stat(filePath); os.IsNotExist(err) {
			return nil // already deleted
		}
	}
	ns.queue.Delete(nzoID)
	if nzb != nil {
		ns.titles.Delete(nzb.Name)
	}
	if current := ns.listing.Load(); current != nil {
		ns.listing.Store(pruneListing(current.([]os.FileInfo), nzoID))
	}
	if current := ns.badListing.Load(); current != nil {
		ns.badListing.Store(pruneListing(current.([]os.FileInfo), nzoID))
	}
	// Remove the backing file last.
	return os.Remove(filePath)
}
// RemoveFile marks a single file inside an NZB as removed, persists the
// change, and refreshes the cached listings.
func (ns *store) RemoveFile(nzoID string, filename string) error {
	if nzoID == "" || filename == "" {
		return fmt.Errorf("nzoID and filename cannot be empty")
	}
	nzb := ns.Get(nzoID)
	if nzb == nil {
		return fmt.Errorf("nzb with nzoID %s not found", nzoID)
	}
	if err := nzb.MarkFileAsRemoved(filename); err != nil {
		return err
	}
	if err := ns.Update(nzb); err != nil {
		return fmt.Errorf("failed to update nzb after removing file %s: %w", filename, err)
	}
	// Listing refresh failures are non-fatal here.
	_ = ns.refreshListing()
	return nil
}
// getAllIDs walks the store directory and returns the ids of every
// persisted NZB (one per "<id>.json" file).
func (ns *store) getAllIDs() ([]string, error) {
	var ids []string
	walkErr := filepath.WalkDir(ns.storePath, func(path string, entry fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		if entry.IsDir() {
			return nil
		}
		if name := entry.Name(); strings.HasSuffix(name, ".json") {
			ids = append(ids, strings.TrimSuffix(name, ".json"))
		}
		return nil
	})
	return ids, walkErr
}
// Filter returns up to limit NZBs (0 = unlimited) matching the optional
// category and status filters. Entries are loaded concurrently, so result
// order is unspecified.
func (ns *store) Filter(category string, limit int, status ...string) []*NZB {
	ids, err := ns.getAllIDs()
	if err != nil {
		return nil
	}
	statusSet := make(map[string]struct{}, len(status))
	for _, s := range status {
		statusSet[s] = struct{}{}
	}
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	p := pool.New().WithContext(ctx).WithMaxGoroutines(10)
	var results []*NZB
	var mu sync.Mutex
	var found atomic.Int32
	for _, id := range ids {
		id := id
		p.Go(func(ctx context.Context) error {
			// Early exit if limit reached
			if limit > 0 && found.Load() >= int32(limit) {
				return nil
			}
			select {
			case <-ctx.Done():
				// Fix: cancellation is how we stop early once the limit is
				// hit. Returning ctx.Err() here made p.Wait() fail, and the
				// caller then discarded every result already collected.
				return nil
			default:
			}
			nzb := ns.Get(id)
			if nzb == nil {
				return nil
			}
			// Apply filters
			if category != "" && nzb.Category != category {
				return nil
			}
			if len(statusSet) > 0 {
				if _, exists := statusSet[nzb.Status]; !exists {
					return nil
				}
			}
			// Add to results with limit check
			mu.Lock()
			if limit == 0 || len(results) < limit {
				results = append(results, nzb)
				found.Add(1)
				if limit > 0 && len(results) >= limit {
					cancel() // stop scheduling further work
				}
			}
			mu.Unlock()
			return nil
		})
	}
	// Fix: keep the collected results even if the pool reports an error;
	// previously the whole result set was thrown away.
	if err := p.Wait(); err != nil {
		ns.logger.Debug().Err(err).Msg("filter pool finished with error")
	}
	return results
}
// Count returns the number of NZB JSON files currently on disk; scan
// failures count as zero.
func (ns *store) Count() int {
	if ids, err := ns.getAllIDs(); err == nil {
		return len(ids)
	}
	return 0
}

// GetHistory returns finished (completed/failed/error) NZBs for a category.
func (ns *store) GetHistory(category string, limit int) []*NZB {
	return ns.Filter(category, limit, "completed", "failed", "error")
}
// UpdateStatus transitions an NZB to the given status and persists it.
// "completed" also stamps the completion time and progress; "failed"
// deletes the entry entirely.
func (ns *store) UpdateStatus(nzoID string, status string) error {
	nzb := ns.Get(nzoID)
	if nzb == nil {
		return fmt.Errorf("nzb with nzoID %s not found", nzoID)
	}
	nzb.Status = status
	nzb.LastActivity = time.Now()
	switch status {
	case "completed":
		nzb.CompletedOn = time.Now()
		nzb.Progress = 100
		nzb.Percentage = 100
	case "failed":
		// Fix: return after deleting. The old code fell through to Update,
		// which re-created on disk the very file Delete had just removed.
		return ns.Delete(nzb.ID)
	}
	return ns.Update(nzb)
}
// Close drops all in-memory state (queue, listings, title index). The
// on-disk files are left untouched.
func (ns *store) Close() error {
	// Clear the queue and title index by replacing the maps.
	ns.queue = xsync.NewMap[string, *NZB]()
	ns.titles = xsync.NewMap[string, string]()
	// Fix: reset the listings by storing empty slices. The old code
	// overwrote the atomic.Value structs themselves, but an atomic.Value
	// must not be copied/reassigned after first use.
	ns.listing.Store(make([]os.FileInfo, 0))
	ns.badListing.Store(make([]os.FileInfo, 0))
	return nil
}
// UpdateFile replaces the stored copy of a single file (matched by name)
// inside an NZB, persists the change, and refreshes the listings.
// NOTE(review): a filename with no match silently re-saves the NZB
// unchanged — confirm that is intended.
func (ns *store) UpdateFile(nzoID string, file *NZBFile) error {
	if nzoID == "" || file == nil {
		return fmt.Errorf("nzoID and file cannot be empty")
	}
	nzb := ns.Get(nzoID)
	if nzb == nil {
		return fmt.Errorf("nzb with nzoID %s not found", nzoID)
	}
	for i := range nzb.Files {
		if nzb.Files[i].Name == file.Name {
			nzb.Files[i] = *file
			break
		}
	}
	if err := ns.Update(nzb); err != nil {
		return fmt.Errorf("failed to update nzb after updating file %s: %w", file.Name, err)
	}
	return ns.refreshListing()
}
// GetListing returns the cached directory listing: the bad-entries view
// for the special "__bad__" folder, otherwise the full listing. It never
// returns nil.
func (ns *store) GetListing(folder string) []os.FileInfo {
	source := &ns.listing
	if folder == "__bad__" {
		source = &ns.badListing
	}
	if listing, ok := source.Load().([]os.FileInfo); ok {
		return listing
	}
	return []os.FileInfo{}
}
// MarkAsCompleted moves an NZB from the in-memory queue to persistent
// storage, stamping completion metadata. If persisting fails the item is
// put back on the queue so it is not lost.
func (ns *store) MarkAsCompleted(nzoID string, storage string) error {
	if nzoID == "" {
		return fmt.Errorf("nzoID cannot be empty")
	}
	// Get NZB from queue
	queueNZB := ns.GetQueueItem(nzoID)
	if queueNZB == nil {
		return fmt.Errorf("NZB %s not found in queue", nzoID)
	}
	// Update NZB status
	queueNZB.Status = "completed"
	queueNZB.Storage = storage
	queueNZB.CompletedOn = time.Now()
	queueNZB.LastActivity = time.Now()
	queueNZB.Progress = 100
	queueNZB.Percentage = 100
	// Remove from queue, then persist; Add also refreshes the listings.
	ns.queue.Delete(nzoID)
	if err := ns.Add(queueNZB); err != nil {
		// Rollback: add back to queue if storage fails
		ns.queue.Store(nzoID, queueNZB)
		return fmt.Errorf("failed to store completed NZB: %w", err)
	}
	return nil
}
// refreshRclone asks a configured rclone remote-control endpoint to drop
// (vfs/forget) and re-read (vfs/refresh) its VFS directory cache. It is a
// no-op when no RC URL is set; individual request failures are logged but
// never returned.
func (ns *store) refreshRclone() error {
	if ns.config.RcUrl == "" {
		return nil
	}
	// Fix: use a client with a timeout instead of http.DefaultClient, which
	// would block forever on an unresponsive rclone endpoint.
	client := &http.Client{Timeout: 30 * time.Second}
	// Create form data
	data := ns.buildRcloneRequestData()
	if err := ns.sendRcloneRequest(client, "vfs/forget", data); err != nil {
		ns.logger.Error().Err(err).Msg("Failed to send rclone vfs/forget request")
	}
	if err := ns.sendRcloneRequest(client, "vfs/refresh", data); err != nil {
		ns.logger.Error().Err(err).Msg("Failed to send rclone vfs/refresh request")
	}
	return nil
}
// buildRcloneRequestData returns the form body shared by the rclone VFS
// calls; "__all__" targets every mounted directory.
func (ns *store) buildRcloneRequestData() string {
	return "dir=__all__"
}
// sendRcloneRequest POSTs url-encoded form data to the given rclone RC
// endpoint, applying basic auth when configured. Any non-200 response is
// an error carrying up to 1 KiB of the response body.
func (ns *store) sendRcloneRequest(client *http.Client, endpoint, data string) error {
	url := fmt.Sprintf("%s/%s", ns.config.RcUrl, endpoint)
	req, err := http.NewRequest("POST", url, strings.NewReader(data))
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
	if ns.config.RcUser != "" && ns.config.RcPass != "" {
		req.SetBasicAuth(ns.config.RcUser, ns.config.RcPass)
	}
	resp, err := client.Do(req)
	if err != nil {
		return err
	}
	defer func() {
		if closeErr := resp.Body.Close(); closeErr != nil {
			ns.logger.Error().Err(closeErr).Msg("Failed to close response body")
		}
	}()
	if resp.StatusCode != 200 {
		body, _ := io.ReadAll(io.LimitReader(resp.Body, 1024))
		return fmt.Errorf("failed to perform %s: %s - %s", endpoint, resp.Status, string(body))
	}
	// Drain the body so the underlying connection can be reused.
	_, _ = io.Copy(io.Discard, resp.Body)
	return nil
}

383
pkg/usenet/stream.go Normal file
View File

@@ -0,0 +1,383 @@
package usenet
import (
"bytes"
"context"
"errors"
"fmt"
"github.com/chrisfarms/yenc"
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/nntp"
"io"
"net/http"
"sync"
"time"
)
// groupCache remembers NNTP groups that have already been selected so the
// GROUP command is not repeated. NOTE(review): the cache is process-wide,
// but group selection is per-connection state on NNTP servers — verify
// that the providers in use serve BODY by message-id without a prior GROUP
// on the same connection.
var groupCache = sync.Map{}

// Streamer downloads yEnc-encoded article segments over NNTP and streams
// the decoded bytes to a writer, with an optional segment cache and
// bounded retries.
type Streamer struct {
	logger zerolog.Logger
	client *nntp.Client
	store Store
	cache *SegmentCache
	chunkSize int // number of segments downloaded concurrently per batch
	maxRetries int // attempts per segment before giving up
	retryDelayMs int // base delay for exponential backoff between retries
}

// segmentResult carries one downloaded segment (or its error) back from a
// worker goroutine, tagged with its position in the batch.
type segmentResult struct {
	index int
	data []byte
	err error
}
// FlushingWriter wraps an io.Writer and flushes after every successful
// write when the underlying writer implements http.Flusher, so streamed
// bytes reach the client promptly.
type FlushingWriter struct {
	writer io.Writer
}

// Write forwards data to the wrapped writer, enforces full writes, and
// flushes when possible. A zero-length write is a no-op.
func (fw *FlushingWriter) Write(data []byte) (int, error) {
	if len(data) == 0 {
		return 0, nil
	}
	n, err := fw.writer.Write(data)
	switch {
	case err != nil:
		return n, err
	case n != len(data):
		return n, io.ErrShortWrite
	}
	if flusher, ok := fw.writer.(http.Flusher); ok {
		flusher.Flush()
	}
	return n, nil
}

// WriteAndFlush is Write with an int64 count, for callers tracking totals.
func (fw *FlushingWriter) WriteAndFlush(data []byte) (int64, error) {
	if len(data) == 0 {
		return 0, nil
	}
	n, err := fw.Write(data)
	return int64(n), err
}

// WriteString writes a string through Write.
func (fw *FlushingWriter) WriteString(s string) (int, error) {
	return fw.Write([]byte(s))
}

// WriteBytes is an alias for Write.
func (fw *FlushingWriter) WriteBytes(data []byte) (int, error) {
	return fw.Write(data)
}
// NewStreamer builds a Streamer over the given NNTP client, cache and
// store. chunkSize controls how many segments are downloaded concurrently
// per batch; retries are fixed at 3 attempts with a 2000ms base delay.
func NewStreamer(client *nntp.Client, cache *SegmentCache, store Store, chunkSize int, logger zerolog.Logger) *Streamer {
	return &Streamer{
		logger: logger.With().Str("component", "streamer").Logger(),
		cache: cache,
		store: store,
		client: client,
		chunkSize: chunkSize,
		maxRetries: 3,
		retryDelayMs: 2000,
	}
}
// Stream writes the byte range [start, end] (inclusive) of file to writer.
// RAR archives are handled by the extraction path; plain files are
// streamed segment-by-segment. start is clamped to 0 and end to
// file.Size-1 before validation.
func (s *Streamer) Stream(ctx context.Context, file *NZBFile, start, end int64, writer io.Writer) error {
	if file == nil {
		return fmt.Errorf("file cannot be nil")
	}
	if start < 0 {
		start = 0
	}
	// Ensure file.SegmentSize is known (probes the first segment if unset).
	if err := s.getSegmentSize(ctx, file); err != nil {
		return fmt.Errorf("failed to get segment size: %w", err)
	}
	// NOTE(review): the end clamp below is intentionally skipped for RAR
	// archives — offsets there refer to the extracted file, not file.Size.
	if file.IsRarArchive {
		return s.streamRarExtracted(ctx, file, start, end, writer)
	}
	if end >= file.Size {
		end = file.Size - 1
	}
	if start > end {
		return fmt.Errorf("invalid range: start=%d > end=%d", start, end)
	}
	ranges := file.GetSegmentsInRange(file.SegmentSize, start, end)
	if len(ranges) == 0 {
		return fmt.Errorf("no segments found for range [%d, %d]", start, end)
	}
	// Wrap so each chunk reaches the client as soon as it is written.
	writer = &FlushingWriter{writer: writer}
	return s.stream(ctx, ranges, writer)
}
// streamRarExtracted streams a byte range of the media file contained in a
// RAR archive, extracting it on the fly via RarParser.
func (s *Streamer) streamRarExtracted(ctx context.Context, file *NZBFile, start, end int64, writer io.Writer) error {
	parser := NewRarParser(s)
	return parser.ExtractFileRange(ctx, file, file.Password, start, end, writer)
}
// stream downloads the given segment ranges in batches of s.chunkSize,
// fetching each batch concurrently and then writing the results to writer
// in order, so output stays sequential while downloads are parallel.
func (s *Streamer) stream(ctx context.Context, ranges []SegmentRange, writer io.Writer) error {
	chunkSize := s.chunkSize
	if chunkSize <= 0 {
		// Fix: guard against a non-positive configured chunk size, which
		// would make the loop below spin forever (i += 0).
		chunkSize = 1
	}
	for i := 0; i < len(ranges); i += chunkSize {
		end := min(i+chunkSize, len(ranges))
		chunk := ranges[i:end]
		// Download this batch concurrently; results preserves batch order.
		results := make([]segmentResult, len(chunk))
		var wg sync.WaitGroup
		for j, segRange := range chunk {
			wg.Add(1)
			go func(idx int, sr SegmentRange) {
				defer wg.Done()
				data, err := s.processSegment(ctx, sr)
				results[idx] = segmentResult{index: idx, data: data, err: err}
			}(j, segRange)
		}
		wg.Wait()
		// Write sequentially in segment order.
		for j, result := range results {
			if result.err != nil {
				return fmt.Errorf("segment %d failed: %w", i+j, result.err)
			}
			if len(result.data) > 0 {
				if _, err := writer.Write(result.data); err != nil {
					return err
				}
			}
		}
	}
	return nil
}
// processSegment returns the requested byte window of one segment, serving
// from the segment cache when possible and otherwise downloading (with
// retries) and caching the decoded article.
func (s *Streamer) processSegment(ctx context.Context, segRange SegmentRange) ([]byte, error) {
	segment := segRange.Segment
	// Try cache first
	if s.cache != nil {
		if cached, found := s.cache.Get(segment.MessageID); found {
			return s.extractRangeFromSegment(cached.Data, segRange)
		}
	}
	// Download with retries
	decodedData, err := s.downloadSegmentWithRetry(ctx, segment)
	if err != nil {
		return nil, fmt.Errorf("download failed: %w", err)
	}
	// Cache full segment for future seeks
	if s.cache != nil {
		s.cache.Put(segment.MessageID, decodedData, segment.Bytes)
	}
	// Extract the specific range from this segment.
	// NOTE(review): the cache path reads cached.Data while this path reads
	// decodedData.Body — presumably SegmentCache stores the decoded body;
	// confirm the two are byte-identical.
	return s.extractRangeFromSegment(decodedData.Body, segRange)
}
// extractRangeFromSegment copies the [ByteStart, ByteEnd] window
// (inclusive) out of a decoded segment, clamping to the data actually
// available. Out-of-bounds or empty windows yield an empty slice, never an
// error.
func (s *Streamer) extractRangeFromSegment(data []byte, segRange SegmentRange) ([]byte, error) {
	lo := segRange.ByteStart
	hi := segRange.ByteEnd + 1 // ByteEnd is inclusive; slicing needs exclusive
	size := int64(len(data))
	if hi > size {
		hi = size
	}
	if lo < 0 || lo >= size || lo >= hi {
		return []byte{}, nil
	}
	out := make([]byte, hi-lo)
	copy(out, data[lo:hi])
	return out, nil
}
// downloadSegmentWithRetry fetches a segment with up to maxRetries
// attempts, using exponential backoff (base retryDelayMs, capped at 5s).
// Context cancellation aborts immediately and is never retried.
func (s *Streamer) downloadSegmentWithRetry(ctx context.Context, segment NZBSegment) (*yenc.Part, error) {
	var lastErr error
	for attempt := 0; attempt < s.maxRetries; attempt++ {
		// Check cancellation before each attempt.
		if err := ctx.Err(); err != nil {
			return nil, err
		}
		if attempt > 0 {
			backoff := time.Duration(s.retryDelayMs*(1<<(attempt-1))) * time.Millisecond
			if backoff > 5*time.Second {
				backoff = 5 * time.Second
			}
			select {
			case <-ctx.Done():
				return nil, ctx.Err()
			case <-time.After(backoff):
			}
		}
		part, err := s.downloadSegment(ctx, segment)
		if err == nil {
			return part, nil
		}
		if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
			return nil, err
		}
		lastErr = err
	}
	return nil, fmt.Errorf("segment download failed after %d attempts: %w", s.maxRetries, lastErr)
}
// downloadSegment fetches and yEnc-decodes a single article body over
// NNTP. Acquiring a pooled connection is bounded by a 5s timeout; the
// decoded part's Begin offset is converted from yEnc's 1-based convention
// to 0-based.
func (s *Streamer) downloadSegment(ctx context.Context, segment NZBSegment) (*yenc.Part, error) {
	if err := ctx.Err(); err != nil {
		return nil, err
	}
	downloadCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
	defer cancel()
	conn, cleanup, err := s.client.GetConnection(downloadCtx)
	if err != nil {
		return nil, err
	}
	defer cleanup()
	if err := ctx.Err(); err != nil {
		return nil, err
	}
	if segment.Group != "" {
		// NOTE(review): groupCache is process-wide, but NNTP group selection
		// is per-connection state, so this skips GROUP on connections that
		// never issued it. Safe only if providers honor BODY by message-id
		// without a selected group — confirm with the providers in use.
		if _, exists := groupCache.Load(segment.Group); !exists {
			if _, err := conn.SelectGroup(segment.Group); err != nil {
				return nil, fmt.Errorf("failed to select group %s: %w", segment.Group, err)
			}
			groupCache.Store(segment.Group, true)
		}
	}
	body, err := conn.GetBody(segment.MessageID)
	if err != nil {
		return nil, fmt.Errorf("failed to get body for message %s: %w", segment.MessageID, err)
	}
	// Fix: len(nil) == 0, so the previous extra nil check was redundant.
	if len(body) == 0 {
		return nil, fmt.Errorf("no body found for message %s", segment.MessageID)
	}
	data, err := nntp.DecodeYenc(bytes.NewReader(body))
	if err != nil {
		return nil, fmt.Errorf("failed to decode yEnc: %w", err)
	}
	// Adjust begin offset (yEnc part ranges are 1-based).
	data.Begin -= 1
	return data, nil
}
// copySegmentData writes data to writer via io.CopyN, verifies the full
// length was transferred, and flushes when the writer supports it. It
// returns the number of bytes written.
func (s *Streamer) copySegmentData(writer io.Writer, data []byte) (int64, error) {
	size := int64(len(data))
	if size == 0 {
		return 0, nil
	}
	written, err := io.CopyN(writer, bytes.NewReader(data), size)
	if err != nil {
		return 0, fmt.Errorf("copyN failed %w", err)
	}
	if written != size {
		return 0, fmt.Errorf("expected to copy %d bytes, only copied %d", len(data), written)
	}
	if fl, ok := writer.(http.Flusher); ok {
		fl.Flush()
	}
	return written, nil
}
// extractRangeWithGapHandling returns the bytes of data (spanning absolute
// offsets [segStart, segEnd)) that fall inside the requested global range
// [globalStart, globalEnd] (inclusive). Non-overlapping or out-of-bounds
// ranges yield an empty slice, never an error.
func (s *Streamer) extractRangeWithGapHandling(data []byte, segStart, segEnd int64, globalStart, globalEnd int64) ([]byte, error) {
	// Intersect the segment's span with the requested global range.
	lo := max(segStart, globalStart)
	hi := min(segEnd, globalEnd+1) // +1 because globalEnd is inclusive
	if lo >= hi {
		return []byte{}, nil // no overlap
	}
	offset := lo - segStart
	if offset < 0 || offset >= int64(len(data)) {
		return []byte{}, nil
	}
	limit := offset + (hi - lo)
	if limit > int64(len(data)) {
		limit = int64(len(data))
	}
	if limit <= offset {
		return []byte{}, nil
	}
	out := make([]byte, limit-offset)
	copy(out, data[offset:limit])
	return out, nil
}
// getSegmentSize ensures file.SegmentSize is populated, probing the first
// segment's yEnc headers over NNTP when unset and persisting the result
// through the store.
func (s *Streamer) getSegmentSize(ctx context.Context, file *NZBFile) error {
	if file.SegmentSize > 0 {
		return nil
	}
	if len(file.Segments) == 0 {
		return fmt.Errorf("no segments available for file %s", file.Name)
	}
	// Fetch the segment size and then store it in the file
	firstSegment := file.Segments[0]
	firstInfo, err := s.client.DownloadHeader(ctx, firstSegment.MessageID)
	if err != nil {
		return err
	}
	// yEnc part ranges are 1-based and inclusive: size = End - Begin + 1.
	chunkSize := firstInfo.End - (firstInfo.Begin - 1)
	if chunkSize <= 0 {
		return fmt.Errorf("invalid segment size for file %s: %d", file.Name, chunkSize)
	}
	file.SegmentSize = chunkSize
	return s.store.UpdateFile(file.NzbID, file)
}

239
pkg/usenet/types.go Normal file
View File

@@ -0,0 +1,239 @@
package usenet
import "time"
// NZB represents a torrent-like structure for NZB files: identity and
// metadata, contained files, and live download/streaming state.
type NZB struct {
	ID string `json:"id"`
	Name string `json:"name"`
	Title string `json:"title,omitempty"`
	TotalSize int64 `json:"total_size"`
	DatePosted time.Time `json:"date_posted"`
	Category string `json:"category"`
	Groups []string `json:"groups"`
	Files []NZBFile `json:"files"`
	Downloaded bool `json:"downloaded"` // Whether the NZB has been downloaded
	StreamingInfo StreamingInfo `json:"streaming_info"`
	AddedOn time.Time `json:"added_on"` // When the NZB was added to the system
	LastActivity time.Time `json:"last_activity"` // Last activity timestamp
	Status string `json:"status"` // "queued", "downloading", "completed", "failed"
	Progress float64 `json:"progress"` // Percentage of download completion
	Percentage float64 `json:"percentage"` // Percentage of download completion
	SizeDownloaded int64 `json:"size_downloaded"` // Total size downloaded so far
	ETA int64 `json:"eta"` // Estimated time of arrival in seconds
	Speed int64 `json:"speed"` // Download speed in bytes per second
	CompletedOn time.Time `json:"completed_on"` // When the NZB was completed
	IsBad bool `json:"is_bad"`
	Storage string `json:"storage"`
	FailMessage string `json:"fail_message,omitempty"` // Error message if the download failed
	// Fix: the tag was `json:"-,omitempty"`, which names the field "-" and
	// would serialize the password under the key "-". `json:"-"` omits it.
	Password string `json:"-"` // Password for encrypted RAR files
}
// StreamingInfo contains metadata for streaming capabilities
type StreamingInfo struct {
	IsStreamable bool `json:"is_streamable"`
	MainFileIndex int `json:"main_file_index"` // Index of the main media file
	HasParFiles bool `json:"has_par_files"`
	HasRarFiles bool `json:"has_rar_files"`
	TotalSegments int `json:"total_segments"`
	EstimatedTime int64 `json:"estimated_time"` // Estimated download time in seconds
}
// SegmentValidationInfo records an expected-vs-actual size check for one
// downloaded segment. It is held in memory only (see NZBFile).
type SegmentValidationInfo struct {
	ExpectedSize int64
	ActualSize int64
	Validated bool
}
// NZBFile represents a grouped file with its segments
type NZBFile struct {
	NzbID string `json:"nzo_id"` // id of the owning NZB
	Name string `json:"name"`
	Size int64 `json:"size"`
	StartOffset int64 `json:"start_offset"` // This is useful for removing rar headers
	Segments []NZBSegment `json:"segments"`
	Groups []string `json:"groups"`
	SegmentValidation map[string]*SegmentValidationInfo `json:"-"` // not persisted
	IsRarArchive bool `json:"is_rar_archive"` // Whether this file is a RAR archive that needs extraction
	Password string `json:"password,omitempty"` // Password for encrypted RAR files
	IsDeleted bool `json:"is_deleted"`
	SegmentSize int64 `json:"segment_size,omitempty"` // Size of each segment in bytes, if applicable
}
// NZBSegment represents a segment with all necessary download info.
// Field order and types must stay identical to CompactSegment: the two are
// converted via direct type conversion in toCompact/toNZB.
type NZBSegment struct {
	Number int `json:"number"`
	MessageID string `json:"message_id"`
	Bytes int64 `json:"bytes"`
	StartOffset int64 `json:"start_offset"` // Byte offset within the file
	EndOffset int64 `json:"end_offset"` // End byte offset within the file
	Group string `json:"group"`
}
// CompactNZB is a space-optimized version of NZB for storage
type CompactNZB struct {
	ID string `json:"i"`
	Name string `json:"n"`
	Status string `json:"s"`
	Category string `json:"c"`
	Size int64 `json:"sz"`
	Progress float64 `json:"p"`
	Speed int64 `json:"sp,omitempty"`
	ETA int64 `json:"e,omitempty"`
	Added int64 `json:"a"` // Unix timestamp
	Modified int64 `json:"m"` // Unix timestamp
	Complete int64 `json:"co,omitempty"` // Unix timestamp
	Groups []string `json:"g,omitempty"` // at most 3 groups are stored
	Files []CompactFile `json:"f,omitempty"`
	Storage string `json:"st,omitempty"` // Storage path
	FailMessage string `json:"fm,omitempty"` // Error message if the download failed
	Downloaded bool `json:"d,omitempty"`
}
// CompactFile represents a file in compact format
type CompactFile struct {
	Name string `json:"n"`
	Size int64 `json:"s"`
	Type string `json:"t"`
	Main bool `json:"m,omitempty"`
	Offset int64 `json:"o"`
	Segments []CompactSegment `json:"seg,omitempty"`
	IsRar bool `json:"r,omitempty"`
	Password string `json:"p,omitempty"`
	IsDeleted bool `json:"del,omitempty"` // Whether the file is marked as deleted
	ExtractedFileInfo *ExtractedFileInfo `json:"efi,omitempty"` // Pre-extracted RAR file info
	SegmentSize int64 `json:"ss,omitempty"` // Size of each segment in bytes, if applicable
}
// CompactSegment represents a segment in compact format. Its layout must
// stay identical to NZBSegment (see note there).
type CompactSegment struct {
	Number int `json:"n"` // Segment number
	MessageID string `json:"mid"` // Message-ID of the segment
	Bytes int64 `json:"b"` // Size in bytes
	StartOffset int64 `json:"so"` // Start byte offset within the file
	EndOffset int64 `json:"eo"` // End byte offset within the file
	Group string `json:"g,omitempty"` // Group associated with this segment
}
// ExtractedFileInfo describes the media file found inside a RAR archive.
type ExtractedFileInfo struct {
	FileName string `json:"fn,omitempty"`
	FileSize int64 `json:"fs,omitempty"`
	ArchiveSize int64 `json:"as,omitempty"` // Total size of the RAR archive
	EstimatedStartOffset int64 `json:"eso,omitempty"` // Estimated start offset in the archive
	SegmentSize int64 `json:"ss,omitempty"` // Size of each segment in the archive
}
// toCompact converts an NZB to its compact on-disk format, keeping only
// what is needed to reconstruct state: timestamps as Unix seconds, at most
// three groups, and per-file compact records.
func (nzb *NZB) toCompact() *CompactNZB {
	compact := &CompactNZB{
		ID:          nzb.ID,
		Name:        nzb.Name,
		Status:      nzb.Status,
		Category:    nzb.Category,
		Size:        nzb.TotalSize,
		Progress:    nzb.Progress,
		Speed:       nzb.Speed,
		ETA:         nzb.ETA,
		Added:       nzb.AddedOn.Unix(),
		Modified:    nzb.LastActivity.Unix(),
		Storage:     nzb.Storage,
		Downloaded:  nzb.Downloaded,
		FailMessage: nzb.FailMessage,
	}
	if !nzb.CompletedOn.IsZero() {
		compact.Complete = nzb.CompletedOn.Unix()
	}
	// Only store essential groups (first 3); use the min builtin (already
	// used elsewhere in this package) instead of a hand-rolled clamp.
	if len(nzb.Groups) > 0 {
		compact.Groups = nzb.Groups[:min(len(nzb.Groups), 3)]
	}
	// Store only essential file info
	if len(nzb.Files) > 0 {
		compact.Files = make([]CompactFile, len(nzb.Files))
		for i, file := range nzb.Files {
			compact.Files[i] = file.toCompact()
		}
	}
	return compact
}
// toNZB converts the compact on-disk format back to a full NZB, expanding
// Unix timestamps and per-file records.
func (compact *CompactNZB) toNZB() *NZB {
	nzb := &NZB{
		ID: compact.ID,
		Name: compact.Name,
		Status: compact.Status,
		Category: compact.Category,
		TotalSize: compact.Size,
		Progress: compact.Progress,
		Percentage: compact.Progress, // Percentage mirrors Progress in compact form
		Speed: compact.Speed,
		ETA: compact.ETA,
		Groups: compact.Groups,
		AddedOn: time.Unix(compact.Added, 0),
		LastActivity: time.Unix(compact.Modified, 0),
		Storage: compact.Storage,
		Downloaded: compact.Downloaded,
		FailMessage: compact.FailMessage,
		StreamingInfo: StreamingInfo{
			MainFileIndex: -1, // compact form does not persist the main file index
		},
	}
	if compact.Complete > 0 {
		nzb.CompletedOn = time.Unix(compact.Complete, 0)
	}
	// Reconstruct files
	if len(compact.Files) > 0 {
		nzb.Files = make([]NZBFile, len(compact.Files))
		for i, file := range compact.Files {
			nzb.Files[i] = file.toNZB()
		}
		// Set streaming info.
		// NOTE(review): TotalSegments is set to the FILE count here, and
		// IsStreamable is always false since MainFileIndex is fixed at -1
		// above — confirm both are intentional.
		nzb.StreamingInfo.TotalSegments = len(compact.Files)
		nzb.StreamingInfo.IsStreamable = nzb.StreamingInfo.MainFileIndex >= 0
	}
	return nzb
}
// toCompact converts the file to its compact storage representation.
// NZBSegment and CompactSegment share an identical layout, so segments are
// converted with a direct type conversion.
func (nf *NZBFile) toCompact() CompactFile {
	compact := CompactFile{
		Name:        nf.Name,
		Size:        nf.Size,
		Offset:      nf.StartOffset,
		IsRar:       nf.IsRarArchive,
		IsDeleted:   nf.IsDeleted,
		Password:    nf.Password,
		SegmentSize: nf.SegmentSize,
	}
	// Pre-size the segment slice: the count is known up front, avoiding
	// repeated growth during append.
	if len(nf.Segments) > 0 {
		compact.Segments = make([]CompactSegment, 0, len(nf.Segments))
		for _, seg := range nf.Segments {
			compact.Segments = append(compact.Segments, CompactSegment(seg))
		}
	}
	return compact
}
// toNZB expands the compact file representation back into an NZBFile.
// CompactSegment and NZBSegment share an identical layout, so segments are
// converted with a direct type conversion.
func (compact *CompactFile) toNZB() NZBFile {
	f := NZBFile{
		Name:         compact.Name,
		Size:         compact.Size,
		StartOffset:  compact.Offset,
		IsRarArchive: compact.IsRar,
		Password:     compact.Password,
		IsDeleted:    compact.IsDeleted,
		SegmentSize:  compact.SegmentSize,
	}
	// Pre-size the segment slice: the count is known up front.
	if len(compact.Segments) > 0 {
		f.Segments = make([]NZBSegment, 0, len(compact.Segments))
		for _, seg := range compact.Segments {
			f.Segments = append(f.Segments, NZBSegment(seg))
		}
	}
	return f
}

180
pkg/usenet/usenet.go Normal file
View File

@@ -0,0 +1,180 @@
package usenet
import (
"context"
"fmt"
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/logger"
"github.com/sirrobot01/decypharr/internal/nntp"
"io"
"os"
)
// Usenet interface for usenet operations
type Usenet interface {
Start(ctx context.Context) error
IsReady() chan struct{}
ProcessNZB(ctx context.Context, req *ProcessRequest) (*NZB, error)
GetDownloadByteRange(nzoID string, filename string) (int64, int64, error)
Close()
Logger() zerolog.Logger
Stream(ctx context.Context, nzbID string, filename string, start, end int64, writer io.Writer) error
Store() Store
Client() *nntp.Client
}
// usenet is the concrete implementation of the Usenet interface.
// (Previous comment said "Client implements UsenetClient", which matched
// neither the type name nor the interface name.)
type usenet struct {
	client    *nntp.Client  // pooled NNTP connections to the configured providers
	store     Store         // persisted NZB metadata
	processor *Processor    // ingests new NZBs (see ProcessNZB)
	parser    *NZBParser    // NZB parsing; shares the segment cache with the streamer
	streamer  *Streamer     // serves byte ranges (see Stream)
	cache     *SegmentCache // segment cache handed to both parser and streamer in New
	logger    zerolog.Logger
	ready     chan struct{} // closed by Start once initialization completes
}
// New builds the usenet subsystem from the current configuration.
// It returns nil when no providers are configured or when any component
// fails to construct (failures are logged, not returned).
func New() Usenet {
	cfg := config.Get()
	usenetCfg := cfg.Usenet
	if usenetCfg == nil || len(usenetCfg.Providers) == 0 {
		// Nothing to do without at least one provider.
		return nil
	}
	log := logger.New("usenet")

	client, err := nntp.NewClient(usenetCfg.Providers)
	if err != nil {
		log.Error().Err(err).Msg("Failed to create usenet client")
		return nil
	}

	store := NewStore(cfg, log)
	processor, err := NewProcessor(usenetCfg, log, store, client)
	if err != nil {
		log.Error().Err(err).Msg("Failed to create usenet processor")
		return nil
	}

	// The segment cache is shared by both the parser and the streamer.
	cache := NewSegmentCache(log)

	return &usenet{
		client:    client,
		store:     store,
		processor: processor,
		parser:    NewNZBParser(client, cache, log),
		streamer:  NewStreamer(client, cache, store, usenetCfg.Chunks, log),
		cache:     cache,
		logger:    log,
		ready:     make(chan struct{}),
	}
}
// Start initializes the NNTP pools and loads the persisted store, in that
// order, then signals readiness by closing the ready channel.
func (c *usenet) Start(ctx context.Context) error {
	// Both setup steps share the same log-then-wrap error handling,
	// so run them as a table. Order matters: pools before store.
	steps := []struct {
		run  func() error
		logm string
		wrap string
	}{
		{c.client.InitPools, "Failed to initialize usenet client pools", "failed to initialize usenet client pools"},
		{c.store.Load, "Failed to initialize usenet store", "failed to initialize usenet store"},
	}
	for _, s := range steps {
		if err := s.run(); err != nil {
			c.logger.Error().Err(err).Msg(s.logm)
			return fmt.Errorf(s.wrap+": %w", err)
		}
	}
	// Unblock anyone waiting on IsReady().
	close(c.ready)
	c.logger.Info().Msg("Usenet client initialized")
	return nil
}
// IsReady returns the channel closed by Start once initialization completes.
func (c *usenet) IsReady() chan struct{} {
	return c.ready
}
// Store exposes the backing NZB store.
func (c *usenet) Store() Store {
	return c.store
}
// Client exposes the underlying NNTP client.
func (c *usenet) Client() *nntp.Client {
	return c.client
}
// Logger exposes the component logger.
func (c *usenet) Logger() zerolog.Logger {
	return c.logger
}
// ProcessNZB ingests the NZB described by req via the processor.
func (c *usenet) ProcessNZB(ctx context.Context, req *ProcessRequest) (*NZB, error) {
	return c.processor.Process(ctx, req)
}
// GetNZB retrieves an NZB by ID (nil when not found, per store semantics).
func (c *usenet) GetNZB(nzoID string) *NZB {
	return c.store.Get(nzoID)
}
// DeleteNZB deletes an NZB
func (c *usenet) DeleteNZB(nzoID string) error {
	return c.store.Delete(nzoID)
}
// PauseNZB pauses an NZB download
// NOTE(review): "paused"/"downloading" are magic status strings consumed by
// the store — consider promoting them to named constants.
func (c *usenet) PauseNZB(nzoID string) error {
	return c.store.UpdateStatus(nzoID, "paused")
}
// ResumeNZB resumes an NZB download
func (c *usenet) ResumeNZB(nzoID string) error {
	return c.store.UpdateStatus(nzoID, "downloading")
}
// Close shuts down the usenet subsystem; store-close failures are logged
// rather than returned.
// NOTE(review): the NNTP client pools created in Start are not closed here —
// confirm whether that is handled elsewhere or is an oversight.
func (c *usenet) Close() {
	if c.store != nil {
		if err := c.store.Close(); err != nil {
			c.logger.Error().Err(err).Msg("Failed to close store")
		}
	}
	c.logger.Info().Msg("Usenet client closed")
}
// GetListing returns the file listing of the NZB directory.
// The meaning of `folder` is defined by Store.GetListing (delegated as-is).
func (c *usenet) GetListing(folder string) []os.FileInfo {
	return c.store.GetListing(folder)
}
// GetDownloadByteRange reports a downloaded byte range for the given file.
// NOTE(review): this is currently a stub — it always returns (0, 0, nil)
// regardless of nzoID/filename; confirm whether real progress tracking is
// still pending before relying on these values.
func (c *usenet) GetDownloadByteRange(nzoID string, filename string) (int64, int64, error) {
	// The previous int64(0) conversions were redundant: the untyped zero
	// literal already matches the declared int64 return types.
	return 0, 0, nil
}
// RemoveNZB deletes an NZB from the store, wrapping failures with the ID
// and logging on success.
// NOTE(review): overlaps with DeleteNZB above, which calls store.Delete
// without wrapping or logging — consider consolidating the two.
func (c *usenet) RemoveNZB(nzoID string) error {
	if err := c.store.Delete(nzoID); err != nil {
		return fmt.Errorf("failed to delete NZB %s: %w", nzoID, err)
	}
	c.logger.Info().Msgf("NZB %s deleted successfully", nzoID)
	return nil
}
// Stream streams a file using the new simplified streaming system.
// It resolves the NZB and the named file, then delegates the byte range
// delivery to the streamer.
func (c *usenet) Stream(ctx context.Context, nzbID string, filename string, start, end int64, writer io.Writer) error {
	// Look up the NZB record first.
	item := c.GetNZB(nzbID)
	if item == nil {
		return fmt.Errorf("NZB %s not found", nzbID)
	}
	// Then resolve the requested file inside it.
	target := item.GetFileByName(filename)
	if target == nil {
		return fmt.Errorf("file %s not found in NZB %s", filename, nzbID)
	}
	// Backfill the parent NZB ID when the stored file record lacks it.
	if target.NzbID == "" {
		target.NzbID = nzbID
	}
	return c.streamer.Stream(ctx, target, start, end, writer)
}

File diff suppressed because one or more lines are too long

Binary file not shown.

Binary file not shown.

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

2078
pkg/web/assets/css/bootstrap-icons.css vendored Normal file

File diff suppressed because it is too large Load Diff

Binary file not shown.

After

Width:  |  Height:  |  Size: 29 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 284 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 27 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 665 B

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.6 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 15 KiB

View File

@@ -0,0 +1 @@
{"name":"","short_name":"","icons":[{"src":"/images/favicon/android-chrome-192x192.png","sizes":"192x192","type":"image/png"},{"src":"/images/favicon/android-chrome-512x512.png","sizes":"512x512","type":"image/png"}],"theme_color":"#ffffff","background_color":"#ffffff","display":"standalone"}

File diff suppressed because one or more lines are too long

After

Width:  |  Height:  |  Size: 184 KiB

638
pkg/web/assets/js/common.js Normal file
View File

@@ -0,0 +1,638 @@
// Common utilities and functions
class DecypharrUtils {
// Set up shared state and run all one-time page wiring.
constructor() {
    this.urlBase = window.urlBase || ''; // server-configured URL prefix ('' when served from root)
    this.toastContainer = null; // lazily created in createToastContainer()
    this.init();
}
// Run every setup step exactly once at construction time.
init() {
    this.setupToastSystem();
    this.setupThemeToggle();
    this.setupPasswordToggles();
    this.setupVersionInfo(); // async; intentionally not awaited
    this.setupGlobalEventListeners();
    this.createToastContainer();
}
// Create toast container if it doesn't exist, and remember it on the
// instance. Idempotent: reuses an existing `.toast-container` element.
createToastContainer() {
    let container = document.querySelector('.toast-container');
    if (!container) {
        container = document.createElement('div');
        container.className = 'toast-container fixed bottom-4 right-4 z-50 space-y-2';
        document.body.appendChild(container);
    }
    this.toastContainer = container;
}
// Setup toast system: inject toast CSS and surface uncaught errors /
// unhandled promise rejections as error toasts.
setupToastSystem() {
    // Add toast CSS styles
    this.addToastStyles();
    // Global toast handler for uncaught exceptions.
    window.addEventListener('error', (e) => {
        console.error('Global error:', e.error);
        this.createToast(`Unexpected error: ${e.error?.message || 'Unknown error'}`, 'error');
    });
    // Handle unhandled promise rejections
    window.addEventListener('unhandledrejection', (e) => {
        console.error('Unhandled promise rejection:', e.reason);
        this.createToast(`Promise rejected: ${e.reason?.message || 'Unknown error'}`, 'error');
    });
}
// Add toast styles to document head exactly once (guarded by element id).
addToastStyles() {
    if (document.getElementById('toast-styles')) return;
    const style = document.createElement('style');
    style.id = 'toast-styles';
    style.textContent = `
        @keyframes toastSlideIn {
            from {
                opacity: 0;
                transform: translateX(100%);
            }
            to {
                opacity: 1;
                transform: translateX(0);
            }
        }
        @keyframes toastSlideOut {
            from {
                opacity: 1;
                transform: translateX(0);
            }
            to {
                opacity: 0;
                transform: translateX(100%);
            }
        }
        .toast-container .alert {
            animation: toastSlideIn 0.3s ease-out;
            max-width: 400px;
            word-wrap: break-word;
        }
        .toast-container .alert.toast-closing {
            animation: toastSlideOut 0.3s ease-in forwards;
        }
        @media (max-width: 640px) {
            .toast-container {
                left: 1rem;
                right: 1rem;
                bottom: 1rem;
            }
            .toast-container .alert {
                max-width: none;
            }
        }
    `;
    document.head.appendChild(style);
}
// URL joining utility
joinURL(base, path) {
if (!base.endsWith('/')) base += '/';
if (path.startsWith('/')) path = path.substring(1);
return base + path;
}
// Enhanced fetch wrapper
async fetcher(endpoint, options = {}) {
const url = this.joinURL(this.urlBase, endpoint);
// Handle FormData - don't set Content-Type for FormData
const defaultOptions = {
headers: {},
...options
};
// Only set Content-Type if not FormData
if (!(options.body instanceof FormData)) {
defaultOptions.headers['Content-Type'] = 'application/json';
}
// Merge headers
defaultOptions.headers = {
...defaultOptions.headers,
...options.headers
};
try {
const response = await fetch(url, defaultOptions);
// Add loading state management
if (options.loadingButton) {
this.setButtonLoading(options.loadingButton, false);
}
return response;
} catch (error) {
if (options.loadingButton) {
this.setButtonLoading(options.loadingButton, false);
}
throw error;
}
}
// Enhanced toast system: render a dismissible toast of the given type and
// return its element id. Duration defaults per type; message newlines are
// rendered as <br>.
createToast(message, type = 'success', duration = null) {
    const toastTimeouts = {
        success: 5000,
        warning: 10000,
        error: 15000,
        info: 7000
    };
    // Unknown types fall back to 'success'.
    type = ['success', 'warning', 'error', 'info'].includes(type) ? type : 'success';
    duration = duration || toastTimeouts[type];
    // Ensure toast container exists
    if (!this.toastContainer) {
        this.createToastContainer();
    }
    const toastId = `toast-${Date.now()}-${Math.random().toString(36).substr(2, 9)}`;
    const alertTypeClass = {
        success: 'alert-success',
        warning: 'alert-warning',
        error: 'alert-error',
        info: 'alert-info'
    };
    // Inline SVG paths for the per-type status icon.
    const icons = {
        success: '<path fill-rule="evenodd" d="M10 18a8 8 0 100-16 8 8 0 000 16zm3.707-9.293a1 1 0 00-1.414-1.414L9 10.586 7.707 9.293a1 1 0 00-1.414 1.414l2 2a1 1 0 001.414 0l4-4z" clip-rule="evenodd"></path>',
        error: '<path fill-rule="evenodd" d="M18 10a8 8 0 11-16 0 8 8 0 0116 0zm-7 4a1 1 0 11-2 0 1 1 0 012 0zm-1-9a1 1 0 00-1 1v4a1 1 0 102 0V6a1 1 0 00-1-1z" clip-rule="evenodd"></path>',
        warning: '<path fill-rule="evenodd" d="M8.257 3.099c.765-1.36 2.722-1.36 3.486 0l5.58 9.92c.75 1.334-.213 2.98-1.742 2.98H4.42c-1.53 0-2.493-1.646-1.743-2.98l5.58-9.92zM11 13a1 1 0 11-2 0 1 1 0 012 0zm-1-8a1 1 0 00-1 1v3a1 1 0 002 0V6a1 1 0 00-1-1z" clip-rule="evenodd"></path>',
        info: '<path fill-rule="evenodd" d="M18 10a8 8 0 11-16 0 8 8 0 0116 0zm-7-4a1 1 0 11-2 0 1 1 0 012 0zM9 9a1 1 0 000 2v3a1 1 0 001 1h1a1 1 0 100-2v-3a1 1 0 00-1-1H9z" clip-rule="evenodd"></path>'
    };
    // NOTE(review): `message` is interpolated into HTML without escaping —
    // callers must not pass untrusted text (escapeHtml exists below).
    const toastHtml = `
        <div id="${toastId}" class="alert ${alertTypeClass[type]} shadow-lg mb-2">
            <div class="flex items-start gap-3">
                <svg class="w-6 h-6 shrink-0" fill="currentColor" viewBox="0 0 20 20">
                    ${icons[type]}
                </svg>
                <div class="flex-1">
                    <span class="text-sm">${message.replace(/\n/g, '<br>')}</span>
                </div>
                <button class="btn btn-sm btn-ghost btn-circle" onclick="window.decypharrUtils.closeToast('${toastId}');">
                    <svg class="w-4 h-4" fill="none" stroke="currentColor" viewBox="0 0 24 24">
                        <path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M6 18L18 6M6 6l12 12"></path>
                    </svg>
                </button>
            </div>
        </div>
    `;
    this.toastContainer.insertAdjacentHTML('beforeend', toastHtml);
    // Auto-close toast
    const timeoutId = setTimeout(() => this.closeToast(toastId), duration);
    // Store timeout ID so closeToast can cancel the auto-close.
    const toastElement = document.getElementById(toastId);
    if (toastElement) {
        toastElement.dataset.timeoutId = timeoutId;
    }
    return toastId;
}
// Close one toast by id: cancel its auto-close timer, play the slide-out
// animation, then remove the element (300ms matches the CSS animation).
closeToast(toastId) {
    const toastElement = document.getElementById(toastId);
    if (toastElement) {
        // Clear auto-close timeout
        if (toastElement.dataset.timeoutId) {
            clearTimeout(parseInt(toastElement.dataset.timeoutId));
        }
        toastElement.classList.add('toast-closing');
        setTimeout(() => {
            if (toastElement.parentNode) {
                toastElement.remove();
            }
        }, 300);
    }
}
// Close all toasts currently shown in the container.
closeAllToasts() {
    const toasts = this.toastContainer?.querySelectorAll('.alert');
    if (toasts) {
        toasts.forEach(toast => {
            if (toast.id) {
                this.closeToast(toast.id);
            }
        });
    }
}
// Button loading state management. Accepts an element or a selector/id
// string. While loading, the button is disabled and shows a spinner; the
// original label is stashed in data-original-text and restored afterwards.
setButtonLoading(buttonElement, loading = true, originalText = null) {
    if (typeof buttonElement === 'string') {
        buttonElement = document.getElementById(buttonElement) || document.querySelector(buttonElement);
    }
    if (!buttonElement) return;
    if (loading) {
        buttonElement.disabled = true;
        // Only stash the label once, so repeated loading calls don't
        // overwrite it with the spinner markup.
        if (!buttonElement.dataset.originalText) {
            buttonElement.dataset.originalText = originalText || buttonElement.innerHTML;
        }
        buttonElement.innerHTML = '<span class="loading loading-spinner loading-sm"></span>Processing...';
        buttonElement.classList.add('loading-state');
    } else {
        buttonElement.disabled = false;
        // 'Submit' is the fallback when no original label was recorded.
        buttonElement.innerHTML = buttonElement.dataset.originalText || 'Submit';
        buttonElement.classList.remove('loading-state');
        delete buttonElement.dataset.originalText;
    }
}
// Password field utilities: a single delegated click handler toggles any
// .password-toggle-btn inside a .password-toggle-container.
setupPasswordToggles() {
    document.addEventListener('click', (e) => {
        const toggleBtn = e.target.closest('.password-toggle-btn');
        if (toggleBtn) {
            e.preventDefault();
            e.stopPropagation();
            // Find the associated input field within the same container.
            const container = toggleBtn.closest('.password-toggle-container');
            if (container) {
                const input = container.querySelector('input, textarea');
                const icon = toggleBtn.querySelector('i');
                if (input && icon) {
                    this.togglePasswordField(input, icon);
                }
            }
        }
    });
}
// Toggle visibility of a password control, dispatching on element kind:
// <input> flips its type; <textarea> uses the text-security CSS trick.
togglePasswordField(field, icon) {
    if (!icon) return;
    if (field.tagName.toLowerCase() === 'textarea') {
        this.togglePasswordTextarea(field, icon);
    } else {
        this.togglePasswordInput(field, icon);
    }
}
// Flip an <input> between password and text, updating the eye icon.
togglePasswordInput(field, icon) {
    if (field.type === 'password') {
        field.type = 'text';
        icon.className = 'bi bi-eye-slash';
    } else {
        field.type = 'password';
        icon.className = 'bi bi-eye';
    }
}
// Textareas have no password type, so mask/unmask via -webkit-text-security
// and track state in data-password-visible.
togglePasswordTextarea(field, icon) {
    const isHidden = field.style.webkitTextSecurity === 'disc' ||
        field.style.webkitTextSecurity === '' ||
        field.getAttribute('data-password-visible') !== 'true';
    if (isHidden) {
        field.style.webkitTextSecurity = 'none';
        field.style.textSecurity = 'none';
        field.setAttribute('data-password-visible', 'true');
        icon.className = 'bi bi-eye-slash';
    } else {
        field.style.webkitTextSecurity = 'disc';
        field.style.textSecurity = 'disc';
        field.setAttribute('data-password-visible', 'false');
        icon.className = 'bi bi-eye';
    }
}
// Legacy methods for backward compatibility
togglePassword(fieldId) {
const field = document.getElementById(fieldId);
const button = field?.closest('.password-toggle-container')?.querySelector('.password-toggle-btn');
let icon = button.querySelector("i");
if (field && icon) {
this.togglePasswordField(field, icon);
}
}
// Theme management: sync the #themeToggle checkbox, the data-theme
// attribute, localStorage, and the OS preference.
setupThemeToggle() {
    const themeToggle = document.getElementById('themeToggle');
    const htmlElement = document.documentElement;
    if (!themeToggle) return;
    // Apply a theme everywhere and announce it via a 'themechange' event.
    const setTheme = (theme) => {
        htmlElement.setAttribute('data-theme', theme);
        localStorage.setItem('theme', theme);
        themeToggle.checked = theme === 'dark';
        // Smooth theme transition (removed again after 300ms).
        document.body.style.transition = 'background-color 0.3s ease, color 0.3s ease';
        setTimeout(() => {
            document.body.style.transition = '';
        }, 300);
        // Emit theme change event
        window.dispatchEvent(new CustomEvent('themechange', { detail: { theme } }));
    };
    // Load saved theme; fall back to the OS preference, then to light.
    const savedTheme = localStorage.getItem('theme');
    if (savedTheme) {
        setTheme(savedTheme);
    } else if (window.matchMedia?.('(prefers-color-scheme: dark)').matches) {
        setTheme('dark');
    } else {
        setTheme('light');
    }
    // Toggle flips whatever theme is currently applied.
    themeToggle.addEventListener('change', () => {
        const currentTheme = htmlElement.getAttribute('data-theme');
        setTheme(currentTheme === 'dark' ? 'light' : 'dark');
    });
    // Follow system theme changes only while the user has no explicit choice.
    if (window.matchMedia) {
        window.matchMedia('(prefers-color-scheme: dark)').addEventListener('change', e => {
            if (!localStorage.getItem('theme')) {
                setTheme(e.matches ? 'dark' : 'light');
            }
        });
    }
}
// Version info: fetch /version and render channel-version into
// #version-badge, coloring the badge by release channel. Shows 'Unknown'
// on any failure.
async setupVersionInfo() {
    try {
        const response = await this.fetcher('/version');
        if (!response.ok) throw new Error('Failed to fetch version');
        const data = await response.json();
        const versionBadge = document.getElementById('version-badge');
        if (versionBadge) {
            versionBadge.innerHTML = `
                <a href="https://github.com/sirrobot01/decypharr/releases/tag/${data.version}"
                   target="_blank"
                   class="text-current hover:text-primary transition-colors">
                    ${data.channel}-${data.version}
                </a>
            `;
            // Remove existing badge classes before applying the channel color.
            versionBadge.classList.remove('badge-warning', 'badge-error', 'badge-ghost');
            if (data.channel === 'beta') {
                versionBadge.classList.add('badge-warning');
            } else if (data.channel === 'experimental') {
                versionBadge.classList.add('badge-error');
            }
        }
    } catch (error) {
        console.error('Error fetching version:', error);
        const versionBadge = document.getElementById('version-badge');
        if (versionBadge) {
            versionBadge.textContent = 'Unknown';
            versionBadge.classList.add('badge-ghost');
        }
    }
}
// Global event listeners: anchor smooth-scroll, form validation styling,
// keyboard shortcuts, page-visibility events, and online/offline toasts.
setupGlobalEventListeners() {
    // Smooth scroll for same-page anchor links (excluding bare '#').
    document.addEventListener('click', (e) => {
        const link = e.target.closest('a[href^="#"]');
        if (link && link.getAttribute('href') !== '#') {
            e.preventDefault();
            const target = document.querySelector(link.getAttribute('href'));
            if (target) {
                target.scrollIntoView({ behavior: 'smooth', block: 'start' });
            }
        }
    });
    // Flash invalid inputs for 3s (capture phase: 'invalid' doesn't bubble).
    document.addEventListener('invalid', (e) => {
        e.target.classList.add('input-error');
        setTimeout(() => e.target.classList.remove('input-error'), 3000);
    }, true);
    // Keyboard shortcuts
    document.addEventListener('keydown', (e) => {
        // Escape key closes modals and dropdowns
        if (e.key === 'Escape') {
            // Close modals
            document.querySelectorAll('.modal[open]').forEach(modal => modal.close());
            // Close dropdowns
            document.querySelectorAll('.dropdown-open').forEach(dropdown => {
                dropdown.classList.remove('dropdown-open');
            });
            // Close context menus
            document.querySelectorAll('.context-menu:not(.hidden)').forEach(menu => {
                menu.classList.add('hidden');
            });
        }
        // Ctrl/Cmd + / opens the shortcuts help modal.
        if ((e.ctrlKey || e.metaKey) && e.key === '/') {
            e.preventDefault();
            this.showKeyboardShortcuts();
        }
    });
    // Broadcast visibility changes so pages can pause/resume auto-refresh.
    document.addEventListener('visibilitychange', () => {
        if (document.hidden) {
            // Page is hidden - pause auto-refresh timers if any
            window.dispatchEvent(new CustomEvent('pageHidden'));
        } else {
            // Page is visible - resume auto-refresh timers if any
            window.dispatchEvent(new CustomEvent('pageVisible'));
        }
    });
    // Handle online/offline status
    window.addEventListener('online', () => {
        this.createToast('Connection restored', 'success');
    });
    window.addEventListener('offline', () => {
        this.createToast('Connection lost - working offline', 'warning');
    });
}
// Show keyboard shortcuts modal; the dialog removes itself on close.
showKeyboardShortcuts() {
    const shortcuts = [
        { key: 'Esc', description: 'Close modals and dropdowns' },
        { key: 'Ctrl + /', description: 'Show this help' },
        { key: 'Ctrl + R', description: 'Refresh page' }
    ];
    const modal = document.createElement('dialog');
    modal.className = 'modal';
    modal.innerHTML = `
        <div class="modal-box">
            <form method="dialog">
                <button class="btn btn-sm btn-circle btn-ghost absolute right-2 top-2">✕</button>
            </form>
            <h3 class="font-bold text-lg mb-4">Keyboard Shortcuts</h3>
            <div class="space-y-2">
                ${shortcuts.map(shortcut => `
                    <div class="flex justify-between items-center">
                        <span class="kbd kbd-sm">${shortcut.key}</span>
                        <span class="text-sm">${shortcut.description}</span>
                    </div>
                `).join('')}
            </div>
        </div>
    `;
    document.body.appendChild(modal);
    modal.showModal();
    modal.addEventListener('close', () => {
        document.body.removeChild(modal);
    });
}
// Utility methods
formatBytes(bytes) {
if (!bytes || bytes === 0) return '0 B';
const k = 1024;
const sizes = ['B', 'KB', 'MB', 'GB', 'TB', 'PB'];
const i = Math.floor(Math.log(bytes) / Math.log(k));
return `${parseFloat((bytes / Math.pow(k, i)).toFixed(2))} ${sizes[i]}`;
}
formatSpeed(speed) {
return `${this.formatBytes(speed)}/s`;
}
formatDuration(seconds) {
if (!seconds || seconds === 0) return '0s';
const units = [
{ label: 'd', seconds: 86400 },
{ label: 'h', seconds: 3600 },
{ label: 'm', seconds: 60 },
{ label: 's', seconds: 1 }
];
const parts = [];
let remaining = seconds;
for (const unit of units) {
const count = Math.floor(remaining / unit.seconds);
if (count > 0) {
parts.push(`${count}${unit.label}`);
remaining %= unit.seconds;
}
}
return parts.slice(0, 2).join(' ') || '0s';
}
// Debounce function
debounce(func, wait, immediate = false) {
let timeout;
return function executedFunction(...args) {
const later = () => {
timeout = null;
if (!immediate) func(...args);
};
const callNow = immediate && !timeout;
clearTimeout(timeout);
timeout = setTimeout(later, wait);
if (callNow) func(...args);
};
}
// Throttle function
throttle(func, limit) {
let inThrottle;
return function(...args) {
if (!inThrottle) {
func.apply(this, args);
inThrottle = true;
setTimeout(() => inThrottle = false, limit);
}
};
}
// Copy to clipboard utility; shows a toast either way and returns whether
// the copy succeeded. Requires the async Clipboard API (secure context).
async copyToClipboard(text) {
    try {
        await navigator.clipboard.writeText(text);
        this.createToast('Copied to clipboard', 'success');
        return true;
    } catch (error) {
        console.error('Failed to copy to clipboard:', error);
        this.createToast('Failed to copy to clipboard', 'error');
        return false;
    }
}
// Validate URL by letting the URL constructor parse it.
isValidUrl(string) {
    try {
        new URL(string);
        return true;
    } catch (_) {
        return false;
    }
}
// Escape HTML
escapeHtml(text) {
const map = {
'&': '&amp;',
'<': '&lt;',
'>': '&gt;',
'"': '&quot;',
"'": '&#039;'
};
return text ? text.replace(/[&<>"']/g, (m) => map[m]) : '';
}
// Get current theme from the <html> data-theme attribute ('light' default).
getCurrentTheme() {
    return document.documentElement.getAttribute('data-theme') || 'light';
}
// Network status as reported by the browser.
isOnline() {
    return navigator.onLine;
}
}
// Initialize utilities as a page-wide singleton.
window.decypharrUtils = new DecypharrUtils();
// Global functions for backward compatibility with older inline scripts.
window.fetcher = (endpoint, options = {}) => window.decypharrUtils.fetcher(endpoint, options);
window.createToast = (message, type, duration) => window.decypharrUtils.createToast(message, type, duration);
// Export for CommonJS consumers (e.g. tests); no-op in the browser.
if (typeof module !== 'undefined' && module.exports) {
    module.exports = DecypharrUtils;
}

1463
pkg/web/assets/js/config.js Normal file

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,384 @@
// Download page functionality
class DownloadManager {
// Cache every DOM element the page uses and kick off initialization.
// `downloadFolder` is the server-side default used when nothing is saved.
constructor(downloadFolder) {
    this.downloadFolder = downloadFolder;
    this.currentMode = 'torrent'; // Default mode
    // One lookup per element; all later code goes through this.refs.
    this.refs = {
        downloadForm: document.getElementById('downloadForm'),
        // Mode controls
        torrentMode: document.getElementById('torrentMode'),
        nzbMode: document.getElementById('nzbMode'),
        // Torrent inputs
        magnetURI: document.getElementById('magnetURI'),
        torrentFiles: document.getElementById('torrentFiles'),
        torrentInputs: document.getElementById('torrentInputs'),
        // NZB inputs
        nzbURLs: document.getElementById('nzbURLs'),
        nzbFiles: document.getElementById('nzbFiles'),
        nzbInputs: document.getElementById('nzbInputs'),
        // Common form elements
        arr: document.getElementById('arr'),
        downloadAction: document.getElementById('downloadAction'),
        downloadUncached: document.getElementById('downloadUncached'),
        downloadFolder: document.getElementById('downloadFolder'),
        downloadFolderHint: document.getElementById('downloadFolderHint'),
        debrid: document.getElementById('debrid'),
        submitBtn: document.getElementById('submitDownload'),
        submitButtonText: document.getElementById('submitButtonText'),
        activeCount: document.getElementById('activeCount'),
        completedCount: document.getElementById('completedCount'),
        totalSize: document.getElementById('totalSize')
    };
    this.init();
}
// Run once at construction: restore saved options, wire events, then apply
// URL parameters (magnet link and mode) last so they win over saved state.
init() {
    this.loadSavedOptions();
    this.bindEvents();
    this.handleMagnetFromURL();
    this.loadModeFromURL();
}
// Attach all event handlers: submit, mode switching, option persistence,
// file-selection feedback, and drag & drop.
bindEvents() {
    // Form submission
    this.refs.downloadForm.addEventListener('submit', (e) => this.handleSubmit(e));
    // Mode switching
    this.refs.torrentMode.addEventListener('click', () => this.switchMode('torrent'));
    this.refs.nzbMode.addEventListener('click', () => this.switchMode('nzb'));
    // Persist options whenever any of them changes.
    this.refs.arr.addEventListener('change', () => this.saveOptions());
    this.refs.downloadAction.addEventListener('change', () => this.saveOptions());
    this.refs.downloadUncached.addEventListener('change', () => this.saveOptions());
    this.refs.downloadFolder.addEventListener('change', () => this.saveOptions());
    // File input enhancement
    this.refs.torrentFiles.addEventListener('change', (e) => this.handleFileSelection(e));
    this.refs.nzbFiles.addEventListener('change', (e) => this.handleFileSelection(e));
    // Drag and drop
    this.setupDragAndDrop();
}
loadSavedOptions() {
const savedOptions = {
category: localStorage.getItem('downloadCategory') || '',
action: localStorage.getItem('downloadAction') || 'symlink',
uncached: localStorage.getItem('downloadUncached') === 'true',
folder: localStorage.getItem('downloadFolder') || this.downloadFolder,
mode: localStorage.getItem('downloadMode') || 'torrent'
};
this.refs.arr.value = savedOptions.category;
this.refs.downloadAction.value = savedOptions.action;
this.refs.downloadUncached.checked = savedOptions.uncached;
this.refs.downloadFolder.value = savedOptions.folder;
this.currentMode = savedOptions.mode;
}
saveOptions() {
localStorage.setItem('downloadCategory', this.refs.arr.value);
localStorage.setItem('downloadAction', this.refs.downloadAction.value);
localStorage.setItem('downloadUncached', this.refs.downloadUncached.checked.toString());
localStorage.setItem('downloadFolder', this.refs.downloadFolder.value);
localStorage.setItem('downloadMode', this.currentMode);
}
// Pre-fill the magnet textarea from a ?magnet= query parameter (used by
// magnet-link handlers), then strip the parameter from the address bar.
handleMagnetFromURL() {
    const urlParams = new URLSearchParams(window.location.search);
    const magnetURI = urlParams.get('magnet');
    if (magnetURI) {
        this.refs.magnetURI.value = magnetURI;
        history.replaceState({}, document.title, window.location.pathname);
        // Show notification
        window.decypharrUtils.createToast('Magnet link loaded from URL', 'info');
    }
}
// Submit the form for the active mode: collect URLs/files, validate the
// count (1..100), POST multipart form data to the mode's endpoint, and
// report full/partial success or failure via toasts.
async handleSubmit(e) {
    e.preventDefault();
    const formData = new FormData();
    let urls = [];
    let files = [];
    let endpoint = '/api/add'; // torrent endpoint; overridden for NZB below
    let itemType = 'torrent';
    if (this.currentMode === 'torrent') {
        // Get torrent URLs, one per line, ignoring blanks.
        urls = this.refs.magnetURI.value
            .split('\n')
            .map(url => url.trim())
            .filter(url => url.length > 0);
        if (urls.length > 0) {
            formData.append('urls', urls.join('\n'));
        }
        // Get torrent files
        for (let i = 0; i < this.refs.torrentFiles.files.length; i++) {
            formData.append('files', this.refs.torrentFiles.files[i]);
            files.push(this.refs.torrentFiles.files[i]);
        }
    } else if (this.currentMode === 'nzb') {
        // Get NZB URLs
        urls = this.refs.nzbURLs.value
            .split('\n')
            .map(url => url.trim())
            .filter(url => url.length > 0);
        if (urls.length > 0) {
            formData.append('nzbUrls', urls.join('\n'));
        }
        // Get NZB files
        for (let i = 0; i < this.refs.nzbFiles.files.length; i++) {
            formData.append('nzbFiles', this.refs.nzbFiles.files[i]);
            files.push(this.refs.nzbFiles.files[i]);
        }
        endpoint = '/api/nzbs/add';
        itemType = 'NZB';
    }
    // Validation: require at least one item, cap batches at 100.
    const totalItems = urls.length + files.length;
    if (totalItems === 0) {
        window.decypharrUtils.createToast(`Please provide at least one ${itemType}`, 'warning');
        return;
    }
    if (totalItems > 100) {
        window.decypharrUtils.createToast(`Please submit up to 100 ${itemType}s at a time`, 'warning');
        return;
    }
    // Add other form data
    formData.append('arr', this.refs.arr.value);
    formData.append('downloadFolder', this.refs.downloadFolder.value);
    formData.append('action', this.refs.downloadAction.value);
    formData.append('downloadUncached', this.refs.downloadUncached.checked);
    if (this.refs.debrid) {
        formData.append('debrid', this.refs.debrid.value);
    }
    try {
        // Set loading state
        window.decypharrUtils.setButtonLoading(this.refs.submitBtn, true);
        const response = await window.decypharrUtils.fetcher(endpoint, {
            method: 'POST',
            body: formData,
            headers: {} // Remove Content-Type to let browser set it for FormData
        });
        // NOTE(review): .json() runs before the ok-check — a non-JSON error
        // response would surface as a parse error; confirm the API always
        // returns JSON.
        const result = await response.json();
        if (!response.ok) {
            throw new Error(result.error || 'Unknown error');
        }
        // Handle partial success: some items added, some rejected.
        if (result.errors && result.errors.length > 0) {
            console.log(result.errors);
            let errorMessage = ` ${result.errors.join('\n')}`;
            if (result.results.length > 0) {
                window.decypharrUtils.createToast(
                    `Added ${result.results.length} ${itemType}s with ${result.errors.length} errors \n${errorMessage}`,
                    'warning'
                );
            } else {
                window.decypharrUtils.createToast(`Failed to add ${itemType}s \n${errorMessage}`, 'error');
            }
        } else {
            window.decypharrUtils.createToast(
                `Successfully added ${result.results.length} ${itemType}${result.results.length > 1 ? 's' : ''}!`
            );
            this.clearForm();
        }
    } catch (error) {
        console.error('Error adding downloads:', error);
        window.decypharrUtils.createToast(`Error adding downloads: ${error.message}`, 'error');
    } finally {
        window.decypharrUtils.setButtonLoading(this.refs.submitBtn, false);
    }
}
switchMode(mode) {
this.currentMode = mode;
this.saveOptions();
this.updateURL(mode);
// Update button states
if (mode === 'torrent') {
this.refs.torrentMode.classList.remove('btn-outline');
this.refs.torrentMode.classList.add('btn-primary');
this.refs.nzbMode.classList.remove('btn-primary');
this.refs.nzbMode.classList.add('btn-outline');
// Show/hide sections
this.refs.torrentInputs.classList.remove('hidden');
this.refs.nzbInputs.classList.add('hidden');
// Update UI text
this.refs.submitButtonText.textContent = 'Add to Download Queue';
this.refs.downloadFolderHint.textContent = 'Leave empty to use default qBittorrent folder';
} else {
this.refs.nzbMode.classList.remove('btn-outline');
this.refs.nzbMode.classList.add('btn-primary');
this.refs.torrentMode.classList.remove('btn-primary');
this.refs.torrentMode.classList.add('btn-outline');
// Show/hide sections
this.refs.nzbInputs.classList.remove('hidden');
this.refs.torrentInputs.classList.add('hidden');
// Update UI text
this.refs.submitButtonText.textContent = 'Add to NZB Queue';
this.refs.downloadFolderHint.textContent = 'Leave empty to use default SABnzbd folder';
}
}
clearForm() {
if (this.currentMode === 'torrent') {
this.refs.magnetURI.value = '';
this.refs.torrentFiles.value = '';
} else {
this.refs.nzbURLs.value = '';
this.refs.nzbFiles.value = '';
}
}
handleFileSelection(e) {
const files = e.target.files;
if (files.length > 0) {
const fileNames = Array.from(files).map(f => f.name).join(', ');
window.decypharrUtils.createToast(
`Selected ${files.length} file${files.length > 1 ? 's' : ''}: ${fileNames}`,
'info'
);
}
}
// Turn the whole form into a drop zone: suppress browser defaults, show a
// highlight while dragging over it, and route drops to handleDrop.
setupDragAndDrop() {
    const dropZone = this.refs.downloadForm;
    ['dragenter', 'dragover', 'dragleave', 'drop'].forEach(eventName => {
        dropZone.addEventListener(eventName, this.preventDefaults, false);
    });
    ['dragenter', 'dragover'].forEach(eventName => {
        dropZone.addEventListener(eventName, () => this.highlight(dropZone), false);
    });
    ['dragleave', 'drop'].forEach(eventName => {
        dropZone.addEventListener(eventName, () => this.unhighlight(dropZone), false);
    });
    dropZone.addEventListener('drop', (e) => this.handleDrop(e), false);
}
// Stop the browser from navigating to dropped files.
preventDefaults(e) {
    e.preventDefault();
    e.stopPropagation();
}
// Visual cue while a drag hovers over the drop zone.
highlight(element) {
    element.classList.add('border-primary', 'border-2', 'border-dashed', 'bg-primary/5');
}
unhighlight(element) {
    element.classList.remove('border-primary', 'border-2', 'border-dashed', 'bg-primary/5');
}
handleDrop(e) {
const dt = e.dataTransfer;
const files = dt.files;
if (this.currentMode === 'torrent') {
// Filter for .torrent files
const torrentFiles = Array.from(files).filter(file =>
file.name.toLowerCase().endsWith('.torrent')
);
if (torrentFiles.length > 0) {
// Create a new FileList-like object
const dataTransfer = new DataTransfer();
torrentFiles.forEach(file => dataTransfer.items.add(file));
this.refs.torrentFiles.files = dataTransfer.files;
this.handleFileSelection({ target: { files: torrentFiles } });
} else {
window.decypharrUtils.createToast('Please drop .torrent files only', 'warning');
}
} else {
// Filter for .nzb files
const nzbFiles = Array.from(files).filter(file =>
file.name.toLowerCase().endsWith('.nzb')
);
if (nzbFiles.length > 0) {
// Create a new FileList-like object
const dataTransfer = new DataTransfer();
nzbFiles.forEach(file => dataTransfer.items.add(file));
this.refs.nzbFiles.files = dataTransfer.files;
this.handleFileSelection({ target: { files: nzbFiles } });
} else {
window.decypharrUtils.createToast('Please drop .nzb files only', 'warning');
}
}
}
// Apply the ?mode= query parameter if present ('nzb' or 'torrent'),
// otherwise keep the saved/default mode, then paint the UI without
// rewriting the URL.
loadModeFromURL() {
    const urlParams = new URLSearchParams(window.location.search);
    const mode = urlParams.get('mode');
    if (mode === 'nzb' || mode === 'torrent') {
        this.currentMode = mode;
    } else {
        this.currentMode = this.currentMode || 'torrent'; // Use saved preference or default
    }
    // Initialize the mode without updating URL again
    this.setModeUI(this.currentMode);
}
setModeUI(mode) {
if (mode === 'torrent') {
this.refs.torrentMode.classList.remove('btn-outline');
this.refs.torrentMode.classList.add('btn-primary');
this.refs.nzbMode.classList.remove('btn-primary');
this.refs.nzbMode.classList.add('btn-outline');
this.refs.torrentInputs.classList.remove('hidden');
this.refs.nzbInputs.classList.add('hidden');
this.refs.submitButtonText.textContent = 'Add to Download Queue';
this.refs.downloadFolderHint.textContent = 'Leave empty to use default qBittorrent folder';
} else {
this.refs.nzbMode.classList.remove('btn-outline');
this.refs.nzbMode.classList.add('btn-primary');
this.refs.torrentMode.classList.remove('btn-primary');
this.refs.torrentMode.classList.add('btn-outline');
this.refs.nzbInputs.classList.remove('hidden');
this.refs.torrentInputs.classList.add('hidden');
this.refs.submitButtonText.textContent = 'Add to NZB Queue';
this.refs.downloadFolderHint.textContent = 'Leave empty to use default SABnzbd folder';
}
}
updateURL(mode) {
const url = new URL(window.location);
url.searchParams.set('mode', mode);
window.history.replaceState({}, '', url);
}
}

1103
pkg/web/assets/js/repair.js Normal file

File diff suppressed because it is too large Load Diff

693
pkg/web/assets/styles.css Normal file
View File

@@ -0,0 +1,693 @@
@import './css/bootstrap-icons.css';
@tailwind base;
@tailwind components;
@tailwind utilities;
/* Global transition on EVERY element for transform/opacity/shadow/border/
   background changes. NOTE(review): a universal `*` transition is a known
   performance risk on large DOMs and causes unwanted cross-fades on theme
   switches — consider scoping this to the components that need it. */
* {
transition: transform 0.2s ease-in-out,
opacity 0.2s ease-in-out,
box-shadow 0.2s ease-in-out,
border-color 0.2s ease-in-out,
background-color 0.2s ease-in-out;
}
:root {
--warning-color: rgb(245 158 11); /* amber-500 */
}
[data-theme="dark"] {
--warning-color: rgb(251 191 36); /* amber-400 - lighter for dark theme */
}
/* Context menu styles */
.context-menu {
position: absolute;
z-index: 1000;
backdrop-filter: blur(8px);
border: 1px solid hsl(var(--bc) / 0.2);
box-shadow: 0 20px 25px -5px rgba(0, 0, 0, 0.1), 0 10px 10px -5px rgba(0, 0, 0, 0.04);
}
.context-menu.hidden {
opacity: 0;
pointer-events: none;
transform: scale(0.95) translateY(-5px);
}
.context-menu:not(.hidden) {
opacity: 1;
pointer-events: all;
transform: scale(1) translateY(0);
animation: contextMenuAppear 0.15s ease-out;
}
@keyframes contextMenuAppear {
from {
opacity: 0;
transform: scale(0.95) translateY(-5px);
}
to {
opacity: 1;
transform: scale(1) translateY(0);
}
}
/* Smooth progress bar animations */
.progress {
transition: all 0.3s ease-in-out;
}
.progress::-webkit-progress-value {
transition: width 0.5s ease-in-out;
}
.progress::-moz-progress-bar {
transition: width 0.5s ease-in-out;
}
/* Enhanced button animations */
.btn {
transition: all 0.2s ease-in-out;
transform: translateY(0);
}
.btn:hover:not(:disabled) {
transform: translateY(-1px);
box-shadow: 0 4px 12px rgba(0, 0, 0, 0.15);
}
.btn:active:not(:disabled) {
transform: translateY(0);
transition: all 0.1s ease-in-out;
}
/* Card hover effects */
.card {
transition: all 0.3s ease-in-out;
}
.card:hover {
transform: translateY(-2px);
box-shadow: 0 8px 25px rgba(0, 0, 0, 0.15);
}
/* Table row animations */
.table tbody tr {
transition: all 0.2s ease-in-out;
}
.table tbody tr:hover {
background-color: hsl(var(--b2));
transform: scale(1.005);
}
/* Item selection styles */
.item-row.selected {
background-color: hsl(var(--p) / 0.1) !important;
border-left: 4px solid hsl(var(--p));
}
.item-row {
cursor: pointer;
transition: all 0.2s ease-in-out;
}
/* Stepper navigation */
.stepper-nav .nav-link {
transition: all 0.2s ease-in-out;
position: relative;
overflow: hidden;
}
.stepper-nav .nav-link::before {
content: '';
position: absolute;
top: 0;
left: -100%;
width: 100%;
height: 100%;
background: linear-gradient(90deg, transparent, rgba(255, 255, 255, 0.2), transparent);
transition: left 0.5s ease-in-out;
}
.stepper-nav .nav-link:hover::before {
left: 100%;
}
/* Modal animations */
.modal {
transition: all 0.3s ease-in-out;
}
.modal-box {
animation: modalSlideIn 0.3s ease-out;
}
@keyframes modalSlideIn {
from {
opacity: 0;
transform: scale(0.9) translateY(-20px);
}
to {
opacity: 1;
transform: scale(1) translateY(0);
}
}
/* Toast animations */
.toast-container .alert {
animation: toastSlideIn 0.3s ease-out;
}
@keyframes toastSlideIn {
from {
opacity: 0;
transform: translateX(100%);
}
to {
opacity: 1;
transform: translateX(0);
}
}
/* Loading spinner improvements */
.loading {
transition: all 0.2s ease-in-out;
}
/* Badge animations */
.badge {
transition: all 0.2s ease-in-out;
}
.badge:hover {
transform: scale(1.05);
}
/* Form focus improvements */
.form-control input:focus,
.form-control textarea:focus,
.form-control select:focus {
transform: scale(1.02);
box-shadow: 0 0 0 3px hsl(var(--p) / 0.2);
}
/* Pagination smooth transitions */
.join .btn {
transition: all 0.2s ease-in-out;
}
.join .btn:not(.btn-active):hover {
background-color: hsl(var(--b3));
transform: translateY(-1px);
}
/* Password Toggle */
/* Password toggle styles */
.password-toggle-container {
position: relative;
display: flex;
align-items: center;
}
.password-toggle-btn {
position: absolute;
right: 0;
top: 0;
bottom: 0;
width: 40px;
background: none;
border: none;
cursor: pointer;
z-index: 10;
color: hsl(var(--bc) / 0.6);
transition: all 0.2s ease-in-out;
display: flex;
align-items: center;
justify-content: center;
border-top-right-radius: var(--rounded-btn, 0.5rem);
border-bottom-right-radius: var(--rounded-btn, 0.5rem);
}
.password-toggle-btn:hover {
background-color: hsl(var(--bc) / 0.1);
color: hsl(var(--bc) / 0.8);
}
/* Input password fields - make room for the button */
.input.input-has-toggle {
padding-right: 40px !important;
width: 100%;
}
/* Textarea password fields */
.textarea.has-toggle {
padding-right: 40px !important;
font-family: 'JetBrains Mono', 'Consolas', 'Monaco', 'Courier New', monospace !important;
line-height: 1.4;
resize: vertical;
}
/* Specific positioning for textarea toggles */
.password-toggle-btn.textarea-toggle {
top: 0;
right: 0;
bottom: auto;
height: 40px;
border-radius: 0;
border-top-right-radius: var(--rounded-btn, 0.5rem);
}
/* Better masking for textareas */
/* Masks textarea contents like a password field when the toggle says
   "hidden". NOTE(review): `-webkit-text-security` is non-standard
   (WebKit/Blink only) and `text-security` is not a recognized CSS property,
   so Firefox will show the text unmasked — confirm whether that is
   acceptable. The monospace font + letter-spacing keeps the masking dots
   evenly spaced; both are reverted when data-password-visible="true". */
.textarea.has-toggle[data-password-visible="false"],
.textarea.has-toggle:not([data-password-visible="true"]) {
-webkit-text-security: disc;
text-security: disc;
font-family: monospace !important;
letter-spacing: 2px;
}
.textarea.has-toggle[data-password-visible="true"] {
-webkit-text-security: none;
text-security: none;
font-family: 'JetBrains Mono', 'Consolas', 'Monaco', 'Courier New', monospace !important;
letter-spacing: normal;
}
/* Input password field styling */
.input[type="password"] {
font-family: 'Courier New', monospace;
letter-spacing: 1px;
}
.input[type="text"].was-password {
font-family: 'JetBrains Mono', 'Consolas', 'Monaco', 'Courier New', monospace;
letter-spacing: normal;
}
/* Dark mode button styling */
[data-theme="dark"] .password-toggle-btn {
color: hsl(var(--bc) / 0.6);
background-color: transparent;
}
[data-theme="dark"] .password-toggle-btn:hover {
background-color: hsl(var(--bc) / 0.15);
color: hsl(var(--bc) / 0.9);
}
/* Light mode button styling */
[data-theme="light"] .password-toggle-btn {
color: hsl(var(--bc) / 0.7);
background-color: transparent;
}
[data-theme="light"] .password-toggle-btn:hover {
background-color: hsl(var(--bc) / 0.1);
color: hsl(var(--bc) / 0.9);
}
/* Icon sizing */
.password-toggle-btn i {
font-size: 14px;
line-height: 1;
}
/* Make sure the container fills the width */
.password-toggle-container {
width: 100%;
}
.password-toggle-container .input,
.password-toggle-container .textarea {
width: 100%;
}
/* Drawer animations */
.drawer-side {
transition: transform 0.3s ease-in-out;
}
/* Dropdown animations */
.dropdown-content {
animation: dropdownSlideIn 0.2s ease-out;
}
@keyframes dropdownSlideIn {
from {
opacity: 0;
transform: scale(0.95) translateY(-10px);
}
to {
opacity: 1;
transform: scale(1) translateY(0);
}
}
/* Enhanced filter UI */
.filters-container .filter-item {
animation: filterItemAppear 0.2s ease-out;
transition: all 0.2s ease-in-out;
}
@keyframes filterItemAppear {
from {
opacity: 0;
transform: translateX(-10px);
}
to {
opacity: 1;
transform: translateX(0);
}
}
.filter-item:hover {
background-color: hsl(var(--b2));
border-radius: 4px;
padding: 4px;
margin: -4px;
}
/* Dark mode specific improvements */
[data-theme="dark"] .glass {
background: rgba(255, 255, 255, 0.05);
backdrop-filter: blur(10px);
border: 1px solid rgba(255, 255, 255, 0.1);
}
/* Fix input text colors in dark mode */
[data-theme="dark"] .input,
[data-theme="dark"] .textarea,
[data-theme="dark"] .select {
color: #ffffff !important; /* Force white text for values */
background-color: hsl(var(--b1));
}
/* Keep placeholders dim and distinguishable */
[data-theme="dark"] .input::placeholder,
[data-theme="dark"] .textarea::placeholder {
color: #6b7280 !important; /* Gray-500 equivalent */
opacity: 1 !important;
font-style: italic;
}
/* Focus states */
[data-theme="dark"] .input:focus,
[data-theme="dark"] .textarea:focus,
[data-theme="dark"] .select:focus {
color: #ffffff !important;
background-color: hsl(var(--b1));
border-color: hsl(var(--p));
outline: none;
box-shadow: 0 0 0 2px hsl(var(--p) / 0.2);
}
/* Select options */
[data-theme="dark"] .select option {
background-color: hsl(var(--b1));
color: #ffffff;
}
/* Disabled inputs */
[data-theme="dark"] .input:disabled,
[data-theme="dark"] .textarea:disabled,
[data-theme="dark"] .select:disabled {
color: #9ca3af !important; /* Gray-400 */
background-color: hsl(var(--b2));
border-color: hsl(var(--bc) / 0.2);
}
[data-theme="dark"] .input:disabled::placeholder,
[data-theme="dark"] .textarea:disabled::placeholder {
color: #6b7280 !important;
}
/* Readonly inputs (like auto-detected configs) */
[data-theme="dark"] .input[readonly],
[data-theme="dark"] .textarea[readonly],
[data-theme="dark"] .select[readonly] {
color: #d1d5db !important; /* Gray-300 */
background-color: hsl(var(--b2));
border-color: hsl(var(--bc) / 0.15);
}
/* Password fields with monospace */
[data-theme="dark"] .password-toggle-container .input {
color: #ffffff !important;
}
[data-theme="dark"] .password-toggle-container .textarea {
color: #ffffff !important;
font-family: 'JetBrains Mono', 'Consolas', 'Monaco', 'Courier New', monospace;
}
/* Form labels */
[data-theme="dark"] .label-text {
color: #f3f4f6; /* Gray-100 */
}
[data-theme="dark"] .label-text-alt {
color: #9ca3af; /* Gray-400 */
}
.text-warning,
[data-theme="dark"] .text-warning,
[data-theme="dark"] .label-text-alt.text-warning {
color: var(--warning-color) !important;
}
/* File input */
[data-theme="dark"] .file-input {
color: #ffffff !important;
}
[data-theme="dark"] .file-input::file-selector-button {
background-color: hsl(var(--b3));
color: #ffffff;
border: none;
border-right: 1px solid hsl(var(--bc) / 0.2);
}
/* Search inputs */
[data-theme="dark"] input[type="search"] {
color: #ffffff !important;
}
[data-theme="dark"] input[type="search"]::placeholder {
color: #6b7280 !important;
font-style: italic;
}
/* Number inputs */
[data-theme="dark"] input[type="number"] {
color: #ffffff !important;
}
/* URL inputs */
[data-theme="dark"] input[type="url"] {
color: #ffffff !important;
}
/* Email inputs */
[data-theme="dark"] input[type="email"] {
color: #ffffff !important;
}
/* Date/time inputs */
[data-theme="dark"] input[type="date"],
[data-theme="dark"] input[type="time"],
[data-theme="dark"] input[type="datetime-local"] {
color: #ffffff !important;
}
/* Fix autofill in dark mode */
[data-theme="dark"] .input:-webkit-autofill,
[data-theme="dark"] .input:-webkit-autofill:hover,
[data-theme="dark"] .input:-webkit-autofill:focus {
-webkit-box-shadow: 0 0 0 1000px hsl(var(--b1)) inset !important;
-webkit-text-fill-color: #ffffff !important;
transition: background-color 5000s ease-in-out 0s;
}
/* Error states */
[data-theme="dark"] .input-error {
color: #ffffff !important;
border-color: hsl(var(--er));
background-color: hsl(var(--er) / 0.1);
}
[data-theme="dark"] .input-error::placeholder {
color: #ef4444 !important; /* Red placeholder for errors */
}
/* Responsive improvements */
@media (max-width: 768px) {
.card:hover {
transform: none;
box-shadow: 0 4px 15px rgba(0, 0, 0, 0.1);
}
.btn:hover:not(:disabled) {
transform: none;
}
.table tbody tr:hover {
transform: none;
}
}
/* Custom scrollbar */
::-webkit-scrollbar {
width: 8px;
height: 8px;
}
::-webkit-scrollbar-track {
background: hsl(var(--b1));
}
::-webkit-scrollbar-thumb {
background: hsl(var(--bc) / 0.3);
border-radius: 4px;
}
::-webkit-scrollbar-thumb:hover {
background: hsl(var(--bc) / 0.5);
}
/* Tabs */
/* Tab Navigation Styles */
.tab-button {
transition: all 0.2s ease-in-out;
white-space: nowrap;
cursor: pointer;
background: none;
border: none;
outline: none;
}
.tab-button.active {
color: hsl(var(--p)) !important;
border-color: hsl(var(--p)) !important;
}
.tab-button:not(.active) {
color: hsl(var(--bc) / 0.7);
border-color: transparent;
}
.tab-button:not(.active):hover {
color: hsl(var(--bc));
border-color: hsl(var(--bc) / 0.3);
}
/* Tab content styling */
.tab-content {
min-height: 400px;
}
.tab-content.hidden {
display: none !important;
}
.tab-content:not(.hidden) {
display: block;
animation: fadeIn 0.3s ease-in-out;
}
@keyframes fadeIn {
from {
opacity: 0;
transform: translateY(10px);
}
to {
opacity: 1;
transform: translateY(0);
}
}
/* Responsive adjustments */
@media (max-width: 640px) {
.tab-button {
padding: 12px 8px;
font-size: 12px;
}
.tab-button i {
font-size: 16px;
}
nav[aria-label="Configuration Tabs"] {
overflow-x: auto;
white-space: nowrap;
padding-bottom: 8px;
}
nav[aria-label="Configuration Tabs"]::-webkit-scrollbar {
height: 4px;
}
nav[aria-label="Configuration Tabs"]::-webkit-scrollbar-track {
background: hsl(var(--b2));
}
nav[aria-label="Configuration Tabs"]::-webkit-scrollbar-thumb {
background: hsl(var(--bc) / 0.3);
border-radius: 2px;
}
}
/* Dark mode specific styling */
[data-theme="dark"] .tab-button:not(.active) {
color: hsl(var(--bc) / 0.6);
}
[data-theme="dark"] .tab-button:not(.active):hover {
color: hsl(var(--bc) / 0.9);
border-color: hsl(var(--bc) / 0.3);
}
/* Light mode specific styling */
[data-theme="light"] .tab-button:not(.active) {
color: hsl(var(--bc) / 0.7);
}
[data-theme="light"] .tab-button:not(.active):hover {
color: hsl(var(--bc) / 0.9);
border-color: hsl(var(--bc) / 0.4);
}
/* Enhanced border styling */
.tab-button {
border-bottom-width: 2px;
border-bottom-style: solid;
}
/* Make sure the nav container has proper spacing */
nav[aria-label="Configuration Tabs"] {
display: flex;
gap: 2rem;
min-height: 60px;
align-items: end;
}
/* Icon and text alignment */
.tab-button .flex {
align-items: center;
justify-content: center;
gap: 0.5rem;
}
/* Focus states for accessibility */
.tab-button:focus {
outline: 2px solid hsl(var(--p));
outline-offset: 2px;
border-radius: 4px;
}
.tab-button:focus:not(:focus-visible) {
outline: none;
}

View File

@@ -3,8 +3,12 @@ package web
import (
"fmt"
"github.com/sirrobot01/decypharr/pkg/store"
"github.com/sirrobot01/decypharr/pkg/usenet"
"io"
"mime/multipart"
"net/http"
"strings"
"sync"
"time"
"encoding/json"
@@ -28,6 +32,7 @@ func (wb *Web) handleAddContent(w http.ResponseWriter, r *http.Request) {
return
}
_store := store.Get()
cfg := config.Get()
results := make([]*store.ImportRequest, 0)
errs := make([]string, 0)
@@ -37,8 +42,8 @@ func (wb *Web) handleAddContent(w http.ResponseWriter, r *http.Request) {
debridName := r.FormValue("debrid")
callbackUrl := r.FormValue("callbackUrl")
downloadFolder := r.FormValue("downloadFolder")
if downloadFolder == "" {
downloadFolder = config.Get().QBitTorrent.DownloadFolder
if downloadFolder == "" && cfg.QBitTorrent != nil {
downloadFolder = cfg.QBitTorrent.DownloadFolder
}
downloadUncached := r.FormValue("downloadUncached") == "true"
@@ -236,8 +241,6 @@ func (wb *Web) handleUpdateConfig(w http.ResponseWriter, r *http.Request) {
currentConfig.RemoveStalledAfter = updatedConfig.RemoveStalledAfter
currentConfig.AllowedExt = updatedConfig.AllowedExt
currentConfig.DiscordWebhook = updatedConfig.DiscordWebhook
// Should this be added?
currentConfig.URLBase = updatedConfig.URLBase
currentConfig.BindAddress = updatedConfig.BindAddress
currentConfig.Port = updatedConfig.Port
@@ -251,9 +254,11 @@ func (wb *Web) handleUpdateConfig(w http.ResponseWriter, r *http.Request) {
// Update Debrids
if len(updatedConfig.Debrids) > 0 {
currentConfig.Debrids = updatedConfig.Debrids
// Clear legacy single debrid if using array
}
currentConfig.Usenet = updatedConfig.Usenet
currentConfig.SABnzbd = updatedConfig.SABnzbd
// Update Arrs through the service
storage := store.Get()
arrStorage := storage.Arr()
@@ -326,28 +331,6 @@ func (wb *Web) handleProcessRepairJob(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
}
func (wb *Web) handleProcessRepairJobItems(w http.ResponseWriter, r *http.Request) {
id := chi.URLParam(r, "id")
if id == "" {
http.Error(w, "No job ID provided", http.StatusBadRequest)
return
}
var req struct {
Items map[string][]int `json:"items"`
}
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
http.Error(w, "Invalid request body: "+err.Error(), http.StatusBadRequest)
return
}
_store := store.Get()
if err := _store.Repair().ProcessJobItems(id, req.Items); err != nil {
wb.logger.Error().Err(err).Msg("Failed to process repair job items")
http.Error(w, "Failed to process job items: "+err.Error(), http.StatusInternalServerError)
return
}
w.WriteHeader(http.StatusOK)
}
func (wb *Web) handleDeleteRepairJob(w http.ResponseWriter, r *http.Request) {
// Read ids from body
var req struct {
@@ -381,3 +364,198 @@ func (wb *Web) handleStopRepairJob(w http.ResponseWriter, r *http.Request) {
}
w.WriteHeader(http.StatusOK)
}
// NZB API Handlers
// handleGetNZBs writes the current usenet queue as JSON, optionally
// narrowed by the "status" and "category" query parameters. The response
// carries the filtered items under "nzbs" and their count under "count".
func (wb *Web) handleGetNZBs(w http.ResponseWriter, r *http.Request) {
	q := r.URL.Query()
	statusFilter := q.Get("status")
	categoryFilter := q.Get("category")
	filtered := make([]*usenet.NZB, 0)
	for _, item := range wb.usenet.Store().GetQueue() {
		// Empty filter values match everything.
		if statusFilter != "" && item.Status != statusFilter {
			continue
		}
		if categoryFilter != "" && item.Category != categoryFilter {
			continue
		}
		filtered = append(filtered, item)
	}
	request.JSONResponse(w, map[string]interface{}{
		"nzbs":  filtered,
		"count": len(filtered),
	}, http.StatusOK)
}
// handleDeleteNZB removes the NZB identified by the {id} URL parameter
// from the usenet queue and acknowledges with a JSON success payload.
func (wb *Web) handleDeleteNZB(w http.ResponseWriter, r *http.Request) {
	id := chi.URLParam(r, "id")
	if id == "" {
		http.Error(w, "No NZB ID provided", http.StatusBadRequest)
		return
	}
	wb.usenet.Store().RemoveFromQueue(id)
	wb.logger.Info().Str("nzb_id", id).Msg("NZB delete requested")
	request.JSONResponse(w, map[string]string{"status": "success"}, http.StatusOK)
}
// handleAddNZBContent accepts NZB URLs and/or uploaded .nzb files from a
// multipart form, processes each item concurrently through the usenet
// service, and responds with per-item results plus any per-item errors as
// JSON. A missing downloadFolder falls back to the SABnzbd default.
func (wb *Web) handleAddNZBContent(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	cfg := config.Get()
	_store := store.Get()
	if err := r.ParseMultipartForm(32 << 20); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	// BUG FIX: results and errs are appended to from multiple goroutines
	// below; the original code raced on both slices. All appends now go
	// through mutex-guarded helpers.
	var mu sync.Mutex
	results := make([]interface{}, 0)
	errs := make([]string, 0)
	addResult := func(res interface{}) {
		mu.Lock()
		results = append(results, res)
		mu.Unlock()
	}
	addErr := func(msg string) {
		mu.Lock()
		errs = append(errs, msg)
		mu.Unlock()
	}
	arrName := r.FormValue("arr")
	action := r.FormValue("action")
	downloadFolder := r.FormValue("downloadFolder")
	if downloadFolder == "" {
		// NOTE(review): assumes cfg.SABnzbd is always usable here; if it can
		// be nil like cfg.QBitTorrent (nil-checked in handleAddContent),
		// this needs the same guard.
		downloadFolder = cfg.SABnzbd.DownloadFolder
	}
	_arr := _store.Arr().Get(arrName)
	if _arr == nil {
		// These are not found in the config. They are throwaway arrs.
		_arr = arr.New(arrName, "", "", false, false, nil, "", "")
	}
	// Collect non-empty, trimmed URLs (one per line).
	_nzbURLS := r.FormValue("nzbUrls")
	urlList := make([]string, 0)
	if _nzbURLS != "" {
		for _, u := range strings.Split(_nzbURLS, "\n") {
			if trimmed := strings.TrimSpace(u); trimmed != "" {
				urlList = append(urlList, trimmed)
			}
		}
	}
	files := r.MultipartForm.File["nzbFiles"]
	if len(files)+len(urlList) == 0 {
		request.JSONResponse(w, map[string]any{
			"results": nil,
			"errors":  "No NZB URLs or files provided",
		}, http.StatusBadRequest)
		return
	}
	var wg sync.WaitGroup
	// Handle NZB URLs: download each one, then hand it to the usenet service.
	for _, url := range urlList {
		wg.Add(1)
		go func(url string) {
			defer wg.Done()
			select {
			case <-ctx.Done():
				return // Exit if context is done
			default:
			}
			if !strings.HasPrefix(url, "http://") && !strings.HasPrefix(url, "https://") {
				addErr(fmt.Sprintf("Invalid URL format: %s", url))
				return
			}
			// Download the NZB file from the URL
			filename, content, err := utils.DownloadFile(url)
			if err != nil {
				wb.logger.Error().Err(err).Str("url", url).Msg("Failed to download NZB from URL")
				addErr(fmt.Sprintf("Failed to download NZB from URL %s: %v", url, err))
				return // Continue processing other URLs
			}
			req := &usenet.ProcessRequest{
				NZBContent:  content,
				Name:        filename,
				Arr:         _arr,
				Action:      action,
				DownloadDir: downloadFolder,
			}
			nzb, err := wb.usenet.ProcessNZB(ctx, req)
			if err != nil {
				addErr(fmt.Sprintf("Failed to process NZB from URL %s: %v", url, err))
				return
			}
			wb.logger.Info().Str("nzb_id", nzb.ID).Str("url", url).Msg("NZB added from URL")
			addResult(map[string]interface{}{
				"id":       nzb.ID,
				"name":     "NZB from URL",
				"url":      url,
				"category": arrName,
			})
		}(url)
	}
	// Handle NZB files
	for _, fileHeader := range files {
		wg.Add(1)
		go func(fileHeader *multipart.FileHeader) {
			defer wg.Done()
			select {
			case <-ctx.Done():
				return
			default:
			}
			file, err := fileHeader.Open()
			if err != nil {
				addErr(fmt.Sprintf("failed to open NZB file %s: %v", fileHeader.Filename, err))
				return
			}
			defer file.Close()
			content, err := io.ReadAll(file)
			if err != nil {
				addErr(fmt.Sprintf("failed to read NZB file %s: %v", fileHeader.Filename, err))
				return
			}
			req := &usenet.ProcessRequest{
				NZBContent:  content,
				Name:        fileHeader.Filename,
				Arr:         _arr,
				Action:      action,
				DownloadDir: downloadFolder,
			}
			nzb, err := wb.usenet.ProcessNZB(ctx, req)
			if err != nil {
				addErr(fmt.Sprintf("failed to process NZB file %s: %v", fileHeader.Filename, err))
				return
			}
			wb.logger.Info().Str("nzb_id", nzb.ID).Str("file", fileHeader.Filename).Msg("NZB added from file")
			addResult(map[string]interface{}{
				"id":       nzb.ID,
				"name":     fileHeader.Filename,
				"filename": fileHeader.Filename,
				"category": arrName,
			})
		}(fileHeader)
	}
	// Wait for all goroutines to finish
	wg.Wait()
	if len(results) == 0 && len(errs) == 0 {
		request.JSONResponse(w, map[string]any{
			"results": nil,
			"errors":  "No NZB URLs or files processed successfully",
		}, http.StatusBadRequest)
		return
	}
	request.JSONResponse(w, struct {
		Results []interface{} `json:"results"`
		Errors  []string      `json:"errors,omitempty"`
	}{
		Results: results,
		Errors:  errs,
	}, http.StatusOK)
}

View File

@@ -2,12 +2,26 @@ package web
import (
"github.com/go-chi/chi/v5"
"io/fs"
"net/http"
)
func (wb *Web) Routes() http.Handler {
r := chi.NewRouter()
// Load static files from embedded filesystem
staticFS, err := fs.Sub(assetsEmbed, "assets/build")
if err != nil {
panic(err)
}
imagesFS, err := fs.Sub(imagesEmbed, "assets/images")
if err != nil {
panic(err)
}
r.Handle("/assets/*", http.StripPrefix("/assets/", http.FileServer(http.FS(staticFS))))
r.Handle("/images/*", http.StripPrefix("/images/", http.FileServer(http.FS(imagesFS))))
r.Get("/login", wb.LoginHandler)
r.Post("/login", wb.LoginHandler)
r.Get("/register", wb.RegisterHandler)
@@ -28,12 +42,14 @@ func (wb *Web) Routes() http.Handler {
r.Post("/repair", wb.handleRepairMedia)
r.Get("/repair/jobs", wb.handleGetRepairJobs)
r.Post("/repair/jobs/{id}/process", wb.handleProcessRepairJob)
r.Post("/repair/jobs/{id}/process-items", wb.handleProcessRepairJobItems)
r.Post("/repair/jobs/{id}/stop", wb.handleStopRepairJob)
r.Delete("/repair/jobs", wb.handleDeleteRepairJob)
r.Get("/torrents", wb.handleGetTorrents)
r.Delete("/torrents/{category}/{hash}", wb.handleDeleteTorrent)
r.Delete("/torrents/", wb.handleDeleteTorrents)
r.Get("/nzbs", wb.handleGetNZBs)
r.Post("/nzbs/add", wb.handleAddNZBContent)
r.Delete("/nzbs/{id}", wb.handleDeleteNZB)
r.Get("/config", wb.handleGetConfig)
r.Post("/config", wb.handleUpdateConfig)
})

File diff suppressed because it is too large Load Diff

View File

@@ -1,185 +1,197 @@
{{ define "download" }}
<div class="container mt-4">
<div class="card">
<div class="card-header">
<h4 class="mb-0"><i class="bi bi-cloud-download me-2"></i>Add New Download</h4>
</div>
<div class="card-body">
<form id="downloadForm" enctype="multipart/form-data">
<div class="mb-2">
<label for="magnetURI" class="form-label">Torrent(s)</label>
<textarea class="form-control" id="magnetURI" name="urls" rows="8" placeholder="Paste your magnet links or torrent URLs here, one per line..."></textarea>
<div class="space-y-6">
<!-- Download Form -->
<div class="card bg-base-100 shadow-xl">
<div class="card-body">
<form id="downloadForm" enctype="multipart/form-data" class="space-y-3">
<!-- Mode Selection -->
<div class="flex justify-center mb-4">
<div class="join">
<button type="button" class="btn btn-primary join-item" id="torrentMode" data-mode="torrent">
<i class="bi bi-magnet mr-2"></i>Torrents
</button>
<button type="button" class="btn btn-outline join-item" id="nzbMode" data-mode="nzb">
<i class="bi bi-file-zip mr-2"></i>NZBs
</button>
</div>
</div>
<!-- Torrent Input Section -->
<div class="space-y-2" id="torrentInputs">
<div class="form-control">
<label class="label" for="magnetURI">
<span class="label-text font-semibold">
<i class="bi bi-magnet mr-2 text-primary"></i>Torrent Links
</span>
<span class="label-text-alt">Paste magnet links or URLs</span>
</label>
<textarea class="textarea textarea-bordered h-32 font-mono text-sm"
id="magnetURI"
name="urls"
placeholder="Paste your magnet links or torrent URLs here, one per line..."></textarea>
</div>
<div class="mb-3">
<input type="file" class="form-control" id="torrentFiles" name="torrents" multiple accept=".torrent,.magnet">
<div class="divider">OR</div>
<div class="form-control">
<label class="label">
<span class="label-text font-semibold">
<i class="bi bi-file-earmark-arrow-up mr-2 text-secondary"></i>Upload Torrent Files
</span>
<span class="label-text-alt">Select .torrent files</span>
</label>
<input type="file"
class="file-input file-input-bordered w-full"
id="torrentFiles"
name="torrents"
multiple
accept=".torrent">
<div class="label">
<span class="label-text-alt">
<i class="bi bi-info-circle mr-1"></i>You can select multiple files at once
</span>
</div>
</div>
</div>
<!-- NZB Input Section -->
<div class="space-y-2 hidden" id="nzbInputs">
<div class="form-control">
<label class="label" for="nzbURLs">
<span class="label-text font-semibold">
<i class="bi bi-link-45deg mr-2 text-primary"></i>NZB URLs
</span>
<span class="label-text-alt">Paste NZB download URLs</span>
</label>
<textarea class="textarea textarea-bordered h-32 font-mono text-sm"
id="nzbURLs"
name="nzbUrls"
placeholder="Paste your NZB URLs here, one per line..."></textarea>
</div>
<hr />
<div class="divider">OR</div>
<div class="row mb-3">
<div class="col">
<label for="downloadAction" class="form-label">Post Download Action</label>
<select class="form-select" id="downloadAction" name="downloadAction">
<option value="symlink" selected>Symlink</option>
<option value="download">Download</option>
<option value="none">None</option>
<div class="form-control">
<label class="label">
<span class="label-text font-semibold">
<i class="bi bi-file-earmark-arrow-up mr-2 text-secondary"></i>Upload NZB Files
</span>
<span class="label-text-alt">Select .nzb files</span>
</label>
<input type="file"
class="file-input file-input-bordered w-full"
id="nzbFiles"
name="nzbs"
multiple
accept=".nzb">
<div class="label">
<span class="label-text-alt">
<i class="bi bi-info-circle mr-1"></i>You can select multiple files at once
</span>
</div>
</div>
</div>
<div class="divider"></div>
<!-- Configuration Section -->
<div class="grid grid-cols-1 lg:grid-cols-2 gap-3">
<div class="space-y-2">
<h3 class="text-lg font-semibold flex items-center">
<i class="bi bi-gear mr-2 text-info"></i>Download Settings
</h3>
<div class="form-control">
<label class="label" for="downloadAction">
<span class="label-text">Post Download Action</span>
</label>
<select class="select select-bordered" id="downloadAction" name="downloadAction">
<option value="symlink" selected>Create Symlink</option>
<option value="download">Download Files</option>
<option value="none">No Action</option>
</select>
<small class="text-muted">Choose how to handle the added torrent (Default to symlinks)</small>
</div>
<div class="col">
<label for="downloadFolder" class="form-label">Download Folder</label>
<input type="text" class="form-control" id="downloadFolder" name="downloadFolder" placeholder="Enter Download Folder (e.g /downloads/torrents)">
<small class="text-muted">Default is your qbittorent download_folder</small>
</div>
<div class="col">
<label for="arr" class="form-label">Arr (if any)</label>
<input type="text" class="form-control" id="arr" name="arr" placeholder="Enter Category (e.g sonarr, radarr, radarr4k)">
<small class="text-muted">Optional, leave empty if not using Arr</small>
</div>
</div>
{{ if .HasMultiDebrid }}
<div class="row mb-3">
<div class="col-md-6">
<label for="debrid" class="form-label">Select Debrid</label>
<select class="form-select" id="debrid" name="debrid">
{{ range $index, $debrid := .Debrids }}
<option value="{{ $debrid }}" {{ if eq $index 0 }}selected{{end}}>{{ $debrid }}</option>
{{ end }}
</select>
<small class="text-muted">Select a debrid service to use for this download</small>
</div>
</div>
{{ end }}
<div class="row mb-3">
<div class="col-md-2 mb-3">
<div class="form-check d-inline-block">
<input type="checkbox" class="form-check-input" name="downloadUncached" id="downloadUncached">
<label class="form-check-label" for="downloadUncached">Download Uncached</label>
<div class="label">
<span class="label-text-alt">How to handle files after download completion</span>
</div>
</div>
<div class="form-control">
<label class="label" for="downloadFolder">
<span class="label-text">Download Folder</span>
</label>
<input type="text"
class="input input-bordered"
id="downloadFolder"
name="downloadFolder"
placeholder="/downloads/torrents">
<div class="label">
<span class="label-text-alt" id="downloadFolderHint">Leave empty to use default qBittorrent folder</span>
</div>
</div>
</div>
<button type="submit" class="btn btn-primary" id="submitDownload">
<i class="bi bi-cloud-upload me-2"></i>Add to Download Queue
<div class="space-y-2">
<h3 class="text-lg font-semibold flex items-center">
<i class="bi bi-tags mr-2 text-warning"></i>Categorization
</h3>
<div class="form-control">
<label class="label" for="arr">
<span class="label-text">Arr Category</span>
</label>
<input type="text"
class="input input-bordered"
id="arr"
name="arr"
placeholder="sonarr, radarr, etc.">
<div class="label">
<span class="label-text-alt">Optional: Specify which Arr service should handle this</span>
</div>
</div>
{{ if .HasMultiDebrid }}
<div class="form-control">
<label class="label" for="debrid">
<span class="label-text">Debrid Service</span>
</label>
<select class="select select-bordered" id="debrid" name="debrid">
{{ range $index, $debrid := .Debrids }}
<option value="{{ $debrid }}" {{ if eq $index 0 }}selected{{end}}>
{{ $debrid }}
</option>
{{ end }}
</select>
<div class="label">
<span class="label-text-alt">Choose which debrid service to use</span>
</div>
</div>
{{ end }}
</div>
<div class="form-control">
<label class="label cursor-pointer justify-start gap-3">
<input type="checkbox" class="checkbox" name="downloadUncached" id="downloadUncached">
<div>
<span class="label-text font-medium">Download Uncached Content</span>
<div class="label-text-alt">Allow downloading of content not cached by debrid service</div>
</div>
</label>
</div>
</div>
<!-- Submit Button -->
<div class="form-control">
<button type="submit" class="btn btn-primary btn-lg" id="submitDownload">
<i class="bi bi-cloud-upload mr-2"></i><span id="submitButtonText">Add to Download Queue</span>
</button>
</form>
</div>
</div>
</form>
</div>
</div>
<script>
// Default download folder rendered server-side by the Go template.
let downloadFolder = '{{ .DownloadFolder }}';
document.addEventListener('DOMContentLoaded', () => {
    // Restore the last-used form options from localStorage, falling back to
    // sensible defaults ('symlink' action, server-provided download folder).
    const loadSavedDownloadOptions = () => {
        const savedCategory = localStorage.getItem('downloadCategory');
        const savedAction = localStorage.getItem('downloadAction');
        const savedDownloadUncached = localStorage.getItem('downloadUncached');
        document.getElementById('arr').value = savedCategory || '';
        document.getElementById('downloadAction').value = savedAction || 'symlink';
        document.getElementById('downloadUncached').checked = savedDownloadUncached === 'true';
        document.getElementById('downloadFolder').value = localStorage.getItem('downloadFolder') || downloadFolder || '';
    };
    // Persist the current form options to localStorage so they survive reloads.
    const saveCurrentDownloadOptions = () => {
        const arr = document.getElementById('arr').value;
        const downloadAction = document.getElementById('downloadAction').value;
        const downloadUncached = document.getElementById('downloadUncached').checked;
        const downloadFolder = document.getElementById('downloadFolder').value;
        localStorage.setItem('downloadCategory', arr);
        localStorage.setItem('downloadAction', downloadAction);
        localStorage.setItem('downloadUncached', downloadUncached.toString());
        localStorage.setItem('downloadFolder', downloadFolder);
    };
    // Load the last used download options from local storage
    loadSavedDownloadOptions();
    // Handle form submission: collect magnet URLs and/or .torrent files,
    // POST them to /api/add, and report per-item successes/errors as toasts.
    document.getElementById('downloadForm').addEventListener('submit', async (e) => {
        e.preventDefault();
        const submitBtn = document.getElementById('submitDownload');
        const originalText = submitBtn.innerHTML;
        submitBtn.disabled = true;
        submitBtn.innerHTML = '<span class="spinner-border spinner-border-sm me-2"></span>Adding...';
        try {
            const formData = new FormData();
            // Add URLs if present (one magnet/URL per line, blanks ignored)
            const urls = document.getElementById('magnetURI').value
                .split('\n')
                .map(url => url.trim())
                .filter(url => url.length > 0);
            if (urls.length > 0) {
                formData.append('urls', urls.join('\n'));
            }
            // Add torrent files if present
            const fileInput = document.getElementById('torrentFiles');
            for (let i = 0; i < fileInput.files.length; i++) {
                formData.append('files', fileInput.files[i]);
            }
            // Validate: at least one item, at most 100 per submission.
            if (urls.length + fileInput.files.length === 0) {
                createToast('Please submit at least one torrent', 'warning');
                return;
            }
            if (urls.length + fileInput.files.length > 100) {
                createToast('Please submit up to 100 torrents at a time', 'warning');
                return;
            }
            formData.append('arr', document.getElementById('arr').value);
            formData.append('downloadFolder', document.getElementById('downloadFolder').value);
            formData.append('action', document.getElementById('downloadAction').value);
            formData.append('downloadUncached', document.getElementById('downloadUncached').checked);
            // The debrid selector only exists when multiple services are configured.
            formData.append('debrid', document.getElementById('debrid') ? document.getElementById('debrid').value : '');
            const response = await fetcher('/api/add', {
                method: 'POST',
                body: formData
            });
            const result = await response.json();
            if (!response.ok) throw new Error(result.error || 'Unknown error');
            if (result.errors && result.errors.length > 0) {
                if (result.results.length > 0) {
                    createToast(`Added ${result.results.length} torrents with ${result.errors.length} errors:\n${result.errors.join('\n')}`, 'warning');
                } else {
                    createToast(`Failed to add torrents:\n${result.errors.join('\n')}`, 'error');
                }
            } else {
                createToast(`Successfully added ${result.results.length} torrents!`);
                document.getElementById('magnetURI').value = '';
                document.getElementById('torrentFiles').value = '';
            }
        } catch (error) {
            createToast(`Error adding downloads: ${error.message}`, 'error');
        } finally {
            submitBtn.disabled = false;
            submitBtn.innerHTML = originalText;
        }
    });
    // Save the download options to local storage when they change.
    // FIX: previously only 'arr' and 'downloadAction' triggered a save, so
    // toggling the uncached checkbox or editing the folder alone was never
    // persisted even though both values are read back on load.
    document.getElementById('arr').addEventListener('change', saveCurrentDownloadOptions);
    document.getElementById('downloadAction').addEventListener('change', saveCurrentDownloadOptions);
    document.getElementById('downloadUncached').addEventListener('change', saveCurrentDownloadOptions);
    document.getElementById('downloadFolder').addEventListener('change', saveCurrentDownloadOptions);
    // Read the URL parameters for a magnet link and add it to the download queue if found
    const urlParams = new URLSearchParams(window.location.search);
    const magnetURI = urlParams.get('magnet');
    if (magnetURI) {
        document.getElementById('magnetURI').value = magnetURI;
        history.replaceState({}, document.title, window.location.pathname);
    }
});
</script>
</div>
<script>
document.addEventListener('DOMContentLoaded', function () {
    // Server-rendered default download folder; empty string when unset.
    const folder = "{{ .DownloadFolder }}" || '';
    // Expose the download manager globally for other page scripts.
    window.downloadManager = new DownloadManager(folder);
});
</script>
{{ end }}

View File

@@ -1,25 +1,50 @@
{{ define "index" }}
<div class="container mt-4">
<div class="card">
<div class="card-header d-flex justify-content-between align-items-center gap-4">
<h4 class="mb-0 text-nowrap"><i class="bi bi-table me-2"></i>Active Torrents</h4>
<div class="d-flex align-items-center overflow-auto" style="flex-wrap: nowrap; gap: 0.5rem;">
<button class="btn btn-outline-danger btn-sm" id="batchDeleteBtn" style="display: none; flex-shrink: 0;">
<i class="bi bi-trash me-1"></i>Delete Selected
<div class="space-y-6">
<!-- Controls Section -->
<div class="card bg-base-100 shadow-xl">
<div class="card-body">
<!-- Dashboard Mode Toggle -->
<div class="flex justify-center mb-4">
<div class="join">
<button class="btn btn-primary join-item" id="torrentsMode" data-mode="torrents">
<i class="bi bi-magnet mr-2"></i>Torrents
</button>
<button class="btn btn-outline-secondary btn-sm me-2" id="refreshBtn" style="flex-shrink: 0;">
<i class="bi bi-arrow-clockwise me-1"></i>Refresh
<button class="btn btn-outline join-item" id="nzbsMode" data-mode="nzbs">
<i class="bi bi-file-zip mr-2"></i>NZBs
</button>
<select class="form-select form-select-sm d-inline-block w-auto me-2" id="stateFilter" style="flex-shrink: 0;">
</div>
</div>
<div class="flex flex-col lg:flex-row justify-between items-start lg:items-center gap-4">
<!-- Batch Actions -->
<div class="flex items-center gap-2">
<button class="btn btn-secondary btn-sm hidden" id="batchDeleteBtn">
<i class="bi bi-trash"></i>
<span class="hidden sm:inline">Delete Selected</span>
</button>
<button class="btn btn-error btn-sm hidden" id="batchDeleteDebridBtn">
<i class="bi bi-cloud-fog-fill"></i>
<span class="hidden sm:inline">Remove From Debrid</span>
</button>
<button class="btn btn-outline btn-sm" id="refreshBtn">
<i class="bi bi-arrow-clockwise"></i>
<span class="hidden sm:inline">Refresh</span>
</button>
</div>
<!-- Filters -->
<div class="flex flex-wrap items-center gap-2 w-full lg:w-auto">
<select class="select select-bordered select-sm w-full sm:w-auto min-w-32" id="stateFilter">
<option value="">All States</option>
<option value="pausedUP">PausedUP(Completed)</option>
<option value="pausedUP">Completed</option>
<option value="downloading">Downloading</option>
<option value="error">Error</option>
</select>
<select class="form-select form-select-sm d-inline-block w-auto" id="categoryFilter">
<select class="select select-bordered select-sm w-full sm:w-auto min-w-32" id="categoryFilter">
<option value="">All Categories</option>
</select>
<select class="form-select form-select-sm d-inline-block w-auto" id="sortSelector" style="flex-shrink: 0;">
<select class="select select-bordered select-sm w-full sm:w-auto min-w-48" id="sortSelector">
<option value="added_on" selected>Date Added (Newest First)</option>
<option value="added_on_asc">Date Added (Oldest First)</option>
<option value="name_asc">Name (A-Z)</option>
@@ -31,470 +56,159 @@
</select>
</div>
</div>
<div class="card-body p-0">
<div class="table-responsive">
<table class="table table-hover mb-0">
<thead>
<tr>
<th>
<input type="checkbox" class="form-check-input" id="selectAll">
</th>
<th>Name</th>
<th>Size</th>
<th>Progress</th>
<th>Speed</th>
<th>Category</th>
<th>Debrid</th>
<th>State</th>
<th>Actions</th>
</tr>
</thead>
<tbody id="torrentsList">
</tbody>
</table>
</div>
<div class="d-flex justify-content-between align-items-center p-3 border-top">
<div class="pagination-info">
<span id="paginationInfo">Showing 0-0 of 0 torrents</span>
</div>
<nav aria-label="Torrents pagination">
<ul class="pagination pagination-sm m-0" id="paginationControls"></ul>
</nav>
</div>
</div>
<!-- Data Table -->
<div class="card bg-base-100 shadow-xl">
<div class="card-body p-0">
<div class="overflow-x-auto">
<table class="table table-hover">
<!-- Torrents Headers -->
<thead class="bg-base-200" id="torrentsHeaders">
<tr>
<th class="w-12">
<label class="cursor-pointer">
<input type="checkbox" class="checkbox checkbox-sm" id="selectAll">
</label>
</th>
<th class="font-semibold">
<i class="bi bi-file-text mr-2"></i>Name
</th>
<th class="font-semibold">
<i class="bi bi-hdd mr-2"></i>Size
</th>
<th class="font-semibold">
<i class="bi bi-speedometer2 mr-2"></i>Progress
</th>
<th class="font-semibold">
<i class="bi bi-download mr-2"></i>Speed
</th>
<th class="font-semibold">
<i class="bi bi-tag mr-2"></i>Category
</th>
<th class="font-semibold">
<i class="bi bi-cloud mr-2"></i>Debrid
</th>
<th class="font-semibold">
<i class="bi bi-people mr-2"></i>Seeders
</th>
<th class="font-semibold">
<i class="bi bi-activity mr-2"></i>State
</th>
<th class="font-semibold w-32">Actions</th>
</tr>
</thead>
<!-- NZBs Headers -->
<thead class="bg-base-200 hidden" id="nzbsHeaders">
<tr>
<th class="w-12">
<label class="cursor-pointer">
<input type="checkbox" class="checkbox checkbox-sm" id="selectAllNzb">
</label>
</th>
<th class="font-semibold">
<i class="bi bi-file-zip mr-2"></i>Name
</th>
<th class="font-semibold">
<i class="bi bi-hdd mr-2"></i>Size
</th>
<th class="font-semibold">
<i class="bi bi-speedometer2 mr-2"></i>Progress
</th>
<th class="font-semibold">
<i class="bi bi-clock mr-2"></i>ETA
</th>
<th class="font-semibold">
<i class="bi bi-tag mr-2"></i>Category
</th>
<th class="font-semibold">
<i class="bi bi-activity mr-2"></i>Status
</th>
<th class="font-semibold">
<i class="bi bi-calendar mr-2"></i>Age
</th>
<th class="font-semibold w-32">Actions</th>
</tr>
</thead>
<tbody id="dataList">
<!-- Dynamic content will be loaded here -->
</tbody>
</table>
</div>
<!-- Pagination -->
<div class="flex flex-col sm:flex-row justify-between items-center p-6 border-t border-base-200 gap-4">
<div class="text-sm text-base-content/70">
<span id="paginationInfo">Loading data...</span>
</div>
<div class="join" id="paginationControls"></div>
</div>
</div>
</div>
<!-- Context menu for torrent rows -->
<div class="dropdown-menu context-menu shadow" id="torrentContextMenu">
<h6 class="dropdown-header torrent-name text-truncate"></h6>
<div class="dropdown-divider"></div>
<button class="dropdown-item" data-action="copy-magnet">
<i class="bi bi-magnet me-2"></i>Copy Magnet Link
</button>
<button class="dropdown-item" data-action="copy-name">
<i class="bi bi-copy me-2"></i>Copy Name
</button>
<div class="dropdown-divider"></div>
<button class="dropdown-item text-danger" data-action="delete">
<i class="bi bi-trash me-2"></i>Delete
</button>
<!-- Empty State -->
<div class="card bg-base-100 shadow-xl hidden" id="emptyState">
<div class="card-body text-center py-16">
<div class="text-6xl text-base-content/30 mb-4">
<i class="bi bi-inbox"></i>
</div>
<h3 class="text-2xl font-bold mb-2" id="emptyStateTitle">No Data Found</h3>
<p class="text-base-content/70 mb-6" id="emptyStateMessage">No downloads found.</p>
<a href="{{.URLBase}}download" class="btn btn-primary">
<i class="bi bi-plus-circle mr-2"></i>Add New Download
</a>
</div>
</div>
</div>
<script>
// Cached DOM element references used throughout the dashboard script.
let refs = {
    torrentsList: document.getElementById('torrentsList'),
    categoryFilter: document.getElementById('categoryFilter'),
    stateFilter: document.getElementById('stateFilter'),
    sortSelector: document.getElementById('sortSelector'),
    selectAll: document.getElementById('selectAll'),
    batchDeleteBtn: document.getElementById('batchDeleteBtn'),
    refreshBtn: document.getElementById('refreshBtn'),
    torrentContextMenu: document.getElementById('torrentContextMenu'),
    paginationControls: document.getElementById('paginationControls'),
    paginationInfo: document.getElementById('paginationInfo')
};
// Mutable page state: torrent data, selections, filters, sorting, pagination.
let state = {
    torrents: [],
    selectedTorrents: new Set(),
    categories: new Set(),
    // FIX: the Set constructor takes a single iterable. The previous call
    // new Set('downloading', 'pausedUP', 'error') built a set of the
    // CHARACTERS of "downloading" and silently ignored the other arguments.
    states: new Set(['downloading', 'pausedUP', 'error']),
    selectedCategory: refs.categoryFilter?.value || '',
    selectedState: refs.stateFilter?.value || '',
    selectedTorrentContextMenu: null,
    sortBy: refs.sortSelector?.value || 'added_on',
    itemsPerPage: 20,
    currentPage: 1
};
<!-- Torrent Context Menu -->
<ul class="menu bg-base-100 shadow-lg rounded-box context-menu hidden fixed z-50" id="torrentContextMenu">
<li class="menu-title">
<span class="torrent-name text-sm font-bold truncate max-w-48"></span>
</li>
<hr/>
<li><a class="menu-item text-sm" data-action="copy-magnet">
<i class="bi bi-magnet text-primary"></i>Copy Magnet Link
</a></li>
<li><a class="menu-item text-sm" data-action="copy-name">
<i class="bi bi-clipboard text-info"></i>Copy Name
</a></li>
<hr/>
<li><a class="menu-item text-sm text-error" data-action="delete">
<i class="bi bi-trash"></i>Delete Torrent
</a></li>
</ul>
// Renders one <tr> for the torrents table. The row checkbox is restored from
// state.selectedTorrents so selections survive innerHTML re-renders; the
// hash/magnet/name are stashed in data-* attributes for the context menu.
// NOTE(review): torrent.name (and magnet) are interpolated into HTML without
// escaping — a name containing quotes or markup can break the row or inject
// markup; confirm names are sanitized upstream.
const torrentRowTemplate = (torrent) => `
<tr data-hash="${torrent.hash}" data-magnet="${torrent.magnet || ''}" data-name="${torrent.name}">
<td>
<input type="checkbox" class="form-check-input torrent-select" data-hash="${torrent.hash}" ${state.selectedTorrents.has(torrent.hash) ? 'checked' : ''}>
</td>
<td class="text-nowrap text-truncate overflow-hidden" style="max-width: 350px;" title="${torrent.name}">${torrent.name}</td>
<td class="text-nowrap">${formatBytes(torrent.size)}</td>
<td style="min-width: 150px;">
<div class="progress" style="height: 8px;">
<div class="progress-bar" role="progressbar"
style="width: ${(torrent.progress * 100).toFixed(1)}%"
aria-valuenow="${(torrent.progress * 100).toFixed(1)}"
aria-valuemin="0"
aria-valuemax="100"></div>
</div>
<small class="text-muted">${(torrent.progress * 100).toFixed(1)}%</small>
</td>
<td>${formatSpeed(torrent.dlspeed)}</td>
<td><span class="badge bg-secondary">${torrent.category || 'None'}</span></td>
<td>${torrent.debrid || 'None'}</td>
<td><span class="badge ${getStateColor(torrent.state)}">${torrent.state}</span></td>
<td>
<button class="btn btn-sm btn-outline-danger" onclick="deleteTorrent('${torrent.hash}', '${torrent.category || ''}', false)">
<i class="bi bi-trash"></i>
</button>
${torrent.debrid && torrent.id ? `
<button class="btn btn-sm btn-outline-danger" onclick="deleteTorrent('${torrent.hash}', '${torrent.category || ''}', true)">
<i class="bi bi-trash"></i> Remove from Debrid
</button>
` : ''}
</td>
</tr>
`;
<!-- NZB Context Menu -->
<ul class="menu bg-base-100 shadow-lg rounded-box context-menu hidden fixed z-50" id="nzbContextMenu">
<li class="menu-title">
<span class="nzb-name text-sm font-bold truncate max-w-48"></span>
</li>
<hr/>
<li><a class="menu-item text-sm" data-action="pause">
<i class="bi bi-pause text-warning"></i>Pause Download
</a></li>
<li><a class="menu-item text-sm" data-action="resume">
<i class="bi bi-play text-success"></i>Resume Download
</a></li>
<li><a class="menu-item text-sm" data-action="retry">
<i class="bi bi-arrow-clockwise text-info"></i>Retry Download
</a></li>
<li><a class="menu-item text-sm" data-action="copy-name">
<i class="bi bi-clipboard text-info"></i>Copy Name
</a></li>
<hr/>
<li><a class="menu-item text-sm text-error" data-action="delete">
<i class="bi bi-trash"></i>Delete NZB
</a></li>
</ul>
// Formats a byte count as a human-readable string, e.g. 1536 -> "1.5 KB".
// Falsy input (0, null, undefined) yields "0 B".
function formatBytes(bytes) {
    if (!bytes) return '0 B';
    const units = ['B', 'KB', 'MB', 'GB', 'TB'];
    // Pick the largest unit whose magnitude fits the value.
    const exp = Math.floor(Math.log(bytes) / Math.log(1024));
    const value = parseFloat((bytes / 1024 ** exp).toFixed(2));
    return `${value} ${units[exp]}`;
}
// Formats a transfer rate (bytes/second) using formatBytes, e.g. "1.5 KB/s".
function formatSpeed(speed) {
    return formatBytes(speed) + '/s';
}
// Maps a torrent state to a Bootstrap badge color class.
// Matching is case-insensitive; unknown or missing states get the
// neutral 'bg-secondary'.
function getStateColor(state) {
    switch (state?.toLowerCase()) {
        case 'downloading':
            return 'bg-primary';
        case 'pausedup':
            return 'bg-success';
        case 'error':
            return 'bg-danger';
        default:
            return 'bg-secondary';
    }
}
// Re-renders the torrent table, category dropdown, pagination, and batch
// controls from the current `state`. Called after every data refresh and
// every filter/sort/selection event.
function updateUI() {
    // Filter torrents by selected category and state
    let filteredTorrents = state.torrents;
    if (state.selectedCategory) {
        filteredTorrents = filteredTorrents.filter(t => t.category === state.selectedCategory);
    }
    // NOTE(review): exact-match comparison here, but the selectAll handler
    // compares states case-insensitively — confirm which is intended.
    if (state.selectedState) {
        filteredTorrents = filteredTorrents.filter(t => t.state === state.selectedState);
    }
    // Sort the filtered torrents
    filteredTorrents = sortTorrents(filteredTorrents, state.sortBy);
    // Clamp the current page if filtering shrank the result set.
    const totalPages = Math.ceil(filteredTorrents.length / state.itemsPerPage);
    if (state.currentPage > totalPages && totalPages > 0) {
        state.currentPage = totalPages;
    }
    // paginateTorrents also rebuilds the pagination controls as a side effect.
    const paginatedTorrents = paginateTorrents(filteredTorrents);
    // Update the torrents list table
    refs.torrentsList.innerHTML = paginatedTorrents.map(torrent => torrentRowTemplate(torrent)).join('');
    // Update the category filter dropdown
    const currentCategories = Array.from(state.categories).sort();
    const categoryOptions = ['<option value="">All Categories</option>']
        .concat(currentCategories.map(cat =>
            `<option value="${cat}" ${cat === state.selectedCategory ? 'selected' : ''}>${cat}</option>`
        ));
    refs.categoryFilter.innerHTML = categoryOptions.join('');
    // Clean up selected torrents that no longer exist
    // NOTE(review): this filters against the FILTERED list, so selections
    // that merely fall outside the current filter are dropped too, not only
    // deleted torrents — confirm this is intended.
    state.selectedTorrents = new Set(
        Array.from(state.selectedTorrents)
            .filter(hash => filteredTorrents.some(t => t.hash === hash))
    );
    // Update batch delete button visibility
    refs.batchDeleteBtn.style.display = state.selectedTorrents.size > 0 ? '' : 'none';
    // Update the select all checkbox state
    refs.selectAll.checked = filteredTorrents.length > 0 && filteredTorrents.every(torrent => state.selectedTorrents.has(torrent.hash));
}
// Fetches the torrent list from the API, refreshes the cached state
// (torrent array plus the set of non-empty categories), and re-renders.
// Network failures are logged but do not disturb the current view.
async function loadTorrents() {
    try {
        const res = await fetcher('/api/torrents');
        const data = await res.json();
        state.torrents = data;
        // Rebuild the category set from whatever the server reported,
        // dropping empty/missing categories.
        state.categories = new Set(data.map(t => t.category).filter(Boolean));
        updateUI();
    } catch (err) {
        console.error('Error loading torrents:', err);
    }
}
// Returns a new array of torrents sorted by a selector value such as
// 'added_on', 'name_asc', or 'size_desc'. A '_asc'/'_desc' suffix sets the
// direction (default: descending). The input array is never mutated.
function sortTorrents(torrents, sortBy) {
    // Mirror the original parsing: only strip the suffix when the selector
    // contains '_asc' or '_desc'; otherwise the whole value is the field.
    const hasSuffix = sortBy.includes('_asc') || sortBy.includes('_desc');
    const ascending = hasSuffix && sortBy.endsWith('_asc');
    const field = hasSuffix
        ? sortBy.split('_').slice(0, -1).join('_')
        : sortBy;

    // Extract the comparable value for one torrent. Names compare as
    // lowercased strings; everything else compares numerically with 0 as
    // the fallback for missing values.
    const keyOf = (t) => {
        if (field === 'name') {
            return t.name?.toLowerCase() || '';
        }
        return t[field] || 0;
    };

    const sorted = [...torrents];
    sorted.sort((a, b) => {
        const ka = keyOf(a);
        const kb = keyOf(b);
        const cmp = typeof ka === 'string' ? ka.localeCompare(kb) : ka - kb;
        return ascending ? cmp : -cmp;
    });
    return sorted;
}
// Deletes a single torrent via the API after user confirmation, optionally
// also removing it from the debrid provider, then reloads the table.
// Exposed globally because the row template invokes it via inline onclick.
async function deleteTorrent(hash, category, removeFromDebrid = false) {
    if (!confirm('Are you sure you want to delete this torrent?')) return;
    try {
        await fetcher(`/api/torrents/${category}/${hash}?removeFromDebrid=${removeFromDebrid}`, {
            method: 'DELETE'
        });
        await loadTorrents();
        createToast('Torrent deleted successfully');
    } catch (error) {
        console.error('Error deleting torrent:', error);
        createToast('Failed to delete torrent', 'error');
    }
}
// Batch-deletes every torrent currently in state.selectedTorrents with one
// API call (comma-separated hashes), then reloads the list. Confirms first.
async function deleteSelectedTorrents() {
    if (!confirm(`Are you sure you want to delete ${state.selectedTorrents.size} selected torrents?`)) return;
    try {
        // Comma-separated list of hashes
        const hashes = Array.from(state.selectedTorrents).join(',');
        await fetcher(`/api/torrents/?hashes=${encodeURIComponent(hashes)}`, {
            method: 'DELETE'
        });
        await loadTorrents();
        createToast('Selected torrents deleted successfully');
    } catch (error) {
        console.error('Error deleting torrents:', error);
        createToast('Failed to delete some torrents' , 'error');
    }
}
// Slices the (already filtered and sorted) torrent list down to the current
// page and, as a side effect, rebuilds the pagination info text and the
// prev/number/next controls. Returns the torrents for the current page.
function paginateTorrents(torrents) {
    const totalItems = torrents.length;
    const totalPages = Math.ceil(totalItems / state.itemsPerPage);
    const startIndex = (state.currentPage - 1) * state.itemsPerPage;
    const endIndex = Math.min(startIndex + state.itemsPerPage, totalItems);
    // Update pagination info text
    refs.paginationInfo.textContent =
        `Showing ${totalItems > 0 ? startIndex + 1 : 0}-${endIndex} of ${totalItems} torrents`;
    // Generate pagination controls
    refs.paginationControls.innerHTML = '';
    // With a single page there is nothing to navigate; leave controls empty.
    if (totalPages <= 1) {
        return torrents.slice(startIndex, endIndex);
    }
    // Previous button
    const prevLi = document.createElement('li');
    prevLi.className = `page-item ${state.currentPage === 1 ? 'disabled' : ''}`;
    prevLi.innerHTML = `
        <a class="page-link" href="#" aria-label="Previous" ${state.currentPage === 1 ? 'tabindex="-1" aria-disabled="true"' : ''}>
            <span aria-hidden="true">&laquo;</span>
        </a>
    `;
    if (state.currentPage > 1) {
        prevLi.querySelector('a').addEventListener('click', (e) => {
            e.preventDefault();
            state.currentPage--;
            updateUI();
        });
    }
    refs.paginationControls.appendChild(prevLi);
    // Page numbers: show a sliding window of up to 5 pages centered (when
    // possible) on the current page.
    const maxPageButtons = 5;
    let startPage = Math.max(1, state.currentPage - Math.floor(maxPageButtons / 2));
    let endPage = Math.min(totalPages, startPage + maxPageButtons - 1);
    // Re-anchor the window when clamped at the end so it stays full width.
    if (endPage - startPage + 1 < maxPageButtons) {
        startPage = Math.max(1, endPage - maxPageButtons + 1);
    }
    for (let i = startPage; i <= endPage; i++) {
        const pageLi = document.createElement('li');
        pageLi.className = `page-item ${i === state.currentPage ? 'active' : ''}`;
        pageLi.innerHTML = `<a class="page-link" href="#">${i}</a>`;
        pageLi.querySelector('a').addEventListener('click', (e) => {
            e.preventDefault();
            state.currentPage = i;
            updateUI();
        });
        refs.paginationControls.appendChild(pageLi);
    }
    // Next button
    const nextLi = document.createElement('li');
    nextLi.className = `page-item ${state.currentPage === totalPages ? 'disabled' : ''}`;
    nextLi.innerHTML = `
        <a class="page-link" href="#" aria-label="Next" ${state.currentPage === totalPages ? 'tabindex="-1" aria-disabled="true"' : ''}>
            <span aria-hidden="true">&raquo;</span>
        </a>
    `;
    if (state.currentPage < totalPages) {
        nextLi.querySelector('a').addEventListener('click', (e) => {
            e.preventDefault();
            state.currentPage++;
            updateUI();
        });
    }
    refs.paginationControls.appendChild(nextLi);
    return torrents.slice(startIndex, endIndex);
}
// Page bootstrap: loads the torrent list, starts a 5-second auto-refresh,
// and wires up all dashboard event handlers (selection, filters, sorting,
// batch delete, and the right-click context menu).
document.addEventListener('DOMContentLoaded', () => {
    loadTorrents();
    const refreshInterval = setInterval(loadTorrents, 5000);
    refs.refreshBtn.addEventListener('click', loadTorrents);
    refs.batchDeleteBtn.addEventListener('click', deleteSelectedTorrents);
    // Select-all toggles every torrent matching the current filters.
    refs.selectAll.addEventListener('change', (e) => {
        // NOTE(review): state comparison here is case-insensitive, but
        // updateUI filters with an exact match — confirm which is intended.
        const filteredTorrents = state.torrents.filter(t => {
            if (state.selectedCategory && t.category !== state.selectedCategory) return false;
            if (state.selectedState && t.state?.toLowerCase() !== state.selectedState.toLowerCase()) return false;
            return true;
        });
        if (e.target.checked) {
            filteredTorrents.forEach(torrent => state.selectedTorrents.add(torrent.hash));
        } else {
            filteredTorrents.forEach(torrent => state.selectedTorrents.delete(torrent.hash));
        }
        updateUI();
    });
    // Per-row checkbox selection (delegated; rows are re-rendered often).
    refs.torrentsList.addEventListener('change', (e) => {
        if (e.target.classList.contains('torrent-select')) {
            const hash = e.target.dataset.hash;
            if (e.target.checked) {
                state.selectedTorrents.add(hash);
            } else {
                state.selectedTorrents.delete(hash);
            }
            updateUI();
        }
    });
    // Filter/sort changes always jump back to page 1.
    refs.categoryFilter.addEventListener('change', (e) => {
        state.selectedCategory = e.target.value;
        state.currentPage = 1; // Reset to first page
        updateUI();
    });
    refs.stateFilter.addEventListener('change', (e) => {
        state.selectedState = e.target.value;
        state.currentPage = 1; // Reset to first page
        updateUI();
    });
    refs.sortSelector.addEventListener('change', (e) => {
        state.sortBy = e.target.value;
        state.currentPage = 1; // Reset to first page
        updateUI();
    });
    // Stop polling when the page is being torn down.
    window.addEventListener('beforeunload', () => {
        clearInterval(refreshInterval);
    });
    // Clicking anywhere outside the context menu hides it.
    document.addEventListener('click', (e) => {
        if (!refs.torrentContextMenu.contains(e.target)) {
            refs.torrentContextMenu.style.display = 'none';
        }
    });
    // Right-click on a row opens the context menu near the cursor, clamped
    // to the viewport edges.
    refs.torrentsList.addEventListener('contextmenu', (e) => {
        const row = e.target.closest('tr');
        if (!row) return;
        e.preventDefault();
        state.selectedTorrentContextMenu = row.dataset.hash;
        refs.torrentContextMenu.querySelector('.torrent-name').textContent = row.dataset.name;
        refs.torrentContextMenu.style.display = 'block';
        // NOTE(review): pageX/pageY are document coordinates while
        // clientWidth/clientHeight are viewport sizes — on a scrolled page
        // the clamping may misplace the menu; verify with a long list.
        const { pageX, pageY } = e;
        const { clientWidth, clientHeight } = document.documentElement;
        const { offsetWidth, offsetHeight } = refs.torrentContextMenu;
        refs.torrentContextMenu.style.maxWidth = `${clientWidth - 72}px`;
        refs.torrentContextMenu.style.left = `${Math.min(pageX, clientWidth - offsetWidth - 5)}px`;
        refs.torrentContextMenu.style.top = `${Math.min(pageY, clientHeight - offsetHeight - 5)}px`;
    });
    // Context-menu actions dispatch on the clicked item's data-action.
    refs.torrentContextMenu.addEventListener('click', async (e) => {
        const action = e.target.closest('[data-action]')?.dataset.action;
        if (!action) return;
        const actions = {
            'copy-magnet': async (torrent) => {
                try {
                    await navigator.clipboard.writeText(`magnet:?xt=urn:btih:${torrent.hash}`);
                    createToast('Magnet link copied to clipboard');
                } catch (error) {
                    console.error('Error copying magnet link:', error);
                    createToast('Failed to copy magnet link', 'error');
                }
            },
            'copy-name': async (torrent) => {
                try {
                    await navigator.clipboard.writeText(torrent.name);
                    createToast('Torrent name copied to clipboard');
                } catch (error) {
                    console.error('Error copying torrent name:', error);
                    createToast('Failed to copy torrent name', 'error');
                }
            },
            'delete': async (torrent) => {
                await deleteTorrent(torrent.hash, torrent.category || '', false);
            }
        };
        // Resolve the torrent captured when the menu was opened.
        const torrent = state.torrents.find(t => t.hash === state.selectedTorrentContextMenu);
        if (torrent && actions[action]) {
            await actions[action](torrent);
            refs.torrentContextMenu.style.display = 'none';
        }
    });
});
</script>
<script>
document.addEventListener('DOMContentLoaded', function () {
    // Expose the dashboard controller globally for other page scripts.
    window.dashboard = new Dashboard();
});
</script>
{{ end }}

View File

@@ -1,463 +1,198 @@
{{ define "layout" }}
<!DOCTYPE html>
<html lang="en" data-bs-theme="light">
<html lang="en" data-theme="dark">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Decypharr - {{.Title}}</title>
<link href="https://cdn.jsdelivr.net/npm/bootstrap@5.3.0-alpha1/dist/css/bootstrap.min.css" rel="stylesheet">
<link href="https://cdn.jsdelivr.net/npm/bootstrap-icons@1.7.2/font/bootstrap-icons.css" rel="stylesheet">
<link href="https://cdn.jsdelivr.net/npm/select2@4.1.0-rc.0/dist/css/select2.min.css" rel="stylesheet"/>
<link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/select2-bootstrap-5-theme@1.3.0/dist/select2-bootstrap-5-theme.min.css"/>
<style>
:root {
--primary-color: #2563eb;
--secondary-color: #1e40af;
--bg-color: #f8fafc;
--card-bg: #ffffff;
--text-color: #333333;
--card-shadow: 0 4px 6px rgba(0, 0, 0, 0.1);
--nav-shadow: 0 2px 4px rgba(0, 0, 0, 0.1);
--border-color: #e5e7eb;
}
[data-bs-theme="dark"] {
--primary-color: #3b82f6;
--secondary-color: #60a5fa;
--bg-color: #1e293b;
--card-bg: #283548;
--text-color: #e5e7eb;
--card-shadow: 0 4px 6px rgba(0, 0, 0, 0.3);
--nav-shadow: 0 2px 4px rgba(0, 0, 0, 0.3);
--border-color: #4b5563;
}
<link href="{{.URLBase}}assets/css/styles.css" rel="stylesheet" type="text/css" />
body {
background-color: var(--bg-color);
color: var(--text-color);
transition: background-color 0.3s ease, color 0.3s ease;
display: flex;
flex-direction: column;
min-height: 100vh;
}
<link rel="apple-touch-icon" sizes="180x180" href="{{.URLBase}}images/favicon/apple-touch-icon.png">
<link rel="icon" type="image/png" sizes="32x32" href="{{.URLBase}}images/favicon/favicon-32x32.png">
<link rel="icon" type="image/png" sizes="16x16" href="{{.URLBase}}images/favicon/favicon-16x16.png">
<link rel="manifest" href="{{.URLBase}}images/favicon/site.webmanifest">
footer {
background-color: var(--bg-color);
border-top: 1px solid var(--border-color);
}
<!-- Preload JavaScript -->
<link rel="preload" href="{{.URLBase}}assets/js/common.js" as="script">
footer a {
color: var(--text-color);
}
footer a:hover {
color: var(--primary-color);
}
.navbar {
padding: 1rem 0;
background: var(--card-bg) !important;
box-shadow: var(--nav-shadow);
border-bottom: 1px solid var(--border-color);
}
.navbar-brand {
color: var(--primary-color) !important;
font-weight: 700;
font-size: 1.5rem;
}
.card {
border: none;
border-radius: 10px;
box-shadow: var(--card-shadow);
background-color: var(--card-bg);
}
.nav-link {
padding: 0.5rem 1rem;
color: var(--text-color);
}
.nav-link.active {
color: var(--primary-color) !important;
font-weight: 500;
}
.table {
color: var(--text-color);
}
/* Dark mode specific overrides */
[data-bs-theme="dark"] .navbar-light .navbar-toggler-icon {
filter: invert(1);
}
[data-bs-theme="dark"] .form-control,
[data-bs-theme="dark"] .form-select {
background-color: #374151;
color: #e5e7eb;
border-color: #4b5563;
}
[data-bs-theme="dark"] .form-control:focus,
[data-bs-theme="dark"] .form-select:focus {
border-color: var(--primary-color);
}
/* Theme toggle button styles */
.theme-toggle {
cursor: pointer;
padding: 0.5rem;
border-radius: 50%;
width: 38px;
height: 38px;
display: flex;
align-items: center;
justify-content: center;
transition: background-color 0.3s;
}
.theme-toggle:hover {
background-color: rgba(128, 128, 128, 0.2);
}
.password-toggle-container {
position: relative;
}
.password-toggle-btn {
position: absolute;
right: 10px;
top: 50%;
transform: translateY(-50%);
background: none;
border: none;
color: #6c757d;
cursor: pointer;
padding: 0;
z-index: 10;
}
.password-toggle-btn:hover {
color: #495057;
}
.form-control.has-toggle {
padding-right: 35px;
}
textarea.has-toggle {
-webkit-text-security: disc;
text-security: disc;
font-family: monospace !important;
}
textarea.has-toggle[data-password-visible="true"] {
-webkit-text-security: none;
text-security: none;
}
/* Adjust toggle button position for textareas */
.password-toggle-container textarea.has-toggle ~ .password-toggle-btn {
top: 20px;
}
</style>
<script>
// Early theme detection to prevent FOUC
(function() {
const savedTheme = localStorage.getItem('theme');
if (savedTheme) {
document.documentElement.setAttribute('data-bs-theme', savedTheme);
document.documentElement.setAttribute('data-theme', savedTheme);
} else if (window.matchMedia && window.matchMedia('(prefers-color-scheme: dark)').matches) {
document.documentElement.setAttribute('data-bs-theme', 'dark');
document.documentElement.setAttribute('data-theme', 'dark');
} else {
document.documentElement.setAttribute('data-bs-theme', 'light');
document.documentElement.setAttribute('data-theme', 'light');
}
})();
// Set global URL base
window.urlBase = "{{.URLBase}}";
</script>
</head>
<body>
<div class="toast-container position-fixed bottom-0 end-0 p-3">
<body class="min-h-screen bg-base-200 flex flex-col">
<!-- Toast Container -->
<div class="toast-container fixed bottom-4 right-4 z-50 space-y-2">
<!-- Toast messages will be created dynamically here -->
</div>
<nav class="navbar navbar-expand-lg navbar-light mb-4">
<div class="container">
<a class="navbar-brand" href="/">
<i class="bi bi-cloud-download me-2"></i>Decypharr
</a>
<button class="navbar-toggler" type="button" data-bs-toggle="collapse" data-bs-target="#navbarNav">
<span class="navbar-toggler-icon"></span>
</button>
<div class="collapse navbar-collapse" id="navbarNav">
<ul class="navbar-nav me-auto">
<li class="nav-item">
<a class="nav-link {{if eq .Page "index"}}active{{end}}" href="{{.URLBase}}">
<i class="bi bi-table me-1"></i>Torrents
</a>
</li>
<li class="nav-item">
<a class="nav-link {{if eq .Page "download"}}active{{end}}" href="{{.URLBase}}download">
<i class="bi bi-cloud-download me-1"></i>Download
</a>
</li>
<li class="nav-item">
<a class="nav-link {{if eq .Page "repair"}}active{{end}}" href="{{.URLBase}}repair">
<i class="bi bi-tools me-1"></i>Repair
</a>
</li>
<li class="nav-item">
<a class="nav-link {{if eq .Page "config"}}active{{end}}" href="{{.URLBase}}config">
<i class="bi bi-gear me-1"></i>Settings
</a>
</li>
<li class="nav-item">
<a class="nav-link" href="{{.URLBase}}webdav" target="_blank">
<i class="bi bi-cloud me-1"></i>WebDAV
</a>
</li>
<li class="nav-item">
<a class="nav-link" href="{{.URLBase}}logs" target="_blank">
<i class="bi bi-journal me-1"></i>Logs
</a>
</li>
<!-- Navigation -->
<header class="navbar bg-base-100 shadow-lg sticky top-0 z-40 backdrop-blur-sm">
<div class="navbar-start">
<div class="dropdown">
<div tabindex="0" role="button" class="btn btn-ghost lg:hidden">
<svg class="w-5 h-5" fill="none" stroke="currentColor" viewBox="0 0 24 24">
<path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M4 6h16M4 12h8m-8 6h16"/>
</svg>
</div>
<ul class="menu menu-sm dropdown-content mt-3 z-[1] p-2 shadow-lg bg-base-100 rounded-box w-52 border border-base-300">
<li><a href="{{.URLBase}}" class="{{if eq .Page "index"}}active{{end}}">
<i class="bi bi-grid-3x3-gap text-primary"></i>Dashboard
</a></li>
<li><a href="{{.URLBase}}download" class="{{if eq .Page "download"}}active{{end}}">
<i class="bi bi-cloud-download text-secondary"></i>Download
</a></li>
<li><a href="{{.URLBase}}repair" class="{{if eq .Page "repair"}}active{{end}}">
<i class="bi bi-wrench-adjustable text-accent"></i>Repair
</a></li>
<li><a href="{{.URLBase}}config" class="{{if eq .Page "config"}}active{{end}}">
<i class="bi bi-gear text-info"></i>Settings
</a></li>
<li><a href="{{.URLBase}}webdav" target="_blank">
<i class="bi bi-cloud text-success"></i>WebDAV
</a></li>
<li><a href="{{.URLBase}}logs" target="_blank">
<i class="bi bi-journal-text text-warning"></i>Logs
</a></li>
</ul>
<div class="d-flex align-items-center">
<div class="theme-toggle me-3" id="themeToggle" title="Toggle dark mode">
<i class="bi bi-sun-fill" id="lightIcon"></i>
<i class="bi bi-moon-fill d-none" id="darkIcon"></i>
</div>
<a href="{{.URLBase}}debug/stats" class="me-2">
<i class="bi bi-bar-chart-line me-1"></i>Stats
</div>
<a class="btn btn-ghost text-xl font-bold text-primary group" href="{{.URLBase}}">
<!-- Logo -->
<img src="{{.URLBase}}images/logo.svg" alt="Decypharr Logo" class="w-8 h-8 inline-block mr-2">
<span class="hidden sm:inline bg-gradient-to-r from-primary to-secondary bg-clip-text text-transparent">Decypharr</span>
</a>
</div>
<div class="navbar-center hidden lg:flex">
<ul class="menu menu-horizontal px-1 gap-1">
<li><a href="{{.URLBase}}" class="{{if eq .Page "index"}}active{{end}} tooltip tooltip-bottom" data-tip="Dashboard">
<i class="bi bi-grid-3x3-gap"></i>
<span class="hidden xl:inline">Dashboard</span>
</a></li>
<li><a href="{{.URLBase}}download" class="{{if eq .Page "download"}}active{{end}} tooltip tooltip-bottom" data-tip="Add Downloads">
<i class="bi bi-cloud-download"></i>
<span class="hidden xl:inline">Download</span>
</a></li>
<li><a href="{{.URLBase}}repair" class="{{if eq .Page "repair"}}active{{end}} tooltip tooltip-bottom" data-tip="Repair Media">
<i class="bi bi-wrench-adjustable"></i>
<span class="hidden xl:inline">Repair</span>
</a></li>
<li><a href="{{.URLBase}}config" class="{{if eq .Page "config"}}active{{end}} tooltip tooltip-bottom" data-tip="Settings">
<i class="bi bi-gear"></i>
<span class="hidden xl:inline">Settings</span>
</a></li>
<li><a href="{{.URLBase}}webdav" target="_blank" class="tooltip tooltip-bottom" data-tip="WebDAV Access">
<i class="bi bi-cloud"></i>
<span class="hidden xl:inline">WebDAV</span>
</a></li>
<li><a href="{{.URLBase}}logs" target="_blank" class="tooltip tooltip-bottom" data-tip="System Logs">
<i class="bi bi-journal-text"></i>
<span class="hidden xl:inline">Logs</span>
</a></li>
</ul>
</div>
<div class="navbar-end">
<div class="flex items-center gap-3">
<!-- Theme Toggle -->
<div class="tooltip tooltip-left" data-tip="Toggle Theme">
<label class="swap swap-rotate btn btn-ghost btn-circle hover:bg-base-300 transition-colors">
<input type="checkbox" id="themeToggle" class="theme-controller" />
<!-- Sun icon for light mode -->
<i class="swap-off bi bi-sun text-lg text-warning"></i>
<!-- Moon icon for dark mode -->
<i class="swap-on bi bi-moon-stars text-lg text-info"></i>
</label>
</div>
<!-- Stats Link -->
<div class="tooltip tooltip-left" data-tip="System Statistics">
<a href="{{.URLBase}}debug/stats" class="btn btn-ghost btn-sm hover:bg-base-300 transition-colors">
<i class="bi bi-graph-up text-lg"></i>
<span class="hidden md:inline ml-1">Stats</span>
</a>
<span class="badge bg-primary" id="version-badge">Loading...</span>
</div>
<!-- Version Badge -->
<div class="tooltip tooltip-left" data-tip="Current Version">
<div class="badge badge-primary font-mono text-xs hover:badge-primary-focus transition-colors cursor-pointer" id="version-badge">
Loading...
</div>
</div>
</div>
</div>
</nav>
</header>
{{ if eq .Page "index" }}
{{ template "index" . }}
{{ else if eq .Page "download" }}
{{ template "download" . }}
{{ else if eq .Page "repair" }}
{{ template "repair" . }}
{{ else if eq .Page "config" }}
{{ template "config" . }}
{{ else if eq .Page "login" }}
{{ template "login" . }}
{{ else if eq .Page "register" }}
{{ template "register" . }}
{{ else }}
{{ end }}
<footer class="mt-auto py-2 text-center border-top">
<div class="container">
<small class="text-muted">
<a href="https://github.com/sirrobot01/decypharr" target="_blank" class="text-decoration-none me-3">
<i class="bi bi-github me-1"></i>GitHub
</a>
<a href="https://sirrobot01.github.io/decypharr" target="_blank" class="text-decoration-none">
<i class="bi bi-book me-1"></i>Documentation
</a>
</small>
<!-- Main Content -->
<main class="flex-1 container mx-auto px-4 py-6">
{{ if eq .Page "index" }}
{{ template "index" . }}
{{ else if eq .Page "download" }}
{{ template "download" . }}
{{ else if eq .Page "repair" }}
{{ template "repair" . }}
{{ else if eq .Page "config" }}
{{ template "config" . }}
{{ else if eq .Page "login" }}
{{ template "login" . }}
{{ else if eq .Page "register" }}
{{ template "register" . }}
{{ else }}
<div class="hero min-h-96">
<div class="hero-content text-center">
<div class="max-w-md">
<h1 class="text-5xl font-bold text-error">404</h1>
<p class="py-6">Page not found. The page you're looking for doesn't exist.</p>
<a href="{{.URLBase}}" class="btn btn-primary">Go Home</a>
</div>
</div>
</div>
{{ end }}
</main>
<!-- Footer -->
<footer class="footer footer-center p-6 bg-base-300 text-base-content border-t border-base-200">
<aside class="grid-flow-col gap-4">
<a href="https://github.com/sirrobot01/decypharr" target="_blank"
class="link link-hover flex items-center gap-2 hover:text-primary transition-colors">
<i class="bi bi-github text-lg"></i>
<span>GitHub</span>
</a>
<a href="https://sirrobot01.github.io/decypharr" target="_blank"
class="link link-hover flex items-center gap-2 hover:text-primary transition-colors">
<i class="bi bi-book text-lg"></i>
<span>Documentation</span>
</a>
</aside>
</footer>
<!-- Scripts -->
<script src="{{.URLBase}}assets/js/jquery-3.7.1.min.js"></script>
<script src="{{.URLBase}}assets/js/common.js"></script>
<script src="https://cdn.jsdelivr.net/npm/bootstrap@5.3.0-alpha1/dist/js/bootstrap.bundle.min.js"></script>
<script src="https://code.jquery.com/jquery-3.6.0.min.js"></script>
<script src="https://cdn.jsdelivr.net/npm/select2@4.1.0-rc.0/dist/js/select2.min.js"></script>
<script>
window.urlBase = "{{.URLBase}}";
/**
 * Join a base URL and a path with exactly one slash between them.
 * @param {string} base - Base URL, with or without a trailing slash.
 * @param {string} path - Path, with or without a leading slash.
 * @returns {string} The joined URL.
 */
function joinURL(base, path) {
    const prefix = base.endsWith('/') ? base : base + '/';
    const suffix = path.startsWith('/') ? path.slice(1) : path;
    return prefix + suffix;
}
/**
 * fetch() wrapper that prefixes the endpoint with the configured URL base
 * (window.urlBase), so pages work when the app is mounted under a sub-path.
 * @param {string} endpoint - Path relative to the application root.
 * @param {RequestInit} [options] - Standard fetch options.
 * @returns {Promise<Response>} The pending fetch.
 */
function fetcher(endpoint, options = {}) {
    const base = window.urlBase || '';
    return fetch(joinURL(base, endpoint), options);
}
/**
 * Create and show a Bootstrap toast inside the shared `.toast-container`,
 * then remove it from the DOM once hidden. Unknown types fall back to
 * 'success'.
 * NOTE(review): this layout contains two `.toast-container` elements (the
 * Bootstrap one and the Tailwind one); querySelector uses the first match —
 * confirm which container should receive toasts.
 * @param {string} message - The message to display; newlines become <br>.
 * @param {string} [type='success'] - One of success, warning, error.
 */
const createToast = (message, type = 'success') => {
type = ['success', 'warning', 'error'].includes(type) ? type : 'success';
// More severe toasts stay visible longer before auto-hiding.
const toastTimeouts = {
success: 5000,
warning: 10000,
error: 15000
};
const toastContainer = document.querySelector('.toast-container');
// Timestamp-based id keeps multiple concurrent toasts distinct.
const toastId = `toast-${Date.now()}`;
const toastHtml = `
<div id="${toastId}" class="toast" role="alert" aria-live="assertive" aria-atomic="true">
<div class="toast-header ${type === 'error' ? 'bg-danger text-white' : type === 'warning' ? 'bg-warning text-dark' : 'bg-success text-white'}">
<strong class="me-auto">
${type === 'error' ? 'Error' : type === 'warning' ? 'Warning' : 'Success'}
</strong>
<button type="button" class="btn-close ${type === 'warning' ? '' : 'btn-close-white'}" data-bs-dismiss="toast" aria-label="Close"></button>
</div>
<div class="toast-body">
${message.replace(/\n/g, '<br>')}
</div>
</div>
`;
toastContainer.insertAdjacentHTML('beforeend', toastHtml);
const toastElement = document.getElementById(toastId);
const toast = new bootstrap.Toast(toastElement, {
autohide: true,
delay: toastTimeouts[type]
});
toast.show();
// Clean up the node after Bootstrap finishes the hide animation.
toastElement.addEventListener('hidden.bs.toast', () => {
toastElement.remove();
});
};
/**
 * Build the HTML for a password input with an eye-icon visibility toggle.
 * The toggle button calls togglePassword(id) and the icon element gets the
 * id `${id}_icon`, matching what togglePassword expects.
 * @param {string} name - Form field name attribute.
 * @param {string} id - Element id for the input.
 * @param {string} [placeholder=""] - Placeholder text.
 * @param {boolean} [required=false] - Whether the input is required.
 * @returns {string} HTML markup for the field.
 */
function createPasswordField(name, id, placeholder = "", required = false) {
    const requiredAttr = required ? 'required' : '';
    return `
<div class="password-toggle-container">
    <input type="password"
           class="form-control has-toggle"
           name="${name}"
           id="${id}"
           placeholder="${placeholder}"
           ${requiredAttr}>
    <button type="button"
            class="password-toggle-btn"
            onclick="togglePassword('${id}');">
        <i class="bi bi-eye" id="${id}_icon"></i>
    </button>
</div>
`;
}
/**
 * Toggle an <input type="password"> between masked and visible text,
 * swapping the eye icon to match the new state.
 * @param {string} fieldId - id of the input; the icon id is `${fieldId}_icon`.
 */
function togglePassword(fieldId) {
    const field = document.getElementById(fieldId);
    const icon = document.getElementById(fieldId + '_icon');
    const revealing = field.type === 'password';
    field.type = revealing ? 'text' : 'password';
    icon.className = revealing ? 'bi bi-eye-slash' : 'bi bi-eye';
}
/**
 * Toggle masking on a textarea that hides its contents via the non-standard
 * text-security CSS property (see the `textarea.has-toggle` styles).
 * An empty inline webkitTextSecurity means the class-level `disc` masking is
 * still in effect, so the first click reveals the text.
 * @param {string} fieldId - id of the textarea; the icon id is `${fieldId}_icon`.
 */
function togglePasswordTextarea(fieldId) {
    const field = document.getElementById(fieldId);
    const icon = document.getElementById(fieldId + '_icon');
    const currentlyHidden = field.style.webkitTextSecurity === 'disc' || field.style.webkitTextSecurity === '';
    const next = currentlyHidden ? 'none' : 'disc';
    field.style.webkitTextSecurity = next;
    field.style.textSecurity = next; // For other browsers
    // data-password-visible drives the [data-password-visible="true"] CSS rule.
    field.setAttribute('data-password-visible', currentlyHidden ? 'true' : 'false');
    icon.className = currentlyHidden ? 'bi bi-eye-slash' : 'bi bi-eye';
}
// Theme management: cache the toggle button, the two navbar icons, and the
// root element that carries the data-bs-theme attribute.
// NOTE(review): this merged layout contains two elements with
// id="themeToggle" (the Bootstrap div and the daisyUI checkbox);
// getElementById returns the first one in document order — confirm which
// toggle is meant to be live.
const themeToggle = document.getElementById('themeToggle');
const lightIcon = document.getElementById('lightIcon');
const darkIcon = document.getElementById('darkIcon');
const htmlElement = document.documentElement;
/**
 * Apply a theme to the document, persist the choice, and show the matching
 * navbar icon (sun for light, moon for dark).
 * @param {string} theme - 'light' or 'dark'.
 */
function setTheme(theme) {
    htmlElement.setAttribute('data-bs-theme', theme);
    localStorage.setItem('theme', theme);
    const isDark = theme === 'dark';
    // classList.toggle with a force flag adds/removes d-none deterministically.
    lightIcon.classList.toggle('d-none', isDark);
    darkIcon.classList.toggle('d-none', !isDark);
}
// Check for saved theme preference or use system preference.
const savedTheme = localStorage.getItem('theme');
if (savedTheme) {
setTheme(savedTheme);
} else {
// No explicit choice yet: follow the OS color-scheme preference.
if (window.matchMedia && window.matchMedia('(prefers-color-scheme: dark)').matches) {
setTheme('dark');
} else {
setTheme('light');
}
}
// Toggle theme when button is clicked.
themeToggle.addEventListener('click', () => {
const currentTheme = htmlElement.getAttribute('data-bs-theme');
setTheme(currentTheme === 'dark' ? 'light' : 'dark');
});
// Follow live OS theme changes, but only while the user has not saved an
// explicit preference (a saved choice always wins).
if (window.matchMedia) {
window.matchMedia('(prefers-color-scheme: dark)').addEventListener('change', e => {
if (!localStorage.getItem('theme')) {
setTheme(e.matches ? 'dark' : 'light');
}
});
}
// On load, fetch the running version and render it in the navbar badge as a
// link to the matching GitHub release; channel-specific CSS classes are added
// for beta/nightly builds. Falls back to "Unknown" on any fetch error.
// NOTE(review): two elements carry id="version-badge" in this merged layout;
// getElementById updates only the first — confirm the duplicate is removed.
document.addEventListener('DOMContentLoaded', function() {
fetcher('/version')
.then(response => response.json())
.then(data => {
const versionBadge = document.getElementById('version-badge');
// Badge doubles as a link to the release notes for this exact version.
versionBadge.innerHTML = `<a href="https://github.com/sirrobot01/decypharr/releases/tag/${data.version}" target="_blank" class="text-white">${data.channel}-${data.version}</a>`;
if (data.channel === 'beta') {
versionBadge.classList.add('beta');
} else if (data.channel === 'nightly') {
versionBadge.classList.add('nightly');
}
})
.catch(error => {
console.error('Error fetching version:', error);
document.getElementById('version-badge').textContent = 'Unknown';
});
});
</script>
<!-- Page-specific scripts -->
{{ if eq .Page "index" }}
<script src="{{.URLBase}}assets/js/dashboard.js"></script>
{{ else if eq .Page "download" }}
<script src="{{.URLBase}}assets/js/download.js"></script>
{{ else if eq .Page "repair" }}
<script src="{{.URLBase}}assets/js/repair.js"></script>
{{ else if eq .Page "config" }}
<script src="{{.URLBase}}assets/js/config.js"></script>
{{ end }}
</body>
</html>
{{ end }}

View File

@@ -1,27 +1,25 @@
{{ define "login" }}
<div class="container mt-5">
<div class="row justify-content-center">
<div class="col-md-6 col-lg-4">
<div class="card">
<div class="card-header">
<h4 class="mb-0 text-center">Login</h4>
<div class="flex min-h-screen items-center justify-center bg-base-200">
<div class="card w-full max-w-sm bg-base-100 shadow-xl">
<div class="card-body">
<h2 class="card-title justify-center mb-6">Login</h2>
<form id="loginForm" class="space-y-4">
<div class="form-control">
<label class="label" for="username">
<span class="label-text">Username</span>
</label>
<input type="text" class="input input-bordered w-full" id="username" name="username" required>
</div>
<div class="card-body">
<form id="loginForm">
<div class="mb-3">
<label for="username" class="form-label">Username</label>
<input type="text" class="form-control" id="username" name="username" required>
</div>
<div class="mb-3">
<label for="password" class="form-label">Password</label>
<input type="password" class="form-control" id="password" name="password" required>
</div>
<div class="d-grid">
<button type="submit" class="btn btn-primary">Login</button>
</div>
</form>
<div class="form-control">
<label class="label" for="password">
<span class="label-text">Password</span>
</label>
<input type="password" class="input input-bordered w-full" id="password" name="password" required>
</div>
</div>
<div class="form-control mt-6">
<button type="submit" class="btn btn-primary w-full">Login</button>
</div>
</form>
</div>
</div>
</div>
@@ -29,6 +27,7 @@
<script>
document.getElementById('loginForm').addEventListener('submit', async (e) => {
e.preventDefault();
let loginBtn = document.querySelector('#loginForm button[type="submit"]');
const formData = {
username: document.getElementById('username').value,
@@ -36,7 +35,7 @@
};
try {
const response = await fetcher('/login', {
const response = await window.decypharrUtils.fetcher('/login', {
method: 'POST',
headers: {
'Content-Type': 'application/json'
@@ -45,13 +44,21 @@
});
if (response.ok) {
window.location.href = '/';
window.decypharrUtils.createToast('Login successful! Redirecting...', 'success');
// Redirect after a short delay
setTimeout(() => {
window.location.href = window.urlBase || '/';
}, 1000);
} else {
createToast('Invalid credentials', 'error');
const errorText = await response.text();
throw new Error(errorText || 'Invalid credentials');
}
} catch (error) {
console.error('Login error:', error);
createToast('Login failed', 'error');
window.decypharrUtils.createToast(error.message || 'Login failed. Please try again.', 'error');
} finally {
window.decypharrUtils.setButtonLoading(loginBtn, false);
}
});
</script>

View File

@@ -1,32 +1,32 @@
{{ define "register" }}
<div class="container mt-5">
<div class="row justify-content-center">
<div class="col-md-6 col-lg-4">
<div class="card">
<div class="card-header">
<h4 class="mb-0 text-center">First Time Auth Setup</h4>
<div class="flex min-h-screen items-center justify-center bg-base-200">
<div class="card w-full max-w-md bg-base-100 shadow-xl">
<div class="card-body">
<h2 class="card-title justify-center mb-6">First Time Auth Setup</h2>
<form id="authForm" class="space-y-4">
<div class="form-control">
<label class="label" for="username">
<span class="label-text">Username</span>
</label>
<input type="text" class="input input-bordered w-full" id="username" name="username" required>
</div>
<div class="card-body">
<form id="authForm">
<div class="mb-3">
<label for="username" class="form-label">Username</label>
<input type="text" class="form-control" id="username" name="username" required>
</div>
<div class="mb-3">
<label for="password" class="form-label">Password</label>
<input type="password" class="form-control" id="password" name="password" required>
</div>
<div class="mb-3">
<label for="confirmPassword" class="form-label">Confirm Password</label>
<input type="password" class="form-control" id="confirmPassword" name="confirmPassword" required>
</div>
<div class="d-grid gap-2">
<button type="submit" class="btn btn-primary mb-2">Save</button>
<button type="button" id="skipAuthBtn" class="btn btn-secondary">Skip</button>
</div>
</form>
<div class="form-control">
<label class="label" for="password">
<span class="label-text">Password</span>
</label>
<input type="password" class="input input-bordered w-full" id="password" name="password" required>
</div>
</div>
<div class="form-control">
<label class="label" for="confirmPassword">
<span class="label-text">Confirm Password</span>
</label>
<input type="password" class="input input-bordered w-full" id="confirmPassword" name="confirmPassword" required>
</div>
<div class="form-control mt-6 space-y-2">
<button type="submit" class="btn btn-primary w-full">Save</button>
<button type="button" id="skipAuthBtn" class="btn btn-secondary w-full">Skip</button>
</div>
</form>
</div>
</div>
</div>
@@ -61,10 +61,10 @@
if (!response.ok) {
return response.text().then(errorText => {
// Throw an error with the response text
createToast(errorText || 'Registration failed', 'error');
window.decypharrUtils.createToast(errorText || 'Registration failed', 'error');
});
} else {
window.location.href = joinURL(window.urlBase, '/');
window.location.href = window.decypharrUtils.joinURL(window.urlBase, '/');
}
})
@@ -75,10 +75,10 @@
// Handle skip auth button
skipAuthBtn.addEventListener('click', function() {
fetcher('/skip-auth', { method: 'GET' })
window.decypharrUtils.fetcher('/skip-auth', { method: 'GET' })
.then(response => {
if (response.ok) {
window.location.href = joinURL(window.urlBase, '/');
window.location.href = window.decypharrUtils.joinURL(window.urlBase, '/');
} else {
throw new Error('Failed to skip authentication');
}

File diff suppressed because it is too large Load Diff

View File

@@ -126,13 +126,17 @@ func (wb *Web) DownloadHandler(w http.ResponseWriter, r *http.Request) {
for _, d := range cfg.Debrids {
debrids = append(debrids, d.Name)
}
downloadFolder := ""
if cfg.QBitTorrent != nil {
downloadFolder = cfg.QBitTorrent.DownloadFolder
}
data := map[string]interface{}{
"URLBase": cfg.URLBase,
"Page": "download",
"Title": "Download",
"Debrids": debrids,
"HasMultiDebrid": len(debrids) > 1,
"DownloadFolder": cfg.QBitTorrent.DownloadFolder,
"DownloadFolder": downloadFolder,
}
_ = wb.templates.ExecuteTemplate(w, "layout", data)
}

View File

@@ -7,6 +7,7 @@ import (
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/logger"
"github.com/sirrobot01/decypharr/pkg/store"
"github.com/sirrobot01/decypharr/pkg/usenet"
"html/template"
"os"
)
@@ -50,14 +51,21 @@ type RepairRequest struct {
//go:embed templates/*
var content embed.FS
//go:embed assets/build/*
var assetsEmbed embed.FS
//go:embed assets/images/*
var imagesEmbed embed.FS
type Web struct {
logger zerolog.Logger
cookie *sessions.CookieStore
templates *template.Template
torrents *store.TorrentStorage
usenet usenet.Usenet
}
func New() *Web {
func New(usenet usenet.Usenet) *Web {
templates := template.Must(template.ParseFS(
content,
"templates/layout.html",
@@ -80,5 +88,6 @@ func New() *Web {
templates: templates,
cookie: cookieStore,
torrents: store.Get().Torrents(),
usenet: usenet,
}
}

View File

@@ -1,473 +1,8 @@
package webdav
import (
"crypto/tls"
"fmt"
"io"
"net/http"
"os"
"strings"
"time"
"github.com/sirrobot01/decypharr/pkg/debrid/store"
)
// sharedClient is the single process-wide HTTP client used to proxy upstream
// debrid streams. Timeout is 0 (no overall deadline) so long-running video
// streams are never cut off mid-play; per-phase limits (TLS handshake,
// response headers) are enforced on the transport instead, and connection
// pooling is tuned for many concurrent streams to the same hosts.
// NOTE(review): InsecureSkipVerify disables TLS certificate verification for
// every upstream host — confirm this is intentional and not limited to a
// specific misbehaving provider.
var sharedClient = &http.Client{
Transport: &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
MaxIdleConns: 100,
MaxIdleConnsPerHost: 20,
MaxConnsPerHost: 50,
IdleConnTimeout: 90 * time.Second,
TLSHandshakeTimeout: 10 * time.Second,
ResponseHeaderTimeout: 30 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
DisableKeepAlives: false,
},
Timeout: 0,
}
// streamError pairs a streaming failure with the HTTP status code that should
// be reported to the client, plus a flag marking client disconnections
// (StatusCode 0), which are expected during playback and can be handled more
// quietly than real upstream failures.
type streamError struct {
Err error
StatusCode int
IsClientDisconnection bool
}

// Error returns the wrapped error's message, satisfying the error interface.
func (e *streamError) Error() string {
return e.Err.Error()
}

// Unwrap exposes the wrapped error to errors.Is / errors.As.
func (e *streamError) Unwrap() error {
return e.Err
}
// File is a WebDAV entry backed by a debrid-hosted torrent file. Regular
// files are streamed from an upstream download link resolved through the
// cache; small generated files keep their bytes in content; directories
// carry their listing in children.
type File struct {
name string
torrentName string
link string
// downloadLink caches the last resolved upstream URL so repeat requests
// skip the cache lookup (see getDownloadLink).
downloadLink string
size int64
isDir bool
fileId string
isRar bool
// metadataOnly entries expose Stat() data but no readable bytes.
metadataOnly bool
// content holds preloaded bytes for in-memory files (e.g. version.txt);
// nil for streamed files.
content []byte
children []os.FileInfo // For directories
cache *store.Cache
modTime time.Time
// Minimal state for interface compliance only
readOffset int64 // Only used for Read() method compliance
}
// Close satisfies the file interface. A File owns no OS resources, so for
// regular files this only drops references (preloaded content, directory
// listing, cached link, read offset) so they can be reclaimed; directories
// keep their state untouched.
func (f *File) Close() error {
	if f.isDir {
		// Nothing per-handle to release for directories.
		return nil
	}
	f.content = nil
	f.children = nil
	f.downloadLink = ""
	f.readOffset = 0
	return nil
}
// getDownloadLink resolves a usable upstream URL for this file. A link cached
// on the struct from a previous request is reused when still valid; otherwise
// the debrid cache is asked for a fresh one, which is memoized on success.
// Returns os.ErrNotExist when no valid URL can be produced.
func (f *File) getDownloadLink() (string, error) {
	// Fast path: reuse the link resolved on an earlier request.
	if f.downloadLink != "" && isValidURL(f.downloadLink) {
		return f.downloadLink, nil
	}
	link, err := f.cache.GetDownloadLink(f.torrentName, f.name, f.link)
	if err != nil {
		return "", err
	}
	if link == "" || !isValidURL(link) {
		return "", os.ErrNotExist
	}
	f.downloadLink = link
	return link, nil
}
// getDownloadByteRange returns the optional byte-range restriction recorded
// for this file in the cache (nil when the whole file is available). It is a
// thin delegation to the cache lookup.
func (f *File) getDownloadByteRange() (*[2]int64, error) {
	return f.cache.GetDownloadByteRange(f.torrentName, f.name)
}
// servePreloadedContent serves in-memory file bytes (f.content) directly,
// honoring single-part HTTP Range requests with 206 responses and rejecting
// malformed or multi-part ranges with 416. Without a Range header the whole
// payload is written with a 200.
func (f *File) servePreloadedContent(w http.ResponseWriter, r *http.Request) error {
content := f.content
size := int64(len(content))
// Handle range requests for preloaded content
if rangeHeader := r.Header.Get("Range"); rangeHeader != "" {
ranges, err := parseRange(rangeHeader, size)
// Only exactly one range is supported; anything else gets 416 plus the
// required Content-Range: bytes */<size> header.
if err != nil || len(ranges) != 1 {
w.Header().Set("Content-Range", fmt.Sprintf("bytes */%d", size))
return &streamError{Err: fmt.Errorf("invalid range"), StatusCode: http.StatusRequestedRangeNotSatisfiable}
}
start, end := ranges[0].start, ranges[0].end
w.Header().Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", start, end, size))
w.Header().Set("Content-Length", fmt.Sprintf("%d", end-start+1))
w.Header().Set("Accept-Ranges", "bytes")
w.WriteHeader(http.StatusPartialContent)
// end is inclusive, hence end+1 in the slice bound.
_, err = w.Write(content[start : end+1])
return err
}
// Full content
w.Header().Set("Content-Length", fmt.Sprintf("%d", size))
w.Header().Set("Accept-Ranges", "bytes")
w.WriteHeader(http.StatusOK)
_, err := w.Write(content)
return err
}
// StreamResponse writes the file's bytes to the client. Files with preloaded
// in-memory content are served directly; everything else is proxied from the
// upstream download link via the retry-capable streaming path.
func (f *File) StreamResponse(w http.ResponseWriter, r *http.Request) error {
	if f.content != nil {
		return f.servePreloadedContent(w, r)
	}
	return f.streamWithRetry(w, r, 0)
}
// streamWithRetry proxies the upstream download to the client: it resolves a
// download link, forwards any Range request (shifted by the file's recorded
// byte-range offset), and streams the body with early-flush buffering. On
// certain upstream failures (404, bandwidth exceeded) handleUpstream can ask
// for a retry with a regenerated link, recursing with retryCount+1.
// NOTE(review): maxRetries is 0, so the retry branch below is currently
// unreachable — confirm whether this is a deliberate kill-switch or should be
// a positive constant.
func (f *File) streamWithRetry(w http.ResponseWriter, r *http.Request, retryCount int) error {
const maxRetries = 0
_log := f.cache.Logger()
// Get download link (with caching optimization)
downloadLink, err := f.getDownloadLink()
if err != nil {
return &streamError{Err: err, StatusCode: http.StatusPreconditionFailed}
}
if downloadLink == "" {
return &streamError{Err: fmt.Errorf("empty download link"), StatusCode: http.StatusNotFound}
}
// Create upstream request with streaming optimizations
upstreamReq, err := http.NewRequest("GET", downloadLink, nil)
if err != nil {
return &streamError{Err: err, StatusCode: http.StatusInternalServerError}
}
setVideoStreamingHeaders(upstreamReq)
// Handle range requests (critical for video seeking).
// handleRangeRequest returns -1 invalid, 0 no range, 1 valid range.
isRangeRequest := f.handleRangeRequest(upstreamReq, r, w)
if isRangeRequest == -1 {
return &streamError{Err: fmt.Errorf("invalid range"), StatusCode: http.StatusRequestedRangeNotSatisfiable}
}
resp, err := sharedClient.Do(upstreamReq)
if err != nil {
return &streamError{Err: err, StatusCode: http.StatusServiceUnavailable}
}
defer resp.Body.Close()
// Handle upstream errors with retry logic
shouldRetry, retryErr := f.handleUpstream(resp, retryCount, maxRetries)
if shouldRetry && retryCount < maxRetries {
// Retry with new download link
_log.Debug().
Int("retry_count", retryCount+1).
Str("file", f.name).
Msg("Retrying stream request")
return f.streamWithRetry(w, r, retryCount+1)
}
if retryErr != nil {
return retryErr
}
setVideoResponseHeaders(w, resp, isRangeRequest == 1)
// Stream with optimized buffering for video
return f.streamVideoOptimized(w, resp.Body)
}
// handleUpstream classifies a non-2xx upstream response. 200/206 pass
// through untouched. 503 with a "traffic exceeded" body and 404 both mark the
// cached link invalid and request a retry (shouldRetry=true) while retries
// remain; otherwise a streamError with an appropriate client-facing status is
// returned. Any other status is logged and surfaced as 502.
// NOTE(review): error paths drain and close resp.Body here while the caller
// (streamWithRetry) also defers resp.Body.Close() — a double Close. It is
// harmless for net/http bodies but worth tidying.
func (f *File) handleUpstream(resp *http.Response, retryCount, maxRetries int) (shouldRetry bool, err error) {
if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusPartialContent {
return false, nil
}
_log := f.cache.Logger()
// Drain before closing so the keep-alive connection can be reused.
cleanupResp := func(resp *http.Response) {
if resp.Body != nil {
_, _ = io.Copy(io.Discard, resp.Body)
resp.Body.Close()
}
}
switch resp.StatusCode {
case http.StatusServiceUnavailable:
// Read the body to check for specific error messages
body, readErr := io.ReadAll(resp.Body)
cleanupResp(resp)
if readErr != nil {
_log.Error().Err(readErr).Msg("Failed to read response body")
return false, &streamError{
Err: fmt.Errorf("failed to read error response: %w", readErr),
StatusCode: http.StatusServiceUnavailable,
}
}
bodyStr := string(body)
// Provider-specific sentinel string indicating the bandwidth quota is spent.
if strings.Contains(bodyStr, "you have exceeded your traffic") {
_log.Debug().
Str("file", f.name).
Int("retry_count", retryCount).
Msg("Bandwidth exceeded. Marking link as invalid")
f.cache.MarkDownloadLinkAsInvalid(f.link, f.downloadLink, "bandwidth_exceeded")
// Retry with a different API key if available and we haven't exceeded retries
if retryCount < maxRetries {
return true, nil
}
return false, &streamError{
Err: fmt.Errorf("bandwidth exceeded after %d retries", retryCount),
StatusCode: http.StatusServiceUnavailable,
}
}
return false, &streamError{
Err: fmt.Errorf("service unavailable: %s", bodyStr),
StatusCode: http.StatusServiceUnavailable,
}
case http.StatusNotFound:
cleanupResp(resp)
_log.Debug().
Str("file", f.name).
Int("retry_count", retryCount).
Msg("Link not found (404). Marking link as invalid and regenerating")
f.cache.MarkDownloadLinkAsInvalid(f.link, f.downloadLink, "link_not_found")
// Try to regenerate download link if we haven't exceeded retries
if retryCount < maxRetries {
// Clear cached link to force regeneration
f.downloadLink = ""
return true, nil
}
return false, &streamError{
Err: fmt.Errorf("file not found after %d retries", retryCount),
StatusCode: http.StatusNotFound,
}
default:
body, _ := io.ReadAll(resp.Body)
cleanupResp(resp)
_log.Error().
Int("status_code", resp.StatusCode).
Str("file", f.name).
Str("response_body", string(body)).
Msg("Unexpected upstream error")
return false, &streamError{
Err: fmt.Errorf("upstream error %d: %s", resp.StatusCode, string(body)),
StatusCode: http.StatusBadGateway,
}
}
}
// handleRangeRequest translates the client's Range header onto the upstream
// request, shifting by the file's recorded byte-range offset when the cached
// torrent stores this file at an offset inside a larger payload.
// Returns 1 for a valid single range, 0 when the client sent no Range header,
// and -1 for a malformed/multi-part range (caller responds with 416).
func (f *File) handleRangeRequest(upstreamReq *http.Request, r *http.Request, w http.ResponseWriter) int {
rangeHeader := r.Header.Get("Range")
if rangeHeader == "" {
// No client range: still constrain the upstream fetch to the file's
// recorded byte window, if one exists.
if byteRange, _ := f.getDownloadByteRange(); byteRange != nil {
upstreamReq.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", byteRange[0], byteRange[1]))
}
return 0 // No range request
}
// Parse range request
ranges, err := parseRange(rangeHeader, f.size)
if err != nil || len(ranges) != 1 {
// Advertise the full size so the client can issue a correct retry.
w.Header().Set("Content-Range", fmt.Sprintf("bytes */%d", f.size))
return -1 // Invalid range
}
// Shift the client's file-relative range by the stored start offset.
byteRange, _ := f.getDownloadByteRange()
start, end := ranges[0].start, ranges[0].end
if byteRange != nil {
start += byteRange[0]
end += byteRange[0]
}
upstreamReq.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", start, end))
return 1 // Valid range request
}
// streamVideoOptimized copies src to the response writer with a 64KB buffer,
// writing and flushing the very first chunk immediately so playback can start
// before the rest of the body arrives. Client disconnections are tagged on
// the returned streamError so callers can treat them as benign.
func (f *File) streamVideoOptimized(w http.ResponseWriter, src io.Reader) error {
// Use larger buffer for video streaming (better throughput)
buf := make([]byte, 64*1024) // 64KB buffer
// First chunk optimization - send immediately for faster start
n, err := src.Read(buf)
if err != nil && err != io.EOF {
if isClientDisconnection(err) {
return &streamError{Err: err, StatusCode: 0, IsClientDisconnection: true}
}
return &streamError{Err: err, StatusCode: 0}
}
if n > 0 {
// Write first chunk immediately
_, writeErr := w.Write(buf[:n])
if writeErr != nil {
if isClientDisconnection(writeErr) {
return &streamError{Err: writeErr, StatusCode: 0, IsClientDisconnection: true}
}
return &streamError{Err: writeErr, StatusCode: 0}
}
// Flush immediately for faster video start
if flusher, ok := w.(http.Flusher); ok {
flusher.Flush()
}
}
if err == io.EOF {
return nil
}
// Continue with optimized copy for remaining data; CopyBuffer reuses the
// same 64KB buffer instead of allocating a new one.
_, err = io.CopyBuffer(w, src, buf)
if err != nil {
if isClientDisconnection(err) {
return &streamError{Err: err, StatusCode: 0, IsClientDisconnection: true}
}
return &streamError{Err: err, StatusCode: 0}
}
return nil
}
/*
These are the methods that implement the os.File interface for the File type.
Only Stat and ReadDir are used
*/
// Stat synthesizes file metadata: directories report mode 0755|ModeDir with
// size 0, regular files report mode 0644 with the recorded size. The modTime
// carried on the File is used in both cases.
func (f *File) Stat() (os.FileInfo, error) {
	info := &FileInfo{
		name:    f.name,
		modTime: f.modTime,
		isDir:   f.isDir,
	}
	if f.isDir {
		info.mode = 0755 | os.ModeDir
	} else {
		info.mode = 0644
		info.size = f.size
	}
	return info, nil
}
// Read satisfies io.Reader for preloaded in-memory content only: directories
// are invalid, metadata-only entries report immediate EOF, and streamed files
// return an error directing callers to StreamResponse.
func (f *File) Read(p []byte) (n int, err error) {
	switch {
	case f.isDir:
		return 0, os.ErrInvalid
	case f.metadataOnly:
		return 0, io.EOF
	case f.content != nil:
		// Serve preloaded bytes (like version.txt) from the tracked offset.
		if f.readOffset >= int64(len(f.content)) {
			return 0, io.EOF
		}
		n = copy(p, f.content[f.readOffset:])
		f.readOffset += int64(n)
		return n, nil
	default:
		return 0, fmt.Errorf("use StreamResponse method for streaming files")
	}
}
// Seek implements io.Seeker for preloaded content, clamping the resulting
// offset to [0, len(content)]. Directories are invalid, and streamed files
// return an error directing callers to StreamResponse.
func (f *File) Seek(offset int64, whence int) (int64, error) {
	if f.isDir {
		return 0, os.ErrInvalid
	}
	if f.content == nil {
		// Streamed files never seek through this path.
		return 0, fmt.Errorf("use StreamResponse method for streaming files")
	}
	var pos int64
	switch whence {
	case io.SeekStart:
		pos = offset
	case io.SeekCurrent:
		pos = f.readOffset + offset
	case io.SeekEnd:
		pos = int64(len(f.content)) + offset
	default:
		return 0, os.ErrInvalid
	}
	// Clamp rather than error on out-of-bounds targets.
	if pos < 0 {
		pos = 0
	} else if pos > int64(len(f.content)) {
		pos = int64(len(f.content))
	}
	f.readOffset = pos
	return f.readOffset, nil
}
// Write always fails with os.ErrPermission: this filesystem is read-only.
func (f *File) Write(_ []byte) (int, error) {
	return 0, os.ErrPermission
}
func (f *File) Readdir(count int) ([]os.FileInfo, error) {
if !f.isDir {
return nil, os.ErrInvalid
}
if count <= 0 {
return f.children, nil
}
if len(f.children) == 0 {
return nil, io.EOF
}
if count > len(f.children) {
count = len(f.children)
}
files := f.children[:count]
f.children = f.children[count:]
return files, nil
type File interface {
Name() string
Size() int64
IsDir() bool
ModTime() string
}

View File

@@ -56,7 +56,7 @@ type entry struct {
func filesToXML(urlPath string, fi os.FileInfo, children []os.FileInfo) stringbuf.StringBuf {
now := time.Now().UTC().Format("2006-01-02T15:04:05.000-07:00")
now := time.Now().UTC().Format(time.RFC3339)
entries := make([]entry, 0, len(children)+1)
// Add the current file itself
@@ -65,7 +65,7 @@ func filesToXML(urlPath string, fi os.FileInfo, children []os.FileInfo) stringbu
escName: xmlEscape(fi.Name()),
isDir: fi.IsDir(),
size: fi.Size(),
modTime: fi.ModTime().Format("2006-01-02T15:04:05.000-07:00"),
modTime: fi.ModTime().Format(time.RFC3339),
})
for _, info := range children {
@@ -81,7 +81,7 @@ func filesToXML(urlPath string, fi os.FileInfo, children []os.FileInfo) stringbu
escName: xmlEscape(nm),
isDir: info.IsDir(),
size: info.Size(),
modTime: info.ModTime().Format("2006-01-02T15:04:05.000-07:00"),
modTime: info.ModTime().Format(time.RFC3339),
})
}
@@ -238,17 +238,30 @@ func setVideoResponseHeaders(w http.ResponseWriter, resp *http.Response, isRange
w.Header().Set("Content-Range", contentRange)
}
// Video streaming optimizations
w.Header().Set("Accept-Ranges", "bytes") // Enable seeking
w.Header().Set("Connection", "keep-alive") // Keep connection open
// Prevent buffering in proxies/CDNs
w.Header().Set("X-Accel-Buffering", "no") // Nginx
w.Header().Set("Proxy-Buffering", "off") // General proxy
w.Header().Set("Access-Control-Allow-Origin", "*")
w.Header().Set("Access-Control-Allow-Headers", "Range")
w.Header().Set("Access-Control-Expose-Headers", "Content-Length, Content-Range")
w.WriteHeader(resp.StatusCode)
}
// getContentType maps a file name to a MIME type based on its
// extension, falling back to application/octet-stream for anything
// unrecognized. Matching is case-insensitive, so "MOVIE.MKV" is served
// with the same Content-Type as "movie.mkv" (the previous version
// compared case-sensitively and missed upper-case extensions).
func getContentType(fileName string) string {
	// Lowercase once so every suffix check below is case-insensitive.
	name := strings.ToLower(fileName)
	switch {
	case strings.HasSuffix(name, ".mp4"):
		return "video/mp4"
	case strings.HasSuffix(name, ".mkv"):
		return "video/x-matroska"
	case strings.HasSuffix(name, ".avi"):
		return "video/x-msvideo"
	case strings.HasSuffix(name, ".mov"):
		return "video/quicktime"
	case strings.HasSuffix(name, ".m4v"):
		return "video/x-m4v"
	case strings.HasSuffix(name, ".ts"):
		return "video/mp2t"
	case strings.HasSuffix(name, ".srt"):
		return "application/x-subrip"
	case strings.HasSuffix(name, ".vtt"):
		return "text/vtt"
	}
	return "application/octet-stream"
}

View File

@@ -2,6 +2,7 @@ package webdav
import (
"context"
"github.com/rs/zerolog"
"github.com/stanNthe5/stringbuf"
"net/http"
"os"
@@ -18,7 +19,7 @@ const (
metadataOnlyKey contextKey = "metadataOnly"
)
func (h *Handler) handlePropfind(w http.ResponseWriter, r *http.Request) {
func handlePropfind(h Handler, logger zerolog.Logger, w http.ResponseWriter, r *http.Request) {
// Setup context for metadata only
ctx := context.WithValue(r.Context(), metadataOnlyKey, true)
r = r.WithContext(ctx)
@@ -37,7 +38,7 @@ func (h *Handler) handlePropfind(w http.ResponseWriter, r *http.Request) {
// Always include the resource itself
f, err := h.OpenFile(r.Context(), cleanPath, os.O_RDONLY, 0)
if err != nil {
h.logger.Error().Err(err).Str("path", cleanPath).Msg("Failed to open file")
logger.Error().Err(err).Str("path", cleanPath).Msg("Failed to open file")
http.NotFound(w, r)
return
}
@@ -45,17 +46,16 @@ func (h *Handler) handlePropfind(w http.ResponseWriter, r *http.Request) {
fi, err := f.Stat()
if err != nil {
h.logger.Error().Err(err).Msg("Failed to stat file")
logger.Error().Err(err).Msg("Failed to stat file")
http.Error(w, "Server Error", http.StatusInternalServerError)
return
}
var rawEntries []os.FileInfo
if fi.IsDir() {
rawEntries = append(rawEntries, h.getChildren(cleanPath)...)
rawEntries = append(rawEntries, h.GetChildren(cleanPath)...)
}
now := time.Now().UTC().Format("2006-01-02T15:04:05.000-07:00")
entries := make([]entry, 0, len(rawEntries)+1)
// Add the current file itself
entries = append(entries, entry{
@@ -63,7 +63,7 @@ func (h *Handler) handlePropfind(w http.ResponseWriter, r *http.Request) {
escName: xmlEscape(fi.Name()),
isDir: fi.IsDir(),
size: fi.Size(),
modTime: fi.ModTime().Format("2006-01-02T15:04:05.000-07:00"),
modTime: fi.ModTime().Format(time.RFC3339),
})
for _, info := range rawEntries {
@@ -79,7 +79,7 @@ func (h *Handler) handlePropfind(w http.ResponseWriter, r *http.Request) {
escName: xmlEscape(nm),
isDir: info.IsDir(),
size: info.Size(),
modTime: info.ModTime().Format("2006-01-02T15:04:05.000-07:00"),
modTime: info.ModTime().Format(time.RFC3339),
})
}
@@ -108,7 +108,7 @@ func (h *Handler) handlePropfind(w http.ResponseWriter, r *http.Request) {
}
_, _ = sb.WriteString(`<d:getlastmodified>`)
_, _ = sb.WriteString(now)
_, _ = sb.WriteString(e.modTime)
_, _ = sb.WriteString(`</d:getlastmodified>`)
_, _ = sb.WriteString(`<d:displayname>`)

View File

@@ -106,6 +106,19 @@
</li>
{{- end}}
{{$isBadPath := hasSuffix .Path "__bad__"}}
{{- if and $isBadPath (gt (len .Children) 0) }}
<li>
<span class="file-number">&nbsp;</span>
<span class="file-name">&nbsp;</span>
<span class="file-info">&nbsp;</span>
<button
class="delete-btn"
id="delete-all-btn"
data-name="{{.DeleteAllBadTorrentKey}}">
Delete All
</button>
</li>
{{- end}}
{{- range $i, $file := .Children}}
<li class="{{if $isBadPath}}disabled{{end}}">
<a {{ if not $isBadPath}}href="{{urlpath (printf "%s/%s" $.Path $file.Name)}}"{{end}}>
@@ -118,7 +131,7 @@
</a>
{{- if and $.CanDelete }}
<button
class="delete-btn"
class="delete-btn delete-with-id-btn"
data-name="{{$file.Name}}"
data-path="{{printf "%s/%s" $.Path $file.ID}}">
Delete
@@ -128,7 +141,7 @@
{{- end}}
</ul>
<script>
document.querySelectorAll('.delete-btn').forEach(btn=>{
document.querySelectorAll('.delete-with-id-btn').forEach(btn=>{
btn.addEventListener('click', ()=>{
let p = btn.getAttribute('data-path');
let name = btn.getAttribute('data-name');
@@ -137,6 +150,14 @@
.then(_=>location.reload());
});
});
const deleteAllButton = document.getElementById('delete-all-btn');
deleteAllButton.addEventListener('click', () => {
let p = deleteAllButton.getAttribute('data-name');
if (!confirm('Delete all entries marked Bad?')) return;
fetch(p, { method: 'DELETE' })
.then(_=>location.reload());
});
</script>
</body>
</html>

Some files were not shown because too many files have changed in this diff Show More