1 Commits
beta ... usenet

Author SHA1 Message Date
Mukhtar Akere
f9861e3b54 Implementing a streaming setup with Usenet 2025-08-01 15:27:24 +01:00
65 changed files with 9437 additions and 924 deletions

View File

@@ -25,4 +25,5 @@ node_modules/
# Build artifacts
decypharr
healthcheck
*.exe
*.exe
.venv/

1
.gitignore vendored
View File

@@ -14,4 +14,5 @@ logs/**
auth.json
.ven/
.env
.venv/
node_modules/

View File

@@ -6,8 +6,10 @@ import (
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/logger"
"github.com/sirrobot01/decypharr/pkg/qbit"
"github.com/sirrobot01/decypharr/pkg/sabnzbd"
"github.com/sirrobot01/decypharr/pkg/server"
"github.com/sirrobot01/decypharr/pkg/store"
"github.com/sirrobot01/decypharr/pkg/usenet"
"github.com/sirrobot01/decypharr/pkg/version"
"github.com/sirrobot01/decypharr/pkg/web"
"github.com/sirrobot01/decypharr/pkg/webdav"
@@ -58,20 +60,30 @@ func Start(ctx context.Context) error {
`, version.GetInfo(), cfg.LogLevel)
// Initialize services
qb := qbit.New()
wd := webdav.New()
_usenet := usenet.New()
debridCaches := store.Get().Debrid().Caches()
wd := webdav.New(debridCaches, _usenet)
var sb *sabnzbd.SABnzbd
ui := web.New().Routes()
ui := web.New(_usenet).Routes()
webdavRoutes := wd.Routes()
qbitRoutes := qb.Routes()
qb := qbit.New()
// Register routes
handlers := map[string]http.Handler{
"/": ui,
"/api/v2": qbitRoutes,
"/webdav": webdavRoutes,
}
srv := server.New(handlers)
if qb != nil {
handlers["/api/v2"] = qb.Routes()
}
if _usenet != nil {
sb = sabnzbd.New(_usenet)
sabRoutes := sb.Routes()
handlers["/sabnzbd"] = sabRoutes
}
srv := server.New(_usenet, handlers)
done := make(chan struct{})
go func(ctx context.Context) {
@@ -93,8 +105,13 @@ func Start(ctx context.Context) error {
cancelSvc() // tell existing services to shut down
_log.Info().Msg("Restarting Decypharr...")
<-done // wait for them to finish
qb.Reset()
if qb != nil {
qb.Reset()
}
store.Reset()
if _usenet != nil {
_usenet.Close()
}
// rebuild svcCtx off the original parent
svcCtx, cancelSvc = context.WithCancel(ctx)

11
go.mod
View File

@@ -5,23 +5,29 @@ go 1.24.0
toolchain go1.24.3
require (
github.com/Tensai75/nzbparser v0.1.0
github.com/anacrolix/torrent v1.55.0
github.com/cavaliergopher/grab/v3 v3.0.1
github.com/chrisfarms/yenc v0.0.0-20140520125709-00bca2f8b3cb
github.com/go-chi/chi/v5 v5.1.0
github.com/go-co-op/gocron/v2 v2.16.1
github.com/google/uuid v1.6.0
github.com/gorilla/sessions v1.4.0
github.com/nwaples/rardecode/v2 v2.0.0-beta.4
github.com/puzpuzpuz/xsync/v4 v4.1.0
github.com/robfig/cron/v3 v3.0.1
github.com/rs/zerolog v1.33.0
github.com/sourcegraph/conc v0.3.0
github.com/stanNthe5/stringbuf v0.0.3
go.uber.org/ratelimit v0.3.1
golang.org/x/crypto v0.33.0
golang.org/x/net v0.35.0
golang.org/x/sync v0.12.0
golang.org/x/sync v0.15.0
gopkg.in/natefinch/lumberjack.v2 v2.2.1
)
require (
github.com/Tensai75/subjectparser v0.1.0 // indirect
github.com/anacrolix/missinggo v1.3.0 // indirect
github.com/anacrolix/missinggo/v2 v2.7.3 // indirect
github.com/benbjohnson/clock v1.3.0 // indirect
@@ -35,5 +41,8 @@ require (
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/rogpeppe/go-internal v1.13.1 // indirect
go.uber.org/atomic v1.7.0 // indirect
go.uber.org/multierr v1.9.0 // indirect
golang.org/x/sys v0.30.0 // indirect
golang.org/x/text v0.26.0 // indirect
)

20
go.sum
View File

@@ -8,6 +8,10 @@ github.com/RoaringBitmap/roaring v0.4.17/go.mod h1:D3qVegWTmfCaX4Bl5CrBE9hfrSrrX
github.com/RoaringBitmap/roaring v0.4.23/go.mod h1:D0gp8kJQgE1A4LQ5wFLggQEyvDi06Mq5mKs52e1TwOo=
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
github.com/Tensai75/nzbparser v0.1.0 h1:6RppAuWFahqu/kKjWO5Br0xuEYcxGz+XBTxYc+qvPo4=
github.com/Tensai75/nzbparser v0.1.0/go.mod h1:IUIIaeGaYp2dLAAF29BWYeKTfI4COvXaeQAzQiTOfMY=
github.com/Tensai75/subjectparser v0.1.0 h1:6fEWnRov8lDHxJS2EWqY6VonwYfrIRN+k8h8H7fFwHA=
github.com/Tensai75/subjectparser v0.1.0/go.mod h1:PNBFBnkOGbVDfX+56ZmC4GKSpqoRMCF1Y44xYd7NLGI=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
@@ -49,6 +53,8 @@ github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8/go.mod h1:spo1JLcs67
github.com/cavaliergopher/grab/v3 v3.0.1 h1:4z7TkBfmPjmLAAmkkAZNX/6QJ1nNFdv3SdIHXju0Fr4=
github.com/cavaliergopher/grab/v3 v3.0.1/go.mod h1:1U/KNnD+Ft6JJiYoYBAimKH2XrYptb8Kl3DFGmsjpq4=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chrisfarms/yenc v0.0.0-20140520125709-00bca2f8b3cb h1:BK9pqCayfiXrcRypTPxDsunA6hPJtOyOTJYY2DJ429g=
github.com/chrisfarms/yenc v0.0.0-20140520125709-00bca2f8b3cb/go.mod h1:V4bkS2felTTOSIsYx9JivzrbdBOuksi02ZkzfbHUVAk=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -157,6 +163,8 @@ github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3Rllmb
github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg=
github.com/mschoch/smat v0.2.0/go.mod h1:kc9mz7DoBKqDyiRL7VZN8KvXQMWeTaVnttLRXOlotKw=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/nwaples/rardecode/v2 v2.0.0-beta.4 h1:sdiJxQdPjECn2lh9nLFFhgLCf+0ulDU5rODbtERTlUY=
github.com/nwaples/rardecode/v2 v2.0.0-beta.4/go.mod h1:yntwv/HfMc/Hbvtq9I19D1n58te3h6KsqCf3GxyfBGY=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
@@ -185,6 +193,8 @@ github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/puzpuzpuz/xsync/v4 v4.1.0 h1:x9eHRl4QhZFIPJ17yl4KKW9xLyVWbb3/Yq4SXpjF71U=
github.com/puzpuzpuz/xsync/v4 v4.1.0/go.mod h1:VJDmTCJMBt8igNxnkQd86r+8KUeN1quSfNKu5bLYFQo=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
@@ -200,6 +210,8 @@ github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1
github.com/smartystreets/assertions v0.0.0-20190215210624-980c5ac6f3ac/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v0.0.0-20181108003508-044398e4856c/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s=
github.com/smartystreets/goconvey v0.0.0-20190306220146-200a235640ff/go.mod h1:KSQcGKpxUMHk3nbYzs/tIBAM2iDooCn0BmttHOJEbLs=
github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
github.com/stanNthe5/stringbuf v0.0.3 h1:3ChRipDckEY6FykaQ1Dowy3B+ZQa72EDBCasvT5+D1w=
github.com/stanNthe5/stringbuf v0.0.3/go.mod h1:hii5Vr+mucoWkNJlIYQVp8YvuPtq45fFnJEAhcPf2cQ=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
@@ -222,6 +234,8 @@ go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI=
go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ=
go.uber.org/ratelimit v0.3.1 h1:K4qVE+byfv/B3tC+4nYWP7v/6SimcO7HzHekoMNBma0=
go.uber.org/ratelimit v0.3.1/go.mod h1:6euWsTB6U/Nb3X++xEUXA8ciPJvr19Q/0h1+oDcJhRk=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
@@ -252,8 +266,8 @@ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8=
golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -272,6 +286,8 @@ golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M=
golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA=
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=

View File

@@ -47,7 +47,6 @@ type Debrid struct {
type QBitTorrent struct {
Username string `json:"username,omitempty"`
Password string `json:"password,omitempty"`
Port string `json:"port,omitempty"` // deprecated
DownloadFolder string `json:"download_folder,omitempty"`
Categories []string `json:"categories,omitempty"`
RefreshInterval int `json:"refresh_interval,omitempty"`
@@ -82,26 +81,55 @@ type Auth struct {
Password string `json:"password,omitempty"`
}
type SABnzbd struct {
DownloadFolder string `json:"download_folder,omitempty"`
RefreshInterval int `json:"refresh_interval,omitempty"`
Categories []string `json:"categories,omitempty"`
}
type Usenet struct {
Providers []UsenetProvider `json:"providers,omitempty"` // List of usenet providers
MountFolder string `json:"mount_folder,omitempty"` // Folder where usenet downloads are mounted
SkipPreCache bool `json:"skip_pre_cache,omitempty"`
Chunks int `json:"chunks,omitempty"` // Number of chunks to pre-cache
RcUrl string `json:"rc_url,omitempty"` // Rclone RC URL for the webdav
RcUser string `json:"rc_user,omitempty"` // Rclone RC username
RcPass string `json:"rc_pass,omitempty"` // Rclone RC password
}
type UsenetProvider struct {
Name string `json:"name,omitempty"`
Host string `json:"host,omitempty"` // Host of the usenet server
Port int `json:"port,omitempty"` // Port of the usenet server
Username string `json:"username,omitempty"`
Password string `json:"password,omitempty"`
Connections int `json:"connections,omitempty"` // Number of connections to use
SSL bool `json:"ssl,omitempty"` // Use SSL for the connection
UseTLS bool `json:"use_tls,omitempty"` // Use TLS for the connection
}
type Config struct {
// server
BindAddress string `json:"bind_address,omitempty"`
URLBase string `json:"url_base,omitempty"`
Port string `json:"port,omitempty"`
LogLevel string `json:"log_level,omitempty"`
Debrids []Debrid `json:"debrids,omitempty"`
QBitTorrent QBitTorrent `json:"qbittorrent,omitempty"`
Arrs []Arr `json:"arrs,omitempty"`
Repair Repair `json:"repair,omitempty"`
WebDav WebDav `json:"webdav,omitempty"`
AllowedExt []string `json:"allowed_file_types,omitempty"`
MinFileSize string `json:"min_file_size,omitempty"` // Minimum file size to download, 10MB, 1GB, etc
MaxFileSize string `json:"max_file_size,omitempty"` // Maximum file size to download (0 means no limit)
Path string `json:"-"` // Path to save the config file
UseAuth bool `json:"use_auth,omitempty"`
Auth *Auth `json:"-"`
DiscordWebhook string `json:"discord_webhook_url,omitempty"`
RemoveStalledAfter string `json:"remove_stalled_after,omitzero"`
LogLevel string `json:"log_level,omitempty"`
Debrids []Debrid `json:"debrids,omitempty"`
QBitTorrent *QBitTorrent `json:"qbittorrent,omitempty"`
SABnzbd *SABnzbd `json:"sabnzbd,omitempty"`
Usenet *Usenet `json:"usenet,omitempty"` // Usenet configuration
Arrs []Arr `json:"arrs,omitempty"`
Repair Repair `json:"repair,omitempty"`
WebDav WebDav `json:"webdav,omitempty"`
AllowedExt []string `json:"allowed_file_types,omitempty"`
MinFileSize string `json:"min_file_size,omitempty"` // Minimum file size to download, 10MB, 1GB, etc
MaxFileSize string `json:"max_file_size,omitempty"` // Maximum file size to download (0 means no limit)
Path string `json:"-"` // Path to save the config file
UseAuth bool `json:"use_auth,omitempty"`
Auth *Auth `json:"-"`
DiscordWebhook string `json:"discord_webhook_url,omitempty"`
RemoveStalledAfter string `json:"remove_stalled_after,omitzero"`
}
func (c *Config) JsonFile() string {
@@ -115,6 +143,10 @@ func (c *Config) TorrentsFile() string {
return filepath.Join(c.Path, "torrents.json")
}
func (c *Config) NZBsPath() string {
return filepath.Join(c.Path, "cache/nzbs")
}
func (c *Config) loadConfig() error {
// Load the config file
if configPath == "" {
@@ -142,9 +174,6 @@ func (c *Config) loadConfig() error {
}
func validateDebrids(debrids []Debrid) error {
if len(debrids) == 0 {
return errors.New("no debrids configured")
}
for _, debrid := range debrids {
// Basic field validation
@@ -159,17 +188,51 @@ func validateDebrids(debrids []Debrid) error {
return nil
}
func validateQbitTorrent(config *QBitTorrent) error {
if config.DownloadFolder == "" {
return errors.New("qbittorent download folder is required")
func validateUsenet(usenet *Usenet) error {
if usenet == nil {
return nil // No usenet configuration provided
}
if _, err := os.Stat(config.DownloadFolder); os.IsNotExist(err) {
return fmt.Errorf("qbittorent download folder(%s) does not exist", config.DownloadFolder)
for _, usenet := range usenet.Providers {
// Basic field validation
if usenet.Host == "" {
return errors.New("usenet host is required")
}
if usenet.Username == "" {
return errors.New("usenet username is required")
}
if usenet.Password == "" {
return errors.New("usenet password is required")
}
}
return nil
}
// validateSabznbd checks the optional SABnzbd section: when a download
// folder is configured, it must already exist on disk.
func validateSabznbd(config *SABnzbd) error {
	if config == nil {
		return nil // No SABnzbd configuration provided
	}
	folder := config.DownloadFolder
	if folder == "" {
		return nil
	}
	if _, err := os.Stat(folder); os.IsNotExist(err) {
		return fmt.Errorf("sabnzbd download folder(%s) does not exist", folder)
	}
	return nil
}
func validateRepair(config *Repair) error {
// validateQbitTorrent checks the optional qBittorrent section: when a
// download folder is configured, it must already exist on disk.
func validateQbitTorrent(config *QBitTorrent) error {
	if config == nil {
		return nil // No qBittorrent configuration provided
	}
	if config.DownloadFolder != "" {
		if _, err := os.Stat(config.DownloadFolder); os.IsNotExist(err) {
			// Fixed misspelling "qbittorent" in the user-facing message.
			return fmt.Errorf("qbittorrent download folder(%s) does not exist", config.DownloadFolder)
		}
	}
	return nil
}
func validateRepair(config Repair) error {
if !config.Enabled {
return nil
}
@@ -181,19 +244,34 @@ func validateRepair(config *Repair) error {
func ValidateConfig(config *Config) error {
// Run validations concurrently
// Check if there's at least one debrid or usenet configured
hasUsenet := false
if config.Usenet != nil && len(config.Usenet.Providers) > 0 {
hasUsenet = true
}
if len(config.Debrids) == 0 && !hasUsenet {
return errors.New("at least one debrid or usenet provider must be configured")
}
if err := validateDebrids(config.Debrids); err != nil {
return err
}
if err := validateQbitTorrent(&config.QBitTorrent); err != nil {
if err := validateUsenet(config.Usenet); err != nil {
return err
}
if err := validateRepair(&config.Repair); err != nil {
if err := validateSabznbd(config.SABnzbd); err != nil {
return err
}
if err := validateQbitTorrent(config.QBitTorrent); err != nil {
return err
}
if err := validateRepair(config.Repair); err != nil {
return err
}
return nil
}
@@ -299,6 +377,10 @@ func (c *Config) updateDebrid(d Debrid) Debrid {
}
d.DownloadAPIKeys = downloadKeys
if d.Workers == 0 {
d.Workers = perDebrid
}
if !d.UseWebDav {
return d
}
@@ -309,9 +391,6 @@ func (c *Config) updateDebrid(d Debrid) Debrid {
if d.WebDav.DownloadLinksRefreshInterval == "" {
d.DownloadLinksRefreshInterval = cmp.Or(c.WebDav.DownloadLinksRefreshInterval, "40m") // 40 minutes
}
if d.Workers == 0 {
d.Workers = perDebrid
}
if d.FolderNaming == "" {
d.FolderNaming = cmp.Or(c.WebDav.FolderNaming, "original_no_ext")
}
@@ -338,17 +417,47 @@ func (c *Config) updateDebrid(d Debrid) Debrid {
return d
}
// updateUsenet fills in defaults for a single usenet provider:
// a name derived from the host's domain, port 119, 30 connections,
// and TLS when SSL is requested. Returns the updated copy.
func (c *Config) updateUsenet(u UsenetProvider) UsenetProvider {
	if u.Name == "" {
		parts := strings.Split(u.Host, ".")
		if len(parts) >= 2 {
			u.Name = parts[len(parts)-2] // Gets "example" from "news.example.com"
		} else {
			u.Name = u.Host // Fallback to host if it doesn't look like a domain
		}
	}
	if u.Port == 0 {
		// NOTE(review): 119 is the plaintext NNTP port; servers reached via
		// SSL/TLS conventionally listen on 563. The default is not adjusted
		// when SSL is set — confirm this is intentional.
		u.Port = 119 // Default port for usenet
	}
	if u.Connections == 0 {
		u.Connections = 30 // Default connections
	}
	if u.SSL && !u.UseTLS {
		u.UseTLS = true // Use TLS if SSL is enabled
	}
	return u
}
func (c *Config) setDefaults() {
for i, debrid := range c.Debrids {
c.Debrids[i] = c.updateDebrid(debrid)
}
if c.SABnzbd != nil {
c.SABnzbd.RefreshInterval = cmp.Or(c.SABnzbd.RefreshInterval, 10) // Default to 10 seconds
}
if c.Usenet != nil {
c.Usenet.Chunks = cmp.Or(c.Usenet.Chunks, 5)
for i, provider := range c.Usenet.Providers {
c.Usenet.Providers[i] = c.updateUsenet(provider)
}
}
if len(c.AllowedExt) == 0 {
c.AllowedExt = getDefaultExtensions()
}
c.Port = cmp.Or(c.Port, c.QBitTorrent.Port)
if c.URLBase == "" {
c.URLBase = "/"
}
@@ -395,11 +504,6 @@ func (c *Config) createConfig(path string) error {
c.Port = "8282"
c.LogLevel = "info"
c.UseAuth = true
c.QBitTorrent = QBitTorrent{
DownloadFolder: filepath.Join(path, "downloads"),
Categories: []string{"sonarr", "radarr"},
RefreshInterval: 15,
}
return nil
}
@@ -408,7 +512,3 @@ func Reload() {
instance = nil
once = sync.Once{}
}
func DefaultFreeSlot() int {
return 10
}

178
internal/nntp/client.go Normal file
View File

@@ -0,0 +1,178 @@
package nntp
import (
"bytes"
"context"
"errors"
"fmt"
"github.com/puzpuzpuz/xsync/v4"
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/logger"
"sync/atomic"
"time"
)
// Client represents a failover NNTP client that manages multiple providers.
// Providers are tried in configuration order; each provider gets its own
// connection Pool (see InitPools).
type Client struct {
	providers []config.UsenetProvider // failover order; first entry is preferred
	pools     *xsync.Map[string, *Pool] // provider.Name -> pool; populated by InitPools
	logger    zerolog.Logger
	closed    atomic.Bool // set by Close; guards against use-after-close
	minimumMaxConns int // Minimum number of max connections across all pools
}
// NewClient builds a failover NNTP client for the given providers.
// It validates that at least one provider is configured but does not
// open any network connections; call InitPools to establish the pools.
func NewClient(providers []config.UsenetProvider) (*Client, error) {
	// Validate before allocating: a client without providers is useless,
	// and the value returned alongside a non-nil error should never be built.
	if len(providers) == 0 {
		return nil, fmt.Errorf("no NNTP providers configured")
	}
	return &Client{
		providers: providers,
		logger:    logger.New("nntp"),
		pools:     xsync.NewMap[string, *Pool](),
	}, nil
}
// InitPools creates one connection pool per configured provider.
// Providers whose pool fails to initialize are logged and skipped; the
// call only returns an error when no pool at all could be created.
// It also tracks the smallest per-pool connection limit in
// c.minimumMaxConns across the pools that did initialize.
func (c *Client) InitPools() error {
	var initErrors []error
	successfulPools := 0
	for _, provider := range c.providers {
		serverPool, err := NewPool(provider, c.logger)
		if err != nil {
			// Log and continue: one bad provider must not block the rest.
			c.logger.Error().
				Err(err).
				Str("server", provider.Host).
				Int("port", provider.Port).
				Msg("Failed to initialize server pool")
			initErrors = append(initErrors, err)
			continue
		}
		if c.minimumMaxConns == 0 {
			// Set minimumMaxConns to the max connections of the first successful pool
			c.minimumMaxConns = serverPool.ConnectionCount()
		} else {
			c.minimumMaxConns = min(c.minimumMaxConns, serverPool.ConnectionCount())
		}
		c.pools.Store(provider.Name, serverPool)
		successfulPools++
	}
	// All providers failed: surface the collected errors.
	if successfulPools == 0 {
		return fmt.Errorf("failed to initialize any server pools: %v", initErrors)
	}
	c.logger.Info().
		Int("providers", len(c.providers)).
		Msg("NNTP client created")
	return nil
}
// Close shuts down every provider pool and marks the client closed.
// Calling Close on an already-closed client is a logged no-op.
// Errors from individual pools are logged and do not prevent the
// remaining pools from being closed.
func (c *Client) Close() {
	if c.closed.Load() {
		c.logger.Warn().Msg("NNTP client already closed")
		return
	}
	c.pools.Range(func(key string, value *Pool) bool {
		if value != nil {
			if err := value.Close(); err != nil {
				// Previously a close error aborted the Range, leaking every
				// remaining pool and silently dropping the error. Log it and
				// keep going so all pools are released.
				c.logger.Error().Err(err).Str("provider", key).Msg("error closing pool")
			}
		}
		return true
	})
	c.closed.Store(true)
	c.logger.Info().Msg("NNTP client closed")
}
// GetConnection returns a connection from the first provider pool that
// can supply one, together with a cleanup func that returns the
// connection to its pool. Callers must invoke the cleanup func when done.
// Providers are tried in configuration order; a busy pool is skipped
// when more than one provider exists. Returns ErrNoAvailableConnection
// when every provider was exhausted.
func (c *Client) GetConnection(ctx context.Context) (*Connection, func(), error) {
	if c.closed.Load() {
		return nil, nil, fmt.Errorf("nntp client is closed")
	}
	// Prevent workers from waiting too long for connections
	connCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
	defer cancel()
	providerCount := len(c.providers)
	for _, provider := range c.providers {
		pool, ok := c.pools.Load(provider.Name)
		if !ok {
			// A missing pool is a programming/config error, not a transient
			// condition, so fail instead of falling through to the next provider.
			return nil, nil, fmt.Errorf("no pool found for provider %s", provider.Name)
		}
		// With multiple providers, prefer one with free capacity; with a
		// single provider we must wait on it regardless.
		if !pool.IsFree() && providerCount > 1 {
			continue
		}
		conn, err := pool.Get(connCtx) // Use timeout context
		if err != nil {
			// Exhaustion or timeout on this provider: try the next one.
			if errors.Is(err, ErrNoAvailableConnection) || errors.Is(err, context.DeadlineExceeded) {
				continue
			}
			return nil, nil, fmt.Errorf("error getting connection from provider %s: %w", provider.Name, err)
		}
		if conn == nil {
			continue
		}
		return conn, func() { pool.Put(conn) }, nil
	}
	return nil, nil, ErrNoAvailableConnection
}
// DownloadHeader fetches the article body for messageID and decodes only
// its yEnc header metadata (filename, size, part boundaries), without
// decoding the payload bytes.
func (c *Client) DownloadHeader(ctx context.Context, messageID string) (*YencMetadata, error) {
	conn, cleanup, err := c.GetConnection(ctx)
	if err != nil {
		return nil, err
	}
	defer cleanup()

	data, err := conn.GetBody(messageID)
	if err != nil {
		return nil, err
	}

	// Decode just the yEnc headers. Preserve the underlying cause instead
	// of collapsing it into an opaque "failed to decode segment" message.
	part, err := DecodeYencHeaders(bytes.NewReader(data))
	if err != nil {
		return nil, fmt.Errorf("decoding yenc segment %s: %w", messageID, err)
	}
	if part == nil {
		return nil, fmt.Errorf("decoding yenc segment %s: empty result", messageID)
	}
	return part, nil
}
// MinimumMaxConns returns the smallest maximum-connection count across
// all successfully initialized pools (0 before InitPools has run).
func (c *Client) MinimumMaxConns() int {
	return c.minimumMaxConns
}

// TotalActiveConnections sums the currently active connections across
// every provider pool.
func (c *Client) TotalActiveConnections() int {
	total := 0
	c.pools.Range(func(key string, value *Pool) bool {
		if value != nil {
			total += value.ActiveConnections()
		}
		return true
	})
	return total
}

// Pools exposes the underlying provider-name -> pool map.
func (c *Client) Pools() *xsync.Map[string, *Pool] {
	return c.pools
}

// GetProviders returns the configured providers in failover order.
func (c *Client) GetProviders() []config.UsenetProvider {
	return c.providers
}

394
internal/nntp/conns.go Normal file
View File

@@ -0,0 +1,394 @@
package nntp
import (
"bufio"
"crypto/tls"
"fmt"
"github.com/chrisfarms/yenc"
"github.com/rs/zerolog"
"io"
"net"
"net/textproto"
"strconv"
"strings"
)
// Connection represents a single NNTP connection to one server.
// It is not safe for concurrent use; ownership is managed by Pool.
type Connection struct {
	username, password, address string // credentials and host used for AUTHINFO / TLS SNI
	port   int
	conn   net.Conn        // raw (or TLS-wrapped) transport
	text   *textproto.Conn // line/dot-block protocol reader over conn
	reader *bufio.Reader
	writer *bufio.Writer
	logger zerolog.Logger
}
// authenticate performs the two-step AUTHINFO USER / AUTHINFO PASS
// exchange (RFC 4643). The server must answer 381 ("password required")
// to the username and 281 ("authentication accepted") to the password;
// any other code is classified via classifyNNTPError.
func (c *Connection) authenticate() error {
	// Send AUTHINFO USER command
	if err := c.sendCommand(fmt.Sprintf("AUTHINFO USER %s", c.username)); err != nil {
		return NewConnectionError(fmt.Errorf("failed to send username: %w", err))
	}
	resp, err := c.readResponse()
	if err != nil {
		return NewConnectionError(fmt.Errorf("failed to read user response: %w", err))
	}
	if resp.Code != 381 {
		return classifyNNTPError(resp.Code, fmt.Sprintf("unexpected response to AUTHINFO USER: %s", resp.Message))
	}
	// Send AUTHINFO PASS command
	if err := c.sendCommand(fmt.Sprintf("AUTHINFO PASS %s", c.password)); err != nil {
		return NewConnectionError(fmt.Errorf("failed to send password: %w", err))
	}
	resp, err = c.readResponse()
	if err != nil {
		return NewConnectionError(fmt.Errorf("failed to read password response: %w", err))
	}
	if resp.Code != 281 {
		return classifyNNTPError(resp.Code, fmt.Sprintf("authentication failed: %s", resp.Message))
	}
	return nil
}
// startTLS initiates TLS encryption with proper error handling.
// On success the transport and all protocol readers/writers are rebuilt
// on top of the TLS session; the certificate is verified against
// c.address (SNI).
func (c *Connection) startTLS() error {
	if err := c.sendCommand("STARTTLS"); err != nil {
		return NewConnectionError(fmt.Errorf("failed to send STARTTLS: %w", err))
	}
	resp, err := c.readResponse()
	if err != nil {
		return NewConnectionError(fmt.Errorf("failed to read STARTTLS response: %w", err))
	}
	// 382 = "Continue with TLS negotiation" (RFC 4642).
	if resp.Code != 382 {
		return classifyNNTPError(resp.Code, fmt.Sprintf("STARTTLS not supported: %s", resp.Message))
	}
	// Upgrade connection to TLS
	tlsConn := tls.Client(c.conn, &tls.Config{
		ServerName:         c.address,
		InsecureSkipVerify: false,
	})
	c.conn = tlsConn
	// Rebuild all readers/writers so no plaintext buffering survives the upgrade.
	c.reader = bufio.NewReader(tlsConn)
	c.writer = bufio.NewWriter(tlsConn)
	c.text = textproto.NewConn(tlsConn)
	c.logger.Debug().Msg("TLS encryption enabled")
	return nil
}
// ping verifies that the connection is alive by issuing the cheap,
// side-effect-free DATE command and reading its reply.
func (c *Connection) ping() error {
	err := c.sendCommand("DATE")
	if err == nil {
		_, err = c.readResponse()
	}
	if err != nil {
		return NewConnectionError(err)
	}
	return nil
}
// sendCommand writes a single CRLF-terminated command line to the
// server and flushes the write buffer.
func (c *Connection) sendCommand(command string) error {
	if _, err := c.writer.WriteString(command + "\r\n"); err != nil {
		return err
	}
	return c.writer.Flush()
}
// readResponse reads one status line from the server and splits it into
// the numeric code and the trailing message text (empty if absent).
func (c *Connection) readResponse() (*Response, error) {
	line, err := c.text.ReadLine()
	if err != nil {
		return nil, err
	}
	codeStr, message, _ := strings.Cut(line, " ")
	code, err := strconv.Atoi(codeStr)
	if err != nil {
		return nil, fmt.Errorf("invalid response code: %s", codeStr)
	}
	return &Response{
		Code:    code,
		Message: message,
	}, nil
}
// readMultilineResponse reads a status line and, for success (2xx)
// codes, the dot-terminated block of lines that follows it.
func (c *Connection) readMultilineResponse() (*Response, error) {
	resp, err := c.readResponse()
	if err != nil {
		return nil, err
	}
	// Only 2xx responses carry a multiline payload.
	multiline := resp.Code >= 200 && resp.Code < 300
	if !multiline {
		return resp, nil
	}
	if resp.Lines, err = c.text.ReadDotLines(); err != nil {
		return nil, err
	}
	return resp, nil
}
// GetArticle retrieves a full article (headers and body) by message ID,
// classifying protocol failures via classifyNNTPError.
func (c *Connection) GetArticle(messageID string) (*Article, error) {
	id := FormatMessageID(messageID)
	if err := c.sendCommand("ARTICLE " + id); err != nil {
		return nil, NewConnectionError(fmt.Errorf("failed to send ARTICLE command: %w", err))
	}
	resp, err := c.readMultilineResponse()
	if err != nil {
		return nil, NewConnectionError(fmt.Errorf("failed to read article response: %w", err))
	}
	// 220 = "article follows".
	if resp.Code != 220 {
		return nil, classifyNNTPError(resp.Code, resp.Message)
	}
	return c.parseArticle(id, resp.Lines)
}
// GetBody retrieves article body by message ID with proper error classification.
// The returned bytes have each line re-terminated with CRLF so that the
// yEnc decoder sees wire-faithful framing (textproto strips line endings
// and dot-stuffing while reading).
func (c *Connection) GetBody(messageID string) ([]byte, error) {
	messageID = FormatMessageID(messageID)
	if err := c.sendCommand(fmt.Sprintf("BODY %s", messageID)); err != nil {
		return nil, NewConnectionError(fmt.Errorf("failed to send BODY command: %w", err))
	}
	// Read the initial response; 222 = "body follows".
	resp, err := c.readResponse()
	if err != nil {
		return nil, NewConnectionError(fmt.Errorf("failed to read body response: %w", err))
	}
	if resp.Code != 222 {
		return nil, classifyNNTPError(resp.Code, resp.Message)
	}
	// Read the raw body data directly using textproto to preserve exact formatting for yEnc
	lines, err := c.text.ReadDotLines()
	if err != nil {
		return nil, NewConnectionError(fmt.Errorf("failed to read body data: %w", err))
	}
	// Join with \r\n to preserve original line endings and add final \r\n
	body := strings.Join(lines, "\r\n")
	if len(lines) > 0 {
		body += "\r\n"
	}
	return []byte(body), nil
}
// GetHead retrieves the raw header block of an article by message ID,
// with each line re-terminated by CRLF to match the wire format.
func (c *Connection) GetHead(messageID string) ([]byte, error) {
	id := FormatMessageID(messageID)
	if err := c.sendCommand("HEAD " + id); err != nil {
		return nil, NewConnectionError(fmt.Errorf("failed to send HEAD command: %w", err))
	}
	resp, err := c.readResponse()
	if err != nil {
		return nil, NewConnectionError(fmt.Errorf("failed to read head response: %w", err))
	}
	// 221 = "headers follow".
	if resp.Code != 221 {
		return nil, classifyNNTPError(resp.Code, resp.Message)
	}
	lines, err := c.text.ReadDotLines()
	if err != nil {
		return nil, NewConnectionError(fmt.Errorf("failed to read header data: %w", err))
	}
	// Rebuild the block with CRLF line endings (including a trailing one).
	var b strings.Builder
	for _, line := range lines {
		b.WriteString(line)
		b.WriteString("\r\n")
	}
	return []byte(b.String()), nil
}
// GetSegment downloads one segment's raw body and wraps it together
// with its position metadata. Errors are already classified by GetBody.
func (c *Connection) GetSegment(messageID string, segmentNumber int) (*Segment, error) {
	id := FormatMessageID(messageID)
	data, err := c.GetBody(id)
	if err != nil {
		return nil, err // GetBody already returns classified errors
	}
	seg := &Segment{
		MessageID: id,
		Number:    segmentNumber,
		Bytes:     int64(len(data)),
		Data:      data,
	}
	return seg, nil
}
// Stat retrieves article statistics by message ID with proper error classification.
// A 223 reply has the form "223 n <message-id> ..."; the article number
// and the server-echoed ID are parsed out and returned.
func (c *Connection) Stat(messageID string) (articleNumber int, echoedID string, err error) {
	messageID = FormatMessageID(messageID)
	if err = c.sendCommand(fmt.Sprintf("STAT %s", messageID)); err != nil {
		return 0, "", NewConnectionError(fmt.Errorf("failed to send STAT: %w", err))
	}
	resp, err := c.readResponse()
	if err != nil {
		return 0, "", NewConnectionError(fmt.Errorf("failed to read STAT response: %w", err))
	}
	if resp.Code != 223 {
		return 0, "", classifyNNTPError(resp.Code, resp.Message)
	}
	// Expect at least "<number> <message-id>" in the message text.
	fields := strings.Fields(resp.Message)
	if len(fields) < 2 {
		return 0, "", NewProtocolError(resp.Code, fmt.Sprintf("unexpected STAT response format: %q", resp.Message))
	}
	if articleNumber, err = strconv.Atoi(fields[0]); err != nil {
		return 0, "", NewProtocolError(resp.Code, fmt.Sprintf("invalid article number %q: %v", fields[0], err))
	}
	echoedID = fields[1]
	return articleNumber, echoedID, nil
}
// SelectGroup selects a newsgroup and returns group information.
// The numeric fields are parsed best-effort: a count/low/high that the
// server reports in a non-numeric form is left at its zero value rather
// than failing the call.
func (c *Connection) SelectGroup(groupName string) (*GroupInfo, error) {
	if err := c.sendCommand(fmt.Sprintf("GROUP %s", groupName)); err != nil {
		return nil, NewConnectionError(fmt.Errorf("failed to send GROUP command: %w", err))
	}
	resp, err := c.readResponse()
	if err != nil {
		return nil, NewConnectionError(fmt.Errorf("failed to read GROUP response: %w", err))
	}
	// 211 = group selected.
	if resp.Code != 211 {
		return nil, classifyNNTPError(resp.Code, resp.Message)
	}
	// Parse GROUP response: "211 number low high group-name"
	fields := strings.Fields(resp.Message)
	if len(fields) < 4 {
		return nil, NewProtocolError(resp.Code, fmt.Sprintf("unexpected GROUP response format: %q", resp.Message))
	}
	groupInfo := &GroupInfo{
		Name: groupName,
	}
	if count, err := strconv.Atoi(fields[0]); err == nil {
		groupInfo.Count = count
	}
	if low, err := strconv.Atoi(fields[1]); err == nil {
		groupInfo.Low = low
	}
	if high, err := strconv.Atoi(fields[2]); err == nil {
		groupInfo.High = high
	}
	return groupInfo, nil
}
// parseArticle builds an Article from raw ARTICLE response lines.
// Lines up to the first empty line are headers; the remainder is the
// body. Header field names are matched case-insensitively, as header
// names are case-insensitive per RFC 5322 / RFC 3977 (the previous
// exact-case prefix match silently dropped e.g. "SUBJECT:" headers).
func (c *Connection) parseArticle(messageID string, lines []string) (*Article, error) {
	article := &Article{
		MessageID: messageID,
		Groups:    []string{},
	}

	headerEnd := -1
	for i, line := range lines {
		if line == "" {
			headerEnd = i
			break
		}
		// Split "Name: value" at the first separator.
		name, value, ok := strings.Cut(line, ": ")
		if !ok {
			continue
		}
		switch strings.ToLower(name) {
		case "subject":
			article.Subject = value
		case "from":
			article.From = value
		case "date":
			article.Date = value
		case "newsgroups":
			article.Groups = strings.Split(value, ",")
			// Use a distinct index: the original shadowed the outer loop's i.
			for j := range article.Groups {
				article.Groups[j] = strings.TrimSpace(article.Groups[j])
			}
		}
	}

	// Everything after the blank separator line is the body.
	if headerEnd != -1 && headerEnd+1 < len(lines) {
		body := strings.Join(lines[headerEnd+1:], "\n")
		article.Body = []byte(body)
		article.Size = int64(len(article.Body))
	}
	return article, nil
}
// close tears down the underlying network connection, if one exists.
func (c *Connection) close() error {
	if c.conn == nil {
		return nil
	}
	return c.conn.Close()
}
// DecodeYenc decodes a yEnc-encoded stream and returns the decoded part.
func DecodeYenc(reader io.Reader) (*yenc.Part, error) {
	part, err := yenc.Decode(reader)
	if err != nil {
		// The previous message claimed decoder *creation* failed; the decode
		// itself is what failed here.
		return nil, NewYencDecodeError(fmt.Errorf("failed to decode yenc data: %w", err))
	}
	return part, nil
}
// IsValidMessageID reports whether messageID looks like a plausible usenet
// message ID: at least three characters long and containing an '@'.
func IsValidMessageID(messageID string) bool {
	return len(messageID) >= 3 && strings.Contains(messageID, "@")
}
// FormatMessageID normalizes a message ID so it is trimmed and wrapped in
// angle brackets, as required by NNTP commands.
func FormatMessageID(messageID string) string {
	id := strings.TrimSpace(messageID)
	if !strings.HasPrefix(id, "<") {
		id = "<" + id
	}
	if !strings.HasSuffix(id, ">") {
		id += ">"
	}
	return id
}

116
internal/nntp/decoder.go Normal file
View File

@@ -0,0 +1,116 @@
package nntp
import (
"bufio"
"fmt"
"io"
"strconv"
"strings"
)
// YencMetadata contains just the header information parsed from a yEnc
// "=ybegin" (and optional "=ypart") line, without decoding the body.
type YencMetadata struct {
	Name     string // filename
	Size     int64  // total file size
	Part     int    // part number (0 for single-part files)
	Total    int    // total parts
	Begin    int64  // part start byte (from =ypart)
	End      int64  // part end byte (from =ypart)
	LineSize int    // line length
}
// DecodeYencHeaders extracts only yenc header metadata without decoding the
// article body.
func DecodeYencHeaders(reader io.Reader) (*YencMetadata, error) {
	br := bufio.NewReader(reader)
	meta := &YencMetadata{}

	if err := parseYBeginHeader(br, meta); err != nil {
		return nil, NewYencDecodeError(fmt.Errorf("failed to parse ybegin header: %w", err))
	}

	// A non-zero part number signals a multipart file, which also carries an
	// "=ypart" line with the byte range of this part.
	if meta.Part > 0 {
		if err := parseYPartHeader(br, meta); err != nil {
			return nil, NewYencDecodeError(fmt.Errorf("failed to parse ypart header: %w", err))
		}
	}
	return meta, nil
}
// parseYBeginHeader scans forward to the "=ybegin" line and fills in the
// size/line/part/total and name fields of metadata.
func parseYBeginHeader(buf *bufio.Reader, metadata *YencMetadata) error {
	var line string
	for {
		var err error
		line, err = buf.ReadString('\n')
		if err != nil {
			return err
		}
		if strings.HasPrefix(line, "=ybegin") {
			break
		}
	}

	// The filename is everything after "name=" and may itself contain spaces,
	// so split it off before tokenizing the remaining key=value pairs.
	head, name, hasName := strings.Cut(line[7:], "name=")
	if hasName {
		metadata.Name = strings.TrimSpace(name)
	}

	for _, token := range strings.Split(head, " ") {
		key, value, ok := strings.Cut(strings.TrimSpace(token), "=")
		if !ok {
			continue
		}
		// Conversion errors intentionally leave the zero value in place.
		switch key {
		case "size":
			metadata.Size, _ = strconv.ParseInt(value, 10, 64)
		case "line":
			metadata.LineSize, _ = strconv.Atoi(value)
		case "part":
			metadata.Part, _ = strconv.Atoi(value)
		case "total":
			metadata.Total, _ = strconv.Atoi(value)
		}
	}
	return nil
}
// parseYPartHeader scans forward to the "=ypart" line and records the
// begin/end byte offsets of this part in metadata.
func parseYPartHeader(buf *bufio.Reader, metadata *YencMetadata) error {
	var line string
	for {
		var err error
		line, err = buf.ReadString('\n')
		if err != nil {
			return err
		}
		if strings.HasPrefix(line, "=ypart") {
			break
		}
	}

	for _, token := range strings.Split(line[6:], " ") {
		key, value, ok := strings.Cut(strings.TrimSpace(token), "=")
		if !ok {
			continue
		}
		switch key {
		case "begin":
			metadata.Begin, _ = strconv.ParseInt(value, 10, 64)
		case "end":
			metadata.End, _ = strconv.ParseInt(value, 10, 64)
		}
	}
	return nil
}

195
internal/nntp/errors.go Normal file
View File

@@ -0,0 +1,195 @@
package nntp
import (
"errors"
"fmt"
)
// Error types for NNTP operations
type ErrorType int

const (
	ErrorTypeUnknown        ErrorType = iota
	ErrorTypeConnection               // network-level failures (dial, read, write)
	ErrorTypeAuthentication           // codes 481/482 (see classifyNNTPError)
	ErrorTypeTimeout                  // operation or context deadline expired
	ErrorTypeArticleNotFound          // codes 430/423
	ErrorTypeGroupNotFound            // code 411
	ErrorTypePermissionDenied         // codes 502/503
	ErrorTypeServerBusy               // code 400
	ErrorTypeInvalidCommand           // codes 500/501
	ErrorTypeProtocol                 // any other response code >= 400
	ErrorTypeYencDecode               // yEnc decoding failed
	ErrorTypeNoAvailableConnection    // connection pool exhausted
)

// Error represents an NNTP-specific error
type Error struct {
	Type    ErrorType
	Code    int    // NNTP response code
	Message string // Error message
	Err     error  // Underlying error
}

// Predefined errors for common cases. Compare with errors.Is, which matches
// on Type via (*Error).Is rather than on identity.
var (
	ErrArticleNotFound       = &Error{Type: ErrorTypeArticleNotFound, Code: 430, Message: "article not found"}
	ErrGroupNotFound         = &Error{Type: ErrorTypeGroupNotFound, Code: 411, Message: "group not found"}
	ErrPermissionDenied      = &Error{Type: ErrorTypePermissionDenied, Code: 502, Message: "permission denied"}
	ErrAuthenticationFail    = &Error{Type: ErrorTypeAuthentication, Code: 482, Message: "authentication failed"}
	ErrServerBusy            = &Error{Type: ErrorTypeServerBusy, Code: 400, Message: "server busy"}
	ErrPoolNotFound          = &Error{Type: ErrorTypeUnknown, Code: 0, Message: "NNTP pool not found", Err: nil}
	ErrNoAvailableConnection = &Error{Type: ErrorTypeNoAvailableConnection, Code: 0, Message: "no available connection in pool", Err: nil}
)
// Error renders the error as "NNTP <TYPE> (code N): <message>", appending the
// wrapped underlying error when one is present.
func (e *Error) Error() string {
	base := fmt.Sprintf("NNTP %s (code %d): %s", e.Type.String(), e.Code, e.Message)
	if e.Err == nil {
		return base
	}
	return fmt.Sprintf("%s - %v", base, e.Err)
}
// Unwrap returns the underlying error, enabling errors.Is / errors.As chains.
func (e *Error) Unwrap() error {
	return e.Err
}
// Is reports whether target is an *Error with the same Type, which lets
// errors.Is match against the predefined sentinel errors by category.
func (e *Error) Is(target error) bool {
	t, ok := target.(*Error)
	return ok && e.Type == t.Type
}
// IsRetryable returns true if the error might be resolved by retrying.
func (e *Error) IsRetryable() bool {
	switch e.Type {
	case ErrorTypeConnection, ErrorTypeTimeout, ErrorTypeServerBusy:
		// Transient network/server conditions are worth another attempt.
		return true
	default:
		// Not-found, permission and authentication failures are permanent.
		return false
	}
}
// ShouldStopParsing returns true if this error should stop the entire parsing
// process. Only credential problems are fatal for the whole run; missing
// articles, busy servers and dropped connections are recoverable.
func (e *Error) ShouldStopParsing() bool {
	return e.Type == ErrorTypeAuthentication || e.Type == ErrorTypePermissionDenied
}
// String returns the symbolic name of the error type, or "UNKNOWN" for
// unmapped values (including ErrorTypeUnknown and ErrorTypeNoAvailableConnection).
func (et ErrorType) String() string {
	names := [...]string{
		ErrorTypeConnection:       "CONNECTION",
		ErrorTypeAuthentication:   "AUTHENTICATION",
		ErrorTypeTimeout:          "TIMEOUT",
		ErrorTypeArticleNotFound:  "ARTICLE_NOT_FOUND",
		ErrorTypeGroupNotFound:    "GROUP_NOT_FOUND",
		ErrorTypePermissionDenied: "PERMISSION_DENIED",
		ErrorTypeServerBusy:       "SERVER_BUSY",
		ErrorTypeInvalidCommand:   "INVALID_COMMAND",
		ErrorTypeProtocol:         "PROTOCOL",
		ErrorTypeYencDecode:       "YENC_DECODE",
	}
	if int(et) >= 0 && int(et) < len(names) && names[et] != "" {
		return names[et]
	}
	return "UNKNOWN"
}
// Helper constructors for the common error categories.

// NewConnectionError wraps err as a network/connection-level failure.
func NewConnectionError(err error) *Error {
	return &Error{Type: ErrorTypeConnection, Message: "connection failed", Err: err}
}

// NewTimeoutError wraps err as an operation timeout.
func NewTimeoutError(err error) *Error {
	return &Error{Type: ErrorTypeTimeout, Message: "operation timed out", Err: err}
}

// NewProtocolError records an unexpected NNTP response with its code.
func NewProtocolError(code int, message string) *Error {
	return &Error{Type: ErrorTypeProtocol, Code: code, Message: message}
}

// NewYencDecodeError wraps err as a yEnc decoding failure.
func NewYencDecodeError(err error) *Error {
	return &Error{Type: ErrorTypeYencDecode, Message: "yEnc decode failed", Err: err}
}
// classifyNNTPError classifies an NNTP response code into an error type
// and wraps it together with the server message.
func classifyNNTPError(code int, message string) *Error {
	var t ErrorType
	switch code {
	case 430, 423:
		t = ErrorTypeArticleNotFound
	case 411:
		t = ErrorTypeGroupNotFound
	case 502, 503:
		t = ErrorTypePermissionDenied
	case 481, 482:
		t = ErrorTypeAuthentication
	case 400:
		t = ErrorTypeServerBusy
	case 500, 501:
		t = ErrorTypeInvalidCommand
	default:
		if code >= 400 {
			t = ErrorTypeProtocol
		} else {
			t = ErrorTypeUnknown
		}
	}
	return &Error{Type: t, Code: code, Message: message}
}
// IsArticleNotFoundError reports whether err is (or wraps) an NNTP
// article-not-found error.
func IsArticleNotFoundError(err error) bool {
	var e *Error
	return errors.As(err, &e) && e.Type == ErrorTypeArticleNotFound
}

// IsAuthenticationError reports whether err is (or wraps) an NNTP
// authentication failure.
func IsAuthenticationError(err error) bool {
	var e *Error
	return errors.As(err, &e) && e.Type == ErrorTypeAuthentication
}

// IsRetryableError reports whether err is an NNTP error worth retrying.
func IsRetryableError(err error) bool {
	var e *Error
	return errors.As(err, &e) && e.IsRetryable()
}

299
internal/nntp/pool.go Normal file
View File

@@ -0,0 +1,299 @@
package nntp
import (
"bufio"
"context"
"crypto/tls"
"fmt"
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/config"
"net"
"net/textproto"
"sync"
"sync/atomic"
"time"
)
// Pool manages a pool of NNTP connections
type Pool struct {
	address, username, password string
	maxConns, port              int
	ssl                         bool             // implicit TLS on dial
	useTLS                      bool             // STARTTLS upgrade after connect (only when ssl is false)
	connections                 chan *Connection // buffered channel holding idle connections
	logger                      zerolog.Logger
	closed                      atomic.Bool  // set by Close; checked by Get/Put
	totalConnections            atomic.Int32 // connections successfully created at startup
	activeConnections           atomic.Int32 // connections currently checked out via Get
}

// Segment represents a usenet segment
type Segment struct {
	MessageID string
	Number    int
	Bytes     int64
	Data      []byte
}

// Article represents a complete usenet article
type Article struct {
	MessageID string
	Subject   string
	From      string
	Date      string
	Groups    []string
	Body      []byte
	Size      int64 // length of Body in bytes
}

// Response represents an NNTP server response
type Response struct {
	Code    int      // three-digit NNTP status code
	Message string   // first response line after the code
	Lines   []string // multi-line payload, when present
}

// GroupInfo represents information about a newsgroup
type GroupInfo struct {
	Name  string
	Count int // Number of articles in the group
	Low   int // Lowest article number
	High  int // Highest article number
}
// NewPool creates a new NNTP connection pool for the given provider and
// eagerly dials its connections before returning.
func NewPool(provider config.UsenetProvider, logger zerolog.Logger) (*Pool, error) {
	conns := provider.Connections
	if conns <= 0 {
		// Always keep at least one connection.
		conns = 1
	}
	p := &Pool{
		address:     provider.Host,
		username:    provider.Username,
		password:    provider.Password,
		port:        provider.Port,
		maxConns:    conns,
		ssl:         provider.SSL,
		useTLS:      provider.UseTLS,
		connections: make(chan *Connection, conns),
		logger:      logger,
	}
	return p.initializeConnections()
}
// initializeConnections dials maxConns connections in parallel, keeps the
// ones that succeed, and fails only when none could be established.
func (p *Pool) initializeConnections() (*Pool, error) {
	var (
		wg    sync.WaitGroup
		mu    sync.Mutex
		conns []*Connection
		errs  []error
	)

	// Dial all connections concurrently; collect results under the mutex.
	wg.Add(p.maxConns)
	for i := 0; i < p.maxConns; i++ {
		go func() {
			defer wg.Done()
			conn, err := p.createConnection()
			mu.Lock()
			defer mu.Unlock()
			if err != nil {
				errs = append(errs, err)
				return
			}
			conns = append(conns, conn)
		}()
	}
	wg.Wait()

	// Seed the idle-connection channel with everything that succeeded.
	for _, conn := range conns {
		p.connections <- conn
	}
	p.totalConnections.Store(int32(len(conns)))

	if len(conns) == 0 {
		return nil, fmt.Errorf("failed to create any connections: %v", errs)
	}

	p.logger.Info().
		Str("server", p.address).
		Int("port", p.port).
		Int("requested_connections", p.maxConns).
		Int("successful_connections", len(conns)).
		Int("failed_connections", len(errs)).
		Msg("NNTP connection pool created")

	// Partial failure is tolerated: warn but run with fewer connections.
	if len(errs) > 0 {
		p.logger.Warn().
			Int("failed_count", len(errs)).
			Msg("Some connections failed during pool initialization")
	}
	return p, nil
}
// Get retrieves a healthy connection from the pool, transparently replacing
// dead connections, or fails when ctx is done first.
//
// Fix: previously a failure while closing a dead connection was returned to
// the caller, aborting the acquisition AND permanently leaking a pool slot
// (the connection had already been removed from the channel and no
// replacement was dialed). Close failures on a dead connection are not
// actionable, so they are now logged and the replacement is dialed anyway.
func (p *Pool) Get(ctx context.Context) (*Connection, error) {
	if p.closed.Load() {
		return nil, NewConnectionError(fmt.Errorf("connection pool is closed"))
	}
	select {
	case conn := <-p.connections:
		if conn == nil {
			return nil, NewConnectionError(fmt.Errorf("received nil connection from pool"))
		}
		if err := conn.ping(); err != nil {
			// Connection is dead: discard it and dial a replacement.
			if closeErr := conn.close(); closeErr != nil {
				p.logger.Debug().Err(closeErr).Msg("failed to close dead NNTP connection")
			}
			newConn, err := p.createConnection()
			if err != nil {
				return nil, NewConnectionError(fmt.Errorf("failed to create replacement connection: %w", err))
			}
			p.activeConnections.Add(1)
			return newConn, nil
		}
		p.activeConnections.Add(1)
		return conn, nil
	case <-ctx.Done():
		return nil, NewTimeoutError(ctx.Err())
	}
}
// Put returns a connection to the pool
//
// The active-connection counter is decremented unconditionally, so every Put
// must be paired with a successful Get.
//
// NOTE(review): if Close() runs between the p.closed check below and either
// send on p.connections, the send hits a closed channel and panics — confirm
// that Put cannot race with Close in the callers.
func (p *Pool) Put(conn *Connection) {
	if conn == nil {
		return
	}
	defer p.activeConnections.Add(-1)
	if p.closed.Load() {
		conn.close()
		return
	}
	// Try non-blocking first
	select {
	case p.connections <- conn:
		return
	default:
	}
	// If pool is full, this usually means we have too many connections
	// Force return by making space (close oldest connection)
	select {
	case oldConn := <-p.connections:
		oldConn.close()       // Close the old connection
		p.connections <- conn // Put the new one back
	case <-time.After(1 * time.Second):
		// Still can't return - close this connection
		conn.close()
	}
}
// Close marks the pool closed and tears down every pooled connection.
//
// Fixes two defects in the previous version:
//   - the separate Load()/Store() pair raced, so two concurrent Close calls
//     could both pass the guard and close(p.connections) twice, which panics;
//     CompareAndSwap makes exactly one caller perform the shutdown.
//   - returning on the first conn.close() error left the remaining pooled
//     connections open; the drain now continues and the first error is
//     reported after everything has been closed.
func (p *Pool) Close() error {
	if !p.closed.CompareAndSwap(false, true) {
		return nil
	}
	close(p.connections)

	var firstErr error
	for conn := range p.connections {
		if err := conn.close(); err != nil && firstErr == nil {
			firstErr = err
		}
	}
	p.logger.Info().Msg("NNTP connection pool closed")
	return firstErr
}
// createConnection creates a new NNTP connection with proper error handling:
// dial (optionally with implicit TLS), read the server greeting, authenticate
// when credentials are configured, then optionally upgrade via STARTTLS.
func (p *Pool) createConnection() (*Connection, error) {
	addr := fmt.Sprintf("%s:%d", p.address, p.port)

	var conn net.Conn
	var err error
	if p.ssl {
		// Implicit TLS: the session is encrypted from the first byte.
		conn, err = tls.DialWithDialer(&net.Dialer{}, "tcp", addr, &tls.Config{
			InsecureSkipVerify: false,
		})
	} else {
		conn, err = net.Dial("tcp", addr)
	}
	if err != nil {
		return nil, NewConnectionError(fmt.Errorf("failed to connect to %s: %w", addr, err))
	}

	reader := bufio.NewReaderSize(conn, 256*1024) // 256KB buffer for better performance
	writer := bufio.NewWriterSize(conn, 256*1024) // 256KB buffer for better performance
	// NOTE(review): text wraps the raw conn while reader/writer buffer it
	// independently — reading through both on the same socket would
	// desynchronize the stream; confirm only one path is used per direction.
	text := textproto.NewConn(conn)

	nntpConn := &Connection{
		username: p.username,
		password: p.password,
		address:  p.address,
		port:     p.port,
		conn:     conn,
		text:     text,
		reader:   reader,
		writer:   writer,
		logger:   p.logger,
	}

	// Read welcome message
	_, err = nntpConn.readResponse()
	if err != nil {
		conn.Close()
		return nil, NewConnectionError(fmt.Errorf("failed to read welcome message: %w", err))
	}

	// Authenticate if credentials are provided
	if p.username != "" && p.password != "" {
		if err := nntpConn.authenticate(); err != nil {
			conn.Close()
			return nil, err // authenticate() already returns NNTPError
		}
	}

	// Enable TLS if requested (STARTTLS)
	// NOTE(review): STARTTLS is negotiated *after* AUTHINFO, so credentials
	// travel in plaintext on non-SSL connections — confirm this ordering is intended.
	if p.useTLS && !p.ssl {
		if err := nntpConn.startTLS(); err != nil {
			conn.Close()
			return nil, err // startTLS() already returns NNTPError
		}
	}

	return nntpConn, nil
}
// ConnectionCount reports how many connections the pool established at startup.
func (p *Pool) ConnectionCount() int {
	return int(p.totalConnections.Load())
}

// ActiveConnections reports how many connections are currently checked out via Get.
func (p *Pool) ActiveConnections() int {
	return int(p.activeConnections.Load())
}

// IsFree reports whether at least one connection slot is not in use.
func (p *Pool) IsFree() bool {
	return p.ActiveConnections() < p.maxConns
}

View File

@@ -5,7 +5,6 @@ import (
"context"
"crypto/tls"
"encoding/json"
"errors"
"fmt"
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/logger"
@@ -180,8 +179,7 @@ func (c *Client) Do(req *http.Request) (*http.Response, error) {
resp, err = c.doRequest(req)
if err != nil {
// Check if this is a network error that might be worth retrying
if isRetryableError(err) && attempt < c.maxRetries {
if attempt < c.maxRetries {
// Apply backoff with jitter
jitter := time.Duration(rand.Int63n(int64(backoff / 4)))
sleepTime := backoff + jitter
@@ -390,30 +388,3 @@ func Default() *Client {
})
return instance
}
func isRetryableError(err error) bool {
errString := err.Error()
// Connection reset and other network errors
if strings.Contains(errString, "connection reset by peer") ||
strings.Contains(errString, "read: connection reset") ||
strings.Contains(errString, "connection refused") ||
strings.Contains(errString, "network is unreachable") ||
strings.Contains(errString, "connection timed out") ||
strings.Contains(errString, "no such host") ||
strings.Contains(errString, "i/o timeout") ||
strings.Contains(errString, "unexpected EOF") ||
strings.Contains(errString, "TLS handshake timeout") {
return true
}
// Check for net.Error type which can provide more information
var netErr net.Error
if errors.As(err, &netErr) {
// Retry on timeout errors and temporary errors
return netErr.Timeout()
}
// Not a retryable error
return false
}

View File

@@ -1,5 +1,16 @@
package utils
import (
	"fmt"
	"io"
	"mime"
	"net/http"
	"net/url"
	"path"
	"path/filepath"
	"strconv"
	"strings"
	"time"
)
func RemoveItem[S ~[]E, E comparable](s S, values ...E) S {
result := make(S, 0, len(s))
outer:
@@ -22,3 +33,131 @@ func Contains(slice []string, value string) bool {
}
return false
}
// GenerateHash returns a short deterministic hash of data, rendered as a
// decimal string.
//
// Fix: the previous implementation returned string(rune(hash)), which
// produced a single — often unprintable or invalid — Unicode code point
// instead of a usable hash string.
func GenerateHash(data string) string {
	h := 0
	for _, char := range data {
		h = (h*31 + int(char)) % 1000003 // simple polynomial rolling hash
	}
	return strconv.Itoa(h)
}
// downloadClient bounds how long a single download may take; http.Get uses
// http.DefaultClient, which has no timeout, so a stalled remote could
// previously hang the caller forever.
var downloadClient = &http.Client{Timeout: 5 * time.Minute}

// DownloadFile fetches url and returns the server-derived filename together
// with the full response body.
func DownloadFile(url string) (string, []byte, error) {
	resp, err := downloadClient.Get(url)
	if err != nil {
		return "", nil, fmt.Errorf("failed to download file: %w", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return "", nil, fmt.Errorf("failed to download file: status code %d", resp.StatusCode)
	}

	filename := getFilenameFromResponse(resp, url)
	data, err := io.ReadAll(resp.Body)
	if err != nil {
		return "", nil, fmt.Errorf("failed to read response body: %w", err)
	}
	return filename, data, nil
}
func getFilenameFromResponse(resp *http.Response, originalURL string) string {
// 1. Try Content-Disposition header
if cd := resp.Header.Get("Content-Disposition"); cd != "" {
if _, params, err := mime.ParseMediaType(cd); err == nil {
if filename := params["filename"]; filename != "" {
return filename
}
}
}
// 2. Try to decode URL-encoded filename from Content-Disposition
if cd := resp.Header.Get("Content-Disposition"); cd != "" {
if strings.Contains(cd, "filename*=") {
// Handle RFC 5987 encoded filenames
parts := strings.Split(cd, "filename*=")
if len(parts) > 1 {
encoded := strings.Trim(parts[1], `"`)
if strings.HasPrefix(encoded, "UTF-8''") {
if decoded, err := url.QueryUnescape(encoded[7:]); err == nil {
return decoded
}
}
}
}
}
// 3. Fall back to URL path
if parsedURL, err := url.Parse(originalURL); err == nil {
if filename := filepath.Base(parsedURL.Path); filename != "." && filename != "/" {
// URL decode the filename
if decoded, err := url.QueryUnescape(filename); err == nil {
return decoded
}
return filename
}
}
// 4. Default filename
return "downloaded_file"
}
// ValidateServiceURL accepts either a full http(s) URL or a bare host:port
// pair and returns an error describing why the value is unusable.
func ValidateServiceURL(urlStr string) error {
	if urlStr == "" {
		return fmt.Errorf("URL cannot be empty")
	}

	// Case 1: a complete URL with scheme and host.
	if u, err := url.Parse(urlStr); err == nil && u.Scheme != "" && u.Host != "" {
		switch u.Scheme {
		case "http", "https":
			return nil
		default:
			return fmt.Errorf("URL scheme must be http or https")
		}
	}

	// Case 2: bare host:port (no scheme) — validate by prefixing a scheme.
	if strings.Contains(urlStr, ":") && !strings.Contains(urlStr, "://") {
		u, err := url.Parse("http://" + urlStr)
		switch {
		case err != nil:
			return fmt.Errorf("invalid host:port format: %w", err)
		case u.Host == "":
			return fmt.Errorf("host is required in host:port format")
		case u.Port() == "":
			return fmt.Errorf("port is required in host:port format")
		}
		return nil
	}

	return fmt.Errorf("invalid URL format: %s", urlStr)
}
// ExtractFilenameFromURL returns the final path segment of rawURL, or ""
// when the URL is unparsable or has no file-like segment.
func ExtractFilenameFromURL(rawURL string) string {
	parsed, err := url.Parse(rawURL)
	if err != nil {
		return ""
	}
	switch name := path.Base(parsed.Path); name {
	case "/", ".", "":
		// Root, empty or degenerate paths carry no filename.
		return ""
	default:
		return name
	}
}

View File

@@ -57,3 +57,15 @@ func IsSampleFile(path string) bool {
}
return RegexMatch(sampleRegex, path)
}
// IsParFile reports whether path names a PAR/PAR2 parity file.
func IsParFile(path string) bool {
	switch strings.ToLower(filepath.Ext(path)) {
	case ".par", ".par2":
		return true
	}
	return false
}
// IsRarFile reports whether path names a RAR archive or one of its split
// volumes (.rar, .r00, .r01, ...).
//
// Generalization: the previous version only recognized volumes .r00 through
// .r04, silently missing archives split into more than five parts. Any .rNN
// volume extension is now accepted (a strict superset of the old behavior).
func IsRarFile(path string) bool {
	ext := filepath.Ext(path)
	if strings.EqualFold(ext, ".rar") {
		return true
	}
	// Split volumes look like ".r00" .. ".r99".
	return len(ext) == 4 &&
		(ext[1] == 'r' || ext[1] == 'R') &&
		ext[2] >= '0' && ext[2] <= '9' &&
		ext[3] >= '0' && ext[3] <= '9'
}

View File

@@ -3,12 +3,11 @@ package qbit
import (
"context"
"encoding/base64"
"fmt"
"github.com/go-chi/chi/v5"
"github.com/sirrobot01/decypharr/internal/utils"
"github.com/sirrobot01/decypharr/pkg/arr"
"github.com/sirrobot01/decypharr/pkg/store"
"net/http"
"net/url"
"strings"
)
@@ -20,45 +19,6 @@ const (
arrKey contextKey = "arr"
)
// validateServiceURL checks that urlStr is either a full http(s) URL or a
// bare host:port pair, returning an error describing the first problem found.
//
// Deprecated: duplicate of utils.ValidateServiceURL; prefer the shared helper.
func validateServiceURL(urlStr string) error {
	if urlStr == "" {
		return fmt.Errorf("URL cannot be empty")
	}

	// Try parsing as full URL first
	u, err := url.Parse(urlStr)
	if err == nil && u.Scheme != "" && u.Host != "" {
		// It's a full URL, validate scheme
		if u.Scheme != "http" && u.Scheme != "https" {
			return fmt.Errorf("URL scheme must be http or https")
		}
		return nil
	}

	// Check if it's a host:port format (no scheme)
	if strings.Contains(urlStr, ":") && !strings.Contains(urlStr, "://") {
		// Try parsing with http:// prefix
		testURL := "http://" + urlStr
		u, err := url.Parse(testURL)
		if err != nil {
			return fmt.Errorf("invalid host:port format: %w", err)
		}
		if u.Host == "" {
			return fmt.Errorf("host is required in host:port format")
		}

		// Validate port number
		if u.Port() == "" {
			return fmt.Errorf("port is required in host:port format")
		}

		return nil
	}

	return fmt.Errorf("invalid URL format: %s", urlStr)
}
func getCategory(ctx context.Context) string {
if category, ok := ctx.Value(categoryKey).(string); ok {
return category
@@ -146,7 +106,7 @@ func (q *QBit) authContext(next http.Handler) http.Handler {
}
}
a.Source = "auto"
if err := validateServiceURL(a.Host); err != nil {
if err := utils.ValidateServiceURL(a.Host); err != nil {
// Return silently, no need to raise a problem. Just do not add the Arr to the context/config.json
next.ServeHTTP(w, r)
return

View File

@@ -18,13 +18,16 @@ type QBit struct {
}
func New() *QBit {
_cfg := config.Get()
cfg := _cfg.QBitTorrent
cfg := config.Get()
qbitCfg := cfg.QBitTorrent
if qbitCfg == nil {
return nil
}
return &QBit{
Username: cfg.Username,
Password: cfg.Password,
DownloadFolder: cfg.DownloadFolder,
Categories: cfg.Categories,
Username: qbitCfg.Username,
Password: qbitCfg.Password,
DownloadFolder: qbitCfg.DownloadFolder,
Categories: qbitCfg.Categories,
storage: store.Get().Torrents(),
logger: logger.New("qbit"),
}

View File

@@ -684,18 +684,3 @@ func (r *Reader) ExtractFile(file *File) ([]byte, error) {
return r.readBytes(file.DataOffset, int(file.CompressedSize))
}
// Helper functions
// min returns the smaller of two ints.
func min(a, b int) int {
	if a > b {
		return b
	}
	return a
}

// max returns the larger of two ints.
func max(a, b int) int {
	if a < b {
		return b
	}
	return a
}

View File

@@ -214,7 +214,6 @@ func (r *Repair) newJob(arrsNames []string, mediaIDs []string) *Job {
}
}
// initRun initializes the repair run, setting up necessary configurations, checks and caches
func (r *Repair) initRun(ctx context.Context) {
if r.useWebdav {
// Webdav use is enabled, initialize debrid torrent caches

171
pkg/sabnzbd/config.go Normal file
View File

@@ -0,0 +1,171 @@
package sabnzbd
// ConfigResponse represents configuration response
type ConfigResponse struct {
	Config *Config `json:"config"`
}

// ConfigNewzbin holds legacy Newzbin account settings, kept for SABnzbd
// API compatibility.
type ConfigNewzbin struct {
	Username     string `json:"username"`
	BookmarkRate int    `json:"bookmark_rate"`
	Url          string `json:"url"`
	Bookmarks    int    `json:"bookmarks"`
	Password     string `json:"password"`
	Unbookmark   int    `json:"unbookmark"`
}

// Category represents a SABnzbd category
type Category struct {
	Name     string `json:"name"`
	Order    int    `json:"order"`
	Pp       string `json:"pp"`
	Script   string `json:"script"`
	Dir      string `json:"dir"`
	NewzBin  string `json:"newzbin"`
	Priority string `json:"priority"`
}

// Server represents a usenet server
type Server struct {
	Name        string `json:"name"`
	Host        string `json:"host"`
	Port        int    `json:"port"`
	Username    string `json:"username"`
	Password    string `json:"password"`
	Connections int    `json:"connections"`
	Retention   int    `json:"retention"`
	Priority    int    `json:"priority"`
	SSL         bool   `json:"ssl"`
	Optional    bool   `json:"optional"`
}

// Config is the top-level configuration document returned by the API.
type Config struct {
	Misc       MiscConfig `json:"misc"`
	Categories []Category `json:"categories"`
	Servers    []Server   `json:"servers"`
}

// MiscConfig models the "misc" configuration section of the SABnzbd API.
// Many numeric/boolean values are carried as strings — presumably matching
// SABnzbd's own serialization; confirm against a real SABnzbd response.
type MiscConfig struct {
	// Directory Configuration
	CompleteDir  string `json:"complete_dir"`
	DownloadDir  string `json:"download_dir"`
	AdminDir     string `json:"admin_dir"`
	NzbBackupDir string `json:"nzb_backup_dir"`
	ScriptDir    string `json:"script_dir"`
	EmailDir     string `json:"email_dir"`
	WebDir       string `json:"web_dir"`

	// Processing Options
	ParOption             string `json:"par_option"`
	ParOptionConvert      string `json:"par_option_convert"`
	ParOptionDuplicate    string `json:"par_option_duplicate"`
	DirectUnpack          string `json:"direct_unpack"`
	FlatUnpack            string `json:"flat_unpack"`
	EnableRecursiveUnpack string `json:"enable_recursive_unpack"`
	OverwriteFiles        string `json:"overwrite_files"`
	IgnoreWrongUnrar      string `json:"ignore_wrong_unrar"`
	IgnoreUnrarDates      string `json:"ignore_unrar_dates"`
	PreCheck              string `json:"pre_check"`

	// File Handling
	Permissions               string   `json:"permissions"`
	FolderRename              string   `json:"folder_rename"`
	FileRename                string   `json:"file_rename"`
	ReplaceIllegal            string   `json:"replace_illegal"`
	ReplaceDots               string   `json:"replace_dots"`
	ReplaceSpaces             string   `json:"replace_spaces"`
	SanitizeSafe              string   `json:"sanitize_safe"`
	IgnoreSamples             string   `json:"ignore_samples"`
	UnwantedExtensions        []string `json:"unwanted_extensions"`
	ActionOnUnwanted          string   `json:"action_on_unwanted"`
	ActionOnDuplicate         string   `json:"action_on_duplicate"`
	BackupForDuplicates       string   `json:"backup_for_duplicates"`
	CleanupList               []string `json:"cleanup_list"`
	DeobfuscateFinalFilenames string   `json:"deobfuscate_final_filenames"`

	// Scripts and Processing
	PreScript             string `json:"pre_script"`
	PostScript            string `json:"post_script"`
	EmptyPostproc         string `json:"empty_postproc"`
	PauseOnPostProcessing string `json:"pause_on_post_processing"`

	// System Resources
	Nice       string `json:"nice"`
	NiceUnpack string `json:"nice_unpack"`
	Ionice     string `json:"ionice"`
	Fsync      string `json:"fsync"`

	// Bandwidth and Performance
	BandwidthMax     string `json:"bandwidth_max"`
	BandwidthPerc    string `json:"bandwidth_perc"`
	RefreshRate      string `json:"refresh_rate"`
	DirscanSpeed     string `json:"dirscan_speed"`
	FolderMaxLength  string `json:"folder_max_length"`
	PropagationDelay string `json:"propagation_delay"`

	// Storage Management
	DownloadFree string `json:"download_free"`
	CompleteFree string `json:"complete_free"`

	// Queue Management
	QueueComplete     string `json:"queue_complete"`
	QueueCompletePers string `json:"queue_complete_pers"`
	AutoSort          string `json:"auto_sort"`
	NewNzbOnFailure   string `json:"new_nzb_on_failure"`
	PauseOnPwrar      string `json:"pause_on_pwrar"`
	WarnedOldQueue    string `json:"warned_old_queue"`

	// Web Interface
	WebHost     string `json:"web_host"`
	WebPort     string `json:"web_port"`
	WebUsername string `json:"web_username"`
	WebPassword string `json:"web_password"`
	WebColor    string `json:"web_color"`
	WebColor2   string `json:"web_color2"`
	AutoBrowser string `json:"auto_browser"`
	Autobrowser string `json:"autobrowser"` // Duplicate field - may need to resolve

	// HTTPS Configuration
	EnableHTTPS             string `json:"enable_https"`
	EnableHTTPSVerification string `json:"enable_https_verification"`
	HTTPSPort               string `json:"https_port"`
	HTTPSCert               string `json:"https_cert"`
	HTTPSKey                string `json:"https_key"`
	HTTPSChain              string `json:"https_chain"`

	// Security and API
	APIKey        string   `json:"api_key"`
	NzbKey        string   `json:"nzb_key"`
	HostWhitelist string   `json:"host_whitelist"`
	LocalRanges   []string `json:"local_ranges"`
	InetExposure  string   `json:"inet_exposure"`
	APILogging    string   `json:"api_logging"`
	APIWarnings   string   `json:"api_warnings"`

	// Logging
	LogLevel   string `json:"log_level"`
	LogSize    string `json:"log_size"`
	MaxLogSize string `json:"max_log_size"`
	LogBackups string `json:"log_backups"`
	LogNew     string `json:"log_new"`

	// Notifications
	MatrixUsername string `json:"matrix_username"`
	MatrixPassword string `json:"matrix_password"`
	MatrixServer   string `json:"matrix_server"`
	MatrixRoom     string `json:"matrix_room"`

	// Miscellaneous
	ConfigLock      string `json:"config_lock"`
	Language        string `json:"language"`
	CheckNewRel     string `json:"check_new_rel"`
	RSSFilenames    string `json:"rss_filenames"`
	IPv6Hosting     string `json:"ipv6_hosting"`
	EnableBonjour   string `json:"enable_bonjour"`
	Cherryhost      string `json:"cherryhost"`
	WinMenu         string `json:"win_menu"`
	AMPM            string `json:"ampm"`
	NotifiedNewSkin string `json:"notified_new_skin"`
	HelpURI         string `json:"helpuri"`
	SSDURI          string `json:"ssduri"`
}

121
pkg/sabnzbd/context.go Normal file
View File

@@ -0,0 +1,121 @@
package sabnzbd
import (
"context"
"github.com/sirrobot01/decypharr/internal/utils"
"github.com/sirrobot01/decypharr/pkg/store"
"net/http"
"strings"
"github.com/sirrobot01/decypharr/pkg/arr"
)
// contextKey is a private key type for request-context values, preventing
// collisions with keys set by other packages.
type contextKey string

const (
	apiKeyKey   contextKey = "apikey"   // SABnzbd API key
	modeKey     contextKey = "mode"     // API "mode" parameter
	arrKey      contextKey = "arr"      // resolved *arr.Arr instance
	categoryKey contextKey = "category" // download category
)
// getMode returns the API mode stored in ctx, or "" when absent.
func getMode(ctx context.Context) string {
	mode, _ := ctx.Value(modeKey).(string)
	return mode
}
// categoryContext extracts the "category" parameter (query string first, then
// form body) and stores the trimmed value in the request context.
func (s *SABnzbd) categoryContext(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		lookup := func() string {
			if v := r.URL.Query().Get("category"); v != "" {
				return v
			}
			// Fall back to the parsed form body.
			_ = r.ParseForm()
			if v := r.Form.Get("category"); v != "" {
				return v
			}
			return r.FormValue("category")
		}
		ctx := context.WithValue(r.Context(), categoryKey, strings.TrimSpace(lookup()))
		next.ServeHTTP(w, r.WithContext(ctx))
	})
}
// getArrFromContext returns the *arr.Arr stored in ctx, or nil when absent.
func getArrFromContext(ctx context.Context) *arr.Arr {
	a, _ := ctx.Value(arrKey).(*arr.Arr)
	return a
}

// getCategory returns the download category stored in ctx, or "".
func getCategory(ctx context.Context) string {
	category, _ := ctx.Value(categoryKey).(string)
	return category
}
// modeContext extracts the SABnzbd "mode" and "cat" parameters and stores the
// mode plus a default Arr for the category in the request context.
//
// Fix: the previous version only called ParseForm when "mode" was missing
// from the query string, so a body-supplied "cat" was silently dropped
// whenever "mode" arrived via the query. r.FormValue parses the form on
// demand and checks the query string first, then the body — the same
// precedence as before, but with the form guaranteed to be parsed.
func (s *SABnzbd) modeContext(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		mode := r.FormValue("mode")
		category := r.FormValue("cat")

		// Create a default Arr instance for the category.
		downloadUncached := false
		a := arr.New(category, "", "", false, false, &downloadUncached, "", "auto")

		ctx := context.WithValue(r.Context(), modeKey, strings.TrimSpace(mode))
		ctx = context.WithValue(ctx, arrKey, a)
		next.ServeHTTP(w, r.WithContext(ctx))
	})
}
// authContext creates a middleware that extracts the Arr host and token from
// the ma_username/ma_password form fields and adds the resulting Arr to the
// request context. Only a valid host and token will be added to the
// context/config.json; the rest are left for manual configuration.
func (s *SABnzbd) authContext(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		category := getCategory(r.Context())
		arrs := store.Get().Arr()

		// Reuse the configured Arr for this category, or start a fresh one.
		a := arrs.Get(category)
		if a == nil {
			downloadUncached := false
			a = arr.New(category, "", "", false, false, &downloadUncached, "", "auto")
		}

		// Overlay credentials supplied by the client, ignoring blanks.
		if host := strings.TrimSpace(r.FormValue("ma_username")); host != "" {
			a.Host = host
		}
		if token := strings.TrimSpace(r.FormValue("ma_password")); token != "" {
			a.Token = token
		}
		a.Source = "auto"

		// An invalid host means we silently skip persisting the Arr.
		if err := utils.ValidateServiceURL(a.Host); err != nil {
			next.ServeHTTP(w, r)
			return
		}

		arrs.AddOrUpdate(a)
		ctx := context.WithValue(r.Context(), arrKey, a)
		next.ServeHTTP(w, r.WithContext(ctx))
	})
}

476
pkg/sabnzbd/handlers.go Normal file
View File

@@ -0,0 +1,476 @@
package sabnzbd
import (
"context"
"fmt"
"github.com/sirrobot01/decypharr/internal/request"
"github.com/sirrobot01/decypharr/internal/utils"
"github.com/sirrobot01/decypharr/pkg/arr"
"github.com/sirrobot01/decypharr/pkg/usenet"
"io"
"net/http"
"strconv"
"strings"
"time"
)
// handleAPI dispatches a SABnzbd API request to the handler registered for
// the "mode" parameter extracted earlier by modeContext. Unknown modes are
// answered with 404.
func (s *SABnzbd) handleAPI(w http.ResponseWriter, r *http.Request) {
	mode := getMode(r.Context())
	switch mode {
	case ModeQueue:
		s.handleQueue(w, r)
	case ModeHistory:
		s.handleHistory(w, r)
	case ModeConfig, ModeGetConfig:
		s.handleConfig(w, r)
	case ModeStatus, ModeFullStatus:
		s.handleStatus(w, r)
	case ModeAddURL:
		s.handleAddURL(w, r)
	case ModeAddFile:
		s.handleAddFile(w, r)
	case ModeVersion:
		s.handleVersion(w, r)
	case ModeGetCats:
		s.handleGetCategories(w, r)
	case ModeGetScripts:
		s.handleGetScripts(w, r)
	case ModeGetFiles:
		s.handleGetFiles(w, r)
	default:
		s.logger.Warn().Str("mode", mode).Msg("Unknown API mode, returning 404")
		http.Error(w, "Not Found", http.StatusNotFound)
	}
}
// handleQueue dispatches mode=queue requests: without a "name" parameter it
// lists the queue; otherwise it runs the named queue operation
// (delete/pause/resume).
func (s *SABnzbd) handleQueue(w http.ResponseWriter, r *http.Request) {
	name := r.FormValue("name")
	if name == "" {
		s.handleListQueue(w, r)
		return
	}
	switch strings.ToLower(strings.TrimSpace(name)) {
	case "delete":
		s.handleQueueDelete(w, r)
	case "pause":
		s.handleQueuePause(w, r)
	case "resume":
		s.handleQueueResume(w, r)
	default:
		// The original fell through silently, answering with an empty 200
		// body; report the unsupported operation instead.
		s.writeError(w, fmt.Sprintf("unsupported queue operation: %s", name), http.StatusBadRequest)
	}
}
// handleQueueResume acknowledges a queue resume request. It is implemented
// as a no-op that always reports success.
func (s *SABnzbd) handleQueueResume(w http.ResponseWriter, r *http.Request) {
	request.JSONResponse(w, StatusResponse{Status: true}, http.StatusOK)
}
// handleQueueDelete deletes one or more queue entries by nzo_id, read from
// the comma-separated "value" parameter. If every deletion fails a 500 is
// returned; partial failures keep Status true but surface the failures in
// the response's Error field.
func (s *SABnzbd) handleQueueDelete(w http.ResponseWriter, r *http.Request) {
	nzoIDs := r.FormValue("value")
	if nzoIDs == "" {
		s.writeError(w, "No NZB IDs provided", http.StatusBadRequest)
		return
	}
	var successCount int
	var errMsgs []string
	for _, nzoID := range strings.Split(nzoIDs, ",") {
		nzoID = strings.TrimSpace(nzoID)
		if nzoID == "" {
			continue // skip empty IDs
		}
		s.logger.Info().Str("nzo_id", nzoID).Msg("Deleting NZB")
		// Use atomic delete operation
		if err := s.usenet.Store().AtomicDelete(nzoID); err != nil {
			s.logger.Error().
				Err(err).
				Str("nzo_id", nzoID).
				Msg("Failed to delete NZB")
			errMsgs = append(errMsgs, fmt.Sprintf("Failed to delete %s: %v", nzoID, err))
		} else {
			successCount++
		}
	}
	if len(errMsgs) > 0 && successCount == 0 {
		// All deletions failed
		s.writeError(w, fmt.Sprintf("All deletions failed: %s", strings.Join(errMsgs, "; ")), http.StatusInternalServerError)
		return
	}
	response := StatusResponse{Status: true}
	if len(errMsgs) > 0 {
		// Partial success: report the failures to the caller instead of only
		// logging them (the original always returned an empty Error).
		s.logger.Warn().
			Int("success_count", successCount).
			Int("error_count", len(errMsgs)).
			Strs("errors", errMsgs).
			Msg("Partial success in queue deletion")
		response.Error = strings.Join(errMsgs, "; ")
	}
	request.JSONResponse(w, response, http.StatusOK)
}
// handleQueuePause acknowledges a queue pause request. It is implemented as
// a no-op that always reports success.
func (s *SABnzbd) handleQueuePause(w http.ResponseWriter, r *http.Request) {
	request.JSONResponse(w, StatusResponse{Status: true}, http.StatusOK)
}
// handleListQueue returns the current download queue in SABnzbd's queue
// JSON shape.
func (s *SABnzbd) handleListQueue(w http.ResponseWriter, r *http.Request) {
	nzbs := s.usenet.Store().GetQueue()
	queue := Queue{
		Version: Version,
		Slots:   []QueueSlot{},
	}
	for _, nzb := range nzbs {
		// Work on a local copy of the ETA (seconds) rather than mutating the
		// stored NZB, and clamp negative values to zero.
		eta := nzb.ETA
		if eta < 0 {
			eta = 0
		}
		// SABnzbd clients expect "timeleft" as a clock-style H:MM:SS string.
		// The original used time.Duration.String(), which produces values
		// like "1h2m3s" instead.
		timeLeft := fmt.Sprintf("%d:%02d:%02d", eta/3600, (eta%3600)/60, eta%60)
		// Estimate the remaining bytes from the completion percentage; the
		// original hard-coded MBLeft to 0.
		remaining := int64(float64(nzb.TotalSize) * (100 - nzb.Percentage) / 100)
		if remaining < 0 {
			remaining = 0
		}
		queue.Slots = append(queue.Slots, QueueSlot{
			Status:     s.mapNZBStatus(nzb.Status),
			Mb:         nzb.TotalSize,
			Filename:   nzb.Name,
			Cat:        nzb.Category,
			MBLeft:     remaining,
			Percentage: nzb.Percentage,
			NzoId:      nzb.ID,
			Size:       nzb.TotalSize,
			TimeLeft:   timeLeft,
		})
	}
	request.JSONResponse(w, QueueResponse{
		Queue:   queue,
		Status:  true,
		Version: Version,
	}, http.StatusOK)
}
// handleHistory returns the download history, optionally limited by the
// "limit" form parameter (missing/zero means no explicit limit; negative
// values are clamped to zero).
func (s *SABnzbd) handleHistory(w http.ResponseWriter, r *http.Request) {
	limit := 0
	if raw := r.FormValue("limit"); raw != "" {
		parsed, err := strconv.Atoi(raw)
		if err != nil {
			s.logger.Error().Err(err).Msg("Invalid limit parameter for history")
			s.writeError(w, "Invalid limit parameter", http.StatusBadRequest)
			return
		}
		limit = parsed
	}
	if limit < 0 {
		limit = 0
	}
	history := s.getHistory(r.Context(), limit)
	request.JSONResponse(w, HistoryResponse{History: history}, http.StatusOK)
}
// handleConfig returns the SABnzbd-compatible configuration snapshot built
// by SetConfig.
func (s *SABnzbd) handleConfig(w http.ResponseWriter, r *http.Request) {
	request.JSONResponse(w, ConfigResponse{Config: s.config}, http.StatusOK)
}
// handleAddURL handles the SABnzbd "addurl" mode: the NZB at the URL in the
// "name" parameter is downloaded and queued for processing. Responds with
// the new nzo_id on success.
func (s *SABnzbd) handleAddURL(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	a := getArrFromContext(ctx)
	if a == nil {
		// No Arr in context; fall back to a blank one for the category.
		a = arr.New(getCategory(ctx), "", "", false, false, nil, "", "")
	}
	if r.Method != http.MethodPost {
		s.logger.Warn().Str("method", r.Method).Msg("Invalid method")
		s.writeError(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}
	url := r.FormValue("name")
	action := r.FormValue("action")
	downloadDir := r.FormValue("download_dir")
	if action == "" {
		action = "symlink"
	}
	if downloadDir == "" {
		downloadDir = s.config.Misc.DownloadDir
	}
	if url == "" {
		s.writeError(w, "URL is required", http.StatusBadRequest)
		return
	}
	nzoID, err := s.addNZBURL(ctx, url, a, action, downloadDir)
	if err != nil {
		s.writeError(w, err.Error(), http.StatusInternalServerError)
		return
	}
	if nzoID == "" {
		s.writeError(w, "Failed to add NZB", http.StatusInternalServerError)
		return
	}
	request.JSONResponse(w, AddNZBResponse{Status: true, NzoIds: []string{nzoID}}, http.StatusOK)
}
// handleAddFile handles NZB file uploads (SABnzbd "addfile" mode): an NZB
// uploaded as multipart form data under the "name" field is read and
// queued for processing. Responds with the new nzo_id on success.
func (s *SABnzbd) handleAddFile(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	_arr := getArrFromContext(ctx)
	cat := getCategory(ctx)
	if _arr == nil {
		// If Arr is not in context, create a new one with default values
		_arr = arr.New(cat, "", "", false, false, nil, "", "")
	}
	if r.Method != http.MethodPost {
		s.writeError(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}
	// Parse multipart form
	err := r.ParseMultipartForm(32 << 20) // 32 MB limit
	if err != nil {
		s.writeError(w, "Failed to parse multipart form", http.StatusBadRequest)
		return
	}
	// SABnzbd clients send the NZB in the "name" file field.
	file, header, err := r.FormFile("name")
	if err != nil {
		s.writeError(w, "No file uploaded", http.StatusBadRequest)
		return
	}
	defer file.Close()
	// Read file content
	content, err := io.ReadAll(file)
	if err != nil {
		s.writeError(w, "Failed to read file", http.StatusInternalServerError)
		return
	}
	// Optional overrides; fall back to symlink action and the configured
	// download directory.
	action := r.FormValue("action")
	downloadDir := r.FormValue("download_dir")
	if action == "" {
		action = "symlink"
	}
	if downloadDir == "" {
		downloadDir = s.config.Misc.DownloadDir
	}
	// Process NZB file
	nzbID, err := s.addNZBFile(ctx, content, header.Filename, _arr, action, downloadDir)
	if err != nil {
		s.writeError(w, fmt.Sprintf("Failed to add NZB file: %s", err.Error()), http.StatusInternalServerError)
		return
	}
	if nzbID == "" {
		s.writeError(w, "Failed to add NZB file", http.StatusInternalServerError)
		return
	}
	response := AddNZBResponse{
		Status: true,
		NzoIds: []string{nzbID},
	}
	request.JSONResponse(w, response, http.StatusOK)
}
// handleVersion reports the emulated SABnzbd version string.
func (s *SABnzbd) handleVersion(w http.ResponseWriter, r *http.Request) {
	request.JSONResponse(w, VersionResponse{Version: Version}, http.StatusOK)
}
// handleGetCategories returns the available download categories.
func (s *SABnzbd) handleGetCategories(w http.ResponseWriter, r *http.Request) {
	request.JSONResponse(w, s.getCategories(), http.StatusOK)
}
// handleGetScripts returns the post-processing script list; only the
// literal "None" entry is reported.
func (s *SABnzbd) handleGetScripts(w http.ResponseWriter, r *http.Request) {
	request.JSONResponse(w, []string{"None"}, http.StatusOK)
}
// handleGetFiles returns the file names contained in the NZB identified by
// the "value" form parameter. When the ID is missing or unknown the nil
// slice is returned (serialized as JSON null, matching prior behavior).
func (s *SABnzbd) handleGetFiles(w http.ResponseWriter, r *http.Request) {
	var files []string
	if nzoID := r.FormValue("value"); nzoID != "" {
		if nzb := s.usenet.Store().Get(nzoID); nzb != nil {
			for _, f := range nzb.Files {
				files = append(files, f.Name)
			}
		}
	}
	request.JSONResponse(w, files, http.StatusOK)
}
// handleStatus answers mode=status / mode=fullstatus with the minimal
// status payload: {"status": {"completed_dir": ...}}.
func (s *SABnzbd) handleStatus(w http.ResponseWriter, r *http.Request) {
	payload := map[string]map[string]string{
		"status": {
			"completed_dir": s.config.Misc.DownloadDir,
		},
	}
	request.JSONResponse(w, payload, http.StatusOK)
}
// Helper methods
// getHistory builds a SABnzbd History payload from stored history items for
// the category carried in ctx, passing limit through to the store.
func (s *SABnzbd) getHistory(ctx context.Context, limit int) History {
	items := s.usenet.Store().GetHistory(getCategory(ctx), limit)
	slots := make([]HistorySlot, 0, len(items))
	for _, item := range items {
		slots = append(slots, HistorySlot{
			Status:      s.mapNZBStatus(item.Status),
			Name:        item.Name,
			NZBName:     item.Name,
			NzoId:       item.ID,
			Category:    item.Category,
			FailMessage: item.FailMessage,
			Bytes:       item.TotalSize,
			Storage:     item.Storage,
		})
	}
	return History{
		Version: Version,
		Paused:  false,
		Slots:   slots,
	}
}
// writeError sends a SABnzbd-style JSON error response (Status=false plus
// the message) with the given HTTP status code.
func (s *SABnzbd) writeError(w http.ResponseWriter, message string, status int) {
	request.JSONResponse(w, StatusResponse{Status: false, Error: message}, status)
}
// mapNZBStatus translates an internal NZB status string into the
// SABnzbd-facing status constant; anything unrecognized is reported as
// queued.
func (s *SABnzbd) mapNZBStatus(status string) string {
	switch status {
	case "completed":
		return StatusCompleted
	case "downloading":
		return StatusDownloading
	case "error", "failed":
		return StatusFailed
	case "extracting":
		return StatusExtracting
	case "moving":
		return StatusMoving
	case "paused":
		return StatusPaused
	case "processing":
		return StatusProcessing
	case "repairing":
		return StatusRepairing
	case "running":
		return StatusRunning
	case "verifying":
		return StatusVerifying
	}
	return StatusQueued
}
// addNZBURL fetches the NZB at url and hands its content to addNZBFile.
// Returns the new NZB's ID.
func (s *SABnzbd) addNZBURL(ctx context.Context, url string, arr *arr.Arr, action, downloadDir string) (string, error) {
	if url == "" {
		return "", fmt.Errorf("URL is required")
	}
	// Download NZB content
	filename, content, err := utils.DownloadFile(url)
	switch {
	case err != nil:
		s.logger.Error().Err(err).Str("url", url).Msg("Failed to download NZB from URL")
		return "", fmt.Errorf("failed to download NZB from URL: %w", err)
	case len(content) == 0:
		s.logger.Warn().Str("url", url).Msg("Downloaded content is empty")
		return "", fmt.Errorf("downloaded content is empty")
	}
	return s.addNZBFile(ctx, content, filename, arr, action, downloadDir)
}
// addNZBFile submits raw NZB content to the usenet processor and returns
// the new NZB's ID.
func (s *SABnzbd) addNZBFile(ctx context.Context, content []byte, filename string, arr *arr.Arr, action, downloadDir string) (string, error) {
	if s.usenet == nil {
		return "", fmt.Errorf("store not initialized")
	}
	nzb, err := s.usenet.ProcessNZB(ctx, &usenet.ProcessRequest{
		NZBContent:  content,
		Name:        filename,
		Arr:         arr,
		Action:      action,
		DownloadDir: downloadDir,
	})
	if err != nil {
		return "", fmt.Errorf("failed to process NZB: %w", err)
	}
	return nzb.ID, nil
}

24
pkg/sabnzbd/routes.go Normal file
View File

@@ -0,0 +1,24 @@
package sabnzbd
import (
"net/http"
"github.com/go-chi/chi/v5"
)
// Routes wires the SABnzbd-compatible HTTP API: every endpoint lives under
// /api and is dispatched on the "mode" query/form parameter by handleAPI.
func (s *SABnzbd) Routes() http.Handler {
	r := chi.NewRouter()
	// Category and Arr-credential extraction run for every request.
	r.Use(s.categoryContext)
	r.Use(s.authContext)
	// SABnzbd API endpoints - all under /api with mode parameter
	r.Route("/api", func(r chi.Router) {
		r.Use(s.modeContext)
		// Queue operations
		r.Get("/", s.handleAPI)
		r.Post("/", s.handleAPI)
	})
	return r
}

116
pkg/sabnzbd/sabnzbd.go Normal file
View File

@@ -0,0 +1,116 @@
package sabnzbd
import (
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/logger"
"github.com/sirrobot01/decypharr/pkg/store"
"github.com/sirrobot01/decypharr/pkg/usenet"
"path/filepath"
)
// SABnzbd emulates a subset of the SABnzbd HTTP API on top of the local
// usenet client, so SABnzbd-speaking clients (e.g. the Arr applications)
// can submit and monitor NZBs through Decypharr.
type SABnzbd struct {
	downloadFolder    string // completed-download directory advertised to clients
	config            *Config // SABnzbd-compatible config snapshot, built by SetConfig
	refreshInterval   int // client refresh interval from configuration
	logger            zerolog.Logger
	usenet            usenet.Usenet // backing usenet client/store
	defaultCategories []string // extra categories from config, beyond the Arr-derived ones
}
// New builds a SABnzbd API facade backed by the given usenet client, taking
// its settings from the SABnzbd section of the global configuration.
func New(usenetClient usenet.Usenet) *SABnzbd {
	appCfg := config.Get()
	sabCfg := appCfg.SABnzbd
	// Keep only non-empty configured category names.
	var defaultCategories []string
	for _, cat := range sabCfg.Categories {
		if cat != "" {
			defaultCategories = append(defaultCategories, cat)
		}
	}
	sb := &SABnzbd{
		downloadFolder:    sabCfg.DownloadFolder,
		refreshInterval:   sabCfg.RefreshInterval,
		logger:            logger.New("sabnzbd"),
		usenet:            usenetClient,
		defaultCategories: defaultCategories,
	}
	sb.SetConfig(appCfg)
	return sb
}
// SetConfig (re)builds the SABnzbd-compatible configuration snapshot from
// the application config, including the advertised category list and NNTP
// servers.
func (s *SABnzbd) SetConfig(cfg *config.Config) {
	sabnzbdConfig := &Config{
		Misc: MiscConfig{
			CompleteDir:   s.downloadFolder,
			DownloadDir:   s.downloadFolder,
			AdminDir:      s.downloadFolder,
			WebPort:       cfg.Port,
			Language:      "en",
			RefreshRate:   "1",
			QueueComplete: "0",
			ConfigLock:    "0",
			Autobrowser:   "1",
			CheckNewRel:   "1",
		},
		Categories: s.getCategories(),
	}
	// Mirror providers only when a usenet section actually exists. The
	// previous condition (cfg.Usenet != nil || len(cfg.Usenet.Providers) == 0)
	// dereferenced a nil cfg.Usenet whenever the section was absent, and its
	// intent was inverted.
	if cfg.Usenet != nil {
		for _, provider := range cfg.Usenet.Providers {
			if provider.Host == "" || provider.Port == 0 {
				continue // skip incompletely configured providers
			}
			sabnzbdConfig.Servers = append(sabnzbdConfig.Servers, Server{
				Name:        provider.Name,
				Host:        provider.Host,
				Port:        provider.Port,
				Username:    provider.Username,
				Password:    provider.Password,
				Connections: provider.Connections,
				SSL:         provider.SSL,
			})
		}
	}
	s.config = sabnzbdConfig
}
// getCategories returns the SABnzbd category list: one category per
// configured Arr instance plus any extra default categories from the
// config, de-duplicated by name.
func (s *SABnzbd) getCategories() []Category {
	arrs := store.Get().Arr().GetAll()
	categories := make([]Category, 0, len(arrs)+len(s.defaultCategories))
	added := map[string]struct{}{}
	// appendCategory adds a category once per unique name.
	appendCategory := func(name string) {
		if _, ok := added[name]; ok {
			return // already present
		}
		added[name] = struct{}{}
		categories = append(categories, Category{
			Name:     name,
			Order:    len(categories) + 1,
			Pp:       "3",
			Script:   "None",
			Dir:      filepath.Join(s.downloadFolder, name),
			Priority: PriorityNormal,
		})
	}
	// Arr-derived categories first. Note: the original loop consulted the
	// `added` map but never populated it, so duplicate Arr names (and any
	// default category matching an Arr name) produced duplicate entries.
	for _, a := range arrs {
		appendCategory(a.Name)
	}
	// Add default categories if not already present.
	for _, defaultCat := range s.defaultCategories {
		appendCategory(defaultCat)
	}
	return categories
}
// Reset resets per-run SABnzbd state. Currently a no-op.
func (s *SABnzbd) Reset() {
}

150
pkg/sabnzbd/types.go Normal file
View File

@@ -0,0 +1,150 @@
package sabnzbd
// SABnzbd API response types based on official documentation
var (
	// Version is the SABnzbd version string advertised to API clients.
	Version = "4.5.0"
)
// QueueResponse represents the queue status response
type QueueResponse struct {
	Queue   Queue  `json:"queue"`
	Status  bool   `json:"status"`
	Version string `json:"version"`
}

// Queue represents the download queue
type Queue struct {
	Version string      `json:"version"`
	Slots   []QueueSlot `json:"slots"`
}

// QueueSlot represents a download in the queue
type QueueSlot struct {
	Status     string  `json:"status"` // SABnzbd status string (see mapNZBStatus)
	TimeLeft   string  `json:"timeleft"` // clock-style remaining time string
	Mb         int64   `json:"mb"` // populated from NZB.TotalSize; units not verified here
	Filename   string  `json:"filename"`
	Priority   string  `json:"priority"`
	Cat        string  `json:"cat"` // download category
	MBLeft     int64   `json:"mbleft"`
	Percentage float64 `json:"percentage"` // completion percentage, 0-100
	NzoId      string  `json:"nzo_id"` // SABnzbd job identifier
	Size       int64   `json:"size"`
}
// HistoryResponse represents the history response
type HistoryResponse struct {
	History History `json:"history"`
}

// History represents the download history
type History struct {
	Version string        `json:"version"`
	Paused  bool          `json:"paused"`
	Slots   []HistorySlot `json:"slots"`
}

// HistorySlot represents a completed download
type HistorySlot struct {
	Status      string `json:"status"` // SABnzbd status string (see mapNZBStatus)
	Name        string `json:"name"`
	NZBName     string `json:"nzb_name"` // mirrors Name in getHistory
	NzoId       string `json:"nzo_id"` // SABnzbd job identifier
	Category    string `json:"category"`
	FailMessage string `json:"fail_message"` // non-empty on failure
	Bytes       int64  `json:"bytes"`
	Storage     string `json:"storage"` // final storage path
}
// StageLog represents processing stages
type StageLog struct {
	Name    string   `json:"name"`
	Actions []string `json:"actions"`
}

// VersionResponse represents version information
type VersionResponse struct {
	Version string `json:"version"`
}

// StatusResponse represents general status
type StatusResponse struct {
	Status bool   `json:"status"`
	Error  string `json:"error,omitempty"` // set when Status is false or on partial failures
}

// FullStatusResponse represents the full status response with queue and history
type FullStatusResponse struct {
	Queue   Queue   `json:"queue"`
	History History `json:"history"`
	Status  bool    `json:"status"`
	Version string  `json:"version"`
}

// AddNZBRequest represents the request to add an NZB
type AddNZBRequest struct {
	Name     string `json:"name"`
	Cat      string `json:"cat"`
	Script   string `json:"script"`
	Priority string `json:"priority"`
	PP       string `json:"pp"`
	Password string `json:"password"`
	NZBData  []byte `json:"nzb_data"`
	URL      string `json:"url"`
}

// AddNZBResponse represents the response when adding an NZB
type AddNZBResponse struct {
	Status bool     `json:"status"`
	NzoIds []string `json:"nzo_ids"` // IDs of the newly added jobs
	Error  string   `json:"error,omitempty"`
}
// API Mode constants
//
// Values of the SABnzbd "mode" request parameter. handleAPI dispatches on
// these; some (pause/resume/delete/shutdown/restart/retry) are declared for
// completeness but are not currently routed there.
const (
	ModeQueue      = "queue"
	ModeHistory    = "history"
	ModeConfig     = "config"
	ModeGetConfig  = "get_config"
	ModeAddURL     = "addurl"
	ModeAddFile    = "addfile"
	ModeVersion    = "version"
	ModePause      = "pause"
	ModeResume     = "resume"
	ModeDelete     = "delete"
	ModeShutdown   = "shutdown"
	ModeRestart    = "restart"
	ModeGetCats    = "get_cats"
	ModeGetScripts = "get_scripts"
	ModeGetFiles   = "get_files"
	ModeRetry      = "retry"
	ModeStatus     = "status"
	ModeFullStatus = "fullstatus"
)
// Status constants — queue/history status strings as presented to SABnzbd
// API clients.
const (
	StatusQueued = "Queued"
	StatusPaused = "Paused"
	// SABnzbd reports "Downloading" (title-case) like every other status;
	// the previous lowercase "downloading" was inconsistent with its peers
	// and with the SABnzbd API.
	StatusDownloading = "Downloading"
	StatusProcessing  = "Processing"
	StatusCompleted   = "Completed"
	StatusFailed      = "Failed"
	StatusGrabbing    = "Grabbing"
	StatusPropagating = "Propagating"
	StatusVerifying   = "Verifying"
	StatusRepairing   = "Repairing"
	StatusExtracting  = "Extracting"
	StatusMoving      = "Moving"
	StatusRunning     = "Running"
)
// Priority constants — SABnzbd numeric priority levels, serialized as
// strings in category payloads.
const (
	PriorityForced = "2"
	PriorityHigh   = "1"
	PriorityNormal = "0"
	PriorityLow    = "-1"
	PriorityStop   = "-2"
)

View File

@@ -3,6 +3,7 @@ package server
import (
"fmt"
"github.com/go-chi/chi/v5"
"github.com/sirrobot01/decypharr/internal/nntp"
"github.com/sirrobot01/decypharr/internal/request"
debridTypes "github.com/sirrobot01/decypharr/pkg/debrid/types"
"github.com/sirrobot01/decypharr/pkg/store"
@@ -118,5 +119,23 @@ func (s *Server) handleStats(w http.ResponseWriter, r *http.Request) {
profiles = append(profiles, profile)
}
stats["debrids"] = profiles
if s.usenet != nil {
if client := s.usenet.Client(); client != nil {
usenetsData := make([]map[string]interface{}, 0)
client.Pools().Range(func(key string, value *nntp.Pool) bool {
if value != nil {
providerData := make(map[string]interface{})
providerData["name"] = key
providerData["active_connections"] = value.ActiveConnections()
providerData["total_connections"] = value.ConnectionCount()
usenetsData = append(usenetsData, providerData)
}
return true
})
stats["usenet"] = usenetsData
}
}
request.JSONResponse(w, stats, http.StatusOK)
}

View File

@@ -9,6 +9,7 @@ import (
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/logger"
"github.com/sirrobot01/decypharr/pkg/usenet"
"io"
"net/http"
"os"
@@ -17,9 +18,10 @@ import (
type Server struct {
router *chi.Mux
logger zerolog.Logger
usenet usenet.Usenet
}
func New(handlers map[string]http.Handler) *Server {
func New(usenet usenet.Usenet, handlers map[string]http.Handler) *Server {
l := logger.New("http")
r := chi.NewRouter()
r.Use(middleware.Recoverer)
@@ -28,6 +30,7 @@ func New(handlers map[string]http.Handler) *Server {
s := &Server{
logger: l,
usenet: usenet,
}
r.Route(cfg.URLBase, func(r chi.Router) {

View File

@@ -1,7 +1,6 @@
package store
import (
"cmp"
"context"
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/config"
@@ -37,18 +36,21 @@ func Get() *Store {
arrs := arr.NewStorage()
deb := debrid.NewStorage()
cfg := config.Get()
qbitCfg := cfg.QBitTorrent
instance = &Store{
repair: repair.New(arrs, deb),
arr: arrs,
debrid: deb,
torrents: newTorrentStorage(cfg.TorrentsFile()),
logger: logger.Default(), // Use default logger [decypharr]
refreshInterval: time.Duration(cmp.Or(qbitCfg.RefreshInterval, 10)) * time.Minute,
skipPreCache: qbitCfg.SkipPreCache,
downloadSemaphore: make(chan struct{}, cmp.Or(qbitCfg.MaxDownloads, 5)),
importsQueue: NewImportQueue(context.Background(), 1000),
refreshInterval: 10 * time.Minute, // Default refresh interval
skipPreCache: false, // Default skip pre-cache
downloadSemaphore: make(chan struct{}, 5), // Default max concurrent downloads
}
if cfg.QBitTorrent != nil {
instance.refreshInterval = time.Duration(cfg.QBitTorrent.RefreshInterval) * time.Minute
instance.skipPreCache = cfg.QBitTorrent.SkipPreCache
instance.downloadSemaphore = make(chan struct{}, cfg.QBitTorrent.MaxDownloads)
}
if cfg.RemoveStalledAfter != "" {
removeStalledAfter, err := time.ParseDuration(cfg.RemoveStalledAfter)

141
pkg/usenet/cache.go Normal file
View File

@@ -0,0 +1,141 @@
package usenet
import (
"github.com/chrisfarms/yenc"
"github.com/puzpuzpuz/xsync/v4"
"github.com/rs/zerolog"
"sync/atomic"
"time"
)
// SegmentCache provides intelligent caching for NNTP segments
type SegmentCache struct {
	cache       *xsync.Map[string, *CachedSegment] // messageID -> cached segment
	logger      zerolog.Logger
	maxSize     int64 // cache budget in bytes
	currentSize atomic.Int64 // bytes currently accounted for by the cache
}
// CachedSegment represents a cached segment with metadata used for LRU
// eviction and byte-range lookups.
type CachedSegment struct {
	MessageID    string    `json:"message_id"`
	Data         []byte    `json:"data"` // decoded segment payload
	DecodedSize  int64     `json:"decoded_size"`  // Actual size after yEnc decoding
	DeclaredSize int64     `json:"declared_size"` // Size declared in NZB
	CachedAt     time.Time `json:"cached_at"`
	AccessCount  int64     `json:"access_count"` // number of Get hits
	LastAccess   time.Time `json:"last_access"` // drives LRU ordering in evictLRU
	FileBegin    int64     `json:"file_begin"` // Start byte offset in the file
	FileEnd      int64     `json:"file_end"`   // End byte offset in the file
}
// NewSegmentCache creates a new segment cache with a fixed 50 MB budget.
func NewSegmentCache(logger zerolog.Logger) *SegmentCache {
	sc := &SegmentCache{
		cache:  xsync.NewMap[string, *CachedSegment](),
		logger: logger.With().Str("component", "segment_cache").Logger(),
		maxSize: 50 * 1024 * 1024, // 50 MB (the previous comment incorrectly said 100MB)
	}
	return sc
}
// Get retrieves a segment from cache, updating its LRU bookkeeping.
// Returns (nil, false) on a miss.
//
// NOTE(review): AccessCount and LastAccess are mutated without
// synchronization here; concurrent Gets on the same segment race on these
// fields. They only influence eviction order, but confirm this is an
// acceptable trade-off.
func (sc *SegmentCache) Get(messageID string) (*CachedSegment, bool) {
	segment, found := sc.cache.Load(messageID)
	if !found {
		return nil, false
	}
	segment.AccessCount++
	segment.LastAccess = time.Now()
	return segment, true
}
// Put stores a decoded segment in the cache, evicting least-recently-used
// entries first when the insert would exceed the size budget. Accounting is
// based on the bytes actually stored (len(data.Body)); the original sized
// the buffer by the declared yEnc Size, which can disagree with the body
// length, and leaked accounted bytes when overwriting an existing key.
func (sc *SegmentCache) Put(messageID string, data *yenc.Part, declaredSize int64) {
	size := int64(len(data.Body))
	// Make room before inserting if the budget would be exceeded.
	if sc.currentSize.Load()+size > sc.maxSize {
		sc.evictLRU(size)
	}
	now := time.Now()
	segment := &CachedSegment{
		MessageID:    messageID,
		Data:         append([]byte(nil), data.Body...), // private copy of the payload
		DecodedSize:  size,
		DeclaredSize: declaredSize,
		CachedAt:     now,
		AccessCount:  1,
		LastAccess:   now,
	}
	// LoadAndStore lets us release the replaced entry's bytes from the
	// accounting when a messageID is stored twice.
	if old, loaded := sc.cache.LoadAndStore(messageID, segment); loaded {
		sc.currentSize.Add(-int64(len(old.Data)))
	}
	sc.currentSize.Add(size)
}
// evictLRU evicts least-recently-used segments until at least neededSpace
// bytes have been freed (or the cache is empty), keeping currentSize in
// sync with the evictions.
func (sc *SegmentCache) evictLRU(neededSpace int64) {
	if neededSpace <= 0 {
		return // No need to evict if no space is needed
	}
	if sc.cache.Size() == 0 {
		return // Nothing to evict
	}
	// Snapshot the cache so candidates can be ordered by last access time.
	type segmentInfo struct {
		key        string
		segment    *CachedSegment
		lastAccess time.Time
	}
	segments := make([]segmentInfo, 0, sc.cache.Size())
	sc.cache.Range(func(key string, value *CachedSegment) bool {
		segments = append(segments, segmentInfo{
			key:        key,
			segment:    value,
			lastAccess: value.LastAccess,
		})
		return true // continue iteration
	})
	// Sort by last access time (oldest first).
	for i := 0; i < len(segments)-1; i++ {
		for j := i + 1; j < len(segments); j++ {
			if segments[i].lastAccess.After(segments[j].lastAccess) {
				segments[i], segments[j] = segments[j], segments[i]
			}
		}
	}
	// Evict segments until we have freed enough space.
	freedSpace := int64(0)
	for _, seg := range segments {
		if freedSpace >= neededSpace {
			break
		}
		// LoadAndDelete avoids double-counting if another goroutine removed
		// the entry concurrently.
		if removed, ok := sc.cache.LoadAndDelete(seg.key); ok {
			size := int64(len(removed.Data))
			freedSpace += size
			// BUG FIX: the original never decremented currentSize on
			// eviction, so the accounted size only grew and the cache
			// eventually evicted on every Put.
			sc.currentSize.Add(-size)
		}
	}
}
// Clear removes all cached segments and resets the size accounting.
func (sc *SegmentCache) Clear() {
	sc.cache.Clear()
	sc.currentSize.Store(0)
}
// Delete removes a specific segment from the cache and releases its bytes
// from the size accounting (the original skipped the decrement, inflating
// currentSize forever).
func (sc *SegmentCache) Delete(messageID string) {
	if segment, ok := sc.cache.LoadAndDelete(messageID); ok {
		sc.currentSize.Add(-int64(len(segment.Data)))
	}
}

281
pkg/usenet/downloader.go Normal file
View File

@@ -0,0 +1,281 @@
package usenet
import (
"context"
"errors"
"fmt"
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/logger"
"github.com/sirrobot01/decypharr/internal/nntp"
"github.com/sirrobot01/decypharr/internal/utils"
"golang.org/x/sync/errgroup"
"os"
"path/filepath"
"time"
)
// DownloadWorker manages concurrent NZB downloads
type DownloadWorker struct {
	client       *nntp.Client // pooled NNTP connections
	processor    *Processor   // owning processor; provides the NZB store
	logger       zerolog.Logger
	skipPreCache bool   // Skip pre-caching for faster processing
	mountFolder  string // Folder where downloads are mounted
}
// DownloadJob represents a download job for an NZB
type DownloadJob struct {
	NZB         *NZB
	Action      string // "download", "symlink" or "none"; anything else defaults to symlink
	Priority    int
	Callback    func(*NZB, error) // invoked when Process finishes, if non-nil
	DownloadDir string            // target directory for the action
}
// NewDownloadWorker builds a download worker bound to the given NNTP client
// and processor, taking its tuning options from the usenet config.
func NewDownloadWorker(config *config.Usenet, client *nntp.Client, processor *Processor) *DownloadWorker {
	return &DownloadWorker{
		processor:    processor,
		client:       client,
		logger:       logger.New("usenet-download-worker"),
		skipPreCache: config.SkipPreCache,
		mountFolder:  config.MountFolder,
	}
}
// CheckAvailability probes a sample of the NZB's first file's segments on
// the NNTP servers to decide whether the post is retrievable. Returns an
// error when the NZB has no files/segments or any sampled segment is
// missing.
func (dw *DownloadWorker) CheckAvailability(ctx context.Context, job *DownloadJob) error {
	dw.logger.Debug().
		Str("nzb_id", job.NZB.ID).
		Msg("Checking NZB availability")
	// Guard against empty NZBs: the original indexed Files[0] unchecked and
	// would panic on an NZB with no files.
	if len(job.NZB.Files) == 0 {
		return fmt.Errorf("no files found in NZB")
	}
	// Grab first file to extract message IDs
	firstFile := job.NZB.Files[0]
	if len(firstFile.Segments) == 0 {
		return fmt.Errorf("no segments found in first file of NZB")
	}
	// Smart sampling: probe a representative subset instead of every segment.
	samplesToCheck := dw.getSampleSegments(firstFile.Segments)
	g, gCtx := errgroup.WithContext(ctx)
	// Bound concurrency by the smallest provider connection limit so the
	// NNTP servers are not overwhelmed; keep it at least 1 so SetLimit
	// cannot deadlock the group.
	maxConcurrency := len(samplesToCheck)
	if maxConns := dw.client.MinimumMaxConns(); maxConns < maxConcurrency {
		maxConcurrency = maxConns
	}
	if maxConcurrency < 1 {
		maxConcurrency = 1
	}
	g.SetLimit(maxConcurrency)
	// Check each segment concurrently
	for i, segment := range samplesToCheck {
		segment := segment // capture loop variable (pre-Go 1.22 semantics)
		segmentNum := i + 1
		g.Go(func() error {
			select {
			case <-gCtx.Done():
				return gCtx.Err() // Return if context is canceled
			default:
			}
			conn, cleanup, err := dw.client.GetConnection(gCtx)
			if err != nil {
				return fmt.Errorf("failed to get NNTP connection: %w", err)
			}
			defer cleanup() // Ensure connection is returned to the pool
			// Check segment availability
			seg, err := conn.GetSegment(segment.MessageID, segmentNum)
			if err != nil {
				return fmt.Errorf("failed to check segment %d availability: %w", segmentNum, err)
			}
			if seg == nil {
				return fmt.Errorf("segment %d not found", segmentNum)
			}
			return nil
		})
	}
	// Wait for all checks to complete
	if err := g.Wait(); err != nil {
		return fmt.Errorf("availability check failed: %w", err)
	}
	// Persist availability info; failure here is logged but non-fatal.
	if err := dw.processor.store.Update(job.NZB); err != nil {
		dw.logger.Warn().Err(err).Msg("Failed to update NZB with availability info")
	}
	return nil
}
// Process executes a download job according to job.Action and, on success,
// marks the NZB as completed in the store. If job.Callback is set it is
// invoked with the final error once processing finishes.
//
// The named `err` result matters: the original `defer func(err error)`
// captured err by value at defer time, so the callback ALWAYS received nil
// regardless of the outcome.
func (dw *DownloadWorker) Process(ctx context.Context, job *DownloadJob) (err error) {
	defer func() {
		if job.Callback != nil {
			job.Callback(job.NZB, err) // reads the final value of err
		}
	}()
	var finalPath string
	switch job.Action {
	case "download":
		finalPath, err = dw.downloadNZB(ctx, job)
	case "symlink":
		finalPath, err = dw.symlinkNZB(ctx, job)
	case "none":
		return nil
	default:
		// Use symlink as default action
		finalPath, err = dw.symlinkNZB(ctx, job)
	}
	if err != nil {
		return err
	}
	if finalPath == "" {
		err = fmt.Errorf("final path is empty after processing job: %s", job.Action)
		return err
	}
	// Use atomic transition to completed state
	err = dw.processor.store.MarkAsCompleted(job.NZB.ID, finalPath)
	return err
}
// downloadNZB is the "download" action: fetch the NZB's contents into
// job.DownloadDir. Currently a stub that only logs and returns the target
// directory without downloading anything (see TODO).
func (dw *DownloadWorker) downloadNZB(ctx context.Context, job *DownloadJob) (string, error) {
	dw.logger.Info().
		Str("nzb_id", job.NZB.ID).
		Str("download_dir", job.DownloadDir).
		Msg("Starting NZB download")
	// TODO: implement download logic
	return job.DownloadDir, nil
}
// getSampleSegments returns a smart sample of segments to probe for
// availability: the first, a middle, and the last segment. NZBs with at
// most two segments are checked in full. (The old header named the wrong
// function and promised middle segments that were never actually sampled;
// this matches the "smart sampling" description at the call site.)
func (dw *DownloadWorker) getSampleSegments(segments []NZBSegment) []NZBSegment {
	total := len(segments)
	// For small NZBs, check all segments.
	if total <= 2 {
		return segments
	}
	// First and last catch truncated posts; the middle one catches partial
	// propagation.
	return []NZBSegment{segments[0], segments[total/2], segments[total-1]}
}
// symlinkNZB exposes the NZB's files by symlinking them from the rclone
// mount into the requested download directory, returning the symlink
// directory path.
func (dw *DownloadWorker) symlinkNZB(ctx context.Context, job *DownloadJob) (string, error) {
	// Validate before logging or joining paths: the original dereferenced
	// job.NZB.ID in the log call before its nil check, and tested the
	// joined mount path for emptiness (never true once Name is appended)
	// instead of the configured mount folder itself.
	if job.NZB == nil {
		return "", fmt.Errorf("NZB is nil")
	}
	if dw.mountFolder == "" {
		return "", fmt.Errorf("mount folder is empty")
	}
	dw.logger.Info().
		Str("nzb_id", job.NZB.ID).
		Str("symlink_dir", job.DownloadDir).
		Msg("Creating symlinks for NZB")
	mountFolder := filepath.Join(dw.mountFolder, job.NZB.Name)  // e.g. /mnt/rclone/usenet/__all__/TV_SHOW
	symlinkPath := filepath.Join(job.DownloadDir, job.NZB.Name) // e.g. /mnt/symlinks/usenet/sonarr/TV_SHOW
	if err := os.MkdirAll(symlinkPath, 0755); err != nil {
		return "", fmt.Errorf("failed to create symlink directory: %w", err)
	}
	return dw.createSymlinksWebdav(job.NZB, mountFolder, symlinkPath)
}
// createSymlinksWebdav waits for the NZB's files to appear under the
// WebDAV/rclone mount at mountPath and symlinks each one into symlinkPath.
// It polls the mount every 100ms (files may materialize lazily) and gives
// up after 30 minutes. Unless skipPreCache is set, the created symlinks are
// pre-cached in a background goroutine. Returns symlinkPath, with an error
// only when the timeout elapsed first.
func (dw *DownloadWorker) createSymlinksWebdav(nzb *NZB, mountPath, symlinkPath string) (string, error) {
	files := nzb.GetFiles()
	// Files still waiting to show up in the mount, keyed by file name.
	remainingFiles := make(map[string]NZBFile)
	for _, file := range files {
		remainingFiles[file.Name] = file
	}
	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop()
	timeout := time.After(30 * time.Minute)
	filePaths := make([]string, 0, len(files))
	maxLogCount := 10 // Limit the number of log messages to avoid flooding
	for len(remainingFiles) > 0 {
		select {
		case <-ticker.C:
			entries, err := os.ReadDir(mountPath)
			if err != nil {
				if maxLogCount > 0 && !errors.Is(err, os.ErrNotExist) {
					// Only log if it's not a "not found" error
					// This is due to the fact the mount path may not exist YET
					dw.logger.Warn().
						Err(err).
						Str("mount_path", mountPath).
						Msg("Failed to read directory, retrying")
					maxLogCount--
				}
				continue
			}
			// Check which files exist in this batch
			for _, entry := range entries {
				filename := entry.Name()
				dw.logger.Info().
					Str("filename", filename).
					Msg("Checking file existence in mount path")
				if file, exists := remainingFiles[filename]; exists {
					fullFilePath := filepath.Join(mountPath, filename)
					fileSymlinkPath := filepath.Join(symlinkPath, file.Name)
					// An already-existing symlink counts as success so
					// retries are idempotent.
					if err := os.Symlink(fullFilePath, fileSymlinkPath); err != nil && !os.IsExist(err) {
						dw.logger.Debug().Msgf("Failed to create symlink: %s: %v", fileSymlinkPath, err)
					} else {
						filePaths = append(filePaths, fileSymlinkPath)
						delete(remainingFiles, filename)
						dw.logger.Info().Msgf("File is ready: %s", file.Name)
					}
				}
			}
		case <-timeout:
			dw.logger.Warn().Msgf("Timeout waiting for files, %d files still pending", len(remainingFiles))
			return symlinkPath, fmt.Errorf("timeout waiting for files")
		}
	}
	if dw.skipPreCache {
		return symlinkPath, nil
	}
	go func() {
		defer func() {
			if r := recover(); r != nil {
				dw.logger.Error().
					Interface("panic", r).
					Str("nzbName", nzb.Name).
					Msg("Recovered from panic in pre-cache goroutine")
			}
		}()
		if err := utils.PreCacheFile(filePaths); err != nil {
			dw.logger.Error().Msgf("Failed to pre-cache file: %s", err)
		} else {
			dw.logger.Debug().Msgf("Pre-cached %d files", len(filePaths))
		}
	}() // Pre-cache the files in the background
	// Pre-cache the first 256KB and 1MB of the file
	return symlinkPath, nil
}

353
pkg/usenet/errors.go Normal file
View File

@@ -0,0 +1,353 @@
package usenet
import (
"errors"
"fmt"
"net"
"strings"
"sync"
"time"
)
// Sentinel errors for NNTP transport failures. They are matched with
// errors.Is and whitelisted as retryable by DefaultRetryConfig.
var (
	ErrConnectionFailed  = errors.New("failed to connect to NNTP server")
	ErrServerUnavailable = errors.New("NNTP server unavailable")
	ErrRateLimitExceeded = errors.New("rate limit exceeded")
	ErrDownloadTimeout   = errors.New("download timeout")
)
// ErrInvalidNZBf creates a formatted error for NZB validation failures.
// The prefix is concatenated onto the format string (not pre-rendered),
// so a %w verb in format still wraps its argument for errors.Is/errors.As.
func ErrInvalidNZBf(format string, args ...interface{}) error {
	return fmt.Errorf("invalid NZB: "+format, args...)
}
// Error represents a structured usenet error
type Error struct {
Code string
Message string
Err error
ServerAddr string
Timestamp time.Time
Retryable bool
}
func (e *Error) Error() string {
if e.ServerAddr != "" {
return fmt.Sprintf("usenet error [%s] on %s: %s", e.Code, e.ServerAddr, e.Message)
}
return fmt.Sprintf("usenet error [%s]: %s", e.Code, e.Message)
}
func (e *Error) Unwrap() error {
return e.Err
}
func (e *Error) Is(target error) bool {
if target == nil {
return false
}
return e.Err != nil && errors.Is(e.Err, target)
}
// NewUsenetError builds an Error with no server context. Retryability is
// derived from the underlying cause via isRetryableError.
func NewUsenetError(code, message string, err error) *Error {
	return NewServerError(code, message, "", err)
}

// NewServerError builds an Error tied to a specific server address.
// Retryability is derived from the underlying cause via isRetryableError.
func NewServerError(code, message, serverAddr string, err error) *Error {
	return &Error{
		Code:       code,
		Message:    message,
		Err:        err,
		ServerAddr: serverAddr,
		Timestamp:  time.Now(),
		Retryable:  isRetryableError(err),
	}
}
// isRetryableError determines if an error is retryable
func isRetryableError(err error) bool {
if err == nil {
return false
}
// Network errors are generally retryable
var netErr net.Error
if errors.As(err, &netErr) {
return netErr.Timeout()
}
// DNS errors are retryable
var dnsErr *net.DNSError
if errors.As(err, &dnsErr) {
return dnsErr.Temporary()
}
// Connection refused is retryable
if errors.Is(err, net.ErrClosed) {
return true
}
// Check error message for retryable conditions
errMsg := strings.ToLower(err.Error())
retryableMessages := []string{
"connection refused",
"connection reset",
"connection timed out",
"network is unreachable",
"host is unreachable",
"temporary failure",
"service unavailable",
"server overloaded",
"rate limit",
"too many connections",
}
for _, msg := range retryableMessages {
if strings.Contains(errMsg, msg) {
return true
}
}
return false
}
// RetryConfig defines retry behavior: the attempt budget, delay bounds,
// backoff multiplier, and an explicit whitelist of retryable sentinel errors.
type RetryConfig struct {
	MaxRetries      int           // maximum retries after the initial attempt
	InitialDelay    time.Duration // delay before the first retry
	MaxDelay        time.Duration // upper bound on any computed delay
	BackoffFactor   float64       // multiplier used by GetDelay
	RetryableErrors []error       // sentinels always considered retryable (matched with errors.Is)
}
// DefaultRetryConfig returns the standard retry policy: up to 3 retries,
// delays bounded between 1s and 30s, a 2x backoff factor, and the package's
// transport sentinels whitelisted as retryable.
func DefaultRetryConfig() *RetryConfig {
	cfg := &RetryConfig{
		MaxRetries:    3,
		InitialDelay:  time.Second,
		MaxDelay:      30 * time.Second,
		BackoffFactor: 2.0,
	}
	cfg.RetryableErrors = []error{
		ErrConnectionFailed,
		ErrServerUnavailable,
		ErrRateLimitExceeded,
		ErrDownloadTimeout,
	}
	return cfg
}
// ShouldRetry reports whether err merits another attempt, given how many
// attempts have already been made.
func (rc *RetryConfig) ShouldRetry(err error, attempt int) bool {
	if attempt >= rc.MaxRetries {
		return false
	}
	// Structured usenet errors carry their own retryability verdict.
	var uerr *Error
	if errors.As(err, &uerr) {
		return uerr.Retryable
	}
	// Explicitly whitelisted sentinel errors are always retryable.
	for _, candidate := range rc.RetryableErrors {
		if errors.Is(err, candidate) {
			return true
		}
	}
	// Otherwise fall back to the generic transient-error heuristics.
	return isRetryableError(err)
}
// GetDelay returns the backoff delay preceding retry number attempt.
// The delay grows exponentially — InitialDelay * BackoffFactor^attempt —
// capped at MaxDelay. (The previous implementation multiplied by attempt,
// producing only linear growth and contradicting the exponential-backoff
// contract documented on RetryWithBackoff.)
func (rc *RetryConfig) GetDelay(attempt int) time.Duration {
	if attempt <= 0 {
		return rc.InitialDelay
	}
	// Multiply iteratively so we can cap early and avoid float overflow for
	// large attempt counts.
	delay := float64(rc.InitialDelay)
	for i := 0; i < attempt; i++ {
		delay *= rc.BackoffFactor
		if delay >= float64(rc.MaxDelay) {
			return rc.MaxDelay
		}
	}
	return time.Duration(delay)
}
// RetryWithBackoff runs operation, retrying with backoff per config until it
// succeeds, the retry budget is exhausted, or a non-retryable error occurs.
// Returns nil on success, otherwise the last error observed.
func RetryWithBackoff(config *RetryConfig, operation func() error) error {
	var lastErr error
	for attempt := 0; attempt <= config.MaxRetries; attempt++ {
		// Sleep only before retries, never before the initial attempt.
		if attempt > 0 {
			time.Sleep(config.GetDelay(attempt))
		}
		lastErr = operation()
		if lastErr == nil {
			return nil
		}
		if !config.ShouldRetry(lastErr, attempt) {
			return lastErr
		}
	}
	return lastErr
}
// CircuitBreakerConfig defines circuit breaker behavior.
type CircuitBreakerConfig struct {
	MaxFailures  int           // consecutive failures before the breaker opens
	ResetTimeout time.Duration // how long to stay open before probing again
	// CheckInterval is not referenced anywhere in this file.
	// NOTE(review): confirm it is consumed elsewhere or remove it.
	CheckInterval   time.Duration
	FailureCallback func(error) // invoked in a new goroutine on every failure, if set
}

// CircuitBreaker implements a circuit breaker pattern for NNTP connections.
// State transitions: "closed" (normal) -> "open" (rejecting calls) after
// MaxFailures, -> "half-open" (probing) after ResetTimeout, and back to
// "closed" on a successful probe.
type CircuitBreaker struct {
	config      *CircuitBreakerConfig
	failures    int       // consecutive failure count since last close/reset
	lastFailure time.Time // when the most recent failure occurred
	state       string    // "closed", "open", "half-open"
	mu          *sync.RWMutex // guards failures, lastFailure, and state
}
// NewCircuitBreaker constructs a breaker in the closed state. Passing a nil
// config selects the defaults: 5 failures, 60s reset timeout, 10s check
// interval.
func NewCircuitBreaker(config *CircuitBreakerConfig) *CircuitBreaker {
	cfg := config
	if cfg == nil {
		cfg = &CircuitBreakerConfig{
			MaxFailures:   5,
			ResetTimeout:  60 * time.Second,
			CheckInterval: 10 * time.Second,
		}
	}
	return &CircuitBreaker{
		config: cfg,
		state:  "closed",
		mu:     &sync.RWMutex{},
	}
}
// Execute runs operation through the circuit breaker. While the breaker is
// open and the reset timeout has not elapsed, the call is rejected immediately
// with a "circuit_breaker_open" error wrapping ErrServerUnavailable. Once the
// timeout elapses the breaker moves to half-open and lets calls through; a
// success closes it again, a failure counts toward reopening.
//
// NOTE(review): state is read under RLock and then upgraded to a write lock,
// so two goroutines can both observe "open", both flip to "half-open", and
// several probes may run concurrently — confirm this check-then-act window is
// acceptable for the intended use.
func (cb *CircuitBreaker) Execute(operation func() error) error {
	// Snapshot state under the read lock; decisions below use the snapshot.
	cb.mu.RLock()
	state := cb.state
	failures := cb.failures
	lastFailure := cb.lastFailure
	cb.mu.RUnlock()
	// Check if we should attempt reset
	if state == "open" && time.Since(lastFailure) > cb.config.ResetTimeout {
		cb.mu.Lock()
		cb.state = "half-open"
		cb.mu.Unlock()
		state = "half-open"
	}
	if state == "open" {
		return NewUsenetError("circuit_breaker_open",
			fmt.Sprintf("circuit breaker is open (failures: %d)", failures),
			ErrServerUnavailable)
	}
	err := operation()
	cb.mu.Lock()
	defer cb.mu.Unlock()
	if err != nil {
		cb.failures++
		cb.lastFailure = time.Now()
		// Trip the breaker once the failure budget is exhausted.
		if cb.failures >= cb.config.MaxFailures {
			cb.state = "open"
		}
		// Fire-and-forget notification so a slow callback cannot block the
		// caller (the write lock is still held at this point).
		if cb.config.FailureCallback != nil {
			go func() {
				cb.config.FailureCallback(err)
			}()
		}
		return err
	}
	// Success - reset if we were in half-open state
	if cb.state == "half-open" {
		cb.state = "closed"
		cb.failures = 0
	}
	return nil
}
// GetState returns the breaker's current state: "closed", "open", or
// "half-open".
func (cb *CircuitBreaker) GetState() string {
	cb.mu.RLock()
	state := cb.state
	cb.mu.RUnlock()
	return state
}

// Reset forces the breaker back to closed and clears the failure count.
func (cb *CircuitBreaker) Reset() {
	cb.mu.Lock()
	cb.state = "closed"
	cb.failures = 0
	cb.mu.Unlock()
}
// ValidationError describes a single failed validation check.
type ValidationError struct {
	Field   string      // name of the offending field
	Value   interface{} // the value (or a summary of it) that failed validation
	Message string      // human-readable explanation
}

// Error implements the error interface.
func (e *ValidationError) Error() string {
	return "validation error for field '" + e.Field + "': " + e.Message
}
// ValidateNZBContent sanity-checks raw NZB bytes: non-empty, under the 100MB
// cap, and containing an "<nzb" opening tag.
func ValidateNZBContent(content []byte) error {
	const maxSize = 100 * 1024 * 1024 // 100MB limit
	switch {
	case len(content) == 0:
		return &ValidationError{
			Field:   "content",
			Value:   len(content),
			Message: "NZB content cannot be empty",
		}
	case len(content) > maxSize:
		return &ValidationError{
			Field:   "content",
			Value:   len(content),
			Message: "NZB content exceeds maximum size limit (100MB)",
		}
	}
	contentStr := string(content)
	if strings.Contains(contentStr, "<nzb") {
		return nil
	}
	// Include a short prefix of the payload in the error to aid debugging.
	preview := contentStr
	if len(preview) > 100 {
		preview = preview[:100]
	}
	return &ValidationError{
		Field:   "content",
		Value:   preview,
		Message: "content does not appear to be valid NZB format",
	}
}

83
pkg/usenet/misc.go Normal file
View File

@@ -0,0 +1,83 @@
package usenet
import (
"io"
"strings"
)
// isSkippableError reports whether a streaming error is benign — normal end
// of stream or the client going away — and can be silently ignored.
func (s *Streamer) isSkippableError(err error) bool {
	if err == nil {
		return false
	}
	// Plain EOF marks a normal end of stream.
	if err == io.EOF {
		return true
	}
	msg := strings.ToLower(err.Error())
	skippable := []string{
		"client disconnected",
		"broken pipe",
		"connection reset",
		"write failed",
		"writer is nil",
		"closed pipe",
		"context canceled",
		"operation timed out",
		"eof",
	}
	for _, needle := range skippable {
		if strings.Contains(msg, needle) {
			return true
		}
	}
	return false
}
// RecalculateSegmentBoundaries rebuilds Start/End offsets for a segment list
// using actual decoded sizes where known (keyed by message ID) and falling
// back to the parser's estimate (EndOffset - StartOffset) otherwise.
// The input slice is left untouched; a new slice is returned.
func RecalculateSegmentBoundaries(
	segments []NZBSegment,
	actualSizes map[string]int64,
) []NZBSegment {
	if len(segments) == 0 {
		return segments
	}
	out := make([]NZBSegment, len(segments))
	offset := int64(0)
	for i := range segments {
		out[i] = segments[i]
		size, known := actualSizes[segments[i].MessageID]
		if !known {
			// Fall back to the decoded size the parser estimated.
			size = segments[i].EndOffset - segments[i].StartOffset
		}
		out[i].StartOffset = offset
		out[i].EndOffset = offset + size
		offset += size
	}
	return out
}
// GetSegmentActualSizes reads decoded segment sizes out of the cache, keyed by
// message ID. Segments absent from the cache are omitted; a nil cache yields
// an empty map.
func GetSegmentActualSizes(segments []NZBSegment, cache *SegmentCache) map[string]int64 {
	sizes := make(map[string]int64)
	if cache == nil {
		return sizes
	}
	for i := range segments {
		id := segments[i].MessageID
		if entry, ok := cache.Get(id); ok {
			sizes[id] = int64(len(entry.Data))
		}
	}
	return sizes
}

152
pkg/usenet/nzb.go Normal file
View File

@@ -0,0 +1,152 @@
package usenet
import (
"fmt"
"strings"
)
// SegmentRange maps a byte range of the assembled file onto one segment:
// ByteStart/ByteEnd are offsets within the segment's decoded data, while
// TotalStart/TotalEnd are the corresponding absolute file offsets.
// All bounds are inclusive.
type SegmentRange struct {
	Segment    NZBSegment // Reference to the segment
	ByteStart  int64      // Start offset within this segment
	ByteEnd    int64      // End offset within this segment
	TotalStart int64      // Absolute start position in file
	TotalEnd   int64      // Absolute end position in file
}
// GetFileByName returns a pointer to the first non-deleted file with the given
// name, or nil when no such file exists. The pointer aliases nzb.Files.
func (nzb *NZB) GetFileByName(name string) *NZBFile {
	for i := range nzb.Files {
		candidate := &nzb.Files[i]
		if !candidate.IsDeleted && candidate.Name == name {
			return candidate
		}
	}
	return nil
}
// MarkFileAsRemoved soft-deletes the named file by setting its IsDeleted flag.
// Returns an error when the file is not present in this NZB.
func (nzb *NZB) MarkFileAsRemoved(fileName string) error {
	for i := range nzb.Files {
		if nzb.Files[i].Name != fileName {
			continue
		}
		nzb.Files[i].IsDeleted = true
		return nil
	}
	return fmt.Errorf("file %s not found in NZB %s", fileName, nzb.ID)
}
// GetSegmentsInRange returns the segments overlapping the byte range
// [start, end] of the decoded file, each paired with the intersection of the
// requested range expressed both as intra-segment and absolute offsets.
// Passing end == -1 means "through the last byte of the file".
//
// segmentSize is the uniform decoded segment size; when <= 0 it is seeded
// from the first segment's Bytes. NOTE(review): once seeded, the fallback
// value persists for all subsequent iterations, and the last segment always
// substitutes its own Bytes — confirm the mix of estimated vs. actual sizes
// is intended.
func (nf *NZBFile) GetSegmentsInRange(segmentSize int64, start, end int64) []SegmentRange {
	if end == -1 {
		end = nf.Size - 1
	}
	var segmentRanges []SegmentRange
	var cumulativeSize int64
	for i, segment := range nf.Segments {
		// Use the file's segment size (uniform)
		if segmentSize <= 0 {
			segmentSize = segment.Bytes // Fallback to actual segment size if not set
		}
		// Handle last segment which might be smaller
		if i == len(nf.Segments)-1 {
			segmentSize = segment.Bytes // Last segment uses actual size
		}
		cumulativeSize += segmentSize
		// Skip segments that end before our start position
		if cumulativeSize <= start {
			continue
		}
		// Calculate this segment's boundaries
		segmentStart := cumulativeSize - segmentSize
		segmentEnd := cumulativeSize - 1
		// Calculate intersection with requested range
		rangeStart := max(start, segmentStart)
		rangeEnd := min(end, segmentEnd)
		segmentRange := SegmentRange{
			Segment:    segment,
			ByteStart:  rangeStart - segmentStart, // Offset within segment
			ByteEnd:    rangeEnd - segmentStart,   // End offset within segment
			TotalStart: rangeStart,                // Absolute position
			TotalEnd:   rangeEnd,                  // Absolute position
		}
		segmentRanges = append(segmentRanges, segmentRange)
		// Stop if we've covered the entire requested range
		if cumulativeSize >= end+1 {
			break
		}
	}
	return segmentRanges
}
// ConvertToSegmentRanges wraps each segment in a SegmentRange covering it in
// full, deriving absolute offsets from the file's uniform SegmentSize; the
// final segment uses its actual byte count since it is usually shorter.
func (nf *NZBFile) ConvertToSegmentRanges(segments []NZBSegment) []SegmentRange {
	var ranges []SegmentRange
	offset := int64(0)
	last := len(segments) - 1
	for i, seg := range segments {
		size := nf.SegmentSize
		if i == last {
			// The trailing segment carries whatever bytes remain.
			size = seg.Bytes
		}
		ranges = append(ranges, SegmentRange{
			Segment:    seg,
			ByteStart:  0,               // covers the segment from its first byte
			ByteEnd:    size - 1,        // ... through its last
			TotalStart: offset,          // absolute start position
			TotalEnd:   offset + size - 1, // absolute end position
		})
		offset += size
	}
	return ranges
}
// GetCacheKey returns a cache key for this file's extracted RAR contents,
// derived from the file's name and decoded size.
func (nf *NZBFile) GetCacheKey() string {
	return fmt.Sprintf("rar_%s_%d", nf.Name, nf.Size)
}
// GetFiles returns the non-deleted files as a freshly allocated slice of
// copies; mutating the result does not touch nzb.Files.
func (nzb *NZB) GetFiles() []NZBFile {
	live := make([]NZBFile, 0, len(nzb.Files))
	for i := range nzb.Files {
		if nzb.Files[i].IsDeleted {
			continue
		}
		live = append(live, nzb.Files[i])
	}
	// Full-slice expression pins capacity so a caller's append cannot grow
	// into this backing array.
	return live[:len(live):len(live)]
}
// ValidateNZB performs a cheap structural sanity check on raw NZB bytes:
// non-empty, contains an opening <nzb> tag, and declares at least one <file>.
// It does not parse the XML; use the parser for full validation.
func ValidateNZB(content []byte) error {
	if len(content) == 0 {
		return fmt.Errorf("empty NZB content")
	}
	// Convert once: content may be large, and every []byte->string
	// conversion copies the whole payload (the previous code converted
	// twice, doubling the allocation).
	doc := string(content)
	if !strings.Contains(doc, "<nzb") {
		return fmt.Errorf("invalid NZB format: missing <nzb> tag")
	}
	if !strings.Contains(doc, "<file") {
		return fmt.Errorf("invalid NZB format: no files found")
	}
	return nil
}

863
pkg/usenet/parser.go Normal file
View File

@@ -0,0 +1,863 @@
package usenet
import (
"bytes"
"context"
"crypto/sha256"
"encoding/hex"
"fmt"
"github.com/Tensai75/nzbparser"
"github.com/chrisfarms/yenc"
"github.com/nwaples/rardecode/v2"
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/nntp"
"github.com/sirrobot01/decypharr/internal/utils"
"github.com/sourcegraph/conc/pool"
"io"
"path"
"path/filepath"
"regexp"
"sort"
"strings"
"sync"
)
// NZBParser provides a simplified, robust NZB parser: it groups multi-part
// posts into logical files, classifies them (by name, then by downloaded
// content), and builds per-file segment layouts for streaming.
type NZBParser struct {
	logger zerolog.Logger
	client *nntp.Client  // NNTP client used to fetch headers/segments during parsing
	cache  *SegmentCache // decoded-segment cache consulted for actual sizes
}

// FileGroup collects all multi-part pieces belonging to one logical file.
type FileGroup struct {
	BaseName       string // shared base name the parts were grouped under
	ActualFilename string // real filename recovered by content detection ("" if unknown)
	Type           FileType
	Files          []nzbparser.NzbFile // the grouped parts
	Groups         map[string]struct{} // union of newsgroups the parts were posted to
}

// FileInfo carries sizes derived from downloaded yEnc part headers.
type FileInfo struct {
	Size      int64  // total decoded file size
	ChunkSize int64  // decoded size of a non-final segment
	Name      string // filename taken from the yEnc header
}
// NewNZBParser wires up a parser with its NNTP client, segment cache, and a
// component-tagged logger.
func NewNZBParser(client *nntp.Client, cache *SegmentCache, logger zerolog.Logger) *NZBParser {
	p := &NZBParser{
		client: client,
		cache:  cache,
	}
	p.logger = logger.With().Str("component", "nzb_parser").Logger()
	return p
}
// FileType classifies what a grouped post contains.
type FileType int

const (
	FileTypeMedia   FileType = iota // Direct media files (.mkv, .mp4, etc.) // Check internal/utils.IsMediaFile
	FileTypeRar                     // RAR archives (.rar, .r00, .r01, etc.)
	FileTypeArchive                 // Other archives (.7z, .zip, etc.) — dropped by processFileGroup
	FileTypeIgnore                  // Files to ignore (.nfo, .txt, par2 etc.)
	FileTypeUnknown                 // needs content-based detection
)

var (
	// RAR file patterns - simplified and more accurate
	rarMainPattern   = regexp.MustCompile(`\.rar$`)
	rarPartPattern   = regexp.MustCompile(`\.r\d{2}$`) // .r00, .r01, etc.
	rarVolumePattern = regexp.MustCompile(`\.part\d+\.rar$`)
	// Extensions carrying no playable payload.
	ignoreExtensions  = []string{".par2", ".sfv", ".nfo", ".jpg", ".png", ".txt", ".srt", ".idx", ".sub"}
	sevenZMainPattern = regexp.MustCompile(`\.7z$`)
	sevenZPartPattern = regexp.MustCompile(`\.7z\.\d{3}$`) // .7z.001, .7z.002, ...
	// Suffix-stripping patterns consulted by getBaseFilename.
	extWithNumberPattern = regexp.MustCompile(`\.[^ "\.]*\.\d+$`)          // numbered extensions (.abc.001)
	volPar2Pattern       = regexp.MustCompile(`(?i)\.vol\d+\+\d+\.par2?$`) // PAR2 recovery volumes
	partPattern          = regexp.MustCompile(`(?i)\.part\d+\.[^ "\.]*$`)  // .partNN.<ext>
	regularExtPattern    = regexp.MustCompile(`\.[^ "\.]*$`)               // plain trailing extension
)
type PositionTracker struct {
reader io.Reader
position int64
}
func (pt *PositionTracker) Read(p []byte) (n int, err error) {
n, err = pt.reader.Read(p)
pt.position += int64(n)
return n, err
}
func (pt *PositionTracker) Position() int64 {
return pt.position
}
// Parse turns raw NZB XML into the internal NZB model: it parses the XML,
// groups multi-part files, classifies them (media/RAR/ignore), builds segment
// layouts, and assigns a content-derived ID.
func (p *NZBParser) Parse(ctx context.Context, filename string, category string, content []byte) (*NZB, error) {
	// Parse raw XML
	raw, err := nzbparser.Parse(bytes.NewReader(content))
	if err != nil {
		return nil, fmt.Errorf("failed to parse NZB content: %w", err)
	}
	// Create base NZB structure
	nzb := &NZB{
		Files:    []NZBFile{},
		Status:   "parsed",
		Category: category,
		Name:     determineNZBName(filename, raw.Meta),
		Title:    raw.Meta["title"],
		Password: raw.Meta["password"],
	}
	// Group files by base name and type, then convert each group.
	fileGroups := p.groupFiles(ctx, raw.Files)
	files := p.processFileGroups(ctx, fileGroups, nzb.Password)
	if len(files) == 0 {
		return nil, fmt.Errorf("no valid files found in NZB")
	}
	// Accumulate the total size BEFORE generating the ID: generateID hashes
	// TotalSize, so the previous ordering always hashed a zero size and
	// produced colliding IDs for same-named NZBs of different sizes.
	for i := range files {
		nzb.TotalSize += files[i].Size
	}
	nzb.ID = generateID(nzb)
	for i := range files {
		files[i].NzbID = nzb.ID
		nzb.Files = append(nzb.Files, files[i])
	}
	return nzb, nil
}
// groupFiles partitions the raw NZB files into logical groups. Files whose
// type is recognizable from the filename are classified directly; the rest
// are classified by downloading their first segment (batchDetectContentTypes)
// before everything is grouped by base name.
func (p *NZBParser) groupFiles(ctx context.Context, files nzbparser.NzbFiles) map[string]*FileGroup {
	var unknownFiles []nzbparser.NzbFile
	var knownFiles []struct {
		file     nzbparser.NzbFile
		fileType FileType
	}
	for _, file := range files {
		// Files without segments cannot be downloaded; drop them here.
		if len(file.Segments) == 0 {
			continue
		}
		fileType := p.detectFileType(file.Filename)
		if fileType == FileTypeUnknown {
			unknownFiles = append(unknownFiles, file)
		} else {
			knownFiles = append(knownFiles, struct {
				file     nzbparser.NzbFile
				fileType FileType
			}{file, fileType})
		}
	}
	p.logger.Info().
		Int("known_files", len(knownFiles)).
		Int("unknown_files", len(unknownFiles)).
		Msg("File type detection")
	// Content-based detection for everything the filename didn't identify.
	unknownResults := p.batchDetectContentTypes(ctx, unknownFiles)
	allFiles := make([]struct {
		file           nzbparser.NzbFile
		fileType       FileType
		actualFilename string
	}, 0, len(knownFiles)+len(unknownResults))
	// Add known files; their declared filename is taken as the actual one.
	for _, known := range knownFiles {
		allFiles = append(allFiles, struct {
			file           nzbparser.NzbFile
			fileType       FileType
			actualFilename string
		}{known.file, known.fileType, known.file.Filename})
	}
	// Add unknown results
	allFiles = append(allFiles, unknownResults...)
	return p.groupProcessedFiles(allFiles)
}
// batchDetectContentTypes classifies files whose type could not be inferred
// from their names by downloading each file's first segment in parallel (at
// most 10 concurrent downloads). Files that remain FileTypeUnknown after
// content inspection are dropped from the result.
//
// NOTE(review): when workerPool.Wait returns an error (e.g. context
// cancellation, depending on conc's WithContext semantics), ALL detection
// results are silently discarded — confirm that is intended.
func (p *NZBParser) batchDetectContentTypes(ctx context.Context, unknownFiles []nzbparser.NzbFile) []struct {
	file           nzbparser.NzbFile
	fileType       FileType
	actualFilename string
} {
	if len(unknownFiles) == 0 {
		return nil
	}
	// Use worker pool for parallel processing
	workers := min(len(unknownFiles), 10) // Max 10 concurrent downloads
	workerPool := pool.New().WithMaxGoroutines(workers).WithContext(ctx)
	type result struct {
		index          int
		file           nzbparser.NzbFile
		fileType       FileType
		actualFilename string
	}
	results := make([]result, len(unknownFiles))
	var mu sync.Mutex
	// Process each unknown file
	for i, file := range unknownFiles {
		i, file := i, file // Capture loop variables
		workerPool.Go(func(ctx context.Context) error {
			detectedType, actualFilename := p.detectFileTypeByContent(ctx, file)
			// Each goroutine writes a distinct index; the mutex also makes
			// the writes visible to the collecting goroutine after Wait.
			mu.Lock()
			results[i] = result{
				index:          i,
				file:           file,
				fileType:       detectedType,
				actualFilename: actualFilename,
			}
			mu.Unlock()
			return nil // Don't fail the entire batch for one file
		})
	}
	// Wait for all to complete
	if err := workerPool.Wait(); err != nil {
		return nil
	}
	// Convert results, dropping anything still unidentified.
	processedFiles := make([]struct {
		file           nzbparser.NzbFile
		fileType       FileType
		actualFilename string
	}, 0, len(results))
	for _, result := range results {
		if result.fileType != FileTypeUnknown {
			processedFiles = append(processedFiles, struct {
				file           nzbparser.NzbFile
				fileType       FileType
				actualFilename string
			}{result.file, result.fileType, result.actualFilename})
		}
	}
	return processedFiles
}
// groupProcessedFiles buckets classified files into FileGroups keyed by their
// base filename. Ignorable files and non-RAR archives are skipped. When
// content detection recovered a different real filename, grouping uses that
// name's base instead of the posted one.
//
// NOTE(review): item.file.Filename is overwritten with actualFilename, which
// may be "" for content-detected files whose yEnc header carried no name —
// confirm downstream consumers tolerate an empty Filename.
func (p *NZBParser) groupProcessedFiles(allFiles []struct {
	file           nzbparser.NzbFile
	fileType       FileType
	actualFilename string
}) map[string]*FileGroup {
	groups := make(map[string]*FileGroup)
	for _, item := range allFiles {
		// Skip unwanted files
		if item.fileType == FileTypeIgnore || item.fileType == FileTypeArchive {
			continue
		}
		var groupKey string
		if item.actualFilename != "" && item.actualFilename != item.file.Filename {
			groupKey = p.getBaseFilename(item.actualFilename)
		} else {
			groupKey = item.file.Basefilename
		}
		group, exists := groups[groupKey]
		if !exists {
			group = &FileGroup{
				ActualFilename: item.actualFilename,
				BaseName:       groupKey,
				Type:           item.fileType,
				Files:          []nzbparser.NzbFile{},
				Groups:         make(map[string]struct{}),
			}
			groups[groupKey] = group
		}
		// Update filename
		item.file.Filename = item.actualFilename
		group.Files = append(group.Files, item.file)
		// Record every newsgroup any part of the group was posted to.
		for _, g := range item.file.Groups {
			group.Groups[g] = struct{}{}
		}
	}
	return groups
}
// getBaseFilename strips volume/part/extension suffixes to recover the base
// name shared by all pieces of a multi-part post. Patterns are tried from
// most to least specific, and only the first match is removed.
func (p *NZBParser) getBaseFilename(filename string) string {
	if filename == "" {
		return ""
	}
	// Drop surrounding quotes, spaces, and dashes first.
	cleaned := strings.Trim(filename, `" -`)
	suffixPatterns := []*regexp.Regexp{
		volPar2Pattern,       // .vol01+02.par2 (PAR2 recovery volumes)
		partPattern,          // .part01.rar style multi-part suffixes
		extWithNumberPattern, // numbered extensions: .7z.001, .abc.01, ...
		regularExtPattern,    // any ordinary trailing extension
	}
	for _, re := range suffixPatterns {
		if re.MatchString(cleaned) {
			return re.ReplaceAllString(cleaned, "")
		}
	}
	return cleaned
}
// detectFileType classifies a filename as media, RAR, other archive,
// ignorable metadata, or unknown. Media is checked before archive patterns so
// a media extension always wins.
func (p *NZBParser) detectFileType(filename string) FileType {
	lower := strings.ToLower(filename)
	switch {
	case p.isMediaFile(lower):
		return FileTypeMedia
	case p.isRarFile(lower):
		return FileTypeRar
	case sevenZMainPattern.MatchString(lower), sevenZPartPattern.MatchString(lower):
		return FileTypeArchive
	}
	for _, ext := range []string{".zip", ".tar", ".gz", ".bz2"} {
		if strings.HasSuffix(lower, ext) {
			return FileTypeArchive
		}
	}
	for _, ext := range ignoreExtensions {
		if strings.HasSuffix(lower, ext) {
			return FileTypeIgnore
		}
	}
	// Nothing matched; content-based detection will decide later.
	return FileTypeUnknown
}
// isRarFile reports whether the (lowercased) filename matches any RAR naming
// scheme: .rar, .rNN split volumes, or .partNN.rar volumes.
func (p *NZBParser) isRarFile(filename string) bool {
	return rarMainPattern.MatchString(filename) ||
		rarPartPattern.MatchString(filename) ||
		rarVolumePattern.MatchString(filename)
}

// isMediaFile reports whether the filename has a known media extension;
// the actual list lives in internal/utils.IsMediaFile.
func (p *NZBParser) isMediaFile(filename string) bool {
	return utils.IsMediaFile(filename)
}
// processFileGroups converts every file group into an NZBFile concurrently
// and returns the successful conversions. Result order is not guaranteed.
func (p *NZBParser) processFileGroups(ctx context.Context, groups map[string]*FileGroup, password string) []NZBFile {
	if len(groups) == 0 {
		return nil
	}
	var (
		mu    sync.Mutex
		files []NZBFile
		wg    sync.WaitGroup
	)
	for _, group := range groups {
		wg.Add(1)
		go func(g *FileGroup) {
			defer wg.Done()
			// processFileGroup returns nil for groups that should be dropped.
			if f := p.processFileGroup(ctx, g, password); f != nil {
				mu.Lock()
				files = append(files, *f)
				mu.Unlock()
			}
		}(group)
	}
	wg.Wait()
	return files
}
// processFileGroup dispatches a group to the handler for its type. Archives
// other than RAR are unsupported and dropped (nil); unknown types are treated
// as media with conservative size estimation.
func (p *NZBParser) processFileGroup(ctx context.Context, group *FileGroup, password string) *NZBFile {
	if group.Type == FileTypeRar {
		return p.processRarArchive(ctx, group, password)
	}
	if group.Type == FileTypeArchive {
		return nil
	}
	// FileTypeMedia and anything unclassified take the media path.
	return p.processMediaFile(group, password)
}
// processMediaFile builds an NZBFile for a group of directly-playable media
// parts. Segment offsets are estimated with a fixed yEnc overhead ratio, then
// corrected from downloaded yEnc headers when those are available.
func (p *NZBParser) processMediaFile(group *FileGroup, password string) *NZBFile {
	if len(group.Files) == 0 {
		return nil
	}
	// Sort files for consistent ordering
	sort.Slice(group.Files, func(i, j int) bool {
		return group.Files[i].Number < group.Files[j].Number
	})
	ext := p.determineExtension(group)
	file := &NZBFile{
		Name:         group.BaseName + ext,
		Groups:       p.getGroupsList(group.Groups),
		Segments:     []NZBSegment{},
		Password:     password,
		IsRarArchive: false,
	}
	// Guard: a post with no newsgroup listed must not panic on Groups[0]
	// (the previous code indexed it unconditionally).
	var primaryGroup string
	if len(file.Groups) > 0 {
		primaryGroup = file.Groups[0]
	}
	currentOffset := int64(0)
	const ratio = 0.968 // estimated decoded/encoded yEnc size ratio
	for _, nzbFile := range group.Files {
		sort.Slice(nzbFile.Segments, func(i, j int) bool {
			return nzbFile.Segments[i].Number < nzbFile.Segments[j].Number
		})
		for _, segment := range nzbFile.Segments {
			decodedSize := int64(float64(segment.Bytes) * ratio)
			file.Segments = append(file.Segments, NZBSegment{
				Number:      segment.Number,
				MessageID:   segment.Id,
				Bytes:       int64(segment.Bytes),
				StartOffset: currentOffset,
				EndOffset:   currentOffset + decodedSize,
				Group:       primaryGroup,
			})
			currentOffset += decodedSize
		}
	}
	// Prefer accurate sizes from server headers; fall back to the estimates.
	fileInfo, err := p.getFileInfo(context.Background(), group)
	if err != nil {
		p.logger.Warn().Err(err).Msg("Failed to get file info, using fallback")
		file.Size = currentOffset
		// Guard: avoid division by zero when no part contributed segments
		// (the previous code divided unconditionally).
		if n := int64(len(file.Segments)); n > 0 {
			file.SegmentSize = currentOffset / n // average segment size
		}
	} else {
		file.Size = fileInfo.Size
		file.SegmentSize = fileInfo.ChunkSize
	}
	return file
}
// processRarArchive builds an NZBFile representing the media file stored
// inside a multi-part RAR set. RAR headers are fetched up front (best-effort)
// to recover the inner file's real name, size, and data offset.
func (p *NZBParser) processRarArchive(ctx context.Context, group *FileGroup, password string) *NZBFile {
	if len(group.Files) == 0 {
		return nil
	}
	// Sort RAR parts lexically (.part01..., .r00/.r01..., etc.).
	sort.Slice(group.Files, func(i, j int) bool {
		return group.Files[i].Filename < group.Files[j].Filename
	})
	// Try to extract RAR info during parsing for better accuracy.
	extractedInfo := p.extractRarInfo(ctx, group, password)
	filename := group.BaseName + ".mkv" // default when headers are unreadable
	if extractedInfo != nil && extractedInfo.FileName != "" {
		filename = extractedInfo.FileName
	}
	filename = utils.RemoveInvalidChars(path.Base(filename))
	file := &NZBFile{
		Name:         filename,
		Groups:       p.getGroupsList(group.Groups),
		Segments:     []NZBSegment{},
		Password:     password,
		IsRarArchive: true,
	}
	// Guard: a post with no newsgroup listed must not panic on Groups[0]
	// (the previous code indexed it unconditionally).
	var primaryGroup string
	if len(file.Groups) > 0 {
		primaryGroup = file.Groups[0]
	}
	// Build segments with estimated decoded offsets.
	const ratio = 0.968 // estimated decoded/encoded yEnc size ratio
	currentOffset := int64(0)
	for _, nzbFile := range group.Files {
		sort.Slice(nzbFile.Segments, func(i, j int) bool {
			return nzbFile.Segments[i].Number < nzbFile.Segments[j].Number
		})
		for _, segment := range nzbFile.Segments {
			decodedSize := int64(float64(segment.Bytes) * ratio)
			file.Segments = append(file.Segments, NZBSegment{
				Number:      segment.Number,
				MessageID:   segment.Id,
				Bytes:       int64(segment.Bytes),
				StartOffset: currentOffset,
				EndOffset:   currentOffset + decodedSize,
				Group:       primaryGroup,
			})
			currentOffset += decodedSize
		}
	}
	if extractedInfo != nil {
		file.Size = extractedInfo.FileSize
		file.SegmentSize = extractedInfo.SegmentSize
		file.StartOffset = extractedInfo.EstimatedStartOffset
	} else {
		file.Size = currentOffset
		file.StartOffset = 0 // no accurate inner-file offset available
		// Guard: avoid division by zero when no part contributed segments
		// (the previous code divided unconditionally).
		if n := int64(len(file.Segments)); n > 0 {
			file.SegmentSize = currentOffset / n // average segment size
		}
	}
	return file
}
// getFileInfo derives the group's decoded size and uniform chunk size by
// downloading the yEnc headers of the first segment of the first part and the
// last segment of the last part. It assumes every part except the last shares
// the first part's decoded size.
func (p *NZBParser) getFileInfo(ctx context.Context, group *FileGroup) (*FileInfo, error) {
	if len(group.Files) == 0 {
		return nil, fmt.Errorf("no files in group %s", group.BaseName)
	}
	// Sort parts so "first" and "last" are well-defined.
	sort.Slice(group.Files, func(i, j int) bool {
		return group.Files[i].Filename < group.Files[j].Filename
	})
	firstFile := group.Files[0]
	lastFile := group.Files[len(group.Files)-1]
	// Guard: indexing Segments[0] / Segments[len-1] on an empty list would
	// panic (the previous code indexed unconditionally).
	if len(firstFile.Segments) == 0 || len(lastFile.Segments) == 0 {
		return nil, fmt.Errorf("group %s contains parts without segments", group.BaseName)
	}
	firstInfo, err := p.client.DownloadHeader(ctx, firstFile.Segments[0].Id)
	if err != nil {
		return nil, err
	}
	lastInfo, err := p.client.DownloadHeader(ctx, lastFile.Segments[len(lastFile.Segments)-1].Id)
	if err != nil {
		p.logger.Warn().Err(err).Msg("Failed to download last segment header")
		return nil, err
	}
	// Inclusive range arithmetic: size = End - Begin + 1.
	chunkSize := firstInfo.End - (firstInfo.Begin - 1)
	totalFileSize := (int64(len(group.Files)-1) * firstInfo.Size) + lastInfo.Size
	return &FileInfo{
		Size:      totalFileSize,
		ChunkSize: chunkSize,
		Name:      firstInfo.Name,
	}, nil
}
// extractRarInfo downloads the first few segments of the first RAR part and
// inspects the archive headers for the inner file's name, size, and offset.
// Returns nil when the headers cannot be fetched or analysed.
func (p *NZBParser) extractRarInfo(ctx context.Context, group *FileGroup, password string) *ExtractedFileInfo {
	if len(group.Files) == 0 || len(group.Files[0].Segments) == 0 {
		return nil
	}
	first := group.Files[0]
	count := min(5, len(first.Segments))
	headerBuffer, err := p.downloadRarHeaders(ctx, first.Segments[:count])
	if err != nil {
		p.logger.Warn().Err(err).Msg("Failed to download RAR headers")
		return nil
	}
	info, err := p.getFileInfo(ctx, group)
	if err != nil {
		p.logger.Warn().Err(err).Msg("Failed to get file info for RAR group")
		return nil
	}
	// Hand the real archive size to the analysis so it can gauge compression.
	return p.analyzeRarStructure(headerBuffer, password, info)
}
// analyzeRarStructure walks the RAR headers looking for the first media entry
// stored without meaningful compression (size ratio > 0.95, i.e. presumably
// "store" mode) and, if found, returns its name, unpacked size, and the byte
// offset where its data begins. Returns nil if no such entry is found or the
// headers cannot be read.
//
// NOTE(review): the offset is taken from the PositionTracker right after
// Next() returns, relying on rardecode having consumed exactly up to the file
// data — confirm this holds across rardecode/v2 versions.
func (p *NZBParser) analyzeRarStructure(headerData []byte, password string, fileInfo *FileInfo) *ExtractedFileInfo {
	reader := bytes.NewReader(headerData)
	// Track how many bytes rardecode consumes so we can recover offsets.
	tracker := &PositionTracker{reader: reader, position: 0}
	rarReader, err := rardecode.NewReader(tracker, rardecode.Password(password))
	if err != nil {
		return nil
	}
	for {
		header, err := rarReader.Next()
		if err != nil {
			// Includes io.EOF and truncated-header errors: give up quietly.
			break
		}
		if !header.IsDir && p.isMediaFile(header.Name) {
			compressionRatio := float64(fileInfo.Size) / float64(header.UnPackedSize)
			if compressionRatio > 0.95 {
				fileDataOffset := tracker.Position()
				p.logger.Info().
					Str("file", header.Name).
					Int64("accurate_offset", fileDataOffset).
					Float64("compression_ratio", compressionRatio).
					Msg("Found accurate store RAR offset using position tracking")
				return &ExtractedFileInfo{
					FileName:             header.Name,
					FileSize:             header.UnPackedSize,
					SegmentSize:          fileInfo.ChunkSize,
					EstimatedStartOffset: fileDataOffset,
				}
			}
			// A compressed media entry is unusable for direct streaming.
			break
		}
		// Skip file content - this advances the tracker position
		io.Copy(io.Discard, rarReader)
	}
	return nil
}
// determineExtension returns the first non-empty extension found among the
// group's filenames, defaulting to ".mkv" when none is present.
func (p *NZBParser) determineExtension(group *FileGroup) string {
	for _, f := range group.Files {
		if ext := filepath.Ext(f.Filename); ext != "" {
			return ext
		}
	}
	return ".mkv" // Default
}
// getGroupsList flattens a set of newsgroup names into a slice. Map iteration
// order is random, so element order is unspecified.
func (p *NZBParser) getGroupsList(groups map[string]struct{}) []string {
	out := make([]string, 0, len(groups))
	for name := range groups {
		out = append(out, name)
	}
	return out
}
// downloadRarHeaders fetches and yEnc-decodes segment bodies until at least
// 32KB of decoded archive-header data is buffered (or the segment list is
// exhausted). Individual segment failures are skipped when retryable; a
// non-retryable NNTP error aborts the whole download.
func (p *NZBParser) downloadRarHeaders(ctx context.Context, segments []nzbparser.NzbSegment) ([]byte, error) {
	var headerBuffer bytes.Buffer
	for _, segment := range segments {
		conn, cleanup, err := p.client.GetConnection(ctx)
		if err != nil {
			// Could not obtain a connection for this segment; try the next.
			continue
		}
		data, err := conn.GetBody(segment.Id)
		// Release the connection immediately; we only needed one article.
		cleanup()
		if err != nil {
			if !nntp.IsRetryableError(err) {
				return nil, err
			}
			continue
		}
		if len(data) == 0 {
			continue
		}
		// yEnc decode
		part, err := nntp.DecodeYenc(bytes.NewReader(data))
		if err != nil || part == nil || len(part.Body) == 0 {
			p.logger.Warn().Err(err).Str("segment_id", segment.Id).Msg("Failed to decode RAR header segment")
			continue
		}
		headerBuffer.Write(part.Body)
		// Stop if we have enough data (typically first segment is enough for headers)
		if headerBuffer.Len() > 32768 { // 32KB should be plenty for RAR headers
			break
		}
	}
	if headerBuffer.Len() == 0 {
		return nil, fmt.Errorf("no valid header data downloaded")
	}
	return headerBuffer.Bytes(), nil
}
// detectFileTypeByContent downloads the file's first segment and classifies
// it — first by the real filename embedded in the yEnc header, then by magic
// bytes. Returns the detected type and the recovered filename ("" if none).
func (p *NZBParser) detectFileTypeByContent(ctx context.Context, file nzbparser.NzbFile) (FileType, string) {
	if len(file.Segments) == 0 {
		return FileTypeUnknown, ""
	}
	part, err := p.downloadFirstSegment(ctx, file.Segments[0])
	if err != nil {
		p.logger.Warn().Err(err).Msg("Failed to download first segment for content detection")
		return FileTypeUnknown, ""
	}
	// The yEnc header often carries the real filename; trust it when it maps
	// to a known type.
	if part.Name != "" {
		if detected := p.detectFileType(part.Name); detected != FileTypeUnknown {
			return detected, part.Name
		}
	}
	// Otherwise fall back to sniffing the decoded payload.
	return p.detectFileTypeFromContent(part.Body), part.Name
}
// detectFileTypeFromContent classifies a decoded segment by magic bytes:
// RAR 4/5, ZIP, 7z, Matroska, MP4/MOV, AVI, MPEG PS/ES, and MPEG-TS.
// Returns FileTypeUnknown when no signature matches.
func (p *NZBParser) detectFileTypeFromContent(data []byte) FileType {
	if len(data) == 0 {
		return FileTypeUnknown
	}
	// Archive signatures.
	if len(data) >= 7 && bytes.Equal(data[:7], []byte("Rar!\x1A\x07\x00")) {
		return FileTypeRar // RAR 4.x
	}
	if len(data) >= 8 && bytes.Equal(data[:8], []byte("Rar!\x1A\x07\x01\x00")) {
		return FileTypeRar // RAR 5.x
	}
	if len(data) >= 4 && bytes.Equal(data[:4], []byte{0x50, 0x4B, 0x03, 0x04}) {
		return FileTypeArchive // ZIP
	}
	if len(data) >= 6 && bytes.Equal(data[:6], []byte{0x37, 0x7A, 0xBC, 0xAF, 0x27, 0x1C}) {
		return FileTypeArchive // 7z
	}
	// Media container signatures.
	if len(data) >= 4 && bytes.Equal(data[:4], []byte{0x1A, 0x45, 0xDF, 0xA3}) {
		return FileTypeMedia // Matroska (MKV/WebM)
	}
	if len(data) >= 8 && bytes.Equal(data[4:8], []byte("ftyp")) {
		return FileTypeMedia // MP4/MOV ('ftyp' at offset 4)
	}
	if len(data) >= 12 && bytes.Equal(data[:4], []byte("RIFF")) &&
		bytes.Equal(data[8:12], []byte("AVI ")) {
		return FileTypeMedia // AVI
	}
	if len(data) >= 4 && bytes.Equal(data[:4], []byte{0x00, 0x00, 0x01, 0xBA}) {
		return FileTypeMedia // MPEG-1/2 program stream
	}
	if len(data) >= 4 && bytes.Equal(data[:4], []byte{0x00, 0x00, 0x01, 0xB3}) {
		return FileTypeMedia // MPEG-1/2 video stream
	}
	// MPEG-TS: the 0x47 sync byte repeats every 188 bytes. Indexing data[188]
	// requires len(data) > 188 — the previous check used >= 188 and panicked
	// on a buffer of exactly 188 bytes.
	if data[0] == 0x47 && len(data) > 188 && data[188] == 0x47 {
		return FileTypeMedia
	}
	return FileTypeUnknown
}
// downloadFirstSegment fetches and yEnc-decodes the article body for the given
// segment using a pooled NNTP connection. The decoded part carries both the
// embedded filename and the raw bytes, which callers use for content sniffing.
func (p *NZBParser) downloadFirstSegment(ctx context.Context, segment nzbparser.NzbSegment) (*yenc.Part, error) {
	conn, cleanup, err := p.client.GetConnection(ctx)
	if err != nil {
		return nil, err
	}
	defer cleanup()
	data, err := conn.GetBody(segment.Id)
	if err != nil {
		return nil, err
	}
	// yEnc decode; preserve the underlying decode error instead of masking it.
	part, err := nntp.DecodeYenc(bytes.NewReader(data))
	if err != nil {
		return nil, fmt.Errorf("failed to decode segment %v: %w", segment.Id, err)
	}
	if part == nil {
		return nil, fmt.Errorf("decoded segment %v is empty", segment.Id)
	}
	// Return both the filename and decoded data.
	return part, nil
}
// calculateTotalArchiveSize sums the sizes of every segment of every RAR part
// in the group, i.e. the total on-the-wire archive size.
func (p *NZBParser) calculateTotalArchiveSize(group *FileGroup) int64 {
	var size int64
	for _, part := range group.Files {
		for _, seg := range part.Segments {
			size += int64(seg.Bytes)
		}
	}
	return size
}
// determineNZBName derives a display name for an NZB: the supplied filename
// (with its trailing extension stripped) when present, otherwise the "name"
// or "title" metadata entry. The result is sanitized of invalid characters.
func determineNZBName(filename string, meta map[string]string) string {
	if filename != "" {
		// Strip only the trailing extension. The previous strings.Replace
		// removed the FIRST occurrence of the extension anywhere in the
		// name (e.g. "show.mkv.sample.mkv" became "show.sample.mkv").
		filename = strings.TrimSuffix(filename, filepath.Ext(filename))
	} else if name := meta["name"]; name != "" {
		filename = name
	} else if title := meta["title"]; title != "" {
		filename = title
	}
	return utils.RemoveInvalidChars(filename)
}
// generateID derives a stable 16-hex-character identifier for an NZB from its
// name, total size, category and password.
func generateID(nzb *NZB) string {
	hasher := sha256.New()
	for _, field := range []string{
		nzb.Name,
		fmt.Sprintf("%d", nzb.TotalSize),
		nzb.Category,
		nzb.Password,
	} {
		hasher.Write([]byte(field))
	}
	return hex.EncodeToString(hasher.Sum(nil))[:16]
}

145
pkg/usenet/processor.go Normal file
View File

@@ -0,0 +1,145 @@
package usenet
import (
"context"
"fmt"
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/nntp"
"github.com/sirrobot01/decypharr/pkg/arr"
"path/filepath"
"time"
)
// Processor handles NZB processing and download orchestration
type Processor struct {
	store          Store           // NZB persistence (queue + history)
	parser         *NZBParser      // parses raw NZB XML into NZB models
	downloadWorker *DownloadWorker // executes download/symlink jobs
	logger         zerolog.Logger
	client         *nntp.Client // NNTP client shared with the worker and parser
}
// ProcessRequest represents a request to process an NZB
type ProcessRequest struct {
	NZBContent  []byte   // raw NZB XML document
	Name        string   // preferred name for the download
	Arr         *arr.Arr // originating *arr instance; its name becomes a subfolder
	Action      string   // "download", "symlink", "none"
	DownloadDir string   // base directory for downloaded content
}
// NewProcessor creates a new usenet processor
func NewProcessor(config *config.Usenet, logger zerolog.Logger, store Store, client *nntp.Client) (*Processor, error) {
	p := &Processor{
		store:  store,
		client: client,
		logger: logger.With().Str("component", "usenet-processor").Logger(),
	}
	// Wire up the download worker and NZB parser against the shared client.
	p.downloadWorker = NewDownloadWorker(config, client, p)
	p.parser = NewNZBParser(client, nil, p.logger)
	return p, nil
}
// Process validates and processes an NZB for download/streaming.
func (p *Processor) Process(ctx context.Context, req *ProcessRequest) (*NZB, error) {
	if len(req.NZBContent) == 0 {
		return nil, fmt.Errorf("NZB content is empty")
	}
	// Reject malformed documents before doing any network work.
	if err := ValidateNZB(req.NZBContent); err != nil {
		return nil, fmt.Errorf("invalid NZB content: %w", err)
	}
	nzb, err := p.process(ctx, req)
	if err != nil {
		p.logger.Error().Err(err).Msg("Failed to process NZB content")
		return nil, fmt.Errorf("failed to process NZB content: %w", err)
	}
	return nzb, nil
}
// process parses the NZB, dedupes against the store, persists/queues it, and
// submits an asynchronous download job. It returns either the pre-existing
// record or the freshly queued NZB.
func (p *Processor) process(ctx context.Context, req *ProcessRequest) (*NZB, error) {
	// req.Arr may be nil (the nil-check further down assumed so), but the
	// original code dereferenced it unconditionally when calling Parse.
	arrName := ""
	if req.Arr != nil {
		arrName = req.Arr.Name
	}
	nzb, err := p.parser.Parse(ctx, req.Name, arrName, req.NZBContent)
	if err != nil {
		p.logger.Error().
			Err(err).
			Msg("Failed to parse NZB content")
		return nil, fmt.Errorf("failed to parse NZB content: %w", err)
	}
	if nzb == nil {
		p.logger.Error().
			Msg("Parsed NZB is nil")
		return nil, fmt.Errorf("parsed NZB is nil")
	}
	p.logger.Info().
		Str("nzb_id", nzb.ID).
		Msg("Successfully parsed NZB content")
	// Dedupe: an NZB that hashes to the same ID is returned as-is.
	if existing := p.store.Get(nzb.ID); existing != nil {
		p.logger.Info().Str("nzb_id", nzb.ID).Msg("NZB already exists")
		return existing, nil
	}
	p.logger.Info().
		Str("nzb_id", nzb.ID).
		Msg("Creating new NZB download job")
	downloadDir := req.DownloadDir
	if req.Arr != nil {
		// Downloads are grouped in a per-*arr subdirectory.
		downloadDir = filepath.Join(downloadDir, req.Arr.Name)
	}
	job := &DownloadJob{
		NZB:         nzb,
		Action:      req.Action,
		DownloadDir: downloadDir,
		Callback: func(completedNZB *NZB, err error) {
			if err != nil {
				p.logger.Error().
					Err(err).
					Str("nzb_id", completedNZB.ID).
					Msg("Download job failed")
				return
			}
			p.logger.Info().
				Str("nzb_id", completedNZB.ID).
				Msg("Download job completed successfully")
		},
	}
	// Mark the NZB as accepted (not yet completed) and persist it before
	// kicking off the asynchronous download.
	nzb.Downloaded = true
	nzb.AddedOn = time.Now()
	p.store.AddToQueue(nzb)
	if err := p.store.Add(nzb); err != nil {
		return nil, err
	}
	p.logger.Info().
		Str("nzb_id", nzb.ID).
		Msg("NZB added to queue")
	// NOTE(review): this goroutine inherits the request ctx; if the caller
	// cancels it when the request ends, the download is aborted — confirm
	// the intended lifetime.
	go func() {
		if err := p.downloadWorker.Process(ctx, job); err != nil {
			p.logger.Error().
				Err(err).
				Str("nzb_id", nzb.ID).
				Msg("Failed to submit download job")
		}
	}()
	return nzb, nil
}

336
pkg/usenet/rar.go Normal file
View File

@@ -0,0 +1,336 @@
package usenet
import (
"bytes"
"context"
"fmt"
"github.com/nwaples/rardecode/v2"
"github.com/sirrobot01/decypharr/internal/utils"
"io"
"strings"
"time"
)
// RarParser extracts file data out of RAR archives that are streamed
// segment-by-segment from usenet, without downloading the whole archive.
type RarParser struct {
	streamer *Streamer // fetches and decodes the underlying segments
}

// NewRarParser returns a RarParser that reads archive bytes via s.
func NewRarParser(s *Streamer) *RarParser {
	return &RarParser{streamer: s}
}
// ExtractFileRange streams the decompressed byte range [start, end] of the
// media file inside the RAR archive described by file into writer.
func (p *RarParser) ExtractFileRange(ctx context.Context, file *NZBFile, password string, start, end int64, writer io.Writer) error {
	fileInfo, err := p.getFileInfo(ctx, file, password)
	if err != nil {
		return fmt.Errorf("failed to get file info: %w", err)
	}
	segments := p.calculateSmartSegmentRanges(file, fileInfo, start, end)
	return p.extract(ctx, segments, password, fileInfo.FileName, start, end, writer)
}
// calculateSmartSegmentRanges estimates which archive segments are needed to
// serve the uncompressed byte range [start, end], padding the estimate with
// generous buffers so RAR headers and segment-boundary spillover are always
// included. The proportional mapping assumes roughly store-level compression.
func (p *RarParser) calculateSmartSegmentRanges(file *NZBFile, fileInfo *ExtractedFileInfo, start, end int64) []SegmentRange {
	totalSegments := len(file.Segments)
	if totalSegments == 0 {
		return nil
	}
	// Guard against a zero/negative archive size: the proportional mapping
	// below would divide by zero (Inf, then an undefined float→int
	// conversion), so fall back to streaming everything.
	if fileInfo.ArchiveSize <= 0 {
		return convertSegmentIndicesToRanges(file, 0, totalSegments-1)
	}
	// For store compression, be more conservative with seeking.
	compressionOverhead := 1.1 // 10% overhead
	estimatedArchiveStart := int64(float64(start) * compressionOverhead)
	estimatedArchiveEnd := int64(float64(end) * compressionOverhead)
	startSegmentIndex := int(float64(estimatedArchiveStart) / float64(fileInfo.ArchiveSize) * float64(totalSegments))
	endSegmentIndex := int(float64(estimatedArchiveEnd) / float64(fileInfo.ArchiveSize) * float64(totalSegments))
	if start > 0 {
		// For seeking, always include extra leading context so the RAR
		// headers preceding the target position are available.
		headerBuffer := min(10, startSegmentIndex) // up to 10 segments back
		startSegmentIndex = max(0, startSegmentIndex-headerBuffer)
	} else {
		startSegmentIndex = 0
	}
	// Larger end buffer for segment boundaries and the RAR footer.
	endBuffer := 10 + int(float64(totalSegments)*0.02) // plus 2% of total segments
	endSegmentIndex = min(totalSegments-1, endSegmentIndex+endBuffer)
	// Ensure a minimum window so short seeks still decode successfully.
	minSegmentsForSeek := 20
	if endSegmentIndex-startSegmentIndex < minSegmentsForSeek {
		endSegmentIndex = min(totalSegments-1, startSegmentIndex+minSegmentsForSeek)
	}
	return convertSegmentIndicesToRanges(file, startSegmentIndex, endSegmentIndex)
}
// extract streams the given archive segment ranges through an io.Pipe into a
// RAR decoder, locates the target (or first media) entry, and writes the
// requested decompressed byte range to writer. Segment streaming and RAR
// extraction run in separate goroutines connected by the pipe.
func (p *RarParser) extract(ctx context.Context, segmentRanges []SegmentRange, password, targetFileName string, start, end int64, writer io.Writer) error {
	pipeReader, pipeWriter := io.Pipe()
	extractionErr := make(chan error, 1)
	streamingErr := make(chan error, 1)
	// RAR extraction goroutine: consumes archive bytes from the pipe.
	go func() {
		defer func() {
			// Closing the read side makes the streaming goroutine's
			// pipe writes fail, which unblocks it.
			pipeReader.Close()
			if r := recover(); r != nil {
				extractionErr <- fmt.Errorf("extraction panic: %v", r)
			}
		}()
		rarReader, err := rardecode.NewReader(pipeReader, rardecode.Password(password))
		if err != nil {
			extractionErr <- fmt.Errorf("failed to create RAR reader: %w", err)
			return
		}
		found := false
		for {
			select {
			case <-ctx.Done():
				extractionErr <- ctx.Err()
				return
			default:
			}
			header, err := rarReader.Next()
			if err == io.EOF {
				if !found {
					extractionErr <- fmt.Errorf("target file %s not found in downloaded segments", targetFileName)
				} else {
					extractionErr <- fmt.Errorf("reached EOF before completing range extraction")
				}
				return
			}
			if err != nil {
				extractionErr <- fmt.Errorf("failed to read RAR header: %w", err)
				return
			}
			// Accept either the exact target or any media file entry.
			if header.Name == targetFileName || utils.IsMediaFile(header.Name) {
				found = true
				err = p.extractRangeFromReader(ctx, rarReader, start, end, writer)
				extractionErr <- err
				return
			} else if !header.IsDir {
				// Drain non-target entries so the decoder can advance.
				err = p.skipFileEfficiently(ctx, rarReader)
				if err != nil && ctx.Err() == nil {
					extractionErr <- fmt.Errorf("failed to skip file %s: %w", header.Name, err)
					return
				}
			}
		}
	}()
	// Streaming goroutine: downloads segments and feeds the pipe.
	go func() {
		defer pipeWriter.Close()
		err := p.streamer.stream(ctx, segmentRanges, pipeWriter)
		streamingErr <- err
	}()
	// Wait for either side; extraction finishing first is the normal case.
	select {
	case err := <-extractionErr:
		// NOTE(review): returning here leaves the streaming goroutine
		// running until its pipe writes fail — confirm this is acceptable.
		return err
	case err := <-streamingErr:
		if err != nil && !p.isSkippableError(err) {
			return fmt.Errorf("segment streaming failed: %w", err)
		}
		// Streaming finished first; give extraction time to drain the
		// remaining buffered bytes (seek-heavy reads can be slow).
		select {
		case err := <-extractionErr:
			return err
		case <-time.After(30 * time.Second):
			return fmt.Errorf("extraction timeout after 30 seconds")
		}
	case <-ctx.Done():
		return ctx.Err()
	}
}
// extractRangeFromReader skips to start within the decompressed stream and
// copies the inclusive range [start, end] to writer.
func (p *RarParser) extractRangeFromReader(ctx context.Context, reader io.Reader, start, end int64, writer io.Writer) error {
	if start > 0 {
		skipped, err := p.smartSkip(ctx, reader, start)
		if err != nil {
			return fmt.Errorf("failed to skip to position %d (skipped %d): %w", start, skipped, err)
		}
	}
	wanted := end - start + 1 // the range is inclusive on both ends
	copied, err := p.smartCopy(ctx, writer, reader, wanted)
	if err != nil && err != io.EOF {
		return fmt.Errorf("failed to copy range (copied %d/%d): %w", copied, wanted, err)
	}
	return nil
}
// smartSkip discards up to bytesToSkip bytes from reader, honoring ctx
// cancellation between reads. It returns how many bytes were actually
// skipped; hitting EOF ends the skip early without an error.
func (p *RarParser) smartSkip(ctx context.Context, reader io.Reader, bytesToSkip int64) (int64, error) {
	const skipBufferSize = 64 * 1024 // a large buffer keeps skipping cheap
	scratch := make([]byte, skipBufferSize)
	var skipped int64
	for skipped < bytesToSkip {
		if err := ctx.Err(); err != nil {
			return skipped, err
		}
		chunk := int64(skipBufferSize)
		if remaining := bytesToSkip - skipped; remaining < chunk {
			chunk = remaining
		}
		n, err := reader.Read(scratch[:chunk])
		skipped += int64(n)
		if err == io.EOF {
			break
		}
		if err != nil {
			return skipped, err
		}
	}
	return skipped, nil
}
// smartCopy copies exactly bytesToCopy bytes (or until EOF) from src to dst,
// checking ctx between reads. It returns the number of bytes written.
func (p *RarParser) smartCopy(ctx context.Context, dst io.Writer, src io.Reader, bytesToCopy int64) (int64, error) {
	const copyBufferSize = 32 * 1024
	scratch := make([]byte, copyBufferSize)
	var copied int64
	for copied < bytesToCopy {
		if err := ctx.Err(); err != nil {
			return copied, err
		}
		chunk := int64(copyBufferSize)
		if remaining := bytesToCopy - copied; remaining < chunk {
			chunk = remaining
		}
		n, readErr := src.Read(scratch[:chunk])
		if n > 0 {
			written, writeErr := dst.Write(scratch[:n])
			if writeErr != nil {
				return copied, writeErr
			}
			copied += int64(written)
		}
		if readErr == io.EOF {
			break
		}
		if readErr != nil {
			return copied, readErr
		}
	}
	return copied, nil
}
// skipFileEfficiently drains the current archive entry from reader so the RAR
// decoder can advance to the next header. EOF is the expected terminator.
func (p *RarParser) skipFileEfficiently(ctx context.Context, reader io.Reader) error {
	// 1<<62 effectively means "until EOF".
	if _, err := p.smartSkip(ctx, reader, 1<<62); err != nil && err != io.EOF {
		return err
	}
	return nil
}
// getFileInfo downloads just enough leading segments of the archive to read
// the RAR headers and locate the media file inside it. It returns the media
// file's name, its unpacked size, and the estimated total archive size.
func (p *RarParser) getFileInfo(ctx context.Context, file *NZBFile, password string) (*ExtractedFileInfo, error) {
	headerSegments := p.getMinimalHeaders(file)
	var headerBuffer bytes.Buffer
	if err := p.streamer.stream(ctx, headerSegments, &headerBuffer); err != nil {
		return nil, fmt.Errorf("failed to download headers: %w", err)
	}
	reader := bytes.NewReader(headerBuffer.Bytes())
	rarReader, err := rardecode.NewReader(reader, rardecode.Password(password))
	if err != nil {
		return nil, fmt.Errorf("failed to create RAR reader (check password): %w", err)
	}
	totalArchiveSize := p.calculateTotalSize(file.SegmentSize, file.Segments)
	for {
		header, err := rarReader.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			// A non-EOF error here is generally persistent (truncated
			// headers, wrong password); the previous `continue` could
			// spin forever if Next kept returning the same error.
			break
		}
		if !header.IsDir && utils.IsMediaFile(header.Name) {
			return &ExtractedFileInfo{
				FileName:    header.Name,
				FileSize:    header.UnPackedSize,
				ArchiveSize: totalArchiveSize,
			}, nil
		}
	}
	return nil, fmt.Errorf("no media file found in RAR archive")
}
// getMinimalHeaders selects the first few segments of the archive — enough to
// decode the RAR volume header and file table.
func (p *RarParser) getMinimalHeaders(file *NZBFile) []SegmentRange {
	count := min(len(file.Segments), 4) // minimal for password+headers
	return file.ConvertToSegmentRanges(file.Segments[:count])
}
// calculateTotalSize estimates the archive size assuming a uniform segment
// size, falling back to each segment's reported size when none is configured,
// and always using the real size of the (usually shorter) final segment.
func (p *RarParser) calculateTotalSize(segmentSize int64, segments []NZBSegment) int64 {
	var total int64
	size := segmentSize
	last := len(segments) - 1
	for i, seg := range segments {
		if size <= 0 {
			size = seg.Bytes // fallback to the actual segment size
		}
		if i == last {
			size = seg.Bytes // the final segment uses its actual size
		}
		total += size
	}
	return total
}
// isSkippableError reports whether a streaming error merely indicates that the
// consumer went away (so extraction may still be allowed to finish).
func (p *RarParser) isSkippableError(err error) bool {
	if err == nil {
		return true
	}
	msg := err.Error()
	for _, benign := range []string{"client disconnected", "broken pipe", "connection reset"} {
		if strings.Contains(msg, benign) {
			return true
		}
	}
	return false
}
// convertSegmentIndicesToRanges expands the inclusive segment index window
// [startIndex, endIndex] into SegmentRange values covering each segment in
// full — RAR extraction always consumes whole segments.
func convertSegmentIndicesToRanges(file *NZBFile, startIndex, endIndex int) []SegmentRange {
	var ranges []SegmentRange
	last := min(endIndex, len(file.Segments)-1)
	for i := startIndex; i <= last; i++ {
		seg := file.Segments[i]
		ranges = append(ranges, SegmentRange{
			Segment:    seg,
			ByteStart:  0,             // whole segment: start at byte 0
			ByteEnd:    seg.Bytes - 1, // ...through the final byte
			TotalStart: 0,             // unused by this code path
			TotalEnd:   seg.Bytes - 1, // unused by this code path
		})
	}
	return ranges
}

619
pkg/usenet/store.go Normal file
View File

@@ -0,0 +1,619 @@
package usenet
import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"io/fs"
	"net/http"
	"os"
	"path/filepath"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"github.com/puzpuzpuz/xsync/v4"
	"github.com/rs/zerolog"
	"github.com/sirrobot01/decypharr/internal/config"
	"github.com/sourcegraph/conc/pool"
)
// fileInfo is a minimal os.FileInfo describing a stored NZB as a virtual
// directory entry for the listing layer.
type fileInfo struct {
	id      string // NZB identifier (nzoID)
	name    string // NZB display name
	size    int64  // total content size in bytes
	mode    os.FileMode
	modTime time.Time // time the NZB was added
	isDir   bool      // NZBs are exposed as directories
}

func (fi *fileInfo) Name() string       { return fi.name }
func (fi *fileInfo) Size() int64        { return fi.size }
func (fi *fileInfo) Mode() os.FileMode  { return fi.mode }
func (fi *fileInfo) ModTime() time.Time { return fi.modTime }
func (fi *fileInfo) IsDir() bool        { return fi.isDir }

// ID returns the NZB id (not part of the os.FileInfo interface).
func (fi *fileInfo) ID() string { return fi.id }

// Sys reports no underlying data source.
func (fi *fileInfo) Sys() interface{} { return nil }
// Store persists NZBs and their download state. It backs both the
// SABnzbd-compatible queue/history API and the virtual directory listings.
type Store interface {
	Add(nzb *NZB) error         // persist a new NZB and index it
	Get(nzoID string) *NZB      // load by id; nil when missing
	GetByName(name string) *NZB // load via the in-memory title index
	Update(nzb *NZB) error      // persist changed state
	UpdateFile(nzoID string, file *NZBFile) error
	Delete(nzoID string) error // remove an NZB from disk and all indexes
	Count() int                // number of persisted NZBs
	Filter(category string, limit int, status ...string) []*NZB
	GetHistory(category string, limit int) []*NZB // finished NZBs only
	UpdateStatus(nzoID string, status string) error
	Close() error                           // drop all in-memory state
	GetListing(folder string) []os.FileInfo // cached virtual dir entries
	Load() error                            // rebuild indexes from disk
	// GetQueueItem Queue management
	GetQueueItem(nzoID string) *NZB
	AddToQueue(nzb *NZB)
	RemoveFromQueue(nzoID string)
	GetQueue() []*NZB
	AtomicDelete(nzoID string) error
	RemoveFile(nzoID string, filename string) error
	MarkAsCompleted(nzoID string, storage string) error
}
// store is the file-backed Store implementation: each NZB is persisted as a
// compact JSON document under storePath, with in-memory indexes for the
// queue, title lookup and directory listings.
type store struct {
	storePath  string                     // directory holding one <nzoID>.json per NZB
	listing    atomic.Value               // []os.FileInfo snapshot of all stored NZBs
	badListing atomic.Value               // []os.FileInfo snapshot of NZBs flagged IsBad
	queue      *xsync.Map[string, *NZB]   // in-flight downloads keyed by nzoID
	titles     *xsync.Map[string, string] // title -> nzoID
	config     *config.Usenet
	logger     zerolog.Logger
}
// NewStore creates a file-backed NZB store rooted at the configured NZBs
// path. It returns nil when the storage directory cannot be created; the
// failure is logged since the signature cannot carry an error.
func NewStore(config *config.Config, logger zerolog.Logger) Store {
	if err := os.MkdirAll(config.NZBsPath(), 0755); err != nil {
		// Surface the failure instead of silently returning nil.
		logger.Error().Err(err).Str("path", config.NZBsPath()).Msg("failed to create NZB store directory")
		return nil
	}
	return &store{
		storePath: config.NZBsPath(),
		queue:     xsync.NewMap[string, *NZB](),
		titles:    xsync.NewMap[string, string](),
		config:    config.Usenet,
		logger:    logger,
	}
}
// Load rebuilds the in-memory indexes (title map and listing snapshots) from
// every NZB JSON document on disk. Unreadable files are skipped silently.
func (ns *store) Load() error {
	ids, err := ns.getAllIDs()
	if err != nil {
		return err
	}
	listing := make([]os.FileInfo, 0)
	badListing := make([]os.FileInfo, 0)
	for _, id := range ids {
		nzb, err := ns.loadFromFile(id)
		if err != nil {
			continue // Skip if file cannot be loaded
		}
		ns.titles.Store(nzb.Name, nzb.ID)
		// Each NZB is presented as a virtual directory entry.
		fileInfo := &fileInfo{
			id:      nzb.ID,
			name:    nzb.Name,
			size:    nzb.TotalSize,
			mode:    0644,
			modTime: nzb.AddedOn,
			isDir:   true,
		}
		listing = append(listing, fileInfo)
		if nzb.IsBad {
			badListing = append(badListing, fileInfo)
		}
	}
	// Publish the fresh snapshots atomically.
	ns.listing.Store(listing)
	ns.badListing.Store(badListing)
	return nil
}
// getFilePath returns the file path for an NZB
// (one "<nzoID>.json" document per NZB under the store root).
func (ns *store) getFilePath(nzoID string) string {
	return filepath.Join(ns.storePath, nzoID+".json")
}
// loadFromFile reads and decodes the compact JSON representation of an NZB.
func (ns *store) loadFromFile(nzoID string) (*NZB, error) {
	raw, err := os.ReadFile(ns.getFilePath(nzoID))
	if err != nil {
		return nil, err
	}
	var compact CompactNZB
	if err := json.Unmarshal(raw, &compact); err != nil {
		return nil, err
	}
	return compact.toNZB(), nil
}
// saveToFile serializes an NZB in its compact form and writes it to disk,
// creating the parent directory if needed.
func (ns *store) saveToFile(nzb *NZB) error {
	path := ns.getFilePath(nzb.ID)
	if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
		return err
	}
	payload, err := json.Marshal(nzb.toCompact()) // compact JSON on disk
	if err != nil {
		return err
	}
	return os.WriteFile(path, payload, 0644)
}
// refreshListing rescans every NZB on disk and republishes the listing and
// bad-listing snapshots, then asks rclone (if configured) to refresh its VFS
// cache in the background. Mirrors Load but also triggers the rclone refresh.
func (ns *store) refreshListing() error {
	ids, err := ns.getAllIDs()
	if err != nil {
		return err
	}
	listing := make([]os.FileInfo, 0, len(ids))
	badListing := make([]os.FileInfo, 0, len(ids))
	for _, id := range ids {
		nzb, err := ns.loadFromFile(id)
		if err != nil {
			continue // Skip if file cannot be loaded
		}
		fileInfo := &fileInfo{
			id:      nzb.ID,
			name:    nzb.Name,
			size:    nzb.TotalSize,
			mode:    0644,
			modTime: nzb.AddedOn,
			isDir:   true,
		}
		listing = append(listing, fileInfo)
		ns.titles.Store(nzb.Name, nzb.ID)
		if nzb.IsBad {
			badListing = append(badListing, fileInfo)
		}
	}
	// Update all structures atomically
	ns.listing.Store(listing)
	ns.badListing.Store(badListing)
	// Refresh rclone if configured; best-effort, done off the hot path.
	go func() {
		if err := ns.refreshRclone(); err != nil {
			ns.logger.Error().Err(err).Msg("Failed to refresh rclone")
		}
	}()
	return nil
}
// Add persists a new NZB, indexes its title, and refreshes the directory
// listing in the background.
func (ns *store) Add(nzb *NZB) error {
	if nzb == nil {
		return fmt.Errorf("nzb cannot be nil")
	}
	if err := ns.saveToFile(nzb); err != nil {
		return err
	}
	ns.titles.Store(nzb.Name, nzb.ID)
	// Listing refresh is best-effort and may be slow (walks the store dir).
	go func() { _ = ns.refreshListing() }()
	return nil
}
// GetByName resolves a title to its NZB via the in-memory title index.
func (ns *store) GetByName(name string) *NZB {
	nzoID, ok := ns.titles.Load(name)
	if !ok {
		return nil
	}
	return ns.Get(nzoID)
}
// GetQueueItem returns the queued NZB with the given id, or nil.
func (ns *store) GetQueueItem(nzoID string) *NZB {
	item, ok := ns.queue.Load(nzoID)
	if !ok {
		return nil
	}
	return item
}
// AddToQueue registers an in-flight NZB in the queue; nil is ignored.
func (ns *store) AddToQueue(nzb *NZB) {
	if nzb != nil {
		ns.queue.Store(nzb.ID, nzb)
	}
}
// RemoveFromQueue drops the NZB with the given id from the queue; an empty
// id is a no-op.
func (ns *store) RemoveFromQueue(nzoID string) {
	if nzoID != "" {
		ns.queue.Delete(nzoID)
	}
}
// GetQueue snapshots all currently queued NZBs (order is unspecified).
func (ns *store) GetQueue() []*NZB {
	var items []*NZB
	ns.queue.Range(func(_ string, nzb *NZB) bool {
		items = append(items, nzb)
		return true
	})
	return items
}
// Get loads an NZB from disk by id, returning nil when missing or unreadable.
func (ns *store) Get(nzoID string) *NZB {
	if nzb, err := ns.loadFromFile(nzoID); err == nil {
		return nzb
	}
	return nil
}
// Update persists the current state of an NZB back to disk.
func (ns *store) Update(nzb *NZB) error {
	return ns.saveToFile(nzb)
}
// Delete removes an NZB from disk and every index; it is an alias for
// AtomicDelete.
func (ns *store) Delete(nzoID string) error {
	return ns.AtomicDelete(nzoID)
}
// AtomicDelete removes an NZB from the queue, the title index, both cached
// listings and disk. Deleting an id that is already gone is not an error.
func (ns *store) AtomicDelete(nzoID string) error {
	if nzoID == "" {
		return fmt.Errorf("nzoID cannot be empty")
	}
	filePath := ns.getFilePath(nzoID)
	// Get NZB info before deletion so the title index can be cleaned up.
	nzb := ns.Get(nzoID)
	if nzb == nil {
		if _, err := os.Stat(filePath); os.IsNotExist(err) {
			return nil // Already deleted
		}
	}
	ns.queue.Delete(nzoID)
	if nzb != nil {
		ns.titles.Delete(nzb.Name)
	}
	dropFromListing(&ns.listing, nzoID)
	dropFromListing(&ns.badListing, nzoID)
	// Remove the backing file; tolerate a concurrent removal.
	if err := os.Remove(filePath); err != nil && !os.IsNotExist(err) {
		return err
	}
	return nil
}

// dropFromListing republishes a cached []os.FileInfo listing without the
// entry whose id matches nzoID (deduplicates the two identical loops the
// original AtomicDelete carried).
func dropFromListing(listing *atomic.Value, nzoID string) {
	current := listing.Load()
	if current == nil {
		return
	}
	old := current.([]os.FileInfo)
	filtered := make([]os.FileInfo, 0, len(old))
	for _, entry := range old {
		if fi, ok := entry.(*fileInfo); ok && fi.id != nzoID {
			filtered = append(filtered, entry)
		}
	}
	listing.Store(filtered)
}
// RemoveFile flags a single file inside an NZB as removed and persists the
// change. It fails when the NZB does not exist.
func (ns *store) RemoveFile(nzoID string, filename string) error {
	if nzoID == "" || filename == "" {
		return fmt.Errorf("nzoID and filename cannot be empty")
	}
	nzb := ns.Get(nzoID)
	if nzb == nil {
		return fmt.Errorf("nzb with nzoID %s not found", nzoID)
	}
	if err := nzb.MarkFileAsRemoved(filename); err != nil {
		return err
	}
	if err := ns.Update(nzb); err != nil {
		return fmt.Errorf("failed to update nzb after removing file %s: %w", filename, err)
	}
	// Listing refresh after file removal is best-effort.
	_ = ns.refreshListing()
	return nil
}
// getAllIDs walks the store directory and collects the id of every persisted
// NZB (one "<id>.json" file each).
func (ns *store) getAllIDs() ([]string, error) {
	var ids []string
	walk := func(path string, entry fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		if entry.IsDir() {
			return nil
		}
		if name := entry.Name(); strings.HasSuffix(name, ".json") {
			ids = append(ids, strings.TrimSuffix(name, ".json"))
		}
		return nil
	}
	err := filepath.WalkDir(ns.storePath, walk)
	return ids, err
}
// Filter loads NZBs concurrently and returns those matching the optional
// category and status filters, up to limit (0 = unlimited). Result order is
// unspecified because loading happens in parallel.
func (ns *store) Filter(category string, limit int, status ...string) []*NZB {
	ids, err := ns.getAllIDs()
	if err != nil {
		return nil
	}
	statusSet := make(map[string]struct{}, len(status))
	for _, s := range status {
		statusSet[s] = struct{}{}
	}
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	p := pool.New().WithContext(ctx).WithMaxGoroutines(10)
	var results []*NZB
	var mu sync.Mutex
	var found atomic.Int32
	for _, id := range ids {
		id := id
		p.Go(func(ctx context.Context) error {
			// Early exit once the limit is reached.
			if limit > 0 && found.Load() >= int32(limit) {
				return nil
			}
			select {
			case <-ctx.Done():
				return ctx.Err()
			default:
			}
			nzb := ns.Get(id)
			if nzb == nil {
				return nil
			}
			// Apply filters.
			if category != "" && nzb.Category != category {
				return nil
			}
			if len(statusSet) > 0 {
				if _, exists := statusSet[nzb.Status]; !exists {
					return nil
				}
			}
			// Add to results with a limit check under the lock.
			mu.Lock()
			if limit == 0 || len(results) < limit {
				results = append(results, nzb)
				found.Add(1)
				// Cancel the remaining workers once the limit is hit.
				if limit > 0 && len(results) >= limit {
					cancel()
				}
			}
			mu.Unlock()
			return nil
		})
	}
	// cancel() is used above to stop workers once the limit is reached, so
	// Wait returning context.Canceled is expected and must not discard the
	// results already collected — the old code returned nil in that case,
	// making every limited query come back empty.
	if err := p.Wait(); err != nil && !errors.Is(err, context.Canceled) {
		return nil
	}
	return results
}
// Count reports how many NZBs are persisted on disk (0 on scan failure).
func (ns *store) Count() int {
	if ids, err := ns.getAllIDs(); err == nil {
		return len(ids)
	}
	return 0
}
// GetHistory returns finished NZBs (completed, failed or errored) for the
// given category, up to limit.
func (ns *store) GetHistory(category string, limit int) []*NZB {
	return ns.Filter(category, limit, "completed", "failed", "error")
}
// UpdateStatus transitions an NZB to the given status. Completion stamps the
// progress fields; a "failed" status deletes the NZB entirely.
func (ns *store) UpdateStatus(nzoID string, status string) error {
	nzb := ns.Get(nzoID)
	if nzb == nil {
		return fmt.Errorf("nzb with nzoID %s not found", nzoID)
	}
	nzb.Status = status
	nzb.LastActivity = time.Now()
	switch status {
	case "completed":
		nzb.CompletedOn = time.Now()
		nzb.Progress = 100
		nzb.Percentage = 100
	case "failed":
		// Failed downloads are removed entirely. Return right away: the
		// old code fell through to Update, which re-created the JSON
		// file that Delete had just removed.
		return ns.Delete(nzb.ID)
	}
	return ns.Update(nzb)
}
// Close clears all in-memory state. Fresh empty snapshots are Stored into the
// listing atomics rather than overwriting the atomic.Value fields themselves
// — overwriting races with concurrent Load callers and trips go vet's
// copylocks check.
func (ns *store) Close() error {
	// Clear the queue and title index.
	ns.queue = xsync.NewMap[string, *NZB]()
	ns.titles = xsync.NewMap[string, string]()
	// Publish empty listings (same concrete type as previous snapshots).
	ns.listing.Store([]os.FileInfo{})
	ns.badListing.Store([]os.FileInfo{})
	return nil
}
// UpdateFile replaces the stored copy of one file entry inside an NZB (matched
// by name) and refreshes the cached listings.
func (ns *store) UpdateFile(nzoID string, file *NZBFile) error {
	if nzoID == "" || file == nil {
		return fmt.Errorf("nzoID and file cannot be empty")
	}
	nzb := ns.Get(nzoID)
	if nzb == nil {
		return fmt.Errorf("nzb with nzoID %s not found", nzoID)
	}
	// Swap in the updated file by name; unknown names leave the NZB as-is.
	for i := range nzb.Files {
		if nzb.Files[i].Name == file.Name {
			nzb.Files[i] = *file
			break
		}
	}
	if err := ns.Update(nzb); err != nil {
		return fmt.Errorf("failed to update nzb after updating file %s: %w", file.Name, err)
	}
	return ns.refreshListing()
}
// GetListing returns the cached directory listing; the special folder name
// "__bad__" selects only NZBs flagged as bad.
func (ns *store) GetListing(folder string) []os.FileInfo {
	source := &ns.listing
	if folder == "__bad__" {
		source = &ns.badListing
	}
	if entries, ok := source.Load().([]os.FileInfo); ok {
		return entries
	}
	return []os.FileInfo{}
}
// MarkAsCompleted finalizes a queued NZB: stamps completion metadata, moves it
// out of the queue into persistent storage, and rolls it back into the queue
// if persisting fails.
func (ns *store) MarkAsCompleted(nzoID string, storage string) error {
	if nzoID == "" {
		return fmt.Errorf("nzoID cannot be empty")
	}
	nzb := ns.GetQueueItem(nzoID)
	if nzb == nil {
		return fmt.Errorf("NZB %s not found in queue", nzoID)
	}
	nzb.Status = "completed"
	nzb.Storage = storage
	nzb.CompletedOn = time.Now()
	nzb.LastActivity = time.Now()
	nzb.Progress = 100
	nzb.Percentage = 100
	// Atomically: remove from the queue, then persist.
	ns.queue.Delete(nzoID)
	if err := ns.Add(nzb); err != nil {
		// Rollback so the NZB is not lost when storage fails.
		ns.queue.Store(nzoID, nzb)
		return fmt.Errorf("failed to store completed NZB: %w", err)
	}
	return nil
}
// refreshRclone asks a configured rclone remote-control endpoint to forget
// and re-scan its VFS cache so added/removed NZBs become visible. It is a
// no-op when no RC URL is configured; per-endpoint failures are logged only.
func (ns *store) refreshRclone() error {
	if ns.config.RcUrl == "" {
		return nil
	}
	// Use a bounded client: http.DefaultClient has no timeout, so a stuck
	// rclone endpoint would hang this (background) goroutine forever.
	client := &http.Client{Timeout: 30 * time.Second}
	// Create form data
	data := ns.buildRcloneRequestData()
	if err := ns.sendRcloneRequest(client, "vfs/forget", data); err != nil {
		ns.logger.Error().Err(err).Msg("Failed to send rclone vfs/forget request")
	}
	if err := ns.sendRcloneRequest(client, "vfs/refresh", data); err != nil {
		ns.logger.Error().Err(err).Msg("Failed to send rclone vfs/refresh request")
	}
	return nil
}
// buildRcloneRequestData builds the form body for the rclone VFS RC calls;
// "__all__" targets every directory of the remote.
func (ns *store) buildRcloneRequestData() string {
	return "dir=__all__"
}
// sendRcloneRequest POSTs form data to the given rclone RC endpoint, applying
// basic auth when configured, and treats any non-200 response as an error.
func (ns *store) sendRcloneRequest(client *http.Client, endpoint, data string) error {
	url := fmt.Sprintf("%s/%s", ns.config.RcUrl, endpoint)
	req, err := http.NewRequest("POST", url, strings.NewReader(data))
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
	if ns.config.RcUser != "" && ns.config.RcPass != "" {
		req.SetBasicAuth(ns.config.RcUser, ns.config.RcPass)
	}
	resp, err := client.Do(req)
	if err != nil {
		return err
	}
	defer func() {
		if cerr := resp.Body.Close(); cerr != nil {
			ns.logger.Error().Err(cerr).Msg("Failed to close response body")
		}
	}()
	if resp.StatusCode != 200 {
		body, _ := io.ReadAll(io.LimitReader(resp.Body, 1024))
		return fmt.Errorf("failed to perform %s: %s - %s", endpoint, resp.Status, string(body))
	}
	// Drain so the underlying connection can be reused.
	_, _ = io.Copy(io.Discard, resp.Body)
	return nil
}

383
pkg/usenet/stream.go Normal file
View File

@@ -0,0 +1,383 @@
package usenet
import (
"bytes"
"context"
"errors"
"fmt"
"github.com/chrisfarms/yenc"
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/nntp"
"io"
"net/http"
"sync"
"time"
)
// groupCache remembers which newsgroups have already been selected so the
// GROUP command is not re-issued per segment. NOTE(review): the cache is
// process-wide while group selection is per NNTP connection — confirm that
// GetBody by message-id does not depend on the group being selected on the
// specific connection in use.
var groupCache = sync.Map{}
// Streamer downloads yEnc-encoded segments over NNTP and serves arbitrary
// byte ranges of the contained files, with optional segment caching and
// transparent RAR extraction.
type Streamer struct {
	logger       zerolog.Logger
	client       *nntp.Client  // NNTP connection pool
	store        Store         // NZB metadata store
	cache        *SegmentCache // optional decoded-segment cache (may be nil)
	chunkSize    int           // segments downloaded concurrently per batch
	maxRetries   int           // download attempts per segment
	retryDelayMs int           // base retry backoff in milliseconds
}
// segmentResult carries the outcome of one concurrent segment download so
// results can be written back strictly in order.
type segmentResult struct {
	index int    // position within the downloaded chunk
	data  []byte // decoded bytes for the requested range
	err   error  // download/decode failure, if any
}
type FlushingWriter struct {
writer io.Writer
}
func (fw *FlushingWriter) Write(data []byte) (int, error) {
if len(data) == 0 {
return 0, nil
}
written, err := fw.writer.Write(data)
if err != nil {
return written, err
}
if written != len(data) {
return written, io.ErrShortWrite
}
// Auto-flush if possible
if flusher, ok := fw.writer.(http.Flusher); ok {
flusher.Flush()
}
return written, nil
}
func (fw *FlushingWriter) WriteAndFlush(data []byte) (int64, error) {
if len(data) == 0 {
return 0, nil
}
written, err := fw.Write(data)
return int64(written), err
}
func (fw *FlushingWriter) WriteString(s string) (int, error) {
return fw.Write([]byte(s))
}
func (fw *FlushingWriter) WriteBytes(data []byte) (int, error) {
return fw.Write(data)
}
// NewStreamer builds a Streamer with the default retry policy (3 attempts,
// 2s base backoff).
func NewStreamer(client *nntp.Client, cache *SegmentCache, store Store, chunkSize int, logger zerolog.Logger) *Streamer {
	s := &Streamer{
		logger:       logger.With().Str("component", "streamer").Logger(),
		client:       client,
		cache:        cache,
		store:        store,
		chunkSize:    chunkSize,
		maxRetries:   3,    // attempts per segment
		retryDelayMs: 2000, // base backoff, doubled per retry
	}
	return s
}
// Stream writes the byte range [start, end] of file to writer. RAR archives
// are extracted on the fly; plain files are served by mapping the range onto
// the underlying yEnc segments.
func (s *Streamer) Stream(ctx context.Context, file *NZBFile, start, end int64, writer io.Writer) error {
	if file == nil {
		return fmt.Errorf("file cannot be nil")
	}
	if start < 0 {
		start = 0
	}
	// Ensure file.SegmentSize is populated before any range math.
	if err := s.getSegmentSize(ctx, file); err != nil {
		return fmt.Errorf("failed to get segment size: %w", err)
	}
	// RAR archives take a separate extraction path; the range is validated
	// against the unpacked file inside that path, not here.
	if file.IsRarArchive {
		return s.streamRarExtracted(ctx, file, start, end, writer)
	}
	if end >= file.Size {
		end = file.Size - 1 // clamp to the last valid byte
	}
	if start > end {
		return fmt.Errorf("invalid range: start=%d > end=%d", start, end)
	}
	ranges := file.GetSegmentsInRange(file.SegmentSize, start, end)
	if len(ranges) == 0 {
		return fmt.Errorf("no segments found for range [%d, %d]", start, end)
	}
	// Wrap so each chunk is flushed to the client as soon as it is written.
	writer = &FlushingWriter{writer: writer}
	return s.stream(ctx, ranges, writer)
}
// streamRarExtracted serves a byte range of the media file embedded in a RAR
// archive by extracting it on the fly with the file's stored password.
func (s *Streamer) streamRarExtracted(ctx context.Context, file *NZBFile, start, end int64, writer io.Writer) error {
	parser := NewRarParser(s)
	return parser.ExtractFileRange(ctx, file, file.Password, start, end, writer)
}
// stream downloads segment ranges in fixed-size concurrent batches and writes
// the decoded bytes to writer strictly in segment order.
func (s *Streamer) stream(ctx context.Context, ranges []SegmentRange, writer io.Writer) error {
	chunkSize := s.chunkSize
	for i := 0; i < len(ranges); i += chunkSize {
		end := min(i+chunkSize, len(ranges))
		chunk := ranges[i:end]
		// Download the chunk concurrently; results are stored by index so
		// the write phase below can restore ordering.
		results := make([]segmentResult, len(chunk))
		var wg sync.WaitGroup
		for j, segRange := range chunk {
			wg.Add(1)
			go func(idx int, sr SegmentRange) {
				defer wg.Done()
				data, err := s.processSegment(ctx, sr)
				results[idx] = segmentResult{index: idx, data: data, err: err}
			}(j, segRange)
		}
		wg.Wait()
		// Write the chunk sequentially; the first failed segment aborts.
		for j, result := range results {
			if result.err != nil {
				return fmt.Errorf("segment %d failed: %w", i+j, result.err)
			}
			if len(result.data) > 0 {
				_, err := writer.Write(result.data)
				if err != nil {
					return err
				}
			}
		}
	}
	return nil
}
// processSegment returns the bytes of segRange, serving from the segment
// cache when possible and otherwise downloading and decoding the article.
func (s *Streamer) processSegment(ctx context.Context, segRange SegmentRange) ([]byte, error) {
	segment := segRange.Segment
	// Cache hit: slice the requested range out of the cached body.
	if s.cache != nil {
		if cached, found := s.cache.Get(segment.MessageID); found {
			return s.extractRangeFromSegment(cached.Data, segRange)
		}
	}
	// Download with retries
	decodedData, err := s.downloadSegmentWithRetry(ctx, segment)
	if err != nil {
		return nil, fmt.Errorf("download failed: %w", err)
	}
	// Cache the full segment so future seeks into it are free.
	// NOTE(review): Put receives the whole yenc.Part while reads go through
	// cached.Data — confirm SegmentCache stores the decoded body bytes.
	if s.cache != nil {
		s.cache.Put(segment.MessageID, decodedData, segment.Bytes)
	}
	// Extract the specific range from this segment
	return s.extractRangeFromSegment(decodedData.Body, segRange)
}
// extractRangeFromSegment slices the pre-computed [ByteStart, ByteEnd] window
// out of a decoded segment, clamping to the data actually available. Ranges
// entirely outside the data yield an empty (non-nil) slice.
func (s *Streamer) extractRangeFromSegment(data []byte, segRange SegmentRange) ([]byte, error) {
	from := segRange.ByteStart
	to := segRange.ByteEnd + 1 // ByteEnd is inclusive; slicing is exclusive
	size := int64(len(data))
	if from < 0 || from >= size {
		return []byte{}, nil
	}
	if to > size {
		to = size
	}
	if from >= to {
		return []byte{}, nil
	}
	// Copy out so callers never alias the cached segment buffer.
	out := make([]byte, to-from)
	copy(out, data[from:to])
	return out, nil
}
// downloadSegmentWithRetry downloads a segment with exponential backoff
// (capped at 5s between attempts), bailing out immediately on context
// cancellation or deadline expiry.
func (s *Streamer) downloadSegmentWithRetry(ctx context.Context, segment NZBSegment) (*yenc.Part, error) {
	var lastErr error
	for attempt := 0; attempt < s.maxRetries; attempt++ {
		// Check cancellation before each attempt.
		if err := ctx.Err(); err != nil {
			return nil, err
		}
		if attempt > 0 {
			backoff := time.Duration(s.retryDelayMs*(1<<(attempt-1))) * time.Millisecond
			if backoff > 5*time.Second {
				backoff = 5 * time.Second
			}
			select {
			case <-ctx.Done():
				return nil, ctx.Err()
			case <-time.After(backoff):
			}
		}
		part, err := s.downloadSegment(ctx, segment)
		if err == nil {
			return part, nil
		}
		if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
			return nil, err
		}
		lastErr = err
	}
	return nil, fmt.Errorf("segment download failed after %d attempts: %w", s.maxRetries, lastErr)
}
// downloadSegment fetches one article body over NNTP (with a 5s per-segment
// connection timeout), selects the segment's newsgroup when it has not been
// selected before, and yEnc-decodes the result.
func (s *Streamer) downloadSegment(ctx context.Context, segment NZBSegment) (*yenc.Part, error) {
	if err := ctx.Err(); err != nil {
		return nil, err
	}
	downloadCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
	defer cancel()
	conn, cleanup, err := s.client.GetConnection(downloadCtx)
	if err != nil {
		return nil, err
	}
	defer cleanup()
	if err := ctx.Err(); err != nil {
		return nil, err
	}
	if segment.Group != "" {
		// NOTE(review): groupCache is process-wide while GROUP state is
		// per connection; this works only if GetBody by message-id does
		// not depend on the selected group — confirm.
		if _, exists := groupCache.Load(segment.Group); !exists {
			if _, err := conn.SelectGroup(segment.Group); err != nil {
				return nil, fmt.Errorf("failed to select group %s: %w", segment.Group, err)
			}
			groupCache.Store(segment.Group, true)
		}
	}
	body, err := conn.GetBody(segment.MessageID)
	if err != nil {
		return nil, fmt.Errorf("failed to get body for message %s: %w", segment.MessageID, err)
	}
	// len(nil) == 0, so the previous `body == nil ||` clause was redundant
	// (staticcheck S1009).
	if len(body) == 0 {
		return nil, fmt.Errorf("no body found for message %s", segment.MessageID)
	}
	data, err := nntp.DecodeYenc(bytes.NewReader(body))
	if err != nil {
		return nil, fmt.Errorf("failed to decode yEnc: %w", err)
	}
	// Adjust the yEnc begin offset to be 0-based.
	data.Begin -= 1
	return data, nil
}
// copySegmentData writes data to writer in full, then flushes if the writer
// supports http.Flusher so HTTP range responses stream promptly.
// It returns the number of bytes written.
func (s *Streamer) copySegmentData(writer io.Writer, data []byte) (int64, error) {
	if len(data) == 0 {
		return 0, nil
	}
	// io.Writer guarantees an error on a short write, so a direct Write
	// replaces the former io.CopyN-through-bytes.Reader round trip.
	n, err := writer.Write(data)
	if err != nil {
		return 0, fmt.Errorf("write failed: %w", err)
	}
	// Defensive double-check of the Write contract.
	if n != len(data) {
		return 0, fmt.Errorf("expected to copy %d bytes, only copied %d", len(data), n)
	}
	if fl, ok := writer.(http.Flusher); ok {
		fl.Flush()
	}
	return int64(n), nil
}
// extractRangeWithGapHandling returns the portion of data overlapping the
// requested global byte range. segStart/segEnd are the segment's absolute
// bounds within the file; globalStart/globalEnd describe the request, with
// globalEnd inclusive. Non-overlapping or out-of-bounds requests yield an
// empty (non-nil) slice.
func (s *Streamer) extractRangeWithGapHandling(data []byte, segStart, segEnd int64, globalStart, globalEnd int64) ([]byte, error) {
	// Intersect the segment's span with the requested range.
	lo := max(segStart, globalStart)
	hi := min(segEnd, globalEnd+1) // globalEnd is inclusive
	if lo >= hi {
		return []byte{}, nil // no overlap
	}

	// Translate the intersection into offsets within data, clamping to the
	// bytes actually present (the segment may be shorter than declared).
	begin := lo - segStart
	if begin < 0 || begin >= int64(len(data)) {
		return []byte{}, nil
	}
	end := begin + (hi - lo)
	if end > int64(len(data)) {
		end = int64(len(data))
	}
	if end <= begin {
		return []byte{}, nil
	}

	out := make([]byte, end-begin)
	copy(out, data[begin:end])
	return out, nil
}
// getSegmentSize lazily resolves the per-segment byte size for a file by
// probing the yEnc header of its first segment, then persists the result via
// the store. It is a no-op once the size is known.
func (s *Streamer) getSegmentSize(ctx context.Context, file *NZBFile) error {
	if file.SegmentSize > 0 {
		return nil // already resolved
	}
	if len(file.Segments) == 0 {
		return fmt.Errorf("no segments available for file %s", file.Name)
	}
	// Probe the first segment's header for its part boundaries.
	info, err := s.client.DownloadHeader(ctx, file.Segments[0].MessageID)
	if err != nil {
		return err
	}
	// Begin/End are inclusive offsets, so the size is End - Begin + 1.
	size := info.End - info.Begin + 1
	if size <= 0 {
		return fmt.Errorf("invalid segment size for file %s: %d", file.Name, size)
	}
	file.SegmentSize = size
	return s.store.UpdateFile(file.NzbID, file)
}

239
pkg/usenet/types.go Normal file
View File

@@ -0,0 +1,239 @@
package usenet
import "time"
// NZB represents a torrent-like structure for NZB files. It mirrors the
// fields qBittorrent-style clients report (progress, ETA, speed, ...) so the
// UI can render torrents and NZBs uniformly.
type NZB struct {
	ID             string        `json:"id"`
	Name           string        `json:"name"`
	Title          string        `json:"title,omitempty"`
	TotalSize      int64         `json:"total_size"`
	DatePosted     time.Time     `json:"date_posted"`
	Category       string        `json:"category"`
	Groups         []string      `json:"groups"`
	Files          []NZBFile     `json:"files"`
	Downloaded     bool          `json:"downloaded"` // Whether the NZB has been downloaded
	StreamingInfo  StreamingInfo `json:"streaming_info"`
	AddedOn        time.Time     `json:"added_on"`        // When the NZB was added to the system
	LastActivity   time.Time     `json:"last_activity"`   // Last activity timestamp
	Status         string        `json:"status"`          // "queued", "downloading", "completed", "failed"
	Progress       float64       `json:"progress"`        // Percentage of download completion
	Percentage     float64       `json:"percentage"`      // Percentage of download completion
	SizeDownloaded int64         `json:"size_downloaded"` // Total size downloaded so far
	ETA            int64         `json:"eta"`             // Estimated time of arrival in seconds
	Speed          int64         `json:"speed"`           // Download speed in bytes per second
	CompletedOn    time.Time     `json:"completed_on"`    // When the NZB was completed
	IsBad          bool          `json:"is_bad"`
	Storage        string        `json:"storage"`
	FailMessage    string        `json:"fail_message,omitempty"` // Error message if the download failed
	// Password for encrypted RAR files. The tag must be exactly "-" to keep
	// the secret out of JSON: the previous `json:"-,omitempty"` tag names the
	// field "-" and serializes the password under that literal key.
	Password string `json:"-"`
}
// StreamingInfo contains metadata for streaming capabilities
type StreamingInfo struct {
	IsStreamable  bool  `json:"is_streamable"`
	MainFileIndex int   `json:"main_file_index"` // Index of the main media file
	HasParFiles   bool  `json:"has_par_files"`
	HasRarFiles   bool  `json:"has_rar_files"`
	TotalSegments int   `json:"total_segments"`
	EstimatedTime int64 `json:"estimated_time"` // Estimated download time in seconds
}

// SegmentValidationInfo tracks expected-vs-actual byte counts for a
// downloaded segment (runtime bookkeeping; never serialized).
type SegmentValidationInfo struct {
	ExpectedSize int64
	ActualSize   int64
	Validated    bool
}
// NZBFile represents a grouped file with its segments
type NZBFile struct {
	// NzbID is the back-reference to the owning NZB (SABnzbd-style "nzo_id").
	NzbID       string       `json:"nzo_id"`
	Name        string       `json:"name"`
	Size        int64        `json:"size"`
	StartOffset int64        `json:"start_offset"` // This is useful for removing rar headers
	Segments    []NZBSegment `json:"segments"`
	Groups      []string     `json:"groups"`
	// SegmentValidation is runtime-only bookkeeping and is excluded from JSON.
	SegmentValidation map[string]*SegmentValidationInfo `json:"-"`
	IsRarArchive      bool                              `json:"is_rar_archive"`     // Whether this file is a RAR archive that needs extraction
	Password          string                            `json:"password,omitempty"` // Password for encrypted RAR files
	IsDeleted         bool                              `json:"is_deleted"`
	SegmentSize       int64                             `json:"segment_size,omitempty"` // Size of each segment in bytes, if applicable
}

// NZBSegment represents a segment with all necessary download info
// NOTE: field order and types must stay identical to CompactSegment — the
// converters rely on direct struct conversion between the two.
type NZBSegment struct {
	Number      int    `json:"number"`
	MessageID   string `json:"message_id"`
	Bytes       int64  `json:"bytes"`
	StartOffset int64  `json:"start_offset"` // Byte offset within the file
	EndOffset   int64  `json:"end_offset"`   // End byte offset within the file
	Group       string `json:"group"`
}
// CompactNZB is a space-optimized version of NZB for storage. Single-letter
// JSON keys and Unix-second timestamps keep the on-disk footprint small; it
// is the persistence counterpart of NZB (see toCompact / toNZB).
type CompactNZB struct {
	ID          string        `json:"i"`
	Name        string        `json:"n"`
	Status      string        `json:"s"`
	Category    string        `json:"c"`
	Size        int64         `json:"sz"`
	Progress    float64       `json:"p"`
	Speed       int64         `json:"sp,omitempty"`
	ETA         int64         `json:"e,omitempty"`
	Added       int64         `json:"a"`            // Unix timestamp
	Modified    int64         `json:"m"`            // Unix timestamp
	Complete    int64         `json:"co,omitempty"` // Unix timestamp
	Groups      []string      `json:"g,omitempty"`
	Files       []CompactFile `json:"f,omitempty"`
	Storage     string        `json:"st,omitempty"` // Storage path
	FailMessage string        `json:"fm,omitempty"` // Error message if the download failed
	Downloaded  bool          `json:"d,omitempty"`
}

// CompactFile represents a file in compact format
type CompactFile struct {
	Name              string             `json:"n"`
	Size              int64              `json:"s"`
	Type              string             `json:"t"`
	Main              bool               `json:"m,omitempty"`
	Offset            int64              `json:"o"`
	Segments          []CompactSegment   `json:"seg,omitempty"`
	IsRar             bool               `json:"r,omitempty"`
	Password          string             `json:"p,omitempty"`
	IsDeleted         bool               `json:"del,omitempty"` // Whether the file is marked as deleted
	ExtractedFileInfo *ExtractedFileInfo `json:"efi,omitempty"` // Pre-extracted RAR file info
	SegmentSize       int64              `json:"ss,omitempty"`  // Size of each segment in bytes, if applicable
}

// CompactSegment represents a segment in compact format.
// NOTE: field order and types must stay identical to NZBSegment — the
// converters rely on direct struct conversion between the two.
type CompactSegment struct {
	Number      int    `json:"n"`           // Segment number
	MessageID   string `json:"mid"`         // Message-ID of the segment
	Bytes       int64  `json:"b"`           // Size in bytes
	StartOffset int64  `json:"so"`          // Start byte offset within the file
	EndOffset   int64  `json:"eo"`          // End byte offset within the file
	Group       string `json:"g,omitempty"` // Group associated with this segment
}

// ExtractedFileInfo describes a file inside a RAR archive whose metadata was
// extracted ahead of time so streaming can avoid a live archive scan.
type ExtractedFileInfo struct {
	FileName             string `json:"fn,omitempty"`
	FileSize             int64  `json:"fs,omitempty"`
	ArchiveSize          int64  `json:"as,omitempty"`  // Total size of the RAR archive
	EstimatedStartOffset int64  `json:"eso,omitempty"` // Estimated start offset in the archive
	SegmentSize          int64  `json:"ss,omitempty"`  // Size of each segment in the archive
}
// toCompact converts an NZB to its space-optimized storage representation.
// Timestamps collapse to Unix seconds and at most the first three groups are
// retained to bound storage size.
func (nzb *NZB) toCompact() *CompactNZB {
	c := &CompactNZB{
		ID:          nzb.ID,
		Name:        nzb.Name,
		Status:      nzb.Status,
		Category:    nzb.Category,
		Size:        nzb.TotalSize,
		Progress:    nzb.Progress,
		Speed:       nzb.Speed,
		ETA:         nzb.ETA,
		Added:       nzb.AddedOn.Unix(),
		Modified:    nzb.LastActivity.Unix(),
		Storage:     nzb.Storage,
		Downloaded:  nzb.Downloaded,
		FailMessage: nzb.FailMessage,
	}
	// A zero CompletedOn means "never completed"; keep Complete at 0 then.
	if !nzb.CompletedOn.IsZero() {
		c.Complete = nzb.CompletedOn.Unix()
	}
	if n := len(nzb.Groups); n > 0 {
		c.Groups = nzb.Groups[:min(3, n)]
	}
	if len(nzb.Files) > 0 {
		c.Files = make([]CompactFile, len(nzb.Files))
		for i := range nzb.Files {
			c.Files[i] = nzb.Files[i].toCompact()
		}
	}
	return c
}
// toNZB converts the compact storage form back into a full NZB. The
// reconstruction is lossy: streaming metadata is not persisted, so
// MainFileIndex is reset to -1 and Percentage is mirrored from Progress.
func (compact *CompactNZB) toNZB() *NZB {
	nzb := &NZB{
		ID:           compact.ID,
		Name:         compact.Name,
		Status:       compact.Status,
		Category:     compact.Category,
		TotalSize:    compact.Size,
		Progress:     compact.Progress,
		Percentage:   compact.Progress,
		Speed:        compact.Speed,
		ETA:          compact.ETA,
		Groups:       compact.Groups,
		AddedOn:      time.Unix(compact.Added, 0),
		LastActivity: time.Unix(compact.Modified, 0),
		Storage:      compact.Storage,
		Downloaded:   compact.Downloaded,
		FailMessage:  compact.FailMessage,
		StreamingInfo: StreamingInfo{
			MainFileIndex: -1,
		},
	}
	// Complete == 0 encodes "never completed"; leave CompletedOn zero then.
	if compact.Complete > 0 {
		nzb.CompletedOn = time.Unix(compact.Complete, 0)
	}
	// Reconstruct files
	if len(compact.Files) > 0 {
		nzb.Files = make([]NZBFile, len(compact.Files))
		for i, file := range compact.Files {
			nzb.Files[i] = file.toNZB()
		}
		// NOTE(review): TotalSegments is set to the number of files, not the
		// sum of per-file segments — confirm that is intentional. Also, with
		// MainFileIndex fixed at -1 above, IsStreamable is always false here.
		// Set streaming info
		nzb.StreamingInfo.TotalSegments = len(compact.Files)
		nzb.StreamingInfo.IsStreamable = nzb.StreamingInfo.MainFileIndex >= 0
	}
	return nzb
}
// toCompact converts an NZBFile to its compact storage form. Segments convert
// directly because CompactSegment is field-for-field identical to NZBSegment.
func (nf *NZBFile) toCompact() CompactFile {
	c := CompactFile{
		Name:        nf.Name,
		Size:        nf.Size,
		Offset:      nf.StartOffset,
		IsRar:       nf.IsRarArchive,
		IsDeleted:   nf.IsDeleted,
		Password:    nf.Password,
		SegmentSize: nf.SegmentSize,
	}
	// Preallocate; Segments stays nil (and is omitted from JSON) when empty.
	if len(nf.Segments) > 0 {
		c.Segments = make([]CompactSegment, 0, len(nf.Segments))
		for _, seg := range nf.Segments {
			c.Segments = append(c.Segments, CompactSegment(seg))
		}
	}
	return c
}
// toNZB expands a compact file record back into an NZBFile. Segments convert
// directly because NZBSegment is field-for-field identical to CompactSegment.
func (compact *CompactFile) toNZB() NZBFile {
	out := NZBFile{
		Name:         compact.Name,
		Size:         compact.Size,
		StartOffset:  compact.Offset,
		IsRarArchive: compact.IsRar,
		Password:     compact.Password,
		IsDeleted:    compact.IsDeleted,
		SegmentSize:  compact.SegmentSize,
	}
	// Preallocate; Segments stays nil when the compact record had none.
	if len(compact.Segments) > 0 {
		out.Segments = make([]NZBSegment, 0, len(compact.Segments))
		for _, seg := range compact.Segments {
			out.Segments = append(out.Segments, NZBSegment(seg))
		}
	}
	return out
}

180
pkg/usenet/usenet.go Normal file
View File

@@ -0,0 +1,180 @@
package usenet
import (
"context"
"fmt"
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/logger"
"github.com/sirrobot01/decypharr/internal/nntp"
"io"
"os"
)
// Usenet interface for usenet operations
type Usenet interface {
	// Start initializes connection pools and loads the persisted store.
	Start(ctx context.Context) error
	// IsReady returns a channel that is closed once Start has completed.
	IsReady() chan struct{}
	// ProcessNZB ingests the NZB described by req and returns the stored entry.
	ProcessNZB(ctx context.Context, req *ProcessRequest) (*NZB, error)
	// GetDownloadByteRange reports a byte range for a file within an NZB
	// (currently stubbed to return zeros — see the implementation).
	GetDownloadByteRange(nzoID string, filename string) (int64, int64, error)
	// Close releases resources held by the subsystem (closes the store).
	Close()
	// Logger returns the usenet-scoped logger.
	Logger() zerolog.Logger
	// Stream writes the inclusive byte range [start, end] of filename within
	// the given NZB to writer.
	Stream(ctx context.Context, nzbID string, filename string, start, end int64, writer io.Writer) error
	// Store returns the underlying NZB store.
	Store() Store
	// Client returns the shared NNTP client.
	Client() *nntp.Client
}
// usenet is the concrete Usenet implementation, wiring together the NNTP
// client, persistent store, NZB processor, parser, streamer and segment cache.
type usenet struct {
	client    *nntp.Client
	store     Store
	processor *Processor
	parser    *NZBParser
	streamer  *Streamer
	cache     *SegmentCache
	logger    zerolog.Logger
	ready     chan struct{} // closed by Start once initialization succeeds
}
// New builds the usenet subsystem from the global configuration. It returns
// nil when no usenet providers are configured, or when the NNTP client or the
// processor cannot be created (the failure is logged).
func New() Usenet {
	cfg := config.Get()
	usenetCfg := cfg.Usenet
	if usenetCfg == nil || len(usenetCfg.Providers) == 0 {
		return nil // usenet support disabled
	}
	log := logger.New("usenet")

	nntpClient, err := nntp.NewClient(usenetCfg.Providers)
	if err != nil {
		log.Error().Err(err).Msg("Failed to create usenet client")
		return nil
	}

	st := NewStore(cfg, log)
	proc, err := NewProcessor(usenetCfg, log, st, nntpClient)
	if err != nil {
		log.Error().Err(err).Msg("Failed to create usenet processor")
		return nil
	}

	// The segment cache is shared between the parser and the streamer.
	segCache := NewSegmentCache(log)
	return &usenet{
		store:     st,
		client:    nntpClient,
		processor: proc,
		parser:    NewNZBParser(nntpClient, segCache, log),
		streamer:  NewStreamer(nntpClient, segCache, st, usenetCfg.Chunks, log),
		cache:     segCache,
		logger:    log,
		ready:     make(chan struct{}),
	}
}
// Start initializes the NNTP connection pools and loads the persisted NZB
// store, then signals readiness by closing the ready channel.
// The ctx parameter is currently unused. Must be called at most once: a
// second call would close(c.ready) again and panic.
func (c *usenet) Start(ctx context.Context) error {
	// Init the client
	if err := c.client.InitPools(); err != nil {
		c.logger.Error().Err(err).Msg("Failed to initialize usenet client pools")
		return fmt.Errorf("failed to initialize usenet client pools: %w", err)
	}
	// Initialize the store
	if err := c.store.Load(); err != nil {
		c.logger.Error().Err(err).Msg("Failed to initialize usenet store")
		return fmt.Errorf("failed to initialize usenet store: %w", err)
	}
	// Unblock anyone waiting on IsReady().
	close(c.ready)
	c.logger.Info().Msg("Usenet client initialized")
	return nil
}
// IsReady returns a channel that is closed once Start completes successfully.
func (c *usenet) IsReady() chan struct{} {
	return c.ready
}

// Store returns the underlying NZB store.
func (c *usenet) Store() Store {
	return c.store
}

// Client returns the shared NNTP client.
func (c *usenet) Client() *nntp.Client {
	return c.client
}

// Logger returns the usenet-scoped logger.
func (c *usenet) Logger() zerolog.Logger {
	return c.logger
}
// ProcessNZB ingests an NZB request via the processor and returns the
// resulting stored entry.
func (c *usenet) ProcessNZB(ctx context.Context, req *ProcessRequest) (*NZB, error) {
	return c.processor.Process(ctx, req)
}

// GetNZB retrieves an NZB by ID (nil when not found).
func (c *usenet) GetNZB(nzoID string) *NZB {
	return c.store.Get(nzoID)
}

// DeleteNZB deletes an NZB from the store.
func (c *usenet) DeleteNZB(nzoID string) error {
	return c.store.Delete(nzoID)
}

// PauseNZB pauses an NZB download by setting its status to "paused".
func (c *usenet) PauseNZB(nzoID string) error {
	return c.store.UpdateStatus(nzoID, "paused")
}

// ResumeNZB resumes an NZB download by setting its status to "downloading".
func (c *usenet) ResumeNZB(nzoID string) error {
	return c.store.UpdateStatus(nzoID, "downloading")
}
// Close shuts the subsystem down by closing the persistent store.
// NOTE(review): the NNTP client pools are not released here — confirm the
// client requires no explicit shutdown.
func (c *usenet) Close() {
	if c.store != nil {
		if err := c.store.Close(); err != nil {
			c.logger.Error().Err(err).Msg("Failed to close store")
		}
	}
	c.logger.Info().Msg("Usenet client closed")
}
// GetListing returns the file listing of the NZB directory
func (c *usenet) GetListing(folder string) []os.FileInfo {
	return c.store.GetListing(folder)
}

// GetDownloadByteRange is a stub: it always reports (0, 0, nil).
// TODO(review): implement, or document why a zero range is acceptable to callers.
func (c *usenet) GetDownloadByteRange(nzoID string, filename string) (int64, int64, error) {
	return int64(0), int64(0), nil
}
// RemoveNZB deletes an NZB from the store, wrapping failures and logging success.
// NOTE(review): overlaps with DeleteNZB, which calls the same store method
// without wrapping/logging — consider consolidating the two.
func (c *usenet) RemoveNZB(nzoID string) error {
	if err := c.store.Delete(nzoID); err != nil {
		return fmt.Errorf("failed to delete NZB %s: %w", nzoID, err)
	}
	c.logger.Info().Msgf("NZB %s deleted successfully", nzoID)
	return nil
}
// Stream writes the inclusive byte range [start, end] of filename within the
// NZB identified by nzbID to writer, delegating to the segment streamer.
func (c *usenet) Stream(ctx context.Context, nzbID string, filename string, start, end int64, writer io.Writer) error {
	nzb := c.GetNZB(nzbID)
	if nzb == nil {
		return fmt.Errorf("NZB %s not found", nzbID)
	}
	file := nzb.GetFileByName(filename)
	if file == nil {
		return fmt.Errorf("file %s not found in NZB %s", filename, nzbID)
	}
	if file.NzbID == "" {
		// Repair entries that are missing the back-reference to their NZB.
		file.NzbID = nzbID
	}
	return c.streamer.Stream(ctx, file, start, end, writer)
}

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@@ -400,7 +400,7 @@ class DecypharrUtils {
if (data.channel === 'beta') {
versionBadge.classList.add('badge-warning');
} else if (data.channel === 'nightly') {
} else if (data.channel === 'experimental') {
versionBadge.classList.add('badge-error');
}
}

View File

@@ -3,6 +3,7 @@ class ConfigManager {
constructor() {
this.debridCount = 0;
this.arrCount = 0;
this.usenetProviderCount = 0;
this.debridDirectoryCounts = {};
this.directoryFilterCounts = {};
@@ -11,8 +12,10 @@ class ConfigManager {
loadingOverlay: document.getElementById('loadingOverlay'),
debridConfigs: document.getElementById('debridConfigs'),
arrConfigs: document.getElementById('arrConfigs'),
usenetConfigs: document.getElementById('usenetConfigs'),
addDebridBtn: document.getElementById('addDebridBtn'),
addArrBtn: document.getElementById('addArrBtn')
addArrBtn: document.getElementById('addArrBtn'),
addUsenetBtn: document.getElementById('addUsenetBtn')
};
this.init();
@@ -40,6 +43,7 @@ class ConfigManager {
// Add buttons
this.refs.addDebridBtn.addEventListener('click', () => this.addDebridConfig());
this.refs.addArrBtn.addEventListener('click', () => this.addArrConfig());
this.refs.addUsenetBtn.addEventListener('click', () => this.addUsenetConfig());
// WebDAV toggle handlers
document.addEventListener('change', (e) => {
@@ -82,6 +86,12 @@ class ConfigManager {
config.arrs.forEach(arr => this.addArrConfig(arr));
}
// Load usenet config
this.populateUsenetSettings(config.usenet);
// Load SABnzbd config
this.populateSABnzbdSettings(config.sabnzbd);
// Load repair config
this.populateRepairSettings(config.repair);
}
@@ -139,6 +149,26 @@ class ConfigManager {
});
}
populateUsenetSettings(usenetConfig) {
if (!usenetConfig) return;
// Populate general Usenet settings
let fields = ["mount_folder", "chunks", "skip_pre_cache", "rc_url", "rc_user", "rc_pass"];
fields.forEach(field => {
const element = document.querySelector(`[name="usenet.${field}"]`);
if (element && usenetConfig[field] !== undefined) {
if (element.type === 'checkbox') {
element.checked = usenetConfig[field];
} else {
element.value = usenetConfig[field];
}
}
});
if (usenetConfig.providers && Array.isArray(usenetConfig.providers)) {
usenetConfig.providers.forEach(usenet => this.addUsenetConfig(usenet));
}
}
addDebridConfig(data = {}) {
const debridHtml = this.getDebridTemplate(this.debridCount, data);
this.refs.debridConfigs.insertAdjacentHTML('beforeend', debridHtml);
@@ -228,7 +258,7 @@ class ConfigManager {
<span class="label-text font-medium">API Key</span>
</label>
<div class="password-toggle-container">
<input type="password" class="input input-bordered input-has-toggle"
<input autocomplete="off" type="password" class="input input-bordered input-has-toggle"
name="debrid[${index}].api_key" id="debrid[${index}].api_key" required>
<button type="button" class="password-toggle-btn">
<i class="bi bi-eye" id="debrid[${index}].api_key_icon"></i>
@@ -448,7 +478,7 @@ class ConfigManager {
<span class="label-text font-medium">RC Password</span>
</label>
<div class="password-toggle-container">
<input type="password" class="input input-bordered webdav-field input-has-toggle"
<input autocomplete="off" type="password" class="input input-bordered webdav-field input-has-toggle"
name="debrid[${index}].rc_pass" id="debrid[${index}].rc_pass">
<button type="button" class="password-toggle-btn">
<i class="bi bi-eye" id="debrid[${index}].rc_pass_icon"></i>
@@ -745,9 +775,9 @@ class ConfigManager {
modal.className = 'modal';
modal.innerHTML = `
<div class="modal-box max-w-2xl">
<form method="dialog">
<div method="dialog">
<button class="btn btn-sm btn-circle btn-ghost absolute right-2 top-2">✕</button>
</form>
</div>
<h3 class="font-bold text-lg mb-4">Directory Filter Types</h3>
<div class="space-y-4">
<div>
@@ -779,7 +809,7 @@ class ConfigManager {
<li>Examples: 24h, 7d, 30d</li>
</ul>
</div>
<div class="alert alert-info">
<div class="alert alert-warning">
<i class="bi bi-info-circle"></i>
<span>Negative filters (Not...) will exclude matches instead of including them.</span>
</div>
@@ -868,7 +898,7 @@ class ConfigManager {
<span class="label-text font-medium">API Token</span>
</label>
<div class="password-toggle-container">
<input type="password" class="input input-bordered input-has-toggle ${isAutoDetected ? 'input-disabled' : ''}"
<input autocomplete="off" type="password" class="input input-bordered input-has-toggle ${isAutoDetected ? 'input-disabled' : ''}"
name="arr[${index}].token" id="arr[${index}].token"
${isAutoDetected ? 'readonly' : 'required'}>
<button type="button" class="password-toggle-btn ${isAutoDetected ? 'opacity-50 cursor-not-allowed' : ''}"
@@ -882,7 +912,7 @@ class ConfigManager {
<div class="grid grid-cols-1 lg:grid-cols-2 gap-4 mt-4">
<div class="form-control">
<label class="label" for="arr[${index}].selected_debrid">
<span class="label-text font-medium">Preferred Debrid Service</span>
<span class="label-text font-medium">Preferred Service</span>
</label>
<select class="select select-bordered" name="arr[${index}].selected_debrid" id="arr[${index}].selected_debrid">
<option value="" selected>Auto-select</option>
@@ -890,6 +920,7 @@ class ConfigManager {
<option value="alldebrid">AllDebrid</option>
<option value="debrid_link">Debrid Link</option>
<option value="torbox">Torbox</option>
<option value="usenet">Usenet</option>
</select>
<div class="label">
<span class="label-text-alt">Which debrid service this Arr should prefer</span>
@@ -990,6 +1021,23 @@ class ConfigManager {
}
});
// Validate Usenet servers
if (config.usenet) {
config.usenet.providers.forEach((usenet, index) => {
if (!usenet.host) {
errors.push(`Usenet server #${index + 1}: Host is required`);
}
if (usenet.port && (usenet.port < 1 || usenet.port > 65535)) {
errors.push(`Usenet server #${index + 1}: Port must be between 1 and 65535`);
}
if (usenet.connections && (usenet.connections < 1 )) {
errors.push(`Usenet server #${index + 1}: Connections must be more than 0`);
}
});
}
// Validate repair settings
if (config.repair.enabled) {
if (!config.repair.interval) {
@@ -1038,6 +1086,12 @@ class ConfigManager {
// Arr configurations
arrs: this.collectArrConfigs(),
// Usenet configurations
usenet: this.collectUsenetConfig(),
// SABnzbd configuration
sabnzbd: this.collectSABnzbdConfig(),
// Repair configuration
repair: this.collectRepairConfig()
};
@@ -1153,6 +1207,211 @@ class ConfigManager {
return arrs;
}
addUsenetConfig(data = {}) {
const usenetHtml = this.getUsenetTemplate(this.usenetProviderCount, data);
this.refs.usenetConfigs.insertAdjacentHTML('beforeend', usenetHtml);
// Populate data if provided
if (Object.keys(data).length > 0) {
this.populateUsenetData(this.usenetProviderCount, data);
}
this.usenetProviderCount++;
}
populateUsenetData(index, data) {
Object.entries(data).forEach(([key, value]) => {
const input = document.querySelector(`[name="usenet[${index}].${key}"]`);
if (input) {
if (input.type === 'checkbox') {
input.checked = value;
} else {
input.value = value;
}
}
});
}
// Build the HTML for one Usenet provider card. `index` namespaces the input
// names (usenet[i].field) so collectUsenetConfig can read them back; the
// `data` parameter is unused here — values are filled in afterwards by
// populateUsenetData.
// NOTE(review): the card exposes both an "ssl" and a "use_tls" toggle —
// confirm both are meaningful server-side.
getUsenetTemplate(index, data = {}) {
return `
<div class="card bg-base-100 border border-base-300 shadow-sm usenet-config" data-index="${index}">
<div class="card-body">
<div class="flex justify-between items-start mb-4">
<h3 class="card-title text-lg">
<i class="bi bi-globe mr-2 text-info"></i>
Usenet Server #${index + 1}
</h3>
<button type="button" class="btn btn-error btn-sm" onclick="this.closest('.usenet-config').remove();">
<i class="bi bi-trash"></i>
</button>
</div>
<div class="grid grid-cols-1 lg:grid-cols-3 gap-4">
<div class="form-control">
<label class="label" for="usenet[${index}].name">
<span class="label-text font-medium">Name</span>
</label>
<input type="text" class="input input-bordered"
name="usenet[${index}].name" id="usenet[${index}].name"
placeholder="provider name, e.g easynews" required>
<div class="label">
<span class="label-text-alt">Usenet Name</span>
</div>
</div>
<div class="form-control">
<label class="label" for="usenet[${index}].host">
<span class="label-text font-medium">Host</span>
</label>
<input type="text" class="input input-bordered"
name="usenet[${index}].host" id="usenet[${index}].host"
placeholder="news.provider.com" required>
<div class="label">
<span class="label-text-alt">Usenet server hostname</span>
</div>
</div>
<div class="form-control">
<label class="label" for="usenet[${index}].port">
<span class="label-text font-medium">Port</span>
</label>
<input type="number" class="input input-bordered"
name="usenet[${index}].port" id="usenet[${index}].port"
placeholder="119" value="119" min="1" max="65535">
<div class="label">
<span class="label-text-alt">Server port (119 for standard, 563 for SSL)</span>
</div>
</div>
<div class="form-control">
<label class="label" for="usenet[${index}].connections">
<span class="label-text font-medium">Connections</span>
</label>
<input type="number" class="input input-bordered"
name="usenet[${index}].connections" id="usenet[${index}].connections"
placeholder="30" value="30" min="1" max="50">
<div class="label">
<span class="label-text-alt">Maximum simultaneous connections</span>
</div>
</div>
<div class="form-control">
<label class="label" for="usenet[${index}].username">
<span class="label-text font-medium">Username</span>
</label>
<input type="text" class="input input-bordered"
name="usenet[${index}].username" id="usenet[${index}].username">
<div class="label">
<span class="label-text-alt">Username for authentication</span>
</div>
</div>
<div class="form-control">
<label class="label" for="usenet[${index}].password">
<span class="label-text font-medium">Password</span>
</label>
<div class="password-toggle-container">
<input autocomplete="off" type="password" class="input input-bordered input-has-toggle"
name="usenet[${index}].password" id="usenet[${index}].password">
<button type="button" class="password-toggle-btn">
<i class="bi bi-eye" id="usenet[${index}].password_icon"></i>
</button>
</div>
<div class="label">
<span class="label-text-alt">Password for authentication</span>
</div>
</div>
</div>
<div class="grid grid-cols-2 lg:grid-cols-4 gap-4 mt-4">
<div class="form-control">
<label class="label cursor-pointer justify-start gap-2">
<input type="checkbox" class="checkbox"
name="usenet[${index}].ssl" id="usenet[${index}].ssl">
<span class="label-text font-medium">Use SSL</span>
</label>
<div class="label">
<span class="label-text-alt">Use SSL encryption</span>
</div>
</div>
<div class="form-control">
<label class="label cursor-pointer justify-start gap-2">
<input type="checkbox" class="checkbox"
name="usenet[${index}].use_tls" id="usenet[${index}].use_tls">
<span class="label-text font-medium">Use TLS</span>
</label>
<div class="label">
<span class="label-text-alt">Use TLS encryption</span>
</div>
</div>
</div>
</div>
</div>
`;
}
populateSABnzbdSettings(sabnzbdConfig) {
if (!sabnzbdConfig) return;
const fields = ['download_folder', 'refresh_interval'];
fields.forEach(field => {
const element = document.querySelector(`[name="sabnzbd.${field}"]`);
if (element && sabnzbdConfig[field] !== undefined) {
if (element.type === 'checkbox') {
element.checked = sabnzbdConfig[field];
} else {
element.value = sabnzbdConfig[field];
}
}
});
const categoriesEl = document.querySelector('[name="sabnzbd.categories"]');
if (categoriesEl && sabnzbdConfig.categories) {
categoriesEl.value = sabnzbdConfig.categories.join(', ');
}
}
collectUsenetConfig() {
const providers = [];
for (let i = 0; i < this.usenetProviderCount; i++) {
const hostEl = document.querySelector(`[name="usenet[${i}].host"]`);
if (!hostEl || !hostEl.closest('.usenet-config')) continue;
const usenet = {
host: hostEl.value,
port: parseInt(document.querySelector(`[name="usenet[${i}].port"]`).value) || 119,
username: document.querySelector(`[name="usenet[${i}].username"]`).value,
password: document.querySelector(`[name="usenet[${i}].password"]`).value,
connections: parseInt(document.querySelector(`[name="usenet[${i}].connections"]`).value) || 30,
name: document.querySelector(`[name="usenet[${i}].name"]`).value,
ssl: document.querySelector(`[name="usenet[${i}].ssl"]`).checked,
use_tls: document.querySelector(`[name="usenet[${i}].use_tls"]`).checked,
};
if (usenet.host) {
providers.push(usenet);
}
}
return {
"providers": providers,
"chunks": parseInt(document.querySelector('[name="usenet.chunks"]').value) || 15,
"mount_folder": document.querySelector('[name="usenet.mount_folder"]').value,
"skip_pre_cache": document.querySelector('[name="usenet.skip_pre_cache"]').checked,
"rc_url": document.querySelector('[name="usenet.rc_url"]').value,
"rc_user": document.querySelector('[name="usenet.rc_user"]').value,
"rc_pass": document.querySelector('[name="usenet.rc_pass"]').value,
};
}
collectSABnzbdConfig() {
return {
download_folder: document.querySelector('[name="sabnzbd.download_folder"]').value,
refresh_interval: parseInt(document.querySelector('[name="sabnzbd.refresh_interval"]').value) || 15,
categories: document.querySelector('[name="sabnzbd.categories"]').value
.split(',').map(ext => ext.trim()).filter(Boolean)
};
}
collectRepairConfig() {
return {
enabled: document.querySelector('[name="repair.enabled"]').checked,

View File

@@ -1,32 +1,51 @@
// Dashboard functionality for torrent management
class TorrentDashboard {
// Dashboard functionality for torrent and NZB management
class Dashboard {
constructor() {
this.state = {
mode: 'torrents', // 'torrents' or 'nzbs'
torrents: [],
selectedTorrents: new Set(),
nzbs: [],
selectedItems: new Set(),
categories: new Set(),
filteredTorrents: [],
filteredItems: [],
selectedCategory: '',
selectedState: '',
sortBy: 'added_on',
itemsPerPage: 20,
currentPage: 1,
selectedTorrentContextMenu: null
selectedItemContextMenu: null
};
this.refs = {
torrentsList: document.getElementById('torrentsList'),
// Mode switching
torrentsMode: document.getElementById('torrentsMode'),
nzbsMode: document.getElementById('nzbsMode'),
// Table elements
dataList: document.getElementById('dataList'),
torrentsHeaders: document.getElementById('torrentsHeaders'),
nzbsHeaders: document.getElementById('nzbsHeaders'),
// Controls
categoryFilter: document.getElementById('categoryFilter'),
stateFilter: document.getElementById('stateFilter'),
sortSelector: document.getElementById('sortSelector'),
selectAll: document.getElementById('selectAll'),
selectAllNzb: document.getElementById('selectAllNzb'),
batchDeleteBtn: document.getElementById('batchDeleteBtn'),
batchDeleteDebridBtn: document.getElementById('batchDeleteDebridBtn'),
refreshBtn: document.getElementById('refreshBtn'),
// Context menus
torrentContextMenu: document.getElementById('torrentContextMenu'),
nzbContextMenu: document.getElementById('nzbContextMenu'),
// Pagination and empty state
paginationControls: document.getElementById('paginationControls'),
paginationInfo: document.getElementById('paginationInfo'),
emptyState: document.getElementById('emptyState')
emptyState: document.getElementById('emptyState'),
emptyStateTitle: document.getElementById('emptyStateTitle'),
emptyStateMessage: document.getElementById('emptyStateMessage')
};
this.init();
@@ -34,20 +53,26 @@ class TorrentDashboard {
init() {
this.bindEvents();
this.loadTorrents();
this.loadModeFromURL();
this.loadData();
this.startAutoRefresh();
}
bindEvents() {
// Mode switching
this.refs.torrentsMode.addEventListener('click', () => this.switchMode('torrents'));
this.refs.nzbsMode.addEventListener('click', () => this.switchMode('nzbs'));
// Refresh button
this.refs.refreshBtn.addEventListener('click', () => this.loadTorrents());
this.refs.refreshBtn.addEventListener('click', () => this.loadData());
// Batch delete
this.refs.batchDeleteBtn.addEventListener('click', () => this.deleteSelectedTorrents());
this.refs.batchDeleteDebridBtn.addEventListener('click', () => this.deleteSelectedTorrents(true));
this.refs.batchDeleteBtn.addEventListener('click', () => this.deleteSelectedItems());
this.refs.batchDeleteDebridBtn.addEventListener('click', () => this.deleteSelectedItems(true));
// Select all checkbox
// Select all checkboxes
this.refs.selectAll.addEventListener('change', (e) => this.toggleSelectAll(e.target.checked));
this.refs.selectAllNzb.addEventListener('change', (e) => this.toggleSelectAll(e.target.checked));
// Filters
this.refs.categoryFilter.addEventListener('change', (e) => this.setFilter('category', e.target.value));
@@ -57,18 +82,333 @@ class TorrentDashboard {
// Context menu
this.bindContextMenu();
// Torrent selection
this.refs.torrentsList.addEventListener('change', (e) => {
if (e.target.classList.contains('torrent-select')) {
this.toggleTorrentSelection(e.target.dataset.hash, e.target.checked);
// Item selection
this.refs.dataList.addEventListener('change', (e) => {
if (e.target.classList.contains('item-select')) {
this.toggleItemSelection(e.target.dataset.id, e.target.checked);
}
});
}
switchMode(mode) {
if (this.state.mode === mode) return;
this.state.mode = mode;
this.state.selectedItems.clear();
// Update URL parameter
this.updateURL(mode);
// Update button states
if (mode === 'torrents') {
this.refs.torrentsMode.classList.remove('btn-outline');
this.refs.torrentsMode.classList.add('btn-primary');
this.refs.nzbsMode.classList.remove('btn-primary');
this.refs.nzbsMode.classList.add('btn-outline');
// Show torrent headers, hide NZB headers
this.refs.torrentsHeaders.classList.remove('hidden');
this.refs.nzbsHeaders.classList.add('hidden');
// Update empty state
this.refs.emptyStateTitle.textContent = 'No Torrents Found';
this.refs.emptyStateMessage.textContent = "You haven't added any torrents yet. Start by adding your first download!";
// Show debrid batch delete button
this.refs.batchDeleteDebridBtn.classList.remove('hidden');
} else {
this.refs.nzbsMode.classList.remove('btn-outline');
this.refs.nzbsMode.classList.add('btn-primary');
this.refs.torrentsMode.classList.remove('btn-primary');
this.refs.torrentsMode.classList.add('btn-outline');
// Show NZB headers, hide torrent headers
this.refs.nzbsHeaders.classList.remove('hidden');
this.refs.torrentsHeaders.classList.add('hidden');
// Update empty state
this.refs.emptyStateTitle.textContent = 'No NZBs Found';
this.refs.emptyStateMessage.textContent = "You haven't added any NZB downloads yet. Start by adding your first NZB!";
// Hide debrid batch delete button (not relevant for NZBs)
this.refs.batchDeleteDebridBtn.classList.add('hidden');
}
// Reset filters and reload data
this.state.selectedCategory = '';
this.state.selectedState = '';
this.state.currentPage = 1;
this.refs.categoryFilter.value = '';
this.refs.stateFilter.value = '';
this.loadData();
this.updateBatchActions();
}
updateBatchActions() {
const hasSelection = this.state.selectedItems.size > 0;
// Show/hide batch delete button
if (this.refs.batchDeleteBtn) {
this.refs.batchDeleteBtn.classList.toggle('hidden', !hasSelection);
}
// Show/hide debrid batch delete button (only for torrents)
if (this.refs.batchDeleteDebridBtn) {
const showDebridButton = hasSelection && this.state.mode === 'torrents';
this.refs.batchDeleteDebridBtn.classList.toggle('hidden', !showDebridButton);
}
// Update button text with count
if (hasSelection) {
const count = this.state.selectedItems.size;
const itemType = this.state.mode === 'torrents' ? 'Torrent' : 'NZB';
const itemTypePlural = this.state.mode === 'torrents' ? 'Torrents' : 'NZBs';
if (this.refs.batchDeleteBtn) {
const deleteText = count === 1 ? `Delete ${itemType}` : `Delete ${count} ${itemTypePlural}`;
const deleteSpan = this.refs.batchDeleteBtn.querySelector('span');
if (deleteSpan) {
deleteSpan.textContent = deleteText;
}
}
if (this.refs.batchDeleteDebridBtn && this.state.mode === 'torrents') {
const debridText = count === 1 ? 'Remove From Debrid' : `Remove ${count} From Debrid`;
const debridSpan = this.refs.batchDeleteDebridBtn.querySelector('span');
if (debridSpan) {
debridSpan.textContent = debridText;
}
}
} else {
// Reset button text when no selection
if (this.refs.batchDeleteBtn) {
const deleteSpan = this.refs.batchDeleteBtn.querySelector('span');
if (deleteSpan) {
deleteSpan.textContent = 'Delete Selected';
}
}
if (this.refs.batchDeleteDebridBtn) {
const debridSpan = this.refs.batchDeleteDebridBtn.querySelector('span');
if (debridSpan) {
debridSpan.textContent = 'Remove From Debrid';
}
}
}
}
loadData() {
if (this.state.mode === 'torrents') {
this.loadTorrents();
} else {
this.loadNZBs();
}
}
async loadNZBs() {
try {
const response = await window.decypharrUtils.fetcher('/api/nzbs');
if (!response.ok) {
throw new Error('Failed to fetch NZBs');
}
const data = await response.json();
this.state.nzbs = data.nzbs || [];
this.updateCategories();
this.applyFilters();
this.renderData();
} catch (error) {
console.error('Error loading NZBs:', error);
window.decypharrUtils.createToast('Error loading NZBs', 'error');
}
}
updateCategories() {
const items = this.state.mode === 'torrents' ? this.state.torrents : this.state.nzbs;
this.state.categories = new Set(items.map(item => item.category).filter(Boolean));
}
applyFilters() {
if (this.state.mode === 'torrents') {
this.filterTorrents();
} else {
this.filterNZBs();
}
}
filterNZBs() {
let filtered = [...this.state.nzbs];
if (this.state.selectedCategory) {
filtered = filtered.filter(n => n.category === this.state.selectedCategory);
}
if (this.state.selectedState) {
filtered = filtered.filter(n => n.status === this.state.selectedState);
}
// Apply sorting
filtered.sort((a, b) => {
switch (this.state.sortBy) {
case 'added_on':
return new Date(b.added_on) - new Date(a.added_on);
case 'added_on_asc':
return new Date(a.added_on) - new Date(b.added_on);
case 'name_asc':
return a.name.localeCompare(b.name);
case 'name_desc':
return b.name.localeCompare(a.name);
case 'size_desc':
return (b.total_size || 0) - (a.total_size || 0);
case 'size_asc':
return (a.total_size || 0) - (b.total_size || 0);
case 'progress_desc':
return (b.progress || 0) - (a.progress || 0);
case 'progress_asc':
return (a.progress || 0) - (b.progress || 0);
default:
return 0;
}
});
this.state.filteredItems = filtered;
}
renderData() {
if (this.state.mode === 'torrents') {
this.renderTorrents();
} else {
this.renderNZBs();
}
}
renderNZBs() {
const startIndex = (this.state.currentPage - 1) * this.state.itemsPerPage;
const endIndex = startIndex + this.state.itemsPerPage;
const pageItems = this.state.filteredItems.slice(startIndex, endIndex);
const tbody = this.refs.dataList;
tbody.innerHTML = '';
if (pageItems.length === 0) {
this.refs.emptyState.classList.remove('hidden');
} else {
this.refs.emptyState.classList.add('hidden');
pageItems.forEach(nzb => {
const row = document.createElement('tr');
row.className = 'hover cursor-pointer';
row.setAttribute('data-id', nzb.id);
row.setAttribute('data-name', nzb.name);
row.setAttribute('data-category', nzb.category || '');
const progressPercent = Math.round(nzb.progress || 0);
const sizeFormatted = this.formatBytes(nzb.total_size || 0);
const etaFormatted = this.formatETA(nzb.eta || 0);
const ageFormatted = this.formatAge(nzb.date_posted);
const statusBadge = this.getStatusBadge(nzb.status);
row.innerHTML = `
<td class="w-12">
<label class="cursor-pointer">
<input type="checkbox" class="checkbox checkbox-sm item-select" data-id="${nzb.id}">
</label>
</td>
<td class="font-medium max-w-xs">
<div class="truncate" title="${nzb.name}">${nzb.name}</div>
</td>
<td>${sizeFormatted}</td>
<td>
<div class="flex items-center gap-2">
<div class="w-16 bg-base-300 rounded-full h-2">
<div class="bg-primary h-2 rounded-full transition-all duration-300" style="width: ${progressPercent}%"></div>
</div>
<span class="text-sm font-medium">${progressPercent}%</span>
</div>
</td>
<td>${etaFormatted}</td>
<td>
<span class="badge badge-ghost badge-sm">${nzb.category || 'N/A'}</span>
</td>
<td>${statusBadge}</td>
<td>${ageFormatted}</td>
<td>
<div class="flex gap-1">
<button class="btn btn-ghost btn-xs" onclick="window.dashboard.deleteNZB('${nzb.id}');" title="Delete">
<i class="bi bi-trash"></i>
</button>
</div>
</td>
`;
tbody.appendChild(row);
});
}
this.updatePagination();
this.updateSelectionUI();
}
getStatusBadge(status) {
const statusMap = {
'downloading': '<span class="badge badge-info badge-sm">Downloading</span>',
'completed': '<span class="badge badge-success badge-sm">Completed</span>',
'paused': '<span class="badge badge-warning badge-sm">Paused</span>',
'failed': '<span class="badge badge-error badge-sm">Failed</span>',
'queued': '<span class="badge badge-ghost badge-sm">Queued</span>',
'processing': '<span class="badge badge-info badge-sm">Processing</span>',
'verifying': '<span class="badge badge-info badge-sm">Verifying</span>',
'repairing': '<span class="badge badge-warning badge-sm">Repairing</span>',
'extracting': '<span class="badge badge-info badge-sm">Extracting</span>'
};
return statusMap[status] || '<span class="badge badge-ghost badge-sm">Unknown</span>';
}
formatETA(seconds) {
if (!seconds || seconds <= 0) return 'N/A';
const hours = Math.floor(seconds / 3600);
const minutes = Math.floor((seconds % 3600) / 60);
if (hours > 0) {
return `${hours}h ${minutes}m`;
} else {
return `${minutes}m`;
}
}
formatAge(datePosted) {
if (!datePosted) return 'N/A';
const now = new Date();
const posted = new Date(datePosted);
const diffMs = now - posted;
const diffDays = Math.floor(diffMs / (1000 * 60 * 60 * 24));
if (diffDays === 0) {
return 'Today';
} else if (diffDays === 1) {
return '1 day';
} else {
return `${diffDays} days`;
}
}
formatBytes(bytes) {
if (!bytes || bytes === 0) return '0 B';
const k = 1024;
const sizes = ['B', 'KB', 'MB', 'GB', 'TB'];
const i = Math.floor(Math.log(bytes) / Math.log(k));
return parseFloat((bytes / Math.pow(k, i)).toFixed(2)) + ' ' + sizes[i];
}
bindContextMenu() {
// Show context menu
this.refs.torrentsList.addEventListener('contextmenu', (e) => {
const row = e.target.closest('tr[data-hash]');
this.refs.dataList.addEventListener('contextmenu', (e) => {
const row = e.target.closest('tr[data-id]');
if (!row) return;
e.preventDefault();
@@ -77,12 +417,14 @@ class TorrentDashboard {
// Hide context menu
document.addEventListener('click', (e) => {
if (!this.refs.torrentContextMenu.contains(e.target)) {
const torrentMenu = this.refs.torrentContextMenu;
const nzbMenu = this.refs.nzbContextMenu;
if (!torrentMenu.contains(e.target) && !nzbMenu.contains(e.target)) {
this.hideContextMenu();
}
});
// Context menu actions
// Context menu actions for torrents
this.refs.torrentContextMenu.addEventListener('click', (e) => {
const action = e.target.closest('[data-action]')?.dataset.action;
if (action) {
@@ -90,37 +432,72 @@ class TorrentDashboard {
this.hideContextMenu();
}
});
// Context menu actions for NZBs
this.refs.nzbContextMenu.addEventListener('click', (e) => {
const action = e.target.closest('[data-action]')?.dataset.action;
if (action) {
this.handleContextAction(action);
this.hideContextMenu();
}
});
}
showContextMenu(event, row) {
this.state.selectedTorrentContextMenu = {
hash: row.dataset.hash,
name: row.dataset.name,
category: row.dataset.category || ''
};
this.refs.torrentContextMenu.querySelector('.torrent-name').textContent =
this.state.selectedTorrentContextMenu.name;
const { pageX, pageY } = event;
const { clientWidth, clientHeight } = document.documentElement;
const menu = this.refs.torrentContextMenu;
if (this.state.mode === 'torrents') {
this.state.selectedItemContextMenu = {
id: row.dataset.hash,
name: row.dataset.name,
category: row.dataset.category || '',
type: 'torrent'
};
// Position the menu
menu.style.left = `${Math.min(pageX, clientWidth - 200)}px`;
menu.style.top = `${Math.min(pageY, clientHeight - 150)}px`;
const menu = this.refs.torrentContextMenu;
menu.querySelector('.torrent-name').textContent = this.state.selectedItemContextMenu.name;
// Position the menu
menu.style.left = `${Math.min(pageX, clientWidth - 200)}px`;
menu.style.top = `${Math.min(pageY, clientHeight - 150)}px`;
menu.classList.remove('hidden');
} else {
this.state.selectedItemContextMenu = {
id: row.dataset.id,
name: row.dataset.name,
category: row.dataset.category || '',
type: 'nzb'
};
menu.classList.remove('hidden');
const menu = this.refs.nzbContextMenu;
menu.querySelector('.nzb-name').textContent = this.state.selectedItemContextMenu.name;
// Position the menu
menu.style.left = `${Math.min(pageX, clientWidth - 200)}px`;
menu.style.top = `${Math.min(pageY, clientHeight - 150)}px`;
menu.classList.remove('hidden');
}
}
hideContextMenu() {
this.refs.torrentContextMenu.classList.add('hidden');
this.state.selectedTorrentContextMenu = null;
this.refs.nzbContextMenu.classList.add('hidden');
this.state.selectedItemContextMenu = null;
}
async handleContextAction(action) {
const torrent = this.state.selectedTorrentContextMenu;
if (!torrent) return;
const item = this.state.selectedItemContextMenu;
if (!item) return;
if (item.type === 'torrent') {
await this.handleTorrentAction(action, item);
} else {
await this.handleNZBAction(action, item);
}
}
async handleTorrentAction(action, torrent) {
const actions = {
'copy-magnet': async () => {
@@ -149,6 +526,87 @@ class TorrentDashboard {
}
}
async handleNZBAction(action, nzb) {
const actions = {
'pause': async () => {
try {
const response = await window.decypharrUtils.fetcher(`/api/nzbs/${nzb.id}/pause`, {
method: 'POST'
});
if (response.ok) {
window.decypharrUtils.createToast('NZB paused successfully');
this.loadData();
} else {
throw new Error('Failed to pause NZB');
}
} catch (error) {
window.decypharrUtils.createToast('Failed to pause NZB', 'error');
}
},
'resume': async () => {
try {
const response = await window.decypharrUtils.fetcher(`/api/nzbs/${nzb.id}/resume`, {
method: 'POST'
});
if (response.ok) {
window.decypharrUtils.createToast('NZB resumed successfully');
this.loadData();
} else {
throw new Error('Failed to resume NZB');
}
} catch (error) {
window.decypharrUtils.createToast('Failed to resume NZB', 'error');
}
},
'retry': async () => {
try {
const response = await window.decypharrUtils.fetcher(`/api/nzbs/${nzb.id}/retry`, {
method: 'POST'
});
if (response.ok) {
window.decypharrUtils.createToast('NZB retry started successfully');
this.loadData();
} else {
throw new Error('Failed to retry NZB');
}
} catch (error) {
window.decypharrUtils.createToast('Failed to retry NZB', 'error');
}
},
'copy-name': async () => {
try {
await navigator.clipboard.writeText(nzb.name);
window.decypharrUtils.createToast('NZB name copied to clipboard');
} catch (error) {
window.decypharrUtils.createToast('Failed to copy NZB name', 'error');
}
},
'delete': async () => {
await this.deleteNZB(nzb.id);
}
};
if (actions[action]) {
await actions[action]();
}
}
async deleteNZB(nzbId) {
try {
const response = await window.decypharrUtils.fetcher(`/api/nzbs/${nzbId}`, {
method: 'DELETE'
});
if (response.ok) {
window.decypharrUtils.createToast('NZB deleted successfully');
this.loadData();
} else {
throw new Error('Failed to delete NZB');
}
} catch (error) {
window.decypharrUtils.createToast('Failed to delete NZB', 'error');
}
}
async loadTorrents() {
try {
// Show loading state
@@ -173,14 +631,14 @@ class TorrentDashboard {
}
updateUI() {
// Filter torrents
this.filterTorrents();
// Apply filters based on current mode
this.applyFilters();
// Update category dropdown
this.updateCategoryFilter();
// Render torrents table
this.renderTorrents();
// Render data table
this.renderData();
// Update pagination
this.updatePagination();
@@ -206,7 +664,7 @@ class TorrentDashboard {
// Sort torrents
filtered = this.sortTorrents(filtered);
this.state.filteredTorrents = filtered;
this.state.filteredItems = filtered;
}
sortTorrents(torrents) {
@@ -253,27 +711,27 @@ class TorrentDashboard {
renderTorrents() {
const startIndex = (this.state.currentPage - 1) * this.state.itemsPerPage;
const endIndex = Math.min(startIndex + this.state.itemsPerPage, this.state.filteredTorrents.length);
const pageItems = this.state.filteredTorrents.slice(startIndex, endIndex);
const endIndex = Math.min(startIndex + this.state.itemsPerPage, this.state.filteredItems.length);
const pageItems = this.state.filteredItems.slice(startIndex, endIndex);
this.refs.torrentsList.innerHTML = pageItems.map(torrent => this.torrentRowTemplate(torrent)).join('');
this.refs.dataList.innerHTML = pageItems.map(torrent => this.torrentRowTemplate(torrent)).join('');
}
torrentRowTemplate(torrent) {
const progressPercent = (torrent.progress * 100).toFixed(1);
const isSelected = this.state.selectedTorrents.has(torrent.hash);
const isSelected = this.state.selectedItems.has(torrent.hash);
let addedOn = new Date(torrent.added_on).toLocaleString();
return `
<tr data-hash="${torrent.hash}"
<tr data-id="${torrent.hash}"
data-name="${this.escapeHtml(torrent.name)}"
data-category="${torrent.category || ''}"
class="hover:bg-base-200 transition-colors">
<td>
<label class="cursor-pointer">
<input type="checkbox"
class="checkbox checkbox-sm torrent-select"
data-hash="${torrent.hash}"
class="checkbox checkbox-sm item-select"
data-id="${torrent.hash}"
${isSelected ? 'checked' : ''}>
</label>
</td>
@@ -358,13 +816,13 @@ class TorrentDashboard {
}
updatePagination() {
const totalPages = Math.ceil(this.state.filteredTorrents.length / this.state.itemsPerPage);
const totalPages = Math.ceil(this.state.filteredItems.length / this.state.itemsPerPage);
const startIndex = (this.state.currentPage - 1) * this.state.itemsPerPage;
const endIndex = Math.min(startIndex + this.state.itemsPerPage, this.state.filteredTorrents.length);
const endIndex = Math.min(startIndex + this.state.itemsPerPage, this.state.filteredItems.length);
// Update pagination info
this.refs.paginationInfo.textContent =
`Showing ${this.state.filteredTorrents.length > 0 ? startIndex + 1 : 0}-${endIndex} of ${this.state.filteredTorrents.length} torrents`;
`Showing ${this.state.filteredItems.length > 0 ? startIndex + 1 : 0}-${endIndex} of ${this.state.filteredItems.length} torrents`;
// Clear pagination controls
this.refs.paginationControls.innerHTML = '';
@@ -412,33 +870,42 @@ class TorrentDashboard {
updateSelectionUI() {
// Clean up selected torrents that no longer exist
const currentHashes = new Set(this.state.filteredTorrents.map(t => t.hash));
this.state.selectedTorrents.forEach(hash => {
const currentHashes = new Set(this.state.filteredItems.map(t => t.hash));
this.state.selectedItems.forEach(hash => {
if (!currentHashes.has(hash)) {
this.state.selectedTorrents.delete(hash);
this.state.selectedItems.delete(hash);
}
});
// Update batch delete button
this.refs.batchDeleteBtn.classList.toggle('hidden', this.state.selectedTorrents.size === 0);
this.refs.batchDeleteDebridBtn.classList.toggle('hidden', this.state.selectedTorrents.size === 0);
this.refs.batchDeleteBtn.classList.toggle('hidden', this.state.selectedItems.size === 0);
this.refs.batchDeleteDebridBtn.classList.toggle('hidden', this.state.selectedItems.size === 0);
// Update select all checkbox
const visibleTorrents = this.state.filteredTorrents.slice(
const visibleTorrents = this.state.filteredItems.slice(
(this.state.currentPage - 1) * this.state.itemsPerPage,
this.state.currentPage * this.state.itemsPerPage
);
this.refs.selectAll.checked = visibleTorrents.length > 0 &&
visibleTorrents.every(torrent => this.state.selectedTorrents.has(torrent.hash));
this.refs.selectAll.indeterminate = visibleTorrents.some(torrent => this.state.selectedTorrents.has(torrent.hash)) &&
!visibleTorrents.every(torrent => this.state.selectedTorrents.has(torrent.hash));
visibleTorrents.every(torrent => this.state.selectedItems.has(torrent.hash));
this.refs.selectAll.indeterminate = visibleTorrents.some(torrent => this.state.selectedItems.has(torrent.hash)) &&
!visibleTorrents.every(torrent => this.state.selectedItems.has(torrent.hash));
}
toggleEmptyState() {
const isEmpty = this.state.torrents.length === 0;
this.refs.emptyState.classList.toggle('hidden', !isEmpty);
document.querySelector('.card:has(#torrentsList)').classList.toggle('hidden', isEmpty);
const items = this.state.mode === 'torrents' ? this.state.torrents : this.state.nzbs;
const isEmpty = items.length === 0;
if (this.refs.emptyState) {
this.refs.emptyState.classList.toggle('hidden', !isEmpty);
}
// Find the main data table card and toggle its visibility
const dataTableCard = document.querySelector('.card:has(#dataList)');
if (dataTableCard) {
dataTableCard.classList.toggle('hidden', isEmpty);
}
}
// Event handlers
@@ -459,29 +926,30 @@ class TorrentDashboard {
}
toggleSelectAll(checked) {
const visibleTorrents = this.state.filteredTorrents.slice(
const visibleTorrents = this.state.filteredItems.slice(
(this.state.currentPage - 1) * this.state.itemsPerPage,
this.state.currentPage * this.state.itemsPerPage
);
visibleTorrents.forEach(torrent => {
if (checked) {
this.state.selectedTorrents.add(torrent.hash);
this.state.selectedItems.add(torrent.hash);
} else {
this.state.selectedTorrents.delete(torrent.hash);
this.state.selectedItems.delete(torrent.hash);
}
});
this.updateUI();
}
toggleTorrentSelection(hash, checked) {
toggleItemSelection(id, checked) {
if (checked) {
this.state.selectedTorrents.add(hash);
this.state.selectedItems.add(id);
} else {
this.state.selectedTorrents.delete(hash);
this.state.selectedItems.delete(id);
}
this.updateSelectionUI();
this.updateBatchActions();
}
async deleteTorrent(hash, category, removeFromDebrid = false) {
@@ -504,38 +972,55 @@ class TorrentDashboard {
}
}
async deleteSelectedTorrents(removeFromDebrid = false) {
const count = this.state.selectedTorrents.size;
async deleteSelectedItems(removeFromDebrid = false) {
const count = this.state.selectedItems.size;
if (count === 0) {
window.decypharrUtils.createToast('No torrents selected for deletion', 'warning');
const itemType = this.state.mode === 'torrents' ? 'torrents' : 'NZBs';
window.decypharrUtils.createToast(`No ${itemType} selected for deletion`, 'warning');
return;
}
if (!confirm(`Are you sure you want to delete ${count} torrent${count > 1 ? 's' : ''}${removeFromDebrid ? ' from debrid' : ''}?`)) {
const itemType = this.state.mode === 'torrents' ? 'torrent' : 'NZB';
const itemTypePlural = this.state.mode === 'torrents' ? 'torrents' : 'NZBs';
if (!confirm(`Are you sure you want to delete ${count} ${count > 1 ? itemTypePlural : itemType}${removeFromDebrid ? ' from debrid' : ''}?`)) {
return;
}
try {
const hashes = Array.from(this.state.selectedTorrents).join(',');
const response = await window.decypharrUtils.fetcher(
`/api/torrents/?hashes=${encodeURIComponent(hashes)}&removeFromDebrid=${removeFromDebrid}`,
{ method: 'DELETE' }
);
if (this.state.mode === 'torrents') {
const hashes = Array.from(this.state.selectedItems).join(',');
const response = await window.decypharrUtils.fetcher(
`/api/torrents/?hashes=${encodeURIComponent(hashes)}&removeFromDebrid=${removeFromDebrid}`,
{ method: 'DELETE' }
);
if (!response.ok) throw new Error(await response.text());
if (!response.ok) throw new Error(await response.text());
} else {
// Delete NZBs one by one
const promises = Array.from(this.state.selectedItems).map(id =>
window.decypharrUtils.fetcher(`/api/nzbs/${id}`, { method: 'DELETE' })
);
const responses = await Promise.all(promises);
for (const response of responses) {
if (!response.ok) throw new Error(await response.text());
}
}
window.decypharrUtils.createToast(`${count} torrent${count > 1 ? 's' : ''} deleted successfully`);
this.state.selectedTorrents.clear();
await this.loadTorrents();
window.decypharrUtils.createToast(`${count} ${count > 1 ? itemTypePlural : itemType} deleted successfully`);
this.state.selectedItems.clear();
await this.loadData();
} catch (error) {
console.error('Error deleting torrents:', error);
window.decypharrUtils.createToast(`Failed to delete some torrents: ${error.message}`, 'error');
console.error(`Error deleting ${itemTypePlural}:`, error);
window.decypharrUtils.createToast(`Failed to delete some ${itemTypePlural}: ${error.message}`, 'error');
}
}
startAutoRefresh() {
this.refreshInterval = setInterval(() => {
this.loadTorrents();
this.loadData();
}, 5000);
// Clean up on page unload
@@ -556,4 +1041,54 @@ class TorrentDashboard {
};
return text ? text.replace(/[&<>"']/g, (m) => map[m]) : '';
}
loadModeFromURL() {
const urlParams = new URLSearchParams(window.location.search);
const mode = urlParams.get('mode');
if (mode === 'nzbs' || mode === 'torrents') {
this.state.mode = mode;
} else {
this.state.mode = 'torrents'; // Default mode
}
// Set the initial UI state without triggering reload
this.setModeUI(this.state.mode);
}
setModeUI(mode) {
if (mode === 'torrents') {
this.refs.torrentsMode.classList.remove('btn-outline');
this.refs.torrentsMode.classList.add('btn-primary');
this.refs.nzbsMode.classList.remove('btn-primary');
this.refs.nzbsMode.classList.add('btn-outline');
this.refs.torrentsHeaders.classList.remove('hidden');
this.refs.nzbsHeaders.classList.add('hidden');
this.refs.emptyStateTitle.textContent = 'No Torrents Found';
this.refs.emptyStateMessage.textContent = "You haven't added any torrents yet. Start by adding your first download!";
this.refs.batchDeleteDebridBtn.classList.remove('hidden');
} else {
this.refs.nzbsMode.classList.remove('btn-outline');
this.refs.nzbsMode.classList.add('btn-primary');
this.refs.torrentsMode.classList.remove('btn-primary');
this.refs.torrentsMode.classList.add('btn-outline');
this.refs.nzbsHeaders.classList.remove('hidden');
this.refs.torrentsHeaders.classList.add('hidden');
this.refs.emptyStateTitle.textContent = 'No NZBs Found';
this.refs.emptyStateMessage.textContent = "You haven't added any NZB downloads yet. Start by adding your first NZB!";
this.refs.batchDeleteDebridBtn.classList.add('hidden');
}
}
updateURL(mode) {
const url = new URL(window.location);
url.searchParams.set('mode', mode);
window.history.replaceState({}, '', url);
}
}

View File

@@ -2,16 +2,29 @@
class DownloadManager {
constructor(downloadFolder) {
this.downloadFolder = downloadFolder;
this.currentMode = 'torrent'; // Default mode
this.refs = {
downloadForm: document.getElementById('downloadForm'),
// Mode controls
torrentMode: document.getElementById('torrentMode'),
nzbMode: document.getElementById('nzbMode'),
// Torrent inputs
magnetURI: document.getElementById('magnetURI'),
torrentFiles: document.getElementById('torrentFiles'),
torrentInputs: document.getElementById('torrentInputs'),
// NZB inputs
nzbURLs: document.getElementById('nzbURLs'),
nzbFiles: document.getElementById('nzbFiles'),
nzbInputs: document.getElementById('nzbInputs'),
// Common form elements
arr: document.getElementById('arr'),
downloadAction: document.getElementById('downloadAction'),
downloadUncached: document.getElementById('downloadUncached'),
downloadFolder: document.getElementById('downloadFolder'),
downloadFolderHint: document.getElementById('downloadFolderHint'),
debrid: document.getElementById('debrid'),
submitBtn: document.getElementById('submitDownload'),
submitButtonText: document.getElementById('submitButtonText'),
activeCount: document.getElementById('activeCount'),
completedCount: document.getElementById('completedCount'),
totalSize: document.getElementById('totalSize')
@@ -24,12 +37,17 @@ class DownloadManager {
this.loadSavedOptions();
this.bindEvents();
this.handleMagnetFromURL();
this.loadModeFromURL();
}
bindEvents() {
// Form submission
this.refs.downloadForm.addEventListener('submit', (e) => this.handleSubmit(e));
// Mode switching
this.refs.torrentMode.addEventListener('click', () => this.switchMode('torrent'));
this.refs.nzbMode.addEventListener('click', () => this.switchMode('nzb'));
// Save options on change
this.refs.arr.addEventListener('change', () => this.saveOptions());
this.refs.downloadAction.addEventListener('change', () => this.saveOptions());
@@ -38,6 +56,7 @@ class DownloadManager {
// File input enhancement
this.refs.torrentFiles.addEventListener('change', (e) => this.handleFileSelection(e));
this.refs.nzbFiles.addEventListener('change', (e) => this.handleFileSelection(e));
// Drag and drop
this.setupDragAndDrop();
@@ -48,13 +67,15 @@ class DownloadManager {
category: localStorage.getItem('downloadCategory') || '',
action: localStorage.getItem('downloadAction') || 'symlink',
uncached: localStorage.getItem('downloadUncached') === 'true',
folder: localStorage.getItem('downloadFolder') || this.downloadFolder
folder: localStorage.getItem('downloadFolder') || this.downloadFolder,
mode: localStorage.getItem('downloadMode') || 'torrent'
};
this.refs.arr.value = savedOptions.category;
this.refs.downloadAction.value = savedOptions.action;
this.refs.downloadUncached.checked = savedOptions.uncached;
this.refs.downloadFolder.value = savedOptions.folder;
this.currentMode = savedOptions.mode;
}
saveOptions() {
@@ -62,6 +83,7 @@ class DownloadManager {
localStorage.setItem('downloadAction', this.refs.downloadAction.value);
localStorage.setItem('downloadUncached', this.refs.downloadUncached.checked.toString());
localStorage.setItem('downloadFolder', this.refs.downloadFolder.value);
localStorage.setItem('downloadMode', this.currentMode);
}
handleMagnetFromURL() {
@@ -81,31 +103,57 @@ class DownloadManager {
e.preventDefault();
const formData = new FormData();
let urls = [];
let files = [];
let endpoint = '/api/add';
let itemType = 'torrent';
// Get URLs
const urls = this.refs.magnetURI.value
.split('\n')
.map(url => url.trim())
.filter(url => url.length > 0);
if (this.currentMode === 'torrent') {
// Get torrent URLs
urls = this.refs.magnetURI.value
.split('\n')
.map(url => url.trim())
.filter(url => url.length > 0);
if (urls.length > 0) {
formData.append('urls', urls.join('\n'));
}
if (urls.length > 0) {
formData.append('urls', urls.join('\n'));
}
// Get files
for (let i = 0; i < this.refs.torrentFiles.files.length; i++) {
formData.append('files', this.refs.torrentFiles.files[i]);
// Get torrent files
for (let i = 0; i < this.refs.torrentFiles.files.length; i++) {
formData.append('files', this.refs.torrentFiles.files[i]);
files.push(this.refs.torrentFiles.files[i]);
}
} else if (this.currentMode === 'nzb') {
// Get NZB URLs
urls = this.refs.nzbURLs.value
.split('\n')
.map(url => url.trim())
.filter(url => url.length > 0);
if (urls.length > 0) {
formData.append('nzbUrls', urls.join('\n'));
}
// Get NZB files
for (let i = 0; i < this.refs.nzbFiles.files.length; i++) {
formData.append('nzbFiles', this.refs.nzbFiles.files[i]);
files.push(this.refs.nzbFiles.files[i]);
}
endpoint = '/api/nzbs/add';
itemType = 'NZB';
}
// Validation
const totalItems = urls.length + this.refs.torrentFiles.files.length;
const totalItems = urls.length + files.length;
if (totalItems === 0) {
window.decypharrUtils.createToast('Please provide at least one torrent', 'warning');
window.decypharrUtils.createToast(`Please provide at least one ${itemType}`, 'warning');
return;
}
if (totalItems > 100) {
window.decypharrUtils.createToast('Please submit up to 100 torrents at a time', 'warning');
window.decypharrUtils.createToast(`Please submit up to 100 ${itemType}s at a time`, 'warning');
return;
}
@@ -123,7 +171,7 @@ class DownloadManager {
// Set loading state
window.decypharrUtils.setButtonLoading(this.refs.submitBtn, true);
const response = await window.decypharrUtils.fetcher('/api/add', {
const response = await window.decypharrUtils.fetcher(endpoint, {
method: 'POST',
body: formData,
headers: {} // Remove Content-Type to let browser set it for FormData
@@ -137,19 +185,19 @@ class DownloadManager {
// Handle partial success
if (result.errors && result.errors.length > 0) {
console.log(result.errors);
let errorMessage = ` ${result.errors.join('\n')}`;
if (result.results.length > 0) {
window.decypharrUtils.createToast(
`Added ${result.results.length} torrents with ${result.errors.length} errors`,
`Added ${result.results.length} ${itemType}s with ${result.errors.length} errors \n${errorMessage}`,
'warning'
);
this.showErrorDetails(result.errors);
} else {
window.decypharrUtils.createToast('Failed to add torrents', 'error');
this.showErrorDetails(result.errors);
window.decypharrUtils.createToast(`Failed to add ${itemType}s \n${errorMessage}`, 'error');
}
} else {
window.decypharrUtils.createToast(
`Successfully added ${result.results.length} torrent${result.results.length > 1 ? 's' : ''}!`
`Successfully added ${result.results.length} ${itemType}${result.results.length > 1 ? 's' : ''}!`
);
this.clearForm();
}
@@ -162,22 +210,49 @@ class DownloadManager {
}
}
showErrorDetails(errors) {
// Create a modal or detailed view for errors
const errorList = errors.map(error => `${error}`).join('\n');
console.error('Download errors:', errorList);
switchMode(mode) {
this.currentMode = mode;
this.saveOptions();
this.updateURL(mode);
// You could also show this in a modal for better UX
setTimeout(() => {
if (confirm('Some torrents failed to add. Would you like to see the details?')) {
alert(errorList);
}
}, 1000);
// Update button states
if (mode === 'torrent') {
this.refs.torrentMode.classList.remove('btn-outline');
this.refs.torrentMode.classList.add('btn-primary');
this.refs.nzbMode.classList.remove('btn-primary');
this.refs.nzbMode.classList.add('btn-outline');
// Show/hide sections
this.refs.torrentInputs.classList.remove('hidden');
this.refs.nzbInputs.classList.add('hidden');
// Update UI text
this.refs.submitButtonText.textContent = 'Add to Download Queue';
this.refs.downloadFolderHint.textContent = 'Leave empty to use default qBittorrent folder';
} else {
this.refs.nzbMode.classList.remove('btn-outline');
this.refs.nzbMode.classList.add('btn-primary');
this.refs.torrentMode.classList.remove('btn-primary');
this.refs.torrentMode.classList.add('btn-outline');
// Show/hide sections
this.refs.nzbInputs.classList.remove('hidden');
this.refs.torrentInputs.classList.add('hidden');
// Update UI text
this.refs.submitButtonText.textContent = 'Add to NZB Queue';
this.refs.downloadFolderHint.textContent = 'Leave empty to use default SABnzbd folder';
}
}
clearForm() {
this.refs.magnetURI.value = '';
this.refs.torrentFiles.value = '';
if (this.currentMode === 'torrent') {
this.refs.magnetURI.value = '';
this.refs.torrentFiles.value = '';
} else {
this.refs.nzbURLs.value = '';
this.refs.nzbFiles.value = '';
}
}
handleFileSelection(e) {
@@ -226,20 +301,84 @@ class DownloadManager {
const dt = e.dataTransfer;
const files = dt.files;
// Filter for .torrent files
const torrentFiles = Array.from(files).filter(file =>
file.name.toLowerCase().endsWith('.torrent')
);
if (this.currentMode === 'torrent') {
// Filter for .torrent files
const torrentFiles = Array.from(files).filter(file =>
file.name.toLowerCase().endsWith('.torrent')
);
if (torrentFiles.length > 0) {
// Create a new FileList-like object
const dataTransfer = new DataTransfer();
torrentFiles.forEach(file => dataTransfer.items.add(file));
this.refs.torrentFiles.files = dataTransfer.files;
if (torrentFiles.length > 0) {
// Create a new FileList-like object
const dataTransfer = new DataTransfer();
torrentFiles.forEach(file => dataTransfer.items.add(file));
this.refs.torrentFiles.files = dataTransfer.files;
this.handleFileSelection({ target: { files: torrentFiles } });
this.handleFileSelection({ target: { files: torrentFiles } });
} else {
window.decypharrUtils.createToast('Please drop .torrent files only', 'warning');
}
} else {
window.decypharrUtils.createToast('Please drop .torrent files only', 'warning');
// Filter for .nzb files
const nzbFiles = Array.from(files).filter(file =>
file.name.toLowerCase().endsWith('.nzb')
);
if (nzbFiles.length > 0) {
// Create a new FileList-like object
const dataTransfer = new DataTransfer();
nzbFiles.forEach(file => dataTransfer.items.add(file));
this.refs.nzbFiles.files = dataTransfer.files;
this.handleFileSelection({ target: { files: nzbFiles } });
} else {
window.decypharrUtils.createToast('Please drop .nzb files only', 'warning');
}
}
}
loadModeFromURL() {
const urlParams = new URLSearchParams(window.location.search);
const mode = urlParams.get('mode');
if (mode === 'nzb' || mode === 'torrent') {
this.currentMode = mode;
} else {
this.currentMode = this.currentMode || 'torrent'; // Use saved preference or default
}
// Initialize the mode without updating URL again
this.setModeUI(this.currentMode);
}
setModeUI(mode) {
if (mode === 'torrent') {
this.refs.torrentMode.classList.remove('btn-outline');
this.refs.torrentMode.classList.add('btn-primary');
this.refs.nzbMode.classList.remove('btn-primary');
this.refs.nzbMode.classList.add('btn-outline');
this.refs.torrentInputs.classList.remove('hidden');
this.refs.nzbInputs.classList.add('hidden');
this.refs.submitButtonText.textContent = 'Add to Download Queue';
this.refs.downloadFolderHint.textContent = 'Leave empty to use default qBittorrent folder';
} else {
this.refs.nzbMode.classList.remove('btn-outline');
this.refs.nzbMode.classList.add('btn-primary');
this.refs.torrentMode.classList.remove('btn-primary');
this.refs.torrentMode.classList.add('btn-outline');
this.refs.nzbInputs.classList.remove('hidden');
this.refs.torrentInputs.classList.add('hidden');
this.refs.submitButtonText.textContent = 'Add to NZB Queue';
this.refs.downloadFolderHint.textContent = 'Leave empty to use default SABnzbd folder';
}
}
updateURL(mode) {
const url = new URL(window.location);
url.searchParams.set('mode', mode);
window.history.replaceState({}, '', url);
}
}

View File

@@ -3,8 +3,12 @@ package web
import (
"fmt"
"github.com/sirrobot01/decypharr/pkg/store"
"github.com/sirrobot01/decypharr/pkg/usenet"
"io"
"mime/multipart"
"net/http"
"strings"
"sync"
"time"
"encoding/json"
@@ -28,6 +32,7 @@ func (wb *Web) handleAddContent(w http.ResponseWriter, r *http.Request) {
return
}
_store := store.Get()
cfg := config.Get()
results := make([]*store.ImportRequest, 0)
errs := make([]string, 0)
@@ -37,8 +42,8 @@ func (wb *Web) handleAddContent(w http.ResponseWriter, r *http.Request) {
debridName := r.FormValue("debrid")
callbackUrl := r.FormValue("callbackUrl")
downloadFolder := r.FormValue("downloadFolder")
if downloadFolder == "" {
downloadFolder = config.Get().QBitTorrent.DownloadFolder
if downloadFolder == "" && cfg.QBitTorrent != nil {
downloadFolder = cfg.QBitTorrent.DownloadFolder
}
downloadUncached := r.FormValue("downloadUncached") == "true"
@@ -236,8 +241,6 @@ func (wb *Web) handleUpdateConfig(w http.ResponseWriter, r *http.Request) {
currentConfig.RemoveStalledAfter = updatedConfig.RemoveStalledAfter
currentConfig.AllowedExt = updatedConfig.AllowedExt
currentConfig.DiscordWebhook = updatedConfig.DiscordWebhook
// Should this be added?
currentConfig.URLBase = updatedConfig.URLBase
currentConfig.BindAddress = updatedConfig.BindAddress
currentConfig.Port = updatedConfig.Port
@@ -251,9 +254,11 @@ func (wb *Web) handleUpdateConfig(w http.ResponseWriter, r *http.Request) {
// Update Debrids
if len(updatedConfig.Debrids) > 0 {
currentConfig.Debrids = updatedConfig.Debrids
// Clear legacy single debrid if using array
}
currentConfig.Usenet = updatedConfig.Usenet
currentConfig.SABnzbd = updatedConfig.SABnzbd
// Update Arrs through the service
storage := store.Get()
arrStorage := storage.Arr()
@@ -359,3 +364,198 @@ func (wb *Web) handleStopRepairJob(w http.ResponseWriter, r *http.Request) {
}
w.WriteHeader(http.StatusOK)
}
// NZB API Handlers
// handleGetNZBs returns the usenet queue as JSON, optionally filtered by the
// "status" and "category" query parameters. An empty parameter matches all
// entries. The response carries the matched items plus their count.
func (wb *Web) handleGetNZBs(w http.ResponseWriter, r *http.Request) {
	wantStatus := r.URL.Query().Get("status")
	wantCategory := r.URL.Query().Get("category")

	queue := wb.usenet.Store().GetQueue()
	matched := make([]*usenet.NZB, 0, len(queue))
	for _, item := range queue {
		if wantStatus != "" && item.Status != wantStatus {
			continue
		}
		if wantCategory != "" && item.Category != wantCategory {
			continue
		}
		matched = append(matched, item)
	}

	request.JSONResponse(w, map[string]interface{}{
		"nzbs":  matched,
		"count": len(matched),
	}, http.StatusOK)
}
// handleDeleteNZB removes the NZB identified by the {id} URL parameter from
// the usenet queue and responds with a JSON success payload.
func (wb *Web) handleDeleteNZB(w http.ResponseWriter, r *http.Request) {
	id := chi.URLParam(r, "id")
	if id == "" {
		http.Error(w, "No NZB ID provided", http.StatusBadRequest)
		return
	}

	wb.usenet.Store().RemoveFromQueue(id)
	wb.logger.Info().Str("nzb_id", id).Msg("NZB delete requested")
	request.JSONResponse(w, map[string]string{"status": "success"}, http.StatusOK)
}
// handleAddNZBContent accepts NZB URLs (form field "nzbUrls", one per line)
// and/or uploaded .nzb files (multipart field "nzbFiles") and submits each to
// the usenet service for processing. Items are processed concurrently; the
// JSON response reports per-item successes and errors so partial failures are
// visible to the caller.
func (wb *Web) handleAddNZBContent(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	cfg := config.Get()
	_store := store.Get()
	if err := r.ParseMultipartForm(32 << 20); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	// results and errs are appended to from multiple goroutines below, so all
	// access goes through mu (the previous version raced on these slices).
	var mu sync.Mutex
	results := make([]interface{}, 0)
	errs := make([]string, 0)
	addResult := func(res interface{}) {
		mu.Lock()
		results = append(results, res)
		mu.Unlock()
	}
	addError := func(msg string) {
		mu.Lock()
		errs = append(errs, msg)
		mu.Unlock()
	}

	arrName := r.FormValue("arr")
	action := r.FormValue("action")
	downloadFolder := r.FormValue("downloadFolder")
	if downloadFolder == "" {
		// NOTE(review): panics if cfg.SABnzbd can be nil — confirm it is always
		// populated (handleAddContent nil-checks cfg.QBitTorrent similarly).
		downloadFolder = cfg.SABnzbd.DownloadFolder
	}

	_arr := _store.Arr().Get(arrName)
	if _arr == nil {
		// These are not found in the config. They are throwaway arrs.
		_arr = arr.New(arrName, "", "", false, false, nil, "", "")
	}

	// Split the textarea field into trimmed, non-empty URLs.
	urlList := make([]string, 0)
	if raw := r.FormValue("nzbUrls"); raw != "" {
		for _, u := range strings.Split(raw, "\n") {
			if trimmed := strings.TrimSpace(u); trimmed != "" {
				urlList = append(urlList, trimmed)
			}
		}
	}

	files := r.MultipartForm.File["nzbFiles"]
	if len(files)+len(urlList) == 0 {
		request.JSONResponse(w, map[string]any{
			"results": nil,
			"errors":  "No NZB URLs or files provided",
		}, http.StatusBadRequest)
		return
	}

	var wg sync.WaitGroup

	// Fetch and process each URL concurrently.
	for _, url := range urlList {
		wg.Add(1)
		go func(url string) {
			defer wg.Done()
			select {
			case <-ctx.Done():
				return // request cancelled; stop early
			default:
			}
			if !strings.HasPrefix(url, "http://") && !strings.HasPrefix(url, "https://") {
				addError(fmt.Sprintf("Invalid URL format: %s", url))
				return
			}
			// Download the NZB file from the URL
			filename, content, err := utils.DownloadFile(url)
			if err != nil {
				wb.logger.Error().Err(err).Str("url", url).Msg("Failed to download NZB from URL")
				addError(fmt.Sprintf("Failed to download NZB from URL %s: %v", url, err))
				return // continue processing other URLs
			}
			nzb, err := wb.usenet.ProcessNZB(ctx, &usenet.ProcessRequest{
				NZBContent:  content,
				Name:        filename,
				Arr:         _arr,
				Action:      action,
				DownloadDir: downloadFolder,
			})
			if err != nil {
				addError(fmt.Sprintf("Failed to process NZB from URL %s: %v", url, err))
				return
			}
			wb.logger.Info().Str("nzb_id", nzb.ID).Str("url", url).Msg("NZB added from URL")
			addResult(map[string]interface{}{
				"id":       nzb.ID,
				"name":     "NZB from URL",
				"url":      url,
				"category": arrName,
			})
		}(url)
	}

	// Process each uploaded NZB file concurrently.
	for _, fileHeader := range files {
		wg.Add(1)
		go func(fileHeader *multipart.FileHeader) {
			defer wg.Done()
			select {
			case <-ctx.Done():
				return
			default:
			}
			file, err := fileHeader.Open()
			if err != nil {
				addError(fmt.Sprintf("failed to open NZB file %s: %v", fileHeader.Filename, err))
				return
			}
			defer file.Close()
			content, err := io.ReadAll(file)
			if err != nil {
				addError(fmt.Sprintf("failed to read NZB file %s: %v", fileHeader.Filename, err))
				return
			}
			nzb, err := wb.usenet.ProcessNZB(ctx, &usenet.ProcessRequest{
				NZBContent:  content,
				Name:        fileHeader.Filename,
				Arr:         _arr,
				Action:      action,
				DownloadDir: downloadFolder,
			})
			if err != nil {
				addError(fmt.Sprintf("failed to process NZB file %s: %v", fileHeader.Filename, err))
				return
			}
			wb.logger.Info().Str("nzb_id", nzb.ID).Str("file", fileHeader.Filename).Msg("NZB added from file")
			addResult(map[string]interface{}{
				"id":       nzb.ID,
				"name":     fileHeader.Filename,
				"filename": fileHeader.Filename,
				"category": arrName,
			})
		}(fileHeader)
	}

	// Wait for all goroutines to finish
	wg.Wait()

	// Defensive: nothing succeeded and nothing errored (should be unreachable
	// given the totalItems > 0 check above).
	if len(results) == 0 && len(errs) == 0 {
		request.JSONResponse(w, map[string]any{
			"results": nil,
			"errors":  "No NZB URLs or files processed successfully",
		}, http.StatusBadRequest)
		return
	}

	request.JSONResponse(w, struct {
		Results []interface{} `json:"results"`
		Errors  []string      `json:"errors,omitempty"`
	}{
		Results: results,
		Errors:  errs,
	}, http.StatusOK)
}

View File

@@ -47,6 +47,9 @@ func (wb *Web) Routes() http.Handler {
r.Get("/torrents", wb.handleGetTorrents)
r.Delete("/torrents/{category}/{hash}", wb.handleDeleteTorrent)
r.Delete("/torrents/", wb.handleDeleteTorrents)
r.Get("/nzbs", wb.handleGetNZBs)
r.Post("/nzbs/add", wb.handleAddNZBContent)
r.Delete("/nzbs/{id}", wb.handleDeleteNZB)
r.Get("/config", wb.handleGetConfig)
r.Post("/config", wb.handleUpdateConfig)
})

View File

@@ -24,6 +24,14 @@
<i class="bi bi-collection text-lg"></i>
<span class="hidden sm:inline">*Arrs</span>
</button>
<button type="button" class="tab-button flex items-center gap-2 py-3 px-1 border-b-2 border-transparent text-base-content/70 hover:text-base-content hover:border-base-300 font-medium text-sm transition-colors" data-tab="usenet">
<i class="bi bi-globe text-lg"></i>
<span class="hidden sm:inline">Usenet</span>
</button>
<button type="button" class="tab-button flex items-center gap-2 py-3 px-1 border-b-2 border-transparent text-base-content/70 hover:text-base-content hover:border-base-300 font-medium text-sm transition-colors" data-tab="sabnzbd">
<i class="bi bi-download text-lg"></i>
<span class="hidden sm:inline">SABnzbd</span>
</button>
<button type="button" class="tab-button flex items-center gap-2 py-3 px-1 border-b-2 border-transparent text-base-content/70 hover:text-base-content hover:border-base-300 font-medium text-sm transition-colors" data-tab="repair">
<i class="bi bi-wrench text-lg"></i>
<span class="hidden sm:inline">Repair</span>
@@ -328,6 +336,146 @@
</div>
</div>
<!-- Usenet Tab Content -->
<div class="tab-content hidden" data-tab-content="usenet">
<div class="space-y-6">
<h2 class="text-2xl font-bold flex items-center mb-6">
<i class="bi bi-globe mr-3 text-info"></i>Usenet Settings
</h2>
<!-- Global Usenet Settings -->
<div class="card bg-base-100 border border-base-300 shadow-sm">
<div class="card-body">
<h3 class="card-title text-lg mb-4">
<i class="bi bi-folder mr-2 text-info"></i>
Main Settings
</h3>
<div class="grid grid-cols-1 lg:grid-cols-2 gap-4">
<div class="form-control">
<label class="label" for="usenet.mount_folder">
<span class="label-text font-medium">Mount Folder</span>
</label>
<input type="text" class="input input-bordered"
name="usenet.mount_folder" id="usenet.mount_folder"
placeholder="/mnt/usenet">
<div class="label">
<span class="label-text-alt">Path where usenet downloads are mounted</span>
</div>
</div>
<div class="grid grid-cols-1 lg:grid-cols-2 gap-4">
<div class="form-control">
<label class="label" for="usenet.chunks">
<span class="label-text font-medium">Download Chunks</span>
</label>
<input type="text" class="input input-bordered"
name="usenet.chunks" id="usenet.chunks"
placeholder="30">
<div class="label">
<span class="label-text-alt">Number of chunks to pre-cache (default: 5)</span>
</div>
</div>
<div class="form-control">
<label class="label cursor-pointer justify-start gap-3">
<input type="checkbox" class="checkbox" name="usenet.skip_pre_cache" id="usenet.skip_pre_cache">
<div>
<span class="label-text font-medium">Skip Pre-Cache</span>
<div class="label-text-alt">Skipping the pre-cache speeds up import</div>
</div>
</label>
</div>
</div>
</div>
<div class="grid grid-cols-1 lg:grid-cols-3 gap-4">
<div class="form-control">
<label class="label" for="usenet.rc_url">
<span class="label-text font-medium">Rclone RC URL</span>
</label>
<input type="text" class="input input-bordered"
name="usenet.rc_url" id="usenet.rc_url"
placeholder="http://rclone-usenet:9990">
<div class="label">
<span class="label-text-alt">Rclone RC URL</span>
</div>
</div>
<div class="form-control">
<label class="label" for="usenet.rc_user">
<span class="label-text font-medium">Rclone RC Username</span>
</label>
<input type="text" class="input input-bordered"
name="usenet.rc_user" id="usenet.rc_user"
placeholder="rcuser">
<div class="label">
<span class="label-text-alt">Rclone RC Username</span>
</div>
</div>
<div class="form-control">
<label class="label" for="usenet.rc_pass">
<span class="label-text font-medium">Rclone RC Password</span>
</label>
<div class="password-toggle-container">
<input autocomplete="off" type="password" class="input input-bordered webdav-field input-has-toggle"
name="usenet.rc_pass" id="usenet.rc_pass">
<button type="button" class="password-toggle-btn">
<i class="bi bi-eye" id="usenet.rc_pass_icon"></i>
</button>
</div>
</div>
</div>
</div>
</div>
<!-- Usenet Servers Section -->
<div class="flex justify-between items-center">
<h3 class="text-xl font-bold flex items-center">
<i class="bi bi-server mr-2 text-info"></i>Usenet Servers
</h3>
<button type="button" id="addUsenetBtn" class="btn btn-info">
<i class="bi bi-plus mr-2"></i>Add Usenet Server
</button>
</div>
<div id="usenetConfigs" class="space-y-4">
<!-- Dynamic usenet configurations will be added here -->
</div>
</div>
</div>
<!-- SABnzbd Tab Content -->
<div class="tab-content hidden" data-tab-content="sabnzbd">
<div class="space-y-6">
<h2 class="text-2xl font-bold flex items-center mb-6">
<i class="bi bi-download mr-3 text-accent"></i>SABnzbd Settings
</h2>
<div class="grid grid-cols-1 lg:grid-cols-2 gap-4">
<div class="form-control">
<label class="label" for="sabnzbd.download_folder">
<span class="label-text font-medium">Download Folder</span>
</label>
<input type="text" class="input input-bordered" name="sabnzbd.download_folder" id="sabnzbd.download_folder" placeholder="/downloads/sabnzbd">
<div class="label">
<span class="label-text-alt">Folder where SABnzbd downloads files</span>
</div>
</div>
<div class="form-control">
<label class="label" for="sabnzbd.refresh_interval">
<span class="label-text font-medium">Refresh Interval (seconds)</span>
</label>
<input type="number" class="input input-bordered" name="sabnzbd.refresh_interval" id="sabnzbd.refresh_interval" min="1" max="3600">
</div>
<div class="form-control">
<label class="label" for="sabnzbd.categories">
<span class="label-text font-medium">Default Categories</span>
</label>
<input type="text" class="input input-bordered" name="sabnzbd.categories" id="sabnzbd.categories">
</div>
</div>
</div>
</div>
</div> <!-- End tab-content-container -->
</div>
</div>

View File

@@ -4,8 +4,20 @@
<div class="card bg-base-100 shadow-xl">
<div class="card-body">
<form id="downloadForm" enctype="multipart/form-data" class="space-y-3">
<!-- Mode Selection -->
<div class="flex justify-center mb-4">
<div class="join">
<button type="button" class="btn btn-primary join-item" id="torrentMode" data-mode="torrent">
<i class="bi bi-magnet mr-2"></i>Torrents
</button>
<button type="button" class="btn btn-outline join-item" id="nzbMode" data-mode="nzb">
<i class="bi bi-file-zip mr-2"></i>NZBs
</button>
</div>
</div>
<!-- Torrent Input Section -->
<div class="space-y-2">
<div class="space-y-2" id="torrentInputs">
<div class="form-control">
<label class="label" for="magnetURI">
<span class="label-text font-semibold">
@@ -42,6 +54,44 @@
</div>
</div>
<!-- NZB Input Section -->
<div class="space-y-2 hidden" id="nzbInputs">
<div class="form-control">
<label class="label" for="nzbURLs">
<span class="label-text font-semibold">
<i class="bi bi-link-45deg mr-2 text-primary"></i>NZB URLs
</span>
<span class="label-text-alt">Paste NZB download URLs</span>
</label>
<textarea class="textarea textarea-bordered h-32 font-mono text-sm"
id="nzbURLs"
name="nzbUrls"
placeholder="Paste your NZB URLs here, one per line..."></textarea>
</div>
<div class="divider">OR</div>
<div class="form-control">
<label class="label">
<span class="label-text font-semibold">
<i class="bi bi-file-earmark-arrow-up mr-2 text-secondary"></i>Upload NZB Files
</span>
<span class="label-text-alt">Select .nzb files</span>
</label>
<input type="file"
class="file-input file-input-bordered w-full"
id="nzbFiles"
name="nzbs"
multiple
accept=".nzb">
<div class="label">
<span class="label-text-alt">
<i class="bi bi-info-circle mr-1"></i>You can select multiple files at once
</span>
</div>
</div>
</div>
<div class="divider"></div>
<!-- Configuration Section -->
@@ -75,7 +125,7 @@
name="downloadFolder"
placeholder="/downloads/torrents">
<div class="label">
<span class="label-text-alt">Leave empty to use default qBittorrent folder</span>
<span class="label-text-alt" id="downloadFolderHint">Leave empty to use default qBittorrent folder</span>
</div>
</div>
</div>
@@ -131,7 +181,7 @@
<!-- Submit Button -->
<div class="form-control">
<button type="submit" class="btn btn-primary btn-lg" id="submitDownload">
<i class="bi bi-cloud-upload mr-2"></i>Add to Download Queue
<i class="bi bi-cloud-upload mr-2"></i><span id="submitButtonText">Add to Download Queue</span>
</button>
</div>
</form>

View File

@@ -4,6 +4,18 @@
<!-- Controls Section -->
<div class="card bg-base-100 shadow-xl">
<div class="card-body">
<!-- Dashboard Mode Toggle -->
<div class="flex justify-center mb-4">
<div class="join">
<button class="btn btn-primary join-item" id="torrentsMode" data-mode="torrents">
<i class="bi bi-magnet mr-2"></i>Torrents
</button>
<button class="btn btn-outline join-item" id="nzbsMode" data-mode="nzbs">
<i class="bi bi-file-zip mr-2"></i>NZBs
</button>
</div>
</div>
<div class="flex flex-col lg:flex-row justify-between items-start lg:items-center gap-4">
<!-- Batch Actions -->
<div class="flex items-center gap-2">
@@ -47,12 +59,13 @@
</div>
</div>
<!-- Torrents Table -->
<!-- Data Table -->
<div class="card bg-base-100 shadow-xl">
<div class="card-body p-0">
<div class="overflow-x-auto">
<table class="table table-hover">
<thead class="bg-base-200">
<!-- Torrents Headers -->
<thead class="bg-base-200" id="torrentsHeaders">
<tr>
<th class="w-12">
<label class="cursor-pointer">
@@ -86,7 +99,41 @@
<th class="font-semibold w-32">Actions</th>
</tr>
</thead>
<tbody id="torrentsList">
<!-- NZBs Headers -->
<thead class="bg-base-200 hidden" id="nzbsHeaders">
<tr>
<th class="w-12">
<label class="cursor-pointer">
<input type="checkbox" class="checkbox checkbox-sm" id="selectAllNzb">
</label>
</th>
<th class="font-semibold">
<i class="bi bi-file-zip mr-2"></i>Name
</th>
<th class="font-semibold">
<i class="bi bi-hdd mr-2"></i>Size
</th>
<th class="font-semibold">
<i class="bi bi-speedometer2 mr-2"></i>Progress
</th>
<th class="font-semibold">
<i class="bi bi-clock mr-2"></i>ETA
</th>
<th class="font-semibold">
<i class="bi bi-tag mr-2"></i>Category
</th>
<th class="font-semibold">
<i class="bi bi-activity mr-2"></i>Status
</th>
<th class="font-semibold">
<i class="bi bi-calendar mr-2"></i>Age
</th>
<th class="font-semibold w-32">Actions</th>
</tr>
</thead>
<tbody id="dataList">
<!-- Dynamic content will be loaded here -->
</tbody>
</table>
@@ -95,7 +142,7 @@
<!-- Pagination -->
<div class="flex flex-col sm:flex-row justify-between items-center p-6 border-t border-base-200 gap-4">
<div class="text-sm text-base-content/70">
<span id="paginationInfo">Loading torrents...</span>
<span id="paginationInfo">Loading data...</span>
</div>
<div class="join" id="paginationControls"></div>
</div>
@@ -108,8 +155,8 @@
<div class="text-6xl text-base-content/30 mb-4">
<i class="bi bi-inbox"></i>
</div>
<h3 class="text-2xl font-bold mb-2">No Torrents Found</h3>
<p class="text-base-content/70 mb-6">You haven't added any torrents yet. Start by adding your first download!</p>
<h3 class="text-2xl font-bold mb-2" id="emptyStateTitle">No Data Found</h3>
<p class="text-base-content/70 mb-6" id="emptyStateMessage">No downloads found.</p>
<a href="{{.URLBase}}download" class="btn btn-primary">
<i class="bi bi-plus-circle mr-2"></i>Add New Download
</a>
@@ -117,7 +164,7 @@
</div>
</div>
<!-- Context Menu -->
<!-- Torrent Context Menu -->
<ul class="menu bg-base-100 shadow-lg rounded-box context-menu hidden fixed z-50" id="torrentContextMenu">
<li class="menu-title">
<span class="torrent-name text-sm font-bold truncate max-w-48"></span>
@@ -135,9 +182,33 @@
</a></li>
</ul>
<!-- NZB Context Menu -->
<ul class="menu bg-base-100 shadow-lg rounded-box context-menu hidden fixed z-50" id="nzbContextMenu">
<li class="menu-title">
<span class="nzb-name text-sm font-bold truncate max-w-48"></span>
</li>
<hr/>
<li><a class="menu-item text-sm" data-action="pause">
<i class="bi bi-pause text-warning"></i>Pause Download
</a></li>
<li><a class="menu-item text-sm" data-action="resume">
<i class="bi bi-play text-success"></i>Resume Download
</a></li>
<li><a class="menu-item text-sm" data-action="retry">
<i class="bi bi-arrow-clockwise text-info"></i>Retry Download
</a></li>
<li><a class="menu-item text-sm" data-action="copy-name">
<i class="bi bi-clipboard text-info"></i>Copy Name
</a></li>
<hr/>
<li><a class="menu-item text-sm text-error" data-action="delete">
<i class="bi bi-trash"></i>Delete NZB
</a></li>
</ul>
<script>
document.addEventListener('DOMContentLoaded', () => {
window.dashboard = new TorrentDashboard();
window.dashboard = new Dashboard();
});
</script>
{{ end }}

View File

@@ -126,13 +126,17 @@ func (wb *Web) DownloadHandler(w http.ResponseWriter, r *http.Request) {
for _, d := range cfg.Debrids {
debrids = append(debrids, d.Name)
}
downloadFolder := ""
if cfg.QBitTorrent != nil {
downloadFolder = cfg.QBitTorrent.DownloadFolder
}
data := map[string]interface{}{
"URLBase": cfg.URLBase,
"Page": "download",
"Title": "Download",
"Debrids": debrids,
"HasMultiDebrid": len(debrids) > 1,
"DownloadFolder": cfg.QBitTorrent.DownloadFolder,
"DownloadFolder": downloadFolder,
}
_ = wb.templates.ExecuteTemplate(w, "layout", data)
}

View File

@@ -7,6 +7,7 @@ import (
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/logger"
"github.com/sirrobot01/decypharr/pkg/store"
"github.com/sirrobot01/decypharr/pkg/usenet"
"html/template"
"os"
)
@@ -61,9 +62,10 @@ type Web struct {
cookie *sessions.CookieStore
templates *template.Template
torrents *store.TorrentStorage
usenet usenet.Usenet
}
func New() *Web {
func New(usenet usenet.Usenet) *Web {
templates := template.Must(template.ParseFS(
content,
"templates/layout.html",
@@ -86,5 +88,6 @@ func New() *Web {
templates: templates,
cookie: cookieStore,
torrents: store.Get().Torrents(),
usenet: usenet,
}
}

View File

@@ -1,472 +1,8 @@
package webdav
import (
"crypto/tls"
"fmt"
"io"
"net/http"
"os"
"strings"
"time"
"github.com/sirrobot01/decypharr/pkg/debrid/store"
)
// streamingTransport is an HTTP/1.1-only transport used for long-lived
// upstream streaming downloads. Keep-alives are disabled so each stream gets
// (and releases) its own connection, and HTTP/2 is fully blocked.
// NOTE(review): InsecureSkipVerify disables upstream TLS certificate
// verification — confirm this is intentional.
var streamingTransport = &http.Transport{
	TLSClientConfig:       &tls.Config{InsecureSkipVerify: true},
	MaxIdleConns:          200,
	MaxIdleConnsPerHost:   100,
	MaxConnsPerHost:       200,
	IdleConnTimeout:       90 * time.Second,
	TLSHandshakeTimeout:   10 * time.Second,
	ResponseHeaderTimeout: 60 * time.Second, // give the upstream a minute to send headers
	ExpectContinueTimeout: 1 * time.Second,
	DisableKeepAlives:     true,  // close after each request
	ForceAttemptHTTP2:     false, // dont speak HTTP/2
	// this line is what truly blocks HTTP/2:
	TLSNextProto: make(map[string]func(string, *tls.Conn) http.RoundTripper),
}

// sharedClient is the process-wide client for all upstream streaming
// requests. Timeout is 0 (no overall deadline) because a stream can run for
// a long time; per-phase limits live on the transport above.
var sharedClient = &http.Client{
	Transport: streamingTransport,
	Timeout:   0,
}

// streamError wraps a streaming failure together with the HTTP status code
// that should be reported to the client. IsClientDisconnection marks errors
// caused by the client going away rather than by the upstream.
type streamError struct {
	Err                   error
	StatusCode            int
	IsClientDisconnection bool
}

// Error implements the error interface by delegating to the wrapped error.
func (e *streamError) Error() string {
	return e.Err.Error()
}

// Unwrap exposes the underlying error for errors.Is / errors.As.
func (e *streamError) Unwrap() error {
	return e.Err
}
// File is a WebDAV-served entry (file or directory) backed by a debrid
// cache. Regular files are streamed from an upstream download link resolved
// via the cache; directories carry their children; entries with non-nil
// content are served from memory instead of upstream.
type File struct {
	name         string // entry name used for cache lookups
	torrentName  string // owning torrent's name (cache lookup key)
	link         string // debrid link used to resolve a download URL
	downloadLink string // cached resolved download URL (may become invalid)
	size         int64  // size in bytes
	isDir        bool   // true for directory entries
	fileId       string // file identifier (not referenced by the methods in this chunk)
	isRar        bool   // whether the entry is a RAR archive (not referenced here)
	metadataOnly bool   // entry exists for metadata purposes only (not referenced here)
	content      []byte // preloaded content; nil means stream from upstream
	children     []os.FileInfo // For directories
	cache        *store.Cache  // debrid cache used to resolve and invalidate links
	modTime      time.Time     // modification time
	// Minimal state for interface compliance only
	readOffset int64 // Only used for Read() method compliance
}
// File interface implementations for File
// Close releases per-open state held by the entry. It never fails: there is
// no underlying OS handle — the method exists only to satisfy the file
// interface.
func (f *File) Close() error {
	if !f.isDir {
		// Drop references to buffered data and reset the read cursor so the
		// memory can be reclaimed.
		f.content = nil
		f.children = nil
		f.downloadLink = ""
		f.readOffset = 0
	}
	return nil
}
// getDownloadLink resolves a usable download URL for this file, preferring a
// previously cached one. It returns os.ErrNotExist when the cache cannot
// produce a valid URL.
func (f *File) getDownloadLink() (string, error) {
	// Fast path: reuse the cached URL while it still looks valid.
	if f.downloadLink != "" && isValidURL(f.downloadLink) {
		return f.downloadLink, nil
	}

	link, err := f.cache.GetDownloadLink(f.torrentName, f.name, f.link)
	if err != nil {
		return "", err
	}
	if link == "" || !isValidURL(link) {
		return "", os.ErrNotExist
	}

	// Remember the resolved URL for subsequent requests.
	f.downloadLink = link
	return link, nil
}
// getDownloadByteRange returns the [start, end] byte range reported by the
// cache for this file, or an error when the lookup fails.
func (f *File) getDownloadByteRange() (*[2]int64, error) {
	br, err := f.cache.GetDownloadByteRange(f.torrentName, f.name)
	if err != nil {
		return nil, err
	}
	return br, nil
}
// servePreloadedContent writes f.content directly to the client, honoring a
// single HTTP Range request when one is present. Multi-range or malformed
// range headers yield a 416 streamError.
func (f *File) servePreloadedContent(w http.ResponseWriter, r *http.Request) error {
	data := f.content
	total := int64(len(data))

	rangeHeader := r.Header.Get("Range")
	if rangeHeader == "" {
		// No range requested: send the whole body.
		w.Header().Set("Content-Length", fmt.Sprintf("%d", total))
		w.Header().Set("Accept-Ranges", "bytes")
		w.WriteHeader(http.StatusOK)
		_, err := w.Write(data)
		return err
	}

	ranges, err := parseRange(rangeHeader, total)
	if err != nil || len(ranges) != 1 {
		// Only a single satisfiable range is supported.
		w.Header().Set("Content-Range", fmt.Sprintf("bytes */%d", total))
		return &streamError{Err: fmt.Errorf("invalid range"), StatusCode: http.StatusRequestedRangeNotSatisfiable}
	}

	start, end := ranges[0].start, ranges[0].end
	w.Header().Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", start, end, total))
	w.Header().Set("Content-Length", fmt.Sprintf("%d", end-start+1))
	w.Header().Set("Accept-Ranges", "bytes")
	w.WriteHeader(http.StatusPartialContent)
	_, err = w.Write(data[start : end+1])
	return err
}
// StreamResponse serves the file body to the client: preloaded content is
// written directly from memory, everything else is proxied from the upstream
// download link with retry handling.
func (f *File) StreamResponse(w http.ResponseWriter, r *http.Request) error {
	if f.content == nil {
		return f.streamWithRetry(w, r, 0)
	}
	return f.servePreloadedContent(w, r)
}
// streamWithRetry proxies the upstream download to the client, regenerating
// the download link and retrying (up to maxRetries attempts) when
// handleUpstream reports a recoverable failure such as an invalidated link.
// retryCount is the current attempt number, starting at 0.
func (f *File) streamWithRetry(w http.ResponseWriter, r *http.Request, retryCount int) error {
	const maxRetries = 3
	_log := f.cache.Logger()
	// Get download link (with caching optimization)
	downloadLink, err := f.getDownloadLink()
	if err != nil {
		return &streamError{Err: err, StatusCode: http.StatusPreconditionFailed}
	}
	if downloadLink == "" {
		return &streamError{Err: fmt.Errorf("empty download link"), StatusCode: http.StatusNotFound}
	}
	// Create upstream request with streaming optimizations
	upstreamReq, err := http.NewRequest("GET", downloadLink, nil)
	if err != nil {
		return &streamError{Err: err, StatusCode: http.StatusInternalServerError}
	}
	setVideoStreamingHeaders(upstreamReq)
	// Handle range requests (critical for video seeking).
	// -1 appears to mean the range was unsatisfiable; 1 a valid range request
	// (TODO confirm against handleRangeRequest's definition).
	isRangeRequest := f.handleRangeRequest(upstreamReq, r, w)
	if isRangeRequest == -1 {
		return &streamError{Err: fmt.Errorf("invalid range"), StatusCode: http.StatusRequestedRangeNotSatisfiable}
	}
	resp, err := sharedClient.Do(upstreamReq)
	if err != nil {
		return &streamError{Err: err, StatusCode: http.StatusServiceUnavailable}
	}
	defer resp.Body.Close()
	// Handle upstream errors with retry logic
	shouldRetry, retryErr := f.handleUpstream(resp, retryCount, maxRetries)
	if shouldRetry && retryCount < maxRetries {
		// Retry with new download link
		_log.Debug().
			Int("retry_count", retryCount+1).
			Str("file", f.name).
			Msg("Retrying stream request")
		return f.streamWithRetry(w, r, retryCount+1)
	}
	if retryErr != nil {
		return retryErr
	}
	setVideoResponseHeaders(w, resp, isRangeRequest == 1)
	return f.streamBuffer(w, resp.Body)
}
// streamBuffer copies src to the client, flushing after every write so bytes
// reach the player as soon as possible. A small first chunk (64 KB) is sent
// to minimise time-to-first-byte, then copying continues with a 256 KB
// buffer until EOF or an error.
func (f *File) streamBuffer(w http.ResponseWriter, src io.Reader) error {
	flusher, ok := w.(http.Flusher)
	if !ok {
		return fmt.Errorf("response does not support flushing")
	}

	// classify converts client-side disconnects into streamError so callers
	// can tell them apart from genuine failures.
	classify := func(err error) error {
		if isClientDisconnection(err) {
			return &streamError{Err: err, StatusCode: 0, IsClientDisconnection: true}
		}
		return err
	}

	// Prime the connection with up to 64 KB so playback can start quickly.
	first := make([]byte, 64*1024)
	n, err := src.Read(first)
	switch {
	case n > 0:
		if _, werr := w.Write(first[:n]); werr != nil {
			return werr
		}
		flusher.Flush()
	case err != nil && err != io.EOF:
		return err
	}

	// Main copy loop with the larger buffer.
	chunk := make([]byte, 256*1024)
	for {
		n, readErr := src.Read(chunk)
		if n > 0 {
			if _, writeErr := w.Write(chunk[:n]); writeErr != nil {
				return classify(writeErr)
			}
			flusher.Flush()
		}
		if readErr == nil {
			continue
		}
		if readErr == io.EOF {
			return nil
		}
		return classify(readErr)
	}
}
// handleUpstream inspects a completed upstream response and decides whether
// the stream should be retried with a regenerated download link. It returns
// (true, nil) when the caller should retry, (false, *streamError) for a
// terminal failure, and (false, nil) for 200/206 success responses.
// Recoverable cases (bandwidth exceeded, expired 404 link) mark the current
// link invalid in the cache before requesting a retry.
func (f *File) handleUpstream(resp *http.Response, retryCount, maxRetries int) (shouldRetry bool, err error) {
	// Success: nothing to handle.
	if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusPartialContent {
		return false, nil
	}
	_log := f.cache.Logger()
	// Clean up response body properly: drain before closing so the transport
	// can tear the connection down cleanly.
	cleanupResp := func(resp *http.Response) {
		if resp.Body != nil {
			_, _ = io.Copy(io.Discard, resp.Body)
			resp.Body.Close()
		}
	}
	switch resp.StatusCode {
	case http.StatusServiceUnavailable:
		// Read the body to check for specific error messages
		body, readErr := io.ReadAll(resp.Body)
		cleanupResp(resp)
		if readErr != nil {
			_log.Error().Err(readErr).Msg("Failed to read response body")
			return false, &streamError{
				Err:        fmt.Errorf("failed to read error response: %w", readErr),
				StatusCode: http.StatusServiceUnavailable,
			}
		}
		bodyStr := string(body)
		// Provider-specific marker for exhausted bandwidth: the link (not the
		// file) is dead, so invalidate it and retry.
		if strings.Contains(bodyStr, "you have exceeded your traffic") {
			_log.Debug().
				Str("file", f.name).
				Int("retry_count", retryCount).
				Msg("Bandwidth exceeded. Marking link as invalid")
			f.cache.MarkDownloadLinkAsInvalid(f.link, f.downloadLink, "bandwidth_exceeded")
			// Retry with a different API key if available and we haven't exceeded retries
			if retryCount < maxRetries {
				return true, nil
			}
			return false, &streamError{
				Err:        fmt.Errorf("bandwidth exceeded after %d retries", retryCount),
				StatusCode: http.StatusServiceUnavailable,
			}
		}
		return false, &streamError{
			Err:        fmt.Errorf("service unavailable: %s", bodyStr),
			StatusCode: http.StatusServiceUnavailable,
		}
	case http.StatusNotFound:
		cleanupResp(resp)
		_log.Debug().
			Str("file", f.name).
			Int("retry_count", retryCount).
			Msg("Link not found (404). Marking link as invalid and regenerating")
		f.cache.MarkDownloadLinkAsInvalid(f.link, f.downloadLink, "link_not_found")
		// Try to regenerate download link if we haven't exceeded retries
		if retryCount < maxRetries {
			// Clear cached link to force regeneration
			f.downloadLink = ""
			return true, nil
		}
		return false, &streamError{
			Err:        fmt.Errorf("file not found after %d retries", retryCount),
			StatusCode: http.StatusNotFound,
		}
	default:
		// Anything else is unexpected: log the body and surface a 502.
		body, _ := io.ReadAll(resp.Body)
		cleanupResp(resp)
		_log.Error().
			Int("status_code", resp.StatusCode).
			Str("file", f.name).
			Str("response_body", string(body)).
			Msg("Unexpected upstream error")
		return false, &streamError{
			Err:        fmt.Errorf("upstream error %d: %s", resp.StatusCode, string(body)),
			StatusCode: http.StatusBadGateway,
		}
	}
}
// handleRangeRequest translates the client's Range header onto the
// upstream request. Return codes: 1 = a valid single range was applied,
// 0 = no Range header (full content), -1 = unsatisfiable range (a
// Content-Range header has already been written to w in that case).
func (f *File) handleRangeRequest(upstreamReq *http.Request, r *http.Request, w http.ResponseWriter) int {
rangeHeader := r.Header.Get("Range")
if rangeHeader == "" {
// For video files, apply byte range if exists — presumably a window
// into a larger upstream object (e.g. a file inside an archive);
// TODO confirm GetDownloadByteRange semantics.
if byteRange, _ := f.getDownloadByteRange(); byteRange != nil {
upstreamReq.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", byteRange[0], byteRange[1]))
}
return 0 // No range request
}
// Parse range request; only a single range is supported.
ranges, err := parseRange(rangeHeader, f.size)
if err != nil || len(ranges) != 1 {
w.Header().Set("Content-Range", fmt.Sprintf("bytes */%d", f.size))
return -1 // Invalid range
}
// Apply byte range offset if exists: shift the client's requested range
// by the file's start offset within the upstream object.
byteRange, _ := f.getDownloadByteRange()
start, end := ranges[0].start, ranges[0].end
if byteRange != nil {
start += byteRange[0]
end += byteRange[0]
}
upstreamReq.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", start, end))
return 1 // Valid range request
}
/*
These are the methods that implement the os.File interface for the File type.
Only Stat and ReadDir are used
*/
// Stat reports this entry as an os.FileInfo: directories as 0755 with
// ModeDir set and zero size, regular files as 0644 with their recorded
// size. It never fails.
func (f *File) Stat() (os.FileInfo, error) {
	info := &FileInfo{
		name:    f.name,
		modTime: f.modTime,
		isDir:   f.isDir,
	}
	if f.isDir {
		info.mode = 0755 | os.ModeDir
	} else {
		info.mode = 0644
		info.size = f.size
	}
	return info, nil
}
// Read satisfies io.Reader for preloaded in-memory content only.
// Directories are invalid, metadata-only opens report EOF immediately,
// and real streaming files must be served through StreamResponse.
func (f *File) Read(p []byte) (n int, err error) {
	switch {
	case f.isDir:
		return 0, os.ErrInvalid
	case f.metadataOnly:
		// Opened for metadata (PROPFIND/Stat) only; nothing to read.
		return 0, io.EOF
	case f.content == nil:
		// Streaming files bypass Read entirely.
		return 0, fmt.Errorf("use StreamResponse method for streaming files")
	}
	if f.readOffset >= int64(len(f.content)) {
		return 0, io.EOF
	}
	n = copy(p, f.content[f.readOffset:])
	f.readOffset += int64(n)
	return n, nil
}
// Seek repositions the read offset within preloaded in-memory content
// (whence follows io.SeekStart/SeekCurrent/SeekEnd). Out-of-range
// targets are clamped to [0, len(content)] instead of erroring.
// Streaming files must use StreamResponse (HTTP Range) instead.
func (f *File) Seek(offset int64, whence int) (int64, error) {
if f.isDir {
return 0, os.ErrInvalid
}
// Only handle seeking for preloaded content
if f.content != nil {
newOffset := f.readOffset
switch whence {
case io.SeekStart:
newOffset = offset
case io.SeekCurrent:
newOffset += offset
case io.SeekEnd:
newOffset = int64(len(f.content)) + offset
default:
return 0, os.ErrInvalid
}
// Clamp rather than error on out-of-bounds targets.
if newOffset < 0 {
newOffset = 0
}
if newOffset > int64(len(f.content)) {
newOffset = int64(len(f.content))
}
f.readOffset = newOffset
return f.readOffset, nil
}
// For streaming files, return error to force use of StreamResponse
return 0, fmt.Errorf("use StreamResponse method for streaming files")
}
// Write always fails with os.ErrPermission: this filesystem is read-only.
func (f *File) Write(p []byte) (n int, err error) {
return 0, os.ErrPermission
}
func (f *File) Readdir(count int) ([]os.FileInfo, error) {
if !f.isDir {
return nil, os.ErrInvalid
}
if count <= 0 {
return f.children, nil
}
if len(f.children) == 0 {
return nil, io.EOF
}
if count > len(f.children) {
count = len(f.children)
}
files := f.children[:count]
f.children = f.children[count:]
return files, nil
// File is a minimal read-only view of a directory entry: name, size and
// a directory flag. Note that ModTime is exposed as an already formatted
// string rather than a time.Time.
type File interface {
Name() string
Size() int64
IsDir() bool
ModTime() string
}

View File

@@ -240,3 +240,28 @@ func setVideoResponseHeaders(w http.ResponseWriter, resp *http.Response, isRange
w.WriteHeader(resp.StatusCode)
}
// getContentType maps a file name's extension to the MIME type used in
// streaming responses. Unknown extensions fall back to
// "application/octet-stream". Matching is case-sensitive suffix
// matching, identical to the original switch-based implementation.
func getContentType(fileName string) string {
	suffixTypes := []struct {
		suffix      string
		contentType string
	}{
		{".mp4", "video/mp4"},
		{".mkv", "video/x-matroska"},
		{".avi", "video/x-msvideo"},
		{".mov", "video/quicktime"},
		{".m4v", "video/x-m4v"},
		{".ts", "video/mp2t"},
		{".srt", "application/x-subrip"},
		{".vtt", "text/vtt"},
	}
	for _, st := range suffixTypes {
		if strings.HasSuffix(fileName, st.suffix) {
			return st.contentType
		}
	}
	return "application/octet-stream"
}

View File

@@ -2,6 +2,7 @@ package webdav
import (
"context"
"github.com/rs/zerolog"
"github.com/stanNthe5/stringbuf"
"net/http"
"os"
@@ -18,7 +19,7 @@ const (
metadataOnlyKey contextKey = "metadataOnly"
)
func (h *Handler) handlePropfind(w http.ResponseWriter, r *http.Request) {
func handlePropfind(h Handler, logger zerolog.Logger, w http.ResponseWriter, r *http.Request) {
// Setup context for metadata only
ctx := context.WithValue(r.Context(), metadataOnlyKey, true)
r = r.WithContext(ctx)
@@ -37,7 +38,7 @@ func (h *Handler) handlePropfind(w http.ResponseWriter, r *http.Request) {
// Always include the resource itself
f, err := h.OpenFile(r.Context(), cleanPath, os.O_RDONLY, 0)
if err != nil {
h.logger.Error().Err(err).Str("path", cleanPath).Msg("Failed to open file")
logger.Error().Err(err).Str("path", cleanPath).Msg("Failed to open file")
http.NotFound(w, r)
return
}
@@ -45,14 +46,14 @@ func (h *Handler) handlePropfind(w http.ResponseWriter, r *http.Request) {
fi, err := f.Stat()
if err != nil {
h.logger.Error().Err(err).Msg("Failed to stat file")
logger.Error().Err(err).Msg("Failed to stat file")
http.Error(w, "Server Error", http.StatusInternalServerError)
return
}
var rawEntries []os.FileInfo
if fi.IsDir() {
rawEntries = append(rawEntries, h.getChildren(cleanPath)...)
rawEntries = append(rawEntries, h.GetChildren(cleanPath)...)
}
entries := make([]entry, 0, len(rawEntries)+1)

472
pkg/webdav/torrent_file.go Normal file
View File

@@ -0,0 +1,472 @@
package webdav
import (
"crypto/tls"
"fmt"
"io"
"net/http"
"os"
"strings"
"time"
"github.com/sirrobot01/decypharr/pkg/debrid/store"
)
// streamingTransport is tuned for long-lived video streams: keep-alives
// are disabled so each stream gets a dedicated connection that is closed
// when the request ends, and HTTP/2 is blocked (ForceAttemptHTTP2=false
// plus an empty TLSNextProto map).
// NOTE(review): InsecureSkipVerify disables TLS certificate verification
// for every upstream request — confirm this is intentional.
var streamingTransport = &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
MaxIdleConns: 200,
MaxIdleConnsPerHost: 100,
MaxConnsPerHost: 200,
IdleConnTimeout: 90 * time.Second,
TLSHandshakeTimeout: 10 * time.Second,
ResponseHeaderTimeout: 60 * time.Second, // give the upstream a minute to send headers
ExpectContinueTimeout: 1 * time.Second,
DisableKeepAlives: true, // close after each request
ForceAttemptHTTP2: false, // dont speak HTTP/2
// this line is what truly blocks HTTP/2:
TLSNextProto: make(map[string]func(string, *tls.Conn) http.RoundTripper),
}
// sharedClient is the process-wide HTTP client for upstream streaming.
// Timeout is 0 (no overall deadline) because a stream may legitimately
// run for hours; per-phase limits live on the transport above.
var sharedClient = &http.Client{
Transport: streamingTransport,
Timeout: 0,
}
// streamError wraps an error produced while proxying an upstream stream.
// StatusCode is the HTTP status to report when response headers have not
// been written yet (0 when not applicable). IsClientDisconnection marks
// errors caused by the client going away, so callers can log them
// quietly instead of treating them as server faults.
type streamError struct {
Err error
StatusCode int
IsClientDisconnection bool
}
// Error implements the error interface by delegating to the wrapped error.
func (e *streamError) Error() string {
return e.Err.Error()
}
// Unwrap exposes the underlying error for errors.Is / errors.As.
func (e *streamError) Unwrap() error {
return e.Err
}
// TorrentFile is a webdav.File-style handle for a file or directory
// backed by a cached debrid torrent. Regular files are proxied from an
// upstream download link; small synthetic files (e.g. version.txt)
// carry their bytes in content.
type TorrentFile struct {
name string // display name of this entry
torrentName string // parent torrent's name, used for cache lookups
link string // debrid file link, used as the download-link cache key
downloadLink string // resolved download URL cached on this handle
size int64
isDir bool
fileId string
isRar bool
metadataOnly bool // opened for PROPFIND/Stat only; Read returns EOF
content []byte // preloaded bytes for synthetic files; nil for streamed files
children []os.FileInfo // For directories
cache *store.Cache
modTime time.Time
// Minimal state for interface compliance only
readOffset int64 // Only used for Read() method compliance
}
// Close releases in-memory references held by this handle so it can be
// collected promptly. There are no OS resources behind these handles;
// this exists only to satisfy the os.File-style interface.
func (f *TorrentFile) Close() error {
	if !f.isDir {
		// Drop buffers and cached state for file handles.
		f.content = nil
		f.children = nil
		f.downloadLink = ""
		f.readOffset = 0
	}
	return nil
}
// getDownloadLink resolves a usable download URL for this file. It
// prefers the URL already cached on the handle and otherwise asks the
// shared cache, caching the result. Returns os.ErrNotExist when no
// valid URL is available.
func (f *TorrentFile) getDownloadLink() (string, error) {
	// Fast path: reuse the link cached on this handle.
	if f.downloadLink != "" && isValidURL(f.downloadLink) {
		return f.downloadLink, nil
	}
	link, err := f.cache.GetDownloadLink(f.torrentName, f.name, f.link)
	if err != nil {
		return "", err
	}
	if link == "" || !isValidURL(link) {
		return "", os.ErrNotExist
	}
	f.downloadLink = link
	return link, nil
}
// getDownloadByteRange returns the optional [start, end] byte window
// configured for this file in the cache, or nil when the whole download
// is served as-is.
func (f *TorrentFile) getDownloadByteRange() (*[2]int64, error) {
	return f.cache.GetDownloadByteRange(f.torrentName, f.name)
}
// servePreloadedContent serves files whose bytes are already in memory
// (e.g. the synthetic version.txt), honoring single-range requests.
// Multi-range or unparsable Range headers are rejected with 416.
func (f *TorrentFile) servePreloadedContent(w http.ResponseWriter, r *http.Request) error {
content := f.content
size := int64(len(content))
// Handle range requests for preloaded content
if rangeHeader := r.Header.Get("Range"); rangeHeader != "" {
ranges, err := parseRange(rangeHeader, size)
if err != nil || len(ranges) != 1 {
w.Header().Set("Content-Range", fmt.Sprintf("bytes */%d", size))
return &streamError{Err: fmt.Errorf("invalid range"), StatusCode: http.StatusRequestedRangeNotSatisfiable}
}
// end is inclusive, hence the +1 in both length and slicing below.
start, end := ranges[0].start, ranges[0].end
w.Header().Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", start, end, size))
w.Header().Set("Content-Length", fmt.Sprintf("%d", end-start+1))
w.Header().Set("Accept-Ranges", "bytes")
w.WriteHeader(http.StatusPartialContent)
_, err = w.Write(content[start : end+1])
return err
}
// Full content
w.Header().Set("Content-Length", fmt.Sprintf("%d", size))
w.Header().Set("Accept-Ranges", "bytes")
w.WriteHeader(http.StatusOK)
_, err := w.Write(content)
return err
}
// StreamResponse writes the file to w: preloaded in-memory content is
// served directly, everything else is proxied from the upstream
// download link with retry support.
func (f *TorrentFile) StreamResponse(w http.ResponseWriter, r *http.Request) error {
	if f.content == nil {
		// Remote file: proxy the debrid link, starting at retry attempt 0.
		return f.streamWithRetry(w, r, 0)
	}
	return f.servePreloadedContent(w, r)
}
// streamWithRetry resolves a download link for this file and proxies it
// to the client, retrying up to maxRetries times when the upstream
// reports a recoverable failure (bandwidth exceeded, stale 404 link).
// retryCount is the current attempt number, starting at 0.
func (f *TorrentFile) streamWithRetry(w http.ResponseWriter, r *http.Request, retryCount int) error {
const maxRetries = 3
_log := f.cache.Logger()
// Get download link (with caching optimization)
downloadLink, err := f.getDownloadLink()
if err != nil {
return &streamError{Err: err, StatusCode: http.StatusPreconditionFailed}
}
if downloadLink == "" {
return &streamError{Err: fmt.Errorf("empty download link"), StatusCode: http.StatusNotFound}
}
// Create upstream request with streaming optimizations
upstreamReq, err := http.NewRequest("GET", downloadLink, nil)
if err != nil {
return &streamError{Err: err, StatusCode: http.StatusInternalServerError}
}
setVideoStreamingHeaders(upstreamReq)
// Handle range requests (critical for video seeking).
// isRangeRequest: 1 = valid range, 0 = none, -1 = unsatisfiable.
isRangeRequest := f.handleRangeRequest(upstreamReq, r, w)
if isRangeRequest == -1 {
return &streamError{Err: fmt.Errorf("invalid range"), StatusCode: http.StatusRequestedRangeNotSatisfiable}
}
resp, err := sharedClient.Do(upstreamReq)
if err != nil {
return &streamError{Err: err, StatusCode: http.StatusServiceUnavailable}
}
defer resp.Body.Close()
// Handle upstream errors with retry logic. handleUpstream drains and
// closes the body itself on error paths; the defer above is then a
// second, harmless Close.
shouldRetry, retryErr := f.handleUpstream(resp, retryCount, maxRetries)
if shouldRetry && retryCount < maxRetries {
// Retry with new download link
_log.Debug().
Int("retry_count", retryCount+1).
Str("file", f.name).
Msg("Retrying stream request")
return f.streamWithRetry(w, r, retryCount+1)
}
if retryErr != nil {
return retryErr
}
setVideoResponseHeaders(w, resp, isRangeRequest == 1)
return f.streamBuffer(w, resp.Body)
}
// streamBuffer copies src into w with an explicit flush after every
// chunk so video players receive bytes as soon as they arrive instead
// of waiting on Go's default response buffering. The first chunk uses a
// small 64 KB buffer to minimize time-to-first-byte; subsequent chunks
// use 256 KB for throughput. Client disconnections are wrapped in a
// streamError with IsClientDisconnection set so callers can suppress
// noisy logs.
//
// Fix vs. the previous version: an error returned by the FIRST Read
// alongside n > 0 bytes is no longer dropped — io.Reader explicitly
// allows returning data and an error together and does not guarantee
// the error is repeated on the next call.
func (f *TorrentFile) streamBuffer(w http.ResponseWriter, src io.Reader) error {
	flusher, ok := w.(http.Flusher)
	if !ok {
		return fmt.Errorf("response does not support flushing")
	}

	// copyChunk writes n bytes (if any), flushes, then handles readErr.
	// done reports whether streaming should stop (err may be nil on EOF).
	copyChunk := func(buf []byte, n int, readErr error) (done bool, err error) {
		if n > 0 {
			if _, writeErr := w.Write(buf[:n]); writeErr != nil {
				if isClientDisconnection(writeErr) {
					return true, &streamError{Err: writeErr, StatusCode: 0, IsClientDisconnection: true}
				}
				return true, writeErr
			}
			flusher.Flush()
		}
		if readErr != nil {
			if readErr == io.EOF {
				// Upstream finished: stream completed successfully.
				return true, nil
			}
			if isClientDisconnection(readErr) {
				return true, &streamError{Err: readErr, StatusCode: 0, IsClientDisconnection: true}
			}
			return true, readErr
		}
		return false, nil
	}

	// Prime with one small read for fast first byte.
	smallBuf := make([]byte, 64*1024) // 64 KB
	n, readErr := src.Read(smallBuf)
	if done, err := copyChunk(smallBuf, n, readErr); done {
		return err
	}

	// Main copy loop with a larger buffer for sustained throughput.
	buf := make([]byte, 256*1024) // 256 KB
	for {
		n, readErr := src.Read(buf)
		if done, err := copyChunk(buf, n, readErr); done {
			return err
		}
	}
}
// handleUpstream inspects a non-success upstream response and decides
// whether the caller should retry with a fresh download link.
//
// It returns (true, nil) when a retry is sensible (bandwidth-exceeded
// 503 or stale-link 404 with retries remaining) and (false,
// *streamError) otherwise. On every error path the response body is
// drained and closed so the connection can be reused.
//
// Fixes vs. the previous version:
//   - error bodies are read through io.LimitReader so a misbehaving
//     upstream cannot make us buffer an arbitrarily large payload;
//   - the bandwidth-exceeded path clears f.downloadLink (as the 404
//     path already did), so the retry resolves a fresh link instead of
//     reusing the exhausted cached URL.
func (f *TorrentFile) handleUpstream(resp *http.Response, retryCount, maxRetries int) (shouldRetry bool, err error) {
	// 200/206 are the success cases for full and range requests.
	if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusPartialContent {
		return false, nil
	}
	_log := f.cache.Logger()

	// Error payloads are small; cap reads to keep memory bounded.
	const maxErrBody = 64 * 1024

	// Drain then close so keep-alive connections can be reused.
	cleanupResp := func(resp *http.Response) {
		if resp.Body != nil {
			_, _ = io.Copy(io.Discard, resp.Body)
			resp.Body.Close()
		}
	}

	switch resp.StatusCode {
	case http.StatusServiceUnavailable:
		// Read the body to check for provider-specific error messages.
		body, readErr := io.ReadAll(io.LimitReader(resp.Body, maxErrBody))
		cleanupResp(resp)
		if readErr != nil {
			_log.Error().Err(readErr).Msg("Failed to read response body")
			return false, &streamError{
				Err:        fmt.Errorf("failed to read error response: %w", readErr),
				StatusCode: http.StatusServiceUnavailable,
			}
		}
		bodyStr := string(body)
		if strings.Contains(bodyStr, "you have exceeded your traffic") {
			_log.Debug().
				Str("file", f.name).
				Int("retry_count", retryCount).
				Msg("Bandwidth exceeded. Marking link as invalid")
			f.cache.MarkDownloadLinkAsInvalid(f.link, f.downloadLink, "bandwidth_exceeded")
			// Retry with a different API key if available and we haven't
			// exceeded retries.
			if retryCount < maxRetries {
				// Force getDownloadLink to resolve a fresh URL instead of
				// handing back the exhausted cached one.
				f.downloadLink = ""
				return true, nil
			}
			return false, &streamError{
				Err:        fmt.Errorf("bandwidth exceeded after %d retries", retryCount),
				StatusCode: http.StatusServiceUnavailable,
			}
		}
		return false, &streamError{
			Err:        fmt.Errorf("service unavailable: %s", bodyStr),
			StatusCode: http.StatusServiceUnavailable,
		}
	case http.StatusNotFound:
		cleanupResp(resp)
		_log.Debug().
			Str("file", f.name).
			Int("retry_count", retryCount).
			Msg("Link not found (404). Marking link as invalid and regenerating")
		f.cache.MarkDownloadLinkAsInvalid(f.link, f.downloadLink, "link_not_found")
		// Try to regenerate the download link if we haven't exceeded retries.
		if retryCount < maxRetries {
			// Clear cached link to force regeneration.
			f.downloadLink = ""
			return true, nil
		}
		return false, &streamError{
			Err:        fmt.Errorf("file not found after %d retries", retryCount),
			StatusCode: http.StatusNotFound,
		}
	default:
		// Unexpected status: log the payload and surface a 502 to the client.
		body, _ := io.ReadAll(io.LimitReader(resp.Body, maxErrBody))
		cleanupResp(resp)
		_log.Error().
			Int("status_code", resp.StatusCode).
			Str("file", f.name).
			Str("response_body", string(body)).
			Msg("Unexpected upstream error")
		return false, &streamError{
			Err:        fmt.Errorf("upstream error %d: %s", resp.StatusCode, string(body)),
			StatusCode: http.StatusBadGateway,
		}
	}
}
// handleRangeRequest translates the client's Range header onto the
// upstream request. Return codes: 1 = a valid single range was applied,
// 0 = no Range header (full content), -1 = unsatisfiable range (a
// Content-Range header has already been written to w in that case).
func (f *TorrentFile) handleRangeRequest(upstreamReq *http.Request, r *http.Request, w http.ResponseWriter) int {
rangeHeader := r.Header.Get("Range")
if rangeHeader == "" {
// For video files, apply byte range if exists — presumably a window
// into a larger upstream object (e.g. a file inside an archive);
// TODO confirm GetDownloadByteRange semantics.
if byteRange, _ := f.getDownloadByteRange(); byteRange != nil {
upstreamReq.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", byteRange[0], byteRange[1]))
}
return 0 // No range request
}
// Parse range request; only a single range is supported.
ranges, err := parseRange(rangeHeader, f.size)
if err != nil || len(ranges) != 1 {
w.Header().Set("Content-Range", fmt.Sprintf("bytes */%d", f.size))
return -1 // Invalid range
}
// Apply byte range offset if exists: shift the client's requested range
// by the file's start offset within the upstream object.
byteRange, _ := f.getDownloadByteRange()
start, end := ranges[0].start, ranges[0].end
if byteRange != nil {
start += byteRange[0]
end += byteRange[0]
}
upstreamReq.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", start, end))
return 1 // Valid range request
}
/*
These are the methods that implement the os.TorrentFile interface for the TorrentFile type.
Only Stat and ReadDir are used
*/
// Stat reports this entry as an os.FileInfo: directories as 0755 with
// ModeDir set and zero size, regular files as 0644 with their recorded
// size. It never fails.
func (f *TorrentFile) Stat() (os.FileInfo, error) {
	mode := os.FileMode(0644)
	size := f.size
	if f.isDir {
		mode = 0755 | os.ModeDir
		size = 0
	}
	return &FileInfo{
		name:    f.name,
		size:    size,
		mode:    mode,
		modTime: f.modTime,
		isDir:   f.isDir,
	}, nil
}
// Read satisfies io.Reader for preloaded in-memory content only:
// directories are invalid, metadata-only opens report EOF immediately,
// and streamed files must go through StreamResponse.
func (f *TorrentFile) Read(p []byte) (n int, err error) {
if f.isDir {
return 0, os.ErrInvalid
}
if f.metadataOnly {
// Opened for PROPFIND/Stat only; there is nothing to read.
return 0, io.EOF
}
// For preloaded content files (like version.txt)
if f.content != nil {
if f.readOffset >= int64(len(f.content)) {
return 0, io.EOF
}
n = copy(p, f.content[f.readOffset:])
f.readOffset += int64(n)
return n, nil
}
// For streaming files, return an error to force use of StreamResponse
return 0, fmt.Errorf("use StreamResponse method for streaming files")
}
// Seek repositions the read offset within preloaded in-memory content
// (whence follows io.SeekStart/SeekCurrent/SeekEnd). Out-of-range
// targets are clamped into [0, len(content)] rather than returning an
// error. Streaming files do not support Seek; they are served via
// StreamResponse with HTTP Range headers.
func (f *TorrentFile) Seek(offset int64, whence int) (int64, error) {
	if f.isDir {
		return 0, os.ErrInvalid
	}
	if f.content == nil {
		// Streaming files bypass Seek entirely.
		return 0, fmt.Errorf("use StreamResponse method for streaming files")
	}
	size := int64(len(f.content))
	var target int64
	switch whence {
	case io.SeekStart:
		target = offset
	case io.SeekCurrent:
		target = f.readOffset + offset
	case io.SeekEnd:
		target = size + offset
	default:
		return 0, os.ErrInvalid
	}
	// Clamp rather than error on out-of-bounds targets.
	if target < 0 {
		target = 0
	} else if target > size {
		target = size
	}
	f.readOffset = target
	return f.readOffset, nil
}
// Write always fails with os.ErrPermission: this filesystem is read-only.
func (f *TorrentFile) Write(p []byte) (n int, err error) {
return 0, os.ErrPermission
}
// Readdir lists directory entries. With count > 0 it consumes up to
// count entries from f.children per call, returning io.EOF once the
// listing is exhausted (paging like os.File.Readdir).
// NOTE(review): the count <= 0 branch returns all entries WITHOUT
// consuming them, so repeated Readdir(-1) calls return the full listing
// each time — slightly different from os.File semantics; confirm
// callers expect this.
func (f *TorrentFile) Readdir(count int) ([]os.FileInfo, error) {
if !f.isDir {
return nil, os.ErrInvalid
}
if count <= 0 {
return f.children, nil
}
if len(f.children) == 0 {
return nil, io.EOF
}
if count > len(f.children) {
count = len(f.children)
}
// Hand out the next page and advance the cursor.
files := f.children[:count]
f.children = f.children[count:]
return files, nil
}

View File

@@ -24,17 +24,17 @@ import (
const DeleteAllBadTorrentKey = "DELETE_ALL_BAD_TORRENTS"
type Handler struct {
Name string
type TorrentHandler struct {
name string
logger zerolog.Logger
cache *store.Cache
URLBase string
RootPath string
}
func NewHandler(name, urlBase string, cache *store.Cache, logger zerolog.Logger) *Handler {
h := &Handler{
Name: name,
func NewTorrentHandler(name, urlBase string, cache *store.Cache, logger zerolog.Logger) Handler {
h := &TorrentHandler{
name: name,
cache: cache,
logger: logger,
URLBase: urlBase,
@@ -43,15 +43,18 @@ func NewHandler(name, urlBase string, cache *store.Cache, logger zerolog.Logger)
return h
}
// Mkdir implements webdav.FileSystem
func (h *Handler) Mkdir(ctx context.Context, name string, perm os.FileMode) error {
return os.ErrPermission // Read-only filesystem
func (ht *TorrentHandler) Start(ctx context.Context) error {
return ht.cache.Start(ctx)
}
func (h *Handler) readinessMiddleware(next http.Handler) http.Handler {
func (ht *TorrentHandler) Type() string {
return "torrent"
}
func (ht *TorrentHandler) Readiness(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
select {
case <-h.cache.IsReady():
case <-ht.cache.IsReady():
// WebDAV is ready, proceed
next.ServeHTTP(w, r)
default:
@@ -62,13 +65,23 @@ func (h *Handler) readinessMiddleware(next http.Handler) http.Handler {
})
}
// Name returns the name of the handler
func (ht *TorrentHandler) Name() string {
return ht.name
}
// Mkdir implements webdav.FileSystem
func (ht *TorrentHandler) Mkdir(ctx context.Context, name string, perm os.FileMode) error {
return os.ErrPermission // Read-only filesystem
}
// RemoveAll implements webdav.FileSystem
func (h *Handler) RemoveAll(ctx context.Context, name string) error {
func (ht *TorrentHandler) RemoveAll(ctx context.Context, name string) error {
if !strings.HasPrefix(name, "/") {
name = "/" + name
}
name = utils.PathUnescape(path.Clean(name))
rootDir := path.Clean(h.RootPath)
rootDir := path.Clean(ht.RootPath)
if name == rootDir {
return os.ErrPermission
@@ -80,33 +93,33 @@ func (h *Handler) RemoveAll(ctx context.Context, name string) error {
}
// Check if the name is a parent path
if _, ok := h.isParentPath(name); ok {
if _, ok := ht.isParentPath(name); ok {
return os.ErrPermission
}
// Check if the name is a torrent folder
rel := strings.TrimPrefix(name, rootDir+"/")
parts := strings.Split(rel, "/")
if len(parts) == 2 && utils.Contains(h.getParentItems(), parts[0]) {
if len(parts) == 2 && utils.Contains(ht.getParentItems(), parts[0]) {
torrentName := parts[1]
torrent := h.cache.GetTorrentByName(torrentName)
torrent := ht.cache.GetTorrentByName(torrentName)
if torrent == nil {
return os.ErrNotExist
}
// Remove the torrent from the cache and debrid
h.cache.OnRemove(torrent.Id)
ht.cache.OnRemove(torrent.Id)
return nil
}
// If we reach here, it means the path is a file
if len(parts) >= 2 {
if utils.Contains(h.getParentItems(), parts[0]) {
if utils.Contains(ht.getParentItems(), parts[0]) {
torrentName := parts[1]
cached := h.cache.GetTorrentByName(torrentName)
cached := ht.cache.GetTorrentByName(torrentName)
if cached != nil && len(parts) >= 3 {
filename := filepath.Clean(path.Join(parts[2:]...))
if file, ok := cached.GetFile(filename); ok {
if err := h.cache.RemoveFile(cached.Id, file.Name); err != nil {
h.logger.Error().Err(err).Msgf("Failed to remove file %s from torrent %s", file.Name, torrentName)
if err := ht.cache.RemoveFile(cached.Id, file.Name); err != nil {
ht.logger.Error().Err(err).Msgf("Failed to remove file %s from torrent %s", file.Name, torrentName)
return err
}
// If the file was successfully removed, we can return nil
@@ -120,29 +133,29 @@ func (h *Handler) RemoveAll(ctx context.Context, name string) error {
}
// Rename implements webdav.FileSystem
func (h *Handler) Rename(ctx context.Context, oldName, newName string) error {
func (ht *TorrentHandler) Rename(ctx context.Context, oldName, newName string) error {
return os.ErrPermission // Read-only filesystem
}
func (h *Handler) getTorrentsFolders(folder string) []os.FileInfo {
return h.cache.GetListing(folder)
func (ht *TorrentHandler) getTorrentsFolders(folder string) []os.FileInfo {
return ht.cache.GetListing(folder)
}
func (h *Handler) getParentItems() []string {
func (ht *TorrentHandler) getParentItems() []string {
parents := []string{"__all__", "torrents", "__bad__"}
// Add custom folders
parents = append(parents, h.cache.GetCustomFolders()...)
parents = append(parents, ht.cache.GetCustomFolders()...)
// version.txt
parents = append(parents, "version.txt")
return parents
}
func (h *Handler) getParentFiles() []os.FileInfo {
func (ht *TorrentHandler) getParentFiles() []os.FileInfo {
now := time.Now()
rootFiles := make([]os.FileInfo, 0, len(h.getParentItems()))
for _, item := range h.getParentItems() {
rootFiles := make([]os.FileInfo, 0, len(ht.getParentItems()))
for _, item := range ht.getParentItems() {
f := &FileInfo{
name: item,
size: 0,
@@ -159,49 +172,49 @@ func (h *Handler) getParentFiles() []os.FileInfo {
return rootFiles
}
// returns the os.FileInfo slice for “depth-1” children of cleanPath
func (h *Handler) getChildren(name string) []os.FileInfo {
// GetChildren returns the os.FileInfo slice for “depth-1” children of cleanPath
func (ht *TorrentHandler) GetChildren(name string) []os.FileInfo {
if name[0] != '/' {
name = "/" + name
}
name = utils.PathUnescape(path.Clean(name))
root := path.Clean(h.RootPath)
root := path.Clean(ht.RootPath)
// toplevel “parents” (e.g. __all__, torrents etc)
if name == root {
return h.getParentFiles()
return ht.getParentFiles()
}
// one level down (e.g. /root/parentFolder)
if parent, ok := h.isParentPath(name); ok {
return h.getTorrentsFolders(parent)
if parent, ok := ht.isParentPath(name); ok {
return ht.getTorrentsFolders(parent)
}
// torrent-folder level (e.g. /root/parentFolder/torrentName)
rel := strings.TrimPrefix(name, root+"/")
parts := strings.Split(rel, "/")
if len(parts) == 2 && utils.Contains(h.getParentItems(), parts[0]) {
if len(parts) == 2 && utils.Contains(ht.getParentItems(), parts[0]) {
torrentName := parts[1]
if t := h.cache.GetTorrentByName(torrentName); t != nil {
return h.getFileInfos(t)
if t := ht.cache.GetTorrentByName(torrentName); t != nil {
return ht.getFileInfos(t)
}
}
return nil
}
func (h *Handler) OpenFile(ctx context.Context, name string, flag int, perm os.FileMode) (webdav.File, error) {
func (ht *TorrentHandler) OpenFile(ctx context.Context, name string, flag int, perm os.FileMode) (webdav.File, error) {
if !strings.HasPrefix(name, "/") {
name = "/" + name
}
name = utils.PathUnescape(path.Clean(name))
rootDir := path.Clean(h.RootPath)
rootDir := path.Clean(ht.RootPath)
metadataOnly := ctx.Value(metadataOnlyKey) != nil
now := time.Now()
// 1) special case version.txt
if name == path.Join(rootDir, "version.txt") {
versionInfo := version.GetInfo().String()
return &File{
cache: h.cache,
return &TorrentFile{
cache: ht.cache,
isDir: false,
content: []byte(versionInfo),
name: "version.txt",
@@ -211,14 +224,14 @@ func (h *Handler) OpenFile(ctx context.Context, name string, flag int, perm os.F
}, nil
}
// 2) directory case: ask getChildren
if children := h.getChildren(name); children != nil {
// 2) directory case: ask Children
if children := ht.GetChildren(name); children != nil {
displayName := filepath.Clean(path.Base(name))
if name == rootDir {
displayName = "/"
}
return &File{
cache: h.cache,
return &TorrentFile{
cache: ht.cache,
isDir: true,
children: children,
name: displayName,
@@ -233,14 +246,14 @@ func (h *Handler) OpenFile(ctx context.Context, name string, flag int, perm os.F
rel := strings.TrimPrefix(name, rootDir+"/")
parts := strings.Split(rel, "/")
if len(parts) >= 2 {
if utils.Contains(h.getParentItems(), parts[0]) {
if utils.Contains(ht.getParentItems(), parts[0]) {
torrentName := parts[1]
cached := h.cache.GetTorrentByName(torrentName)
cached := ht.cache.GetTorrentByName(torrentName)
if cached != nil && len(parts) >= 3 {
filename := filepath.Clean(path.Join(parts[2:]...))
if file, ok := cached.GetFile(filename); ok && !file.Deleted {
return &File{
cache: h.cache,
return &TorrentFile{
cache: ht.cache,
torrentName: torrentName,
fileId: file.Id,
isDir: false,
@@ -255,21 +268,19 @@ func (h *Handler) OpenFile(ctx context.Context, name string, flag int, perm os.F
}
}
}
h.logger.Info().Msgf("File not found: %s", name)
return nil, os.ErrNotExist
}
// Stat implements webdav.FileSystem
func (h *Handler) Stat(ctx context.Context, name string) (os.FileInfo, error) {
f, err := h.OpenFile(ctx, name, os.O_RDONLY, 0)
func (ht *TorrentHandler) Stat(ctx context.Context, name string) (os.FileInfo, error) {
f, err := ht.OpenFile(ctx, name, os.O_RDONLY, 0)
if err != nil {
return nil, err
}
return f.Stat()
}
func (h *Handler) getFileInfos(torrent *store.CachedTorrent) []os.FileInfo {
func (ht *TorrentHandler) getFileInfos(torrent *store.CachedTorrent) []os.FileInfo {
torrentFiles := torrent.GetFiles()
files := make([]os.FileInfo, 0, len(torrentFiles))
@@ -294,33 +305,33 @@ func (h *Handler) getFileInfos(torrent *store.CachedTorrent) []os.FileInfo {
return files
}
func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
func (ht *TorrentHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
switch r.Method {
case "GET":
h.handleGet(w, r)
ht.handleGet(w, r)
return
case "HEAD":
h.handleHead(w, r)
ht.handleHead(w, r)
return
case "OPTIONS":
h.handleOptions(w, r)
ht.handleOptions(w, r)
return
case "PROPFIND":
h.handlePropfind(w, r)
ht.handlePropfind(w, r)
return
case "DELETE":
if err := h.handleDelete(w, r); err == nil {
if err := ht.handleDelete(w, r); err == nil {
return
}
// fallthrough to default
}
handler := &webdav.Handler{
FileSystem: h,
FileSystem: ht,
LockSystem: webdav.NewMemLS(),
Logger: func(r *http.Request, err error) {
if err != nil {
h.logger.Trace().
ht.logger.Trace().
Err(err).
Str("method", r.Method).
Str("path", r.URL.Path).
@@ -331,33 +342,8 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
handler.ServeHTTP(w, r)
}
func getContentType(fileName string) string {
contentType := "application/octet-stream"
// Determine content type based on file extension
switch {
case strings.HasSuffix(fileName, ".mp4"):
contentType = "video/mp4"
case strings.HasSuffix(fileName, ".mkv"):
contentType = "video/x-matroska"
case strings.HasSuffix(fileName, ".avi"):
contentType = "video/x-msvideo"
case strings.HasSuffix(fileName, ".mov"):
contentType = "video/quicktime"
case strings.HasSuffix(fileName, ".m4v"):
contentType = "video/x-m4v"
case strings.HasSuffix(fileName, ".ts"):
contentType = "video/mp2t"
case strings.HasSuffix(fileName, ".srt"):
contentType = "application/x-subrip"
case strings.HasSuffix(fileName, ".vtt"):
contentType = "text/vtt"
}
return contentType
}
func (h *Handler) isParentPath(urlPath string) (string, bool) {
parents := h.getParentItems()
func (ht *TorrentHandler) isParentPath(urlPath string) (string, bool) {
parents := ht.getParentItems()
lastComponent := path.Base(urlPath)
for _, p := range parents {
if p == lastComponent {
@@ -367,9 +353,9 @@ func (h *Handler) isParentPath(urlPath string) (string, bool) {
return "", false
}
func (h *Handler) serveDirectory(w http.ResponseWriter, r *http.Request, file webdav.File) {
func (ht *TorrentHandler) serveDirectory(w http.ResponseWriter, r *http.Request, file webdav.File) {
var children []os.FileInfo
if f, ok := file.(*File); ok {
if f, ok := file.(*TorrentFile); ok {
children = f.children
} else {
var err error
@@ -385,7 +371,7 @@ func (h *Handler) serveDirectory(w http.ResponseWriter, r *http.Request, file we
parentPath := path.Dir(cleanPath)
showParent := cleanPath != "/" && parentPath != "." && parentPath != cleanPath
isBadPath := strings.HasSuffix(cleanPath, "__bad__")
_, canDelete := h.isParentPath(cleanPath)
_, canDelete := ht.isParentPath(cleanPath)
// Prepare template data
data := struct {
@@ -402,7 +388,7 @@ func (h *Handler) serveDirectory(w http.ResponseWriter, r *http.Request, file we
ParentPath: parentPath,
ShowParent: showParent,
Children: children,
URLBase: h.URLBase,
URLBase: ht.URLBase,
IsBadPath: isBadPath,
CanDelete: canDelete,
DeleteAllBadTorrentKey: DeleteAllBadTorrentKey,
@@ -416,8 +402,8 @@ func (h *Handler) serveDirectory(w http.ResponseWriter, r *http.Request, file we
// Handlers
func (h *Handler) handleGet(w http.ResponseWriter, r *http.Request) {
fRaw, err := h.OpenFile(r.Context(), r.URL.Path, os.O_RDONLY, 0)
func (ht *TorrentHandler) handleGet(w http.ResponseWriter, r *http.Request) {
fRaw, err := ht.OpenFile(r.Context(), r.URL.Path, os.O_RDONLY, 0)
if err != nil {
http.NotFound(w, r)
return
@@ -431,7 +417,7 @@ func (h *Handler) handleGet(w http.ResponseWriter, r *http.Request) {
}
if fi.IsDir() {
h.serveDirectory(w, r, fRaw)
ht.serveDirectory(w, r, fRaw)
return
}
@@ -448,9 +434,9 @@ func (h *Handler) handleGet(w http.ResponseWriter, r *http.Request) {
}
// Handle File struct with direct streaming
if file, ok := fRaw.(*File); ok {
if file, ok := fRaw.(*TorrentFile); ok {
// Handle nginx proxy (X-Accel-Redirect)
if file.content == nil && !file.isRar && h.cache.StreamWithRclone() {
if file.content == nil && !file.isRar && ht.cache.StreamWithRclone() {
link, err := file.getDownloadLink()
if err != nil || link == "" {
http.Error(w, "Could not fetch download link", http.StatusPreconditionFailed)
@@ -475,7 +461,7 @@ func (h *Handler) handleGet(w http.ResponseWriter, r *http.Request) {
if streamErr.StatusCode > 0 && !hasHeadersWritten(w) {
http.Error(w, streamErr.Error(), streamErr.StatusCode)
} else {
h.logger.Error().
ht.logger.Error().
Err(streamErr.Err).
Str("path", r.URL.Path).
Msg("Stream error")
@@ -485,7 +471,7 @@ func (h *Handler) handleGet(w http.ResponseWriter, r *http.Request) {
if !hasHeadersWritten(w) {
http.Error(w, "Stream error", http.StatusInternalServerError)
} else {
h.logger.Error().
ht.logger.Error().
Err(err).
Str("path", r.URL.Path).
Msg("Stream error after headers written")
@@ -505,10 +491,14 @@ func (h *Handler) handleGet(w http.ResponseWriter, r *http.Request) {
}
}
func (h *Handler) handleHead(w http.ResponseWriter, r *http.Request) {
f, err := h.OpenFile(r.Context(), r.URL.Path, os.O_RDONLY, 0)
func (ht *TorrentHandler) handlePropfind(w http.ResponseWriter, r *http.Request) {
handlePropfind(ht, ht.logger, w, r)
}
func (ht *TorrentHandler) handleHead(w http.ResponseWriter, r *http.Request) {
f, err := ht.OpenFile(r.Context(), r.URL.Path, os.O_RDONLY, 0)
if err != nil {
h.logger.Error().Err(err).Str("path", r.URL.Path).Msg("Failed to open file")
ht.logger.Error().Err(err).Str("path", r.URL.Path).Msg("Failed to open file")
http.NotFound(w, r)
return
}
@@ -521,7 +511,7 @@ func (h *Handler) handleHead(w http.ResponseWriter, r *http.Request) {
fi, err := f.Stat()
if err != nil {
h.logger.Error().Err(err).Msg("Failed to stat file")
ht.logger.Error().Err(err).Msg("Failed to stat file")
http.Error(w, "Server Error", http.StatusInternalServerError)
return
}
@@ -532,14 +522,14 @@ func (h *Handler) handleHead(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
}
func (h *Handler) handleOptions(w http.ResponseWriter, r *http.Request) {
func (ht *TorrentHandler) handleOptions(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Allow", "OPTIONS, GET, HEAD, PUT, DELETE, MKCOL, COPY, MOVE, PROPFIND")
w.Header().Set("DAV", "1, 2")
w.WriteHeader(http.StatusOK)
}
// handleDelete deletes a torrent by id, or all bad torrents if the id is DeleteAllBadTorrentKey
func (h *Handler) handleDelete(w http.ResponseWriter, r *http.Request) error {
func (ht *TorrentHandler) handleDelete(w http.ResponseWriter, r *http.Request) error {
cleanPath := path.Clean(r.URL.Path) // Remove any leading slashes
_, torrentId := path.Split(cleanPath)
@@ -548,25 +538,25 @@ func (h *Handler) handleDelete(w http.ResponseWriter, r *http.Request) error {
}
if torrentId == DeleteAllBadTorrentKey {
return h.handleDeleteAll(w)
return ht.handleDeleteAll(w)
}
return h.handleDeleteById(w, torrentId)
return ht.handleDeleteById(w, torrentId)
}
func (h *Handler) handleDeleteById(w http.ResponseWriter, tId string) error {
cachedTorrent := h.cache.GetTorrent(tId)
func (ht *TorrentHandler) handleDeleteById(w http.ResponseWriter, tId string) error {
cachedTorrent := ht.cache.GetTorrent(tId)
if cachedTorrent == nil {
return os.ErrNotExist
}
h.cache.OnRemove(cachedTorrent.Id)
ht.cache.OnRemove(cachedTorrent.Id)
w.WriteHeader(http.StatusNoContent)
return nil
}
func (h *Handler) handleDeleteAll(w http.ResponseWriter) error {
badTorrents := h.cache.GetListing("__bad__")
func (ht *TorrentHandler) handleDeleteAll(w http.ResponseWriter) error {
badTorrents := ht.cache.GetListing("__bad__")
if len(badTorrents) == 0 {
http.Error(w, "No bad torrents to delete", http.StatusNotFound)
return nil
@@ -574,9 +564,9 @@ func (h *Handler) handleDeleteAll(w http.ResponseWriter) error {
for _, fi := range badTorrents {
tName := strings.TrimSpace(strings.SplitN(fi.Name(), "||", 2)[0])
t := h.cache.GetTorrentByName(tName)
t := ht.cache.GetTorrentByName(tName)
if t != nil {
h.cache.OnRemove(t.Id)
ht.cache.OnRemove(t.Id)
}
}

263
pkg/webdav/usenet_file.go Normal file
View File

@@ -0,0 +1,263 @@
package webdav
import (
"context"
"errors"
"fmt"
"github.com/sirrobot01/decypharr/pkg/usenet"
"io"
"net/http"
"os"
"strings"
"time"
)
// UsenetFile is the webdav.File implementation for entries in the usenet
// WebDAV tree: directories, preloaded in-memory files (version.txt), and
// files streamed on demand from the usenet backend.
type UsenetFile struct {
	name         string // display name of the file or directory
	nzbID        string // ID of the owning NZB in the usenet store
	downloadLink string // cached download link; cleared on Close
	size         int64  // file size in bytes (0 for directories)
	isDir        bool   // true when this entry represents a directory
	fileId       string // identifier of the file within the NZB (set to file.Name by OpenFile)
	metadataOnly bool   // when true, Read returns io.EOF immediately (metadata-only open)
	content      []byte // preloaded in-memory content; nil for streamed files
	children     []os.FileInfo // For directories
	usenet       usenet.Usenet // backend used to stream file data
	modTime      time.Time     // modification time reported by Stat
	readOffset   int64         // current position for Read/Seek
	rPipe        io.ReadCloser // lazily-created pipe fed by a background Stream call in Read
}
// UsenetFile interface implementations for UsenetFile
// Close releases the resources held by the file.
//
// For directories there is nothing to release. For regular files it drops
// the preloaded content/children references and closes any in-flight
// streaming pipe so the background goroutine started by Read is unblocked
// and does not leak after the file is closed.
func (f *UsenetFile) Close() error {
	if f.isDir {
		return nil // No resources to close for directories
	}
	// Stop an in-flight background stream, if Read started one. Without this
	// the io.Pipe writer goroutine would block forever writing to a pipe
	// that no one reads anymore.
	if f.rPipe != nil {
		_ = f.rPipe.Close()
		f.rPipe = nil
	}
	f.content = nil
	f.children = nil
	f.downloadLink = ""
	return nil
}
// servePreloadedContent writes the in-memory content of the file to the
// client, honoring a single-range Range header when one is present.
func (f *UsenetFile) servePreloadedContent(w http.ResponseWriter, r *http.Request) error {
	body := f.content
	total := int64(len(body))

	rangeHeader := r.Header.Get("Range")
	if rangeHeader == "" {
		// No range requested: send the full payload.
		w.Header().Set("Content-Length", fmt.Sprintf("%d", total))
		w.Header().Set("Accept-Ranges", "bytes")
		w.WriteHeader(http.StatusOK)
		_, err := w.Write(body)
		return err
	}

	ranges, err := parseRange(rangeHeader, total)
	if err != nil || len(ranges) != 1 {
		// Only a single well-formed range is supported.
		w.Header().Set("Content-Range", fmt.Sprintf("bytes */%d", total))
		return &streamError{Err: fmt.Errorf("invalid range"), StatusCode: http.StatusRequestedRangeNotSatisfiable}
	}

	start, end := ranges[0].start, ranges[0].end
	w.Header().Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", start, end, total))
	w.Header().Set("Content-Length", fmt.Sprintf("%d", end-start+1))
	w.Header().Set("Accept-Ranges", "bytes")
	w.WriteHeader(http.StatusPartialContent)
	_, err = w.Write(body[start : end+1])
	return err
}
// StreamResponse writes the file to the HTTP response: preloaded in-memory
// content is served directly, everything else is streamed from the usenet
// backend.
func (f *UsenetFile) StreamResponse(w http.ResponseWriter, r *http.Request) error {
	if f.content != nil {
		return f.servePreloadedContent(w, r)
	}
	return f.streamWithRetry(w, r, 0)
}
// streamWithRetry streams the requested byte range of the file from the
// usenet backend into the response writer.
//
// retryCount gates the header writes: headers are only emitted on the first
// attempt so a later attempt cannot call WriteHeader twice.
func (f *UsenetFile) streamWithRetry(w http.ResponseWriter, r *http.Request, retryCount int) error {
	start, end := f.getRange(r)
	// getRange signals a malformed or unsatisfiable Range header with
	// (-1, -1). Previously this fell through and produced negative
	// Content-Length/Content-Range headers and a bogus backend request;
	// reject it explicitly instead.
	if start < 0 || end < 0 {
		w.Header().Set("Content-Range", fmt.Sprintf("bytes */%d", f.size))
		return &streamError{Err: fmt.Errorf("invalid range"), StatusCode: http.StatusRequestedRangeNotSatisfiable}
	}
	if retryCount == 0 {
		contentLength := end - start + 1
		w.Header().Set("Content-Length", fmt.Sprintf("%d", contentLength))
		w.Header().Set("Accept-Ranges", "bytes")
		if r.Header.Get("Range") != "" {
			contentRange := fmt.Sprintf("bytes %d-%d/%d", start, end, f.size)
			w.Header().Set("Content-Range", contentRange)
			w.WriteHeader(http.StatusPartialContent)
		} else {
			w.WriteHeader(http.StatusOK)
		}
	}
	err := f.usenet.Stream(r.Context(), f.nzbID, f.name, start, end, w)
	if err != nil {
		// Client disconnections are expected (seeks, aborted playback) and
		// are not reported as stream failures.
		if isConnectionError(err) || strings.Contains(err.Error(), "client disconnected") {
			return nil
		}
		// Don't treat cancellation as an error - it's expected for seek operations
		if errors.Is(err, context.Canceled) {
			return nil
		}
		return &streamError{Err: fmt.Errorf("failed to stream file %s: %w", f.name, err), StatusCode: http.StatusInternalServerError}
	}
	return nil
}
// isConnectionError checks if the error is due to client disconnection
func isConnectionError(err error) bool {
errStr := err.Error()
if errors.Is(err, io.EOF) || errors.Is(err, context.Canceled) {
return true // EOF or context cancellation is a common disconnection error
}
return strings.Contains(errStr, "broken pipe") ||
strings.Contains(errStr, "connection reset by peer")
}
// getRange resolves the request's Range header against this file's size.
// It returns (0, size-1) when no range is requested and (-1, -1) when the
// header is malformed, multi-range, or out of bounds for this file.
func (f *UsenetFile) getRange(r *http.Request) (int64, int64) {
	header := r.Header.Get("Range")
	if header == "" {
		// Full file: byte 0 through the last byte.
		return 0, f.size - 1
	}
	ranges, err := parseRange(header, f.size)
	if err != nil || len(ranges) != 1 {
		return -1, -1 // only a single well-formed range is supported
	}
	// The range is relative to this file, not the whole NZB.
	start, end := ranges[0].start, ranges[0].end
	switch {
	case start < 0, end < 0, start > end, end >= f.size:
		return -1, -1 // out of bounds
	default:
		return start, end
	}
}
// Stat returns metadata for this entry: directories report 0755|ModeDir,
// regular files report 0644.
func (f *UsenetFile) Stat() (os.FileInfo, error) {
	mode := os.FileMode(0644)
	if f.isDir {
		mode = 0755 | os.ModeDir
	}
	return &FileInfo{
		name:    f.name,
		size:    f.size,
		mode:    mode,
		modTime: f.modTime,
		isDir:   f.isDir,
	}, nil
}
// Read implements io.Reader. Preloaded content is served from memory;
// streamed files lazily start a background Stream from the current offset
// into an io.Pipe and read from that pipe.
func (f *UsenetFile) Read(p []byte) (int, error) {
	if f.isDir {
		return 0, os.ErrInvalid
	}
	if f.metadataOnly {
		// Metadata-only opens never return body bytes.
		return 0, io.EOF
	}
	if f.content != nil {
		// In-memory content path.
		if int64(len(f.content))-f.readOffset <= 0 {
			return 0, io.EOF
		}
		n := copy(p, f.content[f.readOffset:])
		f.readOffset += int64(n)
		return n, nil
	}
	if f.rPipe == nil {
		// First read (or first read after a Seek): launch a background
		// stream starting at the current offset.
		pr, pw := io.Pipe()
		f.rPipe = pr
		go func(start int64) {
			streamErr := f.usenet.Stream(context.Background(), f.nzbID, f.name, start, f.size-1, pw)
			// CloseWithError(nil) closes the pipe cleanly with io.EOF;
			// its return value is always nil, so it is safe to discard.
			_ = pw.CloseWithError(streamErr)
		}(f.readOffset)
	}
	n, err := f.rPipe.Read(p)
	f.readOffset += int64(n)
	return n, err
}
// Seek moves the read offset, clamping the target into [0, size]. Any
// in-flight background stream is dropped so the next Read restarts from the
// new position.
func (f *UsenetFile) Seek(offset int64, whence int) (int64, error) {
	if f.isDir {
		return 0, os.ErrInvalid
	}
	var target int64
	switch whence {
	case io.SeekStart:
		target = offset
	case io.SeekCurrent:
		target = f.readOffset + offset
	case io.SeekEnd:
		target = f.size + offset
	default:
		return 0, os.ErrInvalid
	}
	// Clamp rather than error on out-of-range targets.
	if target < 0 {
		target = 0
	} else if target > f.size {
		target = f.size
	}
	// Abandon the in-flight stream; Read will restart at the new offset.
	if f.rPipe != nil {
		_ = f.rPipe.Close()
		f.rPipe = nil
	}
	f.readOffset = target
	return target, nil
}
// Write always fails: the usenet WebDAV tree is read-only.
func (f *UsenetFile) Write(_ []byte) (int, error) {
	return 0, os.ErrPermission
}
// Readdir lists directory entries. A non-positive count returns all
// remaining children without consuming them; a positive count consumes and
// returns up to count entries, yielding io.EOF once the listing is drained.
func (f *UsenetFile) Readdir(count int) ([]os.FileInfo, error) {
	if !f.isDir {
		return nil, os.ErrInvalid
	}
	if count <= 0 {
		return f.children, nil
	}
	remaining := len(f.children)
	if remaining == 0 {
		return nil, io.EOF
	}
	if count > remaining {
		count = remaining
	}
	batch := f.children[:count]
	f.children = f.children[count:]
	return batch, nil
}

View File

@@ -0,0 +1,529 @@
package webdav
import (
"context"
"errors"
"fmt"
"github.com/sirrobot01/decypharr/pkg/usenet"
"golang.org/x/net/webdav"
"io"
"net/http"
"os"
"path"
"path/filepath"
"sort"
"strings"
"time"
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/utils"
"github.com/sirrobot01/decypharr/pkg/version"
)
// UsenetHandler serves a read-only WebDAV tree backed by the usenet store.
// It implements the package's Handler interface (webdav.FileSystem plus
// http.Handler).
type UsenetHandler struct {
	name    string // handler name, used as the mount segment under /webdav
	logger  zerolog.Logger
	usenet  usenet.Usenet // backend providing the NZB store and streaming
	URLBase string        // application URL base, used when rendering listings
	RootPath string       // absolute WebDAV root: <URLBase>/webdav/<name>
}
// NewUsenetHandler builds a usenet-backed WebDAV handler rooted at
// <urlBase>/webdav/<name>.
func NewUsenetHandler(name, urlBase string, backend usenet.Usenet, logger zerolog.Logger) Handler {
	// Note: the parameter is deliberately not named "usenet" so it does not
	// shadow the usenet package inside this function.
	return &UsenetHandler{
		name:     name,
		usenet:   backend,
		logger:   logger,
		URLBase:  urlBase,
		RootPath: path.Join(urlBase, "webdav", name),
	}
}
// Type identifies the kind of backend this handler serves.
func (hu *UsenetHandler) Type() string {
	return "usenet"
}
// Name returns the handler's mount name under /webdav.
func (hu *UsenetHandler) Name() string {
	return hu.name
}
// Start delegates startup to the underlying usenet backend.
func (hu *UsenetHandler) Start(ctx context.Context) error {
	return hu.usenet.Start(ctx)
}
// Readiness is middleware that rejects requests with 503 + Retry-After
// until the usenet backend signals readiness via its IsReady channel.
func (hu *UsenetHandler) Readiness(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		select {
		case <-hu.usenet.IsReady():
			// Ready: fall through to the wrapped handler.
		default:
			// Still initializing: ask the client to retry shortly.
			w.Header().Set("Retry-After", "5")
			http.Error(w, "WebDAV service is initializing, please try again shortly", http.StatusServiceUnavailable)
			return
		}
		next.ServeHTTP(w, r)
	})
}
// Mkdir implements webdav.FileSystem. The usenet tree is read-only, so
// directory creation is always rejected.
func (hu *UsenetHandler) Mkdir(ctx context.Context, name string, perm os.FileMode) error {
	return os.ErrPermission // Read-only filesystem
}
// RemoveAll implements webdav.FileSystem. Deletion is only permitted for
// NZB folders (/<parent>/<nzbName>) and individual files inside them; the
// share root, version.txt, and the parent pseudo-directories themselves are
// protected with os.ErrPermission.
func (hu *UsenetHandler) RemoveAll(ctx context.Context, name string) error {
	if !strings.HasPrefix(name, "/") {
		name = "/" + name
	}
	name = utils.PathUnescape(path.Clean(name))
	rootDir := path.Clean(hu.RootPath)
	// Never delete the share root itself.
	if name == rootDir {
		return os.ErrPermission
	}
	// Skip if it's version.txt
	if name == path.Join(rootDir, "version.txt") {
		return os.ErrPermission
	}
	// Parent pseudo-directories (__all__, __bad__, ...) are not deletable.
	if _, ok := hu.isParentPath(name); ok {
		return os.ErrPermission
	}
	// /<parent>/<nzbName>: delete the whole NZB from the store.
	rel := strings.TrimPrefix(name, rootDir+"/")
	parts := strings.Split(rel, "/")
	if len(parts) == 2 && utils.Contains(hu.getParentItems(), parts[0]) {
		nzb := hu.usenet.Store().GetByName(parts[1])
		if nzb == nil {
			return os.ErrNotExist
		}
		// Remove the nzb from the store
		if err := hu.usenet.Store().Delete(nzb.ID); err != nil {
			hu.logger.Error().Err(err).Msgf("Failed to remove torrent %s", parts[1])
			return err
		}
		return nil
	}
	// /<parent>/<nzbName>/<file...>: remove a single file from the NZB.
	if len(parts) >= 2 {
		if utils.Contains(hu.getParentItems(), parts[0]) {
			cached := hu.usenet.Store().GetByName(parts[1])
			if cached != nil && len(parts) >= 3 {
				// Re-join the remaining segments so nested file paths match
				// the names stored on the NZB.
				filename := filepath.Clean(path.Join(parts[2:]...))
				if file := cached.GetFileByName(filename); file != nil {
					if err := hu.usenet.Store().RemoveFile(cached.ID, file.Name); err != nil {
						hu.logger.Error().Err(err).Msgf("Failed to remove file %s from torrent %s", file.Name, parts[1])
						return err
					}
					// If the file was successfully removed, we can return nil
					return nil
				}
			}
		}
	}
	// NOTE(review): paths that match nothing fall through to nil, so
	// deleting a nonexistent entry reports success — confirm whether
	// os.ErrNotExist would be more appropriate here.
	return nil
}
// Rename implements webdav.FileSystem. The usenet tree is read-only, so
// renames are always rejected.
func (hu *UsenetHandler) Rename(ctx context.Context, oldName, newName string) error {
	return os.ErrPermission // Read-only filesystem
}
// getTorrentsFolders returns the store's directory listing for the given
// parent pseudo-folder (e.g. "__all__").
func (hu *UsenetHandler) getTorrentsFolders(folder string) []os.FileInfo {
	return hu.usenet.Store().GetListing(folder)
}
// getParentItems lists the fixed entries of the share root: the parent
// pseudo-directories plus the version.txt marker file.
func (hu *UsenetHandler) getParentItems() []string {
	return []string{"__all__", "__bad__", "version.txt"}
}
// getParentFiles builds os.FileInfo entries for the share root: one
// directory per parent pseudo-folder plus a regular version.txt file.
func (hu *UsenetHandler) getParentFiles() []os.FileInfo {
	now := time.Now()
	items := hu.getParentItems()
	rootFiles := make([]os.FileInfo, 0, len(items))
	for _, item := range items {
		info := &FileInfo{
			name:    item,
			size:    0,
			mode:    0755 | os.ModeDir,
			modTime: now,
			isDir:   true,
		}
		if item == "version.txt" {
			// version.txt is the lone regular file at the root (the mode is
			// left as-is; only isDir and size are adjusted, matching the
			// original behavior).
			info.isDir = false
			info.size = int64(len(version.GetInfo().String()))
		}
		rootFiles = append(rootFiles, info)
	}
	return rootFiles
}
// GetChildren returns the depth-1 children of the given path, or nil when
// the path does not resolve to a directory in this tree.
//
// Resolution order: share root → parent pseudo-directory → NZB folder.
func (hu *UsenetHandler) GetChildren(name string) []os.FileInfo {
	// Use HasPrefix like the other path helpers here; the original indexed
	// name[0] directly, which panics on an empty string.
	if !strings.HasPrefix(name, "/") {
		name = "/" + name
	}
	name = utils.PathUnescape(path.Clean(name))
	root := path.Clean(hu.RootPath)
	// Top-level "parents" (e.g. __all__, __bad__).
	if name == root {
		return hu.getParentFiles()
	}
	if parent, ok := hu.isParentPath(name); ok {
		return hu.getTorrentsFolders(parent)
	}
	// NZB-folder level (/root/<parent>/<nzbName>).
	rel := strings.TrimPrefix(name, root+"/")
	parts := strings.Split(rel, "/")
	if len(parts) == 2 && utils.Contains(hu.getParentItems(), parts[0]) {
		if u := hu.usenet.Store().GetByName(parts[1]); u != nil {
			return hu.getFileInfos(u)
		}
	}
	return nil
}
// OpenFile implements webdav.FileSystem for the read-only usenet tree.
//
// Resolution order matters:
//  1. version.txt — must be checked before GetChildren, because
//     getParentItems also lists "version.txt" and isParentPath would
//     otherwise match it as a pseudo-directory.
//  2. Directories — any path for which GetChildren returns entries.
//  3. Files inside an NZB folder (/<parent>/<nzbName>/<file...>).
//
// When ctx carries metadataOnlyKey, the returned file is metadata-only
// (UsenetFile.Read returns io.EOF immediately).
func (hu *UsenetHandler) OpenFile(ctx context.Context, name string, flag int, perm os.FileMode) (webdav.File, error) {
	if !strings.HasPrefix(name, "/") {
		name = "/" + name
	}
	name = utils.PathUnescape(path.Clean(name))
	rootDir := path.Clean(hu.RootPath)
	metadataOnly := ctx.Value(metadataOnlyKey) != nil
	now := time.Now()
	// 1) special case version.txt
	if name == path.Join(rootDir, "version.txt") {
		versionInfo := version.GetInfo().String()
		return &UsenetFile{
			usenet:       hu.usenet,
			isDir:        false,
			content:      []byte(versionInfo),
			name:         "version.txt",
			size:         int64(len(versionInfo)),
			metadataOnly: metadataOnly,
			modTime:      now,
		}, nil
	}
	// 2) directory case: ask GetChildren
	if children := hu.GetChildren(name); children != nil {
		displayName := filepath.Clean(path.Base(name))
		if name == rootDir {
			displayName = "/"
		}
		return &UsenetFile{
			usenet:       hu.usenet,
			isDir:        true,
			children:     children,
			name:         displayName,
			size:         0,
			metadataOnly: metadataOnly,
			modTime:      now,
		}, nil
	}
	// 3) file-within-NZB case
	// everything else must be a file under an NZB folder
	rel := strings.TrimPrefix(name, rootDir+"/")
	parts := strings.Split(rel, "/")
	if len(parts) >= 2 {
		if utils.Contains(hu.getParentItems(), parts[0]) {
			cached := hu.usenet.Store().GetByName(parts[1])
			if cached != nil && len(parts) >= 3 {
				// Re-join the remaining segments so nested file paths match
				// the names stored on the NZB.
				filename := filepath.Clean(path.Join(parts[2:]...))
				if file := cached.GetFileByName(filename); file != nil {
					return &UsenetFile{
						usenet:       hu.usenet,
						nzbID:        cached.ID,
						fileId:       file.Name,
						isDir:        false,
						name:         file.Name,
						size:         file.Size,
						metadataOnly: metadataOnly,
						modTime:      cached.AddedOn,
					}, nil
				}
			}
		}
	}
	return nil, os.ErrNotExist
}
// Stat implements webdav.FileSystem by opening the path read-only and
// returning its metadata.
func (hu *UsenetHandler) Stat(ctx context.Context, name string) (os.FileInfo, error) {
	f, err := hu.OpenFile(ctx, name, os.O_RDONLY, 0)
	if err != nil {
		return nil, err
	}
	// Release the handle once the info is obtained; the original leaked it.
	// UsenetFile.Close never fails, so the error is safe to discard.
	defer func() { _ = f.Close() }()
	return f.Stat()
}
// getFileInfos converts an NZB's files into os.FileInfo entries sorted by
// file name.
func (hu *UsenetHandler) getFileInfos(nzb *usenet.NZB) []os.FileInfo {
	nzbFiles := nzb.GetFiles()
	sort.Slice(nzbFiles, func(i, j int) bool {
		return nzbFiles[i].Name < nzbFiles[j].Name
	})
	infos := make([]os.FileInfo, 0, len(nzbFiles))
	for _, file := range nzbFiles {
		infos = append(infos, &FileInfo{
			name:    file.Name,
			size:    file.Size,
			mode:    0644,
			modTime: nzb.AddedOn,
			isDir:   false,
		})
	}
	return infos
}
func (hu *UsenetHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
switch r.Method {
case "GET":
hu.handleGet(w, r)
return
case "HEAD":
hu.handleHead(w, r)
return
case "OPTIONS":
hu.handleOptions(w, r)
return
case "PROPFIND":
hu.handlePropfind(w, r)
return
case "DELETE":
if err := hu.handleDelete(w, r); err == nil {
return
}
// fallthrough to default
}
handler := &webdav.Handler{
FileSystem: hu,
LockSystem: webdav.NewMemLS(),
Logger: func(r *http.Request, err error) {
if err != nil {
hu.logger.Trace().
Err(err).
Str("method", r.Method).
Str("path", r.URL.Path).
Msg("WebDAV error")
}
},
}
handler.ServeHTTP(w, r)
}
// isParentPath reports whether the last path component names one of the
// fixed root entries, returning the matching entry when it does.
func (hu *UsenetHandler) isParentPath(urlPath string) (string, bool) {
	base := path.Base(urlPath)
	for _, candidate := range hu.getParentItems() {
		if candidate == base {
			return candidate, true
		}
	}
	return "", false
}
// serveDirectory renders an HTML listing for a directory. Children come
// straight from a *UsenetFile when available, otherwise from Readdir.
func (hu *UsenetHandler) serveDirectory(w http.ResponseWriter, r *http.Request, file webdav.File) {
	var children []os.FileInfo
	if f, ok := file.(*UsenetFile); ok {
		children = f.children
	} else {
		var err error
		children, err = file.Readdir(-1)
		if err != nil {
			http.Error(w, "Failed to list directory", http.StatusInternalServerError)
			return
		}
	}
	// Clean and prepare the path
	cleanPath := path.Clean(r.URL.Path)
	parentPath := path.Dir(cleanPath)
	showParent := cleanPath != "/" && parentPath != "." && parentPath != cleanPath
	isBadPath := strings.HasSuffix(cleanPath, "__bad__")
	_, canDelete := hu.isParentPath(cleanPath)
	// Prepare template data
	data := struct {
		Path                   string
		ParentPath             string
		ShowParent             bool
		Children               []os.FileInfo
		URLBase                string
		IsBadPath              bool
		CanDelete              bool
		DeleteAllBadTorrentKey string
	}{
		Path:                   cleanPath,
		ParentPath:             parentPath,
		ShowParent:             showParent,
		Children:               children,
		URLBase:                hu.URLBase,
		IsBadPath:              isBadPath,
		CanDelete:              canDelete,
		DeleteAllBadTorrentKey: DeleteAllBadTorrentKey,
	}
	w.Header().Set("Content-Type", "text/html; charset=utf-8")
	if err := tplDirectory.ExecuteTemplate(w, "directory.html", data); err != nil {
		// The original silently dropped this error; log it so broken
		// templates are visible. Headers are already written at this point,
		// so no http.Error is attempted.
		hu.logger.Error().Err(err).Str("path", cleanPath).Msg("Failed to render directory listing")
		return
	}
}
// Handlers
// handlePropfind delegates PROPFIND to the shared implementation used by
// all WebDAV handlers in this package.
func (hu *UsenetHandler) handlePropfind(w http.ResponseWriter, r *http.Request) {
	handlePropfind(hu, hu.logger, w, r)
}
// handleGet serves GET requests: directories render as HTML listings,
// *UsenetFile entries stream from the backend, and other webdav.File
// implementations fall back to ServeContent / io.Copy.
func (hu *UsenetHandler) handleGet(w http.ResponseWriter, r *http.Request) {
	fRaw, err := hu.OpenFile(r.Context(), r.URL.Path, os.O_RDONLY, 0)
	if err != nil {
		http.NotFound(w, r)
		return
	}
	defer fRaw.Close()
	fi, err := fRaw.Stat()
	if err != nil {
		http.Error(w, "Server Error", http.StatusInternalServerError)
		return
	}
	if fi.IsDir() {
		hu.serveDirectory(w, r, fRaw)
		return
	}
	// Set common headers
	etag := fmt.Sprintf("\"%x-%x\"", fi.ModTime().Unix(), fi.Size())
	ext := path.Ext(fi.Name())
	w.Header().Set("ETag", etag)
	w.Header().Set("Last-Modified", fi.ModTime().UTC().Format(http.TimeFormat))
	w.Header().Set("Content-Type", getContentType(ext))
	w.Header().Set("Connection", "keep-alive")
	// Handle UsenetFile with direct streaming
	if file, ok := fRaw.(*UsenetFile); ok {
		if err := file.StreamResponse(w, r); err != nil {
			var streamErr *streamError
			if errors.As(err, &streamErr) {
				// Handle client disconnections silently
				if errors.Is(streamErr.Err, context.Canceled) || errors.Is(streamErr.Err, context.DeadlineExceeded) || streamErr.IsClientDisconnection {
					return // Don't log as error or try to write response
				}
				if streamErr.StatusCode > 0 && !hasHeadersWritten(w) {
					// BUG FIX: the original returned here without writing
					// anything, leaving the client with an empty reply.
					// Report the status like the torrent handler does.
					http.Error(w, streamErr.Error(), streamErr.StatusCode)
				} else {
					hu.logger.Error().
						Err(streamErr.Err).
						Str("path", r.URL.Path).
						Msg("Stream error")
				}
			} else {
				// Generic error
				if !hasHeadersWritten(w) {
					http.Error(w, "Stream error", http.StatusInternalServerError)
				} else {
					hu.logger.Error().
						Err(err).
						Str("path", r.URL.Path).
						Msg("Stream error after headers written")
				}
			}
			return
		}
		return
	}
	// Fallback to ServeContent for other webdav.File implementations
	if rs, ok := fRaw.(io.ReadSeeker); ok {
		http.ServeContent(w, r, fi.Name(), fi.ModTime(), rs)
	} else {
		w.Header().Set("Content-Length", fmt.Sprintf("%d", fi.Size()))
		_, _ = io.Copy(w, fRaw)
	}
}
// handleHead answers HEAD requests with the file's metadata headers and no
// body.
func (hu *UsenetHandler) handleHead(w http.ResponseWriter, r *http.Request) {
	f, err := hu.OpenFile(r.Context(), r.URL.Path, os.O_RDONLY, 0)
	if err != nil {
		hu.logger.Error().Err(err).Str("path", r.URL.Path).Msg("Failed to open file")
		http.NotFound(w, r)
		return
	}
	// UsenetFile.Close never fails; the error is safe to ignore.
	defer func() { _ = f.Close() }()
	fi, err := f.Stat()
	if err != nil {
		hu.logger.Error().Err(err).Msg("Failed to stat file")
		http.Error(w, "Server Error", http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "application/octet-stream")
	w.Header().Set("Content-Length", fmt.Sprintf("%d", fi.Size()))
	w.Header().Set("Last-Modified", fi.ModTime().UTC().Format(http.TimeFormat))
	w.Header().Set("Accept-Ranges", "bytes")
	w.WriteHeader(http.StatusOK)
}
// handleOptions advertises the supported methods and WebDAV compliance
// classes. The DAV header was missing here while TorrentHandler sets it;
// clients use it to recognize the endpoint as WebDAV.
func (hu *UsenetHandler) handleOptions(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Allow", "OPTIONS, GET, HEAD, PUT, DELETE, MKCOL, COPY, MOVE, PROPFIND")
	w.Header().Set("DAV", "1, 2")
	w.WriteHeader(http.StatusOK)
}
// handleDelete deletes an NZB by id, or all bad entries when the id equals
// DeleteAllBadTorrentKey. A non-nil return makes ServeHTTP fall back to the
// generic WebDAV handler.
func (hu *UsenetHandler) handleDelete(w http.ResponseWriter, r *http.Request) error {
	_, itemID := path.Split(path.Clean(r.URL.Path))
	switch itemID {
	case "":
		return os.ErrNotExist
	case DeleteAllBadTorrentKey:
		return hu.handleDeleteAll(w)
	default:
		return hu.handleDeleteById(w, itemID)
	}
}
// handleDeleteById removes a single NZB from the store and answers
// 204 No Content on success.
func (hu *UsenetHandler) handleDeleteById(w http.ResponseWriter, nzID string) error {
	cached := hu.usenet.Store().Get(nzID)
	if cached == nil {
		return os.ErrNotExist
	}
	if err := hu.usenet.Store().Delete(nzID); err != nil {
		hu.logger.Error().Err(err).Str("nzbID", nzID).Msg("Failed to delete NZB")
		http.Error(w, "Failed to delete NZB", http.StatusInternalServerError)
		// BUG FIX: return nil here. A response has already been written;
		// propagating the error would make ServeHTTP fall through to the
		// generic WebDAV handler, which would attempt a second response.
		return nil
	}
	w.WriteHeader(http.StatusNoContent)
	return nil
}
// handleDeleteAll handles the delete-all-bad request. It is currently a
// no-op that reports success with 204 No Content — nothing is deleted.
// NOTE(review): presumably the usenet store has no bad-item listing yet;
// confirm whether this should iterate a "__bad__" listing like the torrent
// handler does.
func (hu *UsenetHandler) handleDeleteAll(w http.ResponseWriter) error {
	w.WriteHeader(http.StatusNoContent)
	return nil
}

View File

@@ -6,8 +6,12 @@ import (
"fmt"
"github.com/go-chi/chi/v5"
"github.com/go-chi/chi/v5/middleware"
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/pkg/store"
"github.com/sirrobot01/decypharr/internal/logger"
"github.com/sirrobot01/decypharr/pkg/debrid/store"
"github.com/sirrobot01/decypharr/pkg/usenet"
"golang.org/x/net/webdav"
"html/template"
"net/http"
"net/url"
@@ -33,6 +37,10 @@ var (
}
return strings.Join(segments, "/")
},
"split": strings.Split,
"sub": func(a, b int) int {
return a - b
},
"formatSize": func(bytes int64) string {
const (
KB = 1024
@@ -84,21 +92,50 @@ func init() {
chi.RegisterMethod("UNLOCK")
}
type WebDav struct {
Handlers []*Handler
URLBase string
type Handler interface {
http.Handler
Start(ctx context.Context) error
Readiness(next http.Handler) http.Handler
Name() string
OpenFile(ctx context.Context, name string, flag int, perm os.FileMode) (webdav.File, error)
GetChildren(name string) []os.FileInfo
Type() string
}
func New() *WebDav {
type WebDav struct {
Handlers []Handler
URLBase string
logger zerolog.Logger
}
func New(debridCaches map[string]*store.Cache, usenet usenet.Usenet) *WebDav {
urlBase := config.Get().URLBase
w := &WebDav{
Handlers: make([]*Handler, 0),
Handlers: make([]Handler, 0),
URLBase: urlBase,
logger: logger.New("webdav"),
}
for name, c := range store.Get().Debrid().Caches() {
h := NewHandler(name, urlBase, c, c.Logger())
// Set debrid handlers
for name, c := range debridCaches {
h := NewTorrentHandler(name, urlBase, c, c.Logger())
if h == nil {
w.logger.Warn().Msgf("Debrid handler for %s is nil, skipping", name)
continue
}
w.Handlers = append(w.Handlers, h)
}
// Set usenet handlers
if usenet != nil {
usenetHandler := NewUsenetHandler("usenet", urlBase, usenet, usenet.Logger())
if usenetHandler != nil {
w.Handlers = append(w.Handlers, usenetHandler)
} else {
w.logger.Warn().Msg("Usenet handler is nil, skipping")
}
}
return w
}
@@ -119,9 +156,9 @@ func (wd *WebDav) Start(ctx context.Context) error {
for _, h := range wd.Handlers {
wg.Add(1)
go func(h *Handler) {
go func(h Handler) {
defer wg.Done()
if err := h.cache.Start(ctx); err != nil {
if err := h.Start(ctx); err != nil {
select {
case errChan <- err:
default:
@@ -152,8 +189,8 @@ func (wd *WebDav) Start(ctx context.Context) error {
func (wd *WebDav) mountHandlers(r chi.Router) {
for _, h := range wd.Handlers {
r.Route("/"+h.Name, func(r chi.Router) {
r.Use(h.readinessMiddleware)
r.Route("/"+h.Name(), func(r chi.Router) {
r.Use(h.Readiness)
r.Mount("/", h)
}) // Mount to /name since router is already prefixed with /webdav
}
@@ -166,11 +203,7 @@ func (wd *WebDav) setupRootHandler(r chi.Router) {
func (wd *WebDav) commonMiddleware(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("DAV", "1, 2")
w.Header().Set("Allow", "OPTIONS, PROPFIND, GET, HEAD, POST, PUT, DELETE, MKCOL, PROPPATCH, COPY, MOVE, LOCK, UNLOCK")
w.Header().Set("Access-Control-Allow-Origin", "*")
w.Header().Set("Access-Control-Allow-Methods", "OPTIONS, PROPFIND, GET, HEAD, POST, PUT, DELETE, MKCOL, PROPPATCH, COPY, MOVE, LOCK, UNLOCK")
w.Header().Set("Access-Control-Allow-Headers", "Depth, Content-Type, Authorization")
next.ServeHTTP(w, r)
})
@@ -181,7 +214,7 @@ func (wd *WebDav) handleGetRoot() http.HandlerFunc {
w.Header().Set("Content-Type", "text/html; charset=utf-8")
data := struct {
Handlers []*Handler
Handlers []Handler
URLBase string
}{
Handlers: wd.Handlers,
@@ -205,7 +238,7 @@ func (wd *WebDav) handleWebdavRoot() http.HandlerFunc {
children := make([]os.FileInfo, 0, len(wd.Handlers))
for _, h := range wd.Handlers {
children = append(children, &FileInfo{
name: h.Name,
name: h.Name(),
size: 0,
mode: 0755 | os.ModeDir,
modTime: time.Now(),

View File

@@ -101,10 +101,10 @@ async function downloadAssets() {
await downloadFile(download.url, download.path);
}
console.log('\nExternal assets downloaded successfully!');
console.log('\nExternal assets downloaded successfully!');
} catch (error) {
console.error('💥 Error downloading assets:', error);
console.error('Error downloading assets:', error);
process.exit(1);
}
}

View File

@@ -105,8 +105,8 @@ async function minifyAllJS() {
if (processedFiles > 0) {
const totalReduction = ((totalOriginal - totalMinified) / totalOriginal * 100).toFixed(1);
console.log(`\nSuccessfully minified ${processedFiles}/${jsFiles.length} JavaScript file(s)`);
console.log(`📊 Total: ${(totalOriginal/1024).toFixed(1)}KB → ${(totalMinified/1024).toFixed(1)}KB (${totalReduction}% reduction)`);
console.log(`\nSuccessfully minified ${processedFiles}/${jsFiles.length} JavaScript file(s)`);
console.log(`Total: ${(totalOriginal/1024).toFixed(1)}KB → ${(totalMinified/1024).toFixed(1)}KB (${totalReduction}% reduction)`);
}
} catch (error) {