diff --git a/Dockerfile b/Dockerfile index afe80a3..b12fe3b 100644 --- a/Dockerfile +++ b/Dockerfile @@ -61,6 +61,4 @@ EXPOSE 8282 VOLUME ["/app"] USER nonroot:nonroot -HEALTHCHECK --interval=3s --retries=10 CMD ["/usr/bin/healthcheck", "--config", "/app"] - CMD ["/usr/bin/decypharr", "--config", "/app"] \ No newline at end of file diff --git a/cmd/decypharr/main.go b/cmd/decypharr/main.go index 3e243e2..4cbc57a 100644 --- a/cmd/decypharr/main.go +++ b/cmd/decypharr/main.go @@ -7,11 +7,10 @@ import ( "github.com/sirrobot01/decypharr/internal/logger" "github.com/sirrobot01/decypharr/pkg/qbit" "github.com/sirrobot01/decypharr/pkg/server" - "github.com/sirrobot01/decypharr/pkg/service" + "github.com/sirrobot01/decypharr/pkg/store" "github.com/sirrobot01/decypharr/pkg/version" "github.com/sirrobot01/decypharr/pkg/web" "github.com/sirrobot01/decypharr/pkg/webdav" - "github.com/sirrobot01/decypharr/pkg/worker" "net/http" "os" "runtime" @@ -62,7 +61,7 @@ func Start(ctx context.Context) error { qb := qbit.New() wd := webdav.New() - ui := web.New(qb).Routes() + ui := web.New().Routes() webdavRoutes := wd.Routes() qbitRoutes := qb.Routes() @@ -95,14 +94,14 @@ func Start(ctx context.Context) error { _log.Info().Msg("Restarting Decypharr...") <-done // wait for them to finish qb.Reset() - service.Reset() + store.Reset() // rebuild svcCtx off the original parent svcCtx, cancelSvc = context.WithCancel(ctx) runtime.GC() config.Reload() - service.Reset() + store.Reset() // loop will restart services automatically } } @@ -146,11 +145,7 @@ func startServices(ctx context.Context, wd *webdav.WebDav, srv *server.Server) e }) safeGo(func() error { - return worker.Start(ctx) - }) - - safeGo(func() error { - arr := service.GetService().Arr + arr := store.GetStore().GetArr() if arr == nil { return nil } @@ -159,9 +154,9 @@ func startServices(ctx context.Context, wd *webdav.WebDav, srv *server.Server) e if cfg := config.Get(); cfg.Repair.Enabled { safeGo(func() error { - r := 
service.GetService().Repair - if r != nil { - if err := r.Start(ctx); err != nil { + repair := store.GetStore().GetRepair() + if repair != nil { + if err := repair.Start(ctx); err != nil { _log.Error().Err(err).Msg("repair failed") } } diff --git a/cmd/healthcheck/main.go b/cmd/healthcheck/main.go index d5d740b..3c7253a 100644 --- a/cmd/healthcheck/main.go +++ b/cmd/healthcheck/main.go @@ -145,5 +145,8 @@ func checkWebDAV(ctx context.Context, baseUrl, port, path string) bool { } defer resp.Body.Close() - return resp.StatusCode == 207 || resp.StatusCode == http.StatusOK + return resp.StatusCode == http.StatusMultiStatus || + resp.StatusCode == http.StatusOK || + resp.StatusCode == http.StatusServiceUnavailable // It's still indexing + } diff --git a/docs/docs/guides/rclone.md b/docs/docs/guides/rclone.md index 1fce513..bb0d40e 100644 --- a/docs/docs/guides/rclone.md +++ b/docs/docs/guides/rclone.md @@ -5,7 +5,7 @@ This guide will help you set up Decypharr with Rclone, allowing you to use your #### Rclone Make sure you have Rclone installed and configured on your system. You can follow the [Rclone installation guide](https://rclone.org/install/) for instructions. -It's recommended to use docker version of Rclone, as it provides a consistent environment across different platforms. +It's recommended to use a docker version of Rclone, as it provides a consistent environment across different platforms. ### Steps @@ -35,7 +35,7 @@ Create a `rclone.conf` file in `/opt/rclone/` with your Rclone configuration. 
```conf [decypharr] type = webdav -url = https://your-ip-or-domain:8282/webdav/realdebrid +url = http://your-ip-or-domain:8282/webdav/realdebrid vendor = other pacer_min_sleep = 0 ``` @@ -69,13 +69,10 @@ services: decypharr: image: cy01/blackhole:latest container_name: decypharr - user: "1000:1000" volumes: - - /mnt/:/mnt + - /mnt/:/mnt:rslave - /opt/decypharr/:/app environment: - - PUID=1000 - - PGID=1000 - UMASK=002 ports: - "8282:8282/tcp" @@ -87,14 +84,11 @@ services: restart: unless-stopped environment: TZ: UTC - PUID: 1000 - PGID: 1000 ports: - 5572:5572 volumes: - /mnt/remote/realdebrid:/data:rshared - /opt/rclone/rclone.conf:/config/rclone/rclone.conf - - /mnt:/mnt cap_add: - SYS_ADMIN security_opt: @@ -105,9 +99,17 @@ services: decypharr: condition: service_healthy restart: true - command: "mount decypharr: /data --allow-non-empty --allow-other --uid=1000 --gid=1000 --umask=002 --dir-cache-time 10s --rc --rc-addr :5572 --rc-no-auth " + command: "mount decypharr: /data --allow-non-empty --allow-other --dir-cache-time 10s --rc --rc-addr :5572 --rc-no-auth " ``` +#### Docker Notes + +- Ensure that the `/mnt/` directory is mounted correctly to access your media files. +- You can check your current user and group IDs and UMASK by running `id -a` and `umask` commands in your terminal. +- You can adjust the `PUID` and `PGID` environment variables to match your user and group IDs for proper file permissions. +- Also adding `--uid=$YOUR_PUID --gid=$YOUR_PGID` to the `rclone mount` command can help with permissions. +- The `UMASK` environment variable can be set to control file permissions created by Decypharr. + Start the containers: ```bash docker-compose up -d @@ -132,7 +134,7 @@ For each provider, you'll need a different rclone. 
OR you can change your `rclon ```apache [decypharr] type = webdav -url = https://your-ip-or-domain:8282/webdav/ +url = http://your-ip-or-domain:8282/webdav/ vendor = other pacer_min_sleep = 0 ``` diff --git a/docs/docs/installation.md b/docs/docs/installation.md index 1f731dc..9e703bc 100644 --- a/docs/docs/installation.md +++ b/docs/docs/installation.md @@ -45,7 +45,6 @@ docker run -d \ Create a `docker-compose.yml` file with the following content: ```yaml -version: '3.7' services: decypharr: image: cy01/blackhole:latest @@ -64,20 +63,14 @@ Run the Docker Compose setup: docker-compose up -d ``` -#### Notes for Docker Users - -- Ensure that the `/mnt/` directory is mounted correctly to access your media files. -- The `./config/` directory should contain your `config.json` file. -- You can adjust the `PUID` and `PGID` environment variables to match your user and group IDs for proper file permissions. -- The `UMASK` environment variable can be set to control file permissions created by Decypharr. - ## Binary Installation If you prefer not to use Docker, you can download and run the binary directly. -Download the binary from the releases page +Download your OS-specific release from the [releases page](https://github.com/sirrobot01/decypharr/releases). Create a configuration file (see Configuration) Run the binary: + ```bash chmod +x decypharr ./decypharr --config /path/to/config/folder @@ -109,4 +102,30 @@ You can also configure Decypharr through the web interface, but it's recommended "log_level": "info", "port": "8282" } -``` \ No newline at end of file +``` + +### Notes for Docker Users + +- Ensure that the `/mnt/` directory is mounted correctly to access your media files. +- The `./config/` directory should contain your `config.json` file. +- You can adjust the `PUID` and `PGID` environment variables to match your user and group IDs for proper file permissions. +- The `UMASK` environment variable can be set to control file permissions created by Decypharr. 
+ +##### Health Checks +- Health checks are disabled by default. You can enable them by adding a `healthcheck` section in your `docker-compose.yml` file. +- Health checks check for availability of several parts of the application: + - The main web interface + - The qBittorrent API + - The WebDAV server (if enabled). You should disable health checks for the initial indexes as they can take a long time to complete. + +```yaml +services: + decypharr: + ... + ... + healthcheck: + test: ["CMD", "/usr/bin/healthcheck", "--config", "/app/"] + interval: 5s + timeout: 10s + retries: 3 +``` diff --git a/internal/config/config.go b/internal/config/config.go index d80093e..526d519 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -98,6 +98,10 @@ func (c *Config) AuthFile() string { return filepath.Join(c.Path, "auth.json") } +func (c *Config) TorrentsFile() string { + return filepath.Join(c.Path, "torrents.json") +} + func (c *Config) loadConfig() error { // Load the config file if configPath == "" { diff --git a/internal/utils/file.go b/internal/utils/file.go index f46423a..adc8a2d 100644 --- a/internal/utils/file.go +++ b/internal/utils/file.go @@ -1,7 +1,10 @@ package utils import ( + "fmt" + "io" "net/url" + "os" "strings" ) @@ -19,3 +22,65 @@ func PathUnescape(path string) string { return unescapedPath } + +func PreCacheFile(filePaths []string) error { + if len(filePaths) == 0 { + return fmt.Errorf("no file paths provided") + } + + for _, filePath := range filePaths { + err := func(f string) error { + + file, err := os.Open(f) + if err != nil { + if os.IsNotExist(err) { + // File has probably been moved by arr, return silently + return nil + } + return fmt.Errorf("failed to open file: %s: %v", f, err) + } + defer file.Close() + + // Pre-cache the file header (first 256KB) using 16KB chunks.
+ if err := readSmallChunks(file, 0, 256*1024, 16*1024); err != nil { + return err + } + if err := readSmallChunks(file, 1024*1024, 64*1024, 16*1024); err != nil { + return err + } + return nil + }(filePath) + if err != nil { + return err + } + } + return nil +} + +func readSmallChunks(file *os.File, startPos int64, totalToRead int, chunkSize int) error { + _, err := file.Seek(startPos, 0) + if err != nil { + return err + } + + buf := make([]byte, chunkSize) + bytesRemaining := totalToRead + + for bytesRemaining > 0 { + toRead := chunkSize + if bytesRemaining < chunkSize { + toRead = bytesRemaining + } + + n, err := file.Read(buf[:toRead]) + if err != nil { + if err == io.EOF { + break + } + return err + } + + bytesRemaining -= n + } + return nil +} diff --git a/internal/utils/magnet.go b/internal/utils/magnet.go index 02db58f..f9cc5ca 100644 --- a/internal/utils/magnet.go +++ b/internal/utils/magnet.go @@ -25,11 +25,11 @@ var ( ) type Magnet struct { - Name string - InfoHash string - Size int64 - Link string - File []byte + Name string `json:"name"` + InfoHash string `json:"infoHash"` + Size int64 `json:"size"` + Link string `json:"link"` + File []byte `json:"-"` } func (m *Magnet) IsTorrent() bool { @@ -83,7 +83,6 @@ func GetMagnetFromBytes(torrentData []byte) (*Magnet, error) { if err != nil { return nil, err } - log.Println("InfoHash: ", infoHash) magnet := &Magnet{ InfoHash: infoHash, Name: info.Name, diff --git a/pkg/arr/arr.go b/pkg/arr/arr.go index c592194..4b90efd 100644 --- a/pkg/arr/arr.go +++ b/pkg/arr/arr.go @@ -11,7 +11,6 @@ import ( "github.com/sirrobot01/decypharr/internal/request" "io" "net/http" - "strconv" "strings" "sync" "time" @@ -121,10 +120,10 @@ type Storage struct { logger zerolog.Logger } -func (as *Storage) Cleanup() { - as.mu.Lock() - defer as.mu.Unlock() - as.Arrs = make(map[string]*Arr) +func (s *Storage) Cleanup() { + s.mu.Lock() + defer s.mu.Unlock() + s.Arrs = make(map[string]*Arr) } func InferType(host, name string) Type { @@ 
-154,26 +153,26 @@ func NewStorage() *Storage { } } -func (as *Storage) AddOrUpdate(arr *Arr) { - as.mu.Lock() - defer as.mu.Unlock() +func (s *Storage) AddOrUpdate(arr *Arr) { + s.mu.Lock() + defer s.mu.Unlock() if arr.Name == "" { return } - as.Arrs[arr.Name] = arr + s.Arrs[arr.Name] = arr } -func (as *Storage) Get(name string) *Arr { - as.mu.Lock() - defer as.mu.Unlock() - return as.Arrs[name] +func (s *Storage) Get(name string) *Arr { + s.mu.Lock() + defer s.mu.Unlock() + return s.Arrs[name] } -func (as *Storage) GetAll() []*Arr { - as.mu.Lock() - defer as.mu.Unlock() - arrs := make([]*Arr, 0, len(as.Arrs)) - for _, arr := range as.Arrs { +func (s *Storage) GetAll() []*Arr { + s.mu.Lock() + defer s.mu.Unlock() + arrs := make([]*Arr, 0, len(s.Arrs)) + for _, arr := range s.Arrs { if arr.Host != "" && arr.Token != "" { arrs = append(arrs, arr) } @@ -181,19 +180,19 @@ func (as *Storage) GetAll() []*Arr { return arrs } -func (as *Storage) Clear() { - as.mu.Lock() - defer as.mu.Unlock() - as.Arrs = make(map[string]*Arr) +func (s *Storage) Clear() { + s.mu.Lock() + defer s.mu.Unlock() + s.Arrs = make(map[string]*Arr) } -func (as *Storage) StartSchedule(ctx context.Context) error { +func (s *Storage) StartSchedule(ctx context.Context) error { ticker := time.NewTicker(10 * time.Second) select { case <-ticker.C: - as.cleanupArrsQueue() + s.cleanupArrsQueue() case <-ctx.Done(): ticker.Stop() return nil @@ -201,9 +200,9 @@ func (as *Storage) StartSchedule(ctx context.Context) error { return nil } -func (as *Storage) cleanupArrsQueue() { +func (s *Storage) cleanupArrsQueue() { arrs := make([]*Arr, 0) - for _, arr := range as.Arrs { + for _, arr := range s.Arrs { if !arr.Cleanup { continue } @@ -212,26 +211,18 @@ func (as *Storage) cleanupArrsQueue() { if len(arrs) > 0 { for _, arr := range arrs { if err := arr.CleanupQueue(); err != nil { - as.logger.Error().Err(err).Msgf("Failed to cleanup arr %s", arr.Name) + s.logger.Error().Err(err).Msgf("Failed to cleanup arr %s", 
arr.Name) } } } } -func (a *Arr) Refresh() error { +func (a *Arr) Refresh() { payload := struct { Name string `json:"name"` }{ Name: "RefreshMonitoredDownloads", } - resp, err := a.Request(http.MethodPost, "api/v3/command", payload) - if err == nil && resp != nil { - statusOk := strconv.Itoa(resp.StatusCode)[0] == '2' - if statusOk { - return nil - } - } - - return fmt.Errorf("failed to refresh: %v", err) + _, _ = a.Request(http.MethodPost, "api/v3/command", payload) } diff --git a/pkg/arr/import.go b/pkg/arr/import.go index 9ef651b..b9709b8 100644 --- a/pkg/arr/import.go +++ b/pkg/arr/import.go @@ -205,5 +205,4 @@ func (a *Arr) Import(path string, seriesId int, seasons []int) (io.ReadCloser, e } defer resp.Body.Close() return resp.Body, nil - } diff --git a/pkg/debrid/debrid.go b/pkg/debrid/debrid.go new file mode 100644 index 0000000..0506181 --- /dev/null +++ b/pkg/debrid/debrid.go @@ -0,0 +1,218 @@ +package debrid + +import ( + "context" + "fmt" + "github.com/sirrobot01/decypharr/internal/config" + "github.com/sirrobot01/decypharr/internal/logger" + "github.com/sirrobot01/decypharr/internal/utils" + "github.com/sirrobot01/decypharr/pkg/arr" + "github.com/sirrobot01/decypharr/pkg/debrid/providers/alldebrid" + "github.com/sirrobot01/decypharr/pkg/debrid/providers/debrid_link" + "github.com/sirrobot01/decypharr/pkg/debrid/providers/realdebrid" + "github.com/sirrobot01/decypharr/pkg/debrid/providers/torbox" + "github.com/sirrobot01/decypharr/pkg/debrid/store" + "github.com/sirrobot01/decypharr/pkg/debrid/types" + "strings" + "sync" +) + +type Storage struct { + clients map[string]types.Client + clientsLock sync.Mutex + caches map[string]*store.Cache + cachesLock sync.Mutex + LastUsed string +} + +func NewStorage() *Storage { + cfg := config.Get() + clients := make(map[string]types.Client) + + _logger := logger.Default() + + caches := make(map[string]*store.Cache) + + for _, dc := range cfg.Debrids { + client, err := createDebridClient(dc) + if err != nil { + 
_logger.Error().Err(err).Str("Debrid", dc.Name).Msg("failed to connect to debrid client") + continue + } + _log := client.GetLogger() + if dc.UseWebDav { + caches[dc.Name] = store.NewDebridCache(dc, client) + _log.Info().Msg("Debrid Service started with WebDAV") + } else { + _log.Info().Msg("Debrid Service started") + } + clients[dc.Name] = client + } + + d := &Storage{ + clients: clients, + LastUsed: "", + caches: caches, + } + return d +} + +func (d *Storage) GetClient(name string) types.Client { + d.clientsLock.Lock() + defer d.clientsLock.Unlock() + client, exists := d.clients[name] + if !exists { + return nil + } + return client +} + +func (d *Storage) Reset() { + d.clientsLock.Lock() + d.clients = make(map[string]types.Client) + d.clientsLock.Unlock() + + d.cachesLock.Lock() + d.caches = make(map[string]*store.Cache) + d.cachesLock.Unlock() + d.LastUsed = "" +} + +func (d *Storage) GetClients() map[string]types.Client { + d.clientsLock.Lock() + defer d.clientsLock.Unlock() + clientsCopy := make(map[string]types.Client) + for name, client := range d.clients { + clientsCopy[name] = client + } + return clientsCopy +} + +func (d *Storage) GetCaches() map[string]*store.Cache { + d.clientsLock.Lock() + defer d.clientsLock.Unlock() + cachesCopy := make(map[string]*store.Cache) + for name, cache := range d.caches { + cachesCopy[name] = cache + } + return cachesCopy +} + +func (d *Storage) FilterClients(filter func(types.Client) bool) map[string]types.Client { + d.clientsLock.Lock() + defer d.clientsLock.Unlock() + filteredClients := make(map[string]types.Client) + for name, client := range d.clients { + if filter(client) { + filteredClients[name] = client + } + } + return filteredClients +} + +func (d *Storage) FilterCaches(filter func(*store.Cache) bool) map[string]*store.Cache { + d.cachesLock.Lock() + defer d.cachesLock.Unlock() + filteredCaches := make(map[string]*store.Cache) + for name, cache := range d.caches { + if filter(cache) { + filteredCaches[name] = 
cache + } + } + return filteredCaches +} + +func createDebridClient(dc config.Debrid) (types.Client, error) { + switch dc.Name { + case "realdebrid": + return realdebrid.New(dc) + case "torbox": + return torbox.New(dc) + case "debridlink": + return debrid_link.New(dc) + case "alldebrid": + return alldebrid.New(dc) + default: + return realdebrid.New(dc) + } +} + +func ProcessTorrent(ctx context.Context, store *Storage, selectedDebrid string, magnet *utils.Magnet, a *arr.Arr, isSymlink, overrideDownloadUncached bool) (*types.Torrent, error) { + + debridTorrent := &types.Torrent{ + InfoHash: magnet.InfoHash, + Magnet: magnet, + Name: magnet.Name, + Arr: a, + Size: magnet.Size, + Files: make(map[string]types.File), + } + + clients := store.FilterClients(func(c types.Client) bool { + if selectedDebrid != "" && c.GetName() != selectedDebrid { + return false + } + return true + }) + + if len(clients) == 0 { + return nil, fmt.Errorf("no debrid clients available") + } + + errs := make([]error, 0, len(clients)) + + // Override first, arr second, debrid third + + if overrideDownloadUncached { + debridTorrent.DownloadUncached = true + } else if a.DownloadUncached != nil { + // Arr cached is set + debridTorrent.DownloadUncached = *a.DownloadUncached + } else { + debridTorrent.DownloadUncached = false + } + + for index, db := range clients { + _logger := db.GetLogger() + _logger.Info(). + Str("Debrid", db.GetName()). + Str("Arr", a.Name). + Str("Hash", debridTorrent.InfoHash). + Str("Name", debridTorrent.Name). 
+ Msg("Processing torrent") + + if !overrideDownloadUncached && a.DownloadUncached == nil { + debridTorrent.DownloadUncached = db.GetDownloadUncached() + } + + dbt, err := db.SubmitMagnet(debridTorrent) + if err != nil || dbt == nil || dbt.Id == "" { + errs = append(errs, err) + continue + } + dbt.Arr = a + _logger.Info().Str("id", dbt.Id).Msgf("Torrent: %s submitted to %s", dbt.Name, db.GetName()) + store.LastUsed = index + + torrent, err := db.CheckStatus(dbt, isSymlink) + if err != nil && torrent != nil && torrent.Id != "" { + // Delete the torrent if it was not downloaded + go func(id string) { + _ = db.DeleteTorrent(id) + }(torrent.Id) + } + return torrent, err + } + if len(errs) == 0 { + return nil, fmt.Errorf("failed to process torrent: no clients available") + } + if len(errs) == 1 { + return nil, fmt.Errorf("failed to process torrent: %w", errs[0]) + } else { + errStrings := make([]string, 0, len(errs)) + for _, err := range errs { + errStrings = append(errStrings, err.Error()) + } + return nil, fmt.Errorf("failed to process torrent: %s", strings.Join(errStrings, ", ")) + } +} diff --git a/pkg/debrid/debrid/debrid.go b/pkg/debrid/debrid/debrid.go deleted file mode 100644 index 8169ae3..0000000 --- a/pkg/debrid/debrid/debrid.go +++ /dev/null @@ -1,103 +0,0 @@ -package debrid - -import ( - "fmt" - "github.com/sirrobot01/decypharr/internal/config" - "github.com/sirrobot01/decypharr/internal/utils" - "github.com/sirrobot01/decypharr/pkg/arr" - "github.com/sirrobot01/decypharr/pkg/debrid/alldebrid" - "github.com/sirrobot01/decypharr/pkg/debrid/debrid_link" - "github.com/sirrobot01/decypharr/pkg/debrid/realdebrid" - "github.com/sirrobot01/decypharr/pkg/debrid/torbox" - "github.com/sirrobot01/decypharr/pkg/debrid/types" - "strings" -) - -func createDebridClient(dc config.Debrid) (types.Client, error) { - switch dc.Name { - case "realdebrid": - return realdebrid.New(dc) - case "torbox": - return torbox.New(dc) - case "debridlink": - return debrid_link.New(dc) - 
case "alldebrid": - return alldebrid.New(dc) - default: - return realdebrid.New(dc) - } -} - -func ProcessTorrent(d *Engine, magnet *utils.Magnet, a *arr.Arr, isSymlink, overrideDownloadUncached bool) (*types.Torrent, error) { - - debridTorrent := &types.Torrent{ - InfoHash: magnet.InfoHash, - Magnet: magnet, - Name: magnet.Name, - Arr: a, - Size: magnet.Size, - Files: make(map[string]types.File), - } - - errs := make([]error, 0, len(d.Clients)) - - // Override first, arr second, debrid third - - if overrideDownloadUncached { - debridTorrent.DownloadUncached = true - } else if a.DownloadUncached != nil { - // Arr cached is set - debridTorrent.DownloadUncached = *a.DownloadUncached - } else { - debridTorrent.DownloadUncached = false - } - - for index, db := range d.Clients { - logger := db.GetLogger() - logger.Info().Str("Debrid", db.GetName()).Str("Hash", debridTorrent.InfoHash).Msg("Processing torrent") - - if !overrideDownloadUncached && a.DownloadUncached == nil { - debridTorrent.DownloadUncached = db.GetDownloadUncached() - } - - //if db.GetCheckCached() { - // hash, exists := db.IsAvailable([]string{debridTorrent.InfoHash})[debridTorrent.InfoHash] - // if !exists || !hash { - // logger.Info().Msgf("Torrent: %s is not cached", debridTorrent.Name) - // continue - // } else { - // logger.Info().Msgf("Torrent: %s is cached(or downloading)", debridTorrent.Name) - // } - //} - - dbt, err := db.SubmitMagnet(debridTorrent) - if err != nil || dbt == nil || dbt.Id == "" { - errs = append(errs, err) - continue - } - dbt.Arr = a - logger.Info().Str("id", dbt.Id).Msgf("Torrent: %s submitted to %s", dbt.Name, db.GetName()) - d.LastUsed = index - - torrent, err := db.CheckStatus(dbt, isSymlink) - if err != nil && torrent != nil && torrent.Id != "" { - // Delete the torrent if it was not downloaded - go func(id string) { - _ = db.DeleteTorrent(id) - }(torrent.Id) - } - return torrent, err - } - if len(errs) == 0 { - return nil, fmt.Errorf("failed to process torrent: no 
clients available") - } - if len(errs) == 1 { - return nil, fmt.Errorf("failed to process torrent: %w", errs[0]) - } else { - errStrings := make([]string, 0, len(errs)) - for _, err := range errs { - errStrings = append(errStrings, err.Error()) - } - return nil, fmt.Errorf("failed to process torrent: %s", strings.Join(errStrings, ", ")) - } -} diff --git a/pkg/debrid/debrid/engine.go b/pkg/debrid/debrid/engine.go deleted file mode 100644 index 520ecdb..0000000 --- a/pkg/debrid/debrid/engine.go +++ /dev/null @@ -1,68 +0,0 @@ -package debrid - -import ( - "github.com/sirrobot01/decypharr/internal/config" - "github.com/sirrobot01/decypharr/internal/logger" - "github.com/sirrobot01/decypharr/pkg/debrid/types" - "sync" -) - -type Engine struct { - Clients map[string]types.Client - clientsMu sync.Mutex - Caches map[string]*Cache - cacheMu sync.Mutex - LastUsed string -} - -func NewEngine() *Engine { - cfg := config.Get() - clients := make(map[string]types.Client) - - _logger := logger.Default() - - caches := make(map[string]*Cache) - - for _, dc := range cfg.Debrids { - client, err := createDebridClient(dc) - if err != nil { - _logger.Error().Err(err).Str("Debrid", dc.Name).Msg("failed to connect to debrid client") - continue - } - _log := client.GetLogger() - if dc.UseWebDav { - caches[dc.Name] = New(dc, client) - _log.Info().Msg("Debrid Service started with WebDAV") - } else { - _log.Info().Msg("Debrid Service started") - } - clients[dc.Name] = client - } - - d := &Engine{ - Clients: clients, - LastUsed: "", - Caches: caches, - } - return d -} - -func (d *Engine) GetClient(name string) types.Client { - d.clientsMu.Lock() - defer d.clientsMu.Unlock() - return d.Clients[name] -} - -func (d *Engine) Reset() { - d.clientsMu.Lock() - d.Clients = make(map[string]types.Client) - d.clientsMu.Unlock() - - d.cacheMu.Lock() - d.Caches = make(map[string]*Cache) - d.cacheMu.Unlock() -} - -func (d *Engine) GetDebrids() map[string]types.Client { - return d.Clients -} diff --git 
a/pkg/debrid/debrid/xml.go b/pkg/debrid/debrid/xml.go deleted file mode 100644 index 57845ff..0000000 --- a/pkg/debrid/debrid/xml.go +++ /dev/null @@ -1 +0,0 @@ -package debrid diff --git a/pkg/debrid/alldebrid/alldebrid.go b/pkg/debrid/providers/alldebrid/alldebrid.go similarity index 100% rename from pkg/debrid/alldebrid/alldebrid.go rename to pkg/debrid/providers/alldebrid/alldebrid.go diff --git a/pkg/debrid/alldebrid/types.go b/pkg/debrid/providers/alldebrid/types.go similarity index 100% rename from pkg/debrid/alldebrid/types.go rename to pkg/debrid/providers/alldebrid/types.go diff --git a/pkg/debrid/debrid_link/debrid_link.go b/pkg/debrid/providers/debrid_link/debrid_link.go similarity index 100% rename from pkg/debrid/debrid_link/debrid_link.go rename to pkg/debrid/providers/debrid_link/debrid_link.go diff --git a/pkg/debrid/debrid_link/types.go b/pkg/debrid/providers/debrid_link/types.go similarity index 100% rename from pkg/debrid/debrid_link/types.go rename to pkg/debrid/providers/debrid_link/types.go diff --git a/pkg/debrid/realdebrid/realdebrid.go b/pkg/debrid/providers/realdebrid/realdebrid.go similarity index 99% rename from pkg/debrid/realdebrid/realdebrid.go rename to pkg/debrid/providers/realdebrid/realdebrid.go index aa873db..5dfa438 100644 --- a/pkg/debrid/realdebrid/realdebrid.go +++ b/pkg/debrid/providers/realdebrid/realdebrid.go @@ -5,6 +5,7 @@ import ( "encoding/json" "errors" "fmt" + "github.com/sirrobot01/decypharr/pkg/debrid/types" "io" "net/http" gourl "net/url" @@ -20,8 +21,6 @@ import ( "github.com/sirrobot01/decypharr/internal/logger" "github.com/sirrobot01/decypharr/internal/request" "github.com/sirrobot01/decypharr/internal/utils" - "github.com/sirrobot01/decypharr/pkg/debrid/types" - "github.com/sirrobot01/decypharr/pkg/rar" ) diff --git a/pkg/debrid/realdebrid/types.go b/pkg/debrid/providers/realdebrid/types.go similarity index 100% rename from pkg/debrid/realdebrid/types.go rename to pkg/debrid/providers/realdebrid/types.go diff 
--git a/pkg/debrid/torbox/torbox.go b/pkg/debrid/providers/torbox/torbox.go similarity index 100% rename from pkg/debrid/torbox/torbox.go rename to pkg/debrid/providers/torbox/torbox.go diff --git a/pkg/debrid/torbox/types.go b/pkg/debrid/providers/torbox/types.go similarity index 100% rename from pkg/debrid/torbox/types.go rename to pkg/debrid/providers/torbox/types.go diff --git a/pkg/debrid/debrid/cache.go b/pkg/debrid/store/cache.go similarity index 99% rename from pkg/debrid/debrid/cache.go rename to pkg/debrid/store/cache.go index 0c2d1ae..9d08d7d 100644 --- a/pkg/debrid/debrid/cache.go +++ b/pkg/debrid/store/cache.go @@ -1,4 +1,4 @@ -package debrid +package store import ( "bufio" @@ -6,6 +6,7 @@ import ( "context" "errors" "fmt" + "github.com/sirrobot01/decypharr/pkg/debrid/types" "os" "path" "path/filepath" @@ -22,7 +23,6 @@ import ( "github.com/sirrobot01/decypharr/internal/config" "github.com/sirrobot01/decypharr/internal/logger" "github.com/sirrobot01/decypharr/internal/utils" - "github.com/sirrobot01/decypharr/pkg/debrid/types" _ "time/tzdata" ) @@ -108,7 +108,7 @@ type Cache struct { customFolders []string } -func New(dc config.Debrid, client types.Client) *Cache { +func NewDebridCache(dc config.Debrid, client types.Client) *Cache { cfg := config.Get() cetSc, err := gocron.NewScheduler(gocron.WithLocation(time.UTC)) if err != nil { @@ -691,7 +691,7 @@ func (c *Cache) ProcessTorrent(t *types.Torrent) error { return nil } -func (c *Cache) AddTorrent(t *types.Torrent) error { +func (c *Cache) Add(t *types.Torrent) error { if len(t.Files) == 0 { if err := c.client.UpdateTorrent(t); err != nil { return fmt.Errorf("failed to update torrent: %w", err) diff --git a/pkg/debrid/debrid/download_link.go b/pkg/debrid/store/download_link.go similarity index 99% rename from pkg/debrid/debrid/download_link.go rename to pkg/debrid/store/download_link.go index 53ed4ce..a404d27 100644 --- a/pkg/debrid/debrid/download_link.go +++ b/pkg/debrid/store/download_link.go @@ 
-1,14 +1,14 @@ -package debrid +package store import ( "errors" "fmt" + "github.com/sirrobot01/decypharr/pkg/debrid/types" "sync" "time" "github.com/sirrobot01/decypharr/internal/request" - "github.com/sirrobot01/decypharr/pkg/debrid/types" ) type linkCache struct { diff --git a/pkg/debrid/debrid/misc.go b/pkg/debrid/store/misc.go similarity index 98% rename from pkg/debrid/debrid/misc.go rename to pkg/debrid/store/misc.go index 501ea08..d0c089b 100644 --- a/pkg/debrid/debrid/misc.go +++ b/pkg/debrid/store/misc.go @@ -1,4 +1,4 @@ -package debrid +package store import ( "github.com/sirrobot01/decypharr/pkg/debrid/types" diff --git a/pkg/debrid/debrid/refresh.go b/pkg/debrid/store/refresh.go similarity index 99% rename from pkg/debrid/debrid/refresh.go rename to pkg/debrid/store/refresh.go index cf9c728..882eb4e 100644 --- a/pkg/debrid/debrid/refresh.go +++ b/pkg/debrid/store/refresh.go @@ -1,4 +1,4 @@ -package debrid +package store import ( "context" diff --git a/pkg/debrid/debrid/repair.go b/pkg/debrid/store/repair.go similarity index 99% rename from pkg/debrid/debrid/repair.go rename to pkg/debrid/store/repair.go index fae4ecd..9234995 100644 --- a/pkg/debrid/debrid/repair.go +++ b/pkg/debrid/store/repair.go @@ -1,4 +1,4 @@ -package debrid +package store import ( "context" diff --git a/pkg/debrid/debrid/torrent.go b/pkg/debrid/store/torrent.go similarity index 99% rename from pkg/debrid/debrid/torrent.go rename to pkg/debrid/store/torrent.go index c782765..332a660 100644 --- a/pkg/debrid/debrid/torrent.go +++ b/pkg/debrid/store/torrent.go @@ -1,4 +1,4 @@ -package debrid +package store import ( "fmt" diff --git a/pkg/debrid/debrid/worker.go b/pkg/debrid/store/worker.go similarity index 99% rename from pkg/debrid/debrid/worker.go rename to pkg/debrid/store/worker.go index b0a3705..8fba929 100644 --- a/pkg/debrid/debrid/worker.go +++ b/pkg/debrid/store/worker.go @@ -1,4 +1,4 @@ -package debrid +package store import ( "context" diff --git a/pkg/debrid/store/xml.go 
b/pkg/debrid/store/xml.go new file mode 100644 index 0000000..72440ea --- /dev/null +++ b/pkg/debrid/store/xml.go @@ -0,0 +1 @@ +package store diff --git a/pkg/debrid/types/torrent.go b/pkg/debrid/types/torrent.go index 9332943..0975e0d 100644 --- a/pkg/debrid/types/torrent.go +++ b/pkg/debrid/types/torrent.go @@ -34,10 +34,12 @@ type Torrent struct { Debrid string `json:"debrid"` - Arr *arr.Arr `json:"arr"` - Mu sync.Mutex `json:"-"` - SizeDownloaded int64 `json:"-"` // This is used for local download - DownloadUncached bool `json:"-"` + Arr *arr.Arr `json:"arr"` + + SizeDownloaded int64 `json:"-"` // This is used for local download + DownloadUncached bool `json:"-"` + + sync.Mutex } type DownloadLink struct { diff --git a/pkg/qbit/context.go b/pkg/qbit/context.go new file mode 100644 index 0000000..893cc76 --- /dev/null +++ b/pkg/qbit/context.go @@ -0,0 +1,127 @@ +package qbit + +import ( + "context" + "encoding/base64" + "github.com/go-chi/chi/v5" + "github.com/sirrobot01/decypharr/pkg/arr" + "github.com/sirrobot01/decypharr/pkg/store" + "net/http" + "strings" +) + +type contextKey string + +const ( + categoryKey contextKey = "category" + hashesKey contextKey = "hashes" + arrKey contextKey = "arr" +) + +func getCategory(ctx context.Context) string { + if category, ok := ctx.Value(categoryKey).(string); ok { + return category + } + return "" +} + +func getHashes(ctx context.Context) []string { + if hashes, ok := ctx.Value(hashesKey).([]string); ok { + return hashes + } + return nil +} + +func getArr(ctx context.Context) *arr.Arr { + if a, ok := ctx.Value(arrKey).(*arr.Arr); ok { + return a + } + return nil +} + +func decodeAuthHeader(header string) (string, string, error) { + encodedTokens := strings.Split(header, " ") + if len(encodedTokens) != 2 { + return "", "", nil + } + encodedToken := encodedTokens[1] + + bytes, err := base64.StdEncoding.DecodeString(encodedToken) + if err != nil { + return "", "", err + } + + bearer := string(bytes) + + colonIndex := 
strings.LastIndex(bearer, ":") + host := bearer[:colonIndex] + token := bearer[colonIndex+1:] + + return host, token, nil +} + +func (q *QBit) categoryContext(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + category := strings.Trim(r.URL.Query().Get("category"), "") + if category == "" { + // Get from form + _ = r.ParseForm() + category = r.Form.Get("category") + if category == "" { + // Get from multipart form + _ = r.ParseMultipartForm(32 << 20) + category = r.FormValue("category") + } + } + ctx := context.WithValue(r.Context(), categoryKey, strings.TrimSpace(category)) + next.ServeHTTP(w, r.WithContext(ctx)) + }) +} + +func (q *QBit) authContext(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + host, token, err := decodeAuthHeader(r.Header.Get("Authorization")) + category := getCategory(r.Context()) + arrs := store.GetStore().GetArr() + // Check if arr exists + a := arrs.Get(category) + if a == nil { + downloadUncached := false + a = arr.New(category, "", "", false, false, &downloadUncached) + } + if err == nil { + host = strings.TrimSpace(host) + if host != "" { + a.Host = host + } + token = strings.TrimSpace(token) + if token != "" { + a.Token = token + } + } + + arrs.AddOrUpdate(a) + ctx := context.WithValue(r.Context(), arrKey, a) + next.ServeHTTP(w, r.WithContext(ctx)) + }) +} + +func hashesContext(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + _hashes := chi.URLParam(r, "hashes") + var hashes []string + if _hashes != "" { + hashes = strings.Split(_hashes, "|") + } + if hashes == nil { + // Get hashes from form + _ = r.ParseForm() + hashes = r.Form["hashes"] + } + for i, hash := range hashes { + hashes[i] = strings.TrimSpace(hash) + } + ctx := context.WithValue(r.Context(), hashesKey, hashes) + next.ServeHTTP(w, r.WithContext(ctx)) + }) +} diff --git a/pkg/qbit/http.go 
b/pkg/qbit/http.go index b6efde0..1f81e7f 100644 --- a/pkg/qbit/http.go +++ b/pkg/qbit/http.go @@ -1,107 +1,16 @@ package qbit import ( - "context" - "encoding/base64" - "github.com/go-chi/chi/v5" "github.com/sirrobot01/decypharr/internal/request" "github.com/sirrobot01/decypharr/pkg/arr" - "github.com/sirrobot01/decypharr/pkg/service" "net/http" "path/filepath" "strings" ) -func decodeAuthHeader(header string) (string, string, error) { - encodedTokens := strings.Split(header, " ") - if len(encodedTokens) != 2 { - return "", "", nil - } - encodedToken := encodedTokens[1] - - bytes, err := base64.StdEncoding.DecodeString(encodedToken) - if err != nil { - return "", "", err - } - - bearer := string(bytes) - - colonIndex := strings.LastIndex(bearer, ":") - host := bearer[:colonIndex] - token := bearer[colonIndex+1:] - - return host, token, nil -} - -func (q *QBit) CategoryContext(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - category := strings.Trim(r.URL.Query().Get("category"), "") - if category == "" { - // Get from form - _ = r.ParseForm() - category = r.Form.Get("category") - if category == "" { - // Get from multipart form - _ = r.ParseMultipartForm(32 << 20) - category = r.FormValue("category") - } - } - ctx := context.WithValue(r.Context(), "category", strings.TrimSpace(category)) - next.ServeHTTP(w, r.WithContext(ctx)) - }) -} - -func (q *QBit) authContext(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - host, token, err := decodeAuthHeader(r.Header.Get("Authorization")) - category := r.Context().Value("category").(string) - svc := service.GetService() - // Check if arr exists - a := svc.Arr.Get(category) - if a == nil { - downloadUncached := false - a = arr.New(category, "", "", false, false, &downloadUncached) - } - if err == nil { - host = strings.TrimSpace(host) - if host != "" { - a.Host = host - } - token = strings.TrimSpace(token) 
- if token != "" { - a.Token = token - } - } - - svc.Arr.AddOrUpdate(a) - ctx := context.WithValue(r.Context(), "arr", a) - next.ServeHTTP(w, r.WithContext(ctx)) - }) -} - -func HashesCtx(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - _hashes := chi.URLParam(r, "hashes") - var hashes []string - if _hashes != "" { - hashes = strings.Split(_hashes, "|") - } - if hashes == nil { - // Get hashes from form - _ = r.ParseForm() - hashes = r.Form["hashes"] - } - for i, hash := range hashes { - hashes[i] = strings.TrimSpace(hash) - } - ctx := context.WithValue(r.Context(), "hashes", hashes) - next.ServeHTTP(w, r.WithContext(ctx)) - }) -} - func (q *QBit) handleLogin(w http.ResponseWriter, r *http.Request) { ctx := r.Context() - _arr := ctx.Value("arr").(*arr.Arr) + _arr := getArr(ctx) if _arr == nil { // No arr _, _ = w.Write([]byte("Ok.")) @@ -122,7 +31,7 @@ func (q *QBit) handleWebAPIVersion(w http.ResponseWriter, r *http.Request) { } func (q *QBit) handlePreferences(w http.ResponseWriter, r *http.Request) { - preferences := NewAppPreferences() + preferences := getAppPreferences() preferences.WebUiUsername = q.Username preferences.SavePath = q.DownloadFolder @@ -150,10 +59,10 @@ func (q *QBit) handleShutdown(w http.ResponseWriter, r *http.Request) { func (q *QBit) handleTorrentsInfo(w http.ResponseWriter, r *http.Request) { //log all url params ctx := r.Context() - category := ctx.Value("category").(string) + category := getCategory(ctx) filter := strings.Trim(r.URL.Query().Get("filter"), "") - hashes, _ := ctx.Value("hashes").([]string) - torrents := q.Storage.GetAllSorted(category, filter, hashes, "added_on", false) + hashes := getHashes(ctx) + torrents := q.storage.GetAllSorted(category, filter, hashes, "added_on", false) request.JSONResponse(w, torrents, http.StatusOK) } @@ -180,9 +89,13 @@ func (q *QBit) handleTorrentsAdd(w http.ResponseWriter, r *http.Request) { } isSymlink := 
strings.ToLower(r.FormValue("sequentialDownload")) != "true" + debridName := r.FormValue("debrid") category := r.FormValue("category") + _arr := getArr(ctx) + if _arr == nil { + _arr = arr.New(category, "", "", false, false, nil) + } atleastOne := false - ctx = context.WithValue(ctx, "isSymlink", isSymlink) // Handle magnet URLs if urls := r.FormValue("urls"); urls != "" { @@ -191,7 +104,7 @@ func (q *QBit) handleTorrentsAdd(w http.ResponseWriter, r *http.Request) { urlList = append(urlList, strings.TrimSpace(u)) } for _, url := range urlList { - if err := q.AddMagnet(ctx, url, category); err != nil { + if err := q.addMagnet(ctx, url, _arr, debridName, isSymlink); err != nil { q.logger.Info().Msgf("Error adding magnet: %v", err) http.Error(w, err.Error(), http.StatusBadRequest) return @@ -204,7 +117,7 @@ func (q *QBit) handleTorrentsAdd(w http.ResponseWriter, r *http.Request) { if r.MultipartForm != nil && r.MultipartForm.File != nil { if files := r.MultipartForm.File["torrents"]; len(files) > 0 { for _, fileHeader := range files { - if err := q.AddTorrent(ctx, fileHeader, category); err != nil { + if err := q.addTorrent(ctx, fileHeader, _arr, debridName, isSymlink); err != nil { q.logger.Info().Msgf("Error adding torrent: %v", err) http.Error(w, err.Error(), http.StatusBadRequest) return @@ -224,14 +137,14 @@ func (q *QBit) handleTorrentsAdd(w http.ResponseWriter, r *http.Request) { func (q *QBit) handleTorrentsDelete(w http.ResponseWriter, r *http.Request) { ctx := r.Context() - hashes, _ := ctx.Value("hashes").([]string) + hashes := getHashes(ctx) if len(hashes) == 0 { http.Error(w, "No hashes provided", http.StatusBadRequest) return } - category := ctx.Value("category").(string) + category := getCategory(ctx) for _, hash := range hashes { - q.Storage.Delete(hash, category, false) + q.storage.Delete(hash, category, false) } w.WriteHeader(http.StatusOK) @@ -239,10 +152,10 @@ func (q *QBit) handleTorrentsDelete(w http.ResponseWriter, r *http.Request) { func (q 
*QBit) handleTorrentsPause(w http.ResponseWriter, r *http.Request) { ctx := r.Context() - hashes, _ := ctx.Value("hashes").([]string) - category := ctx.Value("category").(string) + hashes := getHashes(ctx) + category := getCategory(ctx) for _, hash := range hashes { - torrent := q.Storage.Get(hash, category) + torrent := q.storage.Get(hash, category) if torrent == nil { continue } @@ -254,10 +167,10 @@ func (q *QBit) handleTorrentsPause(w http.ResponseWriter, r *http.Request) { func (q *QBit) handleTorrentsResume(w http.ResponseWriter, r *http.Request) { ctx := r.Context() - hashes, _ := ctx.Value("hashes").([]string) - category := ctx.Value("category").(string) + hashes := getHashes(ctx) + category := getCategory(ctx) for _, hash := range hashes { - torrent := q.Storage.Get(hash, category) + torrent := q.storage.Get(hash, category) if torrent == nil { continue } @@ -269,10 +182,10 @@ func (q *QBit) handleTorrentsResume(w http.ResponseWriter, r *http.Request) { func (q *QBit) handleTorrentRecheck(w http.ResponseWriter, r *http.Request) { ctx := r.Context() - hashes, _ := ctx.Value("hashes").([]string) - category := ctx.Value("category").(string) + hashes := getHashes(ctx) + category := getCategory(ctx) for _, hash := range hashes { - torrent := q.Storage.Get(hash, category) + torrent := q.storage.Get(hash, category) if torrent == nil { continue } @@ -315,7 +228,7 @@ func (q *QBit) handleCreateCategory(w http.ResponseWriter, r *http.Request) { func (q *QBit) handleTorrentProperties(w http.ResponseWriter, r *http.Request) { ctx := r.Context() hash := r.URL.Query().Get("hash") - torrent := q.Storage.Get(hash, ctx.Value("category").(string)) + torrent := q.storage.Get(hash, getCategory(ctx)) properties := q.GetTorrentProperties(torrent) request.JSONResponse(w, properties, http.StatusOK) @@ -324,22 +237,22 @@ func (q *QBit) handleTorrentProperties(w http.ResponseWriter, r *http.Request) { func (q *QBit) handleTorrentFiles(w http.ResponseWriter, r *http.Request) { ctx := 
r.Context() hash := r.URL.Query().Get("hash") - torrent := q.Storage.Get(hash, ctx.Value("category").(string)) + torrent := q.storage.Get(hash, getCategory(ctx)) if torrent == nil { return } - files := q.GetTorrentFiles(torrent) + files := q.getTorrentFiles(torrent) request.JSONResponse(w, files, http.StatusOK) } func (q *QBit) handleSetCategory(w http.ResponseWriter, r *http.Request) { ctx := r.Context() - category := ctx.Value("category").(string) - hashes, _ := ctx.Value("hashes").([]string) - torrents := q.Storage.GetAll("", "", hashes) + category := getCategory(ctx) + hashes := getHashes(ctx) + torrents := q.storage.GetAll("", "", hashes) for _, torrent := range torrents { torrent.Category = category - q.Storage.AddOrUpdate(torrent) + q.storage.AddOrUpdate(torrent) } request.JSONResponse(w, nil, http.StatusOK) } @@ -351,33 +264,33 @@ func (q *QBit) handleAddTorrentTags(w http.ResponseWriter, r *http.Request) { return } ctx := r.Context() - hashes, _ := ctx.Value("hashes").([]string) + hashes := getHashes(ctx) tags := strings.Split(r.FormValue("tags"), ",") for i, tag := range tags { tags[i] = strings.TrimSpace(tag) } - torrents := q.Storage.GetAll("", "", hashes) + torrents := q.storage.GetAll("", "", hashes) for _, t := range torrents { - q.SetTorrentTags(t, tags) + q.setTorrentTags(t, tags) } request.JSONResponse(w, nil, http.StatusOK) } -func (q *QBit) handleRemoveTorrentTags(w http.ResponseWriter, r *http.Request) { +func (q *QBit) handleremoveTorrentTags(w http.ResponseWriter, r *http.Request) { err := r.ParseForm() if err != nil { http.Error(w, "Failed to parse form data", http.StatusBadRequest) return } ctx := r.Context() - hashes, _ := ctx.Value("hashes").([]string) + hashes := getHashes(ctx) tags := strings.Split(r.FormValue("tags"), ",") for i, tag := range tags { tags[i] = strings.TrimSpace(tag) } - torrents := q.Storage.GetAll("", "", hashes) + torrents := q.storage.GetAll("", "", hashes) for _, torrent := range torrents { - 
q.RemoveTorrentTags(torrent, tags) + q.removeTorrentTags(torrent, tags) } request.JSONResponse(w, nil, http.StatusOK) @@ -397,6 +310,6 @@ func (q *QBit) handleCreateTags(w http.ResponseWriter, r *http.Request) { for i, tag := range tags { tags[i] = strings.TrimSpace(tag) } - q.AddTags(tags) + q.addTags(tags) request.JSONResponse(w, nil, http.StatusOK) } diff --git a/pkg/qbit/import.go b/pkg/qbit/import.go deleted file mode 100644 index 438b141..0000000 --- a/pkg/qbit/import.go +++ /dev/null @@ -1,80 +0,0 @@ -package qbit - -import ( - "github.com/sirrobot01/decypharr/internal/utils" - "github.com/sirrobot01/decypharr/pkg/debrid/debrid" - "github.com/sirrobot01/decypharr/pkg/service" - "time" - - "github.com/google/uuid" - "github.com/sirrobot01/decypharr/pkg/arr" -) - -type ImportRequest struct { - ID string `json:"id"` - Path string `json:"path"` - Magnet *utils.Magnet `json:"magnet"` - Arr *arr.Arr `json:"arr"` - IsSymlink bool `json:"isSymlink"` - SeriesId int `json:"series"` - Seasons []int `json:"seasons"` - Episodes []string `json:"episodes"` - DownloadUncached bool `json:"downloadUncached"` - - Failed bool `json:"failed"` - FailedAt time.Time `json:"failedAt"` - Reason string `json:"reason"` - Completed bool `json:"completed"` - CompletedAt time.Time `json:"completedAt"` - Async bool `json:"async"` -} - -type ManualImportResponseSchema struct { - Priority string `json:"priority"` - Status string `json:"status"` - Result string `json:"result"` - Queued time.Time `json:"queued"` - Trigger string `json:"trigger"` - SendUpdatesToClient bool `json:"sendUpdatesToClient"` - UpdateScheduledTask bool `json:"updateScheduledTask"` - Id int `json:"id"` -} - -func NewImportRequest(magnet *utils.Magnet, arr *arr.Arr, isSymlink, downloadUncached bool) *ImportRequest { - return &ImportRequest{ - ID: uuid.NewString(), - Magnet: magnet, - Arr: arr, - Failed: false, - Completed: false, - Async: false, - IsSymlink: isSymlink, - DownloadUncached: downloadUncached, - } -} - -func 
(i *ImportRequest) Fail(reason string) { - i.Failed = true - i.FailedAt = time.Now() - i.Reason = reason -} - -func (i *ImportRequest) Complete() { - i.Completed = true - i.CompletedAt = time.Now() -} - -func (i *ImportRequest) Process(q *QBit) (err error) { - // Use this for now. - // This sends the torrent to the arr - svc := service.GetService() - torrent := createTorrentFromMagnet(i.Magnet, i.Arr.Name, "manual") - debridTorrent, err := debrid.ProcessTorrent(svc.Debrid, i.Magnet, i.Arr, i.IsSymlink, i.DownloadUncached) - if err != nil { - return err - } - torrent = q.UpdateTorrentMin(torrent, debridTorrent) - q.Storage.AddOrUpdate(torrent) - go q.ProcessFiles(torrent, debridTorrent, i.Arr, i.IsSymlink) - return nil -} diff --git a/pkg/qbit/qbit.go b/pkg/qbit/qbit.go index 732d411..04c92a4 100644 --- a/pkg/qbit/qbit.go +++ b/pkg/qbit/qbit.go @@ -1,52 +1,38 @@ package qbit import ( - "cmp" "github.com/rs/zerolog" "github.com/sirrobot01/decypharr/internal/config" "github.com/sirrobot01/decypharr/internal/logger" - "os" - "path/filepath" + "github.com/sirrobot01/decypharr/pkg/store" ) type QBit struct { - Username string `json:"username"` - Password string `json:"password"` - Port string `json:"port"` - DownloadFolder string `json:"download_folder"` - Categories []string `json:"categories"` - Storage *TorrentStorage - logger zerolog.Logger - Tags []string - RefreshInterval int - SkipPreCache bool - - downloadSemaphore chan struct{} + Username string + Password string + DownloadFolder string + Categories []string + storage *store.TorrentStorage + logger zerolog.Logger + Tags []string } func New() *QBit { _cfg := config.Get() cfg := _cfg.QBitTorrent - port := cmp.Or(_cfg.Port, os.Getenv("QBIT_PORT"), "8282") - refreshInterval := cmp.Or(cfg.RefreshInterval, 10) return &QBit{ - Username: cfg.Username, - Password: cfg.Password, - Port: port, - DownloadFolder: cfg.DownloadFolder, - Categories: cfg.Categories, - Storage: NewTorrentStorage(filepath.Join(_cfg.Path, 
"torrents.json")), - logger: logger.New("qbit"), - RefreshInterval: refreshInterval, - SkipPreCache: cfg.SkipPreCache, - downloadSemaphore: make(chan struct{}, cmp.Or(cfg.MaxDownloads, 5)), + Username: cfg.Username, + Password: cfg.Password, + DownloadFolder: cfg.DownloadFolder, + Categories: cfg.Categories, + storage: store.GetStore().GetTorrentStorage(), + logger: logger.New("qbit"), } } func (q *QBit) Reset() { - if q.Storage != nil { - q.Storage.Reset() + if q.storage != nil { + q.storage.Reset() } q.Tags = nil - close(q.downloadSemaphore) } diff --git a/pkg/qbit/routes.go b/pkg/qbit/routes.go index ca6f29a..9881247 100644 --- a/pkg/qbit/routes.go +++ b/pkg/qbit/routes.go @@ -7,12 +7,12 @@ import ( func (q *QBit) Routes() http.Handler { r := chi.NewRouter() - r.Use(q.CategoryContext) + r.Use(q.categoryContext) r.Group(func(r chi.Router) { r.Use(q.authContext) r.Post("/auth/login", q.handleLogin) r.Route("/torrents", func(r chi.Router) { - r.Use(HashesCtx) + r.Use(hashesContext) r.Get("/info", q.handleTorrentsInfo) r.Post("/add", q.handleTorrentsAdd) r.Post("/delete", q.handleTorrentsDelete) @@ -20,7 +20,7 @@ func (q *QBit) Routes() http.Handler { r.Post("/createCategory", q.handleCreateCategory) r.Post("/setCategory", q.handleSetCategory) r.Post("/addTags", q.handleAddTorrentTags) - r.Post("/removeTags", q.handleRemoveTorrentTags) + r.Post("/removeTags", q.handleremoveTorrentTags) r.Post("/createTags", q.handleCreateTags) r.Get("/tags", q.handleGetTags) r.Get("/pause", q.handleTorrentsPause) diff --git a/pkg/qbit/torrent.go b/pkg/qbit/torrent.go index 5b61e39..5e9ae77 100644 --- a/pkg/qbit/torrent.go +++ b/pkg/qbit/torrent.go @@ -1,38 +1,35 @@ package qbit import ( - "cmp" "context" "fmt" - "github.com/sirrobot01/decypharr/internal/request" "github.com/sirrobot01/decypharr/internal/utils" "github.com/sirrobot01/decypharr/pkg/arr" - "github.com/sirrobot01/decypharr/pkg/debrid/debrid" - debridTypes "github.com/sirrobot01/decypharr/pkg/debrid/types" - 
"github.com/sirrobot01/decypharr/pkg/service" + "github.com/sirrobot01/decypharr/pkg/store" "io" "mime/multipart" - "os" - "path/filepath" "strings" "time" ) -// All torrent related helpers goes here - -func (q *QBit) AddMagnet(ctx context.Context, url, category string) error { +// All torrent-related helpers goes here +func (q *QBit) addMagnet(ctx context.Context, url string, arr *arr.Arr, debrid string, isSymlink bool) error { magnet, err := utils.GetMagnetFromUrl(url) if err != nil { return fmt.Errorf("error parsing magnet link: %w", err) } - err = q.Process(ctx, magnet, category) + _store := store.GetStore() + + importReq := store.NewImportRequest(debrid, q.DownloadFolder, magnet, arr, isSymlink, false, "", store.ImportTypeQBitTorrent) + + err = _store.AddTorrent(ctx, importReq) if err != nil { return fmt.Errorf("failed to process torrent: %w", err) } return nil } -func (q *QBit) AddTorrent(ctx context.Context, fileHeader *multipart.FileHeader, category string) error { +func (q *QBit) addTorrent(ctx context.Context, fileHeader *multipart.FileHeader, arr *arr.Arr, debrid string, isSymlink bool) error { file, _ := fileHeader.Open() defer file.Close() var reader io.Reader = file @@ -40,226 +37,28 @@ func (q *QBit) AddTorrent(ctx context.Context, fileHeader *multipart.FileHeader, if err != nil { return fmt.Errorf("error reading file: %s \n %w", fileHeader.Filename, err) } - err = q.Process(ctx, magnet, category) + _store := store.GetStore() + importReq := store.NewImportRequest(debrid, q.DownloadFolder, magnet, arr, isSymlink, false, "", store.ImportTypeQBitTorrent) + err = _store.AddTorrent(ctx, importReq) if err != nil { return fmt.Errorf("failed to process torrent: %w", err) } return nil } -func (q *QBit) Process(ctx context.Context, magnet *utils.Magnet, category string) error { - svc := service.GetService() - torrent := createTorrentFromMagnet(magnet, category, "auto") - a, ok := ctx.Value("arr").(*arr.Arr) - if !ok { - return fmt.Errorf("arr not found in 
context") - } - isSymlink := ctx.Value("isSymlink").(bool) - debridTorrent, err := debrid.ProcessTorrent(svc.Debrid, magnet, a, isSymlink, false) - if err != nil || debridTorrent == nil { - if err == nil { - err = fmt.Errorf("failed to process torrent") - } - return err - } - torrent = q.UpdateTorrentMin(torrent, debridTorrent) - q.Storage.AddOrUpdate(torrent) - go q.ProcessFiles(torrent, debridTorrent, a, isSymlink) // We can send async for file processing not to delay the response - return nil -} - -func (q *QBit) ProcessFiles(torrent *Torrent, debridTorrent *debridTypes.Torrent, arr *arr.Arr, isSymlink bool) { - svc := service.GetService() - client := svc.Debrid.GetClient(debridTorrent.Debrid) - downloadingStatuses := client.GetDownloadingStatus() - for debridTorrent.Status != "downloaded" { - q.logger.Debug().Msgf("%s <- (%s) Download Progress: %.2f%%", debridTorrent.Debrid, debridTorrent.Name, debridTorrent.Progress) - dbT, err := client.CheckStatus(debridTorrent, isSymlink) - if err != nil { - if dbT != nil && dbT.Id != "" { - // Delete the torrent if it was not downloaded - go func() { - _ = client.DeleteTorrent(dbT.Id) - }() - } - q.logger.Error().Msgf("Error checking status: %v", err) - q.MarkAsFailed(torrent) - go func() { - if err := arr.Refresh(); err != nil { - q.logger.Error().Msgf("Error refreshing arr: %v", err) - } - }() - return - } - - debridTorrent = dbT - torrent = q.UpdateTorrentMin(torrent, debridTorrent) - - // Exit the loop for downloading statuses to prevent memory buildup - if debridTorrent.Status == "downloaded" || !utils.Contains(downloadingStatuses, debridTorrent.Status) { - break - } - if !utils.Contains(client.GetDownloadingStatus(), debridTorrent.Status) { - break - } - time.Sleep(time.Duration(q.RefreshInterval) * time.Second) - } - var torrentSymlinkPath string - var err error - debridTorrent.Arr = arr - - // Check if debrid supports webdav by checking cache - timer := time.Now() - if isSymlink { - cache, useWebdav := 
svc.Debrid.Caches[debridTorrent.Debrid] - if useWebdav { - q.logger.Info().Msgf("Using internal webdav for %s", debridTorrent.Debrid) - - // Use webdav to download the file - - if err := cache.AddTorrent(debridTorrent); err != nil { - q.logger.Error().Msgf("Error adding torrent to cache: %v", err) - q.MarkAsFailed(torrent) - return - } - - rclonePath := filepath.Join(debridTorrent.MountPath, cache.GetTorrentFolder(debridTorrent)) // /mnt/remote/realdebrid/MyTVShow - torrentFolderNoExt := utils.RemoveExtension(debridTorrent.Name) - torrentSymlinkPath, err = q.createSymlinksWebdav(debridTorrent, rclonePath, torrentFolderNoExt) // /mnt/symlinks/{category}/MyTVShow/ - - } else { - // User is using either zurg or debrid webdav - torrentSymlinkPath, err = q.ProcessSymlink(torrent) // /mnt/symlinks/{category}/MyTVShow/ - } - } else { - torrentSymlinkPath, err = q.ProcessManualFile(torrent) - } - if err != nil { - q.MarkAsFailed(torrent) - go func() { - _ = client.DeleteTorrent(debridTorrent.Id) - }() - q.logger.Info().Msgf("Error: %v", err) - return - } - torrent.TorrentPath = torrentSymlinkPath - q.UpdateTorrent(torrent, debridTorrent) - q.logger.Info().Msgf("Adding %s took %s", debridTorrent.Name, time.Since(timer)) - go func() { - if err := request.SendDiscordMessage("download_complete", "success", torrent.discordContext()); err != nil { - q.logger.Error().Msgf("Error sending discord message: %v", err) - } - }() - if err := arr.Refresh(); err != nil { - q.logger.Error().Msgf("Error refreshing arr: %v", err) - } -} - -func (q *QBit) MarkAsFailed(t *Torrent) *Torrent { - t.State = "error" - q.Storage.AddOrUpdate(t) - go func() { - if err := request.SendDiscordMessage("download_failed", "error", t.discordContext()); err != nil { - q.logger.Error().Msgf("Error sending discord message: %v", err) - } - }() - return t -} - -func (q *QBit) UpdateTorrentMin(t *Torrent, debridTorrent *debridTypes.Torrent) *Torrent { - if debridTorrent == nil { - return t - } - - addedOn, err := 
time.Parse(time.RFC3339, debridTorrent.Added) - if err != nil { - addedOn = time.Now() - } - totalSize := debridTorrent.Bytes - progress := (cmp.Or(debridTorrent.Progress, 0.0)) / 100.0 - sizeCompleted := int64(float64(totalSize) * progress) - - var speed int64 - if debridTorrent.Speed != 0 { - speed = debridTorrent.Speed - } - var eta int - if speed != 0 { - eta = int((totalSize - sizeCompleted) / speed) - } - t.ID = debridTorrent.Id - t.Name = debridTorrent.Name - t.AddedOn = addedOn.Unix() - t.DebridTorrent = debridTorrent - t.Debrid = debridTorrent.Debrid - t.Size = totalSize - t.Completed = sizeCompleted - t.Downloaded = sizeCompleted - t.DownloadedSession = sizeCompleted - t.Uploaded = sizeCompleted - t.UploadedSession = sizeCompleted - t.AmountLeft = totalSize - sizeCompleted - t.Progress = progress - t.Eta = eta - t.Dlspeed = speed - t.Upspeed = speed - t.SavePath = filepath.Join(q.DownloadFolder, t.Category) + string(os.PathSeparator) - t.ContentPath = filepath.Join(t.SavePath, t.Name) + string(os.PathSeparator) - return t -} - -func (q *QBit) UpdateTorrent(t *Torrent, debridTorrent *debridTypes.Torrent) *Torrent { - if debridTorrent == nil { - return t - } - - if debridClient := service.GetDebrid().GetClient(debridTorrent.Debrid); debridClient != nil { - if debridTorrent.Status != "downloaded" { - _ = debridClient.UpdateTorrent(debridTorrent) - } - } - t = q.UpdateTorrentMin(t, debridTorrent) - t.ContentPath = t.TorrentPath + string(os.PathSeparator) - - if t.IsReady() { - t.State = "pausedUP" - q.Storage.Update(t) - return t - } - - ticker := time.NewTicker(100 * time.Millisecond) - defer ticker.Stop() - - for { - select { - case <-ticker.C: - if t.IsReady() { - t.State = "pausedUP" - q.Storage.Update(t) - return t - } - updatedT := q.UpdateTorrent(t, debridTorrent) - t = updatedT - - case <-time.After(10 * time.Minute): // Add a timeout - return t - } - } -} - -func (q *QBit) ResumeTorrent(t *Torrent) bool { +func (q *QBit) ResumeTorrent(t 
*store.Torrent) bool { return true } -func (q *QBit) PauseTorrent(t *Torrent) bool { +func (q *QBit) PauseTorrent(t *store.Torrent) bool { return true } -func (q *QBit) RefreshTorrent(t *Torrent) bool { +func (q *QBit) RefreshTorrent(t *store.Torrent) bool { return true } -func (q *QBit) GetTorrentProperties(t *Torrent) *TorrentProperties { +func (q *QBit) GetTorrentProperties(t *store.Torrent) *TorrentProperties { return &TorrentProperties{ AdditionDate: t.AddedOn, Comment: "Debrid Blackhole ", @@ -284,7 +83,7 @@ func (q *QBit) GetTorrentProperties(t *Torrent) *TorrentProperties { } } -func (q *QBit) GetTorrentFiles(t *Torrent) []*TorrentFile { +func (q *QBit) getTorrentFiles(t *store.Torrent) []*TorrentFile { files := make([]*TorrentFile, 0) if t.DebridTorrent == nil { return files @@ -298,7 +97,7 @@ func (q *QBit) GetTorrentFiles(t *Torrent) []*TorrentFile { return files } -func (q *QBit) SetTorrentTags(t *Torrent, tags []string) bool { +func (q *QBit) setTorrentTags(t *store.Torrent, tags []string) bool { torrentTags := strings.Split(t.Tags, ",") for _, tag := range tags { if tag == "" { @@ -312,20 +111,20 @@ func (q *QBit) SetTorrentTags(t *Torrent, tags []string) bool { } } t.Tags = strings.Join(torrentTags, ",") - q.Storage.Update(t) + q.storage.Update(t) return true } -func (q *QBit) RemoveTorrentTags(t *Torrent, tags []string) bool { +func (q *QBit) removeTorrentTags(t *store.Torrent, tags []string) bool { torrentTags := strings.Split(t.Tags, ",") newTorrentTags := utils.RemoveItem(torrentTags, tags...) q.Tags = utils.RemoveItem(q.Tags, tags...) 
t.Tags = strings.Join(newTorrentTags, ",") - q.Storage.Update(t) + q.storage.Update(t) return true } -func (q *QBit) AddTags(tags []string) bool { +func (q *QBit) addTags(tags []string) bool { for _, tag := range tags { if tag == "" { continue @@ -337,7 +136,7 @@ func (q *QBit) AddTags(tags []string) bool { return true } -func (q *QBit) RemoveTags(tags []string) bool { +func (q *QBit) removeTags(tags []string) bool { q.Tags = utils.RemoveItem(q.Tags, tags...) return true } diff --git a/pkg/qbit/types.go b/pkg/qbit/types.go index 10179ee..719e75a 100644 --- a/pkg/qbit/types.go +++ b/pkg/qbit/types.go @@ -1,11 +1,5 @@ package qbit -import ( - "fmt" - "github.com/sirrobot01/decypharr/pkg/debrid/types" - "sync" -) - type BuildInfo struct { Libtorrent string `json:"libtorrent"` Bitness int `json:"bitness"` @@ -172,76 +166,6 @@ type TorrentCategory struct { SavePath string `json:"savePath"` } -type Torrent struct { - ID string `json:"id"` - DebridTorrent *types.Torrent `json:"-"` - Debrid string `json:"debrid"` - TorrentPath string `json:"-"` - - AddedOn int64 `json:"added_on,omitempty"` - AmountLeft int64 `json:"amount_left"` - AutoTmm bool `json:"auto_tmm"` - Availability float64 `json:"availability,omitempty"` - Category string `json:"category,omitempty"` - Completed int64 `json:"completed"` - CompletionOn int `json:"completion_on,omitempty"` - ContentPath string `json:"content_path"` - DlLimit int `json:"dl_limit"` - Dlspeed int64 `json:"dlspeed"` - Downloaded int64 `json:"downloaded"` - DownloadedSession int64 `json:"downloaded_session"` - Eta int `json:"eta"` - FlPiecePrio bool `json:"f_l_piece_prio,omitempty"` - ForceStart bool `json:"force_start,omitempty"` - Hash string `json:"hash"` - LastActivity int64 `json:"last_activity,omitempty"` - MagnetUri string `json:"magnet_uri,omitempty"` - MaxRatio int `json:"max_ratio,omitempty"` - MaxSeedingTime int `json:"max_seeding_time,omitempty"` - Name string `json:"name,omitempty"` - NumComplete int 
`json:"num_complete,omitempty"` - NumIncomplete int `json:"num_incomplete,omitempty"` - NumLeechs int `json:"num_leechs,omitempty"` - NumSeeds int `json:"num_seeds,omitempty"` - Priority int `json:"priority,omitempty"` - Progress float64 `json:"progress"` - Ratio int `json:"ratio,omitempty"` - RatioLimit int `json:"ratio_limit,omitempty"` - SavePath string `json:"save_path"` - SeedingTimeLimit int `json:"seeding_time_limit,omitempty"` - SeenComplete int64 `json:"seen_complete,omitempty"` - SeqDl bool `json:"seq_dl"` - Size int64 `json:"size,omitempty"` - State string `json:"state,omitempty"` - SuperSeeding bool `json:"super_seeding"` - Tags string `json:"tags,omitempty"` - TimeActive int `json:"time_active,omitempty"` - TotalSize int64 `json:"total_size,omitempty"` - Tracker string `json:"tracker,omitempty"` - UpLimit int64 `json:"up_limit,omitempty"` - Uploaded int64 `json:"uploaded,omitempty"` - UploadedSession int64 `json:"uploaded_session,omitempty"` - Upspeed int64 `json:"upspeed,omitempty"` - Source string `json:"source,omitempty"` - - Mu sync.Mutex `json:"-"` -} - -func (t *Torrent) IsReady() bool { - return (t.AmountLeft <= 0 || t.Progress == 1) && t.TorrentPath != "" -} - -func (t *Torrent) discordContext() string { - format := ` - **Name:** %s - **Arr:** %s - **Hash:** %s - **MagnetURI:** %s - **Debrid:** %s - ` - return fmt.Sprintf(format, t.Name, t.Category, t.Hash, t.MagnetUri, t.Debrid) -} - type TorrentProperties struct { AdditionDate int64 `json:"addition_date,omitempty"` Comment string `json:"comment,omitempty"` @@ -289,7 +213,7 @@ type TorrentFile struct { Availability float64 `json:"availability,omitempty"` } -func NewAppPreferences() *AppPreferences { +func getAppPreferences() *AppPreferences { preferences := &AppPreferences{ AddTrackers: "", AddTrackersEnabled: false, diff --git a/pkg/repair/misc.go b/pkg/repair/misc.go index 0cce790..f2d2b64 100644 --- a/pkg/repair/misc.go +++ b/pkg/repair/misc.go @@ -3,6 +3,8 @@ package repair import ( "fmt" 
"github.com/sirrobot01/decypharr/pkg/arr" + "github.com/sirrobot01/decypharr/pkg/debrid/store" + "github.com/sirrobot01/decypharr/pkg/debrid/types" "os" "path/filepath" ) @@ -82,3 +84,96 @@ func collectFiles(media arr.Content) map[string][]arr.ContentFile { } return uniqueParents } + +func (r *Repair) checkTorrentFiles(torrentPath string, files []arr.ContentFile, clients map[string]types.Client, caches map[string]*store.Cache) []arr.ContentFile { + brokenFiles := make([]arr.ContentFile, 0) + + r.logger.Debug().Msgf("Checking %s", torrentPath) + + // Get the debrid client + dir := filepath.Dir(torrentPath) + debridName := r.findDebridForPath(dir, clients) + if debridName == "" { + r.logger.Debug().Msgf("No debrid found for %s. Skipping", torrentPath) + return files // Return all files as broken if no debrid found + } + + cache, ok := caches[debridName] + if !ok { + r.logger.Debug().Msgf("No cache found for %s. Skipping", debridName) + return files // Return all files as broken if no cache found + } + + // Check if torrent exists + torrentName := filepath.Clean(filepath.Base(torrentPath)) + torrent := cache.GetTorrentByName(torrentName) + if torrent == nil { + r.logger.Debug().Msgf("No torrent found for %s. 
Skipping", torrentName) + return files // Return all files as broken if torrent not found + } + + // Batch check files + filePaths := make([]string, len(files)) + for i, file := range files { + filePaths[i] = file.TargetPath + } + + brokenFilePaths := cache.GetBrokenFiles(torrent, filePaths) + if len(brokenFilePaths) > 0 { + r.logger.Debug().Msgf("%d broken files found in %s", len(brokenFilePaths), torrentName) + + // Create a set for O(1) lookup + brokenSet := make(map[string]bool, len(brokenFilePaths)) + for _, brokenPath := range brokenFilePaths { + brokenSet[brokenPath] = true + } + + // Filter broken files + for _, contentFile := range files { + if brokenSet[contentFile.TargetPath] { + brokenFiles = append(brokenFiles, contentFile) + } + } + } + + return brokenFiles +} + +func (r *Repair) findDebridForPath(dir string, clients map[string]types.Client) string { + // Check cache first + r.cacheMutex.RLock() + if r.debridPathCache == nil { + r.debridPathCache = make(map[string]string) + } + if debridName, exists := r.debridPathCache[dir]; exists { + r.cacheMutex.RUnlock() + return debridName + } + r.cacheMutex.RUnlock() + + // Find debrid client + for _, client := range clients { + mountPath := client.GetMountPath() + if mountPath == "" { + continue + } + + if filepath.Clean(mountPath) == filepath.Clean(dir) { + debridName := client.GetName() + + // Cache the result + r.cacheMutex.Lock() + r.debridPathCache[dir] = debridName + r.cacheMutex.Unlock() + + return debridName + } + } + + // Cache empty result to avoid repeated lookups + r.cacheMutex.Lock() + r.debridPathCache[dir] = "" + r.cacheMutex.Unlock() + + return "" +} diff --git a/pkg/repair/repair.go b/pkg/repair/repair.go index 4cc96ec..7a50a54 100644 --- a/pkg/repair/repair.go +++ b/pkg/repair/repair.go @@ -3,6 +3,7 @@ package repair import ( "context" "encoding/json" + "errors" "fmt" "github.com/go-co-op/gocron/v2" "github.com/google/uuid" @@ -12,7 +13,7 @@ import ( 
"github.com/sirrobot01/decypharr/internal/request" "github.com/sirrobot01/decypharr/internal/utils" "github.com/sirrobot01/decypharr/pkg/arr" - "github.com/sirrobot01/decypharr/pkg/debrid/debrid" + "github.com/sirrobot01/decypharr/pkg/debrid" "golang.org/x/sync/errgroup" "net" "net/http" @@ -29,7 +30,7 @@ import ( type Repair struct { Jobs map[string]*Job arrs *arr.Storage - deb *debrid.Engine + deb *debrid.Storage interval string runOnStart bool ZurgURL string @@ -40,7 +41,10 @@ type Repair struct { filename string workers int scheduler gocron.Scheduler - ctx context.Context + + debridPathCache map[string]string // Cache for path -> debrid name mapping + cacheMutex sync.RWMutex + ctx context.Context } type JobStatus string @@ -51,6 +55,7 @@ const ( JobFailed JobStatus = "failed" JobCompleted JobStatus = "completed" JobProcessing JobStatus = "processing" + JobCancelled JobStatus = "cancelled" ) type Job struct { @@ -66,9 +71,12 @@ type Job struct { Recurrent bool `json:"recurrent"` Error string `json:"error"` + + cancelFunc context.CancelFunc + ctx context.Context } -func New(arrs *arr.Storage, engine *debrid.Engine) *Repair { +func New(arrs *arr.Storage, engine *debrid.Storage) *Repair { cfg := config.Get() workers := runtime.NumCPU() * 20 if cfg.Repair.Workers > 0 { @@ -220,7 +228,8 @@ func (r *Repair) newJob(arrsNames []string, mediaIDs []string) *Job { func (r *Repair) preRunChecks() error { if r.useWebdav { - if len(r.deb.Caches) == 0 { + caches := r.deb.GetCaches() + if len(caches) == 0 { return fmt.Errorf("no caches found") } return nil @@ -254,21 +263,59 @@ func (r *Repair) AddJob(arrsNames []string, mediaIDs []string, autoProcess, recu job.AutoProcess = autoProcess job.Recurrent = recurrent r.reset(job) + + job.ctx, job.cancelFunc = context.WithCancel(r.ctx) r.Jobs[key] = job go r.saveToFile() go func() { if err := r.repair(job); err != nil { r.logger.Error().Err(err).Msg("Error running repair") - r.logger.Error().Err(err).Msg("Error running repair") - 
job.FailedAt = time.Now() - job.Error = err.Error() - job.Status = JobFailed - job.CompletedAt = time.Now() + if !errors.Is(job.ctx.Err(), context.Canceled) { + job.FailedAt = time.Now() + job.Error = err.Error() + job.Status = JobFailed + job.CompletedAt = time.Now() + } else { + job.FailedAt = time.Now() + job.Error = err.Error() + job.Status = JobFailed + job.CompletedAt = time.Now() + } } }() return nil } +func (r *Repair) StopJob(id string) error { + job := r.GetJob(id) + if job == nil { + return fmt.Errorf("job %s not found", id) + } + + // Check if job can be stopped + if job.Status != JobStarted && job.Status != JobProcessing { + return fmt.Errorf("job %s cannot be stopped (status: %s)", id, job.Status) + } + + // Cancel the job + if job.cancelFunc != nil { + job.cancelFunc() + r.logger.Info().Msgf("Job %s cancellation requested", id) + go func() { + if job.Status == JobStarted || job.Status == JobProcessing { + job.Status = JobCancelled + job.CompletedAt = time.Now() + job.Error = "Job was cancelled by user" + r.saveToFile() + } + }() + + return nil + } + + return fmt.Errorf("job %s cannot be cancelled", id) +} + func (r *Repair) repair(job *Job) error { defer r.saveToFile() if err := r.preRunChecks(); err != nil { @@ -278,7 +325,7 @@ // Use a mutex to protect concurrent access to brokenItems var mu sync.Mutex brokenItems := map[string][]arr.ContentFile{} - g, ctx := errgroup.WithContext(r.ctx) + g, ctx := errgroup.WithContext(job.ctx) for _, a := range job.Arrs { a := a // Capture range variable @@ -321,6 +368,14 @@ // Wait for all goroutines to complete and check for errors if err := g.Wait(); err != nil { + // Check if job was canceled + if errors.Is(ctx.Err(), context.Canceled) { + job.Status = JobCancelled + job.CompletedAt = time.Now() + job.Error = "Job was cancelled" + return fmt.Errorf("job cancelled") + } + + job.FailedAt = time.Now() + job.Error = err.Error() 
job.Status = JobFailed @@ -367,7 +422,7 @@ func (r *Repair) repair(job *Job) error { return nil } -func (r *Repair) repairArr(j *Job, _arr string, tmdbId string) ([]arr.ContentFile, error) { +func (r *Repair) repairArr(job *Job, _arr string, tmdbId string) ([]arr.ContentFile, error) { brokenItems := make([]arr.ContentFile, 0) a := r.arrs.Get(_arr) @@ -384,7 +439,7 @@ func (r *Repair) repairArr(j *Job, _arr string, tmdbId string) ([]arr.ContentFil return brokenItems, nil } // Check first media to confirm mounts are accessible - if !r.isMediaAccessible(media[0]) { + if !r.isMediaAccessible(media) { r.logger.Info().Msgf("Skipping repair. Parent directory not accessible for. Check your mounts") return brokenItems, nil } @@ -400,14 +455,14 @@ func (r *Repair) repairArr(j *Job, _arr string, tmdbId string) ([]arr.ContentFil defer wg.Done() for m := range workerChan { select { - case <-r.ctx.Done(): + case <-job.ctx.Done(): return default: } - items := r.getBrokenFiles(m) + items := r.getBrokenFiles(job, m) if items != nil { r.logger.Debug().Msgf("Found %d broken files for %s", len(items), m.Title) - if j.AutoProcess { + if job.AutoProcess { r.logger.Info().Msgf("Auto processing %d broken items for %s", len(items), m.Title) // Delete broken items @@ -431,7 +486,7 @@ func (r *Repair) repairArr(j *Job, _arr string, tmdbId string) ([]arr.ContentFil for _, m := range media { select { - case <-r.ctx.Done(): + case <-job.ctx.Done(): break default: workerChan <- m @@ -449,43 +504,49 @@ func (r *Repair) repairArr(j *Job, _arr string, tmdbId string) ([]arr.ContentFil return brokenItems, nil } -func (r *Repair) isMediaAccessible(m arr.Content) bool { - files := m.Files +// isMediaAccessible checks if the mounts are accessible +func (r *Repair) isMediaAccessible(media []arr.Content) bool { + firstMedia := media[0] + for _, m := range media { + if len(m.Files) > 0 { + firstMedia = m + break + } + } + files := firstMedia.Files if len(files) == 0 { return false } firstFile := files[0] - 
r.logger.Debug().Msgf("Checking parent directory for %s", firstFile.Path) - //if _, err := os.Stat(firstFile.Path); os.IsNotExist(err) { - // r.logger.Debug().Msgf("Parent directory not accessible for %s", firstFile.Path) - // return false - //} - // Check symlink parent directory symlinkPath := getSymlinkTarget(firstFile.Path) r.logger.Debug().Msgf("Checking symlink parent directory for %s", symlinkPath) + parentSymlink := "" if symlinkPath != "" { - parentSymlink := filepath.Dir(filepath.Dir(symlinkPath)) // /mnt/zurg/torrents/movie/movie.mkv -> /mnt/zurg/torrents + parentSymlink = filepath.Dir(filepath.Dir(symlinkPath)) // /mnt/zurg/torrents/movie/movie.mkv -> /mnt/zurg/torrents + } + if parentSymlink != "" { if _, err := os.Stat(parentSymlink); os.IsNotExist(err) { return false } + return true } - return true + return false } -func (r *Repair) getBrokenFiles(media arr.Content) []arr.ContentFile { +func (r *Repair) getBrokenFiles(job *Job, media arr.Content) []arr.ContentFile { if r.useWebdav { - return r.getWebdavBrokenFiles(media) + return r.getWebdavBrokenFiles(job, media) } else if r.IsZurg { - return r.getZurgBrokenFiles(media) + return r.getZurgBrokenFiles(job, media) } else { - return r.getFileBrokenFiles(media) + return r.getFileBrokenFiles(job, media) } } -func (r *Repair) getFileBrokenFiles(media arr.Content) []arr.ContentFile { +func (r *Repair) getFileBrokenFiles(job *Job, media arr.Content) []arr.ContentFile { // This checks symlink target, try to get read a tiny bit of the file brokenFiles := make([]arr.ContentFile, 0) @@ -510,7 +571,7 @@ func (r *Repair) getFileBrokenFiles(media arr.Content) []arr.ContentFile { return brokenFiles } -func (r *Repair) getZurgBrokenFiles(media arr.Content) []arr.ContentFile { +func (r *Repair) getZurgBrokenFiles(job *Job, media arr.Content) []arr.ContentFile { // Use zurg setup to check file availability with zurg // This reduces bandwidth usage significantly @@ -550,12 +611,17 @@ func (r *Repair) 
getZurgBrokenFiles(media arr.Content) []arr.ContentFile { } if resp.StatusCode < 200 || resp.StatusCode >= 300 { r.logger.Debug().Msgf("Failed to get download url for %s", fullURL) - resp.Body.Close() + if err := resp.Body.Close(); err != nil { + return nil + } brokenFiles = append(brokenFiles, file) continue } downloadUrl := resp.Request.URL.String() - resp.Body.Close() + + if err := resp.Body.Close(); err != nil { + return nil + } if downloadUrl != "" { r.logger.Trace().Msgf("Found download url: %s", downloadUrl) } else { @@ -573,16 +639,16 @@ func (r *Repair) getZurgBrokenFiles(media arr.Content) []arr.ContentFile { return brokenFiles } -func (r *Repair) getWebdavBrokenFiles(media arr.Content) []arr.ContentFile { +func (r *Repair) getWebdavBrokenFiles(job *Job, media arr.Content) []arr.ContentFile { // Use internal webdav setup to check file availability - caches := r.deb.Caches + caches := r.deb.GetCaches() if len(caches) == 0 { r.logger.Info().Msg("No caches found. Can't use webdav") return nil } - clients := r.deb.Clients + clients := r.deb.GetClients() if len(clients) == 0 { r.logger.Info().Msg("No clients found. 
Can't use webdav") return nil @@ -590,58 +656,36 @@ func (r *Repair) getWebdavBrokenFiles(media arr.Content) []arr.ContentFile { brokenFiles := make([]arr.ContentFile, 0) uniqueParents := collectFiles(media) - for torrentPath, f := range uniqueParents { - r.logger.Debug().Msgf("Checking %s", torrentPath) - // Get the debrid first - dir := filepath.Dir(torrentPath) - debridName := "" - for _, client := range clients { - mountPath := client.GetMountPath() - if mountPath == "" { - continue + var brokenFilesMutex sync.Mutex + var wg sync.WaitGroup + + // Limit concurrent torrent checks + semaphore := make(chan struct{}, min(len(uniqueParents), 30)) // Limit to 30 concurrent checks + for torrentPath, files := range uniqueParents { + wg.Add(1) + go func(torrentPath string, files []arr.ContentFile) { + defer wg.Done() + semaphore <- struct{}{} // Acquire + defer func() { <-semaphore }() // Release + + select { + case <-job.ctx.Done(): + return + default: }
- continue - } - files := make([]string, 0) - for _, file := range f { - files = append(files, file.TargetPath) - } + brokenFilesForTorrent := r.checkTorrentFiles(torrentPath, files, clients, caches) - _brokenFiles := cache.GetBrokenFiles(torrent, files) - totalBrokenFiles := len(_brokenFiles) - if totalBrokenFiles > 0 { - r.logger.Debug().Msgf("%d broken files found in %s", totalBrokenFiles, torrentName) - for _, contentFile := range f { - if utils.Contains(_brokenFiles, contentFile.TargetPath) { - brokenFiles = append(brokenFiles, contentFile) - } + if len(brokenFilesForTorrent) > 0 { + brokenFilesMutex.Lock() + brokenFiles = append(brokenFiles, brokenFilesForTorrent...) + brokenFilesMutex.Unlock() } - } - + }(torrentPath, files) } + + wg.Wait() if len(brokenFiles) == 0 { - r.logger.Debug().Msgf("No broken files found for %s", media.Title) return nil } r.logger.Debug().Msgf("%d broken files found for %s", len(brokenFiles), media.Title) @@ -696,7 +740,11 @@ func (r *Repair) ProcessJob(id string) error { return nil } - g, ctx := errgroup.WithContext(r.ctx) + if job.ctx == nil || job.ctx.Err() != nil { + job.ctx, job.cancelFunc = context.WithCancel(r.ctx) + } + + g, ctx := errgroup.WithContext(job.ctx) g.SetLimit(r.workers) for arrName, items := range brokenItems { diff --git a/pkg/server/debug.go b/pkg/server/debug.go index e7d1246..2b20b9a 100644 --- a/pkg/server/debug.go +++ b/pkg/server/debug.go @@ -5,19 +5,20 @@ import ( "github.com/go-chi/chi/v5" "github.com/sirrobot01/decypharr/internal/request" debridTypes "github.com/sirrobot01/decypharr/pkg/debrid/types" - "github.com/sirrobot01/decypharr/pkg/service" + "github.com/sirrobot01/decypharr/pkg/store" "net/http" "runtime" ) func (s *Server) handleIngests(w http.ResponseWriter, r *http.Request) { ingests := make([]debridTypes.IngestData, 0) - svc := service.GetService() - if svc.Debrid == nil { + _store := store.GetStore() + debrids := _store.GetDebrid() + if debrids == nil { http.Error(w, "Debrid service is not 
enabled", http.StatusInternalServerError) return } - for _, cache := range svc.Debrid.Caches { + for _, cache := range debrids.GetCaches() { if cache == nil { s.logger.Error().Msg("Debrid cache is nil, skipping") continue @@ -41,13 +42,17 @@ func (s *Server) handleIngestsByDebrid(w http.ResponseWriter, r *http.Request) { return } - svc := service.GetService() - if svc.Debrid == nil { + _store := store.GetStore() + debrids := _store.GetDebrid() + + if debrids == nil { http.Error(w, "Debrid service is not enabled", http.StatusInternalServerError) return } - cache, exists := svc.Debrid.Caches[debridName] + caches := debrids.GetCaches() + + cache, exists := caches[debridName] if !exists { http.Error(w, "Debrid cache not found: "+debridName, http.StatusNotFound) return @@ -87,12 +92,13 @@ func (s *Server) handleStats(w http.ResponseWriter, r *http.Request) { "go_version": runtime.Version(), } - svc := service.GetService() - if svc.Debrid == nil { + debrids := store.GetStore().GetDebrid() + if debrids == nil { request.JSONResponse(w, stats, http.StatusOK) return } - clients := svc.Debrid.GetDebrids() + clients := debrids.GetClients() + caches := debrids.GetCaches() profiles := make([]*debridTypes.Profile, 0) for debridName, client := range clients { profile, err := client.GetProfile() @@ -101,7 +107,7 @@ func (s *Server) handleStats(w http.ResponseWriter, r *http.Request) { s.logger.Error().Err(err).Msg("Failed to get debrid profile") continue } - cache, ok := svc.Debrid.Caches[debridName] + cache, ok := caches[debridName] if ok { // Get torrent data profile.LibrarySize = len(cache.GetTorrents()) diff --git a/pkg/server/webhook.go b/pkg/server/webhook.go index 0977a56..bc81ccb 100644 --- a/pkg/server/webhook.go +++ b/pkg/server/webhook.go @@ -3,7 +3,7 @@ package server import ( "cmp" "encoding/json" - "github.com/sirrobot01/decypharr/pkg/service" + "github.com/sirrobot01/decypharr/pkg/store" "net/http" ) @@ -38,8 +38,7 @@ func (s *Server) handleTautulli(w 
http.ResponseWriter, r *http.Request) { http.Error(w, "Invalid ID", http.StatusBadRequest) return } - svc := service.GetService() - repair := svc.Repair + repair := store.GetStore().GetRepair() mediaId := cmp.Or(payload.TmdbID, payload.TvdbID) diff --git a/pkg/service/service.go b/pkg/service/service.go deleted file mode 100644 index d41a3c2..0000000 --- a/pkg/service/service.go +++ /dev/null @@ -1,47 +0,0 @@ -package service - -import ( - "github.com/sirrobot01/decypharr/pkg/arr" - "github.com/sirrobot01/decypharr/pkg/debrid/debrid" - "github.com/sirrobot01/decypharr/pkg/repair" - "sync" -) - -type Service struct { - Repair *repair.Repair - Arr *arr.Storage - Debrid *debrid.Engine -} - -var ( - instance *Service - once sync.Once -) - -// GetService returns the singleton instance -func GetService() *Service { - once.Do(func() { - arrs := arr.NewStorage() - deb := debrid.NewEngine() - instance = &Service{ - Repair: repair.New(arrs, deb), - Arr: arrs, - Debrid: deb, - } - }) - return instance -} - -func Reset() { - if instance != nil { - if instance.Debrid != nil { - instance.Debrid.Reset() - } - } - once = sync.Once{} - instance = nil -} - -func GetDebrid() *debrid.Engine { - return GetService().Debrid -} diff --git a/pkg/qbit/downloader.go b/pkg/store/downloader.go similarity index 61% rename from pkg/qbit/downloader.go rename to pkg/store/downloader.go index 3321031..29692f3 100644 --- a/pkg/qbit/downloader.go +++ b/pkg/store/downloader.go @@ -1,8 +1,8 @@ -package qbit +package store import ( "fmt" - "io" + "github.com/sirrobot01/decypharr/pkg/debrid/types" "net/http" "os" "path/filepath" @@ -11,7 +11,6 @@ import ( "github.com/cavaliergopher/grab/v3" "github.com/sirrobot01/decypharr/internal/utils" - debrid "github.com/sirrobot01/decypharr/pkg/debrid/types" ) func Download(client *grab.Client, url, filename string, byterange *[2]int64, progressCallback func(int64, int64)) error { @@ -57,21 +56,21 @@ Loop: return resp.Err() } -func (q *QBit) 
ProcessManualFile(torrent *Torrent) (string, error) { +func (s *Store) ProcessManualFile(torrent *Torrent) (string, error) { debridTorrent := torrent.DebridTorrent - q.logger.Info().Msgf("Downloading %d files...", len(debridTorrent.Files)) - torrentPath := filepath.Join(q.DownloadFolder, debridTorrent.Arr.Name, utils.RemoveExtension(debridTorrent.OriginalFilename)) + s.logger.Info().Msgf("Downloading %d files...", len(debridTorrent.Files)) + torrentPath := filepath.Join(torrent.SavePath, utils.RemoveExtension(debridTorrent.OriginalFilename)) torrentPath = utils.RemoveInvalidChars(torrentPath) err := os.MkdirAll(torrentPath, os.ModePerm) if err != nil { - // add previous error to the error and return + // add the previous error to the error and return return "", fmt.Errorf("failed to create directory: %s: %v", torrentPath, err) } - q.downloadFiles(torrent, torrentPath) + s.downloadFiles(torrent, torrentPath) return torrentPath, nil } -func (q *QBit) downloadFiles(torrent *Torrent, parent string) { +func (s *Store) downloadFiles(torrent *Torrent, parent string) { debridTorrent := torrent.DebridTorrent var wg sync.WaitGroup @@ -79,15 +78,15 @@ func (q *QBit) downloadFiles(torrent *Torrent, parent string) { for _, file := range debridTorrent.GetFiles() { totalSize += file.Size } - debridTorrent.Mu.Lock() + debridTorrent.Lock() debridTorrent.SizeDownloaded = 0 // Reset downloaded bytes debridTorrent.Progress = 0 // Reset progress - debridTorrent.Mu.Unlock() + debridTorrent.Unlock() progressCallback := func(downloaded int64, speed int64) { - debridTorrent.Mu.Lock() - defer debridTorrent.Mu.Unlock() - torrent.Mu.Lock() - defer torrent.Mu.Unlock() + debridTorrent.Lock() + defer debridTorrent.Unlock() + torrent.Lock() + defer torrent.Unlock() // Update total downloaded bytes debridTorrent.SizeDownloaded += downloaded @@ -97,7 +96,7 @@ func (q *QBit) downloadFiles(torrent *Torrent, parent string) { if totalSize > 0 { debridTorrent.Progress = 
float64(debridTorrent.SizeDownloaded) / float64(totalSize) * 100 } - q.UpdateTorrentMin(torrent, debridTorrent) + s.UpdateTorrentMin(torrent, debridTorrent) } client := &grab.Client{ UserAgent: "Decypharr[QBitTorrent]", @@ -110,14 +109,14 @@ func (q *QBit) downloadFiles(torrent *Torrent, parent string) { errChan := make(chan error, len(debridTorrent.Files)) for _, file := range debridTorrent.GetFiles() { if file.DownloadLink == nil { - q.logger.Info().Msgf("No download link found for %s", file.Name) + s.logger.Info().Msgf("No download link found for %s", file.Name) continue } wg.Add(1) - q.downloadSemaphore <- struct{}{} - go func(file debrid.File) { + s.downloadSemaphore <- struct{}{} + go func(file types.File) { defer wg.Done() - defer func() { <-q.downloadSemaphore }() + defer func() { <-s.downloadSemaphore }() filename := file.Name err := Download( @@ -129,10 +128,10 @@ func (q *QBit) downloadFiles(torrent *Torrent, parent string) { ) if err != nil { - q.logger.Error().Msgf("Failed to download %s: %v", filename, err) + s.logger.Error().Msgf("Failed to download %s: %v", filename, err) errChan <- err } else { - q.logger.Info().Msgf("Downloaded %s", filename) + s.logger.Info().Msgf("Downloaded %s", filename) } }(file) } @@ -146,21 +145,21 @@ func (q *QBit) downloadFiles(torrent *Torrent, parent string) { } } if len(errors) > 0 { - q.logger.Error().Msgf("Errors occurred during download: %v", errors) + s.logger.Error().Msgf("Errors occurred during download: %v", errors) return } - q.logger.Info().Msgf("Downloaded all files for %s", debridTorrent.Name) + s.logger.Info().Msgf("Downloaded all files for %s", debridTorrent.Name) } -func (q *QBit) ProcessSymlink(torrent *Torrent) (string, error) { +func (s *Store) ProcessSymlink(torrent *Torrent) (string, error) { debridTorrent := torrent.DebridTorrent files := debridTorrent.Files if len(files) == 0 { return "", fmt.Errorf("no video files found") } - q.logger.Info().Msgf("Checking symlinks for %d files...", len(files)) + 
s.logger.Info().Msgf("Checking symlinks for %d files...", len(files)) rCloneBase := debridTorrent.MountPath - torrentPath, err := q.getTorrentPath(rCloneBase, debridTorrent) // /MyTVShow/ + torrentPath, err := s.getTorrentPath(rCloneBase, debridTorrent) // /MyTVShow/ // This returns filename.ext for alldebrid instead of the parent folder filename/ torrentFolder := torrentPath if err != nil { @@ -173,7 +172,7 @@ func (q *QBit) ProcessSymlink(torrent *Torrent) (string, error) { torrentFolder = utils.RemoveExtension(torrentFolder) torrentRclonePath = rCloneBase // /mnt/rclone/magnets/ // Remove the filename since it's in the root folder } - torrentSymlinkPath := filepath.Join(q.DownloadFolder, debridTorrent.Arr.Name, torrentFolder) // /mnt/symlinks/{category}/MyTVShow/ + torrentSymlinkPath := filepath.Join(torrent.SavePath, torrentFolder) // /mnt/symlinks/{category}/MyTVShow/ err = os.MkdirAll(torrentSymlinkPath, os.ModePerm) if err != nil { return "", fmt.Errorf("failed to create directory: %s: %v", torrentSymlinkPath, err) @@ -192,10 +191,10 @@ func (q *QBit) ProcessSymlink(torrent *Torrent) (string, error) { return nil }) if err != nil { - q.logger.Warn().Msgf("Error while scanning rclone path: %v", err) + s.logger.Warn().Msgf("Error while scanning rclone path: %v", err) } - pending := make(map[string]debrid.File) + pending := make(map[string]types.File) for _, file := range files { if realRelPath, ok := realPaths[file.Name]; ok { file.Path = realRelPath @@ -216,43 +215,43 @@ func (q *QBit) ProcessSymlink(torrent *Torrent) (string, error) { if _, err := os.Stat(fullFilePath); !os.IsNotExist(err) { fileSymlinkPath := filepath.Join(torrentSymlinkPath, file.Name) if err := os.Symlink(fullFilePath, fileSymlinkPath); err != nil && !os.IsExist(err) { - q.logger.Debug().Msgf("Failed to create symlink: %s: %v", fileSymlinkPath, err) + s.logger.Debug().Msgf("Failed to create symlink: %s: %v", fileSymlinkPath, err) } else { filePaths = append(filePaths, fileSymlinkPath) 
delete(pending, path) - q.logger.Info().Msgf("File is ready: %s", file.Name) + s.logger.Info().Msgf("File is ready: %s", file.Name) } } } case <-timeout: - q.logger.Warn().Msgf("Timeout waiting for files, %d files still pending", len(pending)) + s.logger.Warn().Msgf("Timeout waiting for files, %d files still pending", len(pending)) return torrentSymlinkPath, fmt.Errorf("timeout waiting for files: %d files still pending", len(pending)) } } - if q.SkipPreCache { + if s.skipPreCache { return torrentSymlinkPath, nil } go func() { - - if err := q.preCacheFile(debridTorrent.Name, filePaths); err != nil { - q.logger.Error().Msgf("Failed to pre-cache file: %s", err) + s.logger.Debug().Msgf("Pre-caching %s", debridTorrent.Name) + if err := utils.PreCacheFile(filePaths); err != nil { + s.logger.Error().Msgf("Failed to pre-cache file: %s", err) } else { - q.logger.Trace().Msgf("Pre-cached %d files", len(filePaths)) + s.logger.Trace().Msgf("Pre-cached %d files", len(filePaths)) } }() return torrentSymlinkPath, nil } -func (q *QBit) createSymlinksWebdav(debridTorrent *debrid.Torrent, rclonePath, torrentFolder string) (string, error) { +func (s *Store) createSymlinksWebdav(torrent *Torrent, debridTorrent *types.Torrent, rclonePath, torrentFolder string) (string, error) { files := debridTorrent.Files - symlinkPath := filepath.Join(q.DownloadFolder, debridTorrent.Arr.Name, torrentFolder) // /mnt/symlinks/{category}/MyTVShow/ + symlinkPath := filepath.Join(torrent.SavePath, torrentFolder) // /mnt/symlinks/{category}/MyTVShow/ err := os.MkdirAll(symlinkPath, os.ModePerm) if err != nil { return "", fmt.Errorf("failed to create directory: %s: %v", symlinkPath, err) } - remainingFiles := make(map[string]debrid.File) + remainingFiles := make(map[string]types.File) for _, file := range files { remainingFiles[file.Name] = file } @@ -278,107 +277,44 @@ func (q *QBit) createSymlinksWebdav(debridTorrent *debrid.Torrent, rclonePath, t fileSymlinkPath := filepath.Join(symlinkPath, file.Name) 
if err := os.Symlink(fullFilePath, fileSymlinkPath); err != nil && !os.IsExist(err) { - q.logger.Debug().Msgf("Failed to create symlink: %s: %v", fileSymlinkPath, err) + s.logger.Debug().Msgf("Failed to create symlink: %s: %v", fileSymlinkPath, err) } else { filePaths = append(filePaths, fileSymlinkPath) delete(remainingFiles, filename) - q.logger.Info().Msgf("File is ready: %s", file.Name) + s.logger.Info().Msgf("File is ready: %s", file.Name) } } } case <-timeout: - q.logger.Warn().Msgf("Timeout waiting for files, %d files still pending", len(remainingFiles)) + s.logger.Warn().Msgf("Timeout waiting for files, %d files still pending", len(remainingFiles)) return symlinkPath, fmt.Errorf("timeout waiting for files") } } - if q.SkipPreCache { + if s.skipPreCache { return symlinkPath, nil } go func() { - - if err := q.preCacheFile(debridTorrent.Name, filePaths); err != nil { - q.logger.Error().Msgf("Failed to pre-cache file: %s", err) + s.logger.Debug().Msgf("Pre-caching %s", debridTorrent.Name) + if err := utils.PreCacheFile(filePaths); err != nil { + s.logger.Error().Msgf("Failed to pre-cache file: %s", err) } else { - q.logger.Debug().Msgf("Pre-cached %d files", len(filePaths)) + s.logger.Debug().Msgf("Pre-cached %d files", len(filePaths)) } }() // Pre-cache the files in the background // Pre-cache the first 256KB and 1MB of the file return symlinkPath, nil } -func (q *QBit) getTorrentPath(rclonePath string, debridTorrent *debrid.Torrent) (string, error) { +func (s *Store) getTorrentPath(rclonePath string, debridTorrent *types.Torrent) (string, error) { for { torrentPath, err := debridTorrent.GetMountFolder(rclonePath) if err == nil { - q.logger.Debug().Msgf("Found torrent path: %s", torrentPath) + s.logger.Debug().Msgf("Found torrent path: %s", torrentPath) return torrentPath, err } time.Sleep(100 * time.Millisecond) } } - -func (q *QBit) preCacheFile(name string, filePaths []string) error { - q.logger.Trace().Msgf("Pre-caching torrent: %s", name) - if 
len(filePaths) == 0 { - return fmt.Errorf("no file paths provided") - } - - for _, filePath := range filePaths { - err := func(f string) error { - - file, err := os.Open(f) - if err != nil { - if os.IsNotExist(err) { - // File has probably been moved by arr, return silently - return nil - } - return fmt.Errorf("failed to open file: %s: %v", f, err) - } - defer file.Close() - - // Pre-cache the file header (first 256KB) using 16KB chunks. - if err := q.readSmallChunks(file, 0, 256*1024, 16*1024); err != nil { - return err - } - if err := q.readSmallChunks(file, 1024*1024, 64*1024, 16*1024); err != nil { - return err - } - return nil - }(filePath) - if err != nil { - return err - } - } - return nil -} - -func (q *QBit) readSmallChunks(file *os.File, startPos int64, totalToRead int, chunkSize int) error { - _, err := file.Seek(startPos, 0) - if err != nil { - return err - } - - buf := make([]byte, chunkSize) - bytesRemaining := totalToRead - - for bytesRemaining > 0 { - toRead := chunkSize - if bytesRemaining < chunkSize { - toRead = bytesRemaining - } - - n, err := file.Read(buf[:toRead]) - if err != nil { - if err == io.EOF { - break - } - return err - } - - bytesRemaining -= n - } - return nil -} diff --git a/pkg/qbit/misc.go b/pkg/store/misc.go similarity index 57% rename from pkg/qbit/misc.go rename to pkg/store/misc.go index bcc4d3d..630e269 100644 --- a/pkg/qbit/misc.go +++ b/pkg/store/misc.go @@ -1,18 +1,21 @@ -package qbit +package store import ( - "github.com/sirrobot01/decypharr/internal/utils" + "os" + "path/filepath" "strings" ) -func createTorrentFromMagnet(magnet *utils.Magnet, category, source string) *Torrent { +func createTorrentFromMagnet(req *ImportRequest) *Torrent { + magnet := req.Magnet + arrName := req.Arr.Name torrent := &Torrent{ ID: "", Hash: strings.ToLower(magnet.InfoHash), Name: magnet.Name, Size: magnet.Size, - Category: category, - Source: source, + Category: arrName, + Source: string(req.Type), State: "downloading", MagnetUri: 
magnet.Link, @@ -22,6 +25,7 @@ func createTorrentFromMagnet(magnet *utils.Magnet, category, source string) *Tor AutoTmm: false, Ratio: 1, RatioLimit: 1, + SavePath: filepath.Join(req.DownloadFolder, arrName) + string(os.PathSeparator), } return torrent } diff --git a/pkg/store/request.go b/pkg/store/request.go new file mode 100644 index 0000000..80439e0 --- /dev/null +++ b/pkg/store/request.go @@ -0,0 +1,103 @@ +package store + +import ( + "bytes" + "encoding/json" + "github.com/sirrobot01/decypharr/internal/request" + "github.com/sirrobot01/decypharr/internal/utils" + "github.com/sirrobot01/decypharr/pkg/arr" + debridTypes "github.com/sirrobot01/decypharr/pkg/debrid/types" + "net/http" + "net/url" + "time" +) + +type ImportType string + +const ( + ImportTypeQBitTorrent ImportType = "qbit" + ImportTypeAPI ImportType = "api" +) + +func NewImportRequest(debrid string, downloadFolder string, magnet *utils.Magnet, arr *arr.Arr, isSymlink, downloadUncached bool, callBackUrl string, importType ImportType) *ImportRequest { + return &ImportRequest{ + Status: "started", + DownloadFolder: downloadFolder, + Debrid: debrid, + Magnet: magnet, + Arr: arr, + IsSymlink: isSymlink, + DownloadUncached: downloadUncached, + CallBackUrl: callBackUrl, + Type: importType, + } +} + +type ImportRequest struct { + DownloadFolder string `json:"downloadFolder"` + Debrid string `json:"debrid"` + Magnet *utils.Magnet `json:"magnet"` + Arr *arr.Arr `json:"arr"` + IsSymlink bool `json:"isSymlink"` + DownloadUncached bool `json:"downloadUncached"` + CallBackUrl string `json:"callBackUrl"` + + Status string `json:"status"` + CompletedAt time.Time `json:"completedAt,omitempty"` + Error error `json:"error,omitempty"` + + Type ImportType `json:"type"` + Async bool `json:"async"` +} + +type importResponse struct { + Status string `json:"status"` + CompletedAt time.Time `json:"completedAt"` + Error error `json:"error"` + Torrent *Torrent `json:"torrent"` + Debrid *debridTypes.Torrent `json:"debrid"` +} 
+ +func (i *ImportRequest) sendCallback(torrent *Torrent, debridTorrent *debridTypes.Torrent) { + if i.CallBackUrl == "" { + return + } + + // Check if the callback URL is valid + if _, err := url.ParseRequestURI(i.CallBackUrl); err != nil { + return + } + + client := request.New() + payload, err := json.Marshal(&importResponse{ + Status: i.Status, + Error: i.Error, + CompletedAt: i.CompletedAt, + Torrent: torrent, + Debrid: debridTorrent, + }) + if err != nil { + return + } + req, err := http.NewRequest("POST", i.CallBackUrl, bytes.NewReader(payload)) + if err != nil { + return + } + req.Header.Set("Content-Type", "application/json") + _, _ = client.Do(req) + +} + +func (i *ImportRequest) markAsFailed(err error, torrent *Torrent, debridTorrent *debridTypes.Torrent) { + i.Status = "failed" + i.Error = err + i.CompletedAt = time.Now() + i.sendCallback(torrent, debridTorrent) +} + +func (i *ImportRequest) markAsCompleted(torrent *Torrent, debridTorrent *debridTypes.Torrent) { + i.Status = "completed" + i.Error = nil + i.CompletedAt = time.Now() + i.sendCallback(torrent, debridTorrent) +} diff --git a/pkg/store/store.go b/pkg/store/store.go new file mode 100644 index 0000000..00f25bb --- /dev/null +++ b/pkg/store/store.go @@ -0,0 +1,75 @@ +package store + +import ( + "cmp" + "github.com/rs/zerolog" + "github.com/sirrobot01/decypharr/internal/config" + "github.com/sirrobot01/decypharr/internal/logger" + "github.com/sirrobot01/decypharr/pkg/arr" + "github.com/sirrobot01/decypharr/pkg/debrid" + "github.com/sirrobot01/decypharr/pkg/repair" + "sync" + "time" +) + +type Store struct { + repair *repair.Repair + arr *arr.Storage + debrid *debrid.Storage + torrents *TorrentStorage + logger zerolog.Logger + refreshInterval time.Duration + skipPreCache bool + downloadSemaphore chan struct{} +} + +var ( + instance *Store + once sync.Once +) + +// GetStore returns the singleton instance +func GetStore() *Store { + once.Do(func() { + arrs := arr.NewStorage() + deb := 
debrid.NewStorage() + cfg := config.Get() + qbitCfg := cfg.QBitTorrent + + instance = &Store{ + repair: repair.New(arrs, deb), + arr: arrs, + debrid: deb, + torrents: newTorrentStorage(cfg.TorrentsFile()), + logger: logger.New("store"), + refreshInterval: time.Duration(cmp.Or(qbitCfg.RefreshInterval, 10)) * time.Minute, + skipPreCache: qbitCfg.SkipPreCache, + downloadSemaphore: make(chan struct{}, cmp.Or(qbitCfg.MaxDownloads, 5)), + } + }) + return instance +} + +func Reset() { + if instance != nil { + if instance.debrid != nil { + instance.debrid.Reset() + } + close(instance.downloadSemaphore) + } + once = sync.Once{} + instance = nil +} + +func (s *Store) GetArr() *arr.Storage { + return s.arr +} +func (s *Store) GetDebrid() *debrid.Storage { + return s.debrid +} +func (s *Store) GetRepair() *repair.Repair { + return s.repair +} +func (s *Store) GetTorrentStorage() *TorrentStorage { + return s.torrents +} diff --git a/pkg/store/torrent.go b/pkg/store/torrent.go new file mode 100644 index 0000000..a5c0388 --- /dev/null +++ b/pkg/store/torrent.go @@ -0,0 +1,210 @@ +package store + +import ( + "cmp" + "context" + "fmt" + "github.com/sirrobot01/decypharr/internal/request" + "github.com/sirrobot01/decypharr/internal/utils" + debridTypes "github.com/sirrobot01/decypharr/pkg/debrid" + "github.com/sirrobot01/decypharr/pkg/debrid/types" + "os" + "path/filepath" + "time" +) + +func (s *Store) AddTorrent(ctx context.Context, importReq *ImportRequest) error { + torrent := createTorrentFromMagnet(importReq) + debridTorrent, err := debridTypes.ProcessTorrent(ctx, s.debrid, importReq.Debrid, importReq.Magnet, importReq.Arr, importReq.IsSymlink, importReq.DownloadUncached) + if err != nil || debridTorrent == nil { + if err == nil { + err = fmt.Errorf("failed to process torrent") + } + // This error is returned immediately to the user(no need for callback) + return err + } + torrent = s.UpdateTorrentMin(torrent, debridTorrent) + s.torrents.AddOrUpdate(torrent) + go 
s.processFiles(torrent, debridTorrent, importReq) // We can send async for file processing not to delay the response + return nil +} + +func (s *Store) processFiles(torrent *Torrent, debridTorrent *types.Torrent, importReq *ImportRequest) { + client := s.debrid.GetClient(debridTorrent.Debrid) + downloadingStatuses := client.GetDownloadingStatus() + _arr := importReq.Arr + for debridTorrent.Status != "downloaded" { + s.logger.Debug().Msgf("%s <- (%s) Download Progress: %.2f%%", debridTorrent.Debrid, debridTorrent.Name, debridTorrent.Progress) + dbT, err := client.CheckStatus(debridTorrent, importReq.IsSymlink) + if err != nil { + if dbT != nil && dbT.Id != "" { + // Delete the torrent if it was not downloaded + go func() { + _ = client.DeleteTorrent(dbT.Id) + }() + } + s.logger.Error().Msgf("Error checking status: %v", err) + s.markTorrentAsFailed(torrent) + go func() { + _arr.Refresh() + }() + importReq.markAsFailed(err, torrent, debridTorrent) + return + } + + debridTorrent = dbT + torrent = s.UpdateTorrentMin(torrent, debridTorrent) + + // Exit the loop for downloading statuses to prevent memory buildup + if debridTorrent.Status == "downloaded" || !utils.Contains(downloadingStatuses, debridTorrent.Status) { + break + } + if !utils.Contains(client.GetDownloadingStatus(), debridTorrent.Status) { + break + } + time.Sleep(s.refreshInterval) + } + var torrentSymlinkPath string + var err error + debridTorrent.Arr = _arr + + // Check if debrid supports webdav by checking cache + timer := time.Now() + if importReq.IsSymlink { + caches := s.debrid.GetCaches() + cache, useWebdav := caches[debridTorrent.Debrid] + if useWebdav { + s.logger.Info().Msgf("Using internal webdav for %s", debridTorrent.Debrid) + + // Use webdav to download the file + + if err := cache.Add(debridTorrent); err != nil { + s.logger.Error().Msgf("Error adding torrent to cache: %v", err) + s.markTorrentAsFailed(torrent) + importReq.markAsFailed(err, torrent, debridTorrent) + return + } + + rclonePath := 
filepath.Join(debridTorrent.MountPath, cache.GetTorrentFolder(debridTorrent)) // /mnt/remote/realdebrid/MyTVShow + torrentFolderNoExt := utils.RemoveExtension(debridTorrent.Name) + torrentSymlinkPath, err = s.createSymlinksWebdav(torrent, debridTorrent, rclonePath, torrentFolderNoExt) // /mnt/symlinks/{category}/MyTVShow/ + + } else { + // User is using either zurg or debrid webdav + torrentSymlinkPath, err = s.ProcessSymlink(torrent) // /mnt/symlinks/{category}/MyTVShow/ + } + } else { + torrentSymlinkPath, err = s.ProcessManualFile(torrent) + } + if err != nil { + s.markTorrentAsFailed(torrent) + go func() { + _ = client.DeleteTorrent(debridTorrent.Id) + }() + s.logger.Info().Msgf("Error: %v", err) + importReq.markAsFailed(err, torrent, debridTorrent) + return + } + torrent.TorrentPath = torrentSymlinkPath + s.UpdateTorrent(torrent, debridTorrent) + s.logger.Info().Msgf("Adding %s took %s", debridTorrent.Name, time.Since(timer)) + + go importReq.markAsCompleted(torrent, debridTorrent) // Mark the import request as completed, send callback if needed + go func() { + if err := request.SendDiscordMessage("download_complete", "success", torrent.discordContext()); err != nil { + s.logger.Error().Msgf("Error sending discord message: %v", err) + } + }() + _arr.Refresh() +} + +func (s *Store) markTorrentAsFailed(t *Torrent) *Torrent { + t.State = "error" + s.torrents.AddOrUpdate(t) + go func() { + if err := request.SendDiscordMessage("download_failed", "error", t.discordContext()); err != nil { + s.logger.Error().Msgf("Error sending discord message: %v", err) + } + }() + return t +} + +func (s *Store) UpdateTorrentMin(t *Torrent, debridTorrent *types.Torrent) *Torrent { + if debridTorrent == nil { + return t + } + + addedOn, err := time.Parse(time.RFC3339, debridTorrent.Added) + if err != nil { + addedOn = time.Now() + } + totalSize := debridTorrent.Bytes + progress := (cmp.Or(debridTorrent.Progress, 0.0)) / 100.0 + sizeCompleted := int64(float64(totalSize) * progress) + 
+ var speed int64 + if debridTorrent.Speed != 0 { + speed = debridTorrent.Speed + } + var eta int + if speed != 0 { + eta = int((totalSize - sizeCompleted) / speed) + } + t.ID = debridTorrent.Id + t.Name = debridTorrent.Name + t.AddedOn = addedOn.Unix() + t.DebridTorrent = debridTorrent + t.Debrid = debridTorrent.Debrid + t.Size = totalSize + t.Completed = sizeCompleted + t.Downloaded = sizeCompleted + t.DownloadedSession = sizeCompleted + t.Uploaded = sizeCompleted + t.UploadedSession = sizeCompleted + t.AmountLeft = totalSize - sizeCompleted + t.Progress = progress + t.Eta = eta + t.Dlspeed = speed + t.Upspeed = speed + t.ContentPath = filepath.Join(t.SavePath, t.Name) + string(os.PathSeparator) + return t +} + +func (s *Store) UpdateTorrent(t *Torrent, debridTorrent *types.Torrent) *Torrent { + if debridTorrent == nil { + return t + } + + if debridClient := s.debrid.GetClients()[debridTorrent.Debrid]; debridClient != nil { + if debridTorrent.Status != "downloaded" { + _ = debridClient.UpdateTorrent(debridTorrent) + } + } + t = s.UpdateTorrentMin(t, debridTorrent) + t.ContentPath = t.TorrentPath + string(os.PathSeparator) + + if t.IsReady() { + t.State = "pausedUP" + s.torrents.Update(t) + return t + } + + ticker := time.NewTicker(100 * time.Millisecond) + defer ticker.Stop() + + for { + select { + case <-ticker.C: + if t.IsReady() { + t.State = "pausedUP" + s.torrents.Update(t) + return t + } + updatedT := s.UpdateTorrent(t, debridTorrent) + t = updatedT + + case <-time.After(10 * time.Minute): // Add a timeout + return t + } + } +} diff --git a/pkg/qbit/storage.go b/pkg/store/torrent_storage.go similarity index 63% rename from pkg/qbit/storage.go rename to pkg/store/torrent_storage.go index e2671bb..c6f9b8f 100644 --- a/pkg/qbit/storage.go +++ b/pkg/store/torrent_storage.go @@ -1,18 +1,15 @@ -package qbit +package store import ( "encoding/json" "fmt" - "github.com/sirrobot01/decypharr/pkg/service" + "github.com/sirrobot01/decypharr/pkg/debrid/types" "os" "sort" 
"sync" ) func keyPair(hash, category string) string { - if category == "" { - category = "uncategorized" - } return fmt.Sprintf("%s|%s", hash, category) } @@ -36,13 +33,13 @@ func loadTorrentsFromJSON(filename string) (Torrents, error) { return torrents, nil } -func NewTorrentStorage(filename string) *TorrentStorage { +func newTorrentStorage(filename string) *TorrentStorage { // Open the JSON file and read the data torrents, err := loadTorrentsFromJSON(filename) if err != nil { torrents = make(Torrents) } - // Create a new TorrentStorage + // Create a new Storage return &TorrentStorage{ torrents: torrents, filename: filename, @@ -187,12 +184,9 @@ func (ts *TorrentStorage) Delete(hash, category string, removeFromDebrid bool) { return } if removeFromDebrid && torrent.ID != "" && torrent.Debrid != "" { - dbClient := service.GetDebrid().GetClient(torrent.Debrid) + dbClient := GetStore().debrid.GetClient(torrent.Debrid) if dbClient != nil { - err := dbClient.DeleteTorrent(torrent.ID) - if err != nil { - fmt.Println(err) - } + _ = dbClient.DeleteTorrent(torrent.ID) } } @@ -244,10 +238,12 @@ func (ts *TorrentStorage) DeleteMultiple(hashes []string, removeFromDebrid bool) } }() + clients := GetStore().debrid.GetClients() + go func() { for id, debrid := range toDelete { - dbClient := service.GetDebrid().GetClient(debrid) - if dbClient == nil { + dbClient, ok := clients[debrid] + if !ok { continue } err := dbClient.DeleteTorrent(id) @@ -278,3 +274,73 @@ func (ts *TorrentStorage) Reset() { defer ts.mu.Unlock() ts.torrents = make(Torrents) } + +type Torrent struct { + ID string `json:"id"` + Debrid string `json:"debrid"` + TorrentPath string `json:"-"` + DebridTorrent *types.Torrent `json:"-"` + + AddedOn int64 `json:"added_on,omitempty"` + AmountLeft int64 `json:"amount_left"` + AutoTmm bool `json:"auto_tmm"` + Availability float64 `json:"availability,omitempty"` + Category string `json:"category,omitempty"` + Completed int64 `json:"completed"` + CompletionOn int 
`json:"completion_on,omitempty"` + ContentPath string `json:"content_path"` + DlLimit int `json:"dl_limit"` + Dlspeed int64 `json:"dlspeed"` + Downloaded int64 `json:"downloaded"` + DownloadedSession int64 `json:"downloaded_session"` + Eta int `json:"eta"` + FlPiecePrio bool `json:"f_l_piece_prio,omitempty"` + ForceStart bool `json:"force_start,omitempty"` + Hash string `json:"hash"` + LastActivity int64 `json:"last_activity,omitempty"` + MagnetUri string `json:"magnet_uri,omitempty"` + MaxRatio int `json:"max_ratio,omitempty"` + MaxSeedingTime int `json:"max_seeding_time,omitempty"` + Name string `json:"name,omitempty"` + NumComplete int `json:"num_complete,omitempty"` + NumIncomplete int `json:"num_incomplete,omitempty"` + NumLeechs int `json:"num_leechs,omitempty"` + NumSeeds int `json:"num_seeds,omitempty"` + Priority int `json:"priority,omitempty"` + Progress float64 `json:"progress"` + Ratio int `json:"ratio,omitempty"` + RatioLimit int `json:"ratio_limit,omitempty"` + SavePath string `json:"save_path"` + SeedingTimeLimit int `json:"seeding_time_limit,omitempty"` + SeenComplete int64 `json:"seen_complete,omitempty"` + SeqDl bool `json:"seq_dl"` + Size int64 `json:"size,omitempty"` + State string `json:"state,omitempty"` + SuperSeeding bool `json:"super_seeding"` + Tags string `json:"tags,omitempty"` + TimeActive int `json:"time_active,omitempty"` + TotalSize int64 `json:"total_size,omitempty"` + Tracker string `json:"tracker,omitempty"` + UpLimit int64 `json:"up_limit,omitempty"` + Uploaded int64 `json:"uploaded,omitempty"` + UploadedSession int64 `json:"uploaded_session,omitempty"` + Upspeed int64 `json:"upspeed,omitempty"` + Source string `json:"source,omitempty"` + + sync.Mutex +} + +func (t *Torrent) IsReady() bool { + return (t.AmountLeft <= 0 || t.Progress == 1) && t.TorrentPath != "" +} + +func (t *Torrent) discordContext() string { + format := ` + **Name:** %s + **Arr:** %s + **Hash:** %s + **MagnetURI:** %s + **Debrid:** %s + ` + return 
fmt.Sprintf(format, t.Name, t.Category, t.Hash, t.MagnetUri, t.Debrid) +} diff --git a/pkg/web/api.go b/pkg/web/api.go index b3ce47b..a9f6aba 100644 --- a/pkg/web/api.go +++ b/pkg/web/api.go @@ -2,6 +2,7 @@ package web import ( "fmt" + "github.com/sirrobot01/decypharr/pkg/store" "net/http" "strings" "time" @@ -12,34 +13,37 @@ import ( "github.com/sirrobot01/decypharr/internal/request" "github.com/sirrobot01/decypharr/internal/utils" "github.com/sirrobot01/decypharr/pkg/arr" - "github.com/sirrobot01/decypharr/pkg/qbit" - "github.com/sirrobot01/decypharr/pkg/service" "github.com/sirrobot01/decypharr/pkg/version" ) -func (ui *Handler) handleGetArrs(w http.ResponseWriter, r *http.Request) { - svc := service.GetService() - request.JSONResponse(w, svc.Arr.GetAll(), http.StatusOK) +func (wb *Web) handleGetArrs(w http.ResponseWriter, r *http.Request) { + _store := store.GetStore() + request.JSONResponse(w, _store.GetArr().GetAll(), http.StatusOK) } -func (ui *Handler) handleAddContent(w http.ResponseWriter, r *http.Request) { +func (wb *Web) handleAddContent(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() if err := r.ParseMultipartForm(32 << 20); err != nil { http.Error(w, err.Error(), http.StatusBadRequest) return } - svc := service.GetService() + _store := store.GetStore() - results := make([]*qbit.ImportRequest, 0) + results := make([]*store.ImportRequest, 0) errs := make([]string, 0) arrName := r.FormValue("arr") notSymlink := r.FormValue("notSymlink") == "true" - downloadUncached := r.FormValue("downloadUncached") == "true" - if arrName == "" { - arrName = "uncategorized" + debridName := r.FormValue("debrid") + callbackUrl := r.FormValue("callbackUrl") + downloadFolder := r.FormValue("downloadFolder") + if downloadFolder == "" { + downloadFolder = config.Get().QBitTorrent.DownloadFolder } - _arr := svc.Arr.Get(arrName) + downloadUncached := r.FormValue("downloadUncached") == "true" + + _arr := _store.GetArr().Get(arrName) if _arr == nil { _arr = 
arr.New(arrName, "", "", false, false, &downloadUncached) } @@ -59,8 +63,9 @@ func (ui *Handler) handleAddContent(w http.ResponseWriter, r *http.Request) { errs = append(errs, fmt.Sprintf("Failed to parse URL %s: %v", url, err)) continue } - importReq := qbit.NewImportRequest(magnet, _arr, !notSymlink, downloadUncached) - if err := importReq.Process(ui.qbit); err != nil { + + importReq := store.NewImportRequest(debridName, downloadFolder, magnet, _arr, !notSymlink, downloadUncached, callbackUrl, store.ImportTypeAPI) + if err := _store.AddTorrent(ctx, importReq); err != nil { errs = append(errs, fmt.Sprintf("URL %s: %v", url, err)) continue } @@ -83,8 +88,8 @@ func (ui *Handler) handleAddContent(w http.ResponseWriter, r *http.Request) { continue } - importReq := qbit.NewImportRequest(magnet, _arr, !notSymlink, downloadUncached) - err = importReq.Process(ui.qbit) + importReq := store.NewImportRequest(debridName, downloadFolder, magnet, _arr, !notSymlink, downloadUncached, callbackUrl, store.ImportTypeAPI) + err = _store.AddTorrent(ctx, importReq) if err != nil { errs = append(errs, fmt.Sprintf("File %s: %v", fileHeader.Filename, err)) continue @@ -94,27 +99,27 @@ func (ui *Handler) handleAddContent(w http.ResponseWriter, r *http.Request) { } request.JSONResponse(w, struct { - Results []*qbit.ImportRequest `json:"results"` - Errors []string `json:"errors,omitempty"` + Results []*store.ImportRequest `json:"results"` + Errors []string `json:"errors,omitempty"` }{ Results: results, Errors: errs, }, http.StatusOK) } -func (ui *Handler) handleRepairMedia(w http.ResponseWriter, r *http.Request) { +func (wb *Web) handleRepairMedia(w http.ResponseWriter, r *http.Request) { var req RepairRequest if err := json.NewDecoder(r.Body).Decode(&req); err != nil { http.Error(w, err.Error(), http.StatusBadRequest) return } - svc := service.GetService() + _store := store.GetStore() var arrs []string if req.ArrName != "" { - _arr := svc.Arr.Get(req.ArrName) + _arr := 
_store.GetArr().Get(req.ArrName) if _arr == nil { http.Error(w, "No Arrs found to repair", http.StatusNotFound) return @@ -124,15 +129,15 @@ func (ui *Handler) handleRepairMedia(w http.ResponseWriter, r *http.Request) { if req.Async { go func() { - if err := svc.Repair.AddJob(arrs, req.MediaIds, req.AutoProcess, false); err != nil { - ui.logger.Error().Err(err).Msg("Failed to repair media") + if err := _store.GetRepair().AddJob(arrs, req.MediaIds, req.AutoProcess, false); err != nil { + wb.logger.Error().Err(err).Msg("Failed to repair media") } }() request.JSONResponse(w, "Repair process started", http.StatusOK) return } - if err := svc.Repair.AddJob([]string{req.ArrName}, req.MediaIds, req.AutoProcess, false); err != nil { + if err := _store.GetRepair().AddJob([]string{req.ArrName}, req.MediaIds, req.AutoProcess, false); err != nil { http.Error(w, fmt.Sprintf("Failed to repair: %v", err), http.StatusInternalServerError) return } @@ -140,16 +145,16 @@ func (ui *Handler) handleRepairMedia(w http.ResponseWriter, r *http.Request) { request.JSONResponse(w, "Repair completed", http.StatusOK) } -func (ui *Handler) handleGetVersion(w http.ResponseWriter, r *http.Request) { +func (wb *Web) handleGetVersion(w http.ResponseWriter, r *http.Request) { v := version.GetInfo() request.JSONResponse(w, v, http.StatusOK) } -func (ui *Handler) handleGetTorrents(w http.ResponseWriter, r *http.Request) { - request.JSONResponse(w, ui.qbit.Storage.GetAllSorted("", "", nil, "added_on", false), http.StatusOK) +func (wb *Web) handleGetTorrents(w http.ResponseWriter, r *http.Request) { + request.JSONResponse(w, wb.torrents.GetAllSorted("", "", nil, "added_on", false), http.StatusOK) } -func (ui *Handler) handleDeleteTorrent(w http.ResponseWriter, r *http.Request) { +func (wb *Web) handleDeleteTorrent(w http.ResponseWriter, r *http.Request) { hash := chi.URLParam(r, "hash") category := chi.URLParam(r, "category") removeFromDebrid := r.URL.Query().Get("removeFromDebrid") == "true" @@ -157,11 
+162,11 @@ func (ui *Handler) handleDeleteTorrent(w http.ResponseWriter, r *http.Request) { http.Error(w, "No hash provided", http.StatusBadRequest) return } - ui.qbit.Storage.Delete(hash, category, removeFromDebrid) + wb.torrents.Delete(hash, category, removeFromDebrid) w.WriteHeader(http.StatusOK) } -func (ui *Handler) handleDeleteTorrents(w http.ResponseWriter, r *http.Request) { +func (wb *Web) handleDeleteTorrents(w http.ResponseWriter, r *http.Request) { hashesStr := r.URL.Query().Get("hashes") removeFromDebrid := r.URL.Query().Get("removeFromDebrid") == "true" if hashesStr == "" { @@ -169,15 +174,15 @@ func (ui *Handler) handleDeleteTorrents(w http.ResponseWriter, r *http.Request) return } hashes := strings.Split(hashesStr, ",") - ui.qbit.Storage.DeleteMultiple(hashes, removeFromDebrid) + wb.torrents.DeleteMultiple(hashes, removeFromDebrid) w.WriteHeader(http.StatusOK) } -func (ui *Handler) handleGetConfig(w http.ResponseWriter, r *http.Request) { +func (wb *Web) handleGetConfig(w http.ResponseWriter, r *http.Request) { cfg := config.Get() arrCfgs := make([]config.Arr, 0) - svc := service.GetService() - for _, a := range svc.Arr.GetAll() { + _store := store.GetStore() + for _, a := range _store.GetArr().GetAll() { arrCfgs = append(arrCfgs, config.Arr{ Host: a.Host, Name: a.Name, @@ -191,11 +196,11 @@ func (ui *Handler) handleGetConfig(w http.ResponseWriter, r *http.Request) { request.JSONResponse(w, cfg, http.StatusOK) } -func (ui *Handler) handleUpdateConfig(w http.ResponseWriter, r *http.Request) { +func (wb *Web) handleUpdateConfig(w http.ResponseWriter, r *http.Request) { // Decode the JSON body var updatedConfig config.Config if err := json.NewDecoder(r.Body).Decode(&updatedConfig); err != nil { - ui.logger.Error().Err(err).Msg("Failed to decode config update request") + wb.logger.Error().Err(err).Msg("Failed to decode config update request") http.Error(w, "Invalid request body: "+err.Error(), http.StatusBadRequest) return } @@ -232,11 +237,12 @@ func 
(ui *Handler) handleUpdateConfig(w http.ResponseWriter, r *http.Request) { } // Update Arrs through the service - svc := service.GetService() - svc.Arr.Clear() // Clear existing arrs + _store := store.GetStore() + _arr := _store.GetArr() + _arr.Clear() // Clear existing arrs for _, a := range updatedConfig.Arrs { - svc.Arr.AddOrUpdate(&arr.Arr{ + _arr.AddOrUpdate(&arr.Arr{ Name: a.Name, Host: a.Host, Token: a.Token, @@ -263,25 +269,25 @@ func (ui *Handler) handleUpdateConfig(w http.ResponseWriter, r *http.Request) { request.JSONResponse(w, map[string]string{"status": "success"}, http.StatusOK) } -func (ui *Handler) handleGetRepairJobs(w http.ResponseWriter, r *http.Request) { - svc := service.GetService() - request.JSONResponse(w, svc.Repair.GetJobs(), http.StatusOK) +func (wb *Web) handleGetRepairJobs(w http.ResponseWriter, r *http.Request) { + _store := store.GetStore() + request.JSONResponse(w, _store.GetRepair().GetJobs(), http.StatusOK) } -func (ui *Handler) handleProcessRepairJob(w http.ResponseWriter, r *http.Request) { +func (wb *Web) handleProcessRepairJob(w http.ResponseWriter, r *http.Request) { id := chi.URLParam(r, "id") if id == "" { http.Error(w, "No job ID provided", http.StatusBadRequest) return } - svc := service.GetService() - if err := svc.Repair.ProcessJob(id); err != nil { - ui.logger.Error().Err(err).Msg("Failed to process repair job") + _store := store.GetStore() + if err := _store.GetRepair().ProcessJob(id); err != nil { + wb.logger.Error().Err(err).Msg("Failed to process repair job") } w.WriteHeader(http.StatusOK) } -func (ui *Handler) handleDeleteRepairJob(w http.ResponseWriter, r *http.Request) { +func (wb *Web) handleDeleteRepairJob(w http.ResponseWriter, r *http.Request) { // Read ids from body var req struct { IDs []string `json:"ids"` @@ -295,7 +301,22 @@ func (ui *Handler) handleDeleteRepairJob(w http.ResponseWriter, r *http.Request) return } - svc := service.GetService() - svc.Repair.DeleteJobs(req.IDs) + _store := store.GetStore() 
+ _store.GetRepair().DeleteJobs(req.IDs) + w.WriteHeader(http.StatusOK) +} + +func (wb *Web) handleStopRepairJob(w http.ResponseWriter, r *http.Request) { + id := chi.URLParam(r, "id") + if id == "" { + http.Error(w, "No job ID provided", http.StatusBadRequest) + return + } + _store := store.GetStore() + if err := _store.GetRepair().StopJob(id); err != nil { + wb.logger.Error().Err(err).Msg("Failed to stop repair job") + http.Error(w, "Failed to stop job: "+err.Error(), http.StatusInternalServerError) + return + } w.WriteHeader(http.StatusOK) } diff --git a/pkg/web/auth.go b/pkg/web/auth.go index d67d25e..95c45b8 100644 --- a/pkg/web/auth.go +++ b/pkg/web/auth.go @@ -6,7 +6,7 @@ import ( "net/http" ) -func (ui *Handler) verifyAuth(username, password string) bool { +func (wb *Web) verifyAuth(username, password string) bool { // If you're storing hashed password, use bcrypt to compare if username == "" { return false @@ -22,11 +22,11 @@ func (ui *Handler) verifyAuth(username, password string) bool { return err == nil } -func (ui *Handler) skipAuthHandler(w http.ResponseWriter, r *http.Request) { +func (wb *Web) skipAuthHandler(w http.ResponseWriter, r *http.Request) { cfg := config.Get() cfg.UseAuth = false if err := cfg.Save(); err != nil { - ui.logger.Error().Err(err).Msg("failed to save config") + wb.logger.Error().Err(err).Msg("failed to save config") http.Error(w, "failed to save config", http.StatusInternalServerError) return } diff --git a/pkg/web/middlewares.go b/pkg/web/middlewares.go index b029d66..7e334b7 100644 --- a/pkg/web/middlewares.go +++ b/pkg/web/middlewares.go @@ -6,7 +6,7 @@ import ( "net/http" ) -func (ui *Handler) setupMiddleware(next http.Handler) http.Handler { +func (wb *Web) setupMiddleware(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { cfg := config.Get() needsAuth := cfg.NeedsSetup() @@ -24,7 +24,7 @@ func (ui *Handler) setupMiddleware(next http.Handler) http.Handler { }) } -func 
(ui *Handler) authMiddleware(next http.Handler) http.Handler { +func (wb *Web) authMiddleware(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { // Check if setup is needed cfg := config.Get() @@ -38,7 +38,7 @@ func (ui *Handler) authMiddleware(next http.Handler) http.Handler { return } - session, _ := store.Get(r, "auth-session") + session, _ := wb.cookie.Get(r, "auth-session") auth, ok := session.Values["authenticated"].(bool) if !ok || !auth { diff --git a/pkg/web/routes.go b/pkg/web/routes.go index 005a4ee..2a96f2d 100644 --- a/pkg/web/routes.go +++ b/pkg/web/routes.go @@ -5,35 +5,36 @@ import ( "net/http" ) -func (ui *Handler) Routes() http.Handler { +func (wb *Web) Routes() http.Handler { r := chi.NewRouter() - r.Get("/login", ui.LoginHandler) - r.Post("/login", ui.LoginHandler) - r.Get("/register", ui.RegisterHandler) - r.Post("/register", ui.RegisterHandler) - r.Get("/skip-auth", ui.skipAuthHandler) - r.Get("/version", ui.handleGetVersion) + r.Get("/login", wb.LoginHandler) + r.Post("/login", wb.LoginHandler) + r.Get("/register", wb.RegisterHandler) + r.Post("/register", wb.RegisterHandler) + r.Get("/skip-auth", wb.skipAuthHandler) + r.Get("/version", wb.handleGetVersion) r.Group(func(r chi.Router) { - r.Use(ui.authMiddleware) - r.Use(ui.setupMiddleware) - r.Get("/", ui.IndexHandler) - r.Get("/download", ui.DownloadHandler) - r.Get("/repair", ui.RepairHandler) - r.Get("/config", ui.ConfigHandler) + r.Use(wb.authMiddleware) + r.Use(wb.setupMiddleware) + r.Get("/", wb.IndexHandler) + r.Get("/download", wb.DownloadHandler) + r.Get("/repair", wb.RepairHandler) + r.Get("/config", wb.ConfigHandler) r.Route("/api", func(r chi.Router) { - r.Get("/arrs", ui.handleGetArrs) - r.Post("/add", ui.handleAddContent) - r.Post("/repair", ui.handleRepairMedia) - r.Get("/repair/jobs", ui.handleGetRepairJobs) - r.Post("/repair/jobs/{id}/process", ui.handleProcessRepairJob) - r.Delete("/repair/jobs", ui.handleDeleteRepairJob) 
- r.Get("/torrents", ui.handleGetTorrents) - r.Delete("/torrents/{category}/{hash}", ui.handleDeleteTorrent) - r.Delete("/torrents/", ui.handleDeleteTorrents) - r.Get("/config", ui.handleGetConfig) - r.Post("/config", ui.handleUpdateConfig) + r.Get("/arrs", wb.handleGetArrs) + r.Post("/add", wb.handleAddContent) + r.Post("/repair", wb.handleRepairMedia) + r.Get("/repair/jobs", wb.handleGetRepairJobs) + r.Post("/repair/jobs/{id}/process", wb.handleProcessRepairJob) + r.Post("/repair/jobs/{id}/stop", wb.handleStopRepairJob) + r.Delete("/repair/jobs", wb.handleDeleteRepairJob) + r.Get("/torrents", wb.handleGetTorrents) + r.Delete("/torrents/{category}/{hash}", wb.handleDeleteTorrent) + r.Delete("/torrents/", wb.handleDeleteTorrents) + r.Get("/config", wb.handleGetConfig) + r.Post("/config", wb.handleUpdateConfig) }) }) diff --git a/pkg/web/templates/config.html b/pkg/web/templates/config.html index 5b009b4..9760612 100644 --- a/pkg/web/templates/config.html +++ b/pkg/web/templates/config.html @@ -245,43 +245,48 @@
-
+
- +
-
+
- + Interval for the repair process (e.g., 24h, 1d, 03:00, or a crontab)
+
+ + + Number of workers to use for the repair process +
- Speeds up the repair process by using Zurg + If you have Zurg running, you can use it to speed up the repair process
-
+
Use Internal Webdav for repair(make sure webdav is enabled in the debrid section
-
+
Run repair on startup
-
+
@@ -340,7 +345,14 @@ Rate limit for the debrid service. Confirm your debrid service rate limit
-
+
+
+
+ + +
+ Create an internal webdav for this debrid +
@@ -348,13 +360,6 @@
Download uncached files from the debrid service
-
-
- - -
- Check if the file is cached before downloading(Disabled) -
@@ -369,16 +374,10 @@
Preprocess RARed torrents to allow reading the files inside
-
-
- - -
- Create an internal webdav for this debrid -
-
-
Webdav
+
+
+
Webdav Settings
@@ -441,12 +440,12 @@
-
-
Custom Folders
+
+
Virtual Folders

Create virtual directories with filters to organize your content

- +
+
@@ -218,6 +221,27 @@ } } + // Return status text and class based on job status + function getStatus(status) { + switch (status) { + case 'started': + return {text: 'In Progress', class: 'text-primary'}; + case 'failed': + return {text: 'Failed', class: 'text-danger'}; + case 'completed': + return {text: 'Completed', class: 'text-success'}; + case 'pending': + return {text: 'Pending', class: 'text-warning'}; + case 'cancelled': + return {text: 'Cancelled', class: 'text-secondary'}; + case 'processing': + return {text: 'Processing', class: 'text-info'}; + default: + // Return status in title case if unknown + return {text: status.charAt(0).toUpperCase() + status.slice(1), class: 'text-secondary'}; + } + } + // Render jobs table with pagination function renderJobsTable(page) { const tableBody = document.getElementById('jobsTableBody'); @@ -254,24 +278,10 @@ const formattedDate = startedDate.toLocaleString(); // Determine status - let status = 'In Progress'; - let statusClass = 'text-primary'; + let status = getStatus(job.status); let canDelete = job.status !== "started"; let totalItems = job.broken_items ? Object.values(job.broken_items).reduce((sum, arr) => sum + arr.length, 0) : 0; - if (job.status === 'failed') { - status = 'Failed'; - statusClass = 'text-danger'; - } else if (job.status === 'completed') { - status = 'Completed'; - statusClass = 'text-success'; - } else if (job.status === 'pending') { - status = 'Pending'; - statusClass = 'text-warning'; - } else if (job.status === "processing") { - status = 'Processing'; - statusClass = 'text-info'; - } row.innerHTML = ` @@ -283,25 +293,31 @@ ${job.id.substring(0, 8)} ${job.arrs.join(', ')} ${formattedDate} - ${status} + ${status.text} ${totalItems} ${job.status === "pending" ? - `` : - `` - } + } + ${(job.status === "started" || job.status === "processing") ? + `` : + '' + } ${canDelete ? 
- `` : - `` - } + `` : + `` + } `; @@ -370,6 +386,13 @@ viewJobDetails(jobId); }); }); + + document.querySelectorAll('.stop-job').forEach(button => { + button.addEventListener('click', (e) => { + const jobId = e.currentTarget.dataset.id; + stopJob(jobId); + }); + }); } document.getElementById('selectAllJobs').addEventListener('change', function() { @@ -456,6 +479,25 @@ } } + async function stopJob(jobId) { + if (confirm('Are you sure you want to stop this job?')) { + try { + const response = await fetcher(`/api/repair/jobs/${jobId}/stop`, { + method: 'POST', + headers: { + 'Content-Type': 'application/json' + }, + }); + + if (!response.ok) throw new Error(await response.text()); + createToast('Job stop requested successfully'); + await loadJobs(currentPage); // Refresh the jobs list + } catch (error) { + createToast(`Error stopping job: ${error.message}`, 'error'); + } + } + } + // View job details function function viewJobDetails(jobId) { // Find the job @@ -477,24 +519,9 @@ } // Set status with color - let status = 'In Progress'; - let statusClass = 'text-primary'; + let status = getStatus(job.status); - if (job.status === 'failed') { - status = 'Failed'; - statusClass = 'text-danger'; - } else if (job.status === 'completed') { - status = 'Completed'; - statusClass = 'text-success'; - } else if (job.status === 'pending') { - status = 'Pending'; - statusClass = 'text-warning'; - } else if (job.status === "processing") { - status = 'Processing'; - statusClass = 'text-info'; - } - - document.getElementById('modalJobStatus').innerHTML = `${status}`; + document.getElementById('modalJobStatus').innerHTML = `${status.text}`; // Set other job details document.getElementById('modalJobArrs').textContent = job.arrs.join(', '); @@ -524,6 +551,19 @@ processBtn.classList.add('d-none'); } + // Stop button visibility + const stopBtn = document.getElementById('stopJobBtn'); // You'll need to add this button to the HTML + if (job.status === 'started' || job.status === 
'processing') { + stopBtn.classList.remove('d-none'); + stopBtn.onclick = () => { + stopJob(job.id); + const modal = bootstrap.Modal.getInstance(document.getElementById('jobDetailsModal')); + modal.hide(); + }; + } else { + stopBtn.classList.add('d-none'); + } + // Populate broken items table const brokenItemsTableBody = document.getElementById('brokenItemsTableBody'); const noBrokenItemsMessage = document.getElementById('noBrokenItemsMessage'); diff --git a/pkg/web/ui.go b/pkg/web/ui.go index b7659a5..9ce2009 100644 --- a/pkg/web/ui.go +++ b/pkg/web/ui.go @@ -7,7 +7,7 @@ import ( "net/http" ) -func (ui *Handler) LoginHandler(w http.ResponseWriter, r *http.Request) { +func (wb *Web) LoginHandler(w http.ResponseWriter, r *http.Request) { cfg := config.Get() if cfg.NeedsAuth() { http.Redirect(w, r, "/register", http.StatusSeeOther) @@ -19,7 +19,7 @@ func (ui *Handler) LoginHandler(w http.ResponseWriter, r *http.Request) { "Page": "login", "Title": "Login", } - _ = templates.ExecuteTemplate(w, "layout", data) + _ = wb.templates.ExecuteTemplate(w, "layout", data) return } @@ -33,8 +33,8 @@ func (ui *Handler) LoginHandler(w http.ResponseWriter, r *http.Request) { return } - if ui.verifyAuth(credentials.Username, credentials.Password) { - session, _ := store.Get(r, "auth-session") + if wb.verifyAuth(credentials.Username, credentials.Password) { + session, _ := wb.cookie.Get(r, "auth-session") session.Values["authenticated"] = true session.Values["username"] = credentials.Username if err := session.Save(r, w); err != nil { @@ -48,8 +48,8 @@ func (ui *Handler) LoginHandler(w http.ResponseWriter, r *http.Request) { http.Error(w, "Invalid credentials", http.StatusUnauthorized) } -func (ui *Handler) LogoutHandler(w http.ResponseWriter, r *http.Request) { - session, _ := store.Get(r, "auth-session") +func (wb *Web) LogoutHandler(w http.ResponseWriter, r *http.Request) { + session, _ := wb.cookie.Get(r, "auth-session") session.Values["authenticated"] = false 
session.Options.MaxAge = -1 err := session.Save(r, w) @@ -59,7 +59,7 @@ func (ui *Handler) LogoutHandler(w http.ResponseWriter, r *http.Request) { http.Redirect(w, r, "/login", http.StatusSeeOther) } -func (ui *Handler) RegisterHandler(w http.ResponseWriter, r *http.Request) { +func (wb *Web) RegisterHandler(w http.ResponseWriter, r *http.Request) { cfg := config.Get() authCfg := cfg.GetAuth() @@ -69,7 +69,7 @@ func (ui *Handler) RegisterHandler(w http.ResponseWriter, r *http.Request) { "Page": "register", "Title": "Register", } - _ = templates.ExecuteTemplate(w, "layout", data) + _ = wb.templates.ExecuteTemplate(w, "layout", data) return } @@ -99,7 +99,7 @@ func (ui *Handler) RegisterHandler(w http.ResponseWriter, r *http.Request) { } // Create a session - session, _ := store.Get(r, "auth-session") + session, _ := wb.cookie.Get(r, "auth-session") session.Values["authenticated"] = true session.Values["username"] = username if err := session.Save(r, w); err != nil { @@ -110,42 +110,49 @@ func (ui *Handler) RegisterHandler(w http.ResponseWriter, r *http.Request) { http.Redirect(w, r, "/", http.StatusSeeOther) } -func (ui *Handler) IndexHandler(w http.ResponseWriter, r *http.Request) { +func (wb *Web) IndexHandler(w http.ResponseWriter, r *http.Request) { cfg := config.Get() data := map[string]interface{}{ "URLBase": cfg.URLBase, "Page": "index", "Title": "Torrents", } - _ = templates.ExecuteTemplate(w, "layout", data) + _ = wb.templates.ExecuteTemplate(w, "layout", data) } -func (ui *Handler) DownloadHandler(w http.ResponseWriter, r *http.Request) { +func (wb *Web) DownloadHandler(w http.ResponseWriter, r *http.Request) { cfg := config.Get() - data := map[string]interface{}{ - "URLBase": cfg.URLBase, - "Page": "download", - "Title": "Download", + debrids := make([]string, 0) + for _, d := range cfg.Debrids { + debrids = append(debrids, d.Name) } - _ = templates.ExecuteTemplate(w, "layout", data) + data := map[string]interface{}{ + "URLBase": cfg.URLBase, + "Page": 
"download", + "Title": "Download", + "Debrids": debrids, + "HasMultiDebrid": len(debrids) > 1, + "DownloadFolder": cfg.QBitTorrent.DownloadFolder, + } + _ = wb.templates.ExecuteTemplate(w, "layout", data) } -func (ui *Handler) RepairHandler(w http.ResponseWriter, r *http.Request) { +func (wb *Web) RepairHandler(w http.ResponseWriter, r *http.Request) { cfg := config.Get() data := map[string]interface{}{ "URLBase": cfg.URLBase, "Page": "repair", "Title": "Repair", } - _ = templates.ExecuteTemplate(w, "layout", data) + _ = wb.templates.ExecuteTemplate(w, "layout", data) } -func (ui *Handler) ConfigHandler(w http.ResponseWriter, r *http.Request) { +func (wb *Web) ConfigHandler(w http.ResponseWriter, r *http.Request) { cfg := config.Get() data := map[string]interface{}{ "URLBase": cfg.URLBase, "Page": "config", "Title": "Config", } - _ = templates.ExecuteTemplate(w, "layout", data) + _ = wb.templates.ExecuteTemplate(w, "layout", data) } diff --git a/pkg/web/server.go b/pkg/web/web.go similarity index 70% rename from pkg/web/server.go rename to pkg/web/web.go index 2f2703f..aa20c58 100644 --- a/pkg/web/server.go +++ b/pkg/web/web.go @@ -6,7 +6,7 @@ import ( "github.com/gorilla/sessions" "github.com/rs/zerolog" "github.com/sirrobot01/decypharr/internal/logger" - "github.com/sirrobot01/decypharr/pkg/qbit" + "github.com/sirrobot01/decypharr/pkg/store" "html/template" "os" ) @@ -50,26 +50,15 @@ type RepairRequest struct { //go:embed templates/* var content embed.FS -type Handler struct { - qbit *qbit.QBit - logger zerolog.Logger -} - -func New(qbit *qbit.QBit) *Handler { - return &Handler{ - qbit: qbit, - logger: logger.New("ui"), - } -} - -var ( - secretKey = cmp.Or(os.Getenv("DECYPHARR_SECRET_KEY"), "\"wqj(v%lj*!-+kf@4&i95rhh_!5_px5qnuwqbr%cjrvrozz_r*(\"") - store = sessions.NewCookieStore([]byte(secretKey)) +type Web struct { + logger zerolog.Logger + cookie *sessions.CookieStore templates *template.Template -) + torrents *store.TorrentStorage +} -func init() { - 
templates = template.Must(template.ParseFS( +func New() *Web { + templates := template.Must(template.ParseFS( content, "templates/layout.html", "templates/index.html", @@ -79,10 +68,17 @@ func init() { "templates/login.html", "templates/register.html", )) - - store.Options = &sessions.Options{ + secretKey := cmp.Or(os.Getenv("DECYPHARR_SECRET_KEY"), "\"wqj(v%lj*!-+kf@4&i95rhh_!5_px5qnuwqbr%cjrvrozz_r*(\"") + cookieStore := sessions.NewCookieStore([]byte(secretKey)) + cookieStore.Options = &sessions.Options{ Path: "/", MaxAge: 86400 * 7, HttpOnly: false, } + return &Web{ + logger: logger.New("ui"), + templates: templates, + cookie: cookieStore, + torrents: store.GetStore().GetTorrentStorage(), + } } diff --git a/pkg/webdav/file.go b/pkg/webdav/file.go index 3fd6c0b..20678ec 100644 --- a/pkg/webdav/file.go +++ b/pkg/webdav/file.go @@ -9,7 +9,7 @@ import ( "strings" "time" - "github.com/sirrobot01/decypharr/pkg/debrid/debrid" + "github.com/sirrobot01/decypharr/pkg/debrid/store" ) var sharedClient = &http.Client{ @@ -28,7 +28,7 @@ var sharedClient = &http.Client{ } type File struct { - cache *debrid.Cache + cache *store.Cache fileId string torrentName string @@ -128,7 +128,7 @@ func (f *File) stream() (*http.Response, error) { cleanupResp := func() { if resp.Body != nil { - io.Copy(io.Discard, resp.Body) + _, _ = io.Copy(io.Discard, resp.Body) resp.Body.Close() } } @@ -192,7 +192,7 @@ func (f *File) stream() (*http.Response, error) { if newResp.StatusCode != http.StatusOK && newResp.StatusCode != http.StatusPartialContent { cleanupBody := func() { if newResp.Body != nil { - io.Copy(io.Discard, newResp.Body) + _, _ = io.Copy(io.Discard, newResp.Body) newResp.Body.Close() } } diff --git a/pkg/webdav/handler.go b/pkg/webdav/handler.go index 8df178b..923cba4 100644 --- a/pkg/webdav/handler.go +++ b/pkg/webdav/handler.go @@ -3,6 +3,8 @@ package webdav import ( "context" "fmt" + "github.com/sirrobot01/decypharr/pkg/debrid/types" + "golang.org/x/net/webdav" "io" "mime" 
"net/http" @@ -15,21 +17,19 @@ import ( "github.com/rs/zerolog" "github.com/sirrobot01/decypharr/internal/utils" - "github.com/sirrobot01/decypharr/pkg/debrid/debrid" - "github.com/sirrobot01/decypharr/pkg/debrid/types" + "github.com/sirrobot01/decypharr/pkg/debrid/store" "github.com/sirrobot01/decypharr/pkg/version" - "golang.org/x/net/webdav" ) type Handler struct { Name string logger zerolog.Logger - cache *debrid.Cache + cache *store.Cache URLBase string RootPath string } -func NewHandler(name, urlBase string, cache *debrid.Cache, logger zerolog.Logger) *Handler { +func NewHandler(name, urlBase string, cache *store.Cache, logger zerolog.Logger) *Handler { h := &Handler{ Name: name, cache: cache, @@ -191,7 +191,7 @@ func (h *Handler) OpenFile(ctx context.Context, name string, flag int, perm os.F } name = utils.PathUnescape(path.Clean(name)) rootDir := path.Clean(h.RootPath) - metadataOnly := ctx.Value("metadataOnly") != nil + metadataOnly := ctx.Value(metadataOnlyKey) != nil now := time.Now() // 1) special case version.txt @@ -490,7 +490,7 @@ func (h *Handler) handleGet(w http.ResponseWriter, r *http.Request) { done := make(chan struct{}) go func() { defer close(done) - io.Copy(w, fRaw) + _, _ = io.Copy(w, fRaw) }() select { case <-ctx.Done(): diff --git a/pkg/webdav/misc.go b/pkg/webdav/misc.go index 25deb85..2e3ad64 100644 --- a/pkg/webdav/misc.go +++ b/pkg/webdav/misc.go @@ -84,9 +84,7 @@ func filesToXML(urlPath string, fi os.FileInfo, children []os.FileInfo) stringbu }) } - sb := builderPool.Get().(stringbuf.StringBuf) - sb.Reset() - defer builderPool.Put(sb) + sb := stringbuf.New("") // XML header and main element _, _ = sb.WriteString(``) diff --git a/pkg/webdav/propfind.go b/pkg/webdav/propfind.go index 27c0a59..62ccd51 100644 --- a/pkg/webdav/propfind.go +++ b/pkg/webdav/propfind.go @@ -8,21 +8,19 @@ import ( "path" "strconv" "strings" - "sync" "time" ) -var builderPool = sync.Pool{ +type contextKey string - New: func() interface{} { - buf := 
stringbuf.New("") - return buf - }, -} +const ( + // metadataOnlyKey is used to indicate that the request is for metadata only + metadataOnlyKey contextKey = "metadataOnly" +) func (h *Handler) handlePropfind(w http.ResponseWriter, r *http.Request) { // Setup context for metadata only - ctx := context.WithValue(r.Context(), "metadataOnly", true) + ctx := context.WithValue(r.Context(), metadataOnlyKey, true) r = r.WithContext(ctx) cleanPath := path.Clean(r.URL.Path) @@ -85,9 +83,7 @@ func (h *Handler) handlePropfind(w http.ResponseWriter, r *http.Request) { }) } - sb := builderPool.Get().(stringbuf.StringBuf) - sb.Reset() - defer builderPool.Put(sb) + sb := stringbuf.New("") // XML header and main element _, _ = sb.WriteString(``) diff --git a/pkg/webdav/webdav.go b/pkg/webdav/webdav.go index 6ead760..7259234 100644 --- a/pkg/webdav/webdav.go +++ b/pkg/webdav/webdav.go @@ -7,7 +7,7 @@ import ( "github.com/go-chi/chi/v5" "github.com/go-chi/chi/v5/middleware" "github.com/sirrobot01/decypharr/internal/config" - "github.com/sirrobot01/decypharr/pkg/service" + "github.com/sirrobot01/decypharr/pkg/store" "html/template" "net/http" "net/url" @@ -90,13 +90,12 @@ type WebDav struct { } func New() *WebDav { - svc := service.GetService() urlBase := config.Get().URLBase w := &WebDav{ Handlers: make([]*Handler, 0), URLBase: urlBase, } - for name, c := range svc.Debrid.Caches { + for name, c := range store.GetStore().GetDebrid().GetCaches() { h := NewHandler(name, urlBase, c, c.GetLogger()) w.Handlers = append(w.Handlers, h) } diff --git a/pkg/worker/worker.go b/pkg/worker/worker.go deleted file mode 100644 index 3c58e0f..0000000 --- a/pkg/worker/worker.go +++ /dev/null @@ -1,72 +0,0 @@ -package worker - -import ( - "context" - "github.com/rs/zerolog" - "github.com/sirrobot01/decypharr/internal/config" - "github.com/sirrobot01/decypharr/internal/logger" - "github.com/sirrobot01/decypharr/pkg/service" - "sync" - "time" -) - -var ( - _logInstance zerolog.Logger -) - -func 
getLogger() zerolog.Logger { - return _logInstance -} - -func Start(ctx context.Context) error { - cfg := config.Get() - // Start Arr Refresh Worker - _logInstance = logger.New("worker") - - var wg sync.WaitGroup - wg.Add(1) - go func() { - defer wg.Done() - cleanUpQueuesWorker(ctx, cfg) - }() - wg.Wait() - return nil -} - -func cleanUpQueuesWorker(ctx context.Context, cfg *config.Config) { - // Start Clean up Queues Worker - _logger := getLogger() - _logger.Debug().Msg("Clean up Queues Worker started") - cleanupCtx := context.WithValue(ctx, "worker", "cleanup") - cleanupTicker := time.NewTicker(time.Duration(10) * time.Second) - - var cleanupMutex sync.Mutex - - for { - select { - case <-cleanupCtx.Done(): - _logger.Debug().Msg("Clean up Queues Worker stopped") - return - case <-cleanupTicker.C: - if cleanupMutex.TryLock() { - go func() { - defer cleanupMutex.Unlock() - cleanUpQueues() - }() - } - } - } -} - -func cleanUpQueues() { - // Clean up queues - _logger := getLogger() - for _, a := range service.GetService().Arr.GetAll() { - if !a.Cleanup { - continue - } - if err := a.CleanupQueue(); err != nil { - _logger.Error().Err(err).Msg("Error cleaning up queue") - } - } -} diff --git a/scripts/deploy.sh b/scripts/deploy.sh deleted file mode 100755 index 662ecfa..0000000 --- a/scripts/deploy.sh +++ /dev/null @@ -1,57 +0,0 @@ -#!/bin/bash - -# deploy.sh - -# Function to display usage -usage() { - echo "Usage: $0 [-b|--beta] " - echo "Example for main: $0 v1.0.0" - echo "Example for beta: $0 -b v1.0.0" - exit 1 -} - -# Parse arguments -BETA=false - -while [[ "$#" -gt 0 ]]; do - case $1 in - -b|--beta) BETA=true; shift ;; - -*) echo "Unknown parameter: $1"; usage ;; - *) VERSION="$1"; shift ;; - esac -done - -# Check if version is provided -if [ -z "$VERSION" ]; then - echo "Error: Version is required" - usage -fi - -# Validate version format -if ! 
[[ $VERSION =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then - echo "Error: Version must be in format v1.0.0" - exit 1 -fi - -# Set tag based on branch -if [ "$BETA" = true ]; then - TAG="$VERSION-beta" - BRANCH="beta" -else - TAG="$VERSION" - BRANCH="main" -fi - -echo "Deploying version $VERSION to $BRANCH branch..." - -# Ensure we're on the right branch -git checkout $BRANCH || exit 1 - -# Create and push tag -echo "Creating tag $TAG..." -git tag "$TAG" || exit 1 -git push origin "$TAG" || exit 1 - -echo "Deployment initiated successfully!" -echo "GitHub Actions will handle the release process." -echo "Check the progress at: https://github.com/sirrobot01/decypharr/actions" \ No newline at end of file