- Revamp decypharr arch \n

- Add callback_url, download_folder to addContent API \n
- Fix few bugs \n
- More declarative UI keywords
- Speed up repairs
- Few other improvements/bug fixes
This commit is contained in:
Mukhtar Akere
2025-06-02 12:57:36 +01:00
parent 1cd09239f9
commit 9c6c44d785
67 changed files with 1726 additions and 1464 deletions

View File

@@ -61,6 +61,4 @@ EXPOSE 8282
VOLUME ["/app"]
USER nonroot:nonroot
HEALTHCHECK --interval=3s --retries=10 CMD ["/usr/bin/healthcheck", "--config", "/app"]
CMD ["/usr/bin/decypharr", "--config", "/app"]

View File

@@ -7,11 +7,10 @@ import (
"github.com/sirrobot01/decypharr/internal/logger"
"github.com/sirrobot01/decypharr/pkg/qbit"
"github.com/sirrobot01/decypharr/pkg/server"
"github.com/sirrobot01/decypharr/pkg/service"
"github.com/sirrobot01/decypharr/pkg/store"
"github.com/sirrobot01/decypharr/pkg/version"
"github.com/sirrobot01/decypharr/pkg/web"
"github.com/sirrobot01/decypharr/pkg/webdav"
"github.com/sirrobot01/decypharr/pkg/worker"
"net/http"
"os"
"runtime"
@@ -62,7 +61,7 @@ func Start(ctx context.Context) error {
qb := qbit.New()
wd := webdav.New()
ui := web.New(qb).Routes()
ui := web.New().Routes()
webdavRoutes := wd.Routes()
qbitRoutes := qb.Routes()
@@ -95,14 +94,14 @@ func Start(ctx context.Context) error {
_log.Info().Msg("Restarting Decypharr...")
<-done // wait for them to finish
qb.Reset()
service.Reset()
store.Reset()
// rebuild svcCtx off the original parent
svcCtx, cancelSvc = context.WithCancel(ctx)
runtime.GC()
config.Reload()
service.Reset()
store.Reset()
// loop will restart services automatically
}
}
@@ -146,11 +145,7 @@ func startServices(ctx context.Context, wd *webdav.WebDav, srv *server.Server) e
})
safeGo(func() error {
return worker.Start(ctx)
})
safeGo(func() error {
arr := service.GetService().Arr
arr := store.GetStore().GetArr()
if arr == nil {
return nil
}
@@ -159,9 +154,9 @@ func startServices(ctx context.Context, wd *webdav.WebDav, srv *server.Server) e
if cfg := config.Get(); cfg.Repair.Enabled {
safeGo(func() error {
r := service.GetService().Repair
if r != nil {
if err := r.Start(ctx); err != nil {
repair := store.GetStore().GetRepair()
if repair != nil {
if err := repair.Start(ctx); err != nil {
_log.Error().Err(err).Msg("repair failed")
}
}

View File

@@ -145,5 +145,8 @@ func checkWebDAV(ctx context.Context, baseUrl, port, path string) bool {
}
defer resp.Body.Close()
return resp.StatusCode == 207 || resp.StatusCode == http.StatusOK
return resp.StatusCode == http.StatusMultiStatus ||
resp.StatusCode == http.StatusOK ||
resp.StatusCode == http.StatusServiceUnavailable // It's still indexing
}

View File

@@ -5,7 +5,7 @@ This guide will help you set up Decypharr with Rclone, allowing you to use your
#### Rclone
Make sure you have Rclone installed and configured on your system. You can follow the [Rclone installation guide](https://rclone.org/install/) for instructions.
It's recommended to use docker version of Rclone, as it provides a consistent environment across different platforms.
It's recommended to use a docker version of Rclone, as it provides a consistent environment across different platforms.
### Steps
@@ -35,7 +35,7 @@ Create a `rclone.conf` file in `/opt/rclone/` with your Rclone configuration.
```conf
[decypharr]
type = webdav
url = https://your-ip-or-domain:8282/webdav/realdebrid
url = http://your-ip-or-domain:8282/webdav/realdebrid
vendor = other
pacer_min_sleep = 0
```
@@ -69,13 +69,10 @@ services:
decypharr:
image: cy01/blackhole:latest
container_name: decypharr
user: "1000:1000"
volumes:
- /mnt/:/mnt
- /mnt/:/mnt:rslave
- /opt/decypharr/:/app
environment:
- PUID=1000
- PGID=1000
- UMASK=002
ports:
- "8282:8282/tcp"
@@ -87,14 +84,11 @@ services:
restart: unless-stopped
environment:
TZ: UTC
PUID: 1000
PGID: 1000
ports:
- 5572:5572
volumes:
- /mnt/remote/realdebrid:/data:rshared
- /opt/rclone/rclone.conf:/config/rclone/rclone.conf
- /mnt:/mnt
cap_add:
- SYS_ADMIN
security_opt:
@@ -105,9 +99,17 @@ services:
decypharr:
condition: service_healthy
restart: true
command: "mount decypharr: /data --allow-non-empty --allow-other --uid=1000 --gid=1000 --umask=002 --dir-cache-time 10s --rc --rc-addr :5572 --rc-no-auth "
command: "mount decypharr: /data --allow-non-empty --allow-other --dir-cache-time 10s --rc --rc-addr :5572 --rc-no-auth "
```
#### Docker Notes
- Ensure that the `/mnt/` directory is mounted correctly to access your media files.
- You can check your current user and group IDs and UMASK by running `id -a` and `umask` commands in your terminal.
- You can adjust the `PUID` and `PGID` environment variables to match your user and group IDs for proper file permissions.
- Also adding `--uid=$YOUR_PUID --gid=$YOUR_PGID` to the `rclone mount` command can help with permissions.
- The `UMASK` environment variable can be set to control file permissions created by Decypharr.
Start the containers:
```bash
docker-compose up -d
@@ -132,7 +134,7 @@ For each provider, you'll need a different rclone. OR you can change your `rclon
```apache
[decypharr]
type = webdav
url = https://your-ip-or-domain:8282/webdav/
url = http://your-ip-or-domain:8282/webdav/
vendor = other
pacer_min_sleep = 0
```

View File

@@ -45,7 +45,6 @@ docker run -d \
Create a `docker-compose.yml` file with the following content:
```yaml
version: '3.7'
services:
decypharr:
image: cy01/blackhole:latest
@@ -64,20 +63,14 @@ Run the Docker Compose setup:
docker-compose up -d
```
#### Notes for Docker Users
- Ensure that the `/mnt/` directory is mounted correctly to access your media files.
- The `./config/` directory should contain your `config.json` file.
- You can adjust the `PUID` and `PGID` environment variables to match your user and group IDs for proper file permissions.
- The `UMASK` environment variable can be set to control file permissions created by Decypharr.
## Binary Installation
If you prefer not to use Docker, you can download and run the binary directly.
Download the binary from the releases page
Download your OS-specific release from the [releases page](https://github.com/sirrobot01/decypharr/releases).
Create a configuration file (see Configuration)
Run the binary:
```bash
chmod +x decypharr
./decypharr --config /path/to/config/folder
@@ -109,4 +102,30 @@ You can also configure Decypharr through the web interface, but it's recommended
"log_level": "info",
"port": "8282"
}
```
```
### Notes for Docker Users
- Ensure that the `/mnt/` directory is mounted correctly to access your media files.
- The `./config/` directory should contain your `config.json` file.
- You can adjust the `PUID` and `PGID` environment variables to match your user and group IDs for proper file permissions.
- The `UMASK` environment variable can be set to control file permissions created by Decypharr.
##### Health Checks
- Health checks are disabled by default. You can enable them by adding a `healthcheck` section in your `docker-compose.yml` file.
- Health checks check the availability of several parts of the application:
- The main web interface
- The qBittorrent API
- The WebDAV server (if enabled). You should disable health checks for the initial indexes as they can take a long time to complete.
```yaml
services:
decypharr:
...
...
healthcheck:
test: ["CMD", "/usr/bin/healthcheck", "--config", "/app/"]
interval: 5s
timeout: 10s
retries: 3
```

View File

@@ -98,6 +98,10 @@ func (c *Config) AuthFile() string {
return filepath.Join(c.Path, "auth.json")
}
func (c *Config) TorrentsFile() string {
return filepath.Join(c.Path, "torrents.json")
}
func (c *Config) loadConfig() error {
// Load the config file
if configPath == "" {

View File

@@ -1,7 +1,10 @@
package utils
import (
"fmt"
"io"
"net/url"
"os"
"strings"
)
@@ -19,3 +22,65 @@ func PathUnescape(path string) string {
return unescapedPath
}
// PreCacheFile warms the OS page cache for each path in filePaths by
// reading the first 256KB of the file plus a 64KB window at the 1MB
// offset, in 16KB reads. Paths that no longer exist are skipped
// silently (the *arr apps may have moved the file already).
func PreCacheFile(filePaths []string) error {
	if len(filePaths) == 0 {
		return fmt.Errorf("no file paths provided")
	}
	warm := func(path string) error {
		f, err := os.Open(path)
		if err != nil {
			if os.IsNotExist(err) {
				// File has probably been moved by arr, return silently
				return nil
			}
			return fmt.Errorf("failed to open file: %s: %v", path, err)
		}
		defer f.Close()
		// Header first (first 256KB), then a small probe at the 1MB mark.
		if err := readSmallChunks(f, 0, 256*1024, 16*1024); err != nil {
			return err
		}
		return readSmallChunks(f, 1024*1024, 64*1024, 16*1024)
	}
	for _, p := range filePaths {
		if err := warm(p); err != nil {
			return err
		}
	}
	return nil
}
func readSmallChunks(file *os.File, startPos int64, totalToRead int, chunkSize int) error {
_, err := file.Seek(startPos, 0)
if err != nil {
return err
}
buf := make([]byte, chunkSize)
bytesRemaining := totalToRead
for bytesRemaining > 0 {
toRead := chunkSize
if bytesRemaining < chunkSize {
toRead = bytesRemaining
}
n, err := file.Read(buf[:toRead])
if err != nil {
if err == io.EOF {
break
}
return err
}
bytesRemaining -= n
}
return nil
}

View File

@@ -25,11 +25,11 @@ var (
)
type Magnet struct {
Name string
InfoHash string
Size int64
Link string
File []byte
Name string `json:"name"`
InfoHash string `json:"infoHash"`
Size int64 `json:"size"`
Link string `json:"link"`
File []byte `json:"-"`
}
func (m *Magnet) IsTorrent() bool {
@@ -83,7 +83,6 @@ func GetMagnetFromBytes(torrentData []byte) (*Magnet, error) {
if err != nil {
return nil, err
}
log.Println("InfoHash: ", infoHash)
magnet := &Magnet{
InfoHash: infoHash,
Name: info.Name,

View File

@@ -11,7 +11,6 @@ import (
"github.com/sirrobot01/decypharr/internal/request"
"io"
"net/http"
"strconv"
"strings"
"sync"
"time"
@@ -121,10 +120,10 @@ type Storage struct {
logger zerolog.Logger
}
func (as *Storage) Cleanup() {
as.mu.Lock()
defer as.mu.Unlock()
as.Arrs = make(map[string]*Arr)
func (s *Storage) Cleanup() {
s.mu.Lock()
defer s.mu.Unlock()
s.Arrs = make(map[string]*Arr)
}
func InferType(host, name string) Type {
@@ -154,26 +153,26 @@ func NewStorage() *Storage {
}
}
func (as *Storage) AddOrUpdate(arr *Arr) {
as.mu.Lock()
defer as.mu.Unlock()
func (s *Storage) AddOrUpdate(arr *Arr) {
s.mu.Lock()
defer s.mu.Unlock()
if arr.Name == "" {
return
}
as.Arrs[arr.Name] = arr
s.Arrs[arr.Name] = arr
}
func (as *Storage) Get(name string) *Arr {
as.mu.Lock()
defer as.mu.Unlock()
return as.Arrs[name]
func (s *Storage) Get(name string) *Arr {
s.mu.Lock()
defer s.mu.Unlock()
return s.Arrs[name]
}
func (as *Storage) GetAll() []*Arr {
as.mu.Lock()
defer as.mu.Unlock()
arrs := make([]*Arr, 0, len(as.Arrs))
for _, arr := range as.Arrs {
func (s *Storage) GetAll() []*Arr {
s.mu.Lock()
defer s.mu.Unlock()
arrs := make([]*Arr, 0, len(s.Arrs))
for _, arr := range s.Arrs {
if arr.Host != "" && arr.Token != "" {
arrs = append(arrs, arr)
}
@@ -181,19 +180,19 @@ func (as *Storage) GetAll() []*Arr {
return arrs
}
func (as *Storage) Clear() {
as.mu.Lock()
defer as.mu.Unlock()
as.Arrs = make(map[string]*Arr)
func (s *Storage) Clear() {
s.mu.Lock()
defer s.mu.Unlock()
s.Arrs = make(map[string]*Arr)
}
func (as *Storage) StartSchedule(ctx context.Context) error {
func (s *Storage) StartSchedule(ctx context.Context) error {
ticker := time.NewTicker(10 * time.Second)
select {
case <-ticker.C:
as.cleanupArrsQueue()
s.cleanupArrsQueue()
case <-ctx.Done():
ticker.Stop()
return nil
@@ -201,9 +200,9 @@ func (as *Storage) StartSchedule(ctx context.Context) error {
return nil
}
func (as *Storage) cleanupArrsQueue() {
func (s *Storage) cleanupArrsQueue() {
arrs := make([]*Arr, 0)
for _, arr := range as.Arrs {
for _, arr := range s.Arrs {
if !arr.Cleanup {
continue
}
@@ -212,26 +211,18 @@ func (as *Storage) cleanupArrsQueue() {
if len(arrs) > 0 {
for _, arr := range arrs {
if err := arr.CleanupQueue(); err != nil {
as.logger.Error().Err(err).Msgf("Failed to cleanup arr %s", arr.Name)
s.logger.Error().Err(err).Msgf("Failed to cleanup arr %s", arr.Name)
}
}
}
}
func (a *Arr) Refresh() error {
func (a *Arr) Refresh() {
payload := struct {
Name string `json:"name"`
}{
Name: "RefreshMonitoredDownloads",
}
resp, err := a.Request(http.MethodPost, "api/v3/command", payload)
if err == nil && resp != nil {
statusOk := strconv.Itoa(resp.StatusCode)[0] == '2'
if statusOk {
return nil
}
}
return fmt.Errorf("failed to refresh: %v", err)
_, _ = a.Request(http.MethodPost, "api/v3/command", payload)
}

View File

@@ -205,5 +205,4 @@ func (a *Arr) Import(path string, seriesId int, seasons []int) (io.ReadCloser, e
}
defer resp.Body.Close()
return resp.Body, nil
}

218
pkg/debrid/debrid.go Normal file
View File

@@ -0,0 +1,218 @@
package debrid
import (
"context"
"fmt"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/logger"
"github.com/sirrobot01/decypharr/internal/utils"
"github.com/sirrobot01/decypharr/pkg/arr"
"github.com/sirrobot01/decypharr/pkg/debrid/providers/alldebrid"
"github.com/sirrobot01/decypharr/pkg/debrid/providers/debrid_link"
"github.com/sirrobot01/decypharr/pkg/debrid/providers/realdebrid"
"github.com/sirrobot01/decypharr/pkg/debrid/providers/torbox"
"github.com/sirrobot01/decypharr/pkg/debrid/store"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
"strings"
"sync"
)
// Storage holds the active debrid provider clients and their optional
// WebDAV caches, each map guarded by its own mutex.
type Storage struct {
	clients     map[string]types.Client
	clientsLock sync.Mutex
	caches      map[string]*store.Cache
	cachesLock  sync.Mutex
	// LastUsed is the name of the client most recently used to submit a
	// torrent. NOTE(review): it is read/written without holding a lock —
	// confirm single-goroutine access or guard it.
	LastUsed string
}
// NewStorage builds a Storage from the configured debrid providers.
// Providers that fail to connect are logged and skipped; providers with
// UseWebDav enabled additionally get a WebDAV cache.
func NewStorage() *Storage {
	cfg := config.Get()
	_logger := logger.Default()
	clients := make(map[string]types.Client, len(cfg.Debrids))
	caches := make(map[string]*store.Cache)
	for _, dc := range cfg.Debrids {
		client, err := createDebridClient(dc)
		if err != nil {
			_logger.Error().Err(err).Str("Debrid", dc.Name).Msg("failed to connect to debrid client")
			continue
		}
		_log := client.GetLogger()
		if dc.UseWebDav {
			caches[dc.Name] = store.NewDebridCache(dc, client)
			_log.Info().Msg("Debrid Service started with WebDAV")
		} else {
			_log.Info().Msg("Debrid Service started")
		}
		clients[dc.Name] = client
	}
	return &Storage{
		clients:  clients,
		caches:   caches,
		LastUsed: "",
	}
}
// GetClient returns the client registered under name, or nil when no
// such client exists.
func (d *Storage) GetClient(name string) types.Client {
	d.clientsLock.Lock()
	defer d.clientsLock.Unlock()
	if client, ok := d.clients[name]; ok {
		return client
	}
	return nil
}
// Reset drops every client and cache, returning the Storage to an empty
// state (used when the configuration is reloaded).
func (d *Storage) Reset() {
	d.clientsLock.Lock()
	d.clients = make(map[string]types.Client)
	d.clientsLock.Unlock()

	d.cachesLock.Lock()
	d.caches = make(map[string]*store.Cache)
	d.cachesLock.Unlock()

	// NOTE(review): LastUsed is written without a lock here and in
	// ProcessTorrent — confirm this is only touched from one goroutine.
	d.LastUsed = ""
}
// GetClients returns a shallow copy of the client map so callers can
// iterate without holding the internal lock.
func (d *Storage) GetClients() map[string]types.Client {
	d.clientsLock.Lock()
	defer d.clientsLock.Unlock()
	out := make(map[string]types.Client, len(d.clients))
	for name, client := range d.clients {
		out[name] = client
	}
	return out
}
// GetCaches returns a shallow copy of the cache map so callers can
// iterate without holding the internal lock.
func (d *Storage) GetCaches() map[string]*store.Cache {
	// Bug fix: this previously locked clientsLock, leaving the caches map
	// unguarded against the cachesLock-protected writers (Reset), which
	// is a data race.
	d.cachesLock.Lock()
	defer d.cachesLock.Unlock()
	cachesCopy := make(map[string]*store.Cache, len(d.caches))
	for name, cache := range d.caches {
		cachesCopy[name] = cache
	}
	return cachesCopy
}
// FilterClients returns, as a new map, every client for which filter
// reports true.
func (d *Storage) FilterClients(filter func(types.Client) bool) map[string]types.Client {
	d.clientsLock.Lock()
	defer d.clientsLock.Unlock()
	matched := make(map[string]types.Client)
	for name, client := range d.clients {
		if !filter(client) {
			continue
		}
		matched[name] = client
	}
	return matched
}
// FilterCaches returns, as a new map, every cache for which filter
// reports true.
func (d *Storage) FilterCaches(filter func(*store.Cache) bool) map[string]*store.Cache {
	d.cachesLock.Lock()
	defer d.cachesLock.Unlock()
	matched := make(map[string]*store.Cache)
	for name, cache := range d.caches {
		if !filter(cache) {
			continue
		}
		matched[name] = cache
	}
	return matched
}
// createDebridClient instantiates the provider client matching dc.Name.
// Unrecognized names fall back to the Real-Debrid client.
func createDebridClient(dc config.Debrid) (types.Client, error) {
	switch dc.Name {
	case "torbox":
		return torbox.New(dc)
	case "debridlink":
		return debrid_link.New(dc)
	case "alldebrid":
		return alldebrid.New(dc)
	default: // includes "realdebrid"
		return realdebrid.New(dc)
	}
}
// ProcessTorrent submits magnet to the eligible debrid clients, one at a
// time, returning the first successfully created torrent. When
// selectedDebrid is non-empty only that client is tried.
// DownloadUncached precedence: explicit override first, then the arr's
// setting, then each client's own default. On total failure the
// per-client errors are aggregated into the returned error.
//
// NOTE(review): ctx is currently unused, and clients is a map, so the
// order in which clients are tried is not deterministic — confirm that
// is intended.
func ProcessTorrent(ctx context.Context, store *Storage, selectedDebrid string, magnet *utils.Magnet, a *arr.Arr, isSymlink, overrideDownloadUncached bool) (*types.Torrent, error) {
	debridTorrent := &types.Torrent{
		InfoHash: magnet.InfoHash,
		Magnet:   magnet,
		Name:     magnet.Name,
		Arr:      a,
		Size:     magnet.Size,
		Files:    make(map[string]types.File),
	}
	clients := store.FilterClients(func(c types.Client) bool {
		return selectedDebrid == "" || c.GetName() == selectedDebrid
	})
	if len(clients) == 0 {
		return nil, fmt.Errorf("no debrid clients available")
	}
	// Override first, arr second, debrid third
	if overrideDownloadUncached {
		debridTorrent.DownloadUncached = true
	} else if a.DownloadUncached != nil {
		// Arr cached is set
		debridTorrent.DownloadUncached = *a.DownloadUncached
	} else {
		debridTorrent.DownloadUncached = false
	}
	errs := make([]error, 0, len(clients))
	for index, db := range clients {
		_logger := db.GetLogger()
		_logger.Info().
			Str("Debrid", db.GetName()).
			Str("Arr", a.Name).
			Str("Hash", debridTorrent.InfoHash).
			Str("Name", debridTorrent.Name).
			Msg("Processing torrent")
		if !overrideDownloadUncached && a.DownloadUncached == nil {
			debridTorrent.DownloadUncached = db.GetDownloadUncached()
		}
		dbt, err := db.SubmitMagnet(debridTorrent)
		if err != nil || dbt == nil || dbt.Id == "" {
			if err == nil {
				// Bug fix: previously a nil error could be appended here
				// (dbt nil or empty id with err == nil), which later
				// panics on err.Error() during aggregation below.
				err = fmt.Errorf("%s: submit returned no torrent id", db.GetName())
			}
			errs = append(errs, err)
			continue
		}
		dbt.Arr = a
		_logger.Info().Str("id", dbt.Id).Msgf("Torrent: %s submitted to %s", dbt.Name, db.GetName())
		store.LastUsed = index
		torrent, err := db.CheckStatus(dbt, isSymlink)
		if err != nil && torrent != nil && torrent.Id != "" {
			// Delete the torrent if it was not downloaded
			go func(id string) {
				_ = db.DeleteTorrent(id)
			}(torrent.Id)
		}
		return torrent, err
	}
	// The loop either returned or appended a (non-nil) error per client,
	// and len(clients) > 0, so errs is never empty here; the old
	// len(errs) == 0 branch was unreachable and has been removed.
	if len(errs) == 1 {
		return nil, fmt.Errorf("failed to process torrent: %w", errs[0])
	}
	errStrings := make([]string, 0, len(errs))
	for _, err := range errs {
		errStrings = append(errStrings, err.Error())
	}
	return nil, fmt.Errorf("failed to process torrent: %s", strings.Join(errStrings, ", "))
}

View File

@@ -1,103 +0,0 @@
package debrid
import (
"fmt"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/utils"
"github.com/sirrobot01/decypharr/pkg/arr"
"github.com/sirrobot01/decypharr/pkg/debrid/alldebrid"
"github.com/sirrobot01/decypharr/pkg/debrid/debrid_link"
"github.com/sirrobot01/decypharr/pkg/debrid/realdebrid"
"github.com/sirrobot01/decypharr/pkg/debrid/torbox"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
"strings"
)
func createDebridClient(dc config.Debrid) (types.Client, error) {
switch dc.Name {
case "realdebrid":
return realdebrid.New(dc)
case "torbox":
return torbox.New(dc)
case "debridlink":
return debrid_link.New(dc)
case "alldebrid":
return alldebrid.New(dc)
default:
return realdebrid.New(dc)
}
}
func ProcessTorrent(d *Engine, magnet *utils.Magnet, a *arr.Arr, isSymlink, overrideDownloadUncached bool) (*types.Torrent, error) {
debridTorrent := &types.Torrent{
InfoHash: magnet.InfoHash,
Magnet: magnet,
Name: magnet.Name,
Arr: a,
Size: magnet.Size,
Files: make(map[string]types.File),
}
errs := make([]error, 0, len(d.Clients))
// Override first, arr second, debrid third
if overrideDownloadUncached {
debridTorrent.DownloadUncached = true
} else if a.DownloadUncached != nil {
// Arr cached is set
debridTorrent.DownloadUncached = *a.DownloadUncached
} else {
debridTorrent.DownloadUncached = false
}
for index, db := range d.Clients {
logger := db.GetLogger()
logger.Info().Str("Debrid", db.GetName()).Str("Hash", debridTorrent.InfoHash).Msg("Processing torrent")
if !overrideDownloadUncached && a.DownloadUncached == nil {
debridTorrent.DownloadUncached = db.GetDownloadUncached()
}
//if db.GetCheckCached() {
// hash, exists := db.IsAvailable([]string{debridTorrent.InfoHash})[debridTorrent.InfoHash]
// if !exists || !hash {
// logger.Info().Msgf("Torrent: %s is not cached", debridTorrent.Name)
// continue
// } else {
// logger.Info().Msgf("Torrent: %s is cached(or downloading)", debridTorrent.Name)
// }
//}
dbt, err := db.SubmitMagnet(debridTorrent)
if err != nil || dbt == nil || dbt.Id == "" {
errs = append(errs, err)
continue
}
dbt.Arr = a
logger.Info().Str("id", dbt.Id).Msgf("Torrent: %s submitted to %s", dbt.Name, db.GetName())
d.LastUsed = index
torrent, err := db.CheckStatus(dbt, isSymlink)
if err != nil && torrent != nil && torrent.Id != "" {
// Delete the torrent if it was not downloaded
go func(id string) {
_ = db.DeleteTorrent(id)
}(torrent.Id)
}
return torrent, err
}
if len(errs) == 0 {
return nil, fmt.Errorf("failed to process torrent: no clients available")
}
if len(errs) == 1 {
return nil, fmt.Errorf("failed to process torrent: %w", errs[0])
} else {
errStrings := make([]string, 0, len(errs))
for _, err := range errs {
errStrings = append(errStrings, err.Error())
}
return nil, fmt.Errorf("failed to process torrent: %s", strings.Join(errStrings, ", "))
}
}

View File

@@ -1,68 +0,0 @@
package debrid
import (
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/logger"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
"sync"
)
type Engine struct {
Clients map[string]types.Client
clientsMu sync.Mutex
Caches map[string]*Cache
cacheMu sync.Mutex
LastUsed string
}
func NewEngine() *Engine {
cfg := config.Get()
clients := make(map[string]types.Client)
_logger := logger.Default()
caches := make(map[string]*Cache)
for _, dc := range cfg.Debrids {
client, err := createDebridClient(dc)
if err != nil {
_logger.Error().Err(err).Str("Debrid", dc.Name).Msg("failed to connect to debrid client")
continue
}
_log := client.GetLogger()
if dc.UseWebDav {
caches[dc.Name] = New(dc, client)
_log.Info().Msg("Debrid Service started with WebDAV")
} else {
_log.Info().Msg("Debrid Service started")
}
clients[dc.Name] = client
}
d := &Engine{
Clients: clients,
LastUsed: "",
Caches: caches,
}
return d
}
func (d *Engine) GetClient(name string) types.Client {
d.clientsMu.Lock()
defer d.clientsMu.Unlock()
return d.Clients[name]
}
func (d *Engine) Reset() {
d.clientsMu.Lock()
d.Clients = make(map[string]types.Client)
d.clientsMu.Unlock()
d.cacheMu.Lock()
d.Caches = make(map[string]*Cache)
d.cacheMu.Unlock()
}
func (d *Engine) GetDebrids() map[string]types.Client {
return d.Clients
}

View File

@@ -1 +0,0 @@
package debrid

View File

@@ -5,6 +5,7 @@ import (
"encoding/json"
"errors"
"fmt"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
"io"
"net/http"
gourl "net/url"
@@ -20,8 +21,6 @@ import (
"github.com/sirrobot01/decypharr/internal/logger"
"github.com/sirrobot01/decypharr/internal/request"
"github.com/sirrobot01/decypharr/internal/utils"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
"github.com/sirrobot01/decypharr/pkg/rar"
)

View File

@@ -1,4 +1,4 @@
package debrid
package store
import (
"bufio"
@@ -6,6 +6,7 @@ import (
"context"
"errors"
"fmt"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
"os"
"path"
"path/filepath"
@@ -22,7 +23,6 @@ import (
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/logger"
"github.com/sirrobot01/decypharr/internal/utils"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
_ "time/tzdata"
)
@@ -108,7 +108,7 @@ type Cache struct {
customFolders []string
}
func New(dc config.Debrid, client types.Client) *Cache {
func NewDebridCache(dc config.Debrid, client types.Client) *Cache {
cfg := config.Get()
cetSc, err := gocron.NewScheduler(gocron.WithLocation(time.UTC))
if err != nil {
@@ -691,7 +691,7 @@ func (c *Cache) ProcessTorrent(t *types.Torrent) error {
return nil
}
func (c *Cache) AddTorrent(t *types.Torrent) error {
func (c *Cache) Add(t *types.Torrent) error {
if len(t.Files) == 0 {
if err := c.client.UpdateTorrent(t); err != nil {
return fmt.Errorf("failed to update torrent: %w", err)

View File

@@ -1,14 +1,14 @@
package debrid
package store
import (
"errors"
"fmt"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
"sync"
"time"
"github.com/sirrobot01/decypharr/internal/request"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
)
type linkCache struct {

View File

@@ -1,4 +1,4 @@
package debrid
package store
import (
"github.com/sirrobot01/decypharr/pkg/debrid/types"

View File

@@ -1,4 +1,4 @@
package debrid
package store
import (
"context"

View File

@@ -1,4 +1,4 @@
package debrid
package store
import (
"context"

View File

@@ -1,4 +1,4 @@
package debrid
package store
import (
"fmt"

View File

@@ -1,4 +1,4 @@
package debrid
package store
import (
"context"

1
pkg/debrid/store/xml.go Normal file
View File

@@ -0,0 +1 @@
package store

View File

@@ -34,10 +34,12 @@ type Torrent struct {
Debrid string `json:"debrid"`
Arr *arr.Arr `json:"arr"`
Mu sync.Mutex `json:"-"`
SizeDownloaded int64 `json:"-"` // This is used for local download
DownloadUncached bool `json:"-"`
Arr *arr.Arr `json:"arr"`
SizeDownloaded int64 `json:"-"` // This is used for local download
DownloadUncached bool `json:"-"`
sync.Mutex
}
type DownloadLink struct {

127
pkg/qbit/context.go Normal file
View File

@@ -0,0 +1,127 @@
package qbit
import (
"context"
"encoding/base64"
"github.com/go-chi/chi/v5"
"github.com/sirrobot01/decypharr/pkg/arr"
"github.com/sirrobot01/decypharr/pkg/store"
"net/http"
"strings"
)
// contextKey is a private type for request-context keys so they cannot
// collide with keys set by other packages.
type contextKey string

const (
	// categoryKey holds the qBittorrent category string for the request.
	categoryKey contextKey = "category"
	// hashesKey holds the torrent info-hashes parsed from the request.
	hashesKey contextKey = "hashes"
	// arrKey holds the *arr.Arr resolved for the request's category.
	arrKey contextKey = "arr"
)

// getCategory returns the category stored on ctx, or "" when absent.
func getCategory(ctx context.Context) string {
	if category, ok := ctx.Value(categoryKey).(string); ok {
		return category
	}
	return ""
}

// getHashes returns the hash list stored on ctx, or nil when absent.
func getHashes(ctx context.Context) []string {
	if hashes, ok := ctx.Value(hashesKey).([]string); ok {
		return hashes
	}
	return nil
}

// getArr returns the arr instance stored on ctx, or nil when absent.
func getArr(ctx context.Context) *arr.Arr {
	if a, ok := ctx.Value(arrKey).(*arr.Arr); ok {
		return a
	}
	return nil
}
func decodeAuthHeader(header string) (string, string, error) {
encodedTokens := strings.Split(header, " ")
if len(encodedTokens) != 2 {
return "", "", nil
}
encodedToken := encodedTokens[1]
bytes, err := base64.StdEncoding.DecodeString(encodedToken)
if err != nil {
return "", "", err
}
bearer := string(bytes)
colonIndex := strings.LastIndex(bearer, ":")
host := bearer[:colonIndex]
token := bearer[colonIndex+1:]
return host, token, nil
}
// categoryContext resolves the torrent category from (in order) the
// query string, the URL-encoded form, and the multipart form, then
// stores the trimmed value on the request context under categoryKey.
func (q *QBit) categoryContext(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Bug fix: strings.Trim with an empty cutset is a no-op;
		// TrimSpace is what was intended. A whitespace-only query value
		// now correctly falls through to the form lookups.
		category := strings.TrimSpace(r.URL.Query().Get("category"))
		if category == "" {
			// Get from form
			_ = r.ParseForm()
			category = r.Form.Get("category")
			if category == "" {
				// Get from multipart form (32MB in-memory cap)
				_ = r.ParseMultipartForm(32 << 20)
				category = r.FormValue("category")
			}
		}
		ctx := context.WithValue(r.Context(), categoryKey, strings.TrimSpace(category))
		next.ServeHTTP(w, r.WithContext(ctx))
	})
}
// authContext resolves (or lazily creates) the arr instance for the
// request's category, optionally overriding its host/token from the
// Basic Authorization header, persists it into the shared arr storage,
// and stores it on the request context under arrKey.
func (q *QBit) authContext(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		host, token, err := decodeAuthHeader(r.Header.Get("Authorization"))
		category := getCategory(r.Context())
		arrs := store.GetStore().GetArr()
		// Check if arr exists
		a := arrs.Get(category)
		if a == nil {
			// Unknown category: create a placeholder arr with
			// download-uncached disabled.
			downloadUncached := false
			a = arr.New(category, "", "", false, false, &downloadUncached)
		}
		// Only apply header-supplied credentials when the header decoded
		// cleanly; empty fields leave the existing values untouched.
		if err == nil {
			host = strings.TrimSpace(host)
			if host != "" {
				a.Host = host
			}
			token = strings.TrimSpace(token)
			if token != "" {
				a.Token = token
			}
		}
		// Persist the (possibly updated) arr before handing off.
		arrs.AddOrUpdate(a)
		ctx := context.WithValue(r.Context(), arrKey, a)
		next.ServeHTTP(w, r.WithContext(ctx))
	})
}
// hashesContext pulls torrent hashes from the pipe-separated "hashes"
// URL parameter or, failing that, from the request form; each hash is
// trimmed and the slice is stored on the request context under
// hashesKey.
func hashesContext(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		var hashes []string
		if raw := chi.URLParam(r, "hashes"); raw != "" {
			hashes = strings.Split(raw, "|")
		}
		if hashes == nil {
			// Fall back to the form body.
			_ = r.ParseForm()
			hashes = r.Form["hashes"]
		}
		for i := range hashes {
			hashes[i] = strings.TrimSpace(hashes[i])
		}
		ctx := context.WithValue(r.Context(), hashesKey, hashes)
		next.ServeHTTP(w, r.WithContext(ctx))
	})
}

View File

@@ -1,107 +1,16 @@
package qbit
import (
"context"
"encoding/base64"
"github.com/go-chi/chi/v5"
"github.com/sirrobot01/decypharr/internal/request"
"github.com/sirrobot01/decypharr/pkg/arr"
"github.com/sirrobot01/decypharr/pkg/service"
"net/http"
"path/filepath"
"strings"
)
func decodeAuthHeader(header string) (string, string, error) {
encodedTokens := strings.Split(header, " ")
if len(encodedTokens) != 2 {
return "", "", nil
}
encodedToken := encodedTokens[1]
bytes, err := base64.StdEncoding.DecodeString(encodedToken)
if err != nil {
return "", "", err
}
bearer := string(bytes)
colonIndex := strings.LastIndex(bearer, ":")
host := bearer[:colonIndex]
token := bearer[colonIndex+1:]
return host, token, nil
}
func (q *QBit) CategoryContext(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
category := strings.Trim(r.URL.Query().Get("category"), "")
if category == "" {
// Get from form
_ = r.ParseForm()
category = r.Form.Get("category")
if category == "" {
// Get from multipart form
_ = r.ParseMultipartForm(32 << 20)
category = r.FormValue("category")
}
}
ctx := context.WithValue(r.Context(), "category", strings.TrimSpace(category))
next.ServeHTTP(w, r.WithContext(ctx))
})
}
func (q *QBit) authContext(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
host, token, err := decodeAuthHeader(r.Header.Get("Authorization"))
category := r.Context().Value("category").(string)
svc := service.GetService()
// Check if arr exists
a := svc.Arr.Get(category)
if a == nil {
downloadUncached := false
a = arr.New(category, "", "", false, false, &downloadUncached)
}
if err == nil {
host = strings.TrimSpace(host)
if host != "" {
a.Host = host
}
token = strings.TrimSpace(token)
if token != "" {
a.Token = token
}
}
svc.Arr.AddOrUpdate(a)
ctx := context.WithValue(r.Context(), "arr", a)
next.ServeHTTP(w, r.WithContext(ctx))
})
}
func HashesCtx(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
_hashes := chi.URLParam(r, "hashes")
var hashes []string
if _hashes != "" {
hashes = strings.Split(_hashes, "|")
}
if hashes == nil {
// Get hashes from form
_ = r.ParseForm()
hashes = r.Form["hashes"]
}
for i, hash := range hashes {
hashes[i] = strings.TrimSpace(hash)
}
ctx := context.WithValue(r.Context(), "hashes", hashes)
next.ServeHTTP(w, r.WithContext(ctx))
})
}
func (q *QBit) handleLogin(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
_arr := ctx.Value("arr").(*arr.Arr)
_arr := getArr(ctx)
if _arr == nil {
// No arr
_, _ = w.Write([]byte("Ok."))
@@ -122,7 +31,7 @@ func (q *QBit) handleWebAPIVersion(w http.ResponseWriter, r *http.Request) {
}
func (q *QBit) handlePreferences(w http.ResponseWriter, r *http.Request) {
preferences := NewAppPreferences()
preferences := getAppPreferences()
preferences.WebUiUsername = q.Username
preferences.SavePath = q.DownloadFolder
@@ -150,10 +59,10 @@ func (q *QBit) handleShutdown(w http.ResponseWriter, r *http.Request) {
func (q *QBit) handleTorrentsInfo(w http.ResponseWriter, r *http.Request) {
//log all url params
ctx := r.Context()
category := ctx.Value("category").(string)
category := getCategory(ctx)
filter := strings.Trim(r.URL.Query().Get("filter"), "")
hashes, _ := ctx.Value("hashes").([]string)
torrents := q.Storage.GetAllSorted(category, filter, hashes, "added_on", false)
hashes := getHashes(ctx)
torrents := q.storage.GetAllSorted(category, filter, hashes, "added_on", false)
request.JSONResponse(w, torrents, http.StatusOK)
}
@@ -180,9 +89,13 @@ func (q *QBit) handleTorrentsAdd(w http.ResponseWriter, r *http.Request) {
}
isSymlink := strings.ToLower(r.FormValue("sequentialDownload")) != "true"
debridName := r.FormValue("debrid")
category := r.FormValue("category")
_arr := getArr(ctx)
if _arr == nil {
_arr = arr.New(category, "", "", false, false, nil)
}
atleastOne := false
ctx = context.WithValue(ctx, "isSymlink", isSymlink)
// Handle magnet URLs
if urls := r.FormValue("urls"); urls != "" {
@@ -191,7 +104,7 @@ func (q *QBit) handleTorrentsAdd(w http.ResponseWriter, r *http.Request) {
urlList = append(urlList, strings.TrimSpace(u))
}
for _, url := range urlList {
if err := q.AddMagnet(ctx, url, category); err != nil {
if err := q.addMagnet(ctx, url, _arr, debridName, isSymlink); err != nil {
q.logger.Info().Msgf("Error adding magnet: %v", err)
http.Error(w, err.Error(), http.StatusBadRequest)
return
@@ -204,7 +117,7 @@ func (q *QBit) handleTorrentsAdd(w http.ResponseWriter, r *http.Request) {
if r.MultipartForm != nil && r.MultipartForm.File != nil {
if files := r.MultipartForm.File["torrents"]; len(files) > 0 {
for _, fileHeader := range files {
if err := q.AddTorrent(ctx, fileHeader, category); err != nil {
if err := q.addTorrent(ctx, fileHeader, _arr, debridName, isSymlink); err != nil {
q.logger.Info().Msgf("Error adding torrent: %v", err)
http.Error(w, err.Error(), http.StatusBadRequest)
return
@@ -224,14 +137,14 @@ func (q *QBit) handleTorrentsAdd(w http.ResponseWriter, r *http.Request) {
func (q *QBit) handleTorrentsDelete(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
hashes, _ := ctx.Value("hashes").([]string)
hashes := getHashes(ctx)
if len(hashes) == 0 {
http.Error(w, "No hashes provided", http.StatusBadRequest)
return
}
category := ctx.Value("category").(string)
category := getCategory(ctx)
for _, hash := range hashes {
q.Storage.Delete(hash, category, false)
q.storage.Delete(hash, category, false)
}
w.WriteHeader(http.StatusOK)
@@ -239,10 +152,10 @@ func (q *QBit) handleTorrentsDelete(w http.ResponseWriter, r *http.Request) {
func (q *QBit) handleTorrentsPause(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
hashes, _ := ctx.Value("hashes").([]string)
category := ctx.Value("category").(string)
hashes := getHashes(ctx)
category := getCategory(ctx)
for _, hash := range hashes {
torrent := q.Storage.Get(hash, category)
torrent := q.storage.Get(hash, category)
if torrent == nil {
continue
}
@@ -254,10 +167,10 @@ func (q *QBit) handleTorrentsPause(w http.ResponseWriter, r *http.Request) {
func (q *QBit) handleTorrentsResume(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
hashes, _ := ctx.Value("hashes").([]string)
category := ctx.Value("category").(string)
hashes := getHashes(ctx)
category := getCategory(ctx)
for _, hash := range hashes {
torrent := q.Storage.Get(hash, category)
torrent := q.storage.Get(hash, category)
if torrent == nil {
continue
}
@@ -269,10 +182,10 @@ func (q *QBit) handleTorrentsResume(w http.ResponseWriter, r *http.Request) {
func (q *QBit) handleTorrentRecheck(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
hashes, _ := ctx.Value("hashes").([]string)
category := ctx.Value("category").(string)
hashes := getHashes(ctx)
category := getCategory(ctx)
for _, hash := range hashes {
torrent := q.Storage.Get(hash, category)
torrent := q.storage.Get(hash, category)
if torrent == nil {
continue
}
@@ -315,7 +228,7 @@ func (q *QBit) handleCreateCategory(w http.ResponseWriter, r *http.Request) {
func (q *QBit) handleTorrentProperties(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
hash := r.URL.Query().Get("hash")
torrent := q.Storage.Get(hash, ctx.Value("category").(string))
torrent := q.storage.Get(hash, getCategory(ctx))
properties := q.GetTorrentProperties(torrent)
request.JSONResponse(w, properties, http.StatusOK)
@@ -324,22 +237,22 @@ func (q *QBit) handleTorrentProperties(w http.ResponseWriter, r *http.Request) {
func (q *QBit) handleTorrentFiles(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
hash := r.URL.Query().Get("hash")
torrent := q.Storage.Get(hash, ctx.Value("category").(string))
torrent := q.storage.Get(hash, getCategory(ctx))
if torrent == nil {
return
}
files := q.GetTorrentFiles(torrent)
files := q.getTorrentFiles(torrent)
request.JSONResponse(w, files, http.StatusOK)
}
func (q *QBit) handleSetCategory(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
category := ctx.Value("category").(string)
hashes, _ := ctx.Value("hashes").([]string)
torrents := q.Storage.GetAll("", "", hashes)
category := getCategory(ctx)
hashes := getHashes(ctx)
torrents := q.storage.GetAll("", "", hashes)
for _, torrent := range torrents {
torrent.Category = category
q.Storage.AddOrUpdate(torrent)
q.storage.AddOrUpdate(torrent)
}
request.JSONResponse(w, nil, http.StatusOK)
}
@@ -351,33 +264,33 @@ func (q *QBit) handleAddTorrentTags(w http.ResponseWriter, r *http.Request) {
return
}
ctx := r.Context()
hashes, _ := ctx.Value("hashes").([]string)
hashes := getHashes(ctx)
tags := strings.Split(r.FormValue("tags"), ",")
for i, tag := range tags {
tags[i] = strings.TrimSpace(tag)
}
torrents := q.Storage.GetAll("", "", hashes)
torrents := q.storage.GetAll("", "", hashes)
for _, t := range torrents {
q.SetTorrentTags(t, tags)
q.setTorrentTags(t, tags)
}
request.JSONResponse(w, nil, http.StatusOK)
}
func (q *QBit) handleRemoveTorrentTags(w http.ResponseWriter, r *http.Request) {
func (q *QBit) handleremoveTorrentTags(w http.ResponseWriter, r *http.Request) {
err := r.ParseForm()
if err != nil {
http.Error(w, "Failed to parse form data", http.StatusBadRequest)
return
}
ctx := r.Context()
hashes, _ := ctx.Value("hashes").([]string)
hashes := getHashes(ctx)
tags := strings.Split(r.FormValue("tags"), ",")
for i, tag := range tags {
tags[i] = strings.TrimSpace(tag)
}
torrents := q.Storage.GetAll("", "", hashes)
torrents := q.storage.GetAll("", "", hashes)
for _, torrent := range torrents {
q.RemoveTorrentTags(torrent, tags)
q.removeTorrentTags(torrent, tags)
}
request.JSONResponse(w, nil, http.StatusOK)
@@ -397,6 +310,6 @@ func (q *QBit) handleCreateTags(w http.ResponseWriter, r *http.Request) {
for i, tag := range tags {
tags[i] = strings.TrimSpace(tag)
}
q.AddTags(tags)
q.addTags(tags)
request.JSONResponse(w, nil, http.StatusOK)
}

View File

@@ -1,80 +0,0 @@
package qbit
import (
"github.com/sirrobot01/decypharr/internal/utils"
"github.com/sirrobot01/decypharr/pkg/debrid/debrid"
"github.com/sirrobot01/decypharr/pkg/service"
"time"
"github.com/google/uuid"
"github.com/sirrobot01/decypharr/pkg/arr"
)
// ImportRequest describes a single torrent import: the magnet to process,
// the arr instance it belongs to, and bookkeeping fields that record how the
// request progressed (failure/completion timestamps and reason).
type ImportRequest struct {
	ID        string        `json:"id"`   // unique request identifier (uuid)
	Path      string        `json:"path"` // target path, if any
	Magnet    *utils.Magnet `json:"magnet"`
	Arr       *arr.Arr      `json:"arr"`       // arr (sonarr/radarr) this import is for
	IsSymlink bool          `json:"isSymlink"` // symlink instead of copying/downloading files
	SeriesId  int           `json:"series"`
	Seasons   []int         `json:"seasons"`
	Episodes  []string      `json:"episodes"`
	// DownloadUncached forces the debrid to download even when not cached.
	DownloadUncached bool      `json:"downloadUncached"`
	Failed           bool      `json:"failed"`
	FailedAt         time.Time `json:"failedAt"`
	Reason           string    `json:"reason"` // human-readable failure reason
	Completed        bool      `json:"completed"`
	CompletedAt      time.Time `json:"completedAt"`
	Async            bool      `json:"async"`
}
// ManualImportResponseSchema mirrors the JSON payload an arr returns when a
// manual-import command is queued, so it can be decoded directly.
type ManualImportResponseSchema struct {
	Priority            string    `json:"priority"`
	Status              string    `json:"status"`
	Result              string    `json:"result"`
	Queued              time.Time `json:"queued"`
	Trigger             string    `json:"trigger"`
	SendUpdatesToClient bool      `json:"sendUpdatesToClient"`
	UpdateScheduledTask bool      `json:"updateScheduledTask"`
	Id                  int       `json:"id"` // command id assigned by the arr
}
// NewImportRequest builds a fresh, not-yet-processed ImportRequest for the
// given magnet and arr, with a newly generated unique ID.
func NewImportRequest(magnet *utils.Magnet, arr *arr.Arr, isSymlink, downloadUncached bool) *ImportRequest {
	req := &ImportRequest{
		ID:               uuid.NewString(),
		Magnet:           magnet,
		Arr:              arr,
		IsSymlink:        isSymlink,
		DownloadUncached: downloadUncached,
	}
	// Failed, Completed and Async start at their zero values (false).
	return req
}
// Fail marks the request as failed at the current time, recording why.
func (i *ImportRequest) Fail(reason string) {
	i.Failed = true
	i.Reason = reason
	i.FailedAt = time.Now()
}
// Complete marks the request as successfully finished, stamping the time.
func (i *ImportRequest) Complete() {
	i.CompletedAt = time.Now()
	i.Completed = true
}
// Process pushes the import request through the debrid pipeline: it creates
// a qbit torrent record from the magnet, submits the magnet to the debrid
// engine, persists the minimal torrent state, and then starts file
// processing asynchronously. Returns the debrid submission error, if any;
// errors during file processing are reported out-of-band by ProcessFiles.
func (i *ImportRequest) Process(q *QBit) (err error) {
	// Use this for now.
	// This sends the torrent to the arr
	svc := service.GetService()
	torrent := createTorrentFromMagnet(i.Magnet, i.Arr.Name, "manual")
	debridTorrent, err := debrid.ProcessTorrent(svc.Debrid, i.Magnet, i.Arr, i.IsSymlink, i.DownloadUncached)
	if err != nil {
		return err
	}
	torrent = q.UpdateTorrentMin(torrent, debridTorrent)
	q.Storage.AddOrUpdate(torrent)
	// Run file processing in its own goroutine so the caller is not blocked
	// while the debrid download completes.
	go q.ProcessFiles(torrent, debridTorrent, i.Arr, i.IsSymlink)
	return nil
}

View File

@@ -1,52 +1,38 @@
package qbit
import (
"cmp"
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/logger"
"os"
"path/filepath"
"github.com/sirrobot01/decypharr/pkg/store"
)
type QBit struct {
Username string `json:"username"`
Password string `json:"password"`
Port string `json:"port"`
DownloadFolder string `json:"download_folder"`
Categories []string `json:"categories"`
Storage *TorrentStorage
logger zerolog.Logger
Tags []string
RefreshInterval int
SkipPreCache bool
downloadSemaphore chan struct{}
Username string
Password string
DownloadFolder string
Categories []string
storage *store.TorrentStorage
logger zerolog.Logger
Tags []string
}
func New() *QBit {
_cfg := config.Get()
cfg := _cfg.QBitTorrent
port := cmp.Or(_cfg.Port, os.Getenv("QBIT_PORT"), "8282")
refreshInterval := cmp.Or(cfg.RefreshInterval, 10)
return &QBit{
Username: cfg.Username,
Password: cfg.Password,
Port: port,
DownloadFolder: cfg.DownloadFolder,
Categories: cfg.Categories,
Storage: NewTorrentStorage(filepath.Join(_cfg.Path, "torrents.json")),
logger: logger.New("qbit"),
RefreshInterval: refreshInterval,
SkipPreCache: cfg.SkipPreCache,
downloadSemaphore: make(chan struct{}, cmp.Or(cfg.MaxDownloads, 5)),
Username: cfg.Username,
Password: cfg.Password,
DownloadFolder: cfg.DownloadFolder,
Categories: cfg.Categories,
storage: store.GetStore().GetTorrentStorage(),
logger: logger.New("qbit"),
}
}
func (q *QBit) Reset() {
if q.Storage != nil {
q.Storage.Reset()
if q.storage != nil {
q.storage.Reset()
}
q.Tags = nil
close(q.downloadSemaphore)
}

View File

@@ -7,12 +7,12 @@ import (
func (q *QBit) Routes() http.Handler {
r := chi.NewRouter()
r.Use(q.CategoryContext)
r.Use(q.categoryContext)
r.Group(func(r chi.Router) {
r.Use(q.authContext)
r.Post("/auth/login", q.handleLogin)
r.Route("/torrents", func(r chi.Router) {
r.Use(HashesCtx)
r.Use(hashesContext)
r.Get("/info", q.handleTorrentsInfo)
r.Post("/add", q.handleTorrentsAdd)
r.Post("/delete", q.handleTorrentsDelete)
@@ -20,7 +20,7 @@ func (q *QBit) Routes() http.Handler {
r.Post("/createCategory", q.handleCreateCategory)
r.Post("/setCategory", q.handleSetCategory)
r.Post("/addTags", q.handleAddTorrentTags)
r.Post("/removeTags", q.handleRemoveTorrentTags)
r.Post("/removeTags", q.handleremoveTorrentTags)
r.Post("/createTags", q.handleCreateTags)
r.Get("/tags", q.handleGetTags)
r.Get("/pause", q.handleTorrentsPause)

View File

@@ -1,38 +1,35 @@
package qbit
import (
"cmp"
"context"
"fmt"
"github.com/sirrobot01/decypharr/internal/request"
"github.com/sirrobot01/decypharr/internal/utils"
"github.com/sirrobot01/decypharr/pkg/arr"
"github.com/sirrobot01/decypharr/pkg/debrid/debrid"
debridTypes "github.com/sirrobot01/decypharr/pkg/debrid/types"
"github.com/sirrobot01/decypharr/pkg/service"
"github.com/sirrobot01/decypharr/pkg/store"
"io"
"mime/multipart"
"os"
"path/filepath"
"strings"
"time"
)
// All torrent related helpers goes here
func (q *QBit) AddMagnet(ctx context.Context, url, category string) error {
// All torrent-related helpers go here
func (q *QBit) addMagnet(ctx context.Context, url string, arr *arr.Arr, debrid string, isSymlink bool) error {
magnet, err := utils.GetMagnetFromUrl(url)
if err != nil {
return fmt.Errorf("error parsing magnet link: %w", err)
}
err = q.Process(ctx, magnet, category)
_store := store.GetStore()
importReq := store.NewImportRequest(debrid, q.DownloadFolder, magnet, arr, isSymlink, false, "", store.ImportTypeQBitTorrent)
err = _store.AddTorrent(ctx, importReq)
if err != nil {
return fmt.Errorf("failed to process torrent: %w", err)
}
return nil
}
func (q *QBit) AddTorrent(ctx context.Context, fileHeader *multipart.FileHeader, category string) error {
func (q *QBit) addTorrent(ctx context.Context, fileHeader *multipart.FileHeader, arr *arr.Arr, debrid string, isSymlink bool) error {
file, _ := fileHeader.Open()
defer file.Close()
var reader io.Reader = file
@@ -40,226 +37,28 @@ func (q *QBit) AddTorrent(ctx context.Context, fileHeader *multipart.FileHeader,
if err != nil {
return fmt.Errorf("error reading file: %s \n %w", fileHeader.Filename, err)
}
err = q.Process(ctx, magnet, category)
_store := store.GetStore()
importReq := store.NewImportRequest(debrid, q.DownloadFolder, magnet, arr, isSymlink, false, "", store.ImportTypeQBitTorrent)
err = _store.AddTorrent(ctx, importReq)
if err != nil {
return fmt.Errorf("failed to process torrent: %w", err)
}
return nil
}
// Process submits a parsed magnet to the debrid engine under the given
// category, records the resulting torrent, and starts asynchronous file
// processing. The arr instance and symlink flag are read from ctx (keys
// "arr" and "isSymlink"); an error is returned when either is missing.
func (q *QBit) Process(ctx context.Context, magnet *utils.Magnet, category string) error {
	svc := service.GetService()
	torrent := createTorrentFromMagnet(magnet, category, "auto")
	a, ok := ctx.Value("arr").(*arr.Arr)
	if !ok {
		return fmt.Errorf("arr not found in context")
	}
	// Use a comma-ok assertion: a bare .(bool) panics when the middleware
	// did not set the key, taking down the request handler.
	isSymlink, ok := ctx.Value("isSymlink").(bool)
	if !ok {
		return fmt.Errorf("isSymlink not found in context")
	}
	debridTorrent, err := debrid.ProcessTorrent(svc.Debrid, magnet, a, isSymlink, false)
	if err != nil || debridTorrent == nil {
		if err == nil {
			err = fmt.Errorf("failed to process torrent")
		}
		return err
	}
	torrent = q.UpdateTorrentMin(torrent, debridTorrent)
	q.Storage.AddOrUpdate(torrent)
	go q.ProcessFiles(torrent, debridTorrent, a, isSymlink) // We can send async for file processing not to delay the response
	return nil
}
// ProcessFiles polls the debrid provider until the torrent leaves the
// "downloading" states, then materializes its files either as symlinks
// (via the internal webdav cache, zurg, or a debrid webdav mount) or as a
// manual file copy. On any failure the torrent is marked failed and the
// debrid-side torrent is deleted; on success the torrent record is updated,
// a Discord notification is sent, and the owning arr is refreshed.
// Intended to run in its own goroutine (see callers).
func (q *QBit) ProcessFiles(torrent *Torrent, debridTorrent *debridTypes.Torrent, arr *arr.Arr, isSymlink bool) {
	svc := service.GetService()
	client := svc.Debrid.GetClient(debridTorrent.Debrid)
	downloadingStatuses := client.GetDownloadingStatus()
	// Poll until the debrid reports "downloaded" (or a non-downloading state).
	for debridTorrent.Status != "downloaded" {
		q.logger.Debug().Msgf("%s <- (%s) Download Progress: %.2f%%", debridTorrent.Debrid, debridTorrent.Name, debridTorrent.Progress)
		dbT, err := client.CheckStatus(debridTorrent, isSymlink)
		if err != nil {
			if dbT != nil && dbT.Id != "" {
				// Delete the torrent if it was not downloaded
				go func() {
					_ = client.DeleteTorrent(dbT.Id)
				}()
			}
			q.logger.Error().Msgf("Error checking status: %v", err)
			q.MarkAsFailed(torrent)
			go func() {
				if err := arr.Refresh(); err != nil {
					q.logger.Error().Msgf("Error refreshing arr: %v", err)
				}
			}()
			return
		}
		debridTorrent = dbT
		torrent = q.UpdateTorrentMin(torrent, debridTorrent)
		// Exit the loop for downloading statuses to prevent memory buildup
		if debridTorrent.Status == "downloaded" || !utils.Contains(downloadingStatuses, debridTorrent.Status) {
			break
		}
		if !utils.Contains(client.GetDownloadingStatus(), debridTorrent.Status) {
			break
		}
		time.Sleep(time.Duration(q.RefreshInterval) * time.Second)
	}
	var torrentSymlinkPath string
	var err error
	debridTorrent.Arr = arr
	// Check if debrid supports webdav by checking cache
	timer := time.Now()
	if isSymlink {
		cache, useWebdav := svc.Debrid.Caches[debridTorrent.Debrid]
		if useWebdav {
			q.logger.Info().Msgf("Using internal webdav for %s", debridTorrent.Debrid)
			// Use webdav to download the file
			if err := cache.AddTorrent(debridTorrent); err != nil {
				q.logger.Error().Msgf("Error adding torrent to cache: %v", err)
				q.MarkAsFailed(torrent)
				return
			}
			rclonePath := filepath.Join(debridTorrent.MountPath, cache.GetTorrentFolder(debridTorrent)) // /mnt/remote/realdebrid/MyTVShow
			torrentFolderNoExt := utils.RemoveExtension(debridTorrent.Name)
			torrentSymlinkPath, err = q.createSymlinksWebdav(debridTorrent, rclonePath, torrentFolderNoExt) // /mnt/symlinks/{category}/MyTVShow/
		} else {
			// User is using either zurg or debrid webdav
			torrentSymlinkPath, err = q.ProcessSymlink(torrent) // /mnt/symlinks/{category}/MyTVShow/
		}
	} else {
		torrentSymlinkPath, err = q.ProcessManualFile(torrent)
	}
	if err != nil {
		// Materialization failed: mark failed and clean up the debrid torrent.
		q.MarkAsFailed(torrent)
		go func() {
			_ = client.DeleteTorrent(debridTorrent.Id)
		}()
		q.logger.Info().Msgf("Error: %v", err)
		return
	}
	torrent.TorrentPath = torrentSymlinkPath
	q.UpdateTorrent(torrent, debridTorrent)
	q.logger.Info().Msgf("Adding %s took %s", debridTorrent.Name, time.Since(timer))
	go func() {
		if err := request.SendDiscordMessage("download_complete", "success", torrent.discordContext()); err != nil {
			q.logger.Error().Msgf("Error sending discord message: %v", err)
		}
	}()
	if err := arr.Refresh(); err != nil {
		q.logger.Error().Msgf("Error refreshing arr: %v", err)
	}
}
// MarkAsFailed flags the torrent as errored, persists the change, and fires
// an asynchronous Discord notification. Returns the same torrent so callers
// can chain on it.
func (q *QBit) MarkAsFailed(t *Torrent) *Torrent {
	t.State = "error"
	q.Storage.AddOrUpdate(t)
	notify := func() {
		if err := request.SendDiscordMessage("download_failed", "error", t.discordContext()); err != nil {
			q.logger.Error().Msgf("Error sending discord message: %v", err)
		}
	}
	go notify()
	return t
}
// UpdateTorrentMin copies the lightweight progress fields from the debrid
// torrent onto the qbit torrent record (sizes, speeds, ETA, save paths)
// without any blocking work. Returns t unchanged when debridTorrent is nil.
func (q *QBit) UpdateTorrentMin(t *Torrent, debridTorrent *debridTypes.Torrent) *Torrent {
	if debridTorrent == nil {
		return t
	}
	added, parseErr := time.Parse(time.RFC3339, debridTorrent.Added)
	if parseErr != nil {
		// Fall back to "now" when the debrid timestamp is unparseable.
		added = time.Now()
	}
	size := debridTorrent.Bytes
	fraction := cmp.Or(debridTorrent.Progress, 0.0) / 100.0
	done := int64(float64(size) * fraction)
	rate := debridTorrent.Speed
	eta := 0
	if rate != 0 {
		eta = int((size - done) / rate)
	}
	t.ID = debridTorrent.Id
	t.Name = debridTorrent.Name
	t.AddedOn = added.Unix()
	t.DebridTorrent = debridTorrent
	t.Debrid = debridTorrent.Debrid
	t.Size = size
	t.Completed = done
	t.Downloaded = done
	t.DownloadedSession = done
	t.Uploaded = done
	t.UploadedSession = done
	t.AmountLeft = size - done
	t.Progress = fraction
	t.Eta = eta
	t.Dlspeed = rate
	t.Upspeed = rate
	t.SavePath = filepath.Join(q.DownloadFolder, t.Category) + string(os.PathSeparator)
	t.ContentPath = filepath.Join(t.SavePath, t.Name) + string(os.PathSeparator)
	return t
}
// UpdateTorrent refreshes the torrent from its debrid counterpart and waits
// (polling every 100ms, up to 10 minutes) until the torrent is ready, at
// which point it is marked "pausedUP" and persisted. Returns the torrent in
// its latest state even when the deadline expires.
//
// Fixes over the previous version: the 10-minute timeout used
// time.After inside the select, recreating the timer on every iteration —
// since the 100ms ticker always fired first, the timeout could never
// trigger; and the loop called UpdateTorrent recursively, creating a new
// ticker per level and growing the stack unboundedly.
func (q *QBit) UpdateTorrent(t *Torrent, debridTorrent *debridTypes.Torrent) *Torrent {
	if debridTorrent == nil {
		return t
	}
	// refresh pulls the latest state from the debrid (unless already
	// downloaded) and re-derives the local torrent fields.
	refresh := func() {
		if debridClient := service.GetDebrid().GetClient(debridTorrent.Debrid); debridClient != nil {
			if debridTorrent.Status != "downloaded" {
				_ = debridClient.UpdateTorrent(debridTorrent)
			}
		}
		t = q.UpdateTorrentMin(t, debridTorrent)
		t.ContentPath = t.TorrentPath + string(os.PathSeparator)
	}
	refresh()
	if t.IsReady() {
		t.State = "pausedUP"
		q.Storage.Update(t)
		return t
	}
	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop()
	// Single deadline timer created once, so it actually fires.
	deadline := time.NewTimer(10 * time.Minute)
	defer deadline.Stop()
	for {
		select {
		case <-ticker.C:
			refresh()
			if t.IsReady() {
				t.State = "pausedUP"
				q.Storage.Update(t)
				return t
			}
		case <-deadline.C:
			// Give up waiting; return the torrent in its current state.
			return t
		}
	}
}
func (q *QBit) ResumeTorrent(t *Torrent) bool {
func (q *QBit) ResumeTorrent(t *store.Torrent) bool {
return true
}
func (q *QBit) PauseTorrent(t *Torrent) bool {
func (q *QBit) PauseTorrent(t *store.Torrent) bool {
return true
}
func (q *QBit) RefreshTorrent(t *Torrent) bool {
func (q *QBit) RefreshTorrent(t *store.Torrent) bool {
return true
}
func (q *QBit) GetTorrentProperties(t *Torrent) *TorrentProperties {
func (q *QBit) GetTorrentProperties(t *store.Torrent) *TorrentProperties {
return &TorrentProperties{
AdditionDate: t.AddedOn,
Comment: "Debrid Blackhole <https://github.com/sirrobot01/decypharr>",
@@ -284,7 +83,7 @@ func (q *QBit) GetTorrentProperties(t *Torrent) *TorrentProperties {
}
}
func (q *QBit) GetTorrentFiles(t *Torrent) []*TorrentFile {
func (q *QBit) getTorrentFiles(t *store.Torrent) []*TorrentFile {
files := make([]*TorrentFile, 0)
if t.DebridTorrent == nil {
return files
@@ -298,7 +97,7 @@ func (q *QBit) GetTorrentFiles(t *Torrent) []*TorrentFile {
return files
}
func (q *QBit) SetTorrentTags(t *Torrent, tags []string) bool {
func (q *QBit) setTorrentTags(t *store.Torrent, tags []string) bool {
torrentTags := strings.Split(t.Tags, ",")
for _, tag := range tags {
if tag == "" {
@@ -312,20 +111,20 @@ func (q *QBit) SetTorrentTags(t *Torrent, tags []string) bool {
}
}
t.Tags = strings.Join(torrentTags, ",")
q.Storage.Update(t)
q.storage.Update(t)
return true
}
func (q *QBit) RemoveTorrentTags(t *Torrent, tags []string) bool {
func (q *QBit) removeTorrentTags(t *store.Torrent, tags []string) bool {
torrentTags := strings.Split(t.Tags, ",")
newTorrentTags := utils.RemoveItem(torrentTags, tags...)
q.Tags = utils.RemoveItem(q.Tags, tags...)
t.Tags = strings.Join(newTorrentTags, ",")
q.Storage.Update(t)
q.storage.Update(t)
return true
}
func (q *QBit) AddTags(tags []string) bool {
func (q *QBit) addTags(tags []string) bool {
for _, tag := range tags {
if tag == "" {
continue
@@ -337,7 +136,7 @@ func (q *QBit) AddTags(tags []string) bool {
return true
}
func (q *QBit) RemoveTags(tags []string) bool {
func (q *QBit) removeTags(tags []string) bool {
q.Tags = utils.RemoveItem(q.Tags, tags...)
return true
}

View File

@@ -1,11 +1,5 @@
package qbit
import (
"fmt"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
"sync"
)
type BuildInfo struct {
Libtorrent string `json:"libtorrent"`
Bitness int `json:"bitness"`
@@ -172,76 +166,6 @@ type TorrentCategory struct {
SavePath string `json:"savePath"`
}
// Torrent is the qBittorrent-API-shaped view of a managed torrent. Field
// names/tags mirror the qBittorrent WebUI API torrent object so arrs can
// consume it directly; fields tagged `json:"-"` are internal only.
type Torrent struct {
	ID            string         `json:"id"`
	DebridTorrent *types.Torrent `json:"-"` // backing debrid torrent, never serialized
	Debrid        string         `json:"debrid"`
	TorrentPath   string         `json:"-"` // local symlink/file path once materialized
	AddedOn       int64          `json:"added_on,omitempty"`
	AmountLeft    int64          `json:"amount_left"`
	AutoTmm       bool           `json:"auto_tmm"`
	Availability  float64        `json:"availability,omitempty"`
	Category      string         `json:"category,omitempty"`
	Completed     int64          `json:"completed"`
	CompletionOn  int            `json:"completion_on,omitempty"`
	ContentPath   string         `json:"content_path"`
	DlLimit       int            `json:"dl_limit"`
	Dlspeed       int64          `json:"dlspeed"`
	Downloaded    int64          `json:"downloaded"`
	DownloadedSession int64      `json:"downloaded_session"`
	Eta           int            `json:"eta"`
	FlPiecePrio   bool           `json:"f_l_piece_prio,omitempty"`
	ForceStart    bool           `json:"force_start,omitempty"`
	Hash          string         `json:"hash"`
	LastActivity  int64          `json:"last_activity,omitempty"`
	MagnetUri     string         `json:"magnet_uri,omitempty"`
	MaxRatio      int            `json:"max_ratio,omitempty"`
	MaxSeedingTime int           `json:"max_seeding_time,omitempty"`
	Name          string         `json:"name,omitempty"`
	NumComplete   int            `json:"num_complete,omitempty"`
	NumIncomplete int            `json:"num_incomplete,omitempty"`
	NumLeechs     int            `json:"num_leechs,omitempty"`
	NumSeeds      int            `json:"num_seeds,omitempty"`
	Priority      int            `json:"priority,omitempty"`
	Progress      float64        `json:"progress"`
	Ratio         int            `json:"ratio,omitempty"`
	RatioLimit    int            `json:"ratio_limit,omitempty"`
	SavePath      string         `json:"save_path"`
	SeedingTimeLimit int         `json:"seeding_time_limit,omitempty"`
	SeenComplete  int64          `json:"seen_complete,omitempty"`
	SeqDl         bool           `json:"seq_dl"`
	Size          int64          `json:"size,omitempty"`
	State         string         `json:"state,omitempty"` // qBittorrent state string, e.g. "pausedUP", "error"
	SuperSeeding  bool           `json:"super_seeding"`
	Tags          string         `json:"tags,omitempty"` // comma-separated tag list
	TimeActive    int            `json:"time_active,omitempty"`
	TotalSize     int64          `json:"total_size,omitempty"`
	Tracker       string         `json:"tracker,omitempty"`
	UpLimit       int64          `json:"up_limit,omitempty"`
	Uploaded      int64          `json:"uploaded,omitempty"`
	UploadedSession int64        `json:"uploaded_session,omitempty"`
	Upspeed       int64          `json:"upspeed,omitempty"`
	Source        string         `json:"source,omitempty"`
	Mu            sync.Mutex     `json:"-"` // guards mutation; do not copy Torrent by value
}
// IsReady reports whether the torrent has finished downloading (nothing
// left, or progress at 100%) and has been materialized to a local path.
func (t *Torrent) IsReady() bool {
	finished := t.AmountLeft <= 0 || t.Progress == 1
	return finished && t.TorrentPath != ""
}
// discordContext renders the torrent's key fields as a Markdown snippet used
// as the body of Discord notifications (see MarkAsFailed / ProcessFiles).
func (t *Torrent) discordContext() string {
	format := `
**Name:** %s
**Arr:** %s
**Hash:** %s
**MagnetURI:** %s
**Debrid:** %s
`
	return fmt.Sprintf(format, t.Name, t.Category, t.Hash, t.MagnetUri, t.Debrid)
}
type TorrentProperties struct {
AdditionDate int64 `json:"addition_date,omitempty"`
Comment string `json:"comment,omitempty"`
@@ -289,7 +213,7 @@ type TorrentFile struct {
Availability float64 `json:"availability,omitempty"`
}
func NewAppPreferences() *AppPreferences {
func getAppPreferences() *AppPreferences {
preferences := &AppPreferences{
AddTrackers: "",
AddTrackersEnabled: false,

View File

@@ -3,6 +3,8 @@ package repair
import (
"fmt"
"github.com/sirrobot01/decypharr/pkg/arr"
"github.com/sirrobot01/decypharr/pkg/debrid/store"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
"os"
"path/filepath"
)
@@ -82,3 +84,96 @@ func collectFiles(media arr.Content) map[string][]arr.ContentFile {
}
return uniqueParents
}
// checkTorrentFiles determines which of the given content files are broken
// inside the debrid-backed torrent at torrentPath. If the owning debrid,
// its cache, or the torrent itself cannot be resolved, every file is
// returned as broken so the caller repairs conservatively.
func (r *Repair) checkTorrentFiles(torrentPath string, files []arr.ContentFile, clients map[string]types.Client, caches map[string]*store.Cache) []arr.ContentFile {
	r.logger.Debug().Msgf("Checking %s", torrentPath)
	// Resolve which debrid client owns this path.
	parentDir := filepath.Dir(torrentPath)
	debridName := r.findDebridForPath(parentDir, clients)
	if debridName == "" {
		r.logger.Debug().Msgf("No debrid found for %s. Skipping", torrentPath)
		return files // Return all files as broken if no debrid found
	}
	cache, found := caches[debridName]
	if !found {
		r.logger.Debug().Msgf("No cache found for %s. Skipping", debridName)
		return files // Return all files as broken if no cache found
	}
	torrentName := filepath.Clean(filepath.Base(torrentPath))
	torrent := cache.GetTorrentByName(torrentName)
	if torrent == nil {
		r.logger.Debug().Msgf("No torrent found for %s. Skipping", torrentName)
		return files // Return all files as broken if torrent not found
	}
	// Batch the check: hand every target path to the cache in one call.
	targets := make([]string, 0, len(files))
	for _, f := range files {
		targets = append(targets, f.TargetPath)
	}
	broken := make([]arr.ContentFile, 0)
	missing := cache.GetBrokenFiles(torrent, targets)
	if len(missing) == 0 {
		return broken
	}
	r.logger.Debug().Msgf("%d broken files found in %s", len(missing), torrentName)
	// Set for O(1) membership tests while filtering.
	missingSet := make(map[string]struct{}, len(missing))
	for _, p := range missing {
		missingSet[p] = struct{}{}
	}
	for _, f := range files {
		if _, bad := missingSet[f.TargetPath]; bad {
			broken = append(broken, f)
		}
	}
	return broken
}
// findDebridForPath resolves which debrid client's mount path owns dir,
// memoizing both hits and misses in r.debridPathCache to avoid repeated
// filesystem-path comparisons.
//
// Fix: the previous version lazily initialized r.debridPathCache while
// holding only the read lock — a write under RLock is a data race. The map
// is now created under the write lock.
func (r *Repair) findDebridForPath(dir string, clients map[string]types.Client) string {
	// Fast path: return a cached answer (including a cached empty miss).
	r.cacheMutex.RLock()
	if r.debridPathCache != nil {
		if debridName, exists := r.debridPathCache[dir]; exists {
			r.cacheMutex.RUnlock()
			return debridName
		}
	}
	r.cacheMutex.RUnlock()
	// Slow path: scan the clients' mount paths for a match.
	name := ""
	for _, client := range clients {
		mountPath := client.GetMountPath()
		if mountPath == "" {
			continue
		}
		if filepath.Clean(mountPath) == filepath.Clean(dir) {
			name = client.GetName()
			break
		}
	}
	// Record the result (even an empty miss, to avoid repeated lookups)
	// under the write lock; lazy map creation happens here too.
	r.cacheMutex.Lock()
	if r.debridPathCache == nil {
		r.debridPathCache = make(map[string]string)
	}
	r.debridPathCache[dir] = name
	r.cacheMutex.Unlock()
	return name
}

View File

@@ -3,6 +3,7 @@ package repair
import (
"context"
"encoding/json"
"errors"
"fmt"
"github.com/go-co-op/gocron/v2"
"github.com/google/uuid"
@@ -12,7 +13,7 @@ import (
"github.com/sirrobot01/decypharr/internal/request"
"github.com/sirrobot01/decypharr/internal/utils"
"github.com/sirrobot01/decypharr/pkg/arr"
"github.com/sirrobot01/decypharr/pkg/debrid/debrid"
"github.com/sirrobot01/decypharr/pkg/debrid"
"golang.org/x/sync/errgroup"
"net"
"net/http"
@@ -29,7 +30,7 @@ import (
type Repair struct {
Jobs map[string]*Job
arrs *arr.Storage
deb *debrid.Engine
deb *debrid.Storage
interval string
runOnStart bool
ZurgURL string
@@ -40,7 +41,10 @@ type Repair struct {
filename string
workers int
scheduler gocron.Scheduler
ctx context.Context
debridPathCache map[string]string // Cache for path -> debrid name mapping
cacheMutex sync.RWMutex
ctx context.Context
}
type JobStatus string
@@ -51,6 +55,7 @@ const (
JobFailed JobStatus = "failed"
JobCompleted JobStatus = "completed"
JobProcessing JobStatus = "processing"
JobCancelled JobStatus = "cancelled"
)
type Job struct {
@@ -66,9 +71,12 @@ type Job struct {
Recurrent bool `json:"recurrent"`
Error string `json:"error"`
cancelFunc context.CancelFunc
ctx context.Context
}
func New(arrs *arr.Storage, engine *debrid.Engine) *Repair {
func New(arrs *arr.Storage, engine *debrid.Storage) *Repair {
cfg := config.Get()
workers := runtime.NumCPU() * 20
if cfg.Repair.Workers > 0 {
@@ -220,7 +228,8 @@ func (r *Repair) newJob(arrsNames []string, mediaIDs []string) *Job {
func (r *Repair) preRunChecks() error {
if r.useWebdav {
if len(r.deb.Caches) == 0 {
caches := r.deb.GetCaches()
if len(caches) == 0 {
return fmt.Errorf("no caches found")
}
return nil
@@ -254,21 +263,59 @@ func (r *Repair) AddJob(arrsNames []string, mediaIDs []string, autoProcess, recu
job.AutoProcess = autoProcess
job.Recurrent = recurrent
r.reset(job)
job.ctx, job.cancelFunc = context.WithCancel(r.ctx)
r.Jobs[key] = job
go r.saveToFile()
go func() {
if err := r.repair(job); err != nil {
r.logger.Error().Err(err).Msg("Error running repair")
r.logger.Error().Err(err).Msg("Error running repair")
job.FailedAt = time.Now()
job.Error = err.Error()
job.Status = JobFailed
job.CompletedAt = time.Now()
if !errors.Is(job.ctx.Err(), context.Canceled) {
job.FailedAt = time.Now()
job.Error = err.Error()
job.Status = JobFailed
job.CompletedAt = time.Now()
} else {
job.FailedAt = time.Now()
job.Error = err.Error()
job.Status = JobFailed
job.CompletedAt = time.Now()
}
}
}()
return nil
}
// StopJob cancels a running repair job by id. Only jobs in the started or
// processing states can be stopped; the job is marked cancelled and the
// job list is persisted in the background.
//
// Fix: the previous version updated job.Status inside a goroutine launched
// immediately after cancelFunc(), racing the repair worker over the same
// unguarded fields; the status is now set synchronously and deterministically.
func (r *Repair) StopJob(id string) error {
	job := r.GetJob(id)
	if job == nil {
		return fmt.Errorf("job %s not found", id)
	}
	// Check if job can be stopped
	if job.Status != JobStarted && job.Status != JobProcessing {
		return fmt.Errorf("job %s cannot be stopped (status: %s)", id, job.Status)
	}
	if job.cancelFunc == nil {
		return fmt.Errorf("job %s cannot be cancelled", id)
	}
	// Cancel the job's context, then record the cancellation.
	job.cancelFunc()
	r.logger.Info().Msgf("Job %s cancellation requested", id)
	job.Status = JobCancelled
	job.CompletedAt = time.Now()
	job.Error = "Job was cancelled by user"
	go r.saveToFile()
	return nil
}
func (r *Repair) repair(job *Job) error {
defer r.saveToFile()
if err := r.preRunChecks(); err != nil {
@@ -278,7 +325,7 @@ func (r *Repair) repair(job *Job) error {
// Use a mutex to protect concurrent access to brokenItems
var mu sync.Mutex
brokenItems := map[string][]arr.ContentFile{}
g, ctx := errgroup.WithContext(r.ctx)
g, ctx := errgroup.WithContext(job.ctx)
for _, a := range job.Arrs {
a := a // Capture range variable
@@ -321,6 +368,14 @@ func (r *Repair) repair(job *Job) error {
// Wait for all goroutines to complete and check for errors
if err := g.Wait(); err != nil {
// Check if job was canceled
if errors.Is(ctx.Err(), context.Canceled) {
job.Status = JobCancelled
job.CompletedAt = time.Now()
job.Error = "Job was cancelled"
return fmt.Errorf("job cancelled")
}
job.FailedAt = time.Now()
job.Error = err.Error()
job.Status = JobFailed
@@ -367,7 +422,7 @@ func (r *Repair) repair(job *Job) error {
return nil
}
func (r *Repair) repairArr(j *Job, _arr string, tmdbId string) ([]arr.ContentFile, error) {
func (r *Repair) repairArr(job *Job, _arr string, tmdbId string) ([]arr.ContentFile, error) {
brokenItems := make([]arr.ContentFile, 0)
a := r.arrs.Get(_arr)
@@ -384,7 +439,7 @@ func (r *Repair) repairArr(j *Job, _arr string, tmdbId string) ([]arr.ContentFil
return brokenItems, nil
}
// Check first media to confirm mounts are accessible
if !r.isMediaAccessible(media[0]) {
if !r.isMediaAccessible(media) {
r.logger.Info().Msgf("Skipping repair. Parent directory not accessible for. Check your mounts")
return brokenItems, nil
}
@@ -400,14 +455,14 @@ func (r *Repair) repairArr(j *Job, _arr string, tmdbId string) ([]arr.ContentFil
defer wg.Done()
for m := range workerChan {
select {
case <-r.ctx.Done():
case <-job.ctx.Done():
return
default:
}
items := r.getBrokenFiles(m)
items := r.getBrokenFiles(job, m)
if items != nil {
r.logger.Debug().Msgf("Found %d broken files for %s", len(items), m.Title)
if j.AutoProcess {
if job.AutoProcess {
r.logger.Info().Msgf("Auto processing %d broken items for %s", len(items), m.Title)
// Delete broken items
@@ -431,7 +486,7 @@ func (r *Repair) repairArr(j *Job, _arr string, tmdbId string) ([]arr.ContentFil
for _, m := range media {
select {
case <-r.ctx.Done():
case <-job.ctx.Done():
break
default:
workerChan <- m
@@ -449,43 +504,49 @@ func (r *Repair) repairArr(j *Job, _arr string, tmdbId string) ([]arr.ContentFil
return brokenItems, nil
}
func (r *Repair) isMediaAccessible(m arr.Content) bool {
files := m.Files
// isMediaAccessible checks if the mounts are accessible
func (r *Repair) isMediaAccessible(media []arr.Content) bool {
firstMedia := media[0]
for _, m := range media {
if len(m.Files) > 0 {
firstMedia = m
break
}
}
files := firstMedia.Files
if len(files) == 0 {
return false
}
firstFile := files[0]
r.logger.Debug().Msgf("Checking parent directory for %s", firstFile.Path)
//if _, err := os.Stat(firstFile.Path); os.IsNotExist(err) {
// r.logger.Debug().Msgf("Parent directory not accessible for %s", firstFile.Path)
// return false
//}
// Check symlink parent directory
symlinkPath := getSymlinkTarget(firstFile.Path)
r.logger.Debug().Msgf("Checking symlink parent directory for %s", symlinkPath)
parentSymlink := ""
if symlinkPath != "" {
parentSymlink := filepath.Dir(filepath.Dir(symlinkPath)) // /mnt/zurg/torrents/movie/movie.mkv -> /mnt/zurg/torrents
parentSymlink = filepath.Dir(filepath.Dir(symlinkPath)) // /mnt/zurg/torrents/movie/movie.mkv -> /mnt/zurg/torrents
}
if parentSymlink != "" {
if _, err := os.Stat(parentSymlink); os.IsNotExist(err) {
return false
}
return true
}
return true
return false
}
func (r *Repair) getBrokenFiles(media arr.Content) []arr.ContentFile {
func (r *Repair) getBrokenFiles(job *Job, media arr.Content) []arr.ContentFile {
if r.useWebdav {
return r.getWebdavBrokenFiles(media)
return r.getWebdavBrokenFiles(job, media)
} else if r.IsZurg {
return r.getZurgBrokenFiles(media)
return r.getZurgBrokenFiles(job, media)
} else {
return r.getFileBrokenFiles(media)
return r.getFileBrokenFiles(job, media)
}
}
func (r *Repair) getFileBrokenFiles(media arr.Content) []arr.ContentFile {
func (r *Repair) getFileBrokenFiles(job *Job, media arr.Content) []arr.ContentFile {
// This checks symlink target, try to get read a tiny bit of the file
brokenFiles := make([]arr.ContentFile, 0)
@@ -510,7 +571,7 @@ func (r *Repair) getFileBrokenFiles(media arr.Content) []arr.ContentFile {
return brokenFiles
}
func (r *Repair) getZurgBrokenFiles(media arr.Content) []arr.ContentFile {
func (r *Repair) getZurgBrokenFiles(job *Job, media arr.Content) []arr.ContentFile {
// Use zurg setup to check file availability with zurg
// This reduces bandwidth usage significantly
@@ -550,12 +611,17 @@ func (r *Repair) getZurgBrokenFiles(media arr.Content) []arr.ContentFile {
}
if resp.StatusCode < 200 || resp.StatusCode >= 300 {
r.logger.Debug().Msgf("Failed to get download url for %s", fullURL)
resp.Body.Close()
if err := resp.Body.Close(); err != nil {
return nil
}
brokenFiles = append(brokenFiles, file)
continue
}
downloadUrl := resp.Request.URL.String()
resp.Body.Close()
if err := resp.Body.Close(); err != nil {
return nil
}
if downloadUrl != "" {
r.logger.Trace().Msgf("Found download url: %s", downloadUrl)
} else {
@@ -573,16 +639,16 @@ func (r *Repair) getZurgBrokenFiles(media arr.Content) []arr.ContentFile {
return brokenFiles
}
func (r *Repair) getWebdavBrokenFiles(media arr.Content) []arr.ContentFile {
func (r *Repair) getWebdavBrokenFiles(job *Job, media arr.Content) []arr.ContentFile {
// Use internal webdav setup to check file availability
caches := r.deb.Caches
caches := r.deb.GetCaches()
if len(caches) == 0 {
r.logger.Info().Msg("No caches found. Can't use webdav")
return nil
}
clients := r.deb.Clients
clients := r.deb.GetClients()
if len(clients) == 0 {
r.logger.Info().Msg("No clients found. Can't use webdav")
return nil
@@ -590,58 +656,36 @@ func (r *Repair) getWebdavBrokenFiles(media arr.Content) []arr.ContentFile {
brokenFiles := make([]arr.ContentFile, 0)
uniqueParents := collectFiles(media)
for torrentPath, f := range uniqueParents {
r.logger.Debug().Msgf("Checking %s", torrentPath)
// Get the debrid first
dir := filepath.Dir(torrentPath)
debridName := ""
for _, client := range clients {
mountPath := client.GetMountPath()
if mountPath == "" {
continue
var brokenFilesMutex sync.Mutex
var wg sync.WaitGroup
// Limit concurrent torrent checks
semaphore := make(chan struct{}, min(len(uniqueParents), 30)) // Limit to 5 concurrent checks
for torrentPath, files := range uniqueParents {
wg.Add(1)
go func(torrentPath string, files []arr.ContentFile) {
defer wg.Done()
semaphore <- struct{}{} // Acquire
defer func() { <-semaphore }() // Release
select {
case <-job.ctx.Done():
return
default:
}
if filepath.Clean(mountPath) == filepath.Clean(dir) {
debridName = client.GetName()
break
}
}
if debridName == "" {
r.logger.Debug().Msgf("No debrid found for %s. Skipping", torrentPath)
continue
}
cache, ok := caches[debridName]
if !ok {
r.logger.Debug().Msgf("No cache found for %s. Skipping", debridName)
continue
}
// Check if torrent exists
torrentName := filepath.Clean(filepath.Base(torrentPath))
torrent := cache.GetTorrentByName(torrentName)
if torrent == nil {
r.logger.Debug().Msgf("No torrent found for %s. Skipping", torrentName)
brokenFiles = append(brokenFiles, f...)
continue
}
files := make([]string, 0)
for _, file := range f {
files = append(files, file.TargetPath)
}
brokenFilesForTorrent := r.checkTorrentFiles(torrentPath, files, clients, caches)
_brokenFiles := cache.GetBrokenFiles(torrent, files)
totalBrokenFiles := len(_brokenFiles)
if totalBrokenFiles > 0 {
r.logger.Debug().Msgf("%d broken files found in %s", totalBrokenFiles, torrentName)
for _, contentFile := range f {
if utils.Contains(_brokenFiles, contentFile.TargetPath) {
brokenFiles = append(brokenFiles, contentFile)
}
if len(brokenFilesForTorrent) > 0 {
brokenFilesMutex.Lock()
brokenFiles = append(brokenFiles, brokenFilesForTorrent...)
brokenFilesMutex.Unlock()
}
}
}(torrentPath, files)
}
wg.Wait()
if len(brokenFiles) == 0 {
r.logger.Debug().Msgf("No broken files found for %s", media.Title)
return nil
}
r.logger.Debug().Msgf("%d broken files found for %s", len(brokenFiles), media.Title)
@@ -696,7 +740,11 @@ func (r *Repair) ProcessJob(id string) error {
return nil
}
g, ctx := errgroup.WithContext(r.ctx)
if job.ctx == nil || job.ctx.Err() != nil {
job.ctx, job.cancelFunc = context.WithCancel(r.ctx)
}
g, ctx := errgroup.WithContext(job.ctx)
g.SetLimit(r.workers)
for arrName, items := range brokenItems {

View File

@@ -5,19 +5,20 @@ import (
"github.com/go-chi/chi/v5"
"github.com/sirrobot01/decypharr/internal/request"
debridTypes "github.com/sirrobot01/decypharr/pkg/debrid/types"
"github.com/sirrobot01/decypharr/pkg/service"
"github.com/sirrobot01/decypharr/pkg/store"
"net/http"
"runtime"
)
func (s *Server) handleIngests(w http.ResponseWriter, r *http.Request) {
ingests := make([]debridTypes.IngestData, 0)
svc := service.GetService()
if svc.Debrid == nil {
_store := store.GetStore()
debrids := _store.GetDebrid()
if debrids == nil {
http.Error(w, "Debrid service is not enabled", http.StatusInternalServerError)
return
}
for _, cache := range svc.Debrid.Caches {
for _, cache := range debrids.GetCaches() {
if cache == nil {
s.logger.Error().Msg("Debrid cache is nil, skipping")
continue
@@ -41,13 +42,17 @@ func (s *Server) handleIngestsByDebrid(w http.ResponseWriter, r *http.Request) {
return
}
svc := service.GetService()
if svc.Debrid == nil {
_store := store.GetStore()
debrids := _store.GetDebrid()
if debrids == nil {
http.Error(w, "Debrid service is not enabled", http.StatusInternalServerError)
return
}
cache, exists := svc.Debrid.Caches[debridName]
caches := debrids.GetCaches()
cache, exists := caches[debridName]
if !exists {
http.Error(w, "Debrid cache not found: "+debridName, http.StatusNotFound)
return
@@ -87,12 +92,13 @@ func (s *Server) handleStats(w http.ResponseWriter, r *http.Request) {
"go_version": runtime.Version(),
}
svc := service.GetService()
if svc.Debrid == nil {
debrids := store.GetStore().GetDebrid()
if debrids == nil {
request.JSONResponse(w, stats, http.StatusOK)
return
}
clients := svc.Debrid.GetDebrids()
clients := debrids.GetClients()
caches := debrids.GetCaches()
profiles := make([]*debridTypes.Profile, 0)
for debridName, client := range clients {
profile, err := client.GetProfile()
@@ -101,7 +107,7 @@ func (s *Server) handleStats(w http.ResponseWriter, r *http.Request) {
s.logger.Error().Err(err).Msg("Failed to get debrid profile")
continue
}
cache, ok := svc.Debrid.Caches[debridName]
cache, ok := caches[debridName]
if ok {
// Get torrent data
profile.LibrarySize = len(cache.GetTorrents())

View File

@@ -3,7 +3,7 @@ package server
import (
"cmp"
"encoding/json"
"github.com/sirrobot01/decypharr/pkg/service"
"github.com/sirrobot01/decypharr/pkg/store"
"net/http"
)
@@ -38,8 +38,7 @@ func (s *Server) handleTautulli(w http.ResponseWriter, r *http.Request) {
http.Error(w, "Invalid ID", http.StatusBadRequest)
return
}
svc := service.GetService()
repair := svc.Repair
repair := store.GetStore().GetRepair()
mediaId := cmp.Or(payload.TmdbID, payload.TvdbID)

View File

@@ -1,47 +0,0 @@
package service
import (
"github.com/sirrobot01/decypharr/pkg/arr"
"github.com/sirrobot01/decypharr/pkg/debrid/debrid"
"github.com/sirrobot01/decypharr/pkg/repair"
"sync"
)
type Service struct {
Repair *repair.Repair
Arr *arr.Storage
Debrid *debrid.Engine
}
var (
instance *Service
once sync.Once
)
// GetService returns the singleton instance
func GetService() *Service {
once.Do(func() {
arrs := arr.NewStorage()
deb := debrid.NewEngine()
instance = &Service{
Repair: repair.New(arrs, deb),
Arr: arrs,
Debrid: deb,
}
})
return instance
}
func Reset() {
if instance != nil {
if instance.Debrid != nil {
instance.Debrid.Reset()
}
}
once = sync.Once{}
instance = nil
}
func GetDebrid() *debrid.Engine {
return GetService().Debrid
}

View File

@@ -1,8 +1,8 @@
package qbit
package store
import (
"fmt"
"io"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
"net/http"
"os"
"path/filepath"
@@ -11,7 +11,6 @@ import (
"github.com/cavaliergopher/grab/v3"
"github.com/sirrobot01/decypharr/internal/utils"
debrid "github.com/sirrobot01/decypharr/pkg/debrid/types"
)
func Download(client *grab.Client, url, filename string, byterange *[2]int64, progressCallback func(int64, int64)) error {
@@ -57,21 +56,21 @@ Loop:
return resp.Err()
}
func (q *QBit) ProcessManualFile(torrent *Torrent) (string, error) {
func (s *Store) ProcessManualFile(torrent *Torrent) (string, error) {
debridTorrent := torrent.DebridTorrent
q.logger.Info().Msgf("Downloading %d files...", len(debridTorrent.Files))
torrentPath := filepath.Join(q.DownloadFolder, debridTorrent.Arr.Name, utils.RemoveExtension(debridTorrent.OriginalFilename))
s.logger.Info().Msgf("Downloading %d files...", len(debridTorrent.Files))
torrentPath := filepath.Join(torrent.SavePath, utils.RemoveExtension(debridTorrent.OriginalFilename))
torrentPath = utils.RemoveInvalidChars(torrentPath)
err := os.MkdirAll(torrentPath, os.ModePerm)
if err != nil {
// add previous error to the error and return
// add the previous error to the error and return
return "", fmt.Errorf("failed to create directory: %s: %v", torrentPath, err)
}
q.downloadFiles(torrent, torrentPath)
s.downloadFiles(torrent, torrentPath)
return torrentPath, nil
}
func (q *QBit) downloadFiles(torrent *Torrent, parent string) {
func (s *Store) downloadFiles(torrent *Torrent, parent string) {
debridTorrent := torrent.DebridTorrent
var wg sync.WaitGroup
@@ -79,15 +78,15 @@ func (q *QBit) downloadFiles(torrent *Torrent, parent string) {
for _, file := range debridTorrent.GetFiles() {
totalSize += file.Size
}
debridTorrent.Mu.Lock()
debridTorrent.Lock()
debridTorrent.SizeDownloaded = 0 // Reset downloaded bytes
debridTorrent.Progress = 0 // Reset progress
debridTorrent.Mu.Unlock()
debridTorrent.Unlock()
progressCallback := func(downloaded int64, speed int64) {
debridTorrent.Mu.Lock()
defer debridTorrent.Mu.Unlock()
torrent.Mu.Lock()
defer torrent.Mu.Unlock()
debridTorrent.Lock()
defer debridTorrent.Unlock()
torrent.Lock()
defer torrent.Unlock()
// Update total downloaded bytes
debridTorrent.SizeDownloaded += downloaded
@@ -97,7 +96,7 @@ func (q *QBit) downloadFiles(torrent *Torrent, parent string) {
if totalSize > 0 {
debridTorrent.Progress = float64(debridTorrent.SizeDownloaded) / float64(totalSize) * 100
}
q.UpdateTorrentMin(torrent, debridTorrent)
s.UpdateTorrentMin(torrent, debridTorrent)
}
client := &grab.Client{
UserAgent: "Decypharr[QBitTorrent]",
@@ -110,14 +109,14 @@ func (q *QBit) downloadFiles(torrent *Torrent, parent string) {
errChan := make(chan error, len(debridTorrent.Files))
for _, file := range debridTorrent.GetFiles() {
if file.DownloadLink == nil {
q.logger.Info().Msgf("No download link found for %s", file.Name)
s.logger.Info().Msgf("No download link found for %s", file.Name)
continue
}
wg.Add(1)
q.downloadSemaphore <- struct{}{}
go func(file debrid.File) {
s.downloadSemaphore <- struct{}{}
go func(file types.File) {
defer wg.Done()
defer func() { <-q.downloadSemaphore }()
defer func() { <-s.downloadSemaphore }()
filename := file.Name
err := Download(
@@ -129,10 +128,10 @@ func (q *QBit) downloadFiles(torrent *Torrent, parent string) {
)
if err != nil {
q.logger.Error().Msgf("Failed to download %s: %v", filename, err)
s.logger.Error().Msgf("Failed to download %s: %v", filename, err)
errChan <- err
} else {
q.logger.Info().Msgf("Downloaded %s", filename)
s.logger.Info().Msgf("Downloaded %s", filename)
}
}(file)
}
@@ -146,21 +145,21 @@ func (q *QBit) downloadFiles(torrent *Torrent, parent string) {
}
}
if len(errors) > 0 {
q.logger.Error().Msgf("Errors occurred during download: %v", errors)
s.logger.Error().Msgf("Errors occurred during download: %v", errors)
return
}
q.logger.Info().Msgf("Downloaded all files for %s", debridTorrent.Name)
s.logger.Info().Msgf("Downloaded all files for %s", debridTorrent.Name)
}
func (q *QBit) ProcessSymlink(torrent *Torrent) (string, error) {
func (s *Store) ProcessSymlink(torrent *Torrent) (string, error) {
debridTorrent := torrent.DebridTorrent
files := debridTorrent.Files
if len(files) == 0 {
return "", fmt.Errorf("no video files found")
}
q.logger.Info().Msgf("Checking symlinks for %d files...", len(files))
s.logger.Info().Msgf("Checking symlinks for %d files...", len(files))
rCloneBase := debridTorrent.MountPath
torrentPath, err := q.getTorrentPath(rCloneBase, debridTorrent) // /MyTVShow/
torrentPath, err := s.getTorrentPath(rCloneBase, debridTorrent) // /MyTVShow/
// This returns filename.ext for alldebrid instead of the parent folder filename/
torrentFolder := torrentPath
if err != nil {
@@ -173,7 +172,7 @@ func (q *QBit) ProcessSymlink(torrent *Torrent) (string, error) {
torrentFolder = utils.RemoveExtension(torrentFolder)
torrentRclonePath = rCloneBase // /mnt/rclone/magnets/ // Remove the filename since it's in the root folder
}
torrentSymlinkPath := filepath.Join(q.DownloadFolder, debridTorrent.Arr.Name, torrentFolder) // /mnt/symlinks/{category}/MyTVShow/
torrentSymlinkPath := filepath.Join(torrent.SavePath, torrentFolder) // /mnt/symlinks/{category}/MyTVShow/
err = os.MkdirAll(torrentSymlinkPath, os.ModePerm)
if err != nil {
return "", fmt.Errorf("failed to create directory: %s: %v", torrentSymlinkPath, err)
@@ -192,10 +191,10 @@ func (q *QBit) ProcessSymlink(torrent *Torrent) (string, error) {
return nil
})
if err != nil {
q.logger.Warn().Msgf("Error while scanning rclone path: %v", err)
s.logger.Warn().Msgf("Error while scanning rclone path: %v", err)
}
pending := make(map[string]debrid.File)
pending := make(map[string]types.File)
for _, file := range files {
if realRelPath, ok := realPaths[file.Name]; ok {
file.Path = realRelPath
@@ -216,43 +215,43 @@ func (q *QBit) ProcessSymlink(torrent *Torrent) (string, error) {
if _, err := os.Stat(fullFilePath); !os.IsNotExist(err) {
fileSymlinkPath := filepath.Join(torrentSymlinkPath, file.Name)
if err := os.Symlink(fullFilePath, fileSymlinkPath); err != nil && !os.IsExist(err) {
q.logger.Debug().Msgf("Failed to create symlink: %s: %v", fileSymlinkPath, err)
s.logger.Debug().Msgf("Failed to create symlink: %s: %v", fileSymlinkPath, err)
} else {
filePaths = append(filePaths, fileSymlinkPath)
delete(pending, path)
q.logger.Info().Msgf("File is ready: %s", file.Name)
s.logger.Info().Msgf("File is ready: %s", file.Name)
}
}
}
case <-timeout:
q.logger.Warn().Msgf("Timeout waiting for files, %d files still pending", len(pending))
s.logger.Warn().Msgf("Timeout waiting for files, %d files still pending", len(pending))
return torrentSymlinkPath, fmt.Errorf("timeout waiting for files: %d files still pending", len(pending))
}
}
if q.SkipPreCache {
if s.skipPreCache {
return torrentSymlinkPath, nil
}
go func() {
if err := q.preCacheFile(debridTorrent.Name, filePaths); err != nil {
q.logger.Error().Msgf("Failed to pre-cache file: %s", err)
s.logger.Debug().Msgf("Pre-caching %s", debridTorrent.Name)
if err := utils.PreCacheFile(filePaths); err != nil {
s.logger.Error().Msgf("Failed to pre-cache file: %s", err)
} else {
q.logger.Trace().Msgf("Pre-cached %d files", len(filePaths))
s.logger.Trace().Msgf("Pre-cached %d files", len(filePaths))
}
}()
return torrentSymlinkPath, nil
}
func (q *QBit) createSymlinksWebdav(debridTorrent *debrid.Torrent, rclonePath, torrentFolder string) (string, error) {
func (s *Store) createSymlinksWebdav(torrent *Torrent, debridTorrent *types.Torrent, rclonePath, torrentFolder string) (string, error) {
files := debridTorrent.Files
symlinkPath := filepath.Join(q.DownloadFolder, debridTorrent.Arr.Name, torrentFolder) // /mnt/symlinks/{category}/MyTVShow/
symlinkPath := filepath.Join(torrent.SavePath, torrentFolder) // /mnt/symlinks/{category}/MyTVShow/
err := os.MkdirAll(symlinkPath, os.ModePerm)
if err != nil {
return "", fmt.Errorf("failed to create directory: %s: %v", symlinkPath, err)
}
remainingFiles := make(map[string]debrid.File)
remainingFiles := make(map[string]types.File)
for _, file := range files {
remainingFiles[file.Name] = file
}
@@ -278,107 +277,44 @@ func (q *QBit) createSymlinksWebdav(debridTorrent *debrid.Torrent, rclonePath, t
fileSymlinkPath := filepath.Join(symlinkPath, file.Name)
if err := os.Symlink(fullFilePath, fileSymlinkPath); err != nil && !os.IsExist(err) {
q.logger.Debug().Msgf("Failed to create symlink: %s: %v", fileSymlinkPath, err)
s.logger.Debug().Msgf("Failed to create symlink: %s: %v", fileSymlinkPath, err)
} else {
filePaths = append(filePaths, fileSymlinkPath)
delete(remainingFiles, filename)
q.logger.Info().Msgf("File is ready: %s", file.Name)
s.logger.Info().Msgf("File is ready: %s", file.Name)
}
}
}
case <-timeout:
q.logger.Warn().Msgf("Timeout waiting for files, %d files still pending", len(remainingFiles))
s.logger.Warn().Msgf("Timeout waiting for files, %d files still pending", len(remainingFiles))
return symlinkPath, fmt.Errorf("timeout waiting for files")
}
}
if q.SkipPreCache {
if s.skipPreCache {
return symlinkPath, nil
}
go func() {
if err := q.preCacheFile(debridTorrent.Name, filePaths); err != nil {
q.logger.Error().Msgf("Failed to pre-cache file: %s", err)
s.logger.Debug().Msgf("Pre-caching %s", debridTorrent.Name)
if err := utils.PreCacheFile(filePaths); err != nil {
s.logger.Error().Msgf("Failed to pre-cache file: %s", err)
} else {
q.logger.Debug().Msgf("Pre-cached %d files", len(filePaths))
s.logger.Debug().Msgf("Pre-cached %d files", len(filePaths))
}
}() // Pre-cache the files in the background
// Pre-cache the first 256KB and 1MB of the file
return symlinkPath, nil
}
func (q *QBit) getTorrentPath(rclonePath string, debridTorrent *debrid.Torrent) (string, error) {
func (s *Store) getTorrentPath(rclonePath string, debridTorrent *types.Torrent) (string, error) {
for {
torrentPath, err := debridTorrent.GetMountFolder(rclonePath)
if err == nil {
q.logger.Debug().Msgf("Found torrent path: %s", torrentPath)
s.logger.Debug().Msgf("Found torrent path: %s", torrentPath)
return torrentPath, err
}
time.Sleep(100 * time.Millisecond)
}
}
func (q *QBit) preCacheFile(name string, filePaths []string) error {
q.logger.Trace().Msgf("Pre-caching torrent: %s", name)
if len(filePaths) == 0 {
return fmt.Errorf("no file paths provided")
}
for _, filePath := range filePaths {
err := func(f string) error {
file, err := os.Open(f)
if err != nil {
if os.IsNotExist(err) {
// File has probably been moved by arr, return silently
return nil
}
return fmt.Errorf("failed to open file: %s: %v", f, err)
}
defer file.Close()
// Pre-cache the file header (first 256KB) using 16KB chunks.
if err := q.readSmallChunks(file, 0, 256*1024, 16*1024); err != nil {
return err
}
if err := q.readSmallChunks(file, 1024*1024, 64*1024, 16*1024); err != nil {
return err
}
return nil
}(filePath)
if err != nil {
return err
}
}
return nil
}
func (q *QBit) readSmallChunks(file *os.File, startPos int64, totalToRead int, chunkSize int) error {
_, err := file.Seek(startPos, 0)
if err != nil {
return err
}
buf := make([]byte, chunkSize)
bytesRemaining := totalToRead
for bytesRemaining > 0 {
toRead := chunkSize
if bytesRemaining < chunkSize {
toRead = bytesRemaining
}
n, err := file.Read(buf[:toRead])
if err != nil {
if err == io.EOF {
break
}
return err
}
bytesRemaining -= n
}
return nil
}

View File

@@ -1,18 +1,21 @@
package qbit
package store
import (
"github.com/sirrobot01/decypharr/internal/utils"
"os"
"path/filepath"
"strings"
)
func createTorrentFromMagnet(magnet *utils.Magnet, category, source string) *Torrent {
func createTorrentFromMagnet(req *ImportRequest) *Torrent {
magnet := req.Magnet
arrName := req.Arr.Name
torrent := &Torrent{
ID: "",
Hash: strings.ToLower(magnet.InfoHash),
Name: magnet.Name,
Size: magnet.Size,
Category: category,
Source: source,
Category: arrName,
Source: string(req.Type),
State: "downloading",
MagnetUri: magnet.Link,
@@ -22,6 +25,7 @@ func createTorrentFromMagnet(magnet *utils.Magnet, category, source string) *Tor
AutoTmm: false,
Ratio: 1,
RatioLimit: 1,
SavePath: filepath.Join(req.DownloadFolder, arrName) + string(os.PathSeparator),
}
return torrent
}

103
pkg/store/request.go Normal file
View File

@@ -0,0 +1,103 @@
package store
import (
"bytes"
"encoding/json"
"github.com/sirrobot01/decypharr/internal/request"
"github.com/sirrobot01/decypharr/internal/utils"
"github.com/sirrobot01/decypharr/pkg/arr"
debridTypes "github.com/sirrobot01/decypharr/pkg/debrid/types"
"net/http"
"net/url"
"time"
)
type ImportType string
const (
ImportTypeQBitTorrent ImportType = "qbit"
ImportTypeAPI ImportType = "api"
)
func NewImportRequest(debrid string, downloadFolder string, magnet *utils.Magnet, arr *arr.Arr, isSymlink, downloadUncached bool, callBackUrl string, importType ImportType) *ImportRequest {
return &ImportRequest{
Status: "started",
DownloadFolder: downloadFolder,
Debrid: debrid,
Magnet: magnet,
Arr: arr,
IsSymlink: isSymlink,
DownloadUncached: downloadUncached,
CallBackUrl: callBackUrl,
Type: importType,
}
}
type ImportRequest struct {
DownloadFolder string `json:"downloadFolder"`
Debrid string `json:"debrid"`
Magnet *utils.Magnet `json:"magnet"`
Arr *arr.Arr `json:"arr"`
IsSymlink bool `json:"isSymlink"`
DownloadUncached bool `json:"downloadUncached"`
CallBackUrl string `json:"callBackUrl"`
Status string `json:"status"`
CompletedAt time.Time `json:"completedAt,omitempty"`
Error error `json:"error,omitempty"`
Type ImportType `json:"type"`
Async bool `json:"async"`
}
type importResponse struct {
Status string `json:"status"`
CompletedAt time.Time `json:"completedAt"`
Error error `json:"error"`
Torrent *Torrent `json:"torrent"`
Debrid *debridTypes.Torrent `json:"debrid"`
}
func (i *ImportRequest) sendCallback(torrent *Torrent, debridTorrent *debridTypes.Torrent) {
if i.CallBackUrl == "" {
return
}
// Check if the callback URL is valid
if _, err := url.ParseRequestURI(i.CallBackUrl); err != nil {
return
}
client := request.New()
payload, err := json.Marshal(&importResponse{
Status: i.Status,
Error: i.Error,
CompletedAt: i.CompletedAt,
Torrent: torrent,
Debrid: debridTorrent,
})
if err != nil {
return
}
req, err := http.NewRequest("POST", i.CallBackUrl, bytes.NewReader(payload))
if err != nil {
return
}
req.Header.Set("Content-Type", "application/json")
_, _ = client.Do(req)
}
func (i *ImportRequest) markAsFailed(err error, torrent *Torrent, debridTorrent *debridTypes.Torrent) {
i.Status = "failed"
i.Error = err
i.CompletedAt = time.Now()
i.sendCallback(torrent, debridTorrent)
}
func (i *ImportRequest) markAsCompleted(torrent *Torrent, debridTorrent *debridTypes.Torrent) {
i.Status = "completed"
i.Error = nil
i.CompletedAt = time.Now()
i.sendCallback(torrent, debridTorrent)
}

75
pkg/store/store.go Normal file
View File

@@ -0,0 +1,75 @@
package store
import (
"cmp"
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/logger"
"github.com/sirrobot01/decypharr/pkg/arr"
"github.com/sirrobot01/decypharr/pkg/debrid"
"github.com/sirrobot01/decypharr/pkg/repair"
"sync"
"time"
)
type Store struct {
repair *repair.Repair
arr *arr.Storage
debrid *debrid.Storage
torrents *TorrentStorage
logger zerolog.Logger
refreshInterval time.Duration
skipPreCache bool
downloadSemaphore chan struct{}
}
var (
instance *Store
once sync.Once
)
// GetStore returns the singleton instance
func GetStore() *Store {
once.Do(func() {
arrs := arr.NewStorage()
deb := debrid.NewStorage()
cfg := config.Get()
qbitCfg := cfg.QBitTorrent
instance = &Store{
repair: repair.New(arrs, deb),
arr: arrs,
debrid: deb,
torrents: newTorrentStorage(cfg.TorrentsFile()),
logger: logger.New("store"),
refreshInterval: time.Duration(cmp.Or(qbitCfg.RefreshInterval, 10)) * time.Minute,
skipPreCache: qbitCfg.SkipPreCache,
downloadSemaphore: make(chan struct{}, cmp.Or(qbitCfg.MaxDownloads, 5)),
}
})
return instance
}
func Reset() {
if instance != nil {
if instance.debrid != nil {
instance.debrid.Reset()
}
close(instance.downloadSemaphore)
}
once = sync.Once{}
instance = nil
}
func (s *Store) GetArr() *arr.Storage {
return s.arr
}
func (s *Store) GetDebrid() *debrid.Storage {
return s.debrid
}
func (s *Store) GetRepair() *repair.Repair {
return s.repair
}
func (s *Store) GetTorrentStorage() *TorrentStorage {
return s.torrents
}

210
pkg/store/torrent.go Normal file
View File

@@ -0,0 +1,210 @@
package store
import (
"cmp"
"context"
"fmt"
"github.com/sirrobot01/decypharr/internal/request"
"github.com/sirrobot01/decypharr/internal/utils"
debridTypes "github.com/sirrobot01/decypharr/pkg/debrid"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
"os"
"path/filepath"
"time"
)
func (s *Store) AddTorrent(ctx context.Context, importReq *ImportRequest) error {
torrent := createTorrentFromMagnet(importReq)
debridTorrent, err := debridTypes.ProcessTorrent(ctx, s.debrid, importReq.Debrid, importReq.Magnet, importReq.Arr, importReq.IsSymlink, importReq.DownloadUncached)
if err != nil || debridTorrent == nil {
if err == nil {
err = fmt.Errorf("failed to process torrent")
}
// This error is returned immediately to the user(no need for callback)
return err
}
torrent = s.UpdateTorrentMin(torrent, debridTorrent)
s.torrents.AddOrUpdate(torrent)
go s.processFiles(torrent, debridTorrent, importReq) // We can send async for file processing not to delay the response
return nil
}
func (s *Store) processFiles(torrent *Torrent, debridTorrent *types.Torrent, importReq *ImportRequest) {
client := s.debrid.GetClient(debridTorrent.Debrid)
downloadingStatuses := client.GetDownloadingStatus()
_arr := importReq.Arr
for debridTorrent.Status != "downloaded" {
s.logger.Debug().Msgf("%s <- (%s) Download Progress: %.2f%%", debridTorrent.Debrid, debridTorrent.Name, debridTorrent.Progress)
dbT, err := client.CheckStatus(debridTorrent, importReq.IsSymlink)
if err != nil {
if dbT != nil && dbT.Id != "" {
// Delete the torrent if it was not downloaded
go func() {
_ = client.DeleteTorrent(dbT.Id)
}()
}
s.logger.Error().Msgf("Error checking status: %v", err)
s.markTorrentAsFailed(torrent)
go func() {
_arr.Refresh()
}()
importReq.markAsFailed(err, torrent, debridTorrent)
return
}
debridTorrent = dbT
torrent = s.UpdateTorrentMin(torrent, debridTorrent)
// Exit the loop for downloading statuses to prevent memory buildup
if debridTorrent.Status == "downloaded" || !utils.Contains(downloadingStatuses, debridTorrent.Status) {
break
}
if !utils.Contains(client.GetDownloadingStatus(), debridTorrent.Status) {
break
}
time.Sleep(s.refreshInterval)
}
var torrentSymlinkPath string
var err error
debridTorrent.Arr = _arr
// Check if debrid supports webdav by checking cache
timer := time.Now()
if importReq.IsSymlink {
caches := s.debrid.GetCaches()
cache, useWebdav := caches[debridTorrent.Debrid]
if useWebdav {
s.logger.Info().Msgf("Using internal webdav for %s", debridTorrent.Debrid)
// Use webdav to download the file
if err := cache.Add(debridTorrent); err != nil {
s.logger.Error().Msgf("Error adding torrent to cache: %v", err)
s.markTorrentAsFailed(torrent)
importReq.markAsFailed(err, torrent, debridTorrent)
return
}
rclonePath := filepath.Join(debridTorrent.MountPath, cache.GetTorrentFolder(debridTorrent)) // /mnt/remote/realdebrid/MyTVShow
torrentFolderNoExt := utils.RemoveExtension(debridTorrent.Name)
torrentSymlinkPath, err = s.createSymlinksWebdav(torrent, debridTorrent, rclonePath, torrentFolderNoExt) // /mnt/symlinks/{category}/MyTVShow/
} else {
// User is using either zurg or debrid webdav
torrentSymlinkPath, err = s.ProcessSymlink(torrent) // /mnt/symlinks/{category}/MyTVShow/
}
} else {
torrentSymlinkPath, err = s.ProcessManualFile(torrent)
}
if err != nil {
s.markTorrentAsFailed(torrent)
go func() {
_ = client.DeleteTorrent(debridTorrent.Id)
}()
s.logger.Info().Msgf("Error: %v", err)
importReq.markAsFailed(err, torrent, debridTorrent)
return
}
torrent.TorrentPath = torrentSymlinkPath
s.UpdateTorrent(torrent, debridTorrent)
s.logger.Info().Msgf("Adding %s took %s", debridTorrent.Name, time.Since(timer))
go importReq.markAsCompleted(torrent, debridTorrent) // Mark the import request as completed, send callback if needed
go func() {
if err := request.SendDiscordMessage("download_complete", "success", torrent.discordContext()); err != nil {
s.logger.Error().Msgf("Error sending discord message: %v", err)
}
}()
_arr.Refresh()
}
func (s *Store) markTorrentAsFailed(t *Torrent) *Torrent {
t.State = "error"
s.torrents.AddOrUpdate(t)
go func() {
if err := request.SendDiscordMessage("download_failed", "error", t.discordContext()); err != nil {
s.logger.Error().Msgf("Error sending discord message: %v", err)
}
}()
return t
}
func (s *Store) UpdateTorrentMin(t *Torrent, debridTorrent *types.Torrent) *Torrent {
if debridTorrent == nil {
return t
}
addedOn, err := time.Parse(time.RFC3339, debridTorrent.Added)
if err != nil {
addedOn = time.Now()
}
totalSize := debridTorrent.Bytes
progress := (cmp.Or(debridTorrent.Progress, 0.0)) / 100.0
sizeCompleted := int64(float64(totalSize) * progress)
var speed int64
if debridTorrent.Speed != 0 {
speed = debridTorrent.Speed
}
var eta int
if speed != 0 {
eta = int((totalSize - sizeCompleted) / speed)
}
t.ID = debridTorrent.Id
t.Name = debridTorrent.Name
t.AddedOn = addedOn.Unix()
t.DebridTorrent = debridTorrent
t.Debrid = debridTorrent.Debrid
t.Size = totalSize
t.Completed = sizeCompleted
t.Downloaded = sizeCompleted
t.DownloadedSession = sizeCompleted
t.Uploaded = sizeCompleted
t.UploadedSession = sizeCompleted
t.AmountLeft = totalSize - sizeCompleted
t.Progress = progress
t.Eta = eta
t.Dlspeed = speed
t.Upspeed = speed
t.ContentPath = filepath.Join(t.SavePath, t.Name) + string(os.PathSeparator)
return t
}
func (s *Store) UpdateTorrent(t *Torrent, debridTorrent *types.Torrent) *Torrent {
if debridTorrent == nil {
return t
}
if debridClient := s.debrid.GetClients()[debridTorrent.Debrid]; debridClient != nil {
if debridTorrent.Status != "downloaded" {
_ = debridClient.UpdateTorrent(debridTorrent)
}
}
t = s.UpdateTorrentMin(t, debridTorrent)
t.ContentPath = t.TorrentPath + string(os.PathSeparator)
if t.IsReady() {
t.State = "pausedUP"
s.torrents.Update(t)
return t
}
ticker := time.NewTicker(100 * time.Millisecond)
defer ticker.Stop()
for {
select {
case <-ticker.C:
if t.IsReady() {
t.State = "pausedUP"
s.torrents.Update(t)
return t
}
updatedT := s.UpdateTorrent(t, debridTorrent)
t = updatedT
case <-time.After(10 * time.Minute): // Add a timeout
return t
}
}
}

View File

@@ -1,18 +1,15 @@
package qbit
package store
import (
"encoding/json"
"fmt"
"github.com/sirrobot01/decypharr/pkg/service"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
"os"
"sort"
"sync"
)
func keyPair(hash, category string) string {
if category == "" {
category = "uncategorized"
}
return fmt.Sprintf("%s|%s", hash, category)
}
@@ -36,13 +33,13 @@ func loadTorrentsFromJSON(filename string) (Torrents, error) {
return torrents, nil
}
func NewTorrentStorage(filename string) *TorrentStorage {
func newTorrentStorage(filename string) *TorrentStorage {
// Open the JSON file and read the data
torrents, err := loadTorrentsFromJSON(filename)
if err != nil {
torrents = make(Torrents)
}
// Create a new TorrentStorage
// Create a new Storage
return &TorrentStorage{
torrents: torrents,
filename: filename,
@@ -187,12 +184,9 @@ func (ts *TorrentStorage) Delete(hash, category string, removeFromDebrid bool) {
return
}
if removeFromDebrid && torrent.ID != "" && torrent.Debrid != "" {
dbClient := service.GetDebrid().GetClient(torrent.Debrid)
dbClient := GetStore().debrid.GetClient(torrent.Debrid)
if dbClient != nil {
err := dbClient.DeleteTorrent(torrent.ID)
if err != nil {
fmt.Println(err)
}
_ = dbClient.DeleteTorrent(torrent.ID)
}
}
@@ -244,10 +238,12 @@ func (ts *TorrentStorage) DeleteMultiple(hashes []string, removeFromDebrid bool)
}
}()
clients := GetStore().debrid.GetClients()
go func() {
for id, debrid := range toDelete {
dbClient := service.GetDebrid().GetClient(debrid)
if dbClient == nil {
dbClient, ok := clients[debrid]
if !ok {
continue
}
err := dbClient.DeleteTorrent(id)
@@ -278,3 +274,73 @@ func (ts *TorrentStorage) Reset() {
defer ts.mu.Unlock()
ts.torrents = make(Torrents)
}
type Torrent struct {
ID string `json:"id"`
Debrid string `json:"debrid"`
TorrentPath string `json:"-"`
DebridTorrent *types.Torrent `json:"-"`
AddedOn int64 `json:"added_on,omitempty"`
AmountLeft int64 `json:"amount_left"`
AutoTmm bool `json:"auto_tmm"`
Availability float64 `json:"availability,omitempty"`
Category string `json:"category,omitempty"`
Completed int64 `json:"completed"`
CompletionOn int `json:"completion_on,omitempty"`
ContentPath string `json:"content_path"`
DlLimit int `json:"dl_limit"`
Dlspeed int64 `json:"dlspeed"`
Downloaded int64 `json:"downloaded"`
DownloadedSession int64 `json:"downloaded_session"`
Eta int `json:"eta"`
FlPiecePrio bool `json:"f_l_piece_prio,omitempty"`
ForceStart bool `json:"force_start,omitempty"`
Hash string `json:"hash"`
LastActivity int64 `json:"last_activity,omitempty"`
MagnetUri string `json:"magnet_uri,omitempty"`
MaxRatio int `json:"max_ratio,omitempty"`
MaxSeedingTime int `json:"max_seeding_time,omitempty"`
Name string `json:"name,omitempty"`
NumComplete int `json:"num_complete,omitempty"`
NumIncomplete int `json:"num_incomplete,omitempty"`
NumLeechs int `json:"num_leechs,omitempty"`
NumSeeds int `json:"num_seeds,omitempty"`
Priority int `json:"priority,omitempty"`
Progress float64 `json:"progress"`
Ratio int `json:"ratio,omitempty"`
RatioLimit int `json:"ratio_limit,omitempty"`
SavePath string `json:"save_path"`
SeedingTimeLimit int `json:"seeding_time_limit,omitempty"`
SeenComplete int64 `json:"seen_complete,omitempty"`
SeqDl bool `json:"seq_dl"`
Size int64 `json:"size,omitempty"`
State string `json:"state,omitempty"`
SuperSeeding bool `json:"super_seeding"`
Tags string `json:"tags,omitempty"`
TimeActive int `json:"time_active,omitempty"`
TotalSize int64 `json:"total_size,omitempty"`
Tracker string `json:"tracker,omitempty"`
UpLimit int64 `json:"up_limit,omitempty"`
Uploaded int64 `json:"uploaded,omitempty"`
UploadedSession int64 `json:"uploaded_session,omitempty"`
Upspeed int64 `json:"upspeed,omitempty"`
Source string `json:"source,omitempty"`
sync.Mutex
}
func (t *Torrent) IsReady() bool {
return (t.AmountLeft <= 0 || t.Progress == 1) && t.TorrentPath != ""
}
func (t *Torrent) discordContext() string {
format := `
**Name:** %s
**Arr:** %s
**Hash:** %s
**MagnetURI:** %s
**Debrid:** %s
`
return fmt.Sprintf(format, t.Name, t.Category, t.Hash, t.MagnetUri, t.Debrid)
}

View File

@@ -2,6 +2,7 @@ package web
import (
"fmt"
"github.com/sirrobot01/decypharr/pkg/store"
"net/http"
"strings"
"time"
@@ -12,34 +13,37 @@ import (
"github.com/sirrobot01/decypharr/internal/request"
"github.com/sirrobot01/decypharr/internal/utils"
"github.com/sirrobot01/decypharr/pkg/arr"
"github.com/sirrobot01/decypharr/pkg/qbit"
"github.com/sirrobot01/decypharr/pkg/service"
"github.com/sirrobot01/decypharr/pkg/version"
)
func (ui *Handler) handleGetArrs(w http.ResponseWriter, r *http.Request) {
svc := service.GetService()
request.JSONResponse(w, svc.Arr.GetAll(), http.StatusOK)
func (wb *Web) handleGetArrs(w http.ResponseWriter, r *http.Request) {
_store := store.GetStore()
request.JSONResponse(w, _store.GetArr().GetAll(), http.StatusOK)
}
func (ui *Handler) handleAddContent(w http.ResponseWriter, r *http.Request) {
func (wb *Web) handleAddContent(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
if err := r.ParseMultipartForm(32 << 20); err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
svc := service.GetService()
_store := store.GetStore()
results := make([]*qbit.ImportRequest, 0)
results := make([]*store.ImportRequest, 0)
errs := make([]string, 0)
arrName := r.FormValue("arr")
notSymlink := r.FormValue("notSymlink") == "true"
downloadUncached := r.FormValue("downloadUncached") == "true"
if arrName == "" {
arrName = "uncategorized"
debridName := r.FormValue("debrid")
callbackUrl := r.FormValue("callbackUrl")
downloadFolder := r.FormValue("downloadFolder")
if downloadFolder == "" {
downloadFolder = config.Get().QBitTorrent.DownloadFolder
}
_arr := svc.Arr.Get(arrName)
downloadUncached := r.FormValue("downloadUncached") == "true"
_arr := _store.GetArr().Get(arrName)
if _arr == nil {
_arr = arr.New(arrName, "", "", false, false, &downloadUncached)
}
@@ -59,8 +63,9 @@ func (ui *Handler) handleAddContent(w http.ResponseWriter, r *http.Request) {
errs = append(errs, fmt.Sprintf("Failed to parse URL %s: %v", url, err))
continue
}
importReq := qbit.NewImportRequest(magnet, _arr, !notSymlink, downloadUncached)
if err := importReq.Process(ui.qbit); err != nil {
importReq := store.NewImportRequest(debridName, downloadFolder, magnet, _arr, !notSymlink, downloadUncached, callbackUrl, store.ImportTypeAPI)
if err := _store.AddTorrent(ctx, importReq); err != nil {
errs = append(errs, fmt.Sprintf("URL %s: %v", url, err))
continue
}
@@ -83,8 +88,8 @@ func (ui *Handler) handleAddContent(w http.ResponseWriter, r *http.Request) {
continue
}
importReq := qbit.NewImportRequest(magnet, _arr, !notSymlink, downloadUncached)
err = importReq.Process(ui.qbit)
importReq := store.NewImportRequest(debridName, downloadFolder, magnet, _arr, !notSymlink, downloadUncached, callbackUrl, store.ImportTypeAPI)
err = _store.AddTorrent(ctx, importReq)
if err != nil {
errs = append(errs, fmt.Sprintf("File %s: %v", fileHeader.Filename, err))
continue
@@ -94,27 +99,27 @@ func (ui *Handler) handleAddContent(w http.ResponseWriter, r *http.Request) {
}
request.JSONResponse(w, struct {
Results []*qbit.ImportRequest `json:"results"`
Errors []string `json:"errors,omitempty"`
Results []*store.ImportRequest `json:"results"`
Errors []string `json:"errors,omitempty"`
}{
Results: results,
Errors: errs,
}, http.StatusOK)
}
func (ui *Handler) handleRepairMedia(w http.ResponseWriter, r *http.Request) {
func (wb *Web) handleRepairMedia(w http.ResponseWriter, r *http.Request) {
var req RepairRequest
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
svc := service.GetService()
_store := store.GetStore()
var arrs []string
if req.ArrName != "" {
_arr := svc.Arr.Get(req.ArrName)
_arr := _store.GetArr().Get(req.ArrName)
if _arr == nil {
http.Error(w, "No Arrs found to repair", http.StatusNotFound)
return
@@ -124,15 +129,15 @@ func (ui *Handler) handleRepairMedia(w http.ResponseWriter, r *http.Request) {
if req.Async {
go func() {
if err := svc.Repair.AddJob(arrs, req.MediaIds, req.AutoProcess, false); err != nil {
ui.logger.Error().Err(err).Msg("Failed to repair media")
if err := _store.GetRepair().AddJob(arrs, req.MediaIds, req.AutoProcess, false); err != nil {
wb.logger.Error().Err(err).Msg("Failed to repair media")
}
}()
request.JSONResponse(w, "Repair process started", http.StatusOK)
return
}
if err := svc.Repair.AddJob([]string{req.ArrName}, req.MediaIds, req.AutoProcess, false); err != nil {
if err := _store.GetRepair().AddJob([]string{req.ArrName}, req.MediaIds, req.AutoProcess, false); err != nil {
http.Error(w, fmt.Sprintf("Failed to repair: %v", err), http.StatusInternalServerError)
return
}
@@ -140,16 +145,16 @@ func (ui *Handler) handleRepairMedia(w http.ResponseWriter, r *http.Request) {
request.JSONResponse(w, "Repair completed", http.StatusOK)
}
func (ui *Handler) handleGetVersion(w http.ResponseWriter, r *http.Request) {
func (wb *Web) handleGetVersion(w http.ResponseWriter, r *http.Request) {
v := version.GetInfo()
request.JSONResponse(w, v, http.StatusOK)
}
func (ui *Handler) handleGetTorrents(w http.ResponseWriter, r *http.Request) {
request.JSONResponse(w, ui.qbit.Storage.GetAllSorted("", "", nil, "added_on", false), http.StatusOK)
func (wb *Web) handleGetTorrents(w http.ResponseWriter, r *http.Request) {
request.JSONResponse(w, wb.torrents.GetAllSorted("", "", nil, "added_on", false), http.StatusOK)
}
func (ui *Handler) handleDeleteTorrent(w http.ResponseWriter, r *http.Request) {
func (wb *Web) handleDeleteTorrent(w http.ResponseWriter, r *http.Request) {
hash := chi.URLParam(r, "hash")
category := chi.URLParam(r, "category")
removeFromDebrid := r.URL.Query().Get("removeFromDebrid") == "true"
@@ -157,11 +162,11 @@ func (ui *Handler) handleDeleteTorrent(w http.ResponseWriter, r *http.Request) {
http.Error(w, "No hash provided", http.StatusBadRequest)
return
}
ui.qbit.Storage.Delete(hash, category, removeFromDebrid)
wb.torrents.Delete(hash, category, removeFromDebrid)
w.WriteHeader(http.StatusOK)
}
func (ui *Handler) handleDeleteTorrents(w http.ResponseWriter, r *http.Request) {
func (wb *Web) handleDeleteTorrents(w http.ResponseWriter, r *http.Request) {
hashesStr := r.URL.Query().Get("hashes")
removeFromDebrid := r.URL.Query().Get("removeFromDebrid") == "true"
if hashesStr == "" {
@@ -169,15 +174,15 @@ func (ui *Handler) handleDeleteTorrents(w http.ResponseWriter, r *http.Request)
return
}
hashes := strings.Split(hashesStr, ",")
ui.qbit.Storage.DeleteMultiple(hashes, removeFromDebrid)
wb.torrents.DeleteMultiple(hashes, removeFromDebrid)
w.WriteHeader(http.StatusOK)
}
func (ui *Handler) handleGetConfig(w http.ResponseWriter, r *http.Request) {
func (wb *Web) handleGetConfig(w http.ResponseWriter, r *http.Request) {
cfg := config.Get()
arrCfgs := make([]config.Arr, 0)
svc := service.GetService()
for _, a := range svc.Arr.GetAll() {
_store := store.GetStore()
for _, a := range _store.GetArr().GetAll() {
arrCfgs = append(arrCfgs, config.Arr{
Host: a.Host,
Name: a.Name,
@@ -191,11 +196,11 @@ func (ui *Handler) handleGetConfig(w http.ResponseWriter, r *http.Request) {
request.JSONResponse(w, cfg, http.StatusOK)
}
func (ui *Handler) handleUpdateConfig(w http.ResponseWriter, r *http.Request) {
func (wb *Web) handleUpdateConfig(w http.ResponseWriter, r *http.Request) {
// Decode the JSON body
var updatedConfig config.Config
if err := json.NewDecoder(r.Body).Decode(&updatedConfig); err != nil {
ui.logger.Error().Err(err).Msg("Failed to decode config update request")
wb.logger.Error().Err(err).Msg("Failed to decode config update request")
http.Error(w, "Invalid request body: "+err.Error(), http.StatusBadRequest)
return
}
@@ -232,11 +237,12 @@ func (ui *Handler) handleUpdateConfig(w http.ResponseWriter, r *http.Request) {
}
// Update Arrs through the service
svc := service.GetService()
svc.Arr.Clear() // Clear existing arrs
_store := store.GetStore()
_arr := _store.GetArr()
_arr.Clear() // Clear existing arrs
for _, a := range updatedConfig.Arrs {
svc.Arr.AddOrUpdate(&arr.Arr{
_arr.AddOrUpdate(&arr.Arr{
Name: a.Name,
Host: a.Host,
Token: a.Token,
@@ -263,25 +269,25 @@ func (ui *Handler) handleUpdateConfig(w http.ResponseWriter, r *http.Request) {
request.JSONResponse(w, map[string]string{"status": "success"}, http.StatusOK)
}
func (ui *Handler) handleGetRepairJobs(w http.ResponseWriter, r *http.Request) {
svc := service.GetService()
request.JSONResponse(w, svc.Repair.GetJobs(), http.StatusOK)
func (wb *Web) handleGetRepairJobs(w http.ResponseWriter, r *http.Request) {
_store := store.GetStore()
request.JSONResponse(w, _store.GetRepair().GetJobs(), http.StatusOK)
}
func (ui *Handler) handleProcessRepairJob(w http.ResponseWriter, r *http.Request) {
func (wb *Web) handleProcessRepairJob(w http.ResponseWriter, r *http.Request) {
id := chi.URLParam(r, "id")
if id == "" {
http.Error(w, "No job ID provided", http.StatusBadRequest)
return
}
svc := service.GetService()
if err := svc.Repair.ProcessJob(id); err != nil {
ui.logger.Error().Err(err).Msg("Failed to process repair job")
_store := store.GetStore()
if err := _store.GetRepair().ProcessJob(id); err != nil {
wb.logger.Error().Err(err).Msg("Failed to process repair job")
}
w.WriteHeader(http.StatusOK)
}
func (ui *Handler) handleDeleteRepairJob(w http.ResponseWriter, r *http.Request) {
func (wb *Web) handleDeleteRepairJob(w http.ResponseWriter, r *http.Request) {
// Read ids from body
var req struct {
IDs []string `json:"ids"`
@@ -295,7 +301,22 @@ func (ui *Handler) handleDeleteRepairJob(w http.ResponseWriter, r *http.Request)
return
}
svc := service.GetService()
svc.Repair.DeleteJobs(req.IDs)
_store := store.GetStore()
_store.GetRepair().DeleteJobs(req.IDs)
w.WriteHeader(http.StatusOK)
}
func (wb *Web) handleStopRepairJob(w http.ResponseWriter, r *http.Request) {
id := chi.URLParam(r, "id")
if id == "" {
http.Error(w, "No job ID provided", http.StatusBadRequest)
return
}
_store := store.GetStore()
if err := _store.GetRepair().StopJob(id); err != nil {
wb.logger.Error().Err(err).Msg("Failed to stop repair job")
http.Error(w, "Failed to stop job: "+err.Error(), http.StatusInternalServerError)
return
}
w.WriteHeader(http.StatusOK)
}

View File

@@ -6,7 +6,7 @@ import (
"net/http"
)
func (ui *Handler) verifyAuth(username, password string) bool {
func (wb *Web) verifyAuth(username, password string) bool {
// If you're storing hashed password, use bcrypt to compare
if username == "" {
return false
@@ -22,11 +22,11 @@ func (ui *Handler) verifyAuth(username, password string) bool {
return err == nil
}
func (ui *Handler) skipAuthHandler(w http.ResponseWriter, r *http.Request) {
func (wb *Web) skipAuthHandler(w http.ResponseWriter, r *http.Request) {
cfg := config.Get()
cfg.UseAuth = false
if err := cfg.Save(); err != nil {
ui.logger.Error().Err(err).Msg("failed to save config")
wb.logger.Error().Err(err).Msg("failed to save config")
http.Error(w, "failed to save config", http.StatusInternalServerError)
return
}

View File

@@ -6,7 +6,7 @@ import (
"net/http"
)
func (ui *Handler) setupMiddleware(next http.Handler) http.Handler {
func (wb *Web) setupMiddleware(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
cfg := config.Get()
needsAuth := cfg.NeedsSetup()
@@ -24,7 +24,7 @@ func (ui *Handler) setupMiddleware(next http.Handler) http.Handler {
})
}
func (ui *Handler) authMiddleware(next http.Handler) http.Handler {
func (wb *Web) authMiddleware(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// Check if setup is needed
cfg := config.Get()
@@ -38,7 +38,7 @@ func (ui *Handler) authMiddleware(next http.Handler) http.Handler {
return
}
session, _ := store.Get(r, "auth-session")
session, _ := wb.cookie.Get(r, "auth-session")
auth, ok := session.Values["authenticated"].(bool)
if !ok || !auth {

View File

@@ -5,35 +5,36 @@ import (
"net/http"
)
func (ui *Handler) Routes() http.Handler {
func (wb *Web) Routes() http.Handler {
r := chi.NewRouter()
r.Get("/login", ui.LoginHandler)
r.Post("/login", ui.LoginHandler)
r.Get("/register", ui.RegisterHandler)
r.Post("/register", ui.RegisterHandler)
r.Get("/skip-auth", ui.skipAuthHandler)
r.Get("/version", ui.handleGetVersion)
r.Get("/login", wb.LoginHandler)
r.Post("/login", wb.LoginHandler)
r.Get("/register", wb.RegisterHandler)
r.Post("/register", wb.RegisterHandler)
r.Get("/skip-auth", wb.skipAuthHandler)
r.Get("/version", wb.handleGetVersion)
r.Group(func(r chi.Router) {
r.Use(ui.authMiddleware)
r.Use(ui.setupMiddleware)
r.Get("/", ui.IndexHandler)
r.Get("/download", ui.DownloadHandler)
r.Get("/repair", ui.RepairHandler)
r.Get("/config", ui.ConfigHandler)
r.Use(wb.authMiddleware)
r.Use(wb.setupMiddleware)
r.Get("/", wb.IndexHandler)
r.Get("/download", wb.DownloadHandler)
r.Get("/repair", wb.RepairHandler)
r.Get("/config", wb.ConfigHandler)
r.Route("/api", func(r chi.Router) {
r.Get("/arrs", ui.handleGetArrs)
r.Post("/add", ui.handleAddContent)
r.Post("/repair", ui.handleRepairMedia)
r.Get("/repair/jobs", ui.handleGetRepairJobs)
r.Post("/repair/jobs/{id}/process", ui.handleProcessRepairJob)
r.Delete("/repair/jobs", ui.handleDeleteRepairJob)
r.Get("/torrents", ui.handleGetTorrents)
r.Delete("/torrents/{category}/{hash}", ui.handleDeleteTorrent)
r.Delete("/torrents/", ui.handleDeleteTorrents)
r.Get("/config", ui.handleGetConfig)
r.Post("/config", ui.handleUpdateConfig)
r.Get("/arrs", wb.handleGetArrs)
r.Post("/add", wb.handleAddContent)
r.Post("/repair", wb.handleRepairMedia)
r.Get("/repair/jobs", wb.handleGetRepairJobs)
r.Post("/repair/jobs/{id}/process", wb.handleProcessRepairJob)
r.Post("/repair/jobs/{id}/stop", wb.handleStopRepairJob)
r.Delete("/repair/jobs", wb.handleDeleteRepairJob)
r.Get("/torrents", wb.handleGetTorrents)
r.Delete("/torrents/{category}/{hash}", wb.handleDeleteTorrent)
r.Delete("/torrents/", wb.handleDeleteTorrents)
r.Get("/config", wb.handleGetConfig)
r.Post("/config", wb.handleUpdateConfig)
})
})

View File

@@ -245,43 +245,48 @@
<!-- Step 5: Repair Configuration -->
<div class="setup-step d-none" id="step5">
<div class="section mb-5">
<div class="row mb-2">
<div class="row mb-3">
<div class="col">
<div class="form-check me-3 d-inline-block">
<input type="checkbox" class="form-check-input" name="repair.enabled" id="repair.enabled">
<label class="form-check-label" for="repair.enabled">Enable Repair</label>
<label class="form-check-label" for="repair.enabled">Enable Scheduled Repair</label>
</div>
</div>
</div>
<div id="repairCol" class="d-none">
<div>
<div class="row">
<div class="col-md-4 mb-3">
<label class="form-label" for="repair.interval">Interval</label>
<label class="form-label" for="repair.interval">Scheduled Interval</label>
<input type="text" class="form-control" name="repair.interval" id="repair.interval" placeholder="e.g., 24h">
<small class="form-text text-muted">Interval for the repair process(e.g., 24h, 1d, 03:00, or a crontab)</small>
</div>
<div class="col-md-3 mb-3">
<label class="form-label" for="repair.workers">Workers</label>
<input type="text" class="form-control" name="repair.workers" id="repair.workers">
<small class="form-text text-muted">Number of workers to use for the repair process</small>
</div>
<div class="col-md-5 mb-3">
<label class="form-label" for="repair.zurg_url">Zurg URL</label>
<input type="text" class="form-control" name="repair.zurg_url" id="repair.zurg_url" placeholder="http://zurg:9999">
<small class="form-text text-muted">Speeds up the repair process by using Zurg</small>
<small class="form-text text-muted">If you have Zurg running, you can use it to speed up the repair process</small>
</div>
</div>
<div class="row">
<div class="col-md-3 mb-3">
<div class="col-md-4 mb-3">
<div class="form-check">
<input type="checkbox" class="form-check-input" name="repair.use_webdav" id="repair.use_webdav">
<label class="form-check-label" for="repair.use_webdav">Use Webdav</label>
</div>
<small class="form-text text-muted">Use Internal Webdav for repair(make sure webdav is enabled in the debrid section</small>
</div>
<div class="col-md-3 mb-3">
<div class="col-md-4 mb-3">
<div class="form-check">
<input type="checkbox" class="form-check-input" name="repair.run_on_start" id="repair.run_on_start">
<label class="form-check-label" for="repair.run_on_start">Run on Start</label>
</div>
<small class="form-text text-muted">Run repair on startup</small>
</div>
<div class="col-md-3 mb-3">
<div class="col-md-4 mb-3">
<div class="form-check">
<input type="checkbox" class="form-check-input" name="repair.auto_process" id="repair.auto_process">
<label class="form-check-label" for="repair.auto_process">Auto Process</label>
@@ -340,7 +345,14 @@
<small class="form-text text-muted">Rate limit for the debrid service. Confirm your debrid service rate limit</small>
</div>
</div>
<div class="row">
<div class="row mb-3">
<div class="col-md-3">
<div class="form-check me-3">
<input type="checkbox" class="form-check-input useWebdav" name="debrid[${index}].use_webdav" id="debrid[${index}].use_webdav">
<label class="form-check-label" for="debrid[${index}].use_webdav">Enable WebDav Server</label>
</div>
<small class="form-text text-muted">Create an internal webdav for this debrid</small>
</div>
<div class="col-md-3">
<div class="form-check me-3">
<input type="checkbox" class="form-check-input" name="debrid[${index}].download_uncached" id="debrid[${index}].download_uncached">
@@ -348,13 +360,6 @@
</div>
<small class="form-text text-muted">Download uncached files from the debrid service</small>
</div>
<div class="col-md-3">
<div class="form-check me-3">
<input type="checkbox" class="form-check-input" name="debrid[${index}].check_cached" id="debrid[${index}].check_cached">
<label class="form-check-label" for="debrid[${index}].check_cached" disabled>Check Cached</label>
</div>
<small class="form-text text-muted">Check if the file is cached before downloading(Disabled)</small>
</div>
<div class="col-md-3">
<div class="form-check me-3">
<input type="checkbox" class="form-check-input" name="debrid[${index}].add_samples" id="debrid[${index}].add_samples">
@@ -369,16 +374,10 @@
</div>
<small class="form-text text-muted">Preprocess RARed torrents to allow reading the files inside</small>
</div>
<div class="col-md-4">
<div class="form-check me-3">
<input type="checkbox" class="form-check-input useWebdav" name="debrid[${index}].use_webdav" id="debrid[${index}].use_webdav">
<label class="form-check-label" for="debrid[${index}].use_webdav">Enable WebDav Server</label>
</div>
<small class="form-text text-muted">Create an internal webdav for this debrid</small>
</div>
</div>
<div class="webdav d-none">
<h6 class="pb-2">Webdav</h6>
<div class="webdav d-none mt-1">
<hr/>
<h6 class="pb-2">Webdav Settings</h6>
<div class="row mt-3">
<div class="col-md-3 mb-3">
<label class="form-label" for="debrid[${index}].torrents_refresh_interval">Torrents Refresh Interval</label>
@@ -441,12 +440,12 @@
</div>
</div>
<div class="row mt-3">
<div class="col mt-3">
<h6 class="pb-2">Custom Folders</h6>
<div class="col">
<h6 class="pb-2">Virtual Folders</h6>
<div class="col-12">
<p class="text-muted small">Create virtual directories with filters to organize your content</p>
<div class="directories-container" id="debrid[${index}].directories">
<!-- Dynamic directories will be added here -->
</div>
<button type="button" class="btn btn-secondary mt-2 webdav-field" onclick="addDirectory(${index});">
<i class="bi bi-plus"></i> Add Directory
@@ -842,9 +841,6 @@
// Load Repair config
if (config.repair) {
if (config.repair.enabled) {
document.getElementById('repairCol').classList.remove('d-none');
}
Object.entries(config.repair).forEach(([key, value]) => {
const input = document.querySelector(`[name="repair.${key}"]`);
if (input) {
@@ -921,14 +917,6 @@
}
});
$(document).on('change', 'input[name="repair.enabled"]', function() {
if (this.checked) {
$('#repairCol').removeClass('d-none');
} else {
$('#repairCol').addClass('d-none');
}
});
async function saveConfig(e) {
const submitButton = e.target.querySelector('button[type="submit"]');
submitButton.disabled = true;
@@ -1072,7 +1060,7 @@
debrids: [],
qbittorrent: {
download_folder: document.querySelector('[name="qbit.download_folder"]').value,
refresh_interval: parseInt(document.querySelector('[name="qbit.refresh_interval"]').value || '0', 10),
refresh_interval: parseInt(document.querySelector('[name="qbit.refresh_interval"]').value, 10),
max_downloads: parseInt(document.querySelector('[name="qbit.max_downloads"]').value || '0', 5),
skip_pre_cache: document.querySelector('[name="qbit.skip_pre_cache"]').checked
},
@@ -1082,6 +1070,7 @@
interval: document.querySelector('[name="repair.interval"]').value,
run_on_start: document.querySelector('[name="repair.run_on_start"]').checked,
zurg_url: document.querySelector('[name="repair.zurg_url"]').value,
workers: parseInt(document.querySelector('[name="repair.workers"]').value),
use_webdav: document.querySelector('[name="repair.use_webdav"]').checked,
auto_process: document.querySelector('[name="repair.auto_process"]').checked
}
@@ -1098,7 +1087,6 @@
folder: document.querySelector(`[name="debrid[${i}].folder"]`).value,
rate_limit: document.querySelector(`[name="debrid[${i}].rate_limit"]`).value,
download_uncached: document.querySelector(`[name="debrid[${i}].download_uncached"]`).checked,
check_cached: document.querySelector(`[name="debrid[${i}].check_cached"]`).checked,
unpack_rar: document.querySelector(`[name="debrid[${i}].unpack_rar"]`).checked,
add_samples: document.querySelector(`[name="debrid[${i}].add_samples"]`).checked,
use_webdav: document.querySelector(`[name="debrid[${i}].use_webdav"]`).checked

View File

@@ -17,11 +17,33 @@
<hr />
<div class="mb-3">
<label for="category" class="form-label">Enter Category</label>
<input type="text" class="form-control" id="category" name="arr" placeholder="Enter Category (e.g sonarr, radarr, radarr4k)">
<div class="row mb-3">
<div class="col-md-6">
<label for="downloadFolder" class="form-label">Download Folder</label>
<input type="text" class="form-control" id="downloadFolder" name="downloadFolder" placeholder="Enter Download Folder (e.g /downloads/torrents)">
<small class="text-muted">Default is your qbittorent download_folder</small>
</div>
<div class="col-md-6">
<label for="arr" class="form-label">Arr (if any)</label>
<input type="text" class="form-control" id="arr" name="arr" placeholder="Enter Category (e.g sonarr, radarr, radarr4k)">
<small class="text-muted">Optional, leave empty if not using Arr</small>
</div>
</div>
{{ if .HasMultiDebrid }}
<div class="row mb-3">
<div class="col-md-6">
<label for="debrid" class="form-label">Select Debrid</label>
<select class="form-select" id="debrid" name="debrid">
{{ range $index, $debrid := .Debrids }}
<option value="{{ $debrid }}" {{ if eq $index 0 }}selected{{end}}>{{ $debrid }}</option>
{{ end }}
</select>
<small class="text-muted">Select a debrid service to use for this download</small>
</div>
</div>
{{ end }}
<div class="row mb-3">
<div class="col-md-2 mb-3">
<div class="form-check d-inline-block me-3">
@@ -48,23 +70,27 @@
</div>
<script>
let downloadFolder = '{{ .DownloadFolder }}';
document.addEventListener('DOMContentLoaded', () => {
const loadSavedDownloadOptions = () => {
const savedCategory = localStorage.getItem('downloadCategory');
const savedSymlink = localStorage.getItem('downloadSymlink');
const savedDownloadUncached = localStorage.getItem('downloadUncached');
document.getElementById('category').value = savedCategory || '';
document.getElementById('arr').value = savedCategory || '';
document.getElementById('isSymlink').checked = savedSymlink === 'true';
document.getElementById('downloadUncached').checked = savedDownloadUncached === 'true';
document.getElementById('downloadFolder').value = localStorage.getItem('downloadFolder') || downloadFolder || '';
};
const saveCurrentDownloadOptions = () => {
const category = document.getElementById('category').value;
const arr = document.getElementById('arr').value;
const isSymlink = document.getElementById('isSymlink').checked;
const downloadUncached = document.getElementById('downloadUncached').checked;
localStorage.setItem('downloadCategory', category);
const downloadFolder = document.getElementById('downloadFolder').value;
localStorage.setItem('downloadCategory', arr);
localStorage.setItem('downloadSymlink', isSymlink.toString());
localStorage.setItem('downloadUncached', downloadUncached.toString());
localStorage.setItem('downloadFolder', downloadFolder);
};
// Load the last used download options from local storage
@@ -108,9 +134,11 @@
return;
}
formData.append('arr', document.getElementById('category').value);
formData.append('arr', document.getElementById('arr').value);
formData.append('downloadFolder', document.getElementById('downloadFolder').value);
formData.append('notSymlink', document.getElementById('isSymlink').checked);
formData.append('downloadUncached', document.getElementById('downloadUncached').checked);
formData.append('debrid', document.getElementById('debrid') ? document.getElementById('debrid').value : '');
const response = await fetcher('/api/add', {
method: 'POST',
@@ -139,7 +167,7 @@
});
// Save the download options to local storage when they change
document.getElementById('category').addEventListener('change', saveCurrentDownloadOptions);
document.getElementById('arr').addEventListener('change', saveCurrentDownloadOptions);
document.getElementById('isSymlink').addEventListener('change', saveCurrentDownloadOptions);
// Read the URL parameters for a magnet link and add it to the download queue if found

View File

@@ -129,11 +129,11 @@
<td>${torrent.debrid || 'None'}</td>
<td><span class="badge ${getStateColor(torrent.state)}">${torrent.state}</span></td>
<td>
<button class="btn btn-sm btn-outline-danger" onclick="deleteTorrent('${torrent.hash}', '${torrent.category}', false)">
<button class="btn btn-sm btn-outline-danger" onclick="deleteTorrent('${torrent.hash}', '${torrent.category || ''}', false)">
<i class="bi bi-trash"></i>
</button>
${torrent.debrid && torrent.id ? `
<button class="btn btn-sm btn-outline-danger" onclick="deleteTorrent('${torrent.hash}', '${torrent.category}', true)">
<button class="btn btn-sm btn-outline-danger" onclick="deleteTorrent('${torrent.hash}', '${torrent.category || ''}', true)">
<i class="bi bi-trash"></i> Remove from Debrid
</button>
` : ''}
@@ -485,7 +485,7 @@
}
},
'delete': async (torrent) => {
await deleteTorrent(torrent.hash);
await deleteTorrent(torrent.hash, torrent.category || '', false);
}
};

View File

@@ -36,6 +36,22 @@
background-color: var(--bg-color);
color: var(--text-color);
transition: background-color 0.3s ease, color 0.3s ease;
display: flex;
flex-direction: column;
min-height: 100vh;
}
footer {
background-color: var(--bg-color);
border-top: 1px solid var(--border-color);
}
footer a {
color: var(--text-color);
}
footer a:hover {
color: var(--primary-color);
}
.navbar {
@@ -193,6 +209,20 @@
{{ else }}
{{ end }}
<footer class="mt-auto py-2 text-center border-top">
<div class="container">
<small class="text-muted">
<a href="https://github.com/sirrobot01/decypharr" target="_blank" class="text-decoration-none me-3">
<i class="bi bi-github me-1"></i>GitHub
</a>
<a href="https://sirrobot01.github.io/decypharr" target="_blank" class="text-decoration-none">
<i class="bi bi-book me-1"></i>Documentation
</a>
</small>
</div>
</footer>
<script src="https://cdn.jsdelivr.net/npm/bootstrap@5.3.0-alpha1/dist/js/bootstrap.bundle.min.js"></script>
<script src="https://code.jquery.com/jquery-3.6.0.min.js"></script>
<script src="https://cdn.jsdelivr.net/npm/select2@4.1.0-rc.0/dist/js/select2.min.js"></script>

View File

@@ -143,6 +143,9 @@
<div class="modal-footer">
<button type="button" class="btn btn-secondary" data-bs-dismiss="modal">Close</button>
<button type="button" class="btn btn-primary" id="processJobBtn">Process Items</button>
<button type="button" class="btn btn-warning d-none" id="stopJobBtn">
<i class="bi bi-stop-fill me-1"></i>Stop Job
</button>
</div>
</div>
</div>
@@ -218,6 +221,27 @@
}
}
// Return status text and class based on job status
// Map a raw job status string to its display text and Bootstrap text class.
// Unknown statuses fall back to title-case with a neutral (secondary) style.
function getStatus(status) {
    // Known statuses and their presentation.
    const statusMap = {
        started: {text: 'In Progress', class: 'text-primary'},
        failed: {text: 'Failed', class: 'text-danger'},
        completed: {text: 'Completed', class: 'text-success'},
        pending: {text: 'Pending', class: 'text-warning'},
        cancelled: {text: 'Cancelled', class: 'text-secondary'},
        processing: {text: 'Processing', class: 'text-info'}
    };
    if (Object.prototype.hasOwnProperty.call(statusMap, status)) {
        return statusMap[status];
    }
    // Return status in title case if unknown
    return {text: status.charAt(0).toUpperCase() + status.slice(1), class: 'text-secondary'};
}
// Render jobs table with pagination
function renderJobsTable(page) {
const tableBody = document.getElementById('jobsTableBody');
@@ -254,24 +278,10 @@
const formattedDate = startedDate.toLocaleString();
// Determine status
let status = 'In Progress';
let statusClass = 'text-primary';
let status = getStatus(job.status);
let canDelete = job.status !== "started";
let totalItems = job.broken_items ? Object.values(job.broken_items).reduce((sum, arr) => sum + arr.length, 0) : 0;
if (job.status === 'failed') {
status = 'Failed';
statusClass = 'text-danger';
} else if (job.status === 'completed') {
status = 'Completed';
statusClass = 'text-success';
} else if (job.status === 'pending') {
status = 'Pending';
statusClass = 'text-warning';
} else if (job.status === "processing") {
status = 'Processing';
statusClass = 'text-info';
}
row.innerHTML = `
<td>
@@ -283,25 +293,31 @@
<td><a href="#" class="text-link view-job" data-id="${job.id}"><small>${job.id.substring(0, 8)}</small></a></td>
<td>${job.arrs.join(', ')}</td>
<td><small>${formattedDate}</small></td>
<td><span class="${statusClass}">${status}</span></td>
<td><span class="${status.class}">${status.text}</span></td>
<td>${totalItems}</td>
<td>
${job.status === "pending" ?
`<button class="btn btn-sm btn-primary process-job" data-id="${job.id}">
<i class="bi bi-play-fill"></i> Process
`<button class="btn btn-sm btn-primary process-job" data-id="${job.id}">
<i class="bi bi-play-fill"></i> Process
</button>` :
`<button class="btn btn-sm btn-primary" disabled>
<i class="bi bi-eye"></i> Process
`<button class="btn btn-sm btn-primary" disabled>
<i class="bi bi-eye"></i> Process
</button>`
}
}
${(job.status === "started" || job.status === "processing") ?
`<button class="btn btn-sm btn-warning stop-job" data-id="${job.id}">
<i class="bi bi-stop-fill"></i> Stop
</button>` :
''
}
${canDelete ?
`<button class="btn btn-sm btn-danger delete-job" data-id="${job.id}">
<i class="bi bi-trash"></i>
</button>` :
`<button class="btn btn-sm btn-danger" disabled>
<i class="bi bi-trash"></i>
</button>`
}
`<button class="btn btn-sm btn-danger delete-job" data-id="${job.id}">
<i class="bi bi-trash"></i>
</button>` :
`<button class="btn btn-sm btn-danger" disabled>
<i class="bi bi-trash"></i>
</button>`
}
</td>
`;
@@ -370,6 +386,13 @@
viewJobDetails(jobId);
});
});
document.querySelectorAll('.stop-job').forEach(button => {
button.addEventListener('click', (e) => {
const jobId = e.currentTarget.dataset.id;
stopJob(jobId);
});
});
}
document.getElementById('selectAllJobs').addEventListener('change', function() {
@@ -456,6 +479,25 @@
}
}
// Request cancellation of a running repair job after user confirmation,
// then refresh the jobs table. Errors are surfaced via a toast.
async function stopJob(jobId) {
    // Bail out early if the user does not confirm.
    if (!confirm('Are you sure you want to stop this job?')) {
        return;
    }
    try {
        const response = await fetcher(`/api/repair/jobs/${jobId}/stop`, {
            method: 'POST',
            headers: {
                'Content-Type': 'application/json'
            },
        });
        if (!response.ok) {
            throw new Error(await response.text());
        }
        createToast('Job stop requested successfully');
        await loadJobs(currentPage); // Refresh the jobs list
    } catch (error) {
        createToast(`Error stopping job: ${error.message}`, 'error');
    }
}
// View job details function
function viewJobDetails(jobId) {
// Find the job
@@ -477,24 +519,9 @@
}
// Set status with color
let status = 'In Progress';
let statusClass = 'text-primary';
let status = getStatus(job.status);
if (job.status === 'failed') {
status = 'Failed';
statusClass = 'text-danger';
} else if (job.status === 'completed') {
status = 'Completed';
statusClass = 'text-success';
} else if (job.status === 'pending') {
status = 'Pending';
statusClass = 'text-warning';
} else if (job.status === "processing") {
status = 'Processing';
statusClass = 'text-info';
}
document.getElementById('modalJobStatus').innerHTML = `<span class="${statusClass}">${status}</span>`;
document.getElementById('modalJobStatus').innerHTML = `<span class="${status.class}">${status.text}</span>`;
// Set other job details
document.getElementById('modalJobArrs').textContent = job.arrs.join(', ');
@@ -524,6 +551,19 @@
processBtn.classList.add('d-none');
}
// Stop button visibility
const stopBtn = document.getElementById('stopJobBtn'); // You'll need to add this button to the HTML
if (job.status === 'started' || job.status === 'processing') {
stopBtn.classList.remove('d-none');
stopBtn.onclick = () => {
stopJob(job.id);
const modal = bootstrap.Modal.getInstance(document.getElementById('jobDetailsModal'));
modal.hide();
};
} else {
stopBtn.classList.add('d-none');
}
// Populate broken items table
const brokenItemsTableBody = document.getElementById('brokenItemsTableBody');
const noBrokenItemsMessage = document.getElementById('noBrokenItemsMessage');

View File

@@ -7,7 +7,7 @@ import (
"net/http"
)
func (ui *Handler) LoginHandler(w http.ResponseWriter, r *http.Request) {
func (wb *Web) LoginHandler(w http.ResponseWriter, r *http.Request) {
cfg := config.Get()
if cfg.NeedsAuth() {
http.Redirect(w, r, "/register", http.StatusSeeOther)
@@ -19,7 +19,7 @@ func (ui *Handler) LoginHandler(w http.ResponseWriter, r *http.Request) {
"Page": "login",
"Title": "Login",
}
_ = templates.ExecuteTemplate(w, "layout", data)
_ = wb.templates.ExecuteTemplate(w, "layout", data)
return
}
@@ -33,8 +33,8 @@ func (ui *Handler) LoginHandler(w http.ResponseWriter, r *http.Request) {
return
}
if ui.verifyAuth(credentials.Username, credentials.Password) {
session, _ := store.Get(r, "auth-session")
if wb.verifyAuth(credentials.Username, credentials.Password) {
session, _ := wb.cookie.Get(r, "auth-session")
session.Values["authenticated"] = true
session.Values["username"] = credentials.Username
if err := session.Save(r, w); err != nil {
@@ -48,8 +48,8 @@ func (ui *Handler) LoginHandler(w http.ResponseWriter, r *http.Request) {
http.Error(w, "Invalid credentials", http.StatusUnauthorized)
}
func (ui *Handler) LogoutHandler(w http.ResponseWriter, r *http.Request) {
session, _ := store.Get(r, "auth-session")
func (wb *Web) LogoutHandler(w http.ResponseWriter, r *http.Request) {
session, _ := wb.cookie.Get(r, "auth-session")
session.Values["authenticated"] = false
session.Options.MaxAge = -1
err := session.Save(r, w)
@@ -59,7 +59,7 @@ func (ui *Handler) LogoutHandler(w http.ResponseWriter, r *http.Request) {
http.Redirect(w, r, "/login", http.StatusSeeOther)
}
func (ui *Handler) RegisterHandler(w http.ResponseWriter, r *http.Request) {
func (wb *Web) RegisterHandler(w http.ResponseWriter, r *http.Request) {
cfg := config.Get()
authCfg := cfg.GetAuth()
@@ -69,7 +69,7 @@ func (ui *Handler) RegisterHandler(w http.ResponseWriter, r *http.Request) {
"Page": "register",
"Title": "Register",
}
_ = templates.ExecuteTemplate(w, "layout", data)
_ = wb.templates.ExecuteTemplate(w, "layout", data)
return
}
@@ -99,7 +99,7 @@ func (ui *Handler) RegisterHandler(w http.ResponseWriter, r *http.Request) {
}
// Create a session
session, _ := store.Get(r, "auth-session")
session, _ := wb.cookie.Get(r, "auth-session")
session.Values["authenticated"] = true
session.Values["username"] = username
if err := session.Save(r, w); err != nil {
@@ -110,42 +110,49 @@ func (ui *Handler) RegisterHandler(w http.ResponseWriter, r *http.Request) {
http.Redirect(w, r, "/", http.StatusSeeOther)
}
func (ui *Handler) IndexHandler(w http.ResponseWriter, r *http.Request) {
func (wb *Web) IndexHandler(w http.ResponseWriter, r *http.Request) {
cfg := config.Get()
data := map[string]interface{}{
"URLBase": cfg.URLBase,
"Page": "index",
"Title": "Torrents",
}
_ = templates.ExecuteTemplate(w, "layout", data)
_ = wb.templates.ExecuteTemplate(w, "layout", data)
}
func (ui *Handler) DownloadHandler(w http.ResponseWriter, r *http.Request) {
func (wb *Web) DownloadHandler(w http.ResponseWriter, r *http.Request) {
cfg := config.Get()
data := map[string]interface{}{
"URLBase": cfg.URLBase,
"Page": "download",
"Title": "Download",
debrids := make([]string, 0)
for _, d := range cfg.Debrids {
debrids = append(debrids, d.Name)
}
_ = templates.ExecuteTemplate(w, "layout", data)
data := map[string]interface{}{
"URLBase": cfg.URLBase,
"Page": "download",
"Title": "Download",
"Debrids": debrids,
"HasMultiDebrid": len(debrids) > 1,
"DownloadFolder": cfg.QBitTorrent.DownloadFolder,
}
_ = wb.templates.ExecuteTemplate(w, "layout", data)
}
func (ui *Handler) RepairHandler(w http.ResponseWriter, r *http.Request) {
func (wb *Web) RepairHandler(w http.ResponseWriter, r *http.Request) {
cfg := config.Get()
data := map[string]interface{}{
"URLBase": cfg.URLBase,
"Page": "repair",
"Title": "Repair",
}
_ = templates.ExecuteTemplate(w, "layout", data)
_ = wb.templates.ExecuteTemplate(w, "layout", data)
}
func (ui *Handler) ConfigHandler(w http.ResponseWriter, r *http.Request) {
func (wb *Web) ConfigHandler(w http.ResponseWriter, r *http.Request) {
cfg := config.Get()
data := map[string]interface{}{
"URLBase": cfg.URLBase,
"Page": "config",
"Title": "Config",
}
_ = templates.ExecuteTemplate(w, "layout", data)
_ = wb.templates.ExecuteTemplate(w, "layout", data)
}

View File

@@ -6,7 +6,7 @@ import (
"github.com/gorilla/sessions"
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/logger"
"github.com/sirrobot01/decypharr/pkg/qbit"
"github.com/sirrobot01/decypharr/pkg/store"
"html/template"
"os"
)
@@ -50,26 +50,15 @@ type RepairRequest struct {
//go:embed templates/*
var content embed.FS
type Handler struct {
qbit *qbit.QBit
logger zerolog.Logger
}
func New(qbit *qbit.QBit) *Handler {
return &Handler{
qbit: qbit,
logger: logger.New("ui"),
}
}
var (
secretKey = cmp.Or(os.Getenv("DECYPHARR_SECRET_KEY"), "\"wqj(v%lj*!-+kf@4&i95rhh_!5_px5qnuwqbr%cjrvrozz_r*(\"")
store = sessions.NewCookieStore([]byte(secretKey))
type Web struct {
logger zerolog.Logger
cookie *sessions.CookieStore
templates *template.Template
)
torrents *store.TorrentStorage
}
func init() {
templates = template.Must(template.ParseFS(
func New() *Web {
templates := template.Must(template.ParseFS(
content,
"templates/layout.html",
"templates/index.html",
@@ -79,10 +68,17 @@ func init() {
"templates/login.html",
"templates/register.html",
))
store.Options = &sessions.Options{
secretKey := cmp.Or(os.Getenv("DECYPHARR_SECRET_KEY"), "\"wqj(v%lj*!-+kf@4&i95rhh_!5_px5qnuwqbr%cjrvrozz_r*(\"")
cookieStore := sessions.NewCookieStore([]byte(secretKey))
cookieStore.Options = &sessions.Options{
Path: "/",
MaxAge: 86400 * 7,
HttpOnly: false,
}
return &Web{
logger: logger.New("ui"),
templates: templates,
cookie: cookieStore,
torrents: store.GetStore().GetTorrentStorage(),
}
}

View File

@@ -9,7 +9,7 @@ import (
"strings"
"time"
"github.com/sirrobot01/decypharr/pkg/debrid/debrid"
"github.com/sirrobot01/decypharr/pkg/debrid/store"
)
var sharedClient = &http.Client{
@@ -28,7 +28,7 @@ var sharedClient = &http.Client{
}
type File struct {
cache *debrid.Cache
cache *store.Cache
fileId string
torrentName string
@@ -128,7 +128,7 @@ func (f *File) stream() (*http.Response, error) {
cleanupResp := func() {
if resp.Body != nil {
io.Copy(io.Discard, resp.Body)
_, _ = io.Copy(io.Discard, resp.Body)
resp.Body.Close()
}
}
@@ -192,7 +192,7 @@ func (f *File) stream() (*http.Response, error) {
if newResp.StatusCode != http.StatusOK && newResp.StatusCode != http.StatusPartialContent {
cleanupBody := func() {
if newResp.Body != nil {
io.Copy(io.Discard, newResp.Body)
_, _ = io.Copy(io.Discard, newResp.Body)
newResp.Body.Close()
}
}

View File

@@ -3,6 +3,8 @@ package webdav
import (
"context"
"fmt"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
"golang.org/x/net/webdav"
"io"
"mime"
"net/http"
@@ -15,21 +17,19 @@ import (
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/utils"
"github.com/sirrobot01/decypharr/pkg/debrid/debrid"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
"github.com/sirrobot01/decypharr/pkg/debrid/store"
"github.com/sirrobot01/decypharr/pkg/version"
"golang.org/x/net/webdav"
)
type Handler struct {
Name string
logger zerolog.Logger
cache *debrid.Cache
cache *store.Cache
URLBase string
RootPath string
}
func NewHandler(name, urlBase string, cache *debrid.Cache, logger zerolog.Logger) *Handler {
func NewHandler(name, urlBase string, cache *store.Cache, logger zerolog.Logger) *Handler {
h := &Handler{
Name: name,
cache: cache,
@@ -191,7 +191,7 @@ func (h *Handler) OpenFile(ctx context.Context, name string, flag int, perm os.F
}
name = utils.PathUnescape(path.Clean(name))
rootDir := path.Clean(h.RootPath)
metadataOnly := ctx.Value("metadataOnly") != nil
metadataOnly := ctx.Value(metadataOnlyKey) != nil
now := time.Now()
// 1) special case version.txt
@@ -490,7 +490,7 @@ func (h *Handler) handleGet(w http.ResponseWriter, r *http.Request) {
done := make(chan struct{})
go func() {
defer close(done)
io.Copy(w, fRaw)
_, _ = io.Copy(w, fRaw)
}()
select {
case <-ctx.Done():

View File

@@ -84,9 +84,7 @@ func filesToXML(urlPath string, fi os.FileInfo, children []os.FileInfo) stringbu
})
}
sb := builderPool.Get().(stringbuf.StringBuf)
sb.Reset()
defer builderPool.Put(sb)
sb := stringbuf.New("")
// XML header and main element
_, _ = sb.WriteString(`<?xml version="1.0" encoding="UTF-8"?>`)

View File

@@ -8,21 +8,19 @@ import (
"path"
"strconv"
"strings"
"sync"
"time"
)
var builderPool = sync.Pool{
type contextKey string
New: func() interface{} {
buf := stringbuf.New("")
return buf
},
}
const (
// metadataOnlyKey is used to indicate that the request is for metadata only
metadataOnlyKey contextKey = "metadataOnly"
)
func (h *Handler) handlePropfind(w http.ResponseWriter, r *http.Request) {
// Setup context for metadata only
ctx := context.WithValue(r.Context(), "metadataOnly", true)
ctx := context.WithValue(r.Context(), metadataOnlyKey, true)
r = r.WithContext(ctx)
cleanPath := path.Clean(r.URL.Path)
@@ -85,9 +83,7 @@ func (h *Handler) handlePropfind(w http.ResponseWriter, r *http.Request) {
})
}
sb := builderPool.Get().(stringbuf.StringBuf)
sb.Reset()
defer builderPool.Put(sb)
sb := stringbuf.New("")
// XML header and main element
_, _ = sb.WriteString(`<?xml version="1.0" encoding="UTF-8"?>`)

View File

@@ -7,7 +7,7 @@ import (
"github.com/go-chi/chi/v5"
"github.com/go-chi/chi/v5/middleware"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/pkg/service"
"github.com/sirrobot01/decypharr/pkg/store"
"html/template"
"net/http"
"net/url"
@@ -90,13 +90,12 @@ type WebDav struct {
}
func New() *WebDav {
svc := service.GetService()
urlBase := config.Get().URLBase
w := &WebDav{
Handlers: make([]*Handler, 0),
URLBase: urlBase,
}
for name, c := range svc.Debrid.Caches {
for name, c := range store.GetStore().GetDebrid().GetCaches() {
h := NewHandler(name, urlBase, c, c.GetLogger())
w.Handlers = append(w.Handlers, h)
}

View File

@@ -1,72 +0,0 @@
package worker
import (
"context"
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/logger"
"github.com/sirrobot01/decypharr/pkg/service"
"sync"
"time"
)
var (
_logInstance zerolog.Logger
)
// getLogger returns the package-level worker logger.
// NOTE(review): _logInstance is assigned in Start; calling this before
// Start runs returns a zero-value logger — confirm that is acceptable.
func getLogger() zerolog.Logger {
	return _logInstance
}
// Start initializes the worker logger and runs the queue-cleanup worker,
// blocking until it exits (i.e. until ctx is cancelled). It always returns nil.
func Start(ctx context.Context) error {
	cfg := config.Get()

	// Initialize the package-level logger used by the workers.
	_logInstance = logger.New("worker")

	// Run the single worker in its own goroutine and wait for it to finish.
	done := make(chan struct{})
	go func() {
		defer close(done)
		cleanUpQueuesWorker(ctx, cfg)
	}()
	<-done
	return nil
}
// cleanUpQueuesWorker triggers a cleanup pass every 10 seconds until ctx is
// cancelled. A TryLock guard ensures at most one cleanup pass runs at a time;
// ticks that arrive while a pass is still in flight are simply skipped.
//
// NOTE(review): cfg is currently unused; it is kept to preserve the existing
// call signature.
func cleanUpQueuesWorker(ctx context.Context, cfg *config.Config) {
	_logger := getLogger()
	_logger.Debug().Msg("Clean up Queues Worker started")

	// Fix: the ticker was never stopped, leaking its timer for the
	// lifetime of the process after cancellation.
	cleanupTicker := time.NewTicker(10 * time.Second)
	defer cleanupTicker.Stop()

	// Fix: the previous code derived a context via
	// context.WithValue(ctx, "worker", "cleanup"), which uses a built-in
	// string as a context key (flagged by go vet / staticcheck SA1029) and
	// whose value was never read. Cancellation semantics are unchanged by
	// selecting on ctx.Done() directly.
	var cleanupMutex sync.Mutex
	for {
		select {
		case <-ctx.Done():
			_logger.Debug().Msg("Clean up Queues Worker stopped")
			return
		case <-cleanupTicker.C:
			// Skip this tick if a cleanup pass is already running.
			if cleanupMutex.TryLock() {
				go func() {
					defer cleanupMutex.Unlock()
					cleanUpQueues()
				}()
			}
		}
	}
}
// cleanUpQueues performs a single cleanup pass over all configured Arr
// instances, skipping any with cleanup disabled. Failures are logged and do
// not abort the pass.
func cleanUpQueues() {
	log := getLogger()
	for _, arr := range service.GetService().Arr.GetAll() {
		if arr.Cleanup {
			if err := arr.CleanupQueue(); err != nil {
				log.Error().Err(err).Msg("Error cleaning up queue")
			}
		}
	}
}

View File

@@ -1,57 +0,0 @@
#!/bin/bash
# deploy.sh
# Tag the main or beta branch with a semantic version and push the tag,
# which kicks off the GitHub Actions release pipeline.

# Function to display usage
usage() {
    echo "Usage: $0 [-b|--beta] <version>"
    echo "Example for main: $0 v1.0.0"
    echo "Example for beta: $0 -b v1.0.0"
    exit 1
}

# Parse arguments
BETA=false
while [[ "$#" -gt 0 ]]; do
    case $1 in
        -b|--beta) BETA=true; shift ;;
        -*) echo "Unknown parameter: $1"; usage ;;
        *) VERSION="$1"; shift ;;
    esac
done

# Check if version is provided
if [ -z "$VERSION" ]; then
    echo "Error: Version is required"
    usage
fi

# Validate version format (vMAJOR.MINOR.PATCH)
if ! [[ $VERSION =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
    echo "Error: Version must be in format v1.0.0"
    exit 1
fi

# Set tag based on branch: beta releases get a "-beta" suffix.
if [ "$BETA" = true ]; then
    TAG="$VERSION-beta"
    BRANCH="beta"
else
    TAG="$VERSION"
    BRANCH="main"
fi

echo "Deploying version $VERSION to $BRANCH branch..."

# Ensure we're on the right branch.
# Fix: quote the expansion to avoid word splitting/globbing (SC2086).
git checkout "$BRANCH" || exit 1

# Create and push tag
echo "Creating tag $TAG..."
git tag "$TAG" || exit 1
git push origin "$TAG" || exit 1

echo "Deployment initiated successfully!"
echo "GitHub Actions will handle the release process."
echo "Check the progress at: https://github.com/sirrobot01/decypharr/actions"