- Fix ARR flaky bug

- Refined download uncached options
- Deprecate qBittorrent log level
- Skip Repair for specified arr
This commit is contained in:
Mukhtar Akere
2025-03-09 03:56:34 +01:00
parent a83f3d72ce
commit 2b2a682218
26 changed files with 276 additions and 181 deletions
+5
View File
@@ -159,3 +159,8 @@
- Minor bug fixes - Minor bug fixes
- Add Tautulli support - Add Tautulli support
- playback_failed event triggers a repair - playback_failed event triggers a repair
- Miscellaneous improvements
- Add an option to skip the repair worker for a specific arr
- Arr specific uncached downloading option
- Option to download uncached torrents from UI
- Remove qBittorrent log level (use the global log level)
+2 -2
View File
@@ -140,14 +140,14 @@ This is the default config file. You can create a `config.json` file in the root
"port": "8282", "port": "8282",
"download_folder": "/mnt/symlinks/", "download_folder": "/mnt/symlinks/",
"categories": ["sonarr", "radarr"], "categories": ["sonarr", "radarr"],
"log_level": "info"
}, },
"repair": { "repair": {
"enabled": false, "enabled": false,
"interval": "12h", "interval": "12h",
"run_on_start": false "run_on_start": false
}, },
"use_auth": false "use_auth": false,
"log_level": "info"
} }
``` ```
+15 -5
View File
@@ -51,20 +51,30 @@
"download_folder": "/mnt/symlinks/", "download_folder": "/mnt/symlinks/",
"categories": ["sonarr", "radarr"], "categories": ["sonarr", "radarr"],
"refresh_interval": 5, "refresh_interval": 5,
"log_level": "info"
}, },
"arrs": [ "arrs": [
{ {
"name": "sonarr", "name": "sonarr",
"host": "http://host:8989", "host": "http://radarr:8989",
"token": "arr_key", "token": "arr_key",
"cleanup": true "cleanup": true,
"skip_repair": true,
"download_uncached": false
}, },
{ {
"name": "radarr", "name": "radarr",
"host": "http://host:7878", "host": "http://radarr:7878",
"token": "arr_key", "token": "arr_key",
"cleanup": false "cleanup": false,
"download_uncached": false
},
{
"name": "lidarr",
"host": "http://lidarr:7878",
"token": "arr_key",
"cleanup": false,
"skip_repair": true,
"download_uncached": false
} }
], ],
"repair": { "repair": {
+2 -1
View File
@@ -38,7 +38,6 @@ type QBitTorrent struct {
Username string `json:"username"` Username string `json:"username"`
Password string `json:"password"` Password string `json:"password"`
Port string `json:"port"` Port string `json:"port"`
LogLevel string `json:"log_level"`
DownloadFolder string `json:"download_folder"` DownloadFolder string `json:"download_folder"`
Categories []string `json:"categories"` Categories []string `json:"categories"`
RefreshInterval int `json:"refresh_interval"` RefreshInterval int `json:"refresh_interval"`
@@ -49,6 +48,8 @@ type Arr struct {
Host string `json:"host"` Host string `json:"host"`
Token string `json:"token"` Token string `json:"token"`
Cleanup bool `json:"cleanup"` Cleanup bool `json:"cleanup"`
SkipRepair bool `json:"skip_repair"`
DownloadUncached bool `json:"download_uncached"`
} }
type Repair struct { type Repair struct {
+6 -2
View File
@@ -30,16 +30,20 @@ type Arr struct {
Token string `json:"token"` Token string `json:"token"`
Type Type `json:"type"` Type Type `json:"type"`
Cleanup bool `json:"cleanup"` Cleanup bool `json:"cleanup"`
SkipRepair bool `json:"skip_repair"`
DownloadUncached bool `json:"download_uncached"`
client *http.Client client *http.Client
} }
func New(name, host, token string, cleanup bool) *Arr { func New(name, host, token string, cleanup, skipRepair, downloadUncached bool) *Arr {
return &Arr{ return &Arr{
Name: name, Name: name,
Host: host, Host: host,
Token: strings.TrimSpace(token), Token: strings.TrimSpace(token),
Type: InferType(host, name), Type: InferType(host, name),
Cleanup: cleanup, Cleanup: cleanup,
SkipRepair: skipRepair,
DownloadUncached: downloadUncached,
client: &http.Client{ client: &http.Client{
Transport: &http.Transport{ Transport: &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
@@ -142,7 +146,7 @@ func NewStorage() *Storage {
arrs := make(map[string]*Arr) arrs := make(map[string]*Arr)
for _, a := range config.GetConfig().Arrs { for _, a := range config.GetConfig().Arrs {
name := a.Name name := a.Name
arrs[name] = New(name, a.Host, a.Token, a.Cleanup) arrs[name] = New(name, a.Host, a.Token, a.Cleanup, a.SkipRepair, a.DownloadUncached)
} }
return &Storage{ return &Storage{
Arrs: arrs, Arrs: arrs,
+74 -28
View File
@@ -5,6 +5,7 @@ import (
"fmt" "fmt"
"net/http" "net/http"
"strconv" "strconv"
"strings"
) )
type episode struct { type episode struct {
@@ -12,6 +13,17 @@ type episode struct {
EpisodeFileID int `json:"episodeFileId"` EpisodeFileID int `json:"episodeFileId"`
} }
type sonarrSearch struct {
Name string `json:"name"`
SeasonNumber int `json:"seasonNumber"`
SeriesId int `json:"episodeIds"`
}
type radarrSearch struct {
Name string `json:"name"`
MovieIds []int `json:"movieIds"`
}
func (a *Arr) GetMedia(mediaId string) ([]Content, error) { func (a *Arr) GetMedia(mediaId string) ([]Content, error) {
// Get series // Get series
if a.Type == Radarr { if a.Type == Radarr {
@@ -83,6 +95,7 @@ func (a *Arr) GetMedia(mediaId string) ([]Content, error) {
FileId: file.Id, FileId: file.Id,
Path: file.Path, Path: file.Path,
Id: eId, Id: eId,
SeasonNumber: file.SeasonNumber,
}) })
} }
if len(files) == 0 { if len(files) == 0 {
@@ -132,29 +145,64 @@ func GetMovies(a *Arr, tvId string) ([]Content, error) {
return contents, nil return contents, nil
} }
func (a *Arr) search(ids []int) error { // searchSonarr searches for missing files in the arr
var payload interface{} // map ids are series id and season number
switch a.Type { func (a *Arr) searchSonarr(files []ContentFile) error {
case Sonarr: ids := make(map[string]any)
payload = struct { for _, f := range files {
Name string `json:"name"` // Join series id and season number
EpisodeIds []int `json:"episodeIds"` id := fmt.Sprintf("%d-%d", f.Id, f.SeasonNumber)
}{ ids[id] = nil
Name: "EpisodeSearch",
EpisodeIds: ids,
} }
case Radarr: errs := make(chan error, len(ids))
payload = struct { for id := range ids {
Name string `json:"name"` go func() {
MovieIds []int `json:"movieIds"` parts := strings.Split(id, "-")
}{ if len(parts) != 2 {
return
}
seriesId, err := strconv.Atoi(parts[0])
if err != nil {
return
}
seasonNumber, err := strconv.Atoi(parts[1])
if err != nil {
return
}
payload := sonarrSearch{
Name: "SeasonSearch",
SeasonNumber: seasonNumber,
SeriesId: seriesId,
}
resp, err := a.Request(http.MethodPost, "api/v3/command", payload)
if err != nil {
errs <- fmt.Errorf("failed to automatic search: %v", err)
return
}
if statusOk := strconv.Itoa(resp.StatusCode)[0] == '2'; !statusOk {
errs <- fmt.Errorf("failed to automatic search. Status Code: %s", resp.Status)
return
}
}()
}
for range ids {
err := <-errs
if err != nil {
return err
}
}
return nil
}
func (a *Arr) searchRadarr(files []ContentFile) error {
ids := make([]int, 0)
for _, f := range files {
ids = append(ids, f.Id)
}
payload := radarrSearch{
Name: "MoviesSearch", Name: "MoviesSearch",
MovieIds: ids, MovieIds: ids,
} }
default:
return fmt.Errorf("unknown arr type: %s", a.Type)
}
resp, err := a.Request(http.MethodPost, "api/v3/command", payload) resp, err := a.Request(http.MethodPost, "api/v3/command", payload)
if err != nil { if err != nil {
return fmt.Errorf("failed to automatic search: %v", err) return fmt.Errorf("failed to automatic search: %v", err)
@@ -166,16 +214,14 @@ func (a *Arr) search(ids []int) error {
} }
func (a *Arr) SearchMissing(files []ContentFile) error { func (a *Arr) SearchMissing(files []ContentFile) error {
switch a.Type {
ids := make([]int, 0) case Sonarr:
for _, f := range files { return a.searchSonarr(files)
ids = append(ids, f.Id) case Radarr:
return a.searchRadarr(files)
default:
return fmt.Errorf("unknown arr type: %s", a.Type)
} }
if len(ids) == 0 {
return nil
}
return a.search(ids)
} }
func (a *Arr) DeleteFiles(files []ContentFile) error { func (a *Arr) DeleteFiles(files []ContentFile) error {
+1
View File
@@ -21,6 +21,7 @@ type ContentFile struct {
TargetPath string `json:"targetPath"` TargetPath string `json:"targetPath"`
IsSymlink bool `json:"isSymlink"` IsSymlink bool `json:"isSymlink"`
IsBroken bool `json:"isBroken"` IsBroken bool `json:"isBroken"`
SeasonNumber int `json:"seasonNumber"`
} }
type Content struct { type Content struct {
+4 -6
View File
@@ -135,9 +135,8 @@ func flattenFiles(files []MagnetFile, parentPath string, index *int) []torrent.F
return result return result
} }
func (ad *AllDebrid) GetTorrent(id string) (*torrent.Torrent, error) { func (ad *AllDebrid) GetTorrent(t *torrent.Torrent) (*torrent.Torrent, error) {
t := &torrent.Torrent{} url := fmt.Sprintf("%s/magnet/status?id=%s", ad.Host, t.Id)
url := fmt.Sprintf("%s/magnet/status?id=%s", ad.Host, id)
req, _ := http.NewRequest(http.MethodGet, url, nil) req, _ := http.NewRequest(http.MethodGet, url, nil)
resp, err := ad.client.MakeRequest(req) resp, err := ad.client.MakeRequest(req)
if err != nil { if err != nil {
@@ -152,7 +151,6 @@ func (ad *AllDebrid) GetTorrent(id string) (*torrent.Torrent, error) {
data := res.Data.Magnets data := res.Data.Magnets
status := getAlldebridStatus(data.StatusCode) status := getAlldebridStatus(data.StatusCode)
name := data.Filename name := data.Filename
t.Id = id
t.Name = name t.Name = name
t.Status = status t.Status = status
t.Filename = name t.Filename = name
@@ -176,7 +174,7 @@ func (ad *AllDebrid) GetTorrent(id string) (*torrent.Torrent, error) {
func (ad *AllDebrid) CheckStatus(torrent *torrent.Torrent, isSymlink bool) (*torrent.Torrent, error) { func (ad *AllDebrid) CheckStatus(torrent *torrent.Torrent, isSymlink bool) (*torrent.Torrent, error) {
for { for {
tb, err := ad.GetTorrent(torrent.Id) tb, err := ad.GetTorrent(torrent)
torrent = tb torrent = tb
@@ -194,7 +192,7 @@ func (ad *AllDebrid) CheckStatus(torrent *torrent.Torrent, isSymlink bool) (*tor
} }
break break
} else if slices.Contains(ad.GetDownloadingStatus(), status) { } else if slices.Contains(ad.GetDownloadingStatus(), status) {
if !ad.DownloadUncached { if !ad.DownloadUncached && !torrent.DownloadUncached {
return torrent, fmt.Errorf("torrent: %s not cached", torrent.Name) return torrent, fmt.Errorf("torrent: %s not cached", torrent.Name)
} }
// Break out of the loop if the torrent is downloading. // Break out of the loop if the torrent is downloading.
+2 -1
View File
@@ -47,13 +47,14 @@ func createDebrid(dc config.Debrid, cache *cache.Cache) engine.Service {
} }
} }
func ProcessTorrent(d *engine.Engine, magnet *utils.Magnet, a *arr.Arr, isSymlink bool) (*torrent.Torrent, error) { func ProcessTorrent(d *engine.Engine, magnet *utils.Magnet, a *arr.Arr, isSymlink, downloadUncached bool) (*torrent.Torrent, error) {
debridTorrent := &torrent.Torrent{ debridTorrent := &torrent.Torrent{
InfoHash: magnet.InfoHash, InfoHash: magnet.InfoHash,
Magnet: magnet, Magnet: magnet,
Name: magnet.Name, Name: magnet.Name,
Arr: a, Arr: a,
Size: magnet.Size, Size: magnet.Size,
DownloadUncached: cmp.Or(downloadUncached, a.DownloadUncached),
} }
errs := make([]error, 0) errs := make([]error, 0)
+4 -5
View File
@@ -97,9 +97,8 @@ func (dl *DebridLink) IsAvailable(infohashes []string) map[string]bool {
return result return result
} }
func (dl *DebridLink) GetTorrent(id string) (*torrent.Torrent, error) { func (dl *DebridLink) GetTorrent(t *torrent.Torrent) (*torrent.Torrent, error) {
t := &torrent.Torrent{} url := fmt.Sprintf("%s/seedbox/list?ids=%s", dl.Host, t.Id)
url := fmt.Sprintf("%s/seedbox/list?ids=%s", dl.Host, id)
req, _ := http.NewRequest(http.MethodGet, url, nil) req, _ := http.NewRequest(http.MethodGet, url, nil)
resp, err := dl.client.MakeRequest(req) resp, err := dl.client.MakeRequest(req)
if err != nil { if err != nil {
@@ -204,7 +203,7 @@ func (dl *DebridLink) SubmitMagnet(t *torrent.Torrent) (*torrent.Torrent, error)
func (dl *DebridLink) CheckStatus(torrent *torrent.Torrent, isSymlink bool) (*torrent.Torrent, error) { func (dl *DebridLink) CheckStatus(torrent *torrent.Torrent, isSymlink bool) (*torrent.Torrent, error) {
for { for {
t, err := dl.GetTorrent(torrent.Id) t, err := dl.GetTorrent(torrent)
torrent = t torrent = t
if err != nil || torrent == nil { if err != nil || torrent == nil {
return torrent, err return torrent, err
@@ -218,7 +217,7 @@ func (dl *DebridLink) CheckStatus(torrent *torrent.Torrent, isSymlink bool) (*to
} }
break break
} else if slices.Contains(dl.GetDownloadingStatus(), status) { } else if slices.Contains(dl.GetDownloadingStatus(), status) {
if !dl.DownloadUncached { if !dl.DownloadUncached && !torrent.DownloadUncached {
return torrent, fmt.Errorf("torrent: %s not cached", torrent.Name) return torrent, fmt.Errorf("torrent: %s not cached", torrent.Name)
} }
// Break out of the loop if the torrent is downloading. // Break out of the loop if the torrent is downloading.
+1 -1
View File
@@ -13,7 +13,7 @@ type Service interface {
DeleteTorrent(tr *torrent.Torrent) DeleteTorrent(tr *torrent.Torrent)
IsAvailable(infohashes []string) map[string]bool IsAvailable(infohashes []string) map[string]bool
GetCheckCached() bool GetCheckCached() bool
GetTorrent(id string) (*torrent.Torrent, error) GetTorrent(torrent *torrent.Torrent) (*torrent.Torrent, error)
GetTorrents() ([]*torrent.Torrent, error) GetTorrents() ([]*torrent.Torrent, error)
GetName() string GetName() string
GetLogger() zerolog.Logger GetLogger() zerolog.Logger
+3 -5
View File
@@ -160,9 +160,8 @@ func (r *RealDebrid) SubmitMagnet(t *torrent.Torrent) (*torrent.Torrent, error)
return t, nil return t, nil
} }
func (r *RealDebrid) GetTorrent(id string) (*torrent.Torrent, error) { func (r *RealDebrid) GetTorrent(t *torrent.Torrent) (*torrent.Torrent, error) {
t := &torrent.Torrent{} url := fmt.Sprintf("%s/torrents/info/%s", r.Host, t.Id)
url := fmt.Sprintf("%s/torrents/info/%s", r.Host, id)
req, _ := http.NewRequest(http.MethodGet, url, nil) req, _ := http.NewRequest(http.MethodGet, url, nil)
resp, err := r.client.MakeRequest(req) resp, err := r.client.MakeRequest(req)
if err != nil { if err != nil {
@@ -174,7 +173,6 @@ func (r *RealDebrid) GetTorrent(id string) (*torrent.Torrent, error) {
return t, err return t, err
} }
name := utils.RemoveInvalidChars(data.OriginalFilename) name := utils.RemoveInvalidChars(data.OriginalFilename)
t.Id = id
t.Name = name t.Name = name
t.Bytes = data.Bytes t.Bytes = data.Bytes
t.Folder = name t.Folder = name
@@ -251,7 +249,7 @@ func (r *RealDebrid) CheckStatus(t *torrent.Torrent, isSymlink bool) (*torrent.T
} }
break break
} else if slices.Contains(r.GetDownloadingStatus(), status) { } else if slices.Contains(r.GetDownloadingStatus(), status) {
if !r.DownloadUncached { if !r.DownloadUncached && !t.DownloadUncached {
return t, fmt.Errorf("torrent: %s not cached", t.Name) return t, fmt.Errorf("torrent: %s not cached", t.Name)
} }
// Break out of the loop if the torrent is downloading. // Break out of the loop if the torrent is downloading.
+4 -6
View File
@@ -149,9 +149,8 @@ func getTorboxStatus(status string, finished bool) string {
} }
} }
func (tb *Torbox) GetTorrent(id string) (*torrent.Torrent, error) { func (tb *Torbox) GetTorrent(t *torrent.Torrent) (*torrent.Torrent, error) {
t := &torrent.Torrent{} url := fmt.Sprintf("%s/api/torrents/mylist/?id=%s", tb.Host, t.Id)
url := fmt.Sprintf("%s/api/torrents/mylist/?id=%s", tb.Host, id)
req, _ := http.NewRequest(http.MethodGet, url, nil) req, _ := http.NewRequest(http.MethodGet, url, nil)
resp, err := tb.client.MakeRequest(req) resp, err := tb.client.MakeRequest(req)
if err != nil { if err != nil {
@@ -164,7 +163,6 @@ func (tb *Torbox) GetTorrent(id string) (*torrent.Torrent, error) {
} }
data := res.Data data := res.Data
name := data.Name name := data.Name
t.Id = id
t.Name = name t.Name = name
t.Bytes = data.Size t.Bytes = data.Size
t.Folder = name t.Folder = name
@@ -215,7 +213,7 @@ func (tb *Torbox) GetTorrent(id string) (*torrent.Torrent, error) {
func (tb *Torbox) CheckStatus(torrent *torrent.Torrent, isSymlink bool) (*torrent.Torrent, error) { func (tb *Torbox) CheckStatus(torrent *torrent.Torrent, isSymlink bool) (*torrent.Torrent, error) {
for { for {
t, err := tb.GetTorrent(torrent.Id) t, err := tb.GetTorrent(torrent)
torrent = t torrent = t
@@ -233,7 +231,7 @@ func (tb *Torbox) CheckStatus(torrent *torrent.Torrent, isSymlink bool) (*torren
} }
break break
} else if slices.Contains(tb.GetDownloadingStatus(), status) { } else if slices.Contains(tb.GetDownloadingStatus(), status) {
if !tb.DownloadUncached { if !tb.DownloadUncached && !torrent.DownloadUncached {
return torrent, fmt.Errorf("torrent: %s not cached", torrent.Name) return torrent, fmt.Errorf("torrent: %s not cached", torrent.Name)
} }
// Break out of the loop if the torrent is downloading. // Break out of the loop if the torrent is downloading.
+1 -18
View File
@@ -11,24 +11,6 @@ import (
"sync" "sync"
) )
type Arr struct {
Name string `json:"name"`
Token string `json:"-"`
Host string `json:"host"`
}
type ArrHistorySchema struct {
Page int `json:"page"`
PageSize int `json:"pageSize"`
SortKey string `json:"sortKey"`
SortDirection string `json:"sortDirection"`
TotalRecords int `json:"totalRecords"`
Records []struct {
ID int `json:"id"`
DownloadID string `json:"downloadId"`
} `json:"records"`
}
type Torrent struct { type Torrent struct {
Id string `json:"id"` Id string `json:"id"`
InfoHash string `json:"info_hash"` InfoHash string `json:"info_hash"`
@@ -54,6 +36,7 @@ type Torrent struct {
Arr *arr.Arr `json:"arr"` Arr *arr.Arr `json:"arr"`
Mu sync.Mutex `json:"-"` Mu sync.Mutex `json:"-"`
SizeDownloaded int64 `json:"-"` // This is used for local download SizeDownloaded int64 `json:"-"` // This is used for local download
DownloadUncached bool `json:"-"`
} }
type DownloadLinks struct { type DownloadLinks struct {
+1 -1
View File
@@ -187,7 +187,7 @@ func (q *QBit) getTorrentPath(rclonePath string, debridTorrent *debrid.Torrent)
q.logger.Debug().Msgf("Found torrent path: %s", torrentPath) q.logger.Debug().Msgf("Found torrent path: %s", torrentPath)
return torrentPath, err return torrentPath, err
} }
time.Sleep(10 * time.Millisecond) time.Sleep(100 * time.Millisecond)
} }
} }
+1 -1
View File
@@ -59,7 +59,7 @@ func (q *QBit) authContext(next http.Handler) http.Handler {
// Check if arr exists // Check if arr exists
a := svc.Arr.Get(category) a := svc.Arr.Get(category)
if a == nil { if a == nil {
a = arr.New(category, "", "", false) a = arr.New(category, "", "", false, false, false)
} }
if err == nil { if err == nil {
host = strings.TrimSpace(host) host = strings.TrimSpace(host)
+4 -2
View File
@@ -20,6 +20,7 @@ type ImportRequest struct {
SeriesId int `json:"series"` SeriesId int `json:"series"`
Seasons []int `json:"seasons"` Seasons []int `json:"seasons"`
Episodes []string `json:"episodes"` Episodes []string `json:"episodes"`
DownloadUncached bool `json:"downloadUncached"`
Failed bool `json:"failed"` Failed bool `json:"failed"`
FailedAt time.Time `json:"failedAt"` FailedAt time.Time `json:"failedAt"`
@@ -40,7 +41,7 @@ type ManualImportResponseSchema struct {
Id int `json:"id"` Id int `json:"id"`
} }
func NewImportRequest(uri string, arr *arr.Arr, isSymlink bool) *ImportRequest { func NewImportRequest(uri string, arr *arr.Arr, isSymlink, downloadUncached bool) *ImportRequest {
return &ImportRequest{ return &ImportRequest{
ID: uuid.NewString(), ID: uuid.NewString(),
URI: uri, URI: uri,
@@ -49,6 +50,7 @@ func NewImportRequest(uri string, arr *arr.Arr, isSymlink bool) *ImportRequest {
Completed: false, Completed: false,
Async: false, Async: false,
IsSymlink: isSymlink, IsSymlink: isSymlink,
DownloadUncached: downloadUncached,
} }
} }
@@ -72,7 +74,7 @@ func (i *ImportRequest) Process(q *QBit) (err error) {
return fmt.Errorf("error parsing magnet link: %w", err) return fmt.Errorf("error parsing magnet link: %w", err)
} }
torrent := CreateTorrentFromMagnet(magnet, i.Arr.Name, "manual") torrent := CreateTorrentFromMagnet(magnet, i.Arr.Name, "manual")
debridTorrent, err := debrid.ProcessTorrent(svc.Debrid, magnet, i.Arr, i.IsSymlink) debridTorrent, err := debrid.ProcessTorrent(svc.Debrid, magnet, i.Arr, i.IsSymlink, i.DownloadUncached)
if err != nil || debridTorrent == nil { if err != nil || debridTorrent == nil {
if debridTorrent != nil { if debridTorrent != nil {
dbClient := service.GetDebrid().GetByName(debridTorrent.Debrid) dbClient := service.GetDebrid().GetByName(debridTorrent.Debrid)
+1 -1
View File
@@ -33,7 +33,7 @@ func New() *QBit {
DownloadFolder: cfg.DownloadFolder, DownloadFolder: cfg.DownloadFolder,
Categories: cfg.Categories, Categories: cfg.Categories,
Storage: NewTorrentStorage(filepath.Join(_cfg.Path, "torrents.json")), Storage: NewTorrentStorage(filepath.Join(_cfg.Path, "torrents.json")),
logger: logger.NewLogger("qbit", cfg.LogLevel, os.Stdout), logger: logger.NewLogger("qbit", _cfg.LogLevel, os.Stdout),
RefreshInterval: refreshInterval, RefreshInterval: refreshInterval,
} }
} }
+2 -2
View File
@@ -56,7 +56,7 @@ func (q *QBit) Process(ctx context.Context, magnet *utils.Magnet, category strin
return fmt.Errorf("arr not found in context") return fmt.Errorf("arr not found in context")
} }
isSymlink := ctx.Value("isSymlink").(bool) isSymlink := ctx.Value("isSymlink").(bool)
debridTorrent, err := db.ProcessTorrent(svc.Debrid, magnet, a, isSymlink) debridTorrent, err := db.ProcessTorrent(svc.Debrid, magnet, a, isSymlink, false)
if err != nil || debridTorrent == nil { if err != nil || debridTorrent == nil {
if debridTorrent != nil { if debridTorrent != nil {
dbClient := service.GetDebrid().GetByName(debridTorrent.Debrid) dbClient := service.GetDebrid().GetByName(debridTorrent.Debrid)
@@ -185,7 +185,7 @@ func (q *QBit) UpdateTorrent(t *Torrent, debridTorrent *debrid.Torrent) *Torrent
} }
_db := service.GetDebrid().GetByName(debridTorrent.Debrid) _db := service.GetDebrid().GetByName(debridTorrent.Debrid)
if debridTorrent.Status != "downloaded" { if debridTorrent.Status != "downloaded" {
debridTorrent, _ = _db.GetTorrent(t.ID) debridTorrent, _ = _db.GetTorrent(debridTorrent)
} }
t = q.UpdateTorrentMin(t, debridTorrent) t = q.UpdateTorrentMin(t, debridTorrent)
t.ContentPath = t.TorrentPath + string(os.PathSeparator) t.ContentPath = t.TorrentPath + string(os.PathSeparator)
+40 -11
View File
@@ -12,6 +12,7 @@ import (
"github.com/sirrobot01/debrid-blackhole/pkg/arr" "github.com/sirrobot01/debrid-blackhole/pkg/arr"
"github.com/sirrobot01/debrid-blackhole/pkg/debrid/engine" "github.com/sirrobot01/debrid-blackhole/pkg/debrid/engine"
"golang.org/x/sync/errgroup" "golang.org/x/sync/errgroup"
"net"
"net/http" "net/http"
"net/url" "net/url"
"os" "os"
@@ -75,13 +76,13 @@ type Job struct {
ID string `json:"id"` ID string `json:"id"`
Arrs []*arr.Arr `json:"arrs"` Arrs []*arr.Arr `json:"arrs"`
MediaIDs []string `json:"media_ids"` MediaIDs []string `json:"media_ids"`
OneOff bool `json:"one_off"`
StartedAt time.Time `json:"created_at"` StartedAt time.Time `json:"created_at"`
BrokenItems map[string][]arr.ContentFile `json:"broken_items"` BrokenItems map[string][]arr.ContentFile `json:"broken_items"`
Status JobStatus `json:"status"` Status JobStatus `json:"status"`
CompletedAt time.Time `json:"finished_at"` CompletedAt time.Time `json:"finished_at"`
FailedAt time.Time `json:"failed_at"` FailedAt time.Time `json:"failed_at"`
AutoProcess bool `json:"auto_process"` AutoProcess bool `json:"auto_process"`
Recurrent bool `json:"recurrent"`
Error string `json:"error"` Error string `json:"error"`
} }
@@ -106,10 +107,14 @@ func (j *Job) discordContext() string {
} }
func (r *Repair) getArrs(arrNames []string) []*arr.Arr { func (r *Repair) getArrs(arrNames []string) []*arr.Arr {
checkSkip := true // This is useful when user triggers repair with specific arrs
arrs := make([]*arr.Arr, 0) arrs := make([]*arr.Arr, 0)
if len(arrNames) == 0 { if len(arrNames) == 0 {
// No specific arrs, get all
// Also check if any arrs are set to skip repair
arrs = r.arrs.GetAll() arrs = r.arrs.GetAll()
} else { } else {
checkSkip = false
for _, name := range arrNames { for _, name := range arrNames {
a := r.arrs.Get(name) a := r.arrs.Get(name)
if a == nil || a.Host == "" || a.Token == "" { if a == nil || a.Host == "" || a.Token == "" {
@@ -118,7 +123,17 @@ func (r *Repair) getArrs(arrNames []string) []*arr.Arr {
arrs = append(arrs, a) arrs = append(arrs, a)
} }
} }
if !checkSkip {
return arrs return arrs
}
filtered := make([]*arr.Arr, 0)
for _, a := range arrs {
if a.SkipRepair {
continue
}
filtered = append(filtered, a)
}
return filtered
} }
func jobKey(arrNames []string, mediaIDs []string) string { func jobKey(arrNames []string, mediaIDs []string) string {
@@ -133,7 +148,7 @@ func (r *Repair) reset(j *Job) {
j.FailedAt = time.Time{} j.FailedAt = time.Time{}
j.BrokenItems = nil j.BrokenItems = nil
j.Error = "" j.Error = ""
if j.Arrs == nil { if j.Recurrent || j.Arrs == nil {
j.Arrs = r.getArrs([]string{}) // Get new arrs j.Arrs = r.getArrs([]string{}) // Get new arrs
} }
} }
@@ -166,13 +181,17 @@ func (r *Repair) preRunChecks() error {
return nil return nil
} }
func (r *Repair) AddJob(arrsNames []string, mediaIDs []string, autoProcess bool) error { func (r *Repair) AddJob(arrsNames []string, mediaIDs []string, autoProcess, recurrent bool) error {
key := jobKey(arrsNames, mediaIDs) key := jobKey(arrsNames, mediaIDs)
job, ok := r.Jobs[key] job, ok := r.Jobs[key]
if job != nil && job.Status == JobStarted {
return fmt.Errorf("job already running")
}
if !ok { if !ok {
job = r.newJob(arrsNames, mediaIDs) job = r.newJob(arrsNames, mediaIDs)
} }
job.AutoProcess = autoProcess job.AutoProcess = autoProcess
job.Recurrent = recurrent
r.reset(job) r.reset(job)
r.Jobs[key] = job r.Jobs[key] = job
go r.saveToFile() go r.saveToFile()
@@ -290,7 +309,7 @@ func (r *Repair) Start(ctx context.Context) error {
if r.runOnStart { if r.runOnStart {
r.logger.Info().Msgf("Running initial repair") r.logger.Info().Msgf("Running initial repair")
go func() { go func() {
if err := r.AddJob([]string{}, []string{}, r.autoProcess); err != nil { if err := r.AddJob([]string{}, []string{}, r.autoProcess, true); err != nil {
r.logger.Error().Err(err).Msg("Error running initial repair") r.logger.Error().Err(err).Msg("Error running initial repair")
} }
}() }()
@@ -308,7 +327,7 @@ func (r *Repair) Start(ctx context.Context) error {
return nil return nil
case t := <-ticker.C: case t := <-ticker.C:
r.logger.Info().Msgf("Running repair at %v", t.Format("15:04:05")) r.logger.Info().Msgf("Running repair at %v", t.Format("15:04:05"))
if err := r.AddJob([]string{}, []string{}, r.autoProcess); err != nil { if err := r.AddJob([]string{}, []string{}, r.autoProcess, true); err != nil {
r.logger.Error().Err(err).Msg("Error running repair") r.logger.Error().Err(err).Msg("Error running repair")
} }
@@ -483,6 +502,16 @@ func (r *Repair) getZurgBrokenFiles(media arr.Content) []arr.ContentFile {
uniqueParents[parent] = append(uniqueParents[parent], file) uniqueParents[parent] = append(uniqueParents[parent], file)
} }
} }
client := &http.Client{
Timeout: 0,
Transport: &http.Transport{
TLSHandshakeTimeout: 60 * time.Second,
DialContext: (&net.Dialer{
Timeout: 20 * time.Second,
KeepAlive: 30 * time.Second,
}).DialContext,
},
}
// Access zurg url + symlink folder + first file(encoded) // Access zurg url + symlink folder + first file(encoded)
for parent, f := range uniqueParents { for parent, f := range uniqueParents {
r.logger.Debug().Msgf("Checking %s", parent) r.logger.Debug().Msgf("Checking %s", parent)
@@ -496,25 +525,25 @@ func (r *Repair) getZurgBrokenFiles(media arr.Content) []arr.ContentFile {
continue continue
} }
resp, err := http.Get(fullURL) resp, err := client.Get(fullURL)
if err != nil { if err != nil {
r.logger.Debug().Err(err).Msgf("Failed to reach %s", fullURL) r.logger.Debug().Err(err).Msgf("Failed to reach %s", fullURL)
brokenFiles = append(brokenFiles, f...) brokenFiles = append(brokenFiles, f...)
continue continue
} }
err = resp.Body.Close()
if err != nil {
return nil
}
if resp.StatusCode != http.StatusOK { if resp.StatusCode != http.StatusOK {
r.logger.Debug().Msgf("Failed to get download url for %s", fullURL) r.logger.Debug().Msgf("Failed to get download url for %s", fullURL)
resp.Body.Close() resp.Body.Close()
brokenFiles = append(brokenFiles, f...) brokenFiles = append(brokenFiles, f...)
continue continue
} }
downloadUrl := resp.Request.URL.String() downloadUrl := resp.Request.URL.String()
resp.Body.Close()
if downloadUrl != "" { if downloadUrl != "" {
r.logger.Debug().Msgf("Found download url: %s", downloadUrl) r.logger.Trace().Msgf("Found download url: %s", downloadUrl)
} else { } else {
r.logger.Debug().Msgf("Failed to get download url for %s", fullURL) r.logger.Debug().Msgf("Failed to get download url for %s", fullURL)
brokenFiles = append(brokenFiles, f...) brokenFiles = append(brokenFiles, f...)
+1 -1
View File
@@ -23,7 +23,7 @@ type Server struct {
func New() *Server { func New() *Server {
cfg := config.GetConfig() cfg := config.GetConfig()
l := logger.NewLogger("http", cfg.QBitTorrent.LogLevel, os.Stdout) l := logger.NewLogger("http", cfg.LogLevel, os.Stdout)
r := chi.NewRouter() r := chi.NewRouter()
r.Use(middleware.Recoverer) r.Use(middleware.Recoverer)
r.Handle("/static/*", http.StripPrefix("/static/", http.FileServer(http.Dir("static")))) r.Handle("/static/*", http.StripPrefix("/static/", http.FileServer(http.Dir("static"))))
+1 -1
View File
@@ -47,7 +47,7 @@ func (s *Server) handleTautulli(w http.ResponseWriter, r *http.Request) {
http.Error(w, "Repair service is not enabled", http.StatusInternalServerError) http.Error(w, "Repair service is not enabled", http.StatusInternalServerError)
return return
} }
if err := repair.AddJob([]string{}, []string{mediaId}, payload.AutoProcess); err != nil { if err := repair.AddJob([]string{}, []string{mediaId}, payload.AutoProcess, false); err != nil {
http.Error(w, "Failed to add job: "+err.Error(), http.StatusInternalServerError) http.Error(w, "Failed to add job: "+err.Error(), http.StatusInternalServerError)
return return
} }
+14 -7
View File
@@ -307,10 +307,11 @@ func (ui *Handler) handleAddContent(w http.ResponseWriter, r *http.Request) {
arrName := r.FormValue("arr") arrName := r.FormValue("arr")
notSymlink := r.FormValue("notSymlink") == "true" notSymlink := r.FormValue("notSymlink") == "true"
downloadUncached := r.FormValue("downloadUncached") == "true"
_arr := svc.Arr.Get(arrName) _arr := svc.Arr.Get(arrName)
if _arr == nil { if _arr == nil {
_arr = arr.New(arrName, "", "", false) _arr = arr.New(arrName, "", "", false, false, false)
} }
// Handle URLs // Handle URLs
@@ -323,7 +324,7 @@ func (ui *Handler) handleAddContent(w http.ResponseWriter, r *http.Request) {
} }
for _, url := range urlList { for _, url := range urlList {
importReq := qbit.NewImportRequest(url, _arr, !notSymlink) importReq := qbit.NewImportRequest(url, _arr, !notSymlink, downloadUncached)
err := importReq.Process(ui.qbit) err := importReq.Process(ui.qbit)
if err != nil { if err != nil {
errs = append(errs, fmt.Sprintf("URL %s: %v", url, err)) errs = append(errs, fmt.Sprintf("URL %s: %v", url, err))
@@ -348,7 +349,7 @@ func (ui *Handler) handleAddContent(w http.ResponseWriter, r *http.Request) {
continue continue
} }
importReq := qbit.NewImportRequest(magnet.Link, _arr, !notSymlink) importReq := qbit.NewImportRequest(magnet.Link, _arr, !notSymlink, downloadUncached)
err = importReq.Process(ui.qbit) err = importReq.Process(ui.qbit)
if err != nil { if err != nil {
errs = append(errs, fmt.Sprintf("File %s: %v", fileHeader.Filename, err)) errs = append(errs, fmt.Sprintf("File %s: %v", fileHeader.Filename, err))
@@ -384,7 +385,7 @@ func (ui *Handler) handleRepairMedia(w http.ResponseWriter, r *http.Request) {
if req.Async { if req.Async {
go func() { go func() {
if err := svc.Repair.AddJob([]string{req.ArrName}, req.MediaIds, req.AutoProcess); err != nil { if err := svc.Repair.AddJob([]string{req.ArrName}, req.MediaIds, req.AutoProcess, false); err != nil {
ui.logger.Error().Err(err).Msg("Failed to repair media") ui.logger.Error().Err(err).Msg("Failed to repair media")
} }
}() }()
@@ -392,10 +393,9 @@ func (ui *Handler) handleRepairMedia(w http.ResponseWriter, r *http.Request) {
return return
} }
if err := svc.Repair.AddJob([]string{req.ArrName}, req.MediaIds, req.AutoProcess); err != nil { if err := svc.Repair.AddJob([]string{req.ArrName}, req.MediaIds, req.AutoProcess, false); err != nil {
http.Error(w, fmt.Sprintf("Failed to repair: %v", err), http.StatusInternalServerError) http.Error(w, fmt.Sprintf("Failed to repair: %v", err), http.StatusInternalServerError)
return return
} }
request.JSONResponse(w, "Repair completed", http.StatusOK) request.JSONResponse(w, "Repair completed", http.StatusOK)
@@ -437,7 +437,14 @@ func (ui *Handler) handleGetConfig(w http.ResponseWriter, r *http.Request) {
arrCfgs := make([]config.Arr, 0) arrCfgs := make([]config.Arr, 0)
svc := service.GetService() svc := service.GetService()
for _, a := range svc.Arr.GetAll() { for _, a := range svc.Arr.GetAll() {
arrCfgs = append(arrCfgs, config.Arr{Host: a.Host, Name: a.Name, Token: a.Token, Cleanup: a.Cleanup}) arrCfgs = append(arrCfgs, config.Arr{
Host: a.Host,
Name: a.Name,
Token: a.Token,
Cleanup: a.Cleanup,
SkipRepair: a.SkipRepair,
DownloadUncached: a.DownloadUncached,
})
} }
cfg.Arrs = arrCfgs cfg.Arrs = arrCfgs
request.JSONResponse(w, cfg, http.StatusOK) request.JSONResponse(w, cfg, http.StatusOK)
+14 -14
View File
@@ -12,7 +12,7 @@
<div class="col-md-6"> <div class="col-md-6">
<div class="form-group"> <div class="form-group">
<label for="qbitDebug">Log Level</label> <label for="qbitDebug">Log Level</label>
<select class="form-select" name="qbit.log_level" id="log-level" disabled> <select class="form-select" name="log_level" id="log-level" disabled>
<option value="info">Info</option> <option value="info">Info</option>
<option value="debug">Debug</option> <option value="debug">Debug</option>
<option value="warn">Warning</option> <option value="warn">Warning</option>
@@ -114,18 +114,6 @@
<label class="form-label">Refresh Interval (seconds)</label> <label class="form-label">Refresh Interval (seconds)</label>
<input type="number" class="form-control" name="qbit.refresh_interval"> <input type="number" class="form-control" name="qbit.refresh_interval">
</div> </div>
<div class="col-12 mb-3">
<div class="form-group">
<label for="qbitDebug">Log Level</label>
<select class="form-select" name="qbit.log_level" id="qbitDebug" disabled>
<option value="info">Info</option>
<option value="debug">Debug</option>
<option value="warn">Warning</option>
<option value="error">Error</option>
<option value="trace">Trace</option>
</select>
</div>
</div>
</div> </div>
</div> </div>
@@ -225,10 +213,22 @@
<div class="row"> <div class="row">
<div class="col-md-2 mb-3"> <div class="col-md-2 mb-3">
<div class="form-check"> <div class="form-check">
<label class="form-check-label" for="repairOnStart">Cleanup Queue</label> <label class="form-check-label">Cleanup Queue</label>
<input type="checkbox" disabled class="form-check-input" name="arr[${index}].cleanup"> <input type="checkbox" disabled class="form-check-input" name="arr[${index}].cleanup">
</div> </div>
</div> </div>
<div class="col-md-2 mb-3">
<div class="form-check">
<label class="form-check-label">Skip Repair</label>
<input type="checkbox" disabled class="form-check-input" name="arr[${index}].skip_repair">
</div>
</div>
<div class="col-md-2 mb-3">
<div class="form-check">
<label class="form-check-label">Download Uncached</label>
<input type="checkbox" disabled class="form-check-input" name="arr[${index}].download_uncached">
</div>
</div>
</div> </div>
</div> </div>
`; `;
+20 -7
View File
@@ -22,14 +22,22 @@
<input type="text" class="form-control" id="category" name="arr" placeholder="Enter Category (e.g sonarr, radarr, radarr4k)"> <input type="text" class="form-control" id="category" name="arr" placeholder="Enter Category (e.g sonarr, radarr, radarr4k)">
</div> </div>
<div class="mb-3"> <div class="row mb-3">
<div class="form-check"> <div class="col-md-2 mb-3">
<input class="form-check-input" type="checkbox" id="isSymlink" name="notSymlink"> <div class="form-check d-inline-block me-3">
<label class="form-check-label" for="isSymlink"> <input type="checkbox" class="form-check-input" id="isSymlink" name="notSymlink">
Download real files instead of symlinks <label class="form-check-label" for="isSymlink">No Symlinks</label>
</label>
</div> </div>
</div> </div>
<div class="col-md-2 mb-3">
<div class="form-check d-inline-block">
<input type="checkbox" class="form-check-input" name="downloadUncached" id="downloadUncached">
<label class="form-check-label" for="downloadUncached">Download Uncached</label>
</div>
</div>
</div>
<button type="submit" class="btn btn-primary" id="submitDownload"> <button type="submit" class="btn btn-primary" id="submitDownload">
<i class="bi bi-cloud-upload me-2"></i>Add to Download Queue <i class="bi bi-cloud-upload me-2"></i>Add to Download Queue
@@ -44,15 +52,19 @@
const loadSavedDownloadOptions = () => { const loadSavedDownloadOptions = () => {
const savedCategory = localStorage.getItem('downloadCategory'); const savedCategory = localStorage.getItem('downloadCategory');
const savedSymlink = localStorage.getItem('downloadSymlink'); const savedSymlink = localStorage.getItem('downloadSymlink');
const savedDownloadUncached = localStorage.getItem('downloadUncached');
document.getElementById('category').value = savedCategory || ''; document.getElementById('category').value = savedCategory || '';
document.getElementById('isSymlink').checked = savedSymlink === 'true' document.getElementById('isSymlink').checked = savedSymlink === 'true';
document.getElementById('downloadUncached').checked = savedDownloadUncached === 'true';
}; };
const saveCurrentDownloadOptions = () => { const saveCurrentDownloadOptions = () => {
const category = document.getElementById('category').value; const category = document.getElementById('category').value;
const isSymlink = document.getElementById('isSymlink').checked; const isSymlink = document.getElementById('isSymlink').checked;
const downloadUncached = document.getElementById('downloadUncached').checked;
localStorage.setItem('downloadCategory', category); localStorage.setItem('downloadCategory', category);
localStorage.setItem('downloadSymlink', isSymlink.toString()); localStorage.setItem('downloadSymlink', isSymlink.toString());
localStorage.setItem('downloadUncached', downloadUncached.toString());
}; };
// Load the last used download options from local storage // Load the last used download options from local storage
@@ -98,6 +110,7 @@
formData.append('arr', document.getElementById('category').value); formData.append('arr', document.getElementById('category').value);
formData.append('notSymlink', document.getElementById('isSymlink').checked); formData.append('notSymlink', document.getElementById('isSymlink').checked);
formData.append('downloadUncached', document.getElementById('downloadUncached').checked);
const response = await fetch('/internal/add', { const response = await fetch('/internal/add', {
method: 'POST', method: 'POST',
+1 -1
View File
@@ -31,7 +31,7 @@
<div class="mb-3"> <div class="mb-3">
<div class="form-check"> <div class="form-check">
<input class="form-check-input" type="checkbox" id="autoProcess" checked> <input class="form-check-input" type="checkbox" id="autoProcess">
<label class="form-check-label" for="autoProcess"> <label class="form-check-label" for="autoProcess">
Auto Process(this will delete and re-search broken media) Auto Process(this will delete and re-search broken media)
</label> </label>