feat: restructure code; add size and ext checks (#39)

- Refactor code
- Add file size and extension checkers
- Change repair workflow to use zurg
This commit is contained in:
Mukhtar Akere
2025-02-04 11:07:19 +01:00
committed by GitHub
parent 8ca3cb32f3
commit 16c825d5ba
38 changed files with 1138 additions and 769 deletions
+11 -9
View File
@@ -3,7 +3,8 @@ package arr
import (
"bytes"
"encoding/json"
"github.com/sirrobot01/debrid-blackhole/common"
"github.com/sirrobot01/debrid-blackhole/internal/config"
"github.com/sirrobot01/debrid-blackhole/internal/request"
"net/http"
"strings"
"sync"
@@ -20,14 +21,15 @@ const (
)
var (
client *common.RLHTTPClient = common.NewRLHTTPClient(nil, nil)
client *request.RLHTTPClient = request.NewRLHTTPClient(nil, nil)
)
type Arr struct {
Name string `json:"name"`
Host string `json:"host"`
Token string `json:"token"`
Type Type `json:"type"`
Name string `json:"name"`
Host string `json:"host"`
Token string `json:"token"`
Type Type `json:"type"`
verifiedDirs sync.Map // map[string]struct{} -> dir -> struct{}
}
func NewArr(name, host, token string, arrType Type) *Arr {
@@ -43,7 +45,7 @@ func (a *Arr) Request(method, endpoint string, payload interface{}) (*http.Respo
if a.Token == "" || a.Host == "" {
return nil, nil
}
url, err := common.JoinURL(a.Host, endpoint)
url, err := request.JoinURL(a.Host, endpoint)
if err != nil {
return nil, err
}
@@ -84,9 +86,9 @@ func inferType(host, name string) Type {
}
}
func NewStorage(cfg []common.ArrConfig) *Storage {
func NewStorage() *Storage {
arrs := make(map[string]*Arr)
for _, a := range cfg {
for _, a := range config.GetConfig().Arrs {
name := a.Name
arrs[name] = NewArr(name, a.Host, a.Token, inferType(a.Host, name))
}
+92 -12
View File
@@ -3,7 +3,9 @@ package arr
import (
"encoding/json"
"fmt"
"log"
"net/http"
"strconv"
)
func (a *Arr) GetMedia(tvId string) ([]Content, error) {
@@ -14,7 +16,7 @@ func (a *Arr) GetMedia(tvId string) ([]Content, error) {
}
if resp.StatusCode == http.StatusNotFound {
// This is Radarr
repairLogger.Info().Msg("Radarr detected")
log.Println("Radarr detected")
a.Type = Radarr
return GetMovies(a, tvId)
}
@@ -44,11 +46,34 @@ func (a *Arr) GetMedia(tvId string) ([]Content, error) {
Title: d.Title,
Id: d.Id,
}
files := make([]contentFile, 0)
type episode struct {
Id int `json:"id"`
EpisodeFileID int `json:"episodeFileId"`
}
resp, err = a.Request(http.MethodGet, fmt.Sprintf("api/v3/episode?seriesId=%d", d.Id), nil)
if err != nil {
continue
}
defer resp.Body.Close()
var episodes []episode
if err = json.NewDecoder(resp.Body).Decode(&episodes); err != nil {
continue
}
episodeFileIDMap := make(map[int]int)
for _, e := range episodes {
episodeFileIDMap[e.EpisodeFileID] = e.Id
}
files := make([]ContentFile, 0)
for _, file := range seriesFiles {
files = append(files, contentFile{
Id: file.Id,
Path: file.Path,
eId, ok := episodeFileIDMap[file.Id]
if !ok {
eId = 0
}
files = append(files, ContentFile{
FileId: file.Id,
Path: file.Path,
Id: eId,
})
}
ct.Files = files
@@ -73,10 +98,11 @@ func GetMovies(a *Arr, tvId string) ([]Content, error) {
Title: movie.Title,
Id: movie.Id,
}
files := make([]contentFile, 0)
files = append(files, contentFile{
Id: movie.MovieFile.Id,
Path: movie.MovieFile.Path,
files := make([]ContentFile, 0)
files = append(files, ContentFile{
FileId: movie.MovieFile.Id,
Id: movie.Id,
Path: movie.MovieFile.Path,
})
ct.Files = files
contents = append(contents, ct)
@@ -84,15 +110,69 @@ func GetMovies(a *Arr, tvId string) ([]Content, error) {
return contents, nil
}
func (a *Arr) DeleteFile(id int) error {
func (a *Arr) SearchMissing(files []ContentFile) error {
var payload interface{}
ids := make([]int, 0)
for _, f := range files {
ids = append(ids, f.Id)
}
switch a.Type {
case Sonarr:
_, err := a.Request(http.MethodDelete, fmt.Sprintf("api/v3/episodefile/%d", id), nil)
payload = struct {
Name string `json:"name"`
EpisodeIds []int `json:"episodeIds"`
}{
Name: "EpisodeSearch",
EpisodeIds: ids,
}
case Radarr:
payload = struct {
Name string `json:"name"`
MovieIds []int `json:"movieIds"`
}{
Name: "MoviesSearch",
MovieIds: ids,
}
default:
return fmt.Errorf("unknown arr type: %s", a.Type)
}
resp, err := a.Request(http.MethodPost, "api/v3/command", payload)
if err != nil {
return fmt.Errorf("failed to search missing: %v", err)
}
if statusOk := strconv.Itoa(resp.StatusCode)[0] == '2'; !statusOk {
return fmt.Errorf("failed to search missing. Status Code: %s", resp.Status)
}
return nil
}
func (a *Arr) DeleteFiles(files []ContentFile) error {
ids := make([]int, 0)
for _, f := range files {
ids = append(ids, f.FileId)
}
var payload interface{}
switch a.Type {
case Sonarr:
payload = struct {
EpisodeFileIds []int `json:"episodeFileIds"`
}{
EpisodeFileIds: ids,
}
_, err := a.Request(http.MethodDelete, "api/v3/episodefile/bulk", payload)
if err != nil {
return err
}
case Radarr:
_, err := a.Request(http.MethodDelete, fmt.Sprintf("api/v3/moviefile/%d", id), nil)
payload = struct {
MovieFileIds []int `json:"movieFileIds"`
}{
MovieFileIds: ids,
}
_, err := a.Request(http.MethodDelete, "api/v3/moviefile/bulk", payload)
if err != nil {
return err
}
+2 -2
View File
@@ -3,7 +3,7 @@ package arr
import (
"cmp"
"fmt"
"github.com/sirrobot01/debrid-blackhole/common"
"github.com/sirrobot01/debrid-blackhole/internal/request"
"net/http"
"strconv"
"strings"
@@ -36,7 +36,7 @@ func (a *Arr) MarkAsFailed(infoHash string) error {
}
}
if torrentId != 0 {
url, err := common.JoinURL(a.Host, "history/failed/", strconv.Itoa(torrentId))
url, err := request.JoinURL(a.Host, "history/failed/", strconv.Itoa(torrentId))
if err != nil {
return err
}
-342
View File
@@ -1,342 +0,0 @@
package arr
import (
"github.com/rs/zerolog"
"github.com/sirrobot01/debrid-blackhole/common"
"io"
"net/http"
"os"
"path/filepath"
"runtime"
"strconv"
"sync"
)
// repairLogger is the package-level logger shared by all repair operations.
// It stays nil until getLogger builds it on first use.
var repairLogger *zerolog.Logger

// getLogger returns the shared repair logger, creating it lazily on the
// first call.
//
// NOTE(review): this lazy init is not goroutine-safe — two goroutines can
// both observe nil and construct separate loggers. A sync.Once would make
// it safe; confirm whether callers can race before relying on identity.
func getLogger() *zerolog.Logger {
	if repairLogger == nil {
		logger := common.NewLogger("repair", common.CONFIG.LogLevel, os.Stdout)
		repairLogger = &logger
	}
	return repairLogger
}
// SearchMissing asks the arr to re-search for the given media id: a
// "SeriesSearch" command for Sonarr, a "MoviesSearch" command for Radarr.
// Failures are logged rather than returned.
func (a *Arr) SearchMissing(id int) {
	var payload interface{}
	switch a.Type {
	case Sonarr:
		payload = struct {
			Name     string `json:"name"`
			SeriesId int    `json:"seriesId"`
		}{
			Name:     "SeriesSearch",
			SeriesId: id,
		}
	case Radarr:
		payload = struct {
			Name    string `json:"name"`
			MovieId []int  `json:"movieIds"`
		}{
			Name:    "MoviesSearch",
			MovieId: []int{id},
		}
	default:
		getLogger().Info().Msgf("Unknown arr type: %s", a.Type)
		return
	}
	resp, err := a.Request(http.MethodPost, "api/v3/command", payload)
	if err != nil {
		getLogger().Info().Msgf("Failed to search missing: %v", err)
		return
	}
	// a.Request returns (nil, nil) when the arr host/token are unset; the
	// original dereferenced resp.StatusCode unconditionally and would panic.
	if resp == nil {
		getLogger().Info().Msg("Search missing skipped: arr host/token not configured")
		return
	}
	// Close the body so the HTTP transport can reuse the connection
	// (the original leaked it).
	defer resp.Body.Close()
	if statusOk := strconv.Itoa(resp.StatusCode)[0] == '2'; !statusOk {
		getLogger().Info().Msgf("Failed to search missing: %s", resp.Status)
		return
	}
}
// Repair scans the arr's library for the given TMDB id, identifies media
// whose files are broken, and logs a summary of each stage. The error from
// fetching the media list is the only failure propagated to the caller.
func (a *Arr) Repair(tmdbId string) error {
	log := getLogger()
	log.Info().Msgf("Starting repair for %s", a.Name)
	media, err := a.GetMedia(tmdbId)
	if err != nil {
		log.Info().Msgf("Failed to get %s media: %v", a.Type, err)
		return err
	}
	log.Info().Msgf("Found %d %s media", len(media), a.Type)
	broken := a.processMedia(media)
	log.Info().Msgf("Found %d %s broken media files", len(broken), a.Type)
	// Automatic search for missing files
	log.Info().Msgf("Repair completed for %s", a.Name)
	return nil
}
// processMedia checks every media item for broken files and returns the
// items that had at least one. Zero or one items are checked inline;
// larger sets fan out to a worker pool sized NumCPU*4 (capped at the
// number of items). Broken items also trigger a SearchMissing re-download.
func (a *Arr) processMedia(media []Content) []Content {
	if len(media) <= 1 {
		// Serial path: no goroutine overhead for 0 or 1 items.
		var brokenMedia []Content
		for _, m := range media {
			// Check if media is accessible
			if !a.isMediaAccessible(m) {
				getLogger().Debug().Msgf("Skipping media check for %s - parent directory not accessible", m.Title)
				continue
			}
			if a.checkMediaFiles(m) {
				// checkMediaFiles already deleted the broken file records;
				// kick off a search so the arr re-grabs them.
				a.SearchMissing(m.Id)
				brokenMedia = append(brokenMedia, m)
			}
		}
		return brokenMedia
	}
	workerCount := runtime.NumCPU() * 4
	if len(media) < workerCount {
		workerCount = len(media)
	}
	jobs := make(chan Content)
	results := make(chan Content)
	var brokenMedia []Content
	var wg sync.WaitGroup
	for i := 0; i < workerCount; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for m := range jobs {
				// Check if media is accessible
				// First check if we can access this media's directory
				if !a.isMediaAccessible(m) {
					getLogger().Debug().Msgf("Skipping media check for %s - parent directory not accessible", m.Title)
					continue
				}
				if a.checkMediaFilesParallel(m) {
					a.SearchMissing(m.Id)
					results <- m
				}
			}
		}()
	}
	// Feed jobs from a separate goroutine: both channels are unbuffered, so
	// producing inline would deadlock against the result collection below.
	go func() {
		for _, m := range media {
			jobs <- m
		}
		close(jobs)
	}()
	// Close results once all workers exit, ending the range loop below.
	go func() {
		wg.Wait()
		close(results)
	}()
	for m := range results {
		brokenMedia = append(brokenMedia, m)
	}
	return brokenMedia
}
// checkMediaFilesParallel is the concurrent variant of checkMediaFiles:
// for media with more than one file it fans the per-file checks out to a
// pool of NumCPU*2 workers (capped at the file count). Broken files are
// deleted via a.DeleteFile as they are found; the return value is true
// when any file was broken.
func (a *Arr) checkMediaFilesParallel(m Content) bool {
	if len(m.Files) <= 1 {
		// Not worth spinning up goroutines for 0 or 1 files.
		return a.checkMediaFiles(m)
	}
	fileWorkers := runtime.NumCPU() * 2
	if len(m.Files) < fileWorkers {
		fileWorkers = len(m.Files)
	}
	fileJobs := make(chan contentFile)
	// Buffered to len(m.Files) so workers never block sending results.
	brokenFiles := make(chan bool, len(m.Files))
	var fileWg sync.WaitGroup
	for i := 0; i < fileWorkers; i++ {
		fileWg.Add(1)
		go func() {
			defer fileWg.Done()
			for f := range fileJobs {
				getLogger().Debug().Msgf("Checking file: %s", f.Path)
				isBroken := false
				if fileIsSymlinked(f.Path) {
					getLogger().Debug().Msgf("File is symlinked: %s", f.Path)
					// Symlink whose target is missing/unreadable: delete the
					// file record so the arr can re-grab it.
					if !fileIsCorrectSymlink(f.Path) {
						getLogger().Debug().Msgf("File is broken: %s", f.Path)
						isBroken = true
						if err := a.DeleteFile(f.Id); err != nil {
							getLogger().Info().Msgf("Failed to delete file: %s %d: %v", f.Path, f.Id, err)
						}
					}
				} else {
					getLogger().Debug().Msgf("File is not symlinked: %s", f.Path)
					// Regular file that cannot be read: same treatment.
					if !fileIsReadable(f.Path) {
						getLogger().Debug().Msgf("File is broken: %s", f.Path)
						isBroken = true
						if err := a.DeleteFile(f.Id); err != nil {
							getLogger().Info().Msgf("Failed to delete file: %s %d: %v", f.Path, f.Id, err)
						}
					}
				}
				brokenFiles <- isBroken
			}
		}()
	}
	// Feed jobs asynchronously; fileJobs is unbuffered so producing inline
	// would block before the collection loop below starts.
	go func() {
		for _, f := range m.Files {
			fileJobs <- f
		}
		close(fileJobs)
	}()
	// Close the result channel once every worker has drained its jobs.
	go func() {
		fileWg.Wait()
		close(brokenFiles)
	}()
	isBroken := false
	for broken := range brokenFiles {
		if broken {
			isBroken = true
		}
	}
	return isBroken
}
// checkMediaFiles inspects each file of m sequentially, deleting any that
// is a dangling symlink or an unreadable regular file. It reports whether
// at least one file was broken.
func (a *Arr) checkMediaFiles(m Content) bool {
	broken := false
	for _, f := range m.Files {
		var ok bool
		if fileIsSymlinked(f.Path) {
			ok = fileIsCorrectSymlink(f.Path)
		} else {
			ok = fileIsReadable(f.Path)
		}
		if ok {
			continue
		}
		broken = true
		// Remove the broken file record so the arr can re-grab it.
		if err := a.DeleteFile(f.Id); err != nil {
			getLogger().Info().Msgf("Failed to delete file: %s %d: %v", f.Path, f.Id, err)
		}
	}
	return broken
}
// isMediaAccessible reports whether the storage backing m can be reached.
// We're likely to mount the debrid path, so instead of checking the arr
// path we resolve the first file's symlink (if any) and stat the parent of
// its target's directory — the arr path itself is likely a symlink and we
// want the actual path where the media is stored, to avoid false positives.
func (a *Arr) isMediaAccessible(m Content) bool {
	if len(m.Files) == 0 {
		return false
	}
	// Get the first file to check its target location.
	file := m.Files[0].Path
	var targetPath string
	fileInfo, err := os.Lstat(file)
	if err != nil {
		// Fix: use getLogger() rather than the raw repairLogger var, which
		// is nil until first initialized and nil-deref'd here.
		getLogger().Debug().Msgf("Cannot stat file %s: %v", file, err)
		return false
	}
	if fileInfo.Mode()&os.ModeSymlink != 0 {
		// If it's a symlink, get where it points to.
		target, err := os.Readlink(file)
		if err != nil {
			getLogger().Debug().Msgf("Cannot read symlink %s: %v", file, err)
			return false
		}
		// If the symlink target is relative, make it absolute.
		if !filepath.IsAbs(target) {
			target = filepath.Join(filepath.Dir(file), target)
		}
		targetPath = target
	} else {
		// If it's a regular file, use its path.
		targetPath = file
	}
	mediaDir := filepath.Dir(targetPath) // e.g. /remote/storage/Movie
	parentDir := filepath.Dir(mediaDir)  // e.g. /remote/storage
	if _, err = os.Stat(parentDir); err != nil {
		getLogger().Debug().Msgf("Parent directory of target not accessible for media %s: %s", m.Title, parentDir)
		return false
	}
	return true
}
// fileIsSymlinked reports whether file exists and is itself a symbolic
// link. Lstat (not Stat) is used so the link is inspected rather than its
// target; any stat error counts as "not a symlink".
func fileIsSymlinked(file string) bool {
	if info, err := os.Lstat(file); err == nil {
		return info.Mode()&os.ModeSymlink != 0
	}
	return false
}
// fileIsCorrectSymlink reports whether file is a symlink whose target
// resolves to a readable regular file. Relative targets are resolved
// against the link's own directory before checking.
func fileIsCorrectSymlink(file string) bool {
	target, err := os.Readlink(file)
	if err != nil {
		return false
	}
	if !filepath.IsAbs(target) {
		target = filepath.Join(filepath.Dir(file), target)
	}
	return fileIsReadable(target)
}
// fileIsReadable reports whether filePath is an existing regular file whose
// leading bytes can actually be read — stat alone can succeed on a stale
// mount where reads would fail.
func fileIsReadable(filePath string) bool {
	info, err := os.Stat(filePath)
	switch {
	case err != nil:
		return false
	case !info.Mode().IsRegular():
		return false
	}
	// Probe the first bytes of the file.
	return checkFileStart(filePath) == nil
}
// checkFileStart opens filePath and reads up to the first 1024 bytes to
// confirm the file's contents are actually reachable.
//
// Bug fix: io.ReadAtLeast returns io.ErrUnexpectedEOF (not io.EOF) when the
// file holds fewer than min bytes, so the original flagged every small but
// perfectly readable file as broken — which got it deleted upstream. Both
// EOF variants are now treated as success.
func checkFileStart(filePath string) error {
	f, err := os.Open(filePath)
	if err != nil {
		return err
	}
	defer f.Close()
	buffer := make([]byte, 1024)
	_, err = io.ReadAtLeast(f, buffer, len(buffer))
	if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
		return err
	}
	return nil
}
+9 -5
View File
@@ -14,16 +14,20 @@ type Movie struct {
Id int `json:"id"`
}
type contentFile struct {
Name string `json:"name"`
Path string `json:"path"`
Id int `json:"id"`
type ContentFile struct {
Name string `json:"name"`
Path string `json:"path"`
Id int `json:"id"`
FileId int `json:"fileId"`
TargetPath string `json:"targetPath"`
IsSymlink bool `json:"isSymlink"`
IsBroken bool `json:"isBroken"`
}
type Content struct {
Title string `json:"title"`
Id int `json:"id"`
Files []contentFile `json:"files"`
Files []ContentFile `json:"files"`
}
type seriesFile struct {
+14 -6
View File
@@ -5,6 +5,9 @@ import (
"fmt"
"github.com/rs/zerolog"
"github.com/sirrobot01/debrid-blackhole/common"
"github.com/sirrobot01/debrid-blackhole/internal/config"
"github.com/sirrobot01/debrid-blackhole/internal/logger"
"github.com/sirrobot01/debrid-blackhole/internal/request"
"github.com/sirrobot01/debrid-blackhole/pkg/debrid/structs"
"net/http"
gourl "net/url"
@@ -85,6 +88,8 @@ func getAlldebridStatus(statusCode int) string {
func flattenFiles(files []structs.AllDebridMagnetFile, parentPath string, index *int) []TorrentFile {
result := make([]TorrentFile, 0)
cfg := config.GetConfig()
for _, f := range files {
currentPath := f.Name
if parentPath != "" {
@@ -100,7 +105,11 @@ func flattenFiles(files []structs.AllDebridMagnetFile, parentPath string, index
if common.RegexMatch(common.SAMPLEMATCH, fileName) {
continue
}
if !common.RegexMatch(common.VIDEOMATCH, fileName) && !common.RegexMatch(common.MUSICMATCH, fileName) {
if !cfg.IsAllowedFile(fileName) {
continue
}
if !cfg.IsSizeAllowed(f.Size) {
continue
}
@@ -240,13 +249,12 @@ func (r *AllDebrid) GetCheckCached() bool {
return r.CheckCached
}
func NewAllDebrid(dc common.DebridConfig, cache *common.Cache) *AllDebrid {
rl := common.ParseRateLimit(dc.RateLimit)
func NewAllDebrid(dc config.Debrid, cache *common.Cache) *AllDebrid {
rl := request.ParseRateLimit(dc.RateLimit)
headers := map[string]string{
"Authorization": fmt.Sprintf("Bearer %s", dc.APIKey),
}
client := common.NewRLHTTPClient(rl, headers)
logger := common.NewLogger(dc.Name, common.CONFIG.LogLevel, os.Stdout)
client := request.NewRLHTTPClient(rl, headers)
return &AllDebrid{
BaseDebrid: BaseDebrid{
Name: "alldebrid",
@@ -256,7 +264,7 @@ func NewAllDebrid(dc common.DebridConfig, cache *common.Cache) *AllDebrid {
client: client,
cache: cache,
MountPath: dc.Folder,
logger: logger,
logger: logger.NewLogger(dc.Name, config.GetConfig().LogLevel, os.Stdout),
CheckCached: dc.CheckCached,
},
}
+15 -9
View File
@@ -1,10 +1,14 @@
package debrid
import (
"cmp"
"fmt"
"github.com/anacrolix/torrent/metainfo"
"github.com/rs/zerolog"
"github.com/sirrobot01/debrid-blackhole/common"
"github.com/sirrobot01/debrid-blackhole/internal/config"
"github.com/sirrobot01/debrid-blackhole/internal/request"
"github.com/sirrobot01/debrid-blackhole/internal/utils"
"github.com/sirrobot01/debrid-blackhole/pkg/arr"
"path/filepath"
)
@@ -14,7 +18,7 @@ type BaseDebrid struct {
Host string `json:"host"`
APIKey string
DownloadUncached bool
client *common.RLHTTPClient
client *request.RLHTTPClient
cache *common.Cache
MountPath string
logger zerolog.Logger
@@ -34,12 +38,14 @@ type Service interface {
GetLogger() zerolog.Logger
}
func NewDebrid(debs []common.DebridConfig, maxCachedSize int) *DebridService {
func NewDebrid() *DebridService {
cfg := config.GetConfig()
maxCachedSize := cmp.Or(cfg.MaxCacheSize, 1000)
debrids := make([]Service, 0)
// Divide the cache size by the number of debrids
maxCacheSize := maxCachedSize / len(debs)
maxCacheSize := maxCachedSize / len(cfg.Debrids)
for _, dc := range debs {
for _, dc := range cfg.Debrids {
d := createDebrid(dc, common.NewCache(maxCacheSize))
logger := d.GetLogger()
logger.Info().Msg("Debrid Service started")
@@ -49,7 +55,7 @@ func NewDebrid(debs []common.DebridConfig, maxCachedSize int) *DebridService {
return d
}
func createDebrid(dc common.DebridConfig, cache *common.Cache) Service {
func createDebrid(dc config.Debrid, cache *common.Cache) Service {
switch dc.Name {
case "realdebrid":
return NewRealDebrid(dc, cache)
@@ -75,8 +81,8 @@ func GetTorrentInfo(filePath string) (*Torrent, error) {
}
func torrentFromMagnetFile(filePath string) (*Torrent, error) {
magnetLink := common.OpenMagnetFile(filePath)
magnet, err := common.GetMagnetInfo(magnetLink)
magnetLink := utils.OpenMagnetFile(filePath)
magnet, err := utils.GetMagnetInfo(magnetLink)
if err != nil {
return nil, err
}
@@ -102,7 +108,7 @@ func getTorrentInfo(filePath string) (*Torrent, error) {
return nil, err
}
infoLength := info.Length
magnet := &common.Magnet{
magnet := &utils.Magnet{
InfoHash: infoHash,
Name: info.Name,
Size: infoLength,
@@ -145,7 +151,7 @@ func GetLocalCache(infohashes []string, cache *common.Cache) ([]string, map[stri
return infohashes, result
}
func ProcessTorrent(d *DebridService, magnet *common.Magnet, a *arr.Arr, isSymlink bool) (*Torrent, error) {
func ProcessTorrent(d *DebridService, magnet *utils.Magnet, a *arr.Arr, isSymlink bool) (*Torrent, error) {
debridTorrent := &Torrent{
InfoHash: magnet.InfoHash,
Magnet: magnet,
+11 -5
View File
@@ -6,6 +6,9 @@ import (
"fmt"
"github.com/rs/zerolog"
"github.com/sirrobot01/debrid-blackhole/common"
"github.com/sirrobot01/debrid-blackhole/internal/config"
"github.com/sirrobot01/debrid-blackhole/internal/logger"
"github.com/sirrobot01/debrid-blackhole/internal/request"
"github.com/sirrobot01/debrid-blackhole/pkg/debrid/structs"
"log"
"net/http"
@@ -129,7 +132,11 @@ func (r *DebridLink) GetTorrent(id string) (*Torrent, error) {
torrent.Filename = name
torrent.OriginalFilename = name
files := make([]TorrentFile, len(data.Files))
cfg := config.GetConfig()
for i, f := range data.Files {
if !cfg.IsSizeAllowed(f.Size) {
continue
}
files[i] = TorrentFile{
Id: f.ID,
Name: f.Name,
@@ -250,14 +257,13 @@ func (r *DebridLink) GetCheckCached() bool {
return r.CheckCached
}
func NewDebridLink(dc common.DebridConfig, cache *common.Cache) *DebridLink {
rl := common.ParseRateLimit(dc.RateLimit)
func NewDebridLink(dc config.Debrid, cache *common.Cache) *DebridLink {
rl := request.ParseRateLimit(dc.RateLimit)
headers := map[string]string{
"Authorization": fmt.Sprintf("Bearer %s", dc.APIKey),
"Content-Type": "application/json",
}
client := common.NewRLHTTPClient(rl, headers)
logger := common.NewLogger(dc.Name, common.CONFIG.LogLevel, os.Stdout)
client := request.NewRLHTTPClient(rl, headers)
return &DebridLink{
BaseDebrid: BaseDebrid{
Name: "debridlink",
@@ -267,7 +273,7 @@ func NewDebridLink(dc common.DebridConfig, cache *common.Cache) *DebridLink {
client: client,
cache: cache,
MountPath: dc.Folder,
logger: logger,
logger: logger.NewLogger(dc.Name, config.GetConfig().LogLevel, os.Stdout),
CheckCached: dc.CheckCached,
},
}
+14 -7
View File
@@ -5,6 +5,10 @@ import (
"fmt"
"github.com/rs/zerolog"
"github.com/sirrobot01/debrid-blackhole/common"
"github.com/sirrobot01/debrid-blackhole/internal/config"
"github.com/sirrobot01/debrid-blackhole/internal/logger"
"github.com/sirrobot01/debrid-blackhole/internal/request"
"github.com/sirrobot01/debrid-blackhole/internal/utils"
"github.com/sirrobot01/debrid-blackhole/pkg/debrid/structs"
"net/http"
gourl "net/url"
@@ -33,13 +37,17 @@ func (r *RealDebrid) GetLogger() zerolog.Logger {
func GetTorrentFiles(data structs.RealDebridTorrentInfo) []TorrentFile {
files := make([]TorrentFile, 0)
cfg := config.GetConfig()
for _, f := range data.Files {
name := filepath.Base(f.Path)
if common.RegexMatch(common.SAMPLEMATCH, name) {
if utils.RegexMatch(utils.SAMPLEMATCH, name) {
// Skip sample files
continue
}
if !common.RegexMatch(common.VIDEOMATCH, name) && !common.RegexMatch(common.MUSICMATCH, name) {
if !cfg.IsAllowedFile(name) {
continue
}
if !cfg.IsSizeAllowed(f.Bytes) {
continue
}
fileId := f.ID
@@ -273,13 +281,12 @@ func (r *RealDebrid) GetCheckCached() bool {
return r.CheckCached
}
func NewRealDebrid(dc common.DebridConfig, cache *common.Cache) *RealDebrid {
rl := common.ParseRateLimit(dc.RateLimit)
func NewRealDebrid(dc config.Debrid, cache *common.Cache) *RealDebrid {
rl := request.ParseRateLimit(dc.RateLimit)
headers := map[string]string{
"Authorization": fmt.Sprintf("Bearer %s", dc.APIKey),
}
client := common.NewRLHTTPClient(rl, headers)
logger := common.NewLogger(dc.Name, common.CONFIG.LogLevel, os.Stdout)
client := request.NewRLHTTPClient(rl, headers)
return &RealDebrid{
BaseDebrid: BaseDebrid{
Name: "realdebrid",
@@ -289,7 +296,7 @@ func NewRealDebrid(dc common.DebridConfig, cache *common.Cache) *RealDebrid {
client: client,
cache: cache,
MountPath: dc.Folder,
logger: logger,
logger: logger.NewLogger(dc.Name, config.GetConfig().LogLevel, os.Stdout),
CheckCached: dc.CheckCached,
},
}
+13 -6
View File
@@ -6,6 +6,9 @@ import (
"fmt"
"github.com/rs/zerolog"
"github.com/sirrobot01/debrid-blackhole/common"
"github.com/sirrobot01/debrid-blackhole/internal/config"
"github.com/sirrobot01/debrid-blackhole/internal/logger"
"github.com/sirrobot01/debrid-blackhole/internal/request"
"github.com/sirrobot01/debrid-blackhole/pkg/debrid/structs"
"log"
"mime/multipart"
@@ -166,13 +169,18 @@ func (r *Torbox) GetTorrent(id string) (*Torrent, error) {
torrent.Filename = name
torrent.OriginalFilename = name
files := make([]TorrentFile, 0)
cfg := config.GetConfig()
for _, f := range data.Files {
fileName := filepath.Base(f.Name)
if common.RegexMatch(common.SAMPLEMATCH, fileName) {
// Skip sample files
continue
}
if !common.RegexMatch(common.VIDEOMATCH, fileName) && !common.RegexMatch(common.MUSICMATCH, fileName) {
if !cfg.IsAllowedFile(fileName) {
continue
}
if !cfg.IsSizeAllowed(f.Size) {
continue
}
file := TorrentFile{
@@ -283,13 +291,12 @@ func (r *Torbox) GetCheckCached() bool {
return r.CheckCached
}
func NewTorbox(dc common.DebridConfig, cache *common.Cache) *Torbox {
rl := common.ParseRateLimit(dc.RateLimit)
func NewTorbox(dc config.Debrid, cache *common.Cache) *Torbox {
rl := request.ParseRateLimit(dc.RateLimit)
headers := map[string]string{
"Authorization": fmt.Sprintf("Bearer %s", dc.APIKey),
}
client := common.NewRLHTTPClient(rl, headers)
logger := common.NewLogger(dc.Name, common.CONFIG.LogLevel, os.Stdout)
client := request.NewRLHTTPClient(rl, headers)
return &Torbox{
BaseDebrid: BaseDebrid{
Name: "torbox",
@@ -299,7 +306,7 @@ func NewTorbox(dc common.DebridConfig, cache *common.Cache) *Torbox {
client: client,
cache: cache,
MountPath: dc.Folder,
logger: logger,
logger: logger.NewLogger(dc.Name, config.GetConfig().LogLevel, os.Stdout),
CheckCached: dc.CheckCached,
},
}
+4 -2
View File
@@ -3,6 +3,7 @@ package debrid
import (
"fmt"
"github.com/sirrobot01/debrid-blackhole/common"
"github.com/sirrobot01/debrid-blackhole/internal/utils"
"github.com/sirrobot01/debrid-blackhole/pkg/arr"
"os"
"path/filepath"
@@ -36,7 +37,7 @@ type Torrent struct {
OriginalFilename string `json:"original_filename"`
Size int64 `json:"size"`
Bytes int64 `json:"bytes"` // Size of only the files that are downloaded
Magnet *common.Magnet `json:"magnet"`
Magnet *utils.Magnet `json:"magnet"`
Files []TorrentFile `json:"files"`
Status string `json:"status"`
Added string `json:"added"`
@@ -70,7 +71,8 @@ func (t *Torrent) GetMountFolder(rClonePath string) (string, error) {
}
for _, path := range possiblePaths {
if common.FileReady(filepath.Join(rClonePath, path)) {
_, err := os.Stat(filepath.Join(rClonePath, path))
if !os.IsNotExist(err) {
return path, nil
}
}
+11 -9
View File
@@ -10,7 +10,9 @@ import (
"github.com/elazarl/goproxy"
"github.com/elazarl/goproxy/ext/auth"
"github.com/rs/zerolog"
"github.com/sirrobot01/debrid-blackhole/common"
"github.com/sirrobot01/debrid-blackhole/internal/config"
"github.com/sirrobot01/debrid-blackhole/internal/logger"
"github.com/sirrobot01/debrid-blackhole/internal/utils"
"github.com/sirrobot01/debrid-blackhole/pkg/debrid"
"github.com/valyala/fastjson"
"io"
@@ -78,8 +80,8 @@ type Proxy struct {
logger zerolog.Logger
}
func NewProxy(config common.Config, deb *debrid.DebridService) *Proxy {
cfg := config.Proxy
func NewProxy(deb *debrid.DebridService) *Proxy {
cfg := config.GetConfig().Proxy
port := cmp.Or(os.Getenv("PORT"), cfg.Port, "8181")
return &Proxy{
port: port,
@@ -88,7 +90,7 @@ func NewProxy(config common.Config, deb *debrid.DebridService) *Proxy {
password: cfg.Password,
cachedOnly: *cfg.CachedOnly,
debrid: deb.Get(),
logger: common.NewLogger("Proxy", cfg.LogLevel, os.Stdout),
logger: logger.NewLogger("Proxy", cfg.LogLevel, os.Stdout),
}
}
@@ -182,7 +184,7 @@ func (item Item) getHash() string {
}
if strings.Contains(item.GUID, "magnet:?") {
magnet, err := common.GetMagnetInfo(item.GUID)
magnet, err := utils.GetMagnetInfo(item.GUID)
if err == nil && magnet != nil && magnet.InfoHash != "" {
return magnet.InfoHash
}
@@ -196,22 +198,22 @@ func (item Item) getHash() string {
}
if strings.Contains(magnetLink, "magnet:?") {
magnet, err := common.GetMagnetInfo(magnetLink)
magnet, err := utils.GetMagnetInfo(magnetLink)
if err == nil && magnet != nil && magnet.InfoHash != "" {
return magnet.InfoHash
}
}
//Check Description for infohash
hash := common.ExtractInfoHash(item.Description)
hash := utils.ExtractInfoHash(item.Description)
if hash == "" {
// Check Title for infohash
hash = common.ExtractInfoHash(item.Comments)
hash = utils.ExtractInfoHash(item.Comments)
}
infohash = hash
if infohash == "" {
if strings.Contains(magnetLink, "http") {
h, _ := common.GetInfohashFromURL(magnetLink)
h, _ := utils.GetInfohashFromURL(magnetLink)
if h != "" {
infohash = h
}
+3 -3
View File
@@ -3,14 +3,14 @@ package qbit
import (
"context"
"fmt"
"github.com/sirrobot01/debrid-blackhole/common"
"github.com/sirrobot01/debrid-blackhole/pkg/arr"
"github.com/sirrobot01/debrid-blackhole/pkg/debrid"
"github.com/sirrobot01/debrid-blackhole/pkg/qbit/server"
"github.com/sirrobot01/debrid-blackhole/pkg/repair"
)
func Start(ctx context.Context, config *common.Config, deb *debrid.DebridService, arrs *arr.Storage) error {
srv := server.NewServer(config, deb, arrs)
func Start(ctx context.Context, deb *debrid.DebridService, arrs *arr.Storage, _repair *repair.Repair) error {
srv := server.NewServer(deb, arrs, _repair)
if err := srv.Start(ctx); err != nil {
return fmt.Errorf("failed to start qbit server: %w", err)
}
+2 -2
View File
@@ -2,10 +2,10 @@ package server
import (
"fmt"
"github.com/sirrobot01/debrid-blackhole/internal/utils"
"time"
"github.com/google/uuid"
"github.com/sirrobot01/debrid-blackhole/common"
"github.com/sirrobot01/debrid-blackhole/pkg/arr"
"github.com/sirrobot01/debrid-blackhole/pkg/debrid"
"github.com/sirrobot01/debrid-blackhole/pkg/qbit/shared"
@@ -66,7 +66,7 @@ func (i *ImportRequest) Complete() {
func (i *ImportRequest) Process(q *shared.QBit) (err error) {
// Use this for now.
// This sends the torrent to the arr
magnet, err := common.GetMagnetFromUrl(i.URI)
magnet, err := utils.GetMagnetFromUrl(i.URI)
if err != nil {
return fmt.Errorf("error parsing magnet link: %w", err)
}
+37 -37
View File
@@ -5,7 +5,7 @@ import (
"encoding/base64"
"github.com/go-chi/chi/v5"
"github.com/rs/zerolog"
"github.com/sirrobot01/debrid-blackhole/common"
"github.com/sirrobot01/debrid-blackhole/internal/request"
"github.com/sirrobot01/debrid-blackhole/pkg/arr"
"github.com/sirrobot01/debrid-blackhole/pkg/qbit/shared"
"net/http"
@@ -13,7 +13,7 @@ import (
"strings"
)
type qbitHandler struct {
type QbitHandler struct {
qbit *shared.QBit
logger zerolog.Logger
debug bool
@@ -40,7 +40,7 @@ func decodeAuthHeader(header string) (string, string, error) {
return host, token, nil
}
func (q *qbitHandler) CategoryContext(next http.Handler) http.Handler {
func (q *QbitHandler) CategoryContext(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
category := strings.Trim(r.URL.Query().Get("category"), "")
if category == "" {
@@ -59,7 +59,7 @@ func (q *qbitHandler) CategoryContext(next http.Handler) http.Handler {
})
}
func (q *qbitHandler) authContext(next http.Handler) http.Handler {
func (q *QbitHandler) authContext(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
host, token, err := decodeAuthHeader(r.Header.Get("Authorization"))
category := r.Context().Value("category").(string)
@@ -96,29 +96,29 @@ func HashesCtx(next http.Handler) http.Handler {
})
}
func (q *qbitHandler) handleLogin(w http.ResponseWriter, r *http.Request) {
func (q *QbitHandler) handleLogin(w http.ResponseWriter, r *http.Request) {
_, _ = w.Write([]byte("Ok."))
}
func (q *qbitHandler) handleVersion(w http.ResponseWriter, r *http.Request) {
func (q *QbitHandler) handleVersion(w http.ResponseWriter, r *http.Request) {
_, _ = w.Write([]byte("v4.3.2"))
}
func (q *qbitHandler) handleWebAPIVersion(w http.ResponseWriter, r *http.Request) {
func (q *QbitHandler) handleWebAPIVersion(w http.ResponseWriter, r *http.Request) {
_, _ = w.Write([]byte("2.7"))
}
func (q *qbitHandler) handlePreferences(w http.ResponseWriter, r *http.Request) {
func (q *QbitHandler) handlePreferences(w http.ResponseWriter, r *http.Request) {
preferences := shared.NewAppPreferences()
preferences.WebUiUsername = q.qbit.Username
preferences.SavePath = q.qbit.DownloadFolder
preferences.TempPath = filepath.Join(q.qbit.DownloadFolder, "temp")
common.JSONResponse(w, preferences, http.StatusOK)
request.JSONResponse(w, preferences, http.StatusOK)
}
func (q *qbitHandler) handleBuildInfo(w http.ResponseWriter, r *http.Request) {
func (q *QbitHandler) handleBuildInfo(w http.ResponseWriter, r *http.Request) {
res := shared.BuildInfo{
Bitness: 64,
Boost: "1.75.0",
@@ -127,24 +127,24 @@ func (q *qbitHandler) handleBuildInfo(w http.ResponseWriter, r *http.Request) {
Qt: "5.15.2",
Zlib: "1.2.11",
}
common.JSONResponse(w, res, http.StatusOK)
request.JSONResponse(w, res, http.StatusOK)
}
func (q *qbitHandler) shutdown(w http.ResponseWriter, r *http.Request) {
func (q *QbitHandler) shutdown(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
}
func (q *qbitHandler) handleTorrentsInfo(w http.ResponseWriter, r *http.Request) {
func (q *QbitHandler) handleTorrentsInfo(w http.ResponseWriter, r *http.Request) {
//log all url params
ctx := r.Context()
category := ctx.Value("category").(string)
filter := strings.Trim(r.URL.Query().Get("filter"), "")
hashes, _ := ctx.Value("hashes").([]string)
torrents := q.qbit.Storage.GetAll(category, filter, hashes)
common.JSONResponse(w, torrents, http.StatusOK)
request.JSONResponse(w, torrents, http.StatusOK)
}
func (q *qbitHandler) handleTorrentsAdd(w http.ResponseWriter, r *http.Request) {
func (q *QbitHandler) handleTorrentsAdd(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
// Parse form based on content type
@@ -209,7 +209,7 @@ func (q *qbitHandler) handleTorrentsAdd(w http.ResponseWriter, r *http.Request)
w.WriteHeader(http.StatusOK)
}
func (q *qbitHandler) handleTorrentsDelete(w http.ResponseWriter, r *http.Request) {
func (q *QbitHandler) handleTorrentsDelete(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
hashes, _ := ctx.Value("hashes").([]string)
if len(hashes) == 0 {
@@ -223,7 +223,7 @@ func (q *qbitHandler) handleTorrentsDelete(w http.ResponseWriter, r *http.Reques
w.WriteHeader(http.StatusOK)
}
func (q *qbitHandler) handleTorrentsPause(w http.ResponseWriter, r *http.Request) {
func (q *QbitHandler) handleTorrentsPause(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
hashes, _ := ctx.Value("hashes").([]string)
for _, hash := range hashes {
@@ -237,7 +237,7 @@ func (q *qbitHandler) handleTorrentsPause(w http.ResponseWriter, r *http.Request
w.WriteHeader(http.StatusOK)
}
func (q *qbitHandler) handleTorrentsResume(w http.ResponseWriter, r *http.Request) {
func (q *QbitHandler) handleTorrentsResume(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
hashes, _ := ctx.Value("hashes").([]string)
for _, hash := range hashes {
@@ -251,7 +251,7 @@ func (q *qbitHandler) handleTorrentsResume(w http.ResponseWriter, r *http.Reques
w.WriteHeader(http.StatusOK)
}
func (q *qbitHandler) handleTorrentRecheck(w http.ResponseWriter, r *http.Request) {
func (q *QbitHandler) handleTorrentRecheck(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
hashes, _ := ctx.Value("hashes").([]string)
for _, hash := range hashes {
@@ -265,7 +265,7 @@ func (q *qbitHandler) handleTorrentRecheck(w http.ResponseWriter, r *http.Reques
w.WriteHeader(http.StatusOK)
}
func (q *qbitHandler) handleCategories(w http.ResponseWriter, r *http.Request) {
func (q *QbitHandler) handleCategories(w http.ResponseWriter, r *http.Request) {
var categories = map[string]shared.TorrentCategory{}
for _, cat := range q.qbit.Categories {
path := filepath.Join(q.qbit.DownloadFolder, cat)
@@ -274,10 +274,10 @@ func (q *qbitHandler) handleCategories(w http.ResponseWriter, r *http.Request) {
SavePath: path,
}
}
common.JSONResponse(w, categories, http.StatusOK)
request.JSONResponse(w, categories, http.StatusOK)
}
func (q *qbitHandler) handleCreateCategory(w http.ResponseWriter, r *http.Request) {
func (q *QbitHandler) handleCreateCategory(w http.ResponseWriter, r *http.Request) {
err := r.ParseForm()
if err != nil {
http.Error(w, "Failed to parse form data", http.StatusBadRequest)
@@ -292,27 +292,27 @@ func (q *qbitHandler) handleCreateCategory(w http.ResponseWriter, r *http.Reques
q.qbit.Categories = append(q.qbit.Categories, name)
common.JSONResponse(w, nil, http.StatusOK)
request.JSONResponse(w, nil, http.StatusOK)
}
func (q *qbitHandler) handleTorrentProperties(w http.ResponseWriter, r *http.Request) {
func (q *QbitHandler) handleTorrentProperties(w http.ResponseWriter, r *http.Request) {
hash := r.URL.Query().Get("hash")
torrent := q.qbit.Storage.Get(hash)
properties := q.qbit.GetTorrentProperties(torrent)
common.JSONResponse(w, properties, http.StatusOK)
request.JSONResponse(w, properties, http.StatusOK)
}
func (q *qbitHandler) handleTorrentFiles(w http.ResponseWriter, r *http.Request) {
func (q *QbitHandler) handleTorrentFiles(w http.ResponseWriter, r *http.Request) {
hash := r.URL.Query().Get("hash")
torrent := q.qbit.Storage.Get(hash)
if torrent == nil {
return
}
files := q.qbit.GetTorrentFiles(torrent)
common.JSONResponse(w, files, http.StatusOK)
request.JSONResponse(w, files, http.StatusOK)
}
func (q *qbitHandler) handleSetCategory(w http.ResponseWriter, r *http.Request) {
func (q *QbitHandler) handleSetCategory(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
category := ctx.Value("category").(string)
hashes, _ := ctx.Value("hashes").([]string)
@@ -321,10 +321,10 @@ func (q *qbitHandler) handleSetCategory(w http.ResponseWriter, r *http.Request)
torrent.Category = category
q.qbit.Storage.AddOrUpdate(torrent)
}
common.JSONResponse(w, nil, http.StatusOK)
request.JSONResponse(w, nil, http.StatusOK)
}
func (q *qbitHandler) handleAddTorrentTags(w http.ResponseWriter, r *http.Request) {
func (q *QbitHandler) handleAddTorrentTags(w http.ResponseWriter, r *http.Request) {
err := r.ParseForm()
if err != nil {
http.Error(w, "Failed to parse form data", http.StatusBadRequest)
@@ -340,10 +340,10 @@ func (q *qbitHandler) handleAddTorrentTags(w http.ResponseWriter, r *http.Reques
for _, t := range torrents {
q.qbit.SetTorrentTags(t, tags)
}
common.JSONResponse(w, nil, http.StatusOK)
request.JSONResponse(w, nil, http.StatusOK)
}
func (q *qbitHandler) handleRemoveTorrentTags(w http.ResponseWriter, r *http.Request) {
func (q *QbitHandler) handleRemoveTorrentTags(w http.ResponseWriter, r *http.Request) {
err := r.ParseForm()
if err != nil {
http.Error(w, "Failed to parse form data", http.StatusBadRequest)
@@ -360,14 +360,14 @@ func (q *qbitHandler) handleRemoveTorrentTags(w http.ResponseWriter, r *http.Req
q.qbit.RemoveTorrentTags(torrent, tags)
}
common.JSONResponse(w, nil, http.StatusOK)
request.JSONResponse(w, nil, http.StatusOK)
}
func (q *qbitHandler) handleGetTags(w http.ResponseWriter, r *http.Request) {
common.JSONResponse(w, q.qbit.Tags, http.StatusOK)
func (q *QbitHandler) handleGetTags(w http.ResponseWriter, r *http.Request) {
request.JSONResponse(w, q.qbit.Tags, http.StatusOK)
}
func (q *qbitHandler) handleCreateTags(w http.ResponseWriter, r *http.Request) {
func (q *QbitHandler) handleCreateTags(w http.ResponseWriter, r *http.Request) {
err := r.ParseForm()
if err != nil {
http.Error(w, "Failed to parse form data", http.StatusBadRequest)
@@ -378,5 +378,5 @@ func (q *qbitHandler) handleCreateTags(w http.ResponseWriter, r *http.Request) {
tags[i] = strings.TrimSpace(tag)
}
q.qbit.AddTags(tags)
common.JSONResponse(w, nil, http.StatusOK)
request.JSONResponse(w, nil, http.StatusOK)
}
+1 -1
View File
@@ -5,7 +5,7 @@ import (
"net/http"
)
func (q *qbitHandler) Routes(r chi.Router) http.Handler {
func (q *QbitHandler) Routes(r chi.Router) http.Handler {
r.Route("/api/v2", func(r chi.Router) {
//if q.debug {
// r.Use(middleware.Logger)
+22 -9
View File
@@ -7,10 +7,12 @@ import (
"github.com/go-chi/chi/v5"
"github.com/go-chi/chi/v5/middleware"
"github.com/rs/zerolog"
"github.com/sirrobot01/debrid-blackhole/common"
"github.com/sirrobot01/debrid-blackhole/internal/config"
"github.com/sirrobot01/debrid-blackhole/internal/logger"
"github.com/sirrobot01/debrid-blackhole/pkg/arr"
"github.com/sirrobot01/debrid-blackhole/pkg/debrid"
"github.com/sirrobot01/debrid-blackhole/pkg/qbit/shared"
"github.com/sirrobot01/debrid-blackhole/pkg/repair"
"io"
"net/http"
"os"
@@ -23,12 +25,13 @@ type Server struct {
logger zerolog.Logger
}
func NewServer(config *common.Config, deb *debrid.DebridService, arrs *arr.Storage) *Server {
logger := common.NewLogger("QBit", config.QBitTorrent.LogLevel, os.Stdout)
q := shared.NewQBit(config, deb, logger, arrs)
func NewServer(deb *debrid.DebridService, arrs *arr.Storage, _repair *repair.Repair) *Server {
cfg := config.GetConfig()
l := logger.NewLogger("QBit", cfg.QBitTorrent.LogLevel, os.Stdout)
q := shared.NewQBit(deb, l, arrs, _repair)
return &Server{
qbit: q,
logger: logger,
logger: l,
}
}
@@ -38,8 +41,12 @@ func (s *Server) Start(ctx context.Context) error {
r.Handle("/static/*", http.StripPrefix("/static/", http.FileServer(http.Dir("static"))))
logLevel := s.logger.GetLevel().String()
debug := logLevel == "debug"
q := qbitHandler{qbit: s.qbit, logger: s.logger, debug: debug}
ui := uiHandler{qbit: s.qbit, logger: common.NewLogger("UI", s.logger.GetLevel().String(), os.Stdout), debug: debug}
q := QbitHandler{qbit: s.qbit, logger: s.logger, debug: debug}
ui := UIHandler{
qbit: s.qbit,
logger: logger.NewLogger("UI", s.logger.GetLevel().String(), os.Stdout),
debug: debug,
}
// Register routes
r.Get("/logs", s.GetLogs)
@@ -71,7 +78,7 @@ func (s *Server) Start(ctx context.Context) error {
}
func (s *Server) GetLogs(w http.ResponseWriter, r *http.Request) {
logFile := common.GetLogPath()
logFile := logger.GetLogPath()
// Open and read the file
file, err := os.Open(logFile)
@@ -79,7 +86,12 @@ func (s *Server) GetLogs(w http.ResponseWriter, r *http.Request) {
http.Error(w, "Error reading log file", http.StatusInternalServerError)
return
}
defer file.Close()
defer func(file *os.File) {
err := file.Close()
if err != nil {
s.logger.Debug().Err(err).Msg("Error closing log file")
}
}(file)
// Set headers
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
@@ -91,6 +103,7 @@ func (s *Server) GetLogs(w http.ResponseWriter, r *http.Request) {
// Stream the file
_, err = io.Copy(w, file)
if err != nil {
s.logger.Debug().Err(err).Msg("Error streaming log file")
http.Error(w, "Error streaming log file", http.StatusInternalServerError)
return
}
+48
View File
@@ -30,6 +30,45 @@
Open Magnet Links in DecyphArr
</div>
</div>
<div class="col-12 mt-3">
<div class="form-group">
<label for="allowedExtensions">Allowed File Extensions</label>
<div class="input-group">
<textarea type="text"
class="form-control"
id="allowedExtensions"
name="allowed_file_types"
disabled
placeholder="mkv, mp4, avi, etc.">
</textarea>
</div>
</div>
</div>
<div class="col-md-6 mt-3">
<div class="form-group">
<label for="minFileSize">Minimum File Size</label>
<input type="text"
class="form-control"
id="minFileSize"
name="min_file_size"
disabled
placeholder="e.g., 10MB, 1GB">
<small class="form-text text-muted">Minimum file size to download (0 for no limit)</small>
</div>
</div>
<div class="col-md-6 mt-3">
<div class="form-group">
<label for="maxFileSize">Maximum File Size</label>
<input type="text"
class="form-control"
id="maxFileSize"
name="max_file_size"
disabled
placeholder="e.g., 50GB, 100MB">
<small class="form-text text-muted">Maximum file size to download (0 for no limit)</small>
</div>
</div>
</div>
</div>
<!-- Debrid Configuration -->
@@ -216,6 +255,15 @@
const logLevel = document.getElementById('log-level');
logLevel.value = config.log_level;
if (config.allowed_file_types && Array.isArray(config.allowed_file_types)) {
document.querySelector('[name="allowed_file_types"]').value = config.allowed_file_types.join(', ');
}
if (config.min_file_size) {
document.querySelector('[name="min_file_size"]').value = config.min_file_size;
}
if (config.max_file_size) {
document.querySelector('[name="max_file_size"]').value = config.max_file_size;
}
});
+37 -51
View File
@@ -3,15 +3,16 @@ package server
import (
"embed"
"encoding/json"
"errors"
"fmt"
"github.com/sirrobot01/debrid-blackhole/internal/config"
"github.com/sirrobot01/debrid-blackhole/internal/request"
"github.com/sirrobot01/debrid-blackhole/internal/utils"
"html/template"
"net/http"
"strings"
"github.com/go-chi/chi/v5"
"github.com/rs/zerolog"
"github.com/sirrobot01/debrid-blackhole/common"
"github.com/sirrobot01/debrid-blackhole/pkg/arr"
"github.com/sirrobot01/debrid-blackhole/pkg/debrid"
"github.com/sirrobot01/debrid-blackhole/pkg/qbit/shared"
@@ -49,7 +50,7 @@ type RepairRequest struct {
//go:embed templates/*
var content embed.FS
type uiHandler struct {
type UIHandler struct {
qbit *shared.QBit
logger zerolog.Logger
debug bool
@@ -68,7 +69,7 @@ func init() {
))
}
func (u *uiHandler) IndexHandler(w http.ResponseWriter, r *http.Request) {
func (u *UIHandler) IndexHandler(w http.ResponseWriter, r *http.Request) {
data := map[string]interface{}{
"Page": "index",
"Title": "Torrents",
@@ -79,7 +80,7 @@ func (u *uiHandler) IndexHandler(w http.ResponseWriter, r *http.Request) {
}
}
func (u *uiHandler) DownloadHandler(w http.ResponseWriter, r *http.Request) {
func (u *UIHandler) DownloadHandler(w http.ResponseWriter, r *http.Request) {
data := map[string]interface{}{
"Page": "download",
"Title": "Download",
@@ -90,7 +91,7 @@ func (u *uiHandler) DownloadHandler(w http.ResponseWriter, r *http.Request) {
}
}
func (u *uiHandler) RepairHandler(w http.ResponseWriter, r *http.Request) {
func (u *UIHandler) RepairHandler(w http.ResponseWriter, r *http.Request) {
data := map[string]interface{}{
"Page": "repair",
"Title": "Repair",
@@ -101,7 +102,7 @@ func (u *uiHandler) RepairHandler(w http.ResponseWriter, r *http.Request) {
}
}
func (u *uiHandler) ConfigHandler(w http.ResponseWriter, r *http.Request) {
func (u *UIHandler) ConfigHandler(w http.ResponseWriter, r *http.Request) {
data := map[string]interface{}{
"Page": "config",
"Title": "Config",
@@ -112,11 +113,11 @@ func (u *uiHandler) ConfigHandler(w http.ResponseWriter, r *http.Request) {
}
}
func (u *uiHandler) handleGetArrs(w http.ResponseWriter, r *http.Request) {
common.JSONResponse(w, u.qbit.Arrs.GetAll(), http.StatusOK)
func (u *UIHandler) handleGetArrs(w http.ResponseWriter, r *http.Request) {
request.JSONResponse(w, u.qbit.Arrs.GetAll(), http.StatusOK)
}
func (u *uiHandler) handleAddContent(w http.ResponseWriter, r *http.Request) {
func (u *UIHandler) handleAddContent(w http.ResponseWriter, r *http.Request) {
if err := r.ParseMultipartForm(32 << 20); err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
@@ -162,7 +163,7 @@ func (u *uiHandler) handleAddContent(w http.ResponseWriter, r *http.Request) {
continue
}
magnet, err := common.GetMagnetFromFile(file, fileHeader.Filename)
magnet, err := utils.GetMagnetFromFile(file, fileHeader.Filename)
if err != nil {
errs = append(errs, fmt.Sprintf("Failed to parse torrent file %s: %v", fileHeader.Filename, err))
continue
@@ -178,7 +179,7 @@ func (u *uiHandler) handleAddContent(w http.ResponseWriter, r *http.Request) {
}
}
common.JSONResponse(w, struct {
request.JSONResponse(w, struct {
Results []*ImportRequest `json:"results"`
Errors []string `json:"errors,omitempty"`
}{
@@ -187,7 +188,7 @@ func (u *uiHandler) handleAddContent(w http.ResponseWriter, r *http.Request) {
}, http.StatusOK)
}
func (u *uiHandler) handleCheckCached(w http.ResponseWriter, r *http.Request) {
func (u *UIHandler) handleCheckCached(w http.ResponseWriter, r *http.Request) {
_hashes := r.URL.Query().Get("hash")
if _hashes == "" {
http.Error(w, "No hashes provided", http.StatusBadRequest)
@@ -216,10 +217,10 @@ func (u *uiHandler) handleCheckCached(w http.ResponseWriter, r *http.Request) {
_, exists := res[h]
result[h] = exists
}
common.JSONResponse(w, result, http.StatusOK)
request.JSONResponse(w, result, http.StatusOK)
}
func (u *uiHandler) handleRepairMedia(w http.ResponseWriter, r *http.Request) {
func (u *UIHandler) handleRepairMedia(w http.ResponseWriter, r *http.Request) {
var req RepairRequest
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
@@ -232,50 +233,35 @@ func (u *uiHandler) handleRepairMedia(w http.ResponseWriter, r *http.Request) {
return
}
mediaIds := req.MediaIds
if len(mediaIds) == 0 {
mediaIds = []string{""}
}
if req.Async {
for _, tvId := range mediaIds {
go func() {
err := _arr.Repair(tvId)
if err != nil {
u.logger.Info().Msgf("Failed to repair: %v", err)
}
}()
}
common.JSONResponse(w, "Repair process started", http.StatusOK)
go func() {
if err := u.qbit.Repair.Repair([]*arr.Arr{_arr}, req.MediaIds); err != nil {
u.logger.Error().Err(err).Msg("Failed to repair media")
}
}()
request.JSONResponse(w, "Repair process started", http.StatusOK)
return
}
var errs []error
for _, tvId := range mediaIds {
if err := _arr.Repair(tvId); err != nil {
errs = append(errs, err)
}
}
if len(errs) > 0 {
combinedErr := errors.Join(errs...)
http.Error(w, fmt.Sprintf("Failed to repair: %v", combinedErr), http.StatusInternalServerError)
if err := u.qbit.Repair.Repair([]*arr.Arr{_arr}, req.MediaIds); err != nil {
http.Error(w, fmt.Sprintf("Failed to repair: %v", err), http.StatusInternalServerError)
return
}
common.JSONResponse(w, "Repair completed", http.StatusOK)
request.JSONResponse(w, "Repair completed", http.StatusOK)
}
func (u *uiHandler) handleGetVersion(w http.ResponseWriter, r *http.Request) {
func (u *UIHandler) handleGetVersion(w http.ResponseWriter, r *http.Request) {
v := version.GetInfo()
common.JSONResponse(w, v, http.StatusOK)
request.JSONResponse(w, v, http.StatusOK)
}
func (u *uiHandler) handleGetTorrents(w http.ResponseWriter, r *http.Request) {
common.JSONResponse(w, u.qbit.Storage.GetAll("", "", nil), http.StatusOK)
func (u *UIHandler) handleGetTorrents(w http.ResponseWriter, r *http.Request) {
request.JSONResponse(w, u.qbit.Storage.GetAll("", "", nil), http.StatusOK)
}
func (u *uiHandler) handleDeleteTorrent(w http.ResponseWriter, r *http.Request) {
func (u *UIHandler) handleDeleteTorrent(w http.ResponseWriter, r *http.Request) {
hash := chi.URLParam(r, "hash")
if hash == "" {
http.Error(w, "No hash provided", http.StatusBadRequest)
@@ -285,12 +271,12 @@ func (u *uiHandler) handleDeleteTorrent(w http.ResponseWriter, r *http.Request)
w.WriteHeader(http.StatusOK)
}
func (u *uiHandler) handleGetConfig(w http.ResponseWriter, r *http.Request) {
config := common.CONFIG
arrCfgs := make([]common.ArrConfig, 0)
func (u *UIHandler) handleGetConfig(w http.ResponseWriter, r *http.Request) {
cfg := config.GetConfig()
arrCfgs := make([]config.Arr, 0)
for _, a := range u.qbit.Arrs.GetAll() {
arrCfgs = append(arrCfgs, common.ArrConfig{Host: a.Host, Name: a.Name, Token: a.Token})
arrCfgs = append(arrCfgs, config.Arr{Host: a.Host, Name: a.Name, Token: a.Token})
}
config.Arrs = arrCfgs
common.JSONResponse(w, config, http.StatusOK)
cfg.Arrs = arrCfgs
request.JSONResponse(w, cfg, http.StatusOK)
}
+1 -1
View File
@@ -5,7 +5,7 @@ import (
"net/http"
)
func (u *uiHandler) Routes(r chi.Router) http.Handler {
func (u *UIHandler) Routes(r chi.Router) http.Handler {
r.Group(func(r chi.Router) {
r.Get("/", u.IndexHandler)
r.Get("/download", u.DownloadHandler)
+7 -4
View File
@@ -3,9 +3,10 @@ package shared
import (
"cmp"
"github.com/rs/zerolog"
"github.com/sirrobot01/debrid-blackhole/common"
"github.com/sirrobot01/debrid-blackhole/internal/config"
"github.com/sirrobot01/debrid-blackhole/pkg/arr"
"github.com/sirrobot01/debrid-blackhole/pkg/debrid"
"github.com/sirrobot01/debrid-blackhole/pkg/repair"
"os"
)
@@ -16,6 +17,7 @@ type QBit struct {
DownloadFolder string `json:"download_folder"`
Categories []string `json:"categories"`
Debrid *debrid.DebridService
Repair *repair.Repair
Storage *TorrentStorage
debug bool
logger zerolog.Logger
@@ -24,8 +26,8 @@ type QBit struct {
RefreshInterval int
}
func NewQBit(config *common.Config, deb *debrid.DebridService, logger zerolog.Logger, arrs *arr.Storage) *QBit {
cfg := config.QBitTorrent
func NewQBit(deb *debrid.DebridService, logger zerolog.Logger, arrs *arr.Storage, _repair *repair.Repair) *QBit {
cfg := config.GetConfig().QBitTorrent
port := cmp.Or(cfg.Port, os.Getenv("QBIT_PORT"), "8282")
refreshInterval := cmp.Or(cfg.RefreshInterval, 10)
return &QBit{
@@ -35,7 +37,8 @@ func NewQBit(config *common.Config, deb *debrid.DebridService, logger zerolog.Lo
DownloadFolder: cfg.DownloadFolder,
Categories: cfg.Categories,
Debrid: deb,
Storage: NewTorrentStorage("/app/torrents.json"),
Storage: NewTorrentStorage(cmp.Or(os.Getenv("TORRENT_FILE"), "/app/torrents.json")),
Repair: _repair,
logger: logger,
Arrs: arrs,
RefreshInterval: refreshInterval,
+8 -8
View File
@@ -5,7 +5,7 @@ import (
"context"
"fmt"
"github.com/google/uuid"
"github.com/sirrobot01/debrid-blackhole/common"
"github.com/sirrobot01/debrid-blackhole/internal/utils"
"github.com/sirrobot01/debrid-blackhole/pkg/arr"
"github.com/sirrobot01/debrid-blackhole/pkg/debrid"
"io"
@@ -20,7 +20,7 @@ import (
// All torrent related helpers goes here
func (q *QBit) AddMagnet(ctx context.Context, url, category string) error {
magnet, err := common.GetMagnetFromUrl(url)
magnet, err := utils.GetMagnetFromUrl(url)
if err != nil {
return fmt.Errorf("error parsing magnet link: %w", err)
}
@@ -35,7 +35,7 @@ func (q *QBit) AddTorrent(ctx context.Context, fileHeader *multipart.FileHeader,
file, _ := fileHeader.Open()
defer file.Close()
var reader io.Reader = file
magnet, err := common.GetMagnetFromFile(reader, fileHeader.Filename)
magnet, err := utils.GetMagnetFromFile(reader, fileHeader.Filename)
if err != nil {
return fmt.Errorf("error reading file: %s \n %w", fileHeader.Filename, err)
}
@@ -46,7 +46,7 @@ func (q *QBit) AddTorrent(ctx context.Context, fileHeader *multipart.FileHeader,
return nil
}
func (q *QBit) Process(ctx context.Context, magnet *common.Magnet, category string) error {
func (q *QBit) Process(ctx context.Context, magnet *utils.Magnet, category string) error {
torrent := q.CreateTorrentFromMagnet(magnet, category, "auto")
a, ok := ctx.Value("arr").(*arr.Arr)
if !ok {
@@ -69,7 +69,7 @@ func (q *QBit) Process(ctx context.Context, magnet *common.Magnet, category stri
return nil
}
func (q *QBit) CreateTorrentFromMagnet(magnet *common.Magnet, category, source string) *Torrent {
func (q *QBit) CreateTorrentFromMagnet(magnet *utils.Magnet, category, source string) *Torrent {
torrent := &Torrent{
ID: uuid.NewString(),
Hash: strings.ToLower(magnet.InfoHash),
@@ -296,8 +296,8 @@ func (q *QBit) SetTorrentTags(t *Torrent, tags []string) bool {
func (q *QBit) RemoveTorrentTags(t *Torrent, tags []string) bool {
torrentTags := strings.Split(t.Tags, ",")
newTorrentTags := common.Remove(torrentTags, tags...)
q.Tags = common.Remove(q.Tags, tags...)
newTorrentTags := utils.RemoveItem(torrentTags, tags...)
q.Tags = utils.RemoveItem(q.Tags, tags...)
t.Tags = strings.Join(newTorrentTags, ",")
q.Storage.Update(t)
return true
@@ -316,6 +316,6 @@ func (q *QBit) AddTags(tags []string) bool {
}
func (q *QBit) RemoveTags(tags []string) bool {
q.Tags = common.Remove(q.Tags, tags...)
q.Tags = utils.RemoveItem(q.Tags, tags...)
return true
}
+3 -2
View File
@@ -1,8 +1,8 @@
package shared
import (
"github.com/sirrobot01/debrid-blackhole/common"
"github.com/sirrobot01/debrid-blackhole/pkg/debrid"
"os"
"path/filepath"
"sync"
"time"
@@ -16,7 +16,8 @@ func checkFileLoop(wg *sync.WaitGroup, dir string, file debrid.TorrentFile, read
for {
select {
case <-ticker.C:
if common.FileReady(path) {
_, err := os.Stat(path)
if !os.IsNotExist(err) {
ready <- file
return
}
@@ -2,6 +2,8 @@ package repair
import (
"fmt"
"os"
"path/filepath"
"strconv"
"strings"
"time"
@@ -68,3 +70,62 @@ func parseDurationInterval(interval string) (time.Duration, error) {
return 0, fmt.Errorf("invalid unit in interval: %c", unit)
}
}
func fileIsSymlinked(file string) bool {
info, err := os.Lstat(file)
if err != nil {
return false
}
return info.Mode()&os.ModeSymlink != 0
}
// getSymlinkTarget resolves the target of a symlink. It returns "" when
// file is not a symlink or the link cannot be read. Relative targets are
// resolved against the symlink's own directory so the result is usable
// as a path.
func getSymlinkTarget(file string) string {
	if !fileIsSymlinked(file) {
		return ""
	}
	target, err := os.Readlink(file)
	if err != nil {
		return ""
	}
	if filepath.IsAbs(target) {
		return target
	}
	return filepath.Join(filepath.Dir(file), target)
}
// fileIsReadable verifies that filePath names an existing regular file
// whose first bytes can actually be read. Stat alone is not enough, so
// a small read is attempted as well via checkFileStart.
func fileIsReadable(filePath string) error {
	info, err := os.Stat(filePath)
	if err != nil {
		return err
	}
	if !info.Mode().IsRegular() {
		return fmt.Errorf("not a regular file")
	}
	// Exercise an actual read of the file's head.
	return checkFileStart(filePath)
}
func checkFileStart(filePath string) error {
f, err := os.Open(filePath)
if err != nil {
return err
}
defer f.Close()
// Read first 1kb
buffer := make([]byte, 1024)
_, err = f.Read(buffer)
if err != nil {
return err
}
return nil
}
+316 -36
View File
@@ -2,71 +2,351 @@ package repair
import (
"context"
"github.com/sirrobot01/debrid-blackhole/common"
"fmt"
"github.com/google/uuid"
"github.com/rs/zerolog"
"github.com/sirrobot01/debrid-blackhole/internal/config"
"github.com/sirrobot01/debrid-blackhole/internal/logger"
"github.com/sirrobot01/debrid-blackhole/pkg/arr"
"github.com/sirrobot01/debrid-blackhole/pkg/debrid"
"log"
"net/http"
"net/url"
"os"
"os/signal"
"path/filepath"
"runtime"
"strings"
"sync"
"syscall"
"time"
)
func Start(ctx context.Context, config *common.Config, arrs *arr.Storage) error {
ctx, stop := signal.NotifyContext(ctx, os.Interrupt, syscall.SIGTERM)
logger := common.NewLogger("Repair", config.LogLevel, os.Stdout)
defer stop()
// Repair detects broken media files across configured *arr instances
// and asks the arr to delete and re-search them, optionally using a
// zurg HTTP endpoint for cheap availability checks.
type Repair struct {
	// Jobs is exported for JSON output; not appended to in the code
	// visible here — presumably populated elsewhere (TODO confirm).
	Jobs []Job `json:"jobs"`
	// deb is the debrid service dependency; not referenced in the
	// visible repair code.
	deb debrid.Service
	// arrs provides access to every configured arr instance.
	arrs *arr.Storage
	// duration is the interval between scheduled repair runs.
	duration time.Duration
	// runOnStart triggers an immediate repair when the worker starts.
	runOnStart bool
	// ZurgURL is the base URL of a zurg instance; empty when unused.
	ZurgURL string
	// IsZurg is set when ZurgURL is non-empty and selects the
	// zurg-based broken-file check over direct file reads.
	IsZurg bool
	// logger is the component logger for repair output.
	logger zerolog.Logger
}
duration, err := parseSchedule(config.Repair.Interval)
func NewRepair(deb debrid.Service, arrs *arr.Storage) *Repair {
cfg := config.GetConfig()
duration, err := parseSchedule(cfg.Repair.Interval)
if err != nil {
log.Fatalf("Failed to parse schedule: %v", err)
duration = time.Hour * 24
}
r := &Repair{
deb: deb,
logger: logger.NewLogger("Repair", cfg.LogLevel, os.Stdout),
arrs: arrs,
duration: duration,
runOnStart: cfg.Repair.RunOnStart,
ZurgURL: cfg.Repair.ZurgURL,
}
if r.ZurgURL != "" {
r.IsZurg = true
}
return r
}
// Job records a single repair run: which arr instances it covers, which
// media IDs it targets, and its lifecycle timestamps.
type Job struct {
	// ID uniquely identifies the job.
	ID string `json:"id"`
	// Arrs lists the arr instances this job repairs.
	Arrs []*arr.Arr `json:"arrs"`
	// MediaIDs optionally restricts the run to specific media items;
	// empty means all media.
	MediaIDs []string `json:"media_ids"`
	// StartedAt is when the job was created (serialized as "created_at").
	StartedAt time.Time `json:"created_at"`
	// CompletedAt is set on success (serialized as "finished_at").
	CompletedAt time.Time `json:"finished_at"`
	// FailedAt is set when the run ends with an error.
	FailedAt time.Time `json:"failed_at"`
	// Error holds the failure message when FailedAt is set.
	Error string `json:"error"`
}
// NewJob builds a Job covering the given arr instances and media IDs,
// assigning a fresh UUID and stamping the start time.
func (r *Repair) NewJob(arrs []*arr.Arr, mediaIDs []string) *Job {
	job := &Job{
		ID:        uuid.New().String(),
		Arrs:      arrs,
		MediaIDs:  mediaIDs,
		StartedAt: time.Now(),
	}
	return job
}
// PreRunChecks verifies preconditions before a repair run. For
// zurg-based setups it probes the zurg version endpoint; non-zurg
// setups have nothing to check. Returns a non-nil error when zurg is
// unreachable or responds with a non-200 status.
func (r *Repair) PreRunChecks() error {
	// Only zurg-based setups need a reachability probe.
	if !r.IsZurg {
		return nil
	}
	resp, err := http.Get(fmt.Sprint(r.ZurgURL, "/http/version.txt"))
	if err != nil {
		r.logger.Debug().Err(err).Msgf("Precheck failed: Failed to reach zurg at %s", r.ZurgURL)
		return err
	}
	// BUG FIX: the body was never closed, leaking the connection.
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		r.logger.Debug().Msgf("Precheck failed: Zurg returned %d", resp.StatusCode)
		// BUG FIX: previously returned `err`, which is nil here, so a
		// failed precheck was reported as success.
		return fmt.Errorf("zurg precheck: unexpected status %d from %s", resp.StatusCode, r.ZurgURL)
	}
	return nil
}
// Repair runs a repair job over the given arrs, optionally scoped to
// specific media IDs (empty mediaIds means repair everything). Each arr
// is processed in its own goroutine. The first error encountered marks
// the job failed and is returned; nil means the job completed.
func (r *Repair) Repair(arrs []*arr.Arr, mediaIds []string) error {
	j := r.NewJob(arrs, mediaIds)
	if err := r.PreRunChecks(); err != nil {
		return err
	}
	// BUG FIX: the channel was unbuffered with no receiver running
	// until after wg.Wait(), so any worker error blocked its send and
	// deadlocked the whole run. Buffer for the worst case so sends
	// never block.
	maxErrs := len(arrs)
	if len(mediaIds) > 0 {
		maxErrs = len(arrs) * len(mediaIds)
	}
	errCh := make(chan error, maxErrs)
	var wg sync.WaitGroup
	for _, a := range j.Arrs {
		wg.Add(1)
		go func(a *arr.Arr) {
			defer wg.Done()
			if len(j.MediaIDs) == 0 {
				// No IDs: repair the arr's whole library.
				if err := r.RepairArr(a, ""); err != nil {
					log.Printf("Error repairing %s: %v", a.Name, err)
					errCh <- err
				}
				return
			}
			for _, id := range j.MediaIDs {
				if err := r.RepairArr(a, id); err != nil {
					log.Printf("Error repairing %s: %v", a.Name, err)
					errCh <- err
				}
			}
		}(a)
	}
	wg.Wait()
	close(errCh)
	// Receiving from the closed channel yields nil when no error was sent.
	if err := <-errCh; err != nil {
		j.FailedAt = time.Now()
		j.Error = err.Error()
		return err
	}
	j.CompletedAt = time.Now()
	return nil
}
func (r *Repair) Start(ctx context.Context) error {
ctx, stop := signal.NotifyContext(ctx, os.Interrupt, syscall.SIGTERM)
defer stop()
cfg := config.GetConfig()
if r.runOnStart {
r.logger.Info().Msgf("Running initial repair")
go func() {
if err := r.Repair(r.arrs.GetAll(), []string{}); err != nil {
r.logger.Info().Msgf("Error during initial repair: %v", err)
}
}()
}
if config.Repair.RunOnStart {
logger.Info().Msgf("Running initial repair")
if err := repair(arrs); err != nil {
log.Printf("Error during initial repair: %v", err)
return err
}
}
ticker := time.NewTicker(duration)
ticker := time.NewTicker(r.duration)
defer ticker.Stop()
if strings.Contains(config.Repair.Interval, ":") {
logger.Info().Msgf("Starting repair worker, scheduled daily at %s", config.Repair.Interval)
} else {
logger.Info().Msgf("Starting repair worker with %v interval", duration)
}
r.logger.Info().Msgf("Starting repair worker with %v interval", r.duration)
for {
select {
case <-ctx.Done():
logger.Info().Msg("Repair worker stopped")
r.logger.Info().Msg("Repair worker stopped")
return nil
case t := <-ticker.C:
logger.Info().Msgf("Running repair at %v", t.Format("15:04:05"))
if err := repair(arrs); err != nil {
logger.Info().Msgf("Error during repair: %v", err)
return err
r.logger.Info().Msgf("Running repair at %v", t.Format("15:04:05"))
if err := r.Repair(r.arrs.GetAll(), []string{}); err != nil {
r.logger.Info().Msgf("Error during repair: %v", err)
continue
}
// If using time-of-day schedule, reset the ticker for next day
if strings.Contains(config.Repair.Interval, ":") {
nextDuration, err := parseSchedule(config.Repair.Interval)
if err != nil {
logger.Info().Msgf("Error calculating next schedule: %v", err)
return err
}
ticker.Reset(nextDuration)
if strings.Contains(cfg.Repair.Interval, ":") {
ticker.Reset(r.duration)
}
r.logger.Info().Msgf("Next scheduled repair at %v", t.Add(r.duration).Format("15:04:05"))
}
}
}
func repair(arrs *arr.Storage) error {
for _, a := range arrs.GetAll() {
go a.Repair("")
// RepairArr repairs a single arr instance: it fetches the arr's media
// (optionally filtered by tmdbId; "" means all), scans each item for
// broken files concurrently, and for every broken set deletes the files
// (unless deletion is disabled in config) and triggers a new search.
// Returns an error only when the media listing itself fails; per-item
// failures are logged and skipped.
func (r *Repair) RepairArr(a *arr.Arr, tmdbId string) error {
	cfg := config.GetConfig()
	r.logger.Info().Msgf("Starting repair for %s", a.Name)
	media, err := a.GetMedia(tmdbId)
	if err != nil {
		r.logger.Info().Msgf("Failed to get %s media: %v", a.Type, err)
		return err
	}
	r.logger.Info().Msgf("Found %d %s media", len(media), a.Type)
	if len(media) == 0 {
		r.logger.Info().Msgf("No %s media found", a.Type)
		return nil
	}
	// Probe the first item so we bail out early when mounts are down,
	// rather than flagging the entire library as broken.
	if !r.isMediaAccessible(media[0]) {
		r.logger.Info().Msgf("Skipping repair. Parent directory not accessible. Check your mounts")
		return nil
	}
	// Bound concurrency to a multiple of the CPU count.
	semaphore := make(chan struct{}, runtime.NumCPU()*4)
	var (
		wg sync.WaitGroup
		// BUG FIX: totalBrokenItems was incremented from many
		// goroutines without synchronization — a data race. Guard it.
		mu               sync.Mutex
		totalBrokenItems int
	)
	for _, m := range media {
		wg.Add(1)
		semaphore <- struct{}{}
		go func(m arr.Content) {
			defer wg.Done()
			defer func() { <-semaphore }()
			brokenItems := r.getBrokenFiles(m)
			if brokenItems != nil {
				r.logger.Debug().Msgf("Found %d broken files for %s", len(brokenItems), m.Title)
				if !cfg.Repair.SkipDeletion {
					if err := a.DeleteFiles(brokenItems); err != nil {
						r.logger.Info().Msgf("Failed to delete broken items for %s: %v", m.Title, err)
					}
				}
				if err := a.SearchMissing(brokenItems); err != nil {
					r.logger.Info().Msgf("Failed to search missing items for %s: %v", m.Title, err)
				}
				mu.Lock()
				totalBrokenItems += len(brokenItems)
				mu.Unlock()
			}
		}(m)
	}
	wg.Wait()
	r.logger.Info().Msgf("Repair completed for %s. %d broken items found", a.Name, totalBrokenItems)
	return nil
}
// isMediaAccessible sanity-checks that the media's files (and, for
// symlinked files, the mount hosting the link target) are reachable.
// Only the first file is probed; it stands in for the whole item.
func (r *Repair) isMediaAccessible(m arr.Content) bool {
	if len(m.Files) == 0 {
		return false
	}
	first := m.Files[0]
	r.logger.Debug().Msgf("Checking parent directory for %s", first.Path)
	if _, err := os.Stat(first.Path); os.IsNotExist(err) {
		return false
	}
	// When the file is a symlink, also confirm the grandparent of the
	// target exists.
	symlinkPath := getSymlinkTarget(first.Path)
	r.logger.Debug().Msgf("Checking symlink parent directory for %s", symlinkPath)
	if symlinkPath != "" {
		// /mnt/zurg/torrents/movie/movie.mkv -> /mnt/zurg/torrents
		grandparent := filepath.Dir(filepath.Dir(symlinkPath))
		if _, err := os.Stat(grandparent); os.IsNotExist(err) {
			return false
		}
	}
	return true
}
// getBrokenFiles dispatches to the zurg-based or direct-read broken
// file detector depending on how this Repair was configured.
func (r *Repair) getBrokenFiles(media arr.Content) []arr.ContentFile {
	if r.IsZurg {
		return r.getZurgBrokenFiles(media)
	}
	return r.getFileBrokenFiles(media)
}
// getFileBrokenFiles finds broken files by following each file's
// symlink and attempting a small read. Files are grouped by the parent
// directory of their link target; each group is probed once through its
// first member, and a failed probe marks the whole group broken.
// Returns nil when nothing is broken.
func (r *Repair) getFileBrokenFiles(media arr.Content) []arr.ContentFile {
	groups := make(map[string][]arr.ContentFile)
	for _, entry := range media.Files {
		target := getSymlinkTarget(entry.Path)
		if target == "" {
			continue
		}
		entry.IsSymlink = true
		dir, _ := filepath.Split(target)
		parent := filepath.Base(filepath.Clean(dir))
		groups[parent] = append(groups[parent], entry)
	}
	brokenFiles := make([]arr.ContentFile, 0)
	for parent, members := range groups {
		// One readability probe stands in for the whole group.
		if err := fileIsReadable(members[0].Path); err != nil {
			r.logger.Debug().Msgf("Broken file found at: %s", parent)
			brokenFiles = append(brokenFiles, members...)
		}
	}
	if len(brokenFiles) == 0 {
		r.logger.Debug().Msgf("No broken files found for %s", media.Title)
		return nil
	}
	r.logger.Debug().Msgf("%d broken files found for %s", len(brokenFiles), media.Title)
	return brokenFiles
}
// getZurgBrokenFiles checks file availability through zurg's HTTP
// endpoint rather than reading file contents, which reduces bandwidth
// usage significantly. Files are grouped by the parent directory of
// their symlink target and each group is verified once via its first
// file: a dangling symlink, an unreachable zurg URL, a non-200 status,
// or a missing download URL marks the whole group broken. Returns nil
// when nothing is broken.
func (r *Repair) getZurgBrokenFiles(media arr.Content) []arr.ContentFile {
	brokenFiles := make([]arr.ContentFile, 0)
	uniqueParents := make(map[string][]arr.ContentFile)
	for _, file := range media.Files {
		target := getSymlinkTarget(file.Path)
		if target == "" {
			continue
		}
		file.IsSymlink = true
		dir, f := filepath.Split(target)
		parent := filepath.Base(filepath.Clean(dir))
		// Set target path folder/file.mkv
		file.TargetPath = f
		uniqueParents[parent] = append(uniqueParents[parent], file)
	}
	// BUG FIX: http.Get uses the default client, which has no timeout —
	// a wedged zurg instance would hang the repair run forever. Use a
	// bounded client instead.
	client := &http.Client{Timeout: 30 * time.Second}
	// Access zurg url + symlink folder + first file(encoded)
	for parent, f := range uniqueParents {
		r.logger.Debug().Msgf("Checking %s", parent)
		encodedParent := url.PathEscape(parent)
		encodedFile := url.PathEscape(f[0].TargetPath)
		fullURL := fmt.Sprintf("%s/http/__all__/%s/%s", r.ZurgURL, encodedParent, encodedFile)
		// Check file stat first: a dangling symlink is broken without
		// needing a network round-trip.
		if _, err := os.Stat(f[0].Path); os.IsNotExist(err) {
			r.logger.Debug().Msgf("Broken symlink found: %s", fullURL)
			brokenFiles = append(brokenFiles, f...)
			continue
		}
		resp, err := client.Get(fullURL)
		if err != nil {
			r.logger.Debug().Err(err).Msgf("Failed to reach %s", fullURL)
			brokenFiles = append(brokenFiles, f...)
			continue
		}
		resp.Body.Close()
		if resp.StatusCode != http.StatusOK {
			r.logger.Debug().Msgf("Failed to get download url for %s", fullURL)
			brokenFiles = append(brokenFiles, f...)
			continue
		}
		// After redirects, the final request URL is the download URL.
		downloadUrl := resp.Request.URL.String()
		if downloadUrl != "" {
			r.logger.Debug().Msgf("Found download url: %s", downloadUrl)
		} else {
			r.logger.Debug().Msgf("Failed to get download url for %s", fullURL)
			brokenFiles = append(brokenFiles, f...)
			continue
		}
	}
	if len(brokenFiles) == 0 {
		r.logger.Debug().Msgf("No broken files found for %s", media.Title)
		return nil
	}
	r.logger.Debug().Msgf("%d broken files found for %s", len(brokenFiles), media.Title)
	return brokenFiles
}