diff --git a/.air.toml b/.air.toml index 4db415e..37821e3 100644 --- a/.air.toml +++ b/.air.toml @@ -5,7 +5,7 @@ tmp_dir = "tmp" [build] args_bin = ["--config", "data/"] bin = "./tmp/main" - cmd = "bash -c 'go build -ldflags \"-X github.com/sirrobot01/debrid-blackhole/pkg/version.Version=0.0.0 -X github.com/sirrobot01/debrid-blackhole/pkg/version.Channel=beta\" -o ./tmp/main .'" + cmd = "bash -c 'go build -ldflags \"-X github.com/sirrobot01/debrid-blackhole/pkg/version.Version=0.0.0 -X github.com/sirrobot01/debrid-blackhole/pkg/version.Channel=nightly\" -o ./tmp/main .'" delay = 1000 exclude_dir = ["assets", "tmp", "vendor", "testdata", "data"] exclude_file = [] diff --git a/CHANGELOG.md b/CHANGELOG.md index 1d398f9..d12d947 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -144,4 +144,18 @@ - Hotfixes - Fix saving torrents error - Fix bugs with the UI -- Speed improvements \ No newline at end of file +- Speed improvements + + +#### 0.5.0 + +- A more refined repair worker(with more control) +- UI Improvements + - Pagination for torrents + - Dark mode + - Ordered torrents table +- Fix Arr API flaky behavior +- Discord Notifications +- Minor bug fixes +- Add Tautulli support + - playback_failed event triggers a repair \ No newline at end of file diff --git a/README.md b/README.md index 9e57326..363b6e9 100644 --- a/README.md +++ b/README.md @@ -48,20 +48,35 @@ The proxy is useful for filtering out un-cached Debrid torrents ### Installation -##### Docker Compose +##### Docker + +###### Registry +You can use either hub.docker.com or ghcr.io to pull the image. The image is available on both platforms. + +- Docker Hub: `cy01/blackhole:latest` +- GitHub Container Registry: `ghcr.io/sirrobot01/decypharr:latest` + +###### Tags + +- `latest`: The latest stable release +- `beta`: The latest beta release +- `vX.Y.Z`: A specific version (e.g `v0.1.0`) +- `nightly`: The latest nightly build. 
This is highly unstable + + ```yaml version: '3.7' services: - blackhole: + decypharr: image: cy01/blackhole:latest # or cy01/blackhole:beta - container_name: blackhole + container_name: decypharr ports: - "8282:8282" # qBittorrent - "8181:8181" # Proxy user: "1000:1000" volumes: - /mnt/:/mnt - - ~/plex/configs/blackhole/:/app # config.json must be in this directory + - ~/plex/configs/decypharr/:/app # config.json must be in this directory environment: - PUID=1000 - PGID=1000 @@ -78,7 +93,7 @@ services: Download the binary from the releases page and run it with the config file. ```bash -./blackhole --config /app +./decypharr --config /app ``` ### Usage @@ -116,7 +131,7 @@ This is the default config file. You can create a `config.json` file in the root } ], "proxy": { - "enabled": true, + "enabled": false, "port": "8100", "username": "username", "password": "password" @@ -124,7 +139,8 @@ This is the default config file. You can create a `config.json` file in the root "qbittorrent": { "port": "8282", "download_folder": "/mnt/symlinks/", - "categories": ["sonarr", "radarr"] + "categories": ["sonarr", "radarr"], + "log_level": "info" }, "repair": { "enabled": false, @@ -147,6 +163,7 @@ Full config are [here](doc/config.full.json) - The `max_cache_size` key is used to set the maximum number of infohashes that can be stored in the availability cache. This is used to prevent round trip to the debrid provider when using the proxy/Qbittorrent. The default value is `1000` - The `allowed_file_types` key is an array of allowed file types that can be downloaded. By default, all movie, tv show and music file types are allowed - The `use_auth` is used to enable basic authentication for the UI. 
The default value is `false` +- The `discord_webhook_url` is used to send notifications to discord ##### Debrid Config - The `debrids` key is an array of debrid providers @@ -164,7 +181,7 @@ The `repair` key is used to enable the repair worker - The `interval` key is the interval in either minutes, seconds, hours, days. Use any of this format, e.g 12:00, 5:00, 1h, 1d, 1m, 1s. - The `run_on_start` key is used to run the repair worker on start - The `zurg_url` is the url of the zurg server. Typically `http://localhost:9999` or `http://zurg:9999` -- The `skip_deletion`: true if you don't want to delete the files +- The `auto_process` is used to automatically process the repair worker. This will delete broken symlinks and re-search for missing files ##### Proxy Config - The `enabled` key is used to enable the proxy @@ -191,15 +208,6 @@ This is particularly useful if you want to use the Repair tool without using Qbi - -### Proxy - -**Note**: Proxy has stopped working for Real Debrid, Debrid Link, and All Debrid. It still works for Torbox. This is due to the changes in the API of the Debrid Providers. - -The proxy is useful in filtering out un-cached Debrid torrents. -The proxy is a simple HTTP proxy that requires basic authentication. The proxy can be enabled by setting the `proxy.enabled` to `true` in the config file. -The proxy listens on the port `8181` by default. The username and password can be set in the config file. - ### Repair Worker The repair worker is a simple worker that checks for missing files in the Arrs(Sonarr, Radarr, etc). It's particularly useful for files either deleted by the Debrid provider or files with bad symlinks. @@ -211,6 +219,14 @@ The repair worker is a simple worker that checks for missing files in the Arrs(S - Search for deleted/unreadable files +### Proxy + +#### **Note**: Proxy has stopped working for Real Debrid, Debrid Link, and All Debrid. It still works for Torbox. This is due to the changes in the API of the Debrid Providers. 
+ +The proxy is useful in filtering out un-cached Debrid torrents. +The proxy is a simple HTTP proxy that requires basic authentication. The proxy can be enabled by setting the `proxy.enabled` to `true` in the config file. +The proxy listens on the port `8181` by default. The username and password can be set in the config file. + ### Changelog - View the [CHANGELOG.md](CHANGELOG.md) for the latest changes diff --git a/cmd/decypharr/main.go b/cmd/decypharr/main.go index 0a36567..b203bd1 100644 --- a/cmd/decypharr/main.go +++ b/cmd/decypharr/main.go @@ -20,7 +20,7 @@ func Start(ctx context.Context) error { var wg sync.WaitGroup errChan := make(chan error) - _log := logger.GetLogger(cfg.LogLevel) + _log := logger.GetDefaultLogger() _log.Info().Msgf("Version: %s", version.GetInfo().String()) _log.Debug().Msgf("Config Loaded: %s", cfg.JsonFile()) diff --git a/doc/config.full.json b/doc/config.full.json index edc6827..fc927d9 100644 --- a/doc/config.full.json +++ b/doc/config.full.json @@ -58,7 +58,7 @@ "name": "sonarr", "host": "http://host:8989", "token": "arr_key", - "cleanup": false + "cleanup": true }, { "name": "radarr", @@ -72,11 +72,12 @@ "interval": "12h", "run_on_start": false, "zurg_url": "http://zurg:9999", - "skip_deletion": false + "auto_process": false }, "log_level": "info", "min_file_size": "", "max_file_size": "", "allowed_file_types": [], - "use_auth": false + "use_auth": false, + "discord_webhook_url": "https://discord.com/api/webhooks/...", } \ No newline at end of file diff --git a/go.mod b/go.mod index 642dc75..8398fe2 100644 --- a/go.mod +++ b/go.mod @@ -16,6 +16,7 @@ require ( github.com/valyala/fastjson v1.6.4 golang.org/x/crypto v0.33.0 golang.org/x/net v0.33.0 + golang.org/x/sync v0.11.0 golang.org/x/time v0.8.0 gopkg.in/natefinch/lumberjack.v2 v2.2.1 ) diff --git a/go.sum b/go.sum index 5395410..59ecc6b 100644 --- a/go.sum +++ b/go.sum @@ -243,6 +243,8 @@ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJ 
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w= +golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= diff --git a/internal/config/config.go b/internal/config/config.go index 9268ad7..206bec8 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -52,11 +52,11 @@ type Arr struct { } type Repair struct { - Enabled bool `json:"enabled"` - Interval string `json:"interval"` - RunOnStart bool `json:"run_on_start"` - ZurgURL string `json:"zurg_url"` - SkipDeletion bool `json:"skip_deletion"` + Enabled bool `json:"enabled"` + Interval string `json:"interval"` + RunOnStart bool `json:"run_on_start"` + ZurgURL string `json:"zurg_url"` + AutoProcess bool `json:"auto_process"` } type Auth struct { @@ -65,20 +65,21 @@ type Auth struct { } type Config struct { - LogLevel string `json:"log_level"` - Debrid Debrid `json:"debrid"` - Debrids []Debrid `json:"debrids"` - Proxy Proxy `json:"proxy"` - MaxCacheSize int `json:"max_cache_size"` - QBitTorrent QBitTorrent `json:"qbittorrent"` - Arrs []Arr `json:"arrs"` - Repair Repair `json:"repair"` - AllowedExt []string `json:"allowed_file_types"` - MinFileSize string `json:"min_file_size"` // Minimum file size to download, 10MB, 1GB, etc - MaxFileSize string `json:"max_file_size"` // Maximum file size to download (0 means no limit) 
- Path string `json:"-"` // Path to save the config file - UseAuth bool `json:"use_auth"` - Auth *Auth `json:"-"` + LogLevel string `json:"log_level"` + Debrid Debrid `json:"debrid"` + Debrids []Debrid `json:"debrids"` + Proxy Proxy `json:"proxy"` + MaxCacheSize int `json:"max_cache_size"` + QBitTorrent QBitTorrent `json:"qbittorrent"` + Arrs []Arr `json:"arrs"` + Repair Repair `json:"repair"` + AllowedExt []string `json:"allowed_file_types"` + MinFileSize string `json:"min_file_size"` // Minimum file size to download, 10MB, 1GB, etc + MaxFileSize string `json:"max_file_size"` // Maximum file size to download (0 means no limit) + Path string `json:"-"` // Path to save the config file + UseAuth bool `json:"use_auth"` + Auth *Auth `json:"-"` + DiscordWebhook string `json:"discord_webhook_url"` } func (c *Config) JsonFile() string { @@ -207,10 +208,7 @@ func GetConfig() *Config { once.Do(func() { instance = &Config{} // Initialize instance first if err := instance.loadConfig(); err != nil { - _, err := fmt.Fprintf(os.Stderr, "configuration Error: %v\n", err) - if err != nil { - return - } + fmt.Fprintf(os.Stderr, "configuration Error: %v\n", err) os.Exit(1) } }) diff --git a/internal/logger/logger.go b/internal/logger/logger.go index 1a9e8fc..ca6d015 100644 --- a/internal/logger/logger.go +++ b/internal/logger/logger.go @@ -85,9 +85,10 @@ func NewLogger(prefix string, level string, output *os.File) zerolog.Logger { return logger } -func GetLogger(level string) zerolog.Logger { +func GetDefaultLogger() zerolog.Logger { once.Do(func() { - logger = NewLogger("decypharr", level, os.Stdout) + cfg := config.GetConfig() + logger = NewLogger("decypharr", cfg.LogLevel, os.Stdout) }) return logger } diff --git a/internal/request/discord.go b/internal/request/discord.go new file mode 100644 index 0000000..115d942 --- /dev/null +++ b/internal/request/discord.go @@ -0,0 +1,100 @@ +package request + +import ( + "bytes" + "encoding/json" + "fmt" + 
	"github.com/sirrobot01/debrid-blackhole/internal/config" + 	"io" + 	"net/http" + 	"strings" +) + +type DiscordEmbed struct { + 	Title       string `json:"title"` + 	Description string `json:"description"` + 	Color       int    `json:"color"` +} + +type DiscordWebhook struct { + 	Embeds []DiscordEmbed `json:"embeds"` +} + +func getDiscordColor(status string) int { + 	switch status { + 	case "success": + 		return 3066993 + 	case "error": + 		return 15158332 + 	case "warning": + 		return 15844367 + 	case "pending": + 		return 3447003 + 	default: + 		return 0 + 	} +} + +func getDiscordHeader(event string) string { + 	switch event { + 	case "download_complete": + 		return "[Decypharr] Download Completed" + 	case "download_failed": + 		return "[Decypharr] Download Failed" + 	case "repair_pending": + 		return "[Decypharr] Repair Completed, Awaiting action" + 	case "repair_complete": + 		return "[Decypharr] Repair Complete" + 	default: + 		// split the event string and capitalize the first letter of each word + 		evs := strings.Split(event, "_") + 		for i, ev := range evs { + 			if ev != "" { evs[i] = strings.ToUpper(ev[:1]) + ev[1:] } + 		} + 		return "[Decypharr] " + strings.Join(evs, " ") + 	} +} + +func SendDiscordMessage(event string, status string, message string) error { + 	cfg := config.GetConfig() + 	webhookURL := cfg.DiscordWebhook + 	if webhookURL == "" { + 		return nil + 	} + + 	// Create the proper Discord webhook structure + + 	webhook := DiscordWebhook{ + 		Embeds: []DiscordEmbed{ + 			{ + 				Title:       getDiscordHeader(event), + 				Description: message, + 				Color:       getDiscordColor(status), + 			}, + 		}, + 	} + + 	payload, err := json.Marshal(webhook) + 	if err != nil { + 		return fmt.Errorf("failed to marshal discord payload: %v", err) + 	} + + 	req, err := http.NewRequest(http.MethodPost, webhookURL, bytes.NewReader(payload)) + 	if err != nil { + 		return fmt.Errorf("failed to create discord request: %v", err) + 	} + 	req.Header.Set("Content-Type", "application/json") + + 	resp, err := http.DefaultClient.Do(req) + 	if err != nil { + 		return fmt.Errorf("failed to send discord message: %v", 
err) + } + defer resp.Body.Close() + + if resp.StatusCode < 200 || resp.StatusCode >= 300 { + bodyBytes, _ := io.ReadAll(resp.Body) + return fmt.Errorf("discord returned error status code: %s, body: %s", resp.Status, string(bodyBytes)) + } + + return nil +} diff --git a/internal/request/request.go b/internal/request/request.go index 08fffb3..fb87cae 100644 --- a/internal/request/request.go +++ b/internal/request/request.go @@ -119,14 +119,17 @@ func (c *RLHTTPClient) MakeRequest(req *http.Request) ([]byte, error) { func NewRLHTTPClient(rl *rate.Limiter, headers map[string]string) *RLHTTPClient { tr := &http.Transport{ TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, - Proxy: http.ProxyFromEnvironment, } c := &RLHTTPClient{ client: &http.Client{ Transport: tr, }, - Ratelimiter: rl, - Headers: headers, + } + if rl != nil { + c.Ratelimiter = rl + } + if headers != nil { + c.Headers = headers } return c } diff --git a/pkg/arr/arr.go b/pkg/arr/arr.go index 35ee8f9..94e1d18 100644 --- a/pkg/arr/arr.go +++ b/pkg/arr/arr.go @@ -2,13 +2,16 @@ package arr import ( "bytes" + "crypto/tls" "encoding/json" "fmt" "github.com/sirrobot01/debrid-blackhole/internal/config" "github.com/sirrobot01/debrid-blackhole/internal/request" + "io" "net/http" "strings" "sync" + "time" ) // Type is a type of arr @@ -21,51 +24,84 @@ const ( Readarr Type = "readarr" ) -var ( - client *request.RLHTTPClient = request.NewRLHTTPClient(nil, nil) -) - type Arr struct { Name string `json:"name"` Host string `json:"host"` Token string `json:"token"` Type Type `json:"type"` Cleanup bool `json:"cleanup"` + client *http.Client } func New(name, host, token string, cleanup bool) *Arr { return &Arr{ Name: name, Host: host, - Token: token, + Token: strings.TrimSpace(token), Type: InferType(host, name), Cleanup: cleanup, + client: &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, + Proxy: http.ProxyFromEnvironment, + }, + }, } } func (a *Arr) 
Request(method, endpoint string, payload interface{}) (*http.Response, error) { if a.Token == "" || a.Host == "" { - return nil, nil + return nil, fmt.Errorf("arr not configured") } url, err := request.JoinURL(a.Host, endpoint) if err != nil { return nil, err } - var jsonPayload []byte - + var body io.Reader if payload != nil { - jsonPayload, err = json.Marshal(payload) + b, err := json.Marshal(payload) if err != nil { return nil, err } + body = bytes.NewReader(b) } - req, err := http.NewRequest(method, url, bytes.NewBuffer(jsonPayload)) + req, err := http.NewRequest(method, url, body) + if err != nil { return nil, err } req.Header.Set("Content-Type", "application/json") req.Header.Set("X-Api-Key", a.Token) - return client.Do(req) + if a.client == nil { + a.client = &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, + Proxy: http.ProxyFromEnvironment, + }, + } + } + + var resp *http.Response + + for attempts := 0; attempts < 5; attempts++ { + resp, err = a.client.Do(req) + if err != nil { + return nil, err + } + + // If we got a 401, wait briefly and retry + if resp.StatusCode == http.StatusUnauthorized { + resp.Body.Close() // Don't leak response bodies + if attempts < 4 { // Don't sleep on the last attempt + time.Sleep(time.Duration(attempts+1) * 100 * time.Millisecond) + continue + } + } + + return resp, nil + } + + return resp, err } func (a *Arr) Validate() error { diff --git a/pkg/arr/content.go b/pkg/arr/content.go index e804643..3bd314f 100644 --- a/pkg/arr/content.go +++ b/pkg/arr/content.go @@ -7,25 +7,38 @@ import ( "strconv" ) -func (a *Arr) GetMedia(tvId string) ([]Content, error) { +type episode struct { + Id int `json:"id"` + EpisodeFileID int `json:"episodeFileId"` +} + +func (a *Arr) GetMedia(mediaId string) ([]Content, error) { // Get series - resp, err := a.Request(http.MethodGet, fmt.Sprintf("api/v3/series?tvdbId=%s", tvId), nil) + if a.Type == Radarr { + return GetMovies(a, mediaId) + } + // 
This is likely Sonarr + resp, err := a.Request(http.MethodGet, fmt.Sprintf("api/v3/series?tvdbId=%s", mediaId), nil) if err != nil { return nil, err } + defer resp.Body.Close() if resp.StatusCode == http.StatusNotFound { // This is likely Radarr - return GetMovies(a, tvId) + return GetMovies(a, mediaId) } a.Type = Sonarr - defer resp.Body.Close() + type series struct { Title string `json:"title"` Id int `json:"id"` } + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("failed to get series: %s", resp.Status) + } var data []series if err = json.NewDecoder(resp.Body).Decode(&data); err != nil { - return nil, err + return nil, fmt.Errorf("failed to decode series: %v", err) } // Get series files contents := make([]Content, 0) @@ -43,11 +56,6 @@ func (a *Arr) GetMedia(tvId string) ([]Content, error) { Title: d.Title, Id: d.Id, } - - type episode struct { - Id int `json:"id"` - EpisodeFileID int `json:"episodeFileId"` - } resp, err = a.Request(http.MethodGet, fmt.Sprintf("api/v3/episode?seriesId=%d", d.Id), nil) if err != nil { continue @@ -67,12 +75,20 @@ func (a *Arr) GetMedia(tvId string) ([]Content, error) { if !ok { eId = 0 } + if file.Id == 0 || file.Path == "" { + // Skip files without path + continue + } files = append(files, ContentFile{ FileId: file.Id, Path: file.Path, Id: eId, }) } + if len(files) == 0 { + // Skip series without files + continue + } ct.Files = files contents = append(contents, ct) } @@ -92,7 +108,7 @@ func GetMovies(a *Arr, tvId string) ([]Content, error) { defer resp.Body.Close() var movies []Movie if err = json.NewDecoder(resp.Body).Decode(&movies); err != nil { - return nil, err + return nil, fmt.Errorf("failed to decode movies: %v", err) } contents := make([]Content, 0) for _, movie := range movies { @@ -101,6 +117,10 @@ func GetMovies(a *Arr, tvId string) ([]Content, error) { Id: movie.Id, } files := make([]ContentFile, 0) + if movie.MovieFile.Id == 0 || movie.MovieFile.Path == "" { + // Skip movies without files + continue 
+ } files = append(files, ContentFile{ FileId: movie.MovieFile.Id, Id: movie.Id, diff --git a/pkg/arr/types.go b/pkg/arr/types.go index 17fd9e1..0d77195 100644 --- a/pkg/arr/types.go +++ b/pkg/arr/types.go @@ -8,7 +8,6 @@ type Movie struct { MovieId int `json:"movieId"` RelativePath string `json:"relativePath"` Path string `json:"path"` - Size int `json:"size"` Id int `json:"id"` } `json:"movieFile"` Id int `json:"id"` diff --git a/pkg/debrid/torrent/torrent.go b/pkg/debrid/torrent/torrent.go index ec4622e..282151b 100644 --- a/pkg/debrid/torrent/torrent.go +++ b/pkg/debrid/torrent/torrent.go @@ -3,6 +3,7 @@ package torrent import ( "fmt" "github.com/sirrobot01/debrid-blackhole/internal/cache" + "github.com/sirrobot01/debrid-blackhole/internal/logger" "github.com/sirrobot01/debrid-blackhole/internal/utils" "github.com/sirrobot01/debrid-blackhole/pkg/arr" "os" @@ -66,6 +67,7 @@ func (t *Torrent) GetSymlinkFolder(parent string) string { } func (t *Torrent) GetMountFolder(rClonePath string) (string, error) { + _log := logger.GetDefaultLogger() possiblePaths := []string{ t.OriginalFilename, t.Filename, @@ -73,7 +75,9 @@ func (t *Torrent) GetMountFolder(rClonePath string) (string, error) { } for _, path := range possiblePaths { - _, err := os.Stat(filepath.Join(rClonePath, path)) + _p := filepath.Join(rClonePath, path) + _log.Trace().Msgf("Checking path: %s", _p) + _, err := os.Stat(_p) if !os.IsNotExist(err) { return path, nil } diff --git a/pkg/qbit/downloader.go b/pkg/qbit/downloader.go index c9ae894..af6ef9e 100644 --- a/pkg/qbit/downloader.go +++ b/pkg/qbit/downloader.go @@ -187,7 +187,7 @@ func (q *QBit) getTorrentPath(rclonePath string, debridTorrent *debrid.Torrent) q.logger.Debug().Msgf("Found torrent path: %s", torrentPath) return torrentPath, err } - time.Sleep(100 * time.Millisecond) + time.Sleep(10 * time.Millisecond) } } diff --git a/pkg/qbit/storage.go b/pkg/qbit/storage.go index eda588c..ed6b2f2 100644 --- a/pkg/qbit/storage.go +++ b/pkg/qbit/storage.go 
@@ -4,6 +4,7 @@ import ( "encoding/json" "fmt" "os" + "sort" "sync" ) @@ -109,7 +110,46 @@ func (ts *TorrentStorage) GetAll(category string, filter string, hashes []string } } } - return filtered + torrents = filtered + } + return torrents +} + +func (ts *TorrentStorage) GetAllSorted(category string, filter string, hashes []string, sortBy string, ascending bool) []*Torrent { + torrents := ts.GetAll(category, filter, hashes) + if sortBy != "" { + sort.Slice(torrents, func(i, j int) bool { + // If ascending is false, swap i and j to get descending order + if !ascending { + i, j = j, i + } + + switch sortBy { + case "name": + return torrents[i].Name < torrents[j].Name + case "size": + return torrents[i].Size < torrents[j].Size + case "added_on": + return torrents[i].AddedOn < torrents[j].AddedOn + case "completed": + return torrents[i].Completed < torrents[j].Completed + case "progress": + return torrents[i].Progress < torrents[j].Progress + case "state": + return torrents[i].State < torrents[j].State + case "category": + return torrents[i].Category < torrents[j].Category + case "dlspeed": + return torrents[i].Dlspeed < torrents[j].Dlspeed + case "upspeed": + return torrents[i].Upspeed < torrents[j].Upspeed + case "ratio": + return torrents[i].Ratio < torrents[j].Ratio + default: + // Default sort by added_on + return torrents[i].AddedOn < torrents[j].AddedOn + } + }) } return torrents } diff --git a/pkg/qbit/torrent.go b/pkg/qbit/torrent.go index 5d3016c..74a52f1 100644 --- a/pkg/qbit/torrent.go +++ b/pkg/qbit/torrent.go @@ -4,6 +4,7 @@ import ( "cmp" "context" "fmt" + "github.com/sirrobot01/debrid-blackhole/internal/request" "github.com/sirrobot01/debrid-blackhole/internal/utils" "github.com/sirrobot01/debrid-blackhole/pkg/arr" db "github.com/sirrobot01/debrid-blackhole/pkg/debrid" @@ -114,6 +115,11 @@ func (q *QBit) ProcessFiles(torrent *Torrent, debridTorrent *debrid.Torrent, arr } torrent.TorrentPath = torrentSymlinkPath q.UpdateTorrent(torrent, debridTorrent) + 
go func() { + if err := request.SendDiscordMessage("download_complete", "success", torrent.discordContext()); err != nil { + q.logger.Error().Msgf("Error sending discord message: %v", err) + } + }() if err := arr.Refresh(); err != nil { q.logger.Error().Msgf("Error refreshing arr: %v", err) } @@ -122,6 +128,11 @@ func (q *QBit) ProcessFiles(torrent *Torrent, debridTorrent *debrid.Torrent, arr func (q *QBit) MarkAsFailed(t *Torrent) *Torrent { t.State = "error" q.Storage.AddOrUpdate(t) + go func() { + if err := request.SendDiscordMessage("download_failed", "error", t.discordContext()); err != nil { + q.logger.Error().Msgf("Error sending discord message: %v", err) + } + }() return t } diff --git a/pkg/qbit/types.go b/pkg/qbit/types.go index d41836c..eaa159c 100644 --- a/pkg/qbit/types.go +++ b/pkg/qbit/types.go @@ -1,6 +1,7 @@ package qbit import ( + "fmt" "github.com/sirrobot01/debrid-blackhole/pkg/debrid/torrent" "sync" ) @@ -230,6 +231,17 @@ func (t *Torrent) IsReady() bool { return t.AmountLeft <= 0 && t.TorrentPath != "" } +func (t *Torrent) discordContext() string { + format := ` + **Name:** %s + **Arr:** %s + **Hash:** %s + **MagnetURI:** %s + **Debrid:** %s + ` + return fmt.Sprintf(format, t.Name, t.Category, t.Hash, t.MagnetUri, t.Debrid) +} + type TorrentProperties struct { AdditionDate int64 `json:"addition_date,omitempty"` Comment string `json:"comment,omitempty"` diff --git a/pkg/repair/repair.go b/pkg/repair/repair.go index 040983e..c11e5b7 100644 --- a/pkg/repair/repair.go +++ b/pkg/repair/repair.go @@ -2,20 +2,23 @@ package repair import ( "context" + "encoding/json" "fmt" "github.com/google/uuid" "github.com/rs/zerolog" "github.com/sirrobot01/debrid-blackhole/internal/config" "github.com/sirrobot01/debrid-blackhole/internal/logger" + "github.com/sirrobot01/debrid-blackhole/internal/request" "github.com/sirrobot01/debrid-blackhole/pkg/arr" "github.com/sirrobot01/debrid-blackhole/pkg/debrid/engine" - "log" + "golang.org/x/sync/errgroup" "net/http" 
"net/url" "os" "os/signal" "path/filepath" "runtime" + "sort" "strings" "sync" "syscall" @@ -23,57 +26,130 @@ import ( ) type Repair struct { - Jobs []Job `json:"jobs"` - arrs *arr.Storage - deb engine.Service - duration time.Duration - runOnStart bool - ZurgURL string - IsZurg bool - logger zerolog.Logger + Jobs map[string]*Job + arrs *arr.Storage + deb engine.Service + duration time.Duration + runOnStart bool + ZurgURL string + IsZurg bool + autoProcess bool + logger zerolog.Logger + filename string } -func New(deb *engine.Engine, arrs *arr.Storage) *Repair { +func New(arrs *arr.Storage) *Repair { cfg := config.GetConfig() duration, err := parseSchedule(cfg.Repair.Interval) if err != nil { duration = time.Hour * 24 } r := &Repair{ - arrs: arrs, - deb: deb.Get(), - logger: logger.NewLogger("repair", cfg.LogLevel, os.Stdout), - duration: duration, - runOnStart: cfg.Repair.RunOnStart, - ZurgURL: cfg.Repair.ZurgURL, + arrs: arrs, + logger: logger.NewLogger("repair", cfg.LogLevel, os.Stdout), + duration: duration, + runOnStart: cfg.Repair.RunOnStart, + ZurgURL: cfg.Repair.ZurgURL, + autoProcess: cfg.Repair.AutoProcess, + filename: filepath.Join(cfg.Path, "repair.json"), } if r.ZurgURL != "" { r.IsZurg = true } + // Load jobs from file + r.loadFromFile() + return r } +type JobStatus string + +const ( + JobStarted JobStatus = "started" + JobPending JobStatus = "pending" + JobFailed JobStatus = "failed" + JobCompleted JobStatus = "completed" +) + type Job struct { - ID string `json:"id"` - Arrs []*arr.Arr `json:"arrs"` - MediaIDs []string `json:"media_ids"` - StartedAt time.Time `json:"created_at"` - CompletedAt time.Time `json:"finished_at"` - FailedAt time.Time `json:"failed_at"` + ID string `json:"id"` + Arrs []*arr.Arr `json:"arrs"` + MediaIDs []string `json:"media_ids"` + OneOff bool `json:"one_off"` + StartedAt time.Time `json:"created_at"` + BrokenItems map[string][]arr.ContentFile `json:"broken_items"` + Status JobStatus `json:"status"` + CompletedAt time.Time 
`json:"finished_at"` + FailedAt time.Time `json:"failed_at"` + AutoProcess bool `json:"auto_process"` Error string `json:"error"` } -func (r *Repair) NewJob(arrs []*arr.Arr, mediaIDs []string) *Job { +func (j *Job) discordContext() string { + format := ` + **ID**: %s + **Arrs**: %s + **Media IDs**: %s + **Status**: %s + **Started At**: %s + **Completed At**: %s +` + arrs := make([]string, 0) + for _, a := range j.Arrs { + arrs = append(arrs, a.Name) + } + + dateFmt := "2006-01-02 15:04:05" + + return fmt.Sprintf(format, j.ID, strings.Join(arrs, ","), strings.Join(j.MediaIDs, ", "), j.Status, j.StartedAt.Format(dateFmt), j.CompletedAt.Format(dateFmt)) +} + +func (r *Repair) getArrs(arrNames []string) []*arr.Arr { + arrs := make([]*arr.Arr, 0) + if len(arrNames) == 0 { + arrs = r.arrs.GetAll() + } else { + for _, name := range arrNames { + a := r.arrs.Get(name) + if a == nil || a.Host == "" || a.Token == "" { + continue + } + arrs = append(arrs, a) + } + } + return arrs +} + +func jobKey(arrNames []string, mediaIDs []string) string { + return fmt.Sprintf("%s-%s", strings.Join(arrNames, ","), strings.Join(mediaIDs, ",")) +} + +func (r *Repair) reset(j *Job) { + // Update job for rerun + j.Status = JobStarted + j.StartedAt = time.Now() + j.CompletedAt = time.Time{} + j.FailedAt = time.Time{} + j.BrokenItems = nil + j.Error = "" + if j.Arrs == nil { + j.Arrs = r.getArrs([]string{}) // Get new arrs + } +} + +func (r *Repair) newJob(arrsNames []string, mediaIDs []string) *Job { + arrs := r.getArrs(arrsNames) return &Job{ ID: uuid.New().String(), Arrs: arrs, MediaIDs: mediaIDs, StartedAt: time.Now(), + Status: JobStarted, } } -func (r *Repair) PreRunChecks() error { +func (r *Repair) preRunChecks() error { // Check if zurg url is reachable if !r.IsZurg { return nil @@ -90,43 +166,119 @@ func (r *Repair) PreRunChecks() error { return nil } -func (r *Repair) Repair(arrs []*arr.Arr, mediaIds []string) error { +func (r *Repair) AddJob(arrsNames []string, mediaIDs []string, 
autoProcess bool) error { + key := jobKey(arrsNames, mediaIDs) + job, ok := r.Jobs[key] + if !ok { + job = r.newJob(arrsNames, mediaIDs) + } + job.AutoProcess = autoProcess + r.reset(job) + r.Jobs[key] = job + go r.saveToFile() + err := r.repair(job) + go r.saveToFile() + return err +} - j := r.NewJob(arrs, mediaIds) - - if err := r.PreRunChecks(); err != nil { +func (r *Repair) repair(job *Job) error { + if err := r.preRunChecks(); err != nil { return err } - var wg sync.WaitGroup - errors := make(chan error) - for _, a := range j.Arrs { - wg.Add(1) - go func(a *arr.Arr) { - defer wg.Done() - if len(j.MediaIDs) == 0 { - if err := r.RepairArr(a, ""); err != nil { - log.Printf("Error repairing %s: %v", a.Name, err) - errors <- err + + // Create a new error group with context + g, ctx := errgroup.WithContext(context.Background()) + + // Use a mutex to protect concurrent access to brokenItems + var mu sync.Mutex + brokenItems := map[string][]arr.ContentFile{} + + for _, a := range job.Arrs { + a := a // Capture range variable + g.Go(func() error { + var items []arr.ContentFile + var err error + + if len(job.MediaIDs) == 0 { + items, err = r.repairArr(job, a, "") + if err != nil { + r.logger.Error().Err(err).Msgf("Error repairing %s", a.Name) + return err } } else { - for _, id := range j.MediaIDs { - if err := r.RepairArr(a, id); err != nil { - log.Printf("Error repairing %s: %v", a.Name, err) - errors <- err + for _, id := range job.MediaIDs { + // Check if any other goroutine has failed + select { + case <-ctx.Done(): + return ctx.Err() + default: } + + someItems, err := r.repairArr(job, a, id) + if err != nil { + r.logger.Error().Err(err).Msgf("Error repairing %s with ID %s", a.Name, id) + return err + } + items = append(items, someItems...) 
} } - }(a) + + // Safely append the found items to the shared slice + if len(items) > 0 { + mu.Lock() + brokenItems[a.Name] = items + mu.Unlock() + } + + return nil + }) } - wg.Wait() - close(errors) - err := <-errors - if err != nil { - j.FailedAt = time.Now() - j.Error = err.Error() + + // Wait for all goroutines to complete and check for errors + if err := g.Wait(); err != nil { + job.FailedAt = time.Now() + job.Error = err.Error() + job.Status = JobFailed + job.CompletedAt = time.Now() + go func() { + if err := request.SendDiscordMessage("repair_failed", "error", job.discordContext()); err != nil { + r.logger.Error().Msgf("Error sending discord message: %v", err) + } + }() return err } - j.CompletedAt = time.Now() + + if len(brokenItems) == 0 { + job.CompletedAt = time.Now() + job.Status = JobCompleted + + go func() { + if err := request.SendDiscordMessage("repair_complete", "success", job.discordContext()); err != nil { + r.logger.Error().Msgf("Error sending discord message: %v", err) + } + }() + + return nil + } + + job.BrokenItems = brokenItems + if job.AutoProcess { + // Job is already processed + job.CompletedAt = time.Now() // Mark as completed + job.Status = JobCompleted + go func() { + if err := request.SendDiscordMessage("repair_complete", "success", job.discordContext()); err != nil { + r.logger.Error().Msgf("Error sending discord message: %v", err) + } + }() + } else { + job.Status = JobPending + go func() { + if err := request.SendDiscordMessage("repair_pending", "pending", job.discordContext()); err != nil { + r.logger.Error().Msgf("Error sending discord message: %v", err) + } + }() + } return nil } @@ -138,8 +290,8 @@ func (r *Repair) Start(ctx context.Context) error { if r.runOnStart { r.logger.Info().Msgf("Running initial repair") go func() { - if err := r.Repair(r.arrs.GetAll(), []string{}); err != nil { - r.logger.Info().Msgf("Error during initial repair: %v", err) + if err := r.AddJob([]string{}, []string{}, r.autoProcess); err != nil { + 
r.logger.Error().Err(err).Msg("Error running initial repair") } }() } @@ -156,9 +308,8 @@ func (r *Repair) Start(ctx context.Context) error { return nil case t := <-ticker.C: r.logger.Info().Msgf("Running repair at %v", t.Format("15:04:05")) - err := r.Repair(r.arrs.GetAll(), []string{}) - if err != nil { - r.logger.Info().Msgf("Error during repair: %v", err) + if err := r.AddJob([]string{}, []string{}, r.autoProcess); err != nil { + r.logger.Error().Err(err).Msg("Error running repair") } // If using time-of-day schedule, reset the ticker for next day @@ -171,55 +322,78 @@ func (r *Repair) Start(ctx context.Context) error { } } -func (r *Repair) RepairArr(a *arr.Arr, tmdbId string) error { - - cfg := config.GetConfig() +func (r *Repair) repairArr(j *Job, a *arr.Arr, tmdbId string) ([]arr.ContentFile, error) { + brokenItems := make([]arr.ContentFile, 0) r.logger.Info().Msgf("Starting repair for %s", a.Name) media, err := a.GetMedia(tmdbId) if err != nil { r.logger.Info().Msgf("Failed to get %s media: %v", a.Name, err) - return err + return brokenItems, err } r.logger.Info().Msgf("Found %d %s media", len(media), a.Name) if len(media) == 0 { r.logger.Info().Msgf("No %s media found", a.Name) - return nil + return brokenItems, nil } // Check first media to confirm mounts are accessible if !r.isMediaAccessible(media[0]) { r.logger.Info().Msgf("Skipping repair. Parent directory not accessible for. 
Check your mounts") - return nil + return brokenItems, nil } - semaphore := make(chan struct{}, runtime.NumCPU()*4) - totalBrokenItems := 0 - var wg sync.WaitGroup + // Create a new error group + g, ctx := errgroup.WithContext(context.Background()) + + // Limit concurrent goroutines + g.SetLimit(runtime.NumCPU() * 4) + + // Mutex for brokenItems + var mu sync.Mutex + for _, m := range media { - wg.Add(1) - semaphore <- struct{}{} - go func(m arr.Content) { - defer wg.Done() - defer func() { <-semaphore }() - brokenItems := r.getBrokenFiles(m) - if brokenItems != nil { - r.logger.Debug().Msgf("Found %d broken files for %s", len(brokenItems), m.Title) - if !cfg.Repair.SkipDeletion { - if err := a.DeleteFiles(brokenItems); err != nil { - r.logger.Info().Msgf("Failed to delete broken items for %s: %v", m.Title, err) + m := m // Create a new variable scoped to the loop iteration + g.Go(func() error { + // Check if context was canceled + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + items := r.getBrokenFiles(m) + if items != nil { + r.logger.Debug().Msgf("Found %d broken files for %s", len(items), m.Title) + if j.AutoProcess { + r.logger.Info().Msgf("Auto processing %d broken items for %s", len(items), m.Title) + + // Delete broken items + + if err := a.DeleteFiles(items); err != nil { + r.logger.Debug().Msgf("Failed to delete broken items for %s: %v", m.Title, err) + } + + // Search for missing items + if err := a.SearchMissing(items); err != nil { + r.logger.Debug().Msgf("Failed to search missing items for %s: %v", m.Title, err) } } - if err := a.SearchMissing(brokenItems); err != nil { - r.logger.Info().Msgf("Failed to search missing items for %s: %v", m.Title, err) - } - totalBrokenItems += len(brokenItems) + + mu.Lock() + brokenItems = append(brokenItems, items...) + mu.Unlock() } - }(m) + return nil + }) } - wg.Wait() - r.logger.Info().Msgf("Repair completed for %s. 
%d broken items found", a.Name, totalBrokenItems) - return nil + + if err := g.Wait(); err != nil { + return brokenItems, err + } + + r.logger.Info().Msgf("Repair completed for %s. %d broken items found", a.Name, len(brokenItems)) + return brokenItems, nil } func (r *Repair) isMediaAccessible(m arr.Content) bool { @@ -328,9 +502,13 @@ func (r *Repair) getZurgBrokenFiles(media arr.Content) []arr.ContentFile { brokenFiles = append(brokenFiles, f...) continue } - resp.Body.Close() + err = resp.Body.Close() + if err != nil { + return nil + } if resp.StatusCode != http.StatusOK { r.logger.Debug().Msgf("Failed to get download url for %s", fullURL) + resp.Body.Close() brokenFiles = append(brokenFiles, f...) continue } @@ -350,3 +528,129 @@ func (r *Repair) getZurgBrokenFiles(media arr.Content) []arr.ContentFile { r.logger.Debug().Msgf("%d broken files found for %s", len(brokenFiles), media.Title) return brokenFiles } + +func (r *Repair) GetJob(id string) *Job { + for _, job := range r.Jobs { + if job.ID == id { + return job + } + } + return nil +} + +func (r *Repair) GetJobs() []*Job { + jobs := make([]*Job, 0) + for _, job := range r.Jobs { + jobs = append(jobs, job) + } + sort.Slice(jobs, func(i, j int) bool { + return jobs[i].StartedAt.After(jobs[j].StartedAt) + }) + + return jobs +} + +func (r *Repair) ProcessJob(id string) error { + job := r.GetJob(id) + if job == nil { + return fmt.Errorf("job %s not found", id) + } + if job.Status != JobPending { + return fmt.Errorf("job %s not pending", id) + } + if job.StartedAt.IsZero() { + return fmt.Errorf("job %s not started", id) + } + if !job.CompletedAt.IsZero() { + return fmt.Errorf("job %s already completed", id) + } + if !job.FailedAt.IsZero() { + return fmt.Errorf("job %s already failed", id) + } + + brokenItems := job.BrokenItems + if len(brokenItems) == 0 { + r.logger.Info().Msgf("No broken items found for job %s", id) + job.CompletedAt = time.Now() + job.Status = JobCompleted + return nil + } + + // Create a new 
error group + g := new(errgroup.Group) + + for arrName, items := range brokenItems { + items := items + arrName := arrName + g.Go(func() error { + a := r.arrs.Get(arrName) + if a == nil { + r.logger.Error().Msgf("Arr %s not found", arrName) + return nil + } + + if err := a.DeleteFiles(items); err != nil { + r.logger.Error().Err(err).Msgf("Failed to delete broken items for %s", arrName) + return nil + + } + // Search for missing items + if err := a.SearchMissing(items); err != nil { + r.logger.Error().Err(err).Msgf("Failed to search missing items for %s", arrName) + return nil + } + return nil + + }) + } + + if err := g.Wait(); err != nil { + job.FailedAt = time.Now() + job.Error = err.Error() + job.CompletedAt = time.Now() + job.Status = JobFailed + return err + } + + job.CompletedAt = time.Now() + job.Status = JobCompleted + + return nil +} + +func (r *Repair) saveToFile() { + // Save jobs to file + data, err := json.Marshal(r.Jobs) + if err != nil { + r.logger.Debug().Err(err).Msg("Failed to marshal jobs") + } + err = os.WriteFile(r.filename, data, 0644) +} + +func (r *Repair) loadFromFile() { + data, err := os.ReadFile(r.filename) + if err != nil && os.IsNotExist(err) { + r.Jobs = make(map[string]*Job) + return + } + jobs := make(map[string]*Job) + err = json.Unmarshal(data, &jobs) + if err != nil { + r.logger.Debug().Err(err).Msg("Failed to unmarshal jobs") + } + r.Jobs = jobs +} + +func (r *Repair) DeleteJobs(ids []string) { + for _, id := range ids { + if id == "" { + continue + } + for k, job := range r.Jobs { + if job.ID == id { + delete(r.Jobs, k) + } + } + } + go r.saveToFile() +} diff --git a/pkg/server/server.go b/pkg/server/server.go index 424c380..414a1d4 100644 --- a/pkg/server/server.go +++ b/pkg/server/server.go @@ -37,6 +37,10 @@ func New() *Server { func (s *Server) Start(ctx context.Context) error { cfg := config.GetConfig() // Register routes + // Register webhooks + s.router.Post("/webhooks/tautulli", s.handleTautulli) + + // Register logs 
s.router.Get("/logs", s.getLogs) port := fmt.Sprintf(":%s", cfg.QBitTorrent.Port) s.logger.Info().Msgf("Starting server on %s", port) diff --git a/pkg/server/webhook.go b/pkg/server/webhook.go new file mode 100644 index 0000000..cb3c620 --- /dev/null +++ b/pkg/server/webhook.go @@ -0,0 +1,54 @@ +package server + +import ( + "cmp" + "encoding/json" + "github.com/sirrobot01/debrid-blackhole/pkg/service" + "net/http" +) + +func (s *Server) handleTautulli(w http.ResponseWriter, r *http.Request) { + // Verify it's a POST request + if r.Method != http.MethodPost { + http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) + return + } + + // Parse the JSON body from Tautulli + var payload struct { + Type string `json:"type"` + TvdbID string `json:"tvdb_id"` + TmdbID string `json:"tmdb_id"` + Topic string `json:"topic"` + AutoProcess bool `json:"autoProcess"` + } + + if err := json.NewDecoder(r.Body).Decode(&payload); err != nil { + s.logger.Error().Err(err).Msg("Failed to parse webhook body") + http.Error(w, "Failed to parse webhook body: "+err.Error(), http.StatusBadRequest) + return + } + + if payload.Topic != "tautulli" { + http.Error(w, "Invalid topic", http.StatusBadRequest) + return + } + + if payload.TmdbID == "" && payload.TvdbID == "" { + http.Error(w, "Invalid ID", http.StatusBadRequest) + return + } + svc := service.GetService() + repair := svc.Repair + + mediaId := cmp.Or(payload.TmdbID, payload.TvdbID) + + if repair == nil { + http.Error(w, "Repair service is not enabled", http.StatusInternalServerError) + return + } + if err := repair.AddJob([]string{}, []string{mediaId}, payload.AutoProcess); err != nil { + http.Error(w, "Failed to add job: "+err.Error(), http.StatusInternalServerError) + return + } +} diff --git a/pkg/service/service.go b/pkg/service/service.go index 7eb5315..104a7c6 100644 --- a/pkg/service/service.go +++ b/pkg/service/service.go @@ -24,7 +24,7 @@ func New() *Service { arrs := arr.NewStorage() deb := debrid.New() instance = 
&Service{ - Repair: repair.New(deb, arrs), + Repair: repair.New(arrs), Arr: arrs, Debrid: deb, } @@ -44,7 +44,7 @@ func Update() *Service { arrs := arr.NewStorage() deb := debrid.New() instance = &Service{ - Repair: repair.New(deb, arrs), + Repair: repair.New(arrs), Arr: arrs, Debrid: deb, } diff --git a/pkg/web/routes.go b/pkg/web/routes.go index 23d078e..db7924e 100644 --- a/pkg/web/routes.go +++ b/pkg/web/routes.go @@ -23,6 +23,9 @@ func (ui *Handler) Routes() http.Handler { r.Get("/arrs", ui.handleGetArrs) r.Post("/add", ui.handleAddContent) r.Post("/repair", ui.handleRepairMedia) + r.Get("/repair/jobs", ui.handleGetRepairJobs) + r.Post("/repair/jobs/{id}/process", ui.handleProcessRepairJob) + r.Delete("/repair/jobs", ui.handleDeleteRepairJob) r.Get("/torrents", ui.handleGetTorrents) r.Delete("/torrents/{category}/{hash}", ui.handleDeleteTorrent) r.Delete("/torrents/", ui.handleDeleteTorrents) diff --git a/pkg/web/ui.go b/pkg/web/ui.go index 591f0df..ec7a96e 100644 --- a/pkg/web/ui.go +++ b/pkg/web/ui.go @@ -46,9 +46,10 @@ type ContentResponse struct { } type RepairRequest struct { - ArrName string `json:"arr"` - MediaIds []string `json:"mediaIds"` - Async bool `json:"async"` + ArrName string `json:"arr"` + MediaIds []string `json:"mediaIds"` + Async bool `json:"async"` + AutoProcess bool `json:"autoProcess"` } //go:embed web/* @@ -383,7 +384,7 @@ func (ui *Handler) handleRepairMedia(w http.ResponseWriter, r *http.Request) { if req.Async { go func() { - if err := svc.Repair.Repair([]*arr.Arr{_arr}, req.MediaIds); err != nil { + if err := svc.Repair.AddJob([]string{req.ArrName}, req.MediaIds, req.AutoProcess); err != nil { ui.logger.Error().Err(err).Msg("Failed to repair media") } }() @@ -391,7 +392,7 @@ func (ui *Handler) handleRepairMedia(w http.ResponseWriter, r *http.Request) { return } - if err := svc.Repair.Repair([]*arr.Arr{_arr}, req.MediaIds); err != nil { + if err := svc.Repair.AddJob([]string{req.ArrName}, req.MediaIds, req.AutoProcess); err != nil { 
http.Error(w, fmt.Sprintf("Failed to repair: %v", err), http.StatusInternalServerError) return @@ -441,3 +442,41 @@ func (ui *Handler) handleGetConfig(w http.ResponseWriter, r *http.Request) { cfg.Arrs = arrCfgs request.JSONResponse(w, cfg, http.StatusOK) } + +func (ui *Handler) handleGetRepairJobs(w http.ResponseWriter, r *http.Request) { + svc := service.GetService() + request.JSONResponse(w, svc.Repair.GetJobs(), http.StatusOK) +} + +func (ui *Handler) handleProcessRepairJob(w http.ResponseWriter, r *http.Request) { + id := chi.URLParam(r, "id") + if id == "" { + http.Error(w, "No job ID provided", http.StatusBadRequest) + return + } + svc := service.GetService() + if err := svc.Repair.ProcessJob(id); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + w.WriteHeader(http.StatusOK) +} + +func (ui *Handler) handleDeleteRepairJob(w http.ResponseWriter, r *http.Request) { + // Read ids from body + var req struct { + IDs []string `json:"ids"` + } + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + if len(req.IDs) == 0 { + http.Error(w, "No job IDs provided", http.StatusBadRequest) + return + } + + svc := service.GetService() + svc.Repair.DeleteJobs(req.IDs) + w.WriteHeader(http.StatusOK) +} diff --git a/pkg/web/web/config.html b/pkg/web/web/config.html index e4b5dbc..331a5f5 100644 --- a/pkg/web/web/config.html +++ b/pkg/web/web/config.html @@ -27,14 +27,27 @@