- Add more indepth stats like number of torrents, profile details etc

- Add torrent ingest endpoints
- Add issue template
This commit is contained in:
Mukhtar Akere
2025-05-29 04:05:44 +01:00
parent f9c49cbbef
commit 1cd09239f9
25 changed files with 411 additions and 369 deletions

76
.github/ISSUE_TEMPLATE/bug_report.yml vendored Normal file
View File

@@ -0,0 +1,76 @@
name: Bug Report
description: 'Report a new bug'
labels: ['Type: Bug', 'Status: Needs Triage']
body:
- type: checkboxes
attributes:
label: Is there an existing issue for this?
description: Please search to see if an open or closed issue already exists for the bug you encountered. If a bug exists and is closed note that it may only be fixed in an unstable branch.
options:
- label: I have searched the existing open and closed issues
required: true
- type: textarea
attributes:
label: Current Behavior
description: A concise description of what you're experiencing.
validations:
required: true
- type: textarea
attributes:
label: Expected Behavior
description: A concise description of what you expected to happen.
validations:
required: true
- type: textarea
attributes:
label: Steps To Reproduce
description: Steps to reproduce the behavior.
placeholder: |
1. In this environment...
2. With this config...
3. Run '...'
4. See error...
validations:
required: false
- type: textarea
attributes:
label: Environment
description: |
examples:
- **OS**: Ubuntu 20.04
- **Version**: v1.0.0
- **Docker Install**: Yes
- **Browser**: Firefox 90 (If UI related)
value: |
- OS:
- Version:
- Docker Install:
- Browser:
render: markdown
validations:
required: true
- type: dropdown
attributes:
label: What branch are you running?
options:
- Main/Latest
- Beta
- Experimental
validations:
required: true
- type: textarea
attributes:
label: Trace Logs? **Not Optional**
description: |
Trace Logs
- are **required** for bug reports
- are not optional
validations:
required: true
- type: checkboxes
attributes:
label: Trace Logs have been provided as applicable
description: Trace logs are **required** for all bug reports and must contain `trace` entries. Info-level logs (containing neither `debug` nor `trace`) are not sufficient for a bug report.
options:
- label: I have read and followed the steps in the wiki link above and provided the required trace logs - the logs contain `trace` - that are relevant and show this issue.
required: true

View File

@@ -0,0 +1,38 @@
name: Feature Request
description: 'Suggest an idea for Decypharr'
labels: ['Type: Feature Request', 'Status: Needs Triage']
body:
- type: checkboxes
attributes:
label: Is there an existing issue for this?
description: Please search to see if an open or closed issue already exists for the feature you are requesting. If a request exists and is closed note that it may only be fixed in an unstable branch.
options:
- label: I have searched the existing open and closed issues
required: true
- type: textarea
attributes:
label: Is your feature request related to a problem? Please describe
description: A clear and concise description of what the problem is.
validations:
required: true
- type: textarea
attributes:
label: Describe the solution you'd like
description: A clear and concise description of what you want to happen.
validations:
required: true
- type: textarea
attributes:
label: Describe alternatives you've considered
description: A clear and concise description of any alternative solutions or features you've considered.
validations:
required: true
- type: textarea
attributes:
label: Anything else?
description: |
Links? References? Mockups? Anything that will give us more context about the feature you are encountering!
Tip: You can attach images or log files by clicking this area to highlight it and then dragging files in.
validations:
required: true

View File

@@ -36,14 +36,9 @@ services:
container_name: decypharr
ports:
- "8282:8282" # qBittorrent
user: "1000:1000"
volumes:
- /mnt/:/mnt
- ./configs/:/app # config.json must be in this directory
environment:
- PUID=1000
- PGID=1000
- UMASK=002
restart: unless-stopped
```

View File

@@ -52,14 +52,9 @@ services:
container_name: decypharr
ports:
- "8282:8282"
user: "1000:1000"
volumes:
- /mnt/:/mnt # Mount your media directory
- ./config/:/app # config.json must be in this directory
environment:
- PUID=1000
- PGID=1000
- UMASK=002
- QBIT_PORT=8282 # qBittorrent Port (optional)
restart: unless-stopped
```
@@ -69,6 +64,13 @@ Run the Docker Compose setup:
docker-compose up -d
```
#### Notes for Docker Users
- Ensure that the `/mnt/` directory is mounted correctly to access your media files.
- The `./config/` directory should contain your `config.json` file.
- You can adjust the `PUID` and `PGID` environment variables to match your user and group IDs for proper file permissions.
- The `UMASK` environment variable can be set to control file permissions created by Decypharr.
## Binary Installation
If you prefer not to use Docker, you can download and run the binary directly.
@@ -107,10 +109,4 @@ You can also configure Decypharr through the web interface, but it's recommended
"log_level": "info",
"port": "8282"
}
```
### Few Notes
- Make sure decypharr has access to the directories specified in the configuration file.
- Ensure decypharr has write permissions to the qBittorrent download folder.
- Make sure decypharr can write to the `./config/` directory.
```

View File

@@ -2,7 +2,6 @@ package request
import (
"bytes"
"compress/gzip"
"context"
"crypto/tls"
"encoding/json"
@@ -383,31 +382,6 @@ func JSONResponse(w http.ResponseWriter, data interface{}, code int) {
}
}
func Gzip(body []byte) []byte {
if len(body) == 0 {
return nil
}
// Check if the pool is nil
buf := bytes.NewBuffer(make([]byte, 0, len(body)))
gz, err := gzip.NewWriterLevel(buf, gzip.BestSpeed)
if err != nil {
return nil
}
if _, err := gz.Write(body); err != nil {
return nil
}
if err := gz.Close(); err != nil {
return nil
}
result := make([]byte, buf.Len())
copy(result, buf.Bytes())
return result
}
func Default() *Client {
once.Do(func() {
instance = New()
@@ -435,7 +409,7 @@ func isRetryableError(err error) bool {
var netErr net.Error
if errors.As(err, &netErr) {
// Retry on timeout errors and temporary errors
return netErr.Timeout() || netErr.Temporary()
return netErr.Timeout()
}
// Not a retryable error

View File

@@ -31,7 +31,11 @@ type AllDebrid struct {
addSamples bool
}
func New(dc config.Debrid) *AllDebrid {
// GetProfile is a stub: profile lookup is not implemented for AllDebrid,
// so it always returns (nil, nil). Because the error is nil too, callers
// must nil-check the returned profile before using it.
func (ad *AllDebrid) GetProfile() (*types.Profile, error) {
	return nil, nil
}
func New(dc config.Debrid) (*AllDebrid, error) {
rl := request.ParseRateLimit(dc.RateLimit)
headers := map[string]string{
@@ -65,7 +69,7 @@ func New(dc config.Debrid) *AllDebrid {
logger: logger.New(dc.Name),
checkCached: dc.CheckCached,
addSamples: dc.AddSamples,
}
}, nil
}
func (ad *AllDebrid) GetName() string {

View File

@@ -13,7 +13,7 @@ import (
"strings"
)
func createDebridClient(dc config.Debrid) types.Client {
func createDebridClient(dc config.Debrid) (types.Client, error) {
switch dc.Name {
case "realdebrid":
return realdebrid.New(dc)

View File

@@ -52,6 +52,12 @@ func (c *downloadLinkCache) Delete(key string) {
delete(c.data, key)
}
// Len returns the number of entries currently held in the
// download-link cache. It takes the cache mutex, so it is safe to call
// concurrently with reads and writes.
func (c *downloadLinkCache) Len() int {
	c.mu.Lock()
	defer c.mu.Unlock()
	return len(c.data)
}
type downloadLinkRequest struct {
result string
err error
@@ -245,3 +251,7 @@ func (c *Cache) GetDownloadByteRange(torrentName, filename string) (*[2]int64, e
file := ct.Files[filename]
return file.ByteRange, nil
}
// GetTotalActiveDownloadLinks reports how many download links are
// currently stored in this cache's download-link store.
func (c *Cache) GetTotalActiveDownloadLinks() int {
	return c.downloadLinks.Len()
}

View File

@@ -2,6 +2,7 @@ package debrid
import (
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/logger"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
"sync"
)
@@ -10,7 +11,7 @@ type Engine struct {
Clients map[string]types.Client
clientsMu sync.Mutex
Caches map[string]*Cache
CacheMu sync.Mutex
cacheMu sync.Mutex
LastUsed string
}
@@ -18,16 +19,22 @@ func NewEngine() *Engine {
cfg := config.Get()
clients := make(map[string]types.Client)
_logger := logger.Default()
caches := make(map[string]*Cache)
for _, dc := range cfg.Debrids {
client := createDebridClient(dc)
logger := client.GetLogger()
client, err := createDebridClient(dc)
if err != nil {
_logger.Error().Err(err).Str("Debrid", dc.Name).Msg("failed to connect to debrid client")
continue
}
_log := client.GetLogger()
if dc.UseWebDav {
caches[dc.Name] = New(dc, client)
logger.Info().Msg("Debrid Service started with WebDAV")
_log.Info().Msg("Debrid Service started with WebDAV")
} else {
logger.Info().Msg("Debrid Service started")
_log.Info().Msg("Debrid Service started")
}
clients[dc.Name] = client
}
@@ -51,9 +58,9 @@ func (d *Engine) Reset() {
d.Clients = make(map[string]types.Client)
d.clientsMu.Unlock()
d.CacheMu.Lock()
d.cacheMu.Lock()
d.Caches = make(map[string]*Cache)
d.CacheMu.Unlock()
d.cacheMu.Unlock()
}
func (d *Engine) GetDebrids() map[string]types.Client {

View File

@@ -25,3 +25,18 @@ func mergeFiles(torrents ...CachedTorrent) map[string]types.File {
}
return merged
}
// GetIngests converts every cached torrent into an IngestData record
// tagged with this client's debrid name. It always returns a non-nil
// slice (so an empty cache serializes to JSON "[]" rather than "null")
// and a nil error.
func (c *Cache) GetIngests() ([]types.IngestData, error) {
	torrents := c.GetTorrents()
	debridName := c.client.GetName()
	// Pre-size: exactly one ingest entry per cached torrent.
	ingests := make([]types.IngestData, 0, len(torrents))
	for _, torrent := range torrents {
		ingests = append(ingests, types.IngestData{
			Debrid: debridName,
			Name:   torrent.Filename,
			Hash:   torrent.InfoHash,
			Size:   torrent.Bytes,
		})
	}
	return ingests, nil
}

View File

@@ -31,6 +31,48 @@ type DebridLink struct {
addSamples bool
}
// New builds a DebridLink client from the given debrid configuration:
// an authenticated rate-limited HTTP client, one account per configured
// download API key (keyed by index), and the provider constants.
// The error return is always nil today; it exists so all providers share
// the same constructor signature.
func New(dc config.Debrid) (*DebridLink, error) {
	rl := request.ParseRateLimit(dc.RateLimit)
	headers := map[string]string{
		"Authorization": fmt.Sprintf("Bearer %s", dc.APIKey),
		"Content-Type":  "application/json",
	}
	_log := logger.New(dc.Name)
	client := request.New(
		request.WithHeaders(headers),
		request.WithLogger(_log),
		request.WithRateLimiter(rl),
		request.WithProxy(dc.Proxy),
	)
	// One account per download API key; the map key and Account.ID are the
	// key's position in the configured list.
	accounts := make(map[string]types.Account, len(dc.DownloadAPIKeys))
	for idx, key := range dc.DownloadAPIKeys {
		id := strconv.Itoa(idx)
		accounts[id] = types.Account{
			Name:  key,
			ID:    id,
			Token: key,
		}
	}
	return &DebridLink{
		Name:             "debridlink",
		Host:             "https://debrid-link.com/api/v2",
		APIKey:           dc.APIKey,
		accounts:         accounts,
		DownloadUncached: dc.DownloadUncached,
		client:           client,
		MountPath:        dc.Folder,
		logger:           _log, // reuse the logger built above (matches Torbox's New) instead of allocating a second one
		checkCached:      dc.CheckCached,
		addSamples:       dc.AddSamples,
	}, nil
}
// GetProfile is a stub: profile lookup is not implemented for DebridLink,
// so it always returns (nil, nil). Callers must nil-check the profile
// since the error is nil as well.
func (dl *DebridLink) GetProfile() (*types.Profile, error) {
	return nil, nil
}
func (dl *DebridLink) GetName() string {
return dl.Name
}
@@ -335,44 +377,6 @@ func (dl *DebridLink) GetDownloadUncached() bool {
return dl.DownloadUncached
}
// New builds a DebridLink client from the given debrid configuration.
// NOTE(review): this is the pre-refactor constructor shown as removed in
// this commit's diff; the replacement above returns (*DebridLink, error).
func New(dc config.Debrid) *DebridLink {
	rl := request.ParseRateLimit(dc.RateLimit)
	headers := map[string]string{
		"Authorization": fmt.Sprintf("Bearer %s", dc.APIKey),
		"Content-Type":  "application/json",
	}
	_log := logger.New(dc.Name)
	client := request.New(
		request.WithHeaders(headers),
		request.WithLogger(_log),
		request.WithRateLimiter(rl),
		request.WithProxy(dc.Proxy),
	)
	// One account per configured download API key, keyed by list index.
	accounts := make(map[string]types.Account)
	for idx, key := range dc.DownloadAPIKeys {
		id := strconv.Itoa(idx)
		accounts[id] = types.Account{
			Name:  key,
			ID:    id,
			Token: key,
		}
	}
	return &DebridLink{
		Name:             "debridlink",
		Host:             "https://debrid-link.com/api/v2",
		APIKey:           dc.APIKey,
		accounts:         accounts,
		DownloadUncached: dc.DownloadUncached,
		client:           client,
		MountPath:        dc.Folder,
		logger:           logger.New(dc.Name),
		checkCached:      dc.CheckCached,
		addSamples:       dc.AddSamples,
	}
}
func (dl *DebridLink) GetTorrents() ([]*types.Torrent, error) {
page := 0
perPage := 100

View File

@@ -45,9 +45,10 @@ type RealDebrid struct {
rarSemaphore chan struct{}
checkCached bool
addSamples bool
Profile *types.Profile
}
func New(dc config.Debrid) *RealDebrid {
func New(dc config.Debrid) (*RealDebrid, error) {
rl := request.ParseRateLimit(dc.RateLimit)
headers := map[string]string{
@@ -70,7 +71,7 @@ func New(dc config.Debrid) *RealDebrid {
"Authorization": fmt.Sprintf("Bearer %s", currentDownloadKey),
}
return &RealDebrid{
r := &RealDebrid{
Name: "realdebrid",
Host: "https://api.real-debrid.com/rest/1.0",
APIKey: dc.APIKey,
@@ -99,6 +100,12 @@ func New(dc config.Debrid) *RealDebrid {
checkCached: dc.CheckCached,
addSamples: dc.AddSamples,
}
if _, err := r.GetProfile(); err != nil {
return nil, err
} else {
return r, nil
}
}
func (r *RealDebrid) GetName() string {
@@ -908,3 +915,30 @@ func (r *RealDebrid) DeleteDownloadLink(linkId string) error {
}
return nil
}
// GetProfile fetches the Real-Debrid account profile from the /user
// endpoint, maps it onto the shared types.Profile, and caches the result
// on the receiver so subsequent calls skip the network round trip.
func (r *RealDebrid) GetProfile() (*types.Profile, error) {
	// Serve the cached profile when we already fetched it.
	if r.Profile != nil {
		return r.Profile, nil
	}
	url := fmt.Sprintf("%s/user", r.Host)
	req, err := http.NewRequest(http.MethodGet, url, nil)
	if err != nil {
		return nil, err
	}
	resp, err := r.client.MakeRequest(req)
	if err != nil {
		return nil, err
	}
	var data profileResponse
	// BUG FIX: the unmarshal error was previously discarded and a nil err
	// returned instead; capture and propagate it.
	if err := json.Unmarshal(resp, &data); err != nil {
		return nil, err
	}
	profile := &types.Profile{
		Id:         data.Id,
		Username:   data.Username,
		Email:      data.Email,
		Points:     data.Points,
		Premium:    data.Premium,
		Expiration: data.Expiration,
		Type:       data.Type,
	}
	// BUG FIX: store the profile so the cache check above actually hits.
	r.Profile = profile
	return profile, nil
}

View File

@@ -139,3 +139,15 @@ type ErrorResponse struct {
Error string `json:"error"`
ErrorCode int `json:"error_code"`
}
// profileResponse mirrors the JSON payload returned by the Real-Debrid
// /user endpoint. Only a subset of the fields is copied into
// types.Profile; Locale and Avatar are decoded but currently unused.
type profileResponse struct {
	Id         int64     `json:"id"`
	Username   string    `json:"username"`
	Email      string    `json:"email"`
	Points     int64     `json:"points"`     // fidelity points balance
	Locale     string    `json:"locale"`
	Avatar     string    `json:"avatar"`
	Type       string    `json:"type"`       // account type, e.g. "premium"
	Premium    int       `json:"premium"`    // seconds of premium remaining, per the Real-Debrid API
	Expiration time.Time `json:"expiration"` // premium expiration timestamp
}

View File

@@ -37,7 +37,11 @@ type Torbox struct {
addSamples bool
}
func New(dc config.Debrid) *Torbox {
// GetProfile is a stub: profile lookup is not implemented for Torbox,
// so it always returns (nil, nil). Callers must nil-check the profile
// since the error is nil as well.
func (tb *Torbox) GetProfile() (*types.Profile, error) {
	return nil, nil
}
func New(dc config.Debrid) (*Torbox, error) {
rl := request.ParseRateLimit(dc.RateLimit)
headers := map[string]string{
@@ -73,7 +77,7 @@ func New(dc config.Debrid) *Torbox {
logger: _log,
checkCached: dc.CheckCached,
addSamples: dc.AddSamples,
}
}, nil
}
func (tb *Torbox) GetName() string {

View File

@@ -25,4 +25,5 @@ type Client interface {
DisableAccount(string)
ResetActiveDownloadKeys()
DeleteDownloadLink(linkId string) error
GetProfile() (*Profile, error)
}

View File

@@ -125,3 +125,25 @@ type Account struct {
Name string `json:"name"`
Token string `json:"token"`
}
// IngestData is a minimal torrent summary exposed by the ingest
// endpoints: which debrid service holds it, plus name, info hash, and
// size in bytes.
type IngestData struct {
	Debrid string `json:"debrid"` // debrid provider name the torrent belongs to
	Name   string `json:"name"`
	Hash   string `json:"hash"` // torrent info hash
	Size   int64  `json:"size"` // size in bytes
}
// Profile is the provider-agnostic debrid account summary returned by
// Client.GetProfile. The trailing Library*/Active* fields are not part of
// any provider API response; they are filled in from the local cache by
// the stats handler.
type Profile struct {
	Name       string    `json:"name"` // set by the caller to the configured debrid name
	Id         int64     `json:"id"`
	Username   string    `json:"username"`
	Email      string    `json:"email"`
	Points     int64     `json:"points"`
	Type       string    `json:"type"`
	Premium    int       `json:"premium"`
	Expiration time.Time `json:"expiration"`
	// Cache-derived stats, populated outside the provider clients.
	LibrarySize int `json:"library_size"` // number of cached torrents
	BadTorrents int `json:"bad_torrents"` // entries in the "__bad__" listing
	ActiveLinks int `json:"active_links"` // active download links in the cache
}

View File

@@ -309,70 +309,6 @@ func (q *QBit) createSymlinksWebdav(debridTorrent *debrid.Torrent, rclonePath, t
return symlinkPath, nil
}
// createSymlinks mirrors a debrid torrent's files into the qBittorrent
// download folder as symlinks pointing into the rclone mount. It polls
// the mount directory every 100ms (for up to 30 minutes) until every
// expected file appears, then optionally pre-caches the linked files in
// a background goroutine. It returns the directory holding the symlinks.
func (q *QBit) createSymlinks(debridTorrent *debrid.Torrent, rclonePath, torrentFolder string) (string, error) {
	files := debridTorrent.Files
	symlinkPath := filepath.Join(q.DownloadFolder, debridTorrent.Arr.Name, torrentFolder) // /mnt/symlinks/{category}/MyTVShow/
	err := os.MkdirAll(symlinkPath, os.ModePerm)
	if err != nil {
		return "", fmt.Errorf("failed to create directory: %s: %v", symlinkPath, err)
	}
	// Files still waiting to appear on the mount, keyed by file.Path.
	// NOTE(review): the loop below looks entries up by their directory
	// entry name; that only matches when file.Path is a bare filename —
	// confirm behavior for files nested in subdirectories.
	remainingFiles := make(map[string]debrid.File)
	for _, file := range files {
		remainingFiles[file.Path] = file
	}
	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop()
	timeout := time.After(30 * time.Minute)
	filePaths := make([]string, 0, len(files))
	for len(remainingFiles) > 0 {
		select {
		case <-ticker.C:
			entries, err := os.ReadDir(rclonePath)
			if err != nil {
				// Mount may not be ready yet; try again on the next tick.
				continue
			}
			// Check which files exist in this batch
			for _, entry := range entries {
				filename := entry.Name()
				if file, exists := remainingFiles[filename]; exists {
					fullFilePath := filepath.Join(rclonePath, filename)
					fileSymlinkPath := filepath.Join(symlinkPath, file.Name)
					// An already-existing symlink (os.IsExist) counts as success.
					if err := os.Symlink(fullFilePath, fileSymlinkPath); err != nil && !os.IsExist(err) {
						q.logger.Debug().Msgf("Failed to create symlink: %s: %v", fileSymlinkPath, err)
					} else {
						filePaths = append(filePaths, fileSymlinkPath)
						delete(remainingFiles, filename)
						q.logger.Info().Msgf("File is ready: %s", file.Name)
					}
				}
			}
		case <-timeout:
			q.logger.Warn().Msgf("Timeout waiting for files, %d files still pending", len(remainingFiles))
			return symlinkPath, fmt.Errorf("timeout waiting for files")
		}
	}
	if q.SkipPreCache {
		return symlinkPath, nil
	}
	go func() {
		if err := q.preCacheFile(debridTorrent.Name, filePaths); err != nil {
			q.logger.Error().Msgf("Failed to pre-cache file: %s", err)
		} else {
			q.logger.Trace().Msgf("Pre-cached %d files", len(filePaths))
		}
	}() // Pre-cache the files in the background
	return symlinkPath, nil
}
func (q *QBit) getTorrentPath(rclonePath string, debridTorrent *debrid.Torrent) (string, error) {
for {
torrentPath, err := debridTorrent.GetMountFolder(rclonePath)

View File

@@ -1,159 +0,0 @@
package repair
//func (r *Repair) clean(job *Job) error {
// // Create a new error group
// g, ctx := errgroup.WithContext(context.Background())
//
// uniqueItems := make(map[string]string)
// mu := sync.Mutex{}
//
// // Limit concurrent goroutines
// g.SetLimit(10)
//
// for _, a := range job.Arrs {
// a := a // Capture range variable
// g.Go(func() error {
// // Check if context was canceled
// select {
// case <-ctx.Done():
// return ctx.Err()
// default:
// }
//
// items, err := r.cleanArr(job, a, "")
// if err != nil {
// r.logger.Error().Err(err).Msgf("Error cleaning %s", a)
// return err
// }
//
// // Safely append the found items to the shared slice
// if len(items) > 0 {
// mu.Lock()
// for k, v := range items {
// uniqueItems[k] = v
// }
// mu.Unlock()
// }
//
// return nil
// })
// }
//
// if err := g.Wait(); err != nil {
// return err
// }
//
// if len(uniqueItems) == 0 {
// job.CompletedAt = time.Now()
// job.Status = JobCompleted
//
// go func() {
// if err := request.SendDiscordMessage("repair_clean_complete", "success", job.discordContext()); err != nil {
// r.logger.Error().Msgf("Error sending discord message: %v", err)
// }
// }()
//
// return nil
// }
//
// cache := r.deb.Caches["realdebrid"]
// if cache == nil {
// return fmt.Errorf("cache not found")
// }
// torrents := cache.GetTorrents()
//
// dangling := make([]string, 0)
// for _, t := range torrents {
// if _, ok := uniqueItems[t.Name]; !ok {
// dangling = append(dangling, t.Id)
// }
// }
//
// r.logger.Info().Msgf("Found %d dilapidated items", len(dangling))
//
// if len(dangling) == 0 {
// job.CompletedAt = time.Now()
// job.Status = JobCompleted
// return nil
// }
//
// client := r.deb.Clients["realdebrid"]
// if client == nil {
// return fmt.Errorf("client not found")
// }
// for _, id := range dangling {
// err := client.DeleteTorrent(id)
// if err != nil {
// return err
// }
// }
//
// return nil
//}
//
//func (r *Repair) cleanArr(j *Job, _arr string, tmdbId string) (map[string]string, error) {
// uniqueItems := make(map[string]string)
// a := r.arrs.Get(_arr)
//
// r.logger.Info().Msgf("Starting repair for %s", a.Name)
// media, err := a.GetMedia(tmdbId)
// if err != nil {
// r.logger.Info().Msgf("Failed to get %s media: %v", a.Name, err)
// return uniqueItems, err
// }
//
// // Create a new error group
// g, ctx := errgroup.WithContext(context.Background())
//
// mu := sync.Mutex{}
//
// // Limit concurrent goroutines
// g.SetLimit(runtime.NumCPU() * 4)
//
// for _, m := range media {
// m := m // Create a new variable scoped to the loop iteration
// g.Go(func() error {
// // Check if context was canceled
// select {
// case <-ctx.Done():
// return ctx.Err()
// default:
// }
//
// u := r.getUniquePaths(m)
// for k, v := range u {
// mu.Lock()
// uniqueItems[k] = v
// mu.Unlock()
// }
// return nil
// })
// }
//
// if err := g.Wait(); err != nil {
// return uniqueItems, err
// }
//
// r.logger.Info().Msgf("Repair completed for %s. %d unique items", a.Name, len(uniqueItems))
// return uniqueItems, nil
//}
//func (r *Repair) getUniquePaths(media arr.Content) map[string]string {
// // Use zurg setup to check file availability with zurg
// // This reduces bandwidth usage significantly
//
// uniqueParents := make(map[string]string)
// files := media.Files
// for _, file := range files {
// target := getSymlinkTarget(file.Path)
// if target != "" {
// file.IsSymlink = true
// dir, f := filepath.Split(target)
// parent := filepath.Base(filepath.Clean(dir))
// // Set target path folder/file.mkv
// file.TargetPath = f
// uniqueParents[parent] = target
// }
// }
// return uniqueParents
//}

116
pkg/server/debug.go Normal file
View File

@@ -0,0 +1,116 @@
package server
import (
"fmt"
"github.com/go-chi/chi/v5"
"github.com/sirrobot01/decypharr/internal/request"
debridTypes "github.com/sirrobot01/decypharr/pkg/debrid/types"
"github.com/sirrobot01/decypharr/pkg/service"
"net/http"
"runtime"
)
// handleIngests returns ingest records aggregated across every debrid
// cache as a JSON array. Responds 500 when the debrid service is disabled
// or any cache fails to produce its ingest list.
func (s *Server) handleIngests(w http.ResponseWriter, r *http.Request) {
	ingests := make([]debridTypes.IngestData, 0)
	svc := service.GetService()
	if svc.Debrid == nil {
		http.Error(w, "Debrid service is not enabled", http.StatusInternalServerError)
		return
	}
	for _, cache := range svc.Debrid.Caches {
		if cache == nil {
			s.logger.Error().Msg("Debrid cache is nil, skipping")
			continue
		}
		data, err := cache.GetIngests()
		if err != nil {
			s.logger.Error().Err(err).Msg("Failed to get ingests from debrid cache")
			http.Error(w, "Failed to get ingests: "+err.Error(), http.StatusInternalServerError)
			return
		}
		ingests = append(ingests, data...)
	}
	// http.StatusOK instead of bare 200, matching handleStats.
	request.JSONResponse(w, ingests, http.StatusOK)
}
// handleIngestsByDebrid returns the ingest records for a single debrid
// service, identified by the {debrid} URL parameter. Responds 400 when
// the name is missing, 404 when no cache exists for it, and 500 on
// service/ingest failures.
func (s *Server) handleIngestsByDebrid(w http.ResponseWriter, r *http.Request) {
	debridName := chi.URLParam(r, "debrid")
	if debridName == "" {
		http.Error(w, "Debrid name is required", http.StatusBadRequest)
		return
	}
	svc := service.GetService()
	if svc.Debrid == nil {
		http.Error(w, "Debrid service is not enabled", http.StatusInternalServerError)
		return
	}
	cache, exists := svc.Debrid.Caches[debridName]
	if !exists {
		http.Error(w, "Debrid cache not found: "+debridName, http.StatusNotFound)
		return
	}
	data, err := cache.GetIngests()
	if err != nil {
		s.logger.Error().Err(err).Msg("Failed to get ingests from debrid cache")
		http.Error(w, "Failed to get ingests: "+err.Error(), http.StatusInternalServerError)
		return
	}
	// http.StatusOK instead of bare 200, matching handleStats.
	request.JSONResponse(w, data, http.StatusOK)
}
// handleStats serves process runtime statistics (memory, GC, goroutines,
// platform info) and, when the debrid service is enabled, a per-client
// profile summary enriched with cache-derived torrent/link counts.
func (s *Server) handleStats(w http.ResponseWriter, r *http.Request) {
	var memStats runtime.MemStats
	runtime.ReadMemStats(&memStats)
	stats := map[string]any{
		// Memory stats
		"heap_alloc_mb":  fmt.Sprintf("%.2fMB", float64(memStats.HeapAlloc)/1024/1024),
		"total_alloc_mb": fmt.Sprintf("%.2fMB", float64(memStats.TotalAlloc)/1024/1024),
		"memory_used":    fmt.Sprintf("%.2fMB", float64(memStats.Sys)/1024/1024),
		// GC stats
		"gc_cycles": memStats.NumGC,
		// Goroutine stats
		"goroutines": runtime.NumGoroutine(),
		// System info
		"num_cpu": runtime.NumCPU(),
		// OS info
		"os":         runtime.GOOS,
		"arch":       runtime.GOARCH,
		"go_version": runtime.Version(),
	}
	svc := service.GetService()
	if svc.Debrid == nil {
		request.JSONResponse(w, stats, http.StatusOK)
		return
	}
	clients := svc.Debrid.GetDebrids()
	profiles := make([]*debridTypes.Profile, 0, len(clients))
	for debridName, client := range clients {
		profile, err := client.GetProfile()
		// BUG FIX: check the error (and a nil profile — stub providers
		// return (nil, nil)) BEFORE touching profile; the original set
		// profile.Name first, panicking on a nil profile.
		if err != nil {
			s.logger.Error().Err(err).Str("debrid", debridName).Msg("Failed to get debrid profile")
			continue
		}
		if profile == nil {
			continue
		}
		profile.Name = debridName
		if cache, ok := svc.Debrid.Caches[debridName]; ok {
			// Enrich the profile with torrent data from the local cache.
			profile.LibrarySize = len(cache.GetTorrents())
			profile.BadTorrents = len(cache.GetListing("__bad__"))
			profile.ActiveLinks = cache.GetTotalActiveDownloadLinks()
		}
		profiles = append(profiles, profile)
	}
	stats["debrids"] = profiles
	request.JSONResponse(w, stats, http.StatusOK)
}

View File

@@ -9,12 +9,10 @@ import (
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/logger"
"github.com/sirrobot01/decypharr/internal/request"
"io"
"net/http"
"net/url"
"os"
"runtime"
)
type Server struct {
@@ -45,8 +43,12 @@ func New(handlers map[string]http.Handler) *Server {
//logs
r.Get("/logs", s.getLogs)
//stats
r.Get("/stats", s.getStats)
//debugs
r.Route("/debug", func(r chi.Router) {
r.Get("/stats", s.handleStats)
r.Get("/ingests", s.handleIngests)
r.Get("/ingests/{debrid}", s.handleIngestsByDebrid)
})
//webhooks
r.Post("/webhooks/tautulli", s.handleTautulli)
@@ -108,29 +110,3 @@ func (s *Server) getLogs(w http.ResponseWriter, r *http.Request) {
return
}
}
// getStats serves process-level runtime statistics (memory usage, GC
// cycle count, goroutine count, and platform info) as a JSON object.
// NOTE(review): shown as removed in this commit; the /debug/stats route
// now serves the equivalent (extended) payload.
func (s *Server) getStats(w http.ResponseWriter, r *http.Request) {
	var memStats runtime.MemStats
	runtime.ReadMemStats(&memStats)
	stats := map[string]interface{}{
		// Memory stats
		"heap_alloc_mb":  fmt.Sprintf("%.2fMB", float64(memStats.HeapAlloc)/1024/1024),
		"total_alloc_mb": fmt.Sprintf("%.2fMB", float64(memStats.TotalAlloc)/1024/1024),
		"memory_used":    fmt.Sprintf("%.2fMB", float64(memStats.Sys)/1024/1024),
		// GC stats
		"gc_cycles": memStats.NumGC,
		// Goroutine stats
		"goroutines": runtime.NumGoroutine(),
		// System info
		"num_cpu": runtime.NumCPU(),
		// OS info
		"os":         runtime.GOOS,
		"arch":       runtime.GOARCH,
		"go_version": runtime.Version(),
	}
	request.JSONResponse(w, stats, http.StatusOK)
}

View File

@@ -37,12 +37,6 @@ func Reset() {
if instance.Debrid != nil {
instance.Debrid.Reset()
}
if instance.Arr != nil {
//instance.Arr.Reset()
}
if instance.Repair != nil {
//instance.Repair.Reset()
}
}
once = sync.Once{}
instance = nil

View File

@@ -169,7 +169,7 @@
<i class="bi bi-sun-fill" id="lightIcon"></i>
<i class="bi bi-moon-fill d-none" id="darkIcon"></i>
</div>
<a href="{{.URLBase}}stats" class="me-2">
<a href="{{.URLBase}}debug/stats" class="me-2">
<i class="bi bi-bar-chart-line me-1"></i>Stats
</a>
<span class="badge bg-primary" id="version-badge">Loading...</span>

View File

@@ -31,7 +31,6 @@ type File struct {
cache *debrid.Cache
fileId string
torrentName string
torrentId string
modTime time.Time
@@ -47,7 +46,6 @@ type File struct {
downloadLink string
link string
canDelete bool
}
// File interface implementations for File

View File

@@ -326,7 +326,6 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
},
}
handler.ServeHTTP(w, r)
return
}
func getContentType(fileName string) string {

View File

@@ -11,16 +11,6 @@ import (
"time"
)
// getName: Returns the torrent name and filename from the path
func getName(rootDir, path string) (string, string) {
path = strings.TrimPrefix(path, rootDir)
parts := strings.Split(strings.TrimPrefix(path, string(os.PathSeparator)), string(os.PathSeparator))
if len(parts) < 2 {
return "", ""
}
return parts[1], strings.Join(parts[2:], string(os.PathSeparator)) // Note the change from [0] to [1]
}
func isValidURL(str string) bool {
u, err := url.Parse(str)
// A valid URL should parse without error, and have a non-empty scheme and host.