- Add a limit to the number of goroutines

- Add goroutine stats to logs
- Fix issues with repair
This commit is contained in:
Mukhtar Akere
2025-03-27 08:24:40 +01:00
parent 7bd38736b1
commit d49fbea60f
11 changed files with 163 additions and 139 deletions

View File

@@ -24,9 +24,11 @@ import (
type WebDavFolderNaming string
const (
WebDavUseFileName WebDavFolderNaming = "filename"
WebDavUseOriginalName WebDavFolderNaming = "original"
WebDavUseID WebDavFolderNaming = "use_id"
WebDavUseFileNameNoExt WebDavFolderNaming = "filename_no_ext"
WebDavUseOriginalNameNoExt WebDavFolderNaming = "original_no_ext"
WebDavUseID WebDavFolderNaming = "id"
)
type PropfindResponse struct {
@@ -78,6 +80,8 @@ type Cache struct {
listingRefreshMu sync.RWMutex // for refreshing torrents
downloadLinksRefreshMu sync.RWMutex // for refreshing download links
torrentsRefreshMu sync.RWMutex // for refreshing torrents
saveSemaphore chan struct{}
}
func NewCache(dc config.Debrid, client types.Client) *Cache {
@@ -99,7 +103,7 @@ func NewCache(dc config.Debrid, client types.Client) *Cache {
torrents: xsync.NewMapOf[string, *CachedTorrent](),
torrentsNames: xsync.NewMapOf[string, *CachedTorrent](),
client: client,
logger: logger.NewLogger(fmt.Sprintf("%s-cache", client.GetName())),
logger: logger.NewLogger(fmt.Sprintf("%s-webdav", client.GetName())),
workers: 200,
downloadLinks: xsync.NewMapOf[string, downloadLinkCache](),
torrentRefreshInterval: torrentRefreshInterval,
@@ -108,17 +112,25 @@ func NewCache(dc config.Debrid, client types.Client) *Cache {
folderNaming: WebDavFolderNaming(dc.WebDavFolderNaming),
autoExpiresLinksAfter: autoExpiresLinksAfter,
repairsInProgress: xsync.NewMapOf[string, bool](),
saveSemaphore: make(chan struct{}, 10),
}
}
func (c *Cache) GetTorrentFolder(torrent *types.Torrent) string {
folderName := torrent.Filename
if c.folderNaming == WebDavUseID {
folderName = torrent.Id
} else if c.folderNaming == WebDavUseOriginalNameNoExt {
folderName = utils.RemoveExtension(folderName)
switch c.folderNaming {
case WebDavUseFileName:
return torrent.Filename
case WebDavUseOriginalName:
return torrent.OriginalFilename
case WebDavUseFileNameNoExt:
return utils.RemoveExtension(torrent.Filename)
case WebDavUseOriginalNameNoExt:
return utils.RemoveExtension(torrent.OriginalFilename)
case WebDavUseID:
return torrent.Id
default:
return torrent.Filename
}
return folderName
}
func (c *Cache) setTorrent(t *CachedTorrent) {
@@ -126,11 +138,7 @@ func (c *Cache) setTorrent(t *CachedTorrent) {
c.torrentsNames.Store(c.GetTorrentFolder(t.Torrent), t)
go func() {
if err := c.SaveTorrent(t); err != nil {
c.logger.Debug().Err(err).Msgf("Failed to save torrent %s", t.Id)
}
}()
c.SaveTorrent(t)
}
func (c *Cache) setTorrents(torrents map[string]*CachedTorrent) {
@@ -141,11 +149,7 @@ func (c *Cache) setTorrents(torrents map[string]*CachedTorrent) {
c.refreshListings()
go func() {
if err := c.SaveTorrents(); err != nil {
c.logger.Debug().Err(err).Msgf("Failed to save torrents")
}
}()
c.SaveTorrents()
}
func (c *Cache) GetListing() []os.FileInfo {
@@ -260,20 +264,31 @@ func (c *Cache) GetTorrentByName(name string) *CachedTorrent {
return nil
}
func (c *Cache) SaveTorrents() error {
func (c *Cache) SaveTorrents() {
c.torrents.Range(func(key string, value *CachedTorrent) bool {
if err := c.SaveTorrent(value); err != nil {
c.logger.Debug().Err(err).Msgf("Failed to save torrent %s", key)
}
c.SaveTorrent(value)
return true
})
return nil
}
func (c *Cache) SaveTorrent(ct *CachedTorrent) error {
func (c *Cache) SaveTorrent(ct *CachedTorrent) {
// Try to acquire semaphore without blocking
select {
case c.saveSemaphore <- struct{}{}:
go func() {
defer func() { <-c.saveSemaphore }()
c.saveTorrent(ct)
}()
default:
c.saveTorrent(ct)
}
}
func (c *Cache) saveTorrent(ct *CachedTorrent) {
data, err := json.MarshalIndent(ct, "", " ")
if err != nil {
return fmt.Errorf("failed to marshal torrent: %w", err)
c.logger.Debug().Err(err).Msgf("Failed to marshal torrent: %s", ct.Id)
return
}
fileName := ct.Torrent.Id + ".json"
@@ -282,20 +297,25 @@ func (c *Cache) SaveTorrent(ct *CachedTorrent) error {
f, err := os.Create(tmpFile)
if err != nil {
return fmt.Errorf("failed to create temp file: %w", err)
c.logger.Debug().Err(err).Msgf("Failed to create file: %s", tmpFile)
return
}
defer f.Close()
w := bufio.NewWriter(f)
if _, err := w.Write(data); err != nil {
return fmt.Errorf("failed to write data: %w", err)
c.logger.Debug().Err(err).Msgf("Failed to write data: %s", tmpFile)
return
}
if err := w.Flush(); err != nil {
return fmt.Errorf("failed to flush data: %w", err)
c.logger.Debug().Err(err).Msgf("Failed to flush data: %s", tmpFile)
}
return os.Rename(tmpFile, filePath)
if err := os.Rename(tmpFile, filePath); err != nil {
c.logger.Debug().Err(err).Msgf("Failed to rename file: %s", tmpFile)
}
return
}
func (c *Cache) Sync() error {
@@ -508,11 +528,7 @@ func (c *Cache) GenerateDownloadLinks(t *CachedTorrent) {
c.updateDownloadLink(file.Link, file.DownloadLink)
}
go func() {
if err := c.SaveTorrent(t); err != nil {
c.logger.Debug().Err(err).Msgf("Failed to save torrent %s", t.Id)
}
}()
c.SaveTorrent(t)
}
func (c *Cache) AddTorrent(t *types.Torrent) error {
@@ -559,7 +575,7 @@ func (c *Cache) DeleteTorrent(id string) {
if t, ok := c.torrents.Load(id); ok {
c.torrents.Delete(id)
c.torrentsNames.Delete(c.GetTorrentFolder(t.Torrent))
go c.removeFromDB(id)
c.removeFromDB(id)
c.refreshListings()
}
}
@@ -570,7 +586,7 @@ func (c *Cache) DeleteTorrents(ids []string) {
if t, ok := c.torrents.Load(id); ok {
c.torrents.Delete(id)
c.torrentsNames.Delete(c.GetTorrentFolder(t.Torrent))
go c.removeFromDB(id)
c.removeFromDB(id)
}
}
c.refreshListings()
@@ -585,6 +601,10 @@ func (c *Cache) removeFromDB(torrentId string) {
func (c *Cache) OnRemove(torrentId string) {
c.logger.Debug().Msgf("OnRemove triggered for %s", torrentId)
go c.DeleteTorrent(torrentId)
go c.refreshListings()
c.DeleteTorrent(torrentId)
c.refreshListings()
}
func (c *Cache) GetLogger() zerolog.Logger {
return c.logger
}

View File

@@ -1,10 +1,12 @@
package debrid
import (
"context"
"fmt"
"github.com/sirrobot01/debrid-blackhole/internal/config"
"github.com/sirrobot01/debrid-blackhole/internal/request"
"github.com/sirrobot01/debrid-blackhole/pkg/debrid/types"
"golang.org/x/sync/errgroup"
"io"
"net/http"
"os"
@@ -12,7 +14,6 @@ import (
"slices"
"sort"
"strings"
"sync"
"time"
)
@@ -148,20 +149,28 @@ func (c *Cache) refreshTorrents() {
}
c.logger.Info().Msgf("Found %d new torrents", len(newTorrents))
// No need for a complex sync process, just add the new torrents
wg := sync.WaitGroup{}
wg.Add(len(newTorrents))
g, ctx := errgroup.WithContext(context.Background())
for _, t := range newTorrents {
// ProcessTorrent is concurrent safe
go func() {
defer wg.Done()
if err := c.ProcessTorrent(t, true); err != nil {
c.logger.Info().Err(err).Msg("Failed to process torrent")
t := t
g.Go(func() error {
select {
case <-ctx.Done():
return ctx.Err()
default:
}
}()
if err := c.ProcessTorrent(t, true); err != nil {
return err
}
return nil
})
}
wg.Wait()
if err := g.Wait(); err != nil {
c.logger.Debug().Err(err).Msg("Failed to process new torrents")
}
}
func (c *Cache) RefreshRclone() error {

View File

@@ -41,22 +41,15 @@ func (c *Cache) IsTorrentBroken(t *CachedTorrent, filenames []string) bool {
break
} else {
// Check if file.Link not in the downloadLink Cache
if _, ok := c.downloadLinks.Load(f.Link); !ok {
// File not in cache
// Check link
if err := c.client.CheckLink(f.Link); err != nil {
if errors.Is(err, request.ErrLinkBroken) {
isBroken = true
break
} else {
// This might just be a temporary error
}
if err := c.client.CheckLink(f.Link); err != nil {
if errors.Is(err, request.ErrLinkBroken) {
isBroken = true
break
} else {
// Generate a new download link?
// This might just be a temporary error
}
} else {
// Link is in cache
// We might skip checking for now, it seems rd removes uncached links
// Generate a new download link?
}
}
}

View File

@@ -445,35 +445,20 @@ func (r *RealDebrid) GetTorrents() ([]*types.Torrent, error) {
}
// Prepare for concurrent fetching
var wg sync.WaitGroup
var mu sync.Mutex
var fetchError error
// Calculate how many more requests we need
batchCount := (remaining + limit - 1) / limit // ceiling division
for i := 1; i <= batchCount; i++ {
wg.Add(1)
go func(batchOffset int) {
defer wg.Done()
_, batch, err := r.getTorrents(batchOffset, limit)
if err != nil {
mu.Lock()
fetchError = err
mu.Unlock()
return
}
mu.Lock()
allTorrents = append(allTorrents, batch...)
mu.Unlock()
}(i * limit)
_, batch, err := r.getTorrents(i*limit, limit)
if err != nil {
fetchError = err
continue
}
allTorrents = append(allTorrents, batch...)
}
// Wait for all fetches to complete
wg.Wait()
if fetchError != nil {
return nil, fetchError
}