- Add support for multi-season imports

- Improve in-memory storage, which reduces memory usage
- Fix issues with rclone integration
This commit is contained in:
Mukhtar Akere
2025-08-24 16:25:37 +01:00
parent f8667938b6
commit 618eb73067
10 changed files with 979 additions and 418 deletions

View File

@@ -979,8 +979,8 @@ func (r *RealDebrid) SyncAccounts() error {
if len(r.accounts.Active()) == 0 {
return nil
}
for idx, account := range r.accounts.Active() {
if err := r.syncAccount(idx, account); err != nil {
for _, account := range r.accounts.All() {
if err := r.syncAccount(account); err != nil {
r.logger.Error().Err(err).Msgf("Error syncing account %s", account.Username)
continue // Skip this account and continue with the next
}
@@ -988,7 +988,7 @@ func (r *RealDebrid) SyncAccounts() error {
return nil
}
func (r *RealDebrid) syncAccount(index int, account *types.Account) error {
func (r *RealDebrid) syncAccount(account *types.Account) error {
if account.Token == "" {
return fmt.Errorf("account %s has no token", account.Username)
}
@@ -1038,6 +1038,6 @@ func (r *RealDebrid) syncAccount(index int, account *types.Account) error {
account.TrafficUsed = todayData.Bytes
}
r.accounts.Update(index, account)
r.accounts.Update(account)
return nil
}

View File

@@ -40,19 +40,27 @@ type directoryFilter struct {
ageThreshold time.Duration // only for last_added
}
type torrents struct {
sync.RWMutex
storage map[string]CachedTorrent // id to CachedTorrent
byName map[string]string // name to id
}
type folders struct {
sync.RWMutex
listing map[string][]os.FileInfo // folder name to file listing
}
type CachedTorrentEntry struct {
CachedTorrent
deleted bool // Tombstone flag
}
type torrentCache struct {
torrents torrents
mu sync.RWMutex
torrents []CachedTorrentEntry // Changed to store entries with tombstone
// Lookup indices
idIndex map[string]int
nameIndex map[string]int
// Compaction tracking
deletedCount atomic.Int32
compactThreshold int // Trigger compaction when deletedCount exceeds this
listing atomic.Value
folders folders
@@ -69,12 +77,11 @@ type sortableFile struct {
}
func newTorrentCache(dirFilters map[string][]directoryFilter) *torrentCache {
tc := &torrentCache{
torrents: torrents{
storage: make(map[string]CachedTorrent), // id to CachedTorrent
byName: make(map[string]string),
},
torrents: []CachedTorrentEntry{},
idIndex: make(map[string]int),
nameIndex: make(map[string]int),
compactThreshold: 100, // Compact when 100+ deleted entries
folders: folders{
listing: make(map[string][]os.FileInfo),
},
@@ -87,10 +94,12 @@ func newTorrentCache(dirFilters map[string][]directoryFilter) *torrentCache {
}
func (tc *torrentCache) reset() {
tc.torrents.Lock()
tc.torrents.byName = make(map[string]string)
tc.torrents.storage = make(map[string]CachedTorrent) // Reset the storage map
tc.torrents.Unlock()
tc.mu.Lock()
tc.torrents = tc.torrents[:0] // Clear the slice
tc.idIndex = make(map[string]int) // Reset the ID index
tc.nameIndex = make(map[string]int) // Reset the name index
tc.deletedCount.Store(0)
tc.mu.Unlock()
// reset the sorted listing
tc.sortNeeded.Store(false)
@@ -103,70 +112,183 @@ func (tc *torrentCache) reset() {
}
func (tc *torrentCache) getByID(id string) (CachedTorrent, bool) {
tc.torrents.RLock()
defer tc.torrents.RUnlock()
torrent, exists := tc.torrents.storage[id]
return torrent, exists
tc.mu.RLock()
defer tc.mu.RUnlock()
if index, exists := tc.idIndex[id]; exists && index < len(tc.torrents) {
entry := tc.torrents[index]
if !entry.deleted {
return entry.CachedTorrent, true
}
}
return CachedTorrent{}, false
}
func (tc *torrentCache) getByName(name string) (CachedTorrent, bool) {
tc.torrents.RLock()
defer tc.torrents.RUnlock()
torrentID, exists := tc.torrents.byName[name]
if !exists {
return CachedTorrent{}, false
tc.mu.RLock()
defer tc.mu.RUnlock()
if index, exists := tc.nameIndex[name]; exists && index < len(tc.torrents) {
entry := tc.torrents[index]
if !entry.deleted {
return entry.CachedTorrent, true
}
}
torrent, exists := tc.torrents.storage[torrentID]
return torrent, exists
return CachedTorrent{}, false
}
func (tc *torrentCache) set(name string, torrent CachedTorrent) {
tc.torrents.Lock()
// Set the id first
tc.mu.Lock()
defer tc.mu.Unlock()
tc.torrents.byName[name] = torrent.Id
tc.torrents.storage[torrent.Id] = torrent
tc.torrents.Unlock()
// Check if this torrent already exists (update case)
if existingIndex, exists := tc.idIndex[torrent.Id]; exists && existingIndex < len(tc.torrents) {
if !tc.torrents[existingIndex].deleted {
// Update existing entry
tc.torrents[existingIndex].CachedTorrent = torrent
tc.sortNeeded.Store(true)
return
}
}
// Add new torrent
entry := CachedTorrentEntry{
CachedTorrent: torrent,
deleted: false,
}
tc.torrents = append(tc.torrents, entry)
index := len(tc.torrents) - 1
tc.idIndex[torrent.Id] = index
tc.nameIndex[name] = index
tc.sortNeeded.Store(true)
}
func (tc *torrentCache) getListing() []os.FileInfo {
// Fast path: if we have a sorted list and no changes since last sort
if !tc.sortNeeded.Load() {
return tc.listing.Load().([]os.FileInfo)
}
func (tc *torrentCache) removeId(id string) {
tc.mu.Lock()
defer tc.mu.Unlock()
// Slow path: need to sort
tc.refreshListing()
return tc.listing.Load().([]os.FileInfo)
if index, exists := tc.idIndex[id]; exists && index < len(tc.torrents) {
if !tc.torrents[index].deleted {
// Mark as deleted (tombstone)
tc.torrents[index].deleted = true
tc.deletedCount.Add(1)
// Remove from indices
delete(tc.idIndex, id)
// Find and remove from name index
for name, idx := range tc.nameIndex {
if idx == index {
delete(tc.nameIndex, name)
break
}
}
tc.sortNeeded.Store(true)
// Trigger compaction if threshold exceeded
if tc.deletedCount.Load() > int32(tc.compactThreshold) {
go tc.compact()
}
}
}
}
func (tc *torrentCache) getFolderListing(folderName string) []os.FileInfo {
tc.folders.RLock()
defer tc.folders.RUnlock()
if folderName == "" {
return tc.getListing()
func (tc *torrentCache) remove(name string) {
tc.mu.Lock()
defer tc.mu.Unlock()
if index, exists := tc.nameIndex[name]; exists && index < len(tc.torrents) {
if !tc.torrents[index].deleted {
// Mark as deleted (tombstone)
torrentID := tc.torrents[index].CachedTorrent.Id
tc.torrents[index].deleted = true
tc.deletedCount.Add(1)
// Remove from indices
delete(tc.nameIndex, name)
delete(tc.idIndex, torrentID)
tc.sortNeeded.Store(true)
// Trigger compaction if threshold exceeded
if tc.deletedCount.Load() > int32(tc.compactThreshold) {
go tc.compact()
}
}
}
if folder, ok := tc.folders.listing[folderName]; ok {
return folder
}
// Compact removes tombstoned entries and rebuilds indices
func (tc *torrentCache) compact() {
tc.mu.Lock()
defer tc.mu.Unlock()
deletedCount := tc.deletedCount.Load()
if deletedCount == 0 {
return // Nothing to compact
}
// If folder not found, return empty slice
return []os.FileInfo{}
// Create new slice with only non-deleted entries
newTorrents := make([]CachedTorrentEntry, 0, len(tc.torrents)-int(deletedCount))
newIdIndex := make(map[string]int, len(tc.idIndex))
newNameIndex := make(map[string]int, len(tc.nameIndex))
// Copy non-deleted entries
for oldIndex, entry := range tc.torrents {
if !entry.deleted {
newIndex := len(newTorrents)
newTorrents = append(newTorrents, entry)
// Find the name for this torrent (reverse lookup)
for name, nameIndex := range tc.nameIndex {
if nameIndex == oldIndex {
newNameIndex[name] = newIndex
break
}
}
newIdIndex[entry.CachedTorrent.Id] = newIndex
}
}
// Replace old data with compacted data
tc.torrents = newTorrents
tc.idIndex = newIdIndex
tc.nameIndex = newNameIndex
tc.deletedCount.Store(0)
tc.sortNeeded.Store(true)
}
// ForceCompact immediately compacts the cache, removing all tombstoned
// entries regardless of the compaction threshold.
func (tc *torrentCache) ForceCompact() {
	tc.compact()
}
// GetStats reports cache occupancy under a read lock: total entries in the
// backing slice (live + tombstoned), the number of live (active) entries,
// and the number of tombstoned (deleted) entries awaiting compaction.
func (tc *torrentCache) GetStats() (total, active, deleted int) {
	tc.mu.RLock()
	defer tc.mu.RUnlock()
	total = len(tc.torrents)
	// deletedCount is atomic, but reading it under the same RLock keeps it
	// consistent with len(tc.torrents) relative to a concurrent compact().
	deleted = int(tc.deletedCount.Load())
	active = total - deleted
	return total, active, deleted
}
func (tc *torrentCache) refreshListing() {
tc.torrents.RLock()
all := make([]sortableFile, 0, len(tc.torrents.byName))
for name, torrentID := range tc.torrents.byName {
t, exists := tc.torrents.storage[torrentID]
if !exists {
continue // Skip if torrent not found
tc.mu.RLock()
all := make([]sortableFile, 0, len(tc.nameIndex))
for name, index := range tc.nameIndex {
if index < len(tc.torrents) && !tc.torrents[index].deleted {
t := tc.torrents[index].CachedTorrent
all = append(all, sortableFile{t.Id, name, t.AddedOn, t.Bytes, t.Bad})
}
all = append(all, sortableFile{t.Id, name, t.AddedOn, t.Bytes, t.Bad})
}
tc.sortNeeded.Store(false)
tc.torrents.RUnlock()
tc.mu.RUnlock()
sort.Slice(all, func(i, j int) bool {
if all[i].name != all[j].name {
@@ -242,8 +364,31 @@ func (tc *torrentCache) refreshListing() {
wg.Wait()
}
func (tc *torrentCache) torrentMatchDirectory(filters []directoryFilter, file sortableFile, now time.Time) bool {
// getListing returns the sorted root directory listing of all cached
// torrents, rebuilding it only when the cache changed since the last sort.
func (tc *torrentCache) getListing() []os.FileInfo {
	// Fast path: if we have a sorted list and no changes since last sort
	if !tc.sortNeeded.Load() {
		return tc.listing.Load().([]os.FileInfo)
	}

	// Slow path: need to sort
	tc.refreshListing()
	return tc.listing.Load().([]os.FileInfo)
}
// getFolderListing returns the cached file listing for a virtual folder.
// An empty folder name means the root listing; an unknown folder yields an
// empty (non-nil) slice rather than an error.
func (tc *torrentCache) getFolderListing(folderName string) []os.FileInfo {
	tc.folders.RLock()
	defer tc.folders.RUnlock()
	if folderName == "" {
		return tc.getListing()
	}
	if folder, ok := tc.folders.listing[folderName]; ok {
		return folder
	}
	// If folder not found, return empty slice
	return []os.FileInfo{}
}
func (tc *torrentCache) torrentMatchDirectory(filters []directoryFilter, file sortableFile, now time.Time) bool {
torrentName := strings.ToLower(file.name)
for _, filter := range filters {
matched := false
@@ -286,55 +431,46 @@ func (tc *torrentCache) torrentMatchDirectory(filters []directoryFilter, file so
}
func (tc *torrentCache) getAll() map[string]CachedTorrent {
tc.torrents.RLock()
defer tc.torrents.RUnlock()
result := make(map[string]CachedTorrent, len(tc.torrents.storage))
for torrentID, torrent := range tc.torrents.storage {
result[torrentID] = torrent
tc.mu.RLock()
defer tc.mu.RUnlock()
result := make(map[string]CachedTorrent)
for _, entry := range tc.torrents {
if !entry.deleted {
result[entry.CachedTorrent.Id] = entry.CachedTorrent
}
}
return result
}
func (tc *torrentCache) getAllCount() int {
tc.torrents.RLock()
defer tc.torrents.RUnlock()
return len(tc.torrents.storage)
tc.mu.RLock()
defer tc.mu.RUnlock()
return len(tc.torrents) - int(tc.deletedCount.Load())
}
func (tc *torrentCache) getAllByName() map[string]CachedTorrent {
tc.torrents.RLock()
defer tc.torrents.RUnlock()
results := make(map[string]CachedTorrent, len(tc.torrents.byName))
for name, torrentID := range tc.torrents.byName {
torrent, exists := tc.torrents.storage[torrentID]
if !exists {
continue // Skip if torrent not found
tc.mu.RLock()
defer tc.mu.RUnlock()
results := make(map[string]CachedTorrent, len(tc.nameIndex))
for name, index := range tc.nameIndex {
if index < len(tc.torrents) && !tc.torrents[index].deleted {
results[name] = tc.torrents[index].CachedTorrent
}
results[name] = torrent
}
return results
}
func (tc *torrentCache) getIdMaps() map[string]struct{} {
tc.torrents.RLock()
defer tc.torrents.RUnlock()
res := make(map[string]struct{}, len(tc.torrents.storage))
for id := range tc.torrents.storage {
res[id] = struct{}{}
tc.mu.RLock()
defer tc.mu.RUnlock()
res := make(map[string]struct{}, len(tc.idIndex))
for id, index := range tc.idIndex {
if index < len(tc.torrents) && !tc.torrents[index].deleted {
res[id] = struct{}{}
}
}
return res
}
func (tc *torrentCache) removeId(id string) {
tc.torrents.Lock()
defer tc.torrents.Unlock()
delete(tc.torrents.storage, id)
tc.sortNeeded.Store(true)
}
func (tc *torrentCache) remove(name string) {
tc.torrents.Lock()
defer tc.torrents.Unlock()
delete(tc.torrents.byName, name)
tc.sortNeeded.Store(true)
}

View File

@@ -2,34 +2,33 @@ package types
import (
"github.com/sirrobot01/decypharr/internal/config"
"slices"
"sync"
"time"
)
type Accounts struct {
current *Account
accounts []*Account
mu sync.RWMutex
accounts sync.Map // map[string]*Account // key is token
}
func NewAccounts(debridConf config.Debrid) *Accounts {
accounts := make([]*Account, 0)
a := &Accounts{
accounts: sync.Map{},
}
var current *Account
for idx, token := range debridConf.DownloadAPIKeys {
if token == "" {
continue
}
account := newAccount(debridConf.Name, token, idx)
accounts = append(accounts, account)
}
var current *Account
if len(accounts) > 0 {
current = accounts[0]
}
return &Accounts{
accounts: accounts,
current: current,
a.accounts.Store(token, account)
if current == nil {
current = account
}
}
a.current = current
return a
}
type Account struct {
@@ -44,82 +43,85 @@ type Account struct {
}
func (a *Accounts) Active() []*Account {
a.mu.RLock()
defer a.mu.RUnlock()
activeAccounts := make([]*Account, 0)
for _, acc := range a.accounts {
if !acc.Disabled {
a.accounts.Range(func(key, value interface{}) bool {
acc, ok := value.(*Account)
if ok && !acc.Disabled {
activeAccounts = append(activeAccounts, acc)
}
}
return true
})
// Sort active accounts by their Order field
slices.SortFunc(activeAccounts, func(i, j *Account) int {
return i.Order - j.Order
})
return activeAccounts
}
func (a *Accounts) All() []*Account {
a.mu.RLock()
defer a.mu.RUnlock()
return a.accounts
allAccounts := make([]*Account, 0)
a.accounts.Range(func(key, value interface{}) bool {
acc, ok := value.(*Account)
if ok {
allAccounts = append(allAccounts, acc)
}
return true
})
// Sort all accounts by their Order field
slices.SortFunc(allAccounts, func(i, j *Account) int {
return i.Order - j.Order
})
return allAccounts
}
func (a *Accounts) Current() *Account {
a.mu.RLock()
if a.current != nil {
if a.current != nil && !a.current.Disabled {
current := a.current
a.mu.RUnlock()
return current
}
a.mu.RUnlock()
a.mu.Lock()
defer a.mu.Unlock()
// Double-check after acquiring write lock
if a.current != nil {
activeAccounts := a.Active()
if len(activeAccounts) == 0 {
return a.current
}
a.current = activeAccounts[0]
activeAccounts := make([]*Account, 0)
for _, acc := range a.accounts {
if !acc.Disabled {
activeAccounts = append(activeAccounts, acc)
}
}
if len(activeAccounts) > 0 {
a.current = activeAccounts[0]
}
return a.current
}
func (a *Accounts) Disable(account *Account) {
a.mu.Lock()
defer a.mu.Unlock()
account.disable()
account.Disabled = true
a.accounts.Store(account.Token, account)
if a.current == account {
if a.current.Equals(account) {
var newCurrent *Account
for _, acc := range a.accounts {
if !acc.Disabled {
a.accounts.Range(func(key, value interface{}) bool {
acc, ok := value.(*Account)
if ok && !acc.Disabled {
newCurrent = acc
break
return false // Break the loop
}
}
return true // Continue the loop
})
a.current = newCurrent
}
}
func (a *Accounts) Reset() {
a.mu.Lock()
defer a.mu.Unlock()
for _, acc := range a.accounts {
acc.resetDownloadLinks()
acc.Disabled = false
}
if len(a.accounts) > 0 {
a.current = a.accounts[0]
} else {
a.current = nil
}
a.accounts.Range(func(key, value interface{}) bool {
acc, ok := value.(*Account)
if ok {
acc.resetDownloadLinks()
acc.Disabled = false
a.accounts.Store(key, acc)
if a.current == nil {
a.current = acc
}
}
return true
})
}
func (a *Accounts) GetDownloadLink(fileLink string) (*DownloadLink, error) {
@@ -185,21 +187,11 @@ func (a *Accounts) SetDownloadLinks(links map[string]*DownloadLink) {
a.Current().setLinks(links)
}
func (a *Accounts) Update(index int, account *Account) {
a.mu.Lock()
defer a.mu.Unlock()
if index < 0 || index >= len(a.accounts) {
return // Index out of bounds
}
// Update the account at the specified index
a.accounts[index] = account
// If the updated account is the current one, update the current reference
if a.current == nil || a.current.Order == index {
a.current = account
func (a *Accounts) Update(account *Account) {
if account == nil {
return
}
a.accounts.Store(account.Token, account)
}
func newAccount(debridName, token string, index int) *Account {
@@ -211,6 +203,13 @@ func newAccount(debridName, token string, index int) *Account {
}
}
// Equals reports whether both accounts refer to the same identity, i.e.
// they share the same API token and debrid provider. A nil other never
// matches.
func (a *Account) Equals(other *Account) bool {
	return other != nil && a.Token == other.Token && a.Debrid == other.Debrid
}
func (a *Account) getLink(fileLink string) (*DownloadLink, bool) {
a.mu.RLock()
defer a.mu.RUnlock()
@@ -238,9 +237,6 @@ func (a *Account) LinksCount() int {
defer a.mu.RUnlock()
return len(a.links)
}
func (a *Account) disable() {
a.Disabled = true
}
func (a *Account) setLinks(links map[string]*DownloadLink) {
a.mu.Lock()

View File

@@ -42,6 +42,38 @@ type Torrent struct {
sync.Mutex
}
// Copy returns a snapshot of the torrent taken under its lock. The Files
// map and Links slice are cloned so callers can mutate the copy without
// aliasing the original; all other fields are copied by value.
// NOTE(review): Arr and MountPath are copied shallowly — presumably shared
// intentionally; confirm Arr is safe to alias across copies.
func (t *Torrent) Copy() *Torrent {
	t.Lock()
	defer t.Unlock()

	// Clone the files map so the copy does not share storage with t.
	newFiles := make(map[string]File, len(t.Files))
	for k, v := range t.Files {
		newFiles[k] = v
	}

	return &Torrent{
		Id:               t.Id,
		InfoHash:         t.InfoHash,
		Name:             t.Name,
		Folder:           t.Folder,
		Filename:         t.Filename,
		OriginalFilename: t.OriginalFilename,
		Size:             t.Size,
		Bytes:            t.Bytes,
		Magnet:           t.Magnet,
		Files:            newFiles,
		Status:           t.Status,
		Added:            t.Added,
		Progress:         t.Progress,
		Speed:            t.Speed,
		Seeders:          t.Seeders,
		Links:            append([]string{}, t.Links...), // detach the slice
		MountPath:        t.MountPath,
		Debrid:           t.Debrid,
		Arr:              t.Arr,
	}
}
func (t *Torrent) GetSymlinkFolder(parent string) string {
return filepath.Join(parent, t.Arr.Name, t.Folder)
}

View File

@@ -93,64 +93,62 @@ func (s *Server) handleStats(w http.ResponseWriter, r *http.Request) {
}
debrids := wire.Get().Debrid()
if debrids == nil {
request.JSONResponse(w, stats, http.StatusOK)
return
if debrids != nil {
clients := debrids.Clients()
caches := debrids.Caches()
debridStats := make([]debridTypes.Stats, 0)
for debridName, client := range clients {
debridStat := debridTypes.Stats{}
libraryStat := debridTypes.LibraryStats{}
profile, err := client.GetProfile()
if err != nil {
s.logger.Error().Err(err).Str("debrid", debridName).Msg("Failed to get debrid profile")
profile = &debridTypes.Profile{
Name: debridName,
}
}
profile.Name = debridName
debridStat.Profile = profile
cache, ok := caches[debridName]
if ok {
// Get torrent data
libraryStat.Total = cache.TotalTorrents()
libraryStat.Bad = len(cache.GetListing("__bad__"))
libraryStat.ActiveLinks = cache.GetTotalActiveDownloadLinks()
}
debridStat.Library = libraryStat
// Get detailed account information
accounts := client.Accounts().All()
accountDetails := make([]map[string]any, 0)
for _, account := range accounts {
// Mask token - show first 8 characters and last 4 characters
maskedToken := ""
if len(account.Token) > 12 {
maskedToken = account.Token[:8] + "****" + account.Token[len(account.Token)-4:]
} else if len(account.Token) > 8 {
maskedToken = account.Token[:4] + "****" + account.Token[len(account.Token)-2:]
} else {
maskedToken = "****"
}
accountDetail := map[string]any{
"order": account.Order,
"disabled": account.Disabled,
"token_masked": maskedToken,
"username": account.Username,
"traffic_used": account.TrafficUsed,
"links_count": account.LinksCount(),
"debrid": account.Debrid,
}
accountDetails = append(accountDetails, accountDetail)
}
debridStat.Accounts = accountDetails
debridStats = append(debridStats, debridStat)
}
stats["debrids"] = debridStats
}
clients := debrids.Clients()
caches := debrids.Caches()
debridStats := make([]debridTypes.Stats, 0)
for debridName, client := range clients {
debridStat := debridTypes.Stats{}
libraryStat := debridTypes.LibraryStats{}
profile, err := client.GetProfile()
if err != nil {
s.logger.Error().Err(err).Str("debrid", debridName).Msg("Failed to get debrid profile")
profile = &debridTypes.Profile{
Name: debridName,
}
}
profile.Name = debridName
debridStat.Profile = profile
cache, ok := caches[debridName]
if ok {
// Get torrent data
libraryStat.Total = cache.TotalTorrents()
libraryStat.Bad = len(cache.GetListing("__bad__"))
libraryStat.ActiveLinks = cache.GetTotalActiveDownloadLinks()
}
debridStat.Library = libraryStat
// Get detailed account information
accounts := client.Accounts().All()
accountDetails := make([]map[string]any, 0)
for _, account := range accounts {
// Mask token - show first 8 characters and last 4 characters
maskedToken := ""
if len(account.Token) > 12 {
maskedToken = account.Token[:8] + "****" + account.Token[len(account.Token)-4:]
} else if len(account.Token) > 8 {
maskedToken = account.Token[:4] + "****" + account.Token[len(account.Token)-2:]
} else {
maskedToken = "****"
}
accountDetail := map[string]any{
"order": account.Order,
"disabled": account.Disabled,
"token_masked": maskedToken,
"username": account.Username,
"traffic_used": account.TrafficUsed,
"links_count": account.LinksCount(),
"debrid": account.Debrid,
}
accountDetails = append(accountDetails, accountDetail)
}
debridStat.Accounts = accountDetails
debridStats = append(debridStats, debridStat)
}
stats["debrids"] = debridStats
// Add rclone stats if available
if rcManager := wire.Get().RcloneManager(); rcManager != nil && rcManager.IsReady() {

View File

@@ -19,12 +19,11 @@ var streamingTransport = &http.Transport{
MaxConnsPerHost: 200,
IdleConnTimeout: 90 * time.Second,
TLSHandshakeTimeout: 10 * time.Second,
ResponseHeaderTimeout: 60 * time.Second, // give the upstream a minute to send headers
ResponseHeaderTimeout: 60 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
DisableKeepAlives: true, // close after each request
ForceAttemptHTTP2: false, // dont speak HTTP/2
// this line is what truly blocks HTTP/2:
TLSNextProto: make(map[string]func(string, *tls.Conn) http.RoundTripper),
DisableKeepAlives: true,
ForceAttemptHTTP2: false,
TLSNextProto: make(map[string]func(string, *tls.Conn) http.RoundTripper),
}
var sharedClient = &http.Client{
@@ -228,7 +227,7 @@ func (f *File) streamBuffer(w http.ResponseWriter, src io.Reader, statusCode int
if n, err := src.Read(smallBuf); n > 0 {
// Write status code just before first successful write
w.WriteHeader(statusCode)
if _, werr := w.Write(smallBuf[:n]); werr != nil {
if isClientDisconnection(werr) {
return &streamError{Err: werr, StatusCode: 0, IsClientDisconnection: true}

View File

@@ -1,10 +1,15 @@
package wire
import (
"crypto/md5"
"fmt"
"github.com/google/uuid"
"net/http"
"os"
"path/filepath"
"regexp"
"strconv"
"strings"
"sync"
"time"
@@ -14,6 +19,244 @@ import (
"github.com/sirrobot01/decypharr/internal/utils"
)
// Multi-season detection patterns, compiled once at package init.
var (
	// Pre-compiled patterns for multi-season replacement. The replacement
	// string is a fmt verb template; the target season number is substituted
	// via fmt.Sprintf before ReplaceAllString is applied.
	multiSeasonReplacements = []multiSeasonPattern{
		// S01-08 -> S01 (or whatever target season)
		{regexp.MustCompile(`(?i)S(\d{1,2})-\d{1,2}`), "S%02d"},
		// S01-S08 -> S01
		{regexp.MustCompile(`(?i)S(\d{1,2})-S\d{1,2}`), "S%02d"},
		// Season 1-8 -> Season 1
		{regexp.MustCompile(`(?i)Season\.?\s*\d{1,2}-\d{1,2}`), "Season %02d"},
		// Seasons 1-8 -> Season 1
		{regexp.MustCompile(`(?i)Seasons\.?\s*\d{1,2}-\d{1,2}`), "Season %02d"},
		// Complete Series -> Season X
		{regexp.MustCompile(`(?i)Complete\.?Series`), "Season %02d"},
		// All Seasons -> Season X
		{regexp.MustCompile(`(?i)All\.?Seasons?`), "Season %02d"},
	}

	// Also pre-compile other patterns used during detection/insertion.
	// seasonPattern matches "Season 3", "season.3", "S03", etc. (capture 1 =
	// the season digits).
	seasonPattern = regexp.MustCompile(`(?i)(?:season\.?\s*|s)(\d{1,2})`)
	// qualityIndicators locates the first release-quality token, used as an
	// insertion point for season info.
	qualityIndicators = regexp.MustCompile(`(?i)\b(2160p|1080p|720p|BluRay|WEB-DL|HDTV|x264|x265|HEVC)`)
	// multiSeasonIndicators flag torrent names that look like season packs.
	multiSeasonIndicators = []*regexp.Regexp{
		regexp.MustCompile(`(?i)complete\.?series`),
		regexp.MustCompile(`(?i)all\.?seasons?`),
		regexp.MustCompile(`(?i)season\.?\s*\d+\s*-\s*\d+`),
		regexp.MustCompile(`(?i)s\d+\s*-\s*s?\d+`),
		regexp.MustCompile(`(?i)seasons?\s*\d+\s*-\s*\d+`),
	}
)
// multiSeasonPattern pairs a pre-compiled multi-season regex with the fmt
// template used to rewrite the match for a single target season.
type multiSeasonPattern struct {
	pattern     *regexp.Regexp
	replacement string
}

// SeasonInfo describes one season split out of a multi-season torrent.
type SeasonInfo struct {
	SeasonNumber int          // season number extracted from file names/paths
	Files        []types.File // files belonging to this season
	InfoHash     string       // synthetic per-season hash derived from the original
	Name         string       // season-specific display name
}
// replaceMultiSeasonPattern rewrites a multi-season torrent name (e.g.
// "Show S01-08") into a single-season name for targetSeason. The first
// matching pre-compiled pattern wins; if none match, season info is
// inserted into the name instead.
func (s *Store) replaceMultiSeasonPattern(name string, targetSeason int) string {
	result := name
	// Apply each pre-compiled pattern replacement
	for _, msp := range multiSeasonReplacements {
		if msp.pattern.MatchString(result) {
			// The template carries a %02d verb for the target season.
			replacement := fmt.Sprintf(msp.replacement, targetSeason)
			result = msp.pattern.ReplaceAllString(result, replacement)
			s.logger.Debug().Msgf("Applied pattern replacement: %s -> %s", name, result)
			return result
		}
	}
	// If no multi-season pattern found, try to insert season info intelligently
	return s.insertSeasonIntoName(result, targetSeason)
}
// insertSeasonIntoName adds "SNN" to a name that lacks season info,
// preferring to place it just before the first quality token (1080p,
// BluRay, ...) so release metadata stays at the end of the name.
func (s *Store) insertSeasonIntoName(name string, seasonNum int) string {
	// Check if season info already exists
	if seasonPattern.MatchString(name) {
		return name // Already has season info, keep as is
	}
	// Try to find a good insertion point (before quality indicators)
	if loc := qualityIndicators.FindStringIndex(name); loc != nil {
		// Insert season before quality info
		before := strings.TrimSpace(name[:loc[0]])
		after := name[loc[0]:]
		return fmt.Sprintf("%s S%02d %s", before, seasonNum, after)
	}
	// If no quality indicators found, append at the end
	return fmt.Sprintf("%s S%02d", name, seasonNum)
}
// detectMultiSeason analyzes a debrid torrent and, when it spans multiple
// seasons (more than one season found in its files, or the name carries a
// multi-season indicator), splits it into per-season SeasonInfo entries,
// each with a season-specific name and a synthetic per-season info-hash.
// Returns (false, nil, nil) for single-season torrents.
func (s *Store) detectMultiSeason(debridTorrent *types.Torrent) (bool, []SeasonInfo, error) {
	torrentName := debridTorrent.Name
	files := debridTorrent.GetFiles()

	s.logger.Debug().Msgf("Analyzing torrent for multi-season: %s", torrentName)

	// Find all seasons present in the files
	seasonsFound := s.findAllSeasons(files)

	// Check if this is actually a multi-season torrent
	isMultiSeason := len(seasonsFound) > 1 || s.hasMultiSeasonIndicators(torrentName)
	if !isMultiSeason {
		return false, nil, nil
	}

	s.logger.Info().Msgf("Multi-season torrent detected with seasons: %v", getSortedSeasons(seasonsFound))

	// Group files by season
	seasonGroups := s.groupFilesBySeason(files, seasonsFound)

	// Create SeasonInfo objects with proper naming; seasons with no files
	// are dropped.
	var seasons []SeasonInfo
	for seasonNum, seasonFiles := range seasonGroups {
		if len(seasonFiles) == 0 {
			continue
		}
		// Generate season-specific name preserving all metadata
		seasonName := s.generateSeasonSpecificName(torrentName, seasonNum)
		seasons = append(seasons, SeasonInfo{
			SeasonNumber: seasonNum,
			Files:        seasonFiles,
			InfoHash:     s.generateSeasonHash(debridTorrent.InfoHash, seasonNum),
			Name:         seasonName,
		})
	}
	return true, seasons, nil
}
// generateSeasonSpecificName creates season name preserving all original metadata
func (s *Store) generateSeasonSpecificName(originalName string, seasonNum int) string {
// Find and replace the multi-season pattern with single season
seasonName := s.replaceMultiSeasonPattern(originalName, seasonNum)
s.logger.Debug().Msgf("Generated season name for S%02d: %s", seasonNum, seasonName)
return seasonName
}
// findAllSeasons scans every file's name (and, failing that, its full
// path) for a season number and returns the set of seasons present.
func (s *Store) findAllSeasons(files []types.File) map[int]bool {
	seasons := make(map[int]bool)
	for _, file := range files {
		// Check filename first
		if season := s.extractSeason(file.Name); season > 0 {
			seasons[season] = true
			continue
		}
		// Check full path
		if season := s.extractSeason(file.Path); season > 0 {
			seasons[season] = true
		}
	}
	return seasons
}
// extractSeason pulls season number from a string
func (s *Store) extractSeason(text string) int {
matches := seasonPattern.FindStringSubmatch(text)
if len(matches) > 1 {
if num, err := strconv.Atoi(matches[1]); err == nil && num > 0 && num < 100 {
return num
}
}
return 0
}
// hasMultiSeasonIndicators reports whether the torrent name matches any of
// the pre-compiled season-pack patterns ("complete series", "S01-S05", ...).
func (s *Store) hasMultiSeasonIndicators(torrentName string) bool {
	for _, pattern := range multiSeasonIndicators {
		if pattern.MatchString(torrentName) {
			return true
		}
	}
	return false
}
// groupFilesBySeason puts files into season buckets
func (s *Store) groupFilesBySeason(files []types.File, knownSeasons map[int]bool) map[int][]types.File {
groups := make(map[int][]types.File)
// Initialize groups
for season := range knownSeasons {
groups[season] = []types.File{}
}
for _, file := range files {
// Try to find season from filename or path
season := s.extractSeason(file.Name)
if season == 0 {
season = s.extractSeason(file.Path)
}
// If we found a season and it's known, add the file
if season > 0 && knownSeasons[season] {
groups[season] = append(groups[season], file)
} else {
// If no season found, try path-based inference
inferredSeason := s.inferSeasonFromPath(file.Path, knownSeasons)
if inferredSeason > 0 {
groups[inferredSeason] = append(groups[inferredSeason], file)
} else if len(knownSeasons) == 1 {
// If only one season exists, default to it
for season := range knownSeasons {
groups[season] = append(groups[season], file)
}
}
}
}
return groups
}
// inferSeasonFromPath walks the slash-separated path components and returns
// the first season number that is also in knownSeasons, or 0 if none match.
func (s *Store) inferSeasonFromPath(path string, knownSeasons map[int]bool) int {
	pathParts := strings.Split(path, "/")
	for _, part := range pathParts {
		if season := s.extractSeason(part); season > 0 && knownSeasons[season] {
			return season
		}
	}
	return 0
}
// Helper to get sorted season list for logging
func getSortedSeasons(seasons map[int]bool) []int {
var result []int
for season := range seasons {
result = append(result, season)
}
return result
}
// generateSeasonHash creates a unique hash for a season based on original hash
func (s *Store) generateSeasonHash(originalHash string, seasonNumber int) string {
source := fmt.Sprintf("%s-season-%d", originalHash, seasonNumber)
hash := md5.Sum([]byte(source))
return fmt.Sprintf("%x", hash)
}
func grabber(client *grab.Client, url, filename string, byterange *[2]int64, progressCallback func(int64, int64)) error {
req, err := grab.NewRequest(filename, url)
if err != nil {
@@ -150,105 +393,21 @@ func (s *Store) downloadFiles(torrent *Torrent, debridTorrent *types.Torrent, pa
s.logger.Info().Msgf("Downloaded all files for %s", debridTorrent.Name)
}
func (s *Store) processSymlink(torrent *Torrent, debridTorrent *types.Torrent) (string, error) {
files := debridTorrent.Files
func (s *Store) processSymlink(debridTorrent *types.Torrent, torrentRclonePath, torrentSymlinkPath string) (string, error) {
files := debridTorrent.GetFiles()
if len(files) == 0 {
return "", fmt.Errorf("no valid files found")
}
s.logger.Info().Msgf("Checking symlinks for %d files...", len(files))
rCloneBase := debridTorrent.MountPath
torrentPath, err := s.getTorrentPath(rCloneBase, debridTorrent) // /MyTVShow/
// This returns filename.ext for alldebrid instead of the parent folder filename/
torrentFolder := torrentPath
if err != nil {
return "", fmt.Errorf("failed to get torrent path: %v", err)
}
// Check if the torrent path is a file
torrentRclonePath := filepath.Join(rCloneBase, torrentPath) // leave it as is
if debridTorrent.Debrid == "alldebrid" && utils.IsMediaFile(torrentPath) {
// Alldebrid hotfix for single file torrents
torrentFolder = utils.RemoveExtension(torrentFolder)
torrentRclonePath = rCloneBase // /mnt/rclone/magnets/ // Remove the filename since it's in the root folder
}
torrentSymlinkPath := filepath.Join(torrent.SavePath, torrentFolder) // /mnt/symlinks/{category}/MyTVShow/
err = os.MkdirAll(torrentSymlinkPath, os.ModePerm)
s.logger.Info().Msgf("Creating symlinks for %d files ...", len(files))
// Create symlink directory
err := os.MkdirAll(torrentSymlinkPath, os.ModePerm)
if err != nil {
return "", fmt.Errorf("failed to create directory: %s: %v", torrentSymlinkPath, err)
}
realPaths := make(map[string]string)
err = filepath.WalkDir(torrentRclonePath, func(path string, d os.DirEntry, err error) error {
if err != nil {
return nil
}
if !d.IsDir() {
filename := d.Name()
rel, _ := filepath.Rel(torrentRclonePath, path)
realPaths[filename] = rel
}
return nil
})
if err != nil {
s.logger.Warn().Msgf("Error while scanning rclone path: %v", err)
}
pending := make(map[string]types.File)
for _, file := range files {
if realRelPath, ok := realPaths[file.Name]; ok {
file.Path = realRelPath
}
pending[file.Path] = file
}
ticker := time.NewTicker(200 * time.Millisecond)
defer ticker.Stop()
timeout := time.After(30 * time.Minute)
filePaths := make([]string, 0, len(pending))
for len(pending) > 0 {
select {
case <-ticker.C:
for path, file := range pending {
fullFilePath := filepath.Join(torrentRclonePath, file.Path)
if _, err := os.Stat(fullFilePath); !os.IsNotExist(err) {
fileSymlinkPath := filepath.Join(torrentSymlinkPath, file.Name)
if err := os.Symlink(fullFilePath, fileSymlinkPath); err != nil && !os.IsExist(err) {
s.logger.Warn().Msgf("Failed to create symlink: %s: %v", fileSymlinkPath, err)
} else {
filePaths = append(filePaths, fileSymlinkPath)
delete(pending, path)
s.logger.Info().Msgf("File is ready: %s", file.Name)
}
}
}
case <-timeout:
s.logger.Warn().Msgf("Timeout waiting for files, %d files still pending", len(pending))
return torrentSymlinkPath, fmt.Errorf("timeout waiting for files: %d files still pending", len(pending))
}
}
if s.skipPreCache {
return torrentSymlinkPath, nil
}
go func() {
s.logger.Debug().Msgf("Pre-caching %s", debridTorrent.Name)
if err := utils.PreCacheFile(filePaths); err != nil {
s.logger.Error().Msgf("Failed to pre-cache file: %s", err)
} else {
s.logger.Trace().Msgf("Pre-cached %d files", len(filePaths))
}
}()
return torrentSymlinkPath, nil
}
func (s *Store) createSymlinksWebdav(torrent *Torrent, debridTorrent *types.Torrent, rclonePath, torrentFolder string) (string, error) {
files := debridTorrent.Files
symlinkPath := filepath.Join(torrent.SavePath, torrentFolder) // /mnt/symlinks/{category}/MyTVShow/
err := os.MkdirAll(symlinkPath, os.ModePerm)
if err != nil {
return "", fmt.Errorf("failed to create directory: %s: %v", symlinkPath, err)
}
// Track pending files
remainingFiles := make(map[string]types.File)
for _, file := range files {
remainingFiles[file.Name] = file
@@ -257,61 +416,209 @@ func (s *Store) createSymlinksWebdav(torrent *Torrent, debridTorrent *types.Torr
ticker := time.NewTicker(100 * time.Millisecond)
defer ticker.Stop()
timeout := time.After(30 * time.Minute)
filePaths := make([]string, 0, len(files))
filePaths := make([]string, 0, len(remainingFiles))
var checkDirectory func(string) // Recursive function
checkDirectory = func(dirPath string) {
entries, err := os.ReadDir(dirPath)
if err != nil {
return
}
for _, entry := range entries {
entryName := entry.Name()
fullPath := filepath.Join(dirPath, entryName)
// Check if this matches a remaining file
if file, exists := remainingFiles[entryName]; exists {
fileSymlinkPath := filepath.Join(torrentSymlinkPath, file.Name)
if err := os.Symlink(fullPath, fileSymlinkPath); err == nil || os.IsExist(err) {
filePaths = append(filePaths, fileSymlinkPath)
delete(remainingFiles, entryName)
s.logger.Info().Msgf("File is ready: %s", file.Name)
}
} else if entry.IsDir() {
// If not found and it's a directory, check inside
checkDirectory(fullPath)
}
}
}
for len(remainingFiles) > 0 {
select {
case <-ticker.C:
entries, err := os.ReadDir(rclonePath)
if err != nil {
continue
}
// Check which files exist in this batch
for _, entry := range entries {
filename := entry.Name()
if file, exists := remainingFiles[filename]; exists {
fullFilePath := filepath.Join(rclonePath, filename)
fileSymlinkPath := filepath.Join(symlinkPath, file.Name)
if err := os.Symlink(fullFilePath, fileSymlinkPath); err != nil && !os.IsExist(err) {
s.logger.Debug().Msgf("Failed to create symlink: %s: %v", fileSymlinkPath, err)
} else {
filePaths = append(filePaths, fileSymlinkPath)
delete(remainingFiles, filename)
s.logger.Info().Msgf("File is ready: %s", file.Name)
}
}
}
checkDirectory(torrentRclonePath)
case <-timeout:
s.logger.Warn().Msgf("Timeout waiting for files, %d files still pending", len(remainingFiles))
return symlinkPath, fmt.Errorf("timeout waiting for files")
return torrentSymlinkPath, fmt.Errorf("timeout waiting for files: %d files still pending", len(remainingFiles))
}
}
if s.skipPreCache {
return symlinkPath, nil
// Pre-cache files if enabled
if !s.skipPreCache && len(filePaths) > 0 {
go func() {
s.logger.Debug().Msgf("Pre-caching %s", debridTorrent.Name)
if err := utils.PreCacheFile(filePaths); err != nil {
s.logger.Error().Msgf("Failed to pre-cache file: %s", err)
} else {
s.logger.Debug().Msgf("Pre-cached %d files", len(filePaths))
}
}()
}
go func() {
s.logger.Debug().Msgf("Pre-caching %s", debridTorrent.Name)
if err := utils.PreCacheFile(filePaths); err != nil {
s.logger.Error().Msgf("Failed to pre-cache file: %s", err)
} else {
s.logger.Debug().Msgf("Pre-cached %d files", len(filePaths))
}
}() // Pre-cache the files in the background
// Pre-cache the first 256KB and 1MB of the file
return symlinkPath, nil
return torrentSymlinkPath, nil
}
func (s *Store) getTorrentPath(rclonePath string, debridTorrent *types.Torrent) (string, error) {
// getTorrentPaths returns mountPath and symlinkPath for a torrent
func (s *Store) getTorrentPaths(arrFolder string, debridTorrent *types.Torrent) (string, string, error) {
for {
torrentPath, err := debridTorrent.GetMountFolder(rclonePath)
torrentFolder, err := debridTorrent.GetMountFolder(debridTorrent.MountPath)
if err == nil {
return torrentPath, err
// Found mountPath
mountPath := filepath.Join(debridTorrent.MountPath, torrentFolder)
if debridTorrent.Debrid == "alldebrid" && utils.IsMediaFile(torrentFolder) {
torrentFolder = utils.RemoveExtension(torrentFolder)
mountPath = debridTorrent.MountPath
}
// Return mountPath and symlink path
return mountPath, filepath.Join(arrFolder, torrentFolder), nil
}
time.Sleep(100 * time.Millisecond)
}
}
// processMultiSeasonSymlinks splits a multi-season torrent into one torrent
// record per season, symlinks each season's files into a season-specific
// folder, and finally removes the original combined torrent from storage.
// It returns the first error encountered; already-processed seasons are kept.
//
// importReq is currently unused but kept for signature parity with
// processMultiSeasonDownloads.
func (s *Store) processMultiSeasonSymlinks(torrent *Torrent, debridTorrent *types.Torrent, seasons []SeasonInfo, importReq *ImportRequest) error {
	for _, seasonInfo := range seasons {
		// Build a season-specific debrid torrent carrying only this
		// season's files.
		seasonDebridTorrent := debridTorrent.Copy()
		seasonDebridTorrent.InfoHash = seasonInfo.InfoHash
		seasonDebridTorrent.Name = seasonInfo.Name

		// Mirror the season data onto a season-specific torrent record.
		seasonTorrent := torrent.Copy()
		seasonTorrent.ID = seasonInfo.InfoHash
		seasonTorrent.Name = seasonInfo.Name
		seasonTorrent.Hash = seasonInfo.InfoHash

		// Filter files down to this season and accumulate the total size.
		torrentFiles := make([]*File, 0, len(seasonInfo.Files))
		seasonFiles := make(map[string]types.File, len(seasonInfo.Files))
		size := int64(0)
		for index, file := range seasonInfo.Files {
			seasonFiles[file.Name] = file
			torrentFiles = append(torrentFiles, &File{
				Index: index,
				Name:  file.Path,
				Size:  file.Size,
			})
			size += file.Size
		}
		seasonDebridTorrent.Files = seasonFiles
		seasonTorrent.Files = torrentFiles
		seasonTorrent.Size = size

		// Season folder is named after the extracted season name.
		seasonFolderName := seasonInfo.Name
		s.logger.Info().Msgf("Processing season %s with %d files", seasonTorrent.Name, len(seasonInfo.Files))

		var err error
		cache := s.debrid.Debrid(debridTorrent.Debrid).Cache()
		var torrentRclonePath, torrentSymlinkPath string
		if cache != nil {
			// Internal webdav: derive the mount folder from the cache.
			torrentRclonePath = filepath.Join(debridTorrent.MountPath, cache.GetTorrentFolder(debridTorrent))
		} else {
			// Regular mount mode (zurg or debrid webdav); the default
			// symlink path is discarded since we use the season folder.
			torrentRclonePath, _, err = s.getTorrentPaths(seasonTorrent.SavePath, seasonDebridTorrent)
			if err != nil {
				return err
			}
		}
		torrentSymlinkPath = filepath.Join(seasonTorrent.SavePath, seasonFolderName)
		torrentSymlinkPath, err = s.processSymlink(seasonDebridTorrent, torrentRclonePath, torrentSymlinkPath)
		if err != nil {
			return err
		}
		if torrentSymlinkPath == "" {
			return fmt.Errorf("no symlink found for season %d", seasonInfo.SeasonNumber)
		}

		// Mark the season torrent as complete and persist it.
		seasonTorrent.TorrentPath = torrentSymlinkPath
		seasonTorrent.ContentPath = torrentSymlinkPath
		seasonTorrent.State = "pausedUP"
		s.torrents.AddOrUpdate(seasonTorrent)
		s.logger.Info().Str("path", torrentSymlinkPath).Msgf("Successfully created season %d torrent: %s", seasonInfo.SeasonNumber, seasonTorrent.ID)
	}
	// All seasons were imported; drop the original combined torrent. Pass the
	// category (consistent with processMultiSeasonDownloads) so only the
	// matching record is removed.
	s.torrents.Delete(torrent.Hash, torrent.Category, false)
	s.logger.Info().Msgf("Multi-season processing completed for %s", debridTorrent.Name)
	return nil
}
// processMultiSeasonDownloads handles multi-season torrent downloading: it
// creates a separate download record per season, generates download links for
// each season's files, downloads them, and finally removes the original
// combined torrent from storage. It returns the first error encountered;
// already-downloaded seasons are kept.
//
// importReq is currently unused but kept for signature parity with
// processMultiSeasonSymlinks.
func (s *Store) processMultiSeasonDownloads(torrent *Torrent, debridTorrent *types.Torrent, seasons []SeasonInfo, importReq *ImportRequest) error {
	s.logger.Info().Msgf("Creating separate download records for %d seasons", len(seasons))
	for _, seasonInfo := range seasons {
		// Build a season-specific debrid torrent carrying only this
		// season's files.
		seasonDebridTorrent := debridTorrent.Copy()
		seasonDebridTorrent.InfoHash = seasonInfo.InfoHash
		seasonDebridTorrent.Name = seasonInfo.Name
		seasonFiles := make(map[string]types.File, len(seasonInfo.Files))
		for _, file := range seasonInfo.Files {
			seasonFiles[file.Name] = file
		}
		seasonDebridTorrent.Files = seasonFiles

		// Create a season-specific torrent record with a fresh ID.
		seasonTorrent := torrent.Copy()
		seasonTorrent.ID = uuid.New().String()
		seasonTorrent.Name = seasonInfo.Name
		seasonTorrent.Hash = seasonInfo.InfoHash
		seasonTorrent.SavePath = torrent.SavePath

		s.logger.Info().Msgf("Downloading season %d with %d files", seasonInfo.SeasonNumber, len(seasonInfo.Files))

		// Generate download links for this season's files.
		client := s.debrid.Debrid(debridTorrent.Debrid).Client()
		if err := client.GetFileDownloadLinks(seasonDebridTorrent); err != nil {
			s.logger.Error().Msgf("Failed to get download links for season %d: %v", seasonInfo.SeasonNumber, err)
			return fmt.Errorf("failed to get download links for season %d: %w", seasonInfo.SeasonNumber, err)
		}

		// Download this season's files to disk.
		seasonDownloadPath, err := s.processDownload(seasonTorrent, seasonDebridTorrent)
		if err != nil {
			s.logger.Error().Msgf("Failed to download season %d: %v", seasonInfo.SeasonNumber, err)
			return fmt.Errorf("failed to download season %d: %w", seasonInfo.SeasonNumber, err)
		}

		// Mark the season torrent as complete and persist it.
		seasonTorrent.TorrentPath = seasonDownloadPath
		// Bug fix: set the content path on the season record, not on the
		// original torrent (which is deleted below anyway). This mirrors the
		// symlink path in processMultiSeasonSymlinks.
		seasonTorrent.ContentPath = seasonDownloadPath
		seasonTorrent.State = "pausedUP"
		s.torrents.AddOrUpdate(seasonTorrent)
		s.logger.Info().Msgf("Successfully downloaded season %d torrent: %s", seasonInfo.SeasonNumber, seasonTorrent.ID)
	}
	// All seasons were imported; drop the original combined torrent.
	s.logger.Debug().Msgf("Deleting original torrent with hash: %s, category: %s", torrent.Hash, torrent.Category)
	s.torrents.Delete(torrent.Hash, torrent.Category, false)
	s.logger.Info().Msgf("Multi-season download processing completed for %s", debridTorrent.Name)
	return nil
}

View File

@@ -6,7 +6,6 @@ import (
"errors"
"fmt"
"math"
"os"
"path/filepath"
"time"
@@ -99,8 +98,7 @@ func (s *Store) processFiles(torrent *Torrent, debridTorrent *types.Torrent, imp
backoff.Reset(nextInterval)
}
}
var torrentSymlinkPath string
var err error
var torrentSymlinkPath, torrentRclonePath string
debridTorrent.Arr = _arr
// Check if debrid supports webdav by checking cache
@@ -134,11 +132,20 @@ func (s *Store) processFiles(torrent *Torrent, debridTorrent *types.Torrent, imp
}()
}
// Check for multi-season torrent support
isMultiSeason, seasons, err := s.detectMultiSeason(debridTorrent)
if err != nil {
s.logger.Warn().Msgf("Error detecting multi-season for %s: %v", debridTorrent.Name, err)
// Continue with normal processing if detection fails
isMultiSeason = false
}
switch importReq.Action {
case "symlink":
// Symlink action, we will create a symlink to the torrent
s.logger.Debug().Msgf("Post-Download Action: Symlink")
cache := deb.Cache()
if cache != nil {
s.logger.Info().Msgf("Using internal webdav for %s", debridTorrent.Debrid)
// Use webdav to download the file
@@ -146,14 +153,45 @@ func (s *Store) processFiles(torrent *Torrent, debridTorrent *types.Torrent, imp
onFailed(err)
return
}
}
if isMultiSeason {
s.logger.Info().Msgf("Processing multi-season torrent with %d seasons", len(seasons))
// Remove any torrent already added
err := s.processMultiSeasonSymlinks(torrent, debridTorrent, seasons, importReq)
if err == nil {
// If an error occurred during multi-season processing, send it to normal processing
s.logger.Info().Msgf("Adding %s took %s", debridTorrent.Name, time.Since(timer))
go importReq.markAsCompleted(torrent, debridTorrent) // Mark the import request as completed, send callback if needed
go func() {
if err := request.SendDiscordMessage("download_complete", "success", torrent.discordContext()); err != nil {
s.logger.Error().Msgf("Error sending discord message: %v", err)
}
}()
go func() {
_arr.Refresh()
}()
return
}
}
if cache != nil {
torrentRclonePath = filepath.Join(debridTorrent.MountPath, cache.GetTorrentFolder(debridTorrent)) // /mnt/remote/realdebrid/MyTVShow
torrentSymlinkPath = filepath.Join(torrent.SavePath, utils.RemoveExtension(debridTorrent.Name)) // /mnt/symlinks/{category}/MyTVShow/
rclonePath := filepath.Join(debridTorrent.MountPath, cache.GetTorrentFolder(debridTorrent)) // /mnt/remote/realdebrid/MyTVShow
torrentFolderNoExt := utils.RemoveExtension(debridTorrent.Name)
torrentSymlinkPath, err = s.createSymlinksWebdav(torrent, debridTorrent, rclonePath, torrentFolderNoExt) // /mnt/symlinks/{category}/MyTVShow/
} else {
// User is using either zurg or debrid webdav
torrentSymlinkPath, err = s.processSymlink(torrent, debridTorrent) // /mnt/symlinks/{category}/MyTVShow/
torrentRclonePath, torrentSymlinkPath, err = s.getTorrentPaths(torrent.SavePath, debridTorrent)
if err != nil {
onFailed(err)
return
}
}
torrentSymlinkPath, err = s.processSymlink(debridTorrent, torrentRclonePath, torrentSymlinkPath)
if err != nil {
onFailed(err)
return
@@ -168,6 +206,19 @@ func (s *Store) processFiles(torrent *Torrent, debridTorrent *types.Torrent, imp
// Download action, we will download the torrent to the specified folder
// Generate download links
s.logger.Debug().Msgf("Post-Download Action: Download")
if isMultiSeason {
s.logger.Info().Msgf("Processing multi-season download with %d seasons", len(seasons))
err := s.processMultiSeasonDownloads(torrent, debridTorrent, seasons, importReq)
if err != nil {
onFailed(err)
return
}
// Multi-season processing completed successfully
onSuccess(torrent.SavePath)
return
}
if err := client.GetFileDownloadLinks(debridTorrent); err != nil {
onFailed(err)
return
@@ -252,7 +303,6 @@ func (s *Store) partialTorrentUpdate(t *Torrent, debridTorrent *types.Torrent) *
t.Eta = eta
t.Dlspeed = speed
t.Upspeed = speed
t.ContentPath = filepath.Join(t.SavePath, t.Name) + string(os.PathSeparator)
return t
}
@@ -267,7 +317,7 @@ func (s *Store) updateTorrent(t *Torrent, debridTorrent *types.Torrent) *Torrent
}
}
t = s.partialTorrentUpdate(t, debridTorrent)
t.ContentPath = t.TorrentPath + string(os.PathSeparator)
t.ContentPath = t.TorrentPath
if t.IsReady() {
t.State = "pausedUP"

View File

@@ -167,44 +167,33 @@ func (ts *TorrentStorage) Update(torrent *Torrent) {
func (ts *TorrentStorage) Delete(hash, category string, removeFromDebrid bool) {
ts.mu.Lock()
defer ts.mu.Unlock()
key := keyPair(hash, category)
torrent, exists := ts.torrents[key]
if !exists && category == "" {
// Remove the torrent without knowing the category
for k, t := range ts.torrents {
if t.Hash == hash {
key = k
torrent = t
break
wireStore := Get()
for key, torrent := range ts.torrents {
if torrent == nil {
continue
}
if torrent.Hash == hash && (category == "" || torrent.Category == category) {
if torrent.State == "queued" && torrent.ID != "" {
// Remove the torrent from the import queue if it exists
wireStore.importsQueue.Delete(torrent.ID)
}
}
}
if removeFromDebrid && torrent.DebridID != "" && torrent.Debrid != "" {
dbClient := wireStore.debrid.Client(torrent.Debrid)
if dbClient != nil {
_ = dbClient.DeleteTorrent(torrent.DebridID)
}
}
delete(ts.torrents, key)
if torrent == nil {
return
}
st := Get()
// Check if torrent is queued for download
if torrent.State == "queued" && torrent.ID != "" {
// Remove the torrent from the import queue if it exists
st.importsQueue.Delete(torrent.ID)
}
if removeFromDebrid && torrent.DebridID != "" && torrent.Debrid != "" {
dbClient := st.debrid.Client(torrent.Debrid)
if dbClient != nil {
_ = dbClient.DeleteTorrent(torrent.DebridID)
}
}
delete(ts.torrents, key)
// Delete the torrent folder
if torrent.ContentPath != "" {
err := os.RemoveAll(torrent.ContentPath)
if err != nil {
return
// Delete the torrent folder
if torrent.ContentPath != "" {
err := os.RemoveAll(torrent.ContentPath)
if err != nil {
return
}
}
break
}
}
go func() {
@@ -227,12 +216,11 @@ func (ts *TorrentStorage) DeleteMultiple(hashes []string, removeFromDebrid bool)
if torrent == nil {
continue
}
if torrent.State == "queued" && torrent.ID != "" {
// Remove the torrent from the import queue if it exists
st.importsQueue.Delete(torrent.ID)
}
if torrent.Hash == hash {
if torrent.State == "queued" && torrent.ID != "" {
// Remove the torrent from the import queue if it exists
st.importsQueue.Delete(torrent.ID)
}
if removeFromDebrid && torrent.DebridID != "" && torrent.Debrid != "" {
toDelete[torrent.DebridID] = torrent.Debrid
}
@@ -243,6 +231,7 @@ func (ts *TorrentStorage) DeleteMultiple(hashes []string, removeFromDebrid bool)
return
}
}
break
}
}
}

View File

@@ -72,6 +72,60 @@ type Torrent struct {
sync.Mutex
}
// Copy returns a new Torrent carrying the same field values as t. The
// embedded sync.Mutex is deliberately not copied, so the clone starts with
// its own unlocked mutex (copying the literal struct would trip go vet's
// copylocks check).
func (t *Torrent) Copy() *Torrent {
	clone := new(Torrent)
	clone.ID = t.ID
	clone.DebridID = t.DebridID
	clone.Debrid = t.Debrid
	clone.TorrentPath = t.TorrentPath
	clone.AddedOn = t.AddedOn
	clone.AmountLeft = t.AmountLeft
	clone.AutoTmm = t.AutoTmm
	clone.Availability = t.Availability
	clone.Category = t.Category
	clone.Completed = t.Completed
	clone.CompletionOn = t.CompletionOn
	clone.ContentPath = t.ContentPath
	clone.DlLimit = t.DlLimit
	clone.Dlspeed = t.Dlspeed
	clone.Downloaded = t.Downloaded
	clone.DownloadedSession = t.DownloadedSession
	clone.Eta = t.Eta
	clone.FlPiecePrio = t.FlPiecePrio
	clone.ForceStart = t.ForceStart
	clone.Hash = t.Hash
	clone.LastActivity = t.LastActivity
	clone.MagnetUri = t.MagnetUri
	clone.MaxRatio = t.MaxRatio
	clone.MaxSeedingTime = t.MaxSeedingTime
	clone.Name = t.Name
	clone.NumComplete = t.NumComplete
	clone.NumIncomplete = t.NumIncomplete
	clone.NumLeechs = t.NumLeechs
	clone.NumSeeds = t.NumSeeds
	clone.Priority = t.Priority
	clone.Progress = t.Progress
	clone.Ratio = t.Ratio
	clone.RatioLimit = t.RatioLimit
	clone.SavePath = t.SavePath
	clone.SeedingTimeLimit = t.SeedingTimeLimit
	clone.SeenComplete = t.SeenComplete
	clone.SeqDl = t.SeqDl
	clone.Size = t.Size
	clone.State = t.State
	clone.SuperSeeding = t.SuperSeeding
	clone.Tags = t.Tags
	clone.TimeActive = t.TimeActive
	clone.TotalSize = t.TotalSize
	clone.Tracker = t.Tracker
	clone.UpLimit = t.UpLimit
	clone.Uploaded = t.Uploaded
	clone.UploadedSession = t.UploadedSession
	clone.Upspeed = t.Upspeed
	clone.Source = t.Source
	return clone
}
// IsReady reports whether the torrent has finished transferring (nothing left
// or progress at 100%) and has a torrent path assigned.
func (t *Torrent) IsReady() bool {
	if t.TorrentPath == "" {
		return false
	}
	return t.AmountLeft <= 0 || t.Progress == 1
}