Migrate to full rclone rcd

This commit is contained in:
Mukhtar Akere
2025-08-08 05:22:52 +01:00
parent eba24c9d63
commit 6f9fafd7d8
17 changed files with 1363 additions and 900 deletions
+15 -15
View File
@@ -12,22 +12,22 @@ import (
"github.com/sirrobot01/decypharr/pkg/debrid/providers/debrid_link"
"github.com/sirrobot01/decypharr/pkg/debrid/providers/realdebrid"
"github.com/sirrobot01/decypharr/pkg/debrid/providers/torbox"
"github.com/sirrobot01/decypharr/pkg/debrid/store"
debridStore "github.com/sirrobot01/decypharr/pkg/debrid/store"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
"github.com/sirrobot01/decypharr/pkg/mount"
"github.com/sirrobot01/decypharr/pkg/rclone"
"sync"
)
type Debrid struct {
cache *store.Cache // Could be nil if not using WebDAV
client types.Client // HTTP client for making requests to the debrid service
cache *debridStore.Cache // Could be nil if not using WebDAV
client types.Client // HTTP client for making requests to the debrid service
}
func (de *Debrid) Client() types.Client {
return de.client
}
func (de *Debrid) Cache() *store.Cache {
func (de *Debrid) Cache() *debridStore.Cache {
return de.cache
}
@@ -43,7 +43,7 @@ type Storage struct {
lastUsed string
}
func NewStorage() *Storage {
func NewStorage(rcManager *rclone.Manager) *Storage {
cfg := config.Get()
_logger := logger.Default()
@@ -63,15 +63,15 @@ func NewStorage() *Storage {
continue
}
var (
cache *store.Cache
mounter *mount.Mount
cache *debridStore.Cache
mounter *rclone.Mount
)
_log := client.Logger()
if dc.UseWebDav {
if cfg.Rclone.Enabled {
mounter = mount.NewMount(dc.Name, webdavUrl)
if cfg.Rclone.Enabled && rcManager != nil {
mounter = rclone.NewMount(dc.Name, webdavUrl, rcManager)
}
cache = store.NewDebridCache(dc, client, mounter)
cache = debridStore.NewDebridCache(dc, client, mounter)
_log.Info().Msg("Debrid Service started with WebDAV")
} else {
_log.Info().Msg("Debrid Service started")
@@ -147,10 +147,10 @@ func (d *Storage) Clients() map[string]types.Client {
return clientsCopy
}
func (d *Storage) Caches() map[string]*store.Cache {
func (d *Storage) Caches() map[string]*debridStore.Cache {
d.mu.RLock()
defer d.mu.RUnlock()
cachesCopy := make(map[string]*store.Cache)
cachesCopy := make(map[string]*debridStore.Cache)
for name, debrid := range d.debrids {
if debrid != nil && debrid.cache != nil {
cachesCopy[name] = debrid.cache
@@ -221,7 +221,7 @@ func Process(ctx context.Context, store *Storage, selectedDebrid string, magnet
debridTorrent.DownloadUncached = false
}
for index, db := range clients {
for _, db := range clients {
_logger := db.Logger()
_logger.Info().
Str("Debrid", db.Name()).
@@ -242,7 +242,7 @@ func Process(ctx context.Context, store *Storage, selectedDebrid string, magnet
}
dbt.Arr = a
_logger.Info().Str("id", dbt.Id).Msgf("Torrent: %s submitted to %s", dbt.Name, db.Name())
store.lastUsed = index
store.lastUsed = db.Name()
torrent, err := db.CheckStatus(dbt)
if err != nil && torrent != nil && torrent.Id != "" {
+3 -3
View File
@@ -6,7 +6,7 @@ import (
"context"
"errors"
"fmt"
"github.com/sirrobot01/decypharr/pkg/mount"
"github.com/sirrobot01/decypharr/pkg/rclone"
"os"
"path"
"path/filepath"
@@ -107,10 +107,10 @@ type Cache struct {
config config.Debrid
customFolders []string
mounter *mount.Mount
mounter *rclone.Mount
}
func NewDebridCache(dc config.Debrid, client types.Client, mounter *mount.Mount) *Cache {
func NewDebridCache(dc config.Debrid, client types.Client, mounter *rclone.Mount) *Cache {
cfg := config.Get()
cet, err := time.LoadLocation("CET")
if err != nil {
+6 -4
View File
@@ -29,6 +29,12 @@ func (fi *fileInfo) IsDir() bool { return fi.isDir }
func (fi *fileInfo) ID() string { return fi.id }
func (fi *fileInfo) Sys() interface{} { return nil }
type RcloneRC struct {
URL string
User string
Pass string
}
func (c *Cache) RefreshListings(refreshRclone bool) {
// Copy the torrents to a string|time map
c.torrents.refreshListing() // refresh torrent listings
@@ -147,10 +153,6 @@ func (c *Cache) refreshRcloneWithRC(dirs []string) error {
return nil
}
if cfg.RcUrl == "" {
return nil
}
client := http.DefaultClient
// Create form data
data := c.buildRcloneRequestData(dirs)
-443
View File
@@ -1,443 +0,0 @@
package mount
import (
"context"
"errors"
"fmt"
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/logger"
"net/url"
"os"
"os/exec"
"path/filepath"
"runtime"
"strings"
"sync/atomic"
"time"
"github.com/rclone/rclone/cmd/mountlib"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/vfs"
"github.com/rclone/rclone/vfs/vfscommon"
_ "github.com/rclone/rclone/backend/local" // Local backend (required for VFS cache)
_ "github.com/rclone/rclone/backend/webdav" // WebDAV backend
_ "github.com/rclone/rclone/cmd/cmount" // Custom mount for macOS
_ "github.com/rclone/rclone/cmd/mount" // Standard FUSE mount
// Try to import available mount backends for macOS
// These are conditional imports that may or may not work depending on system setup
_ "github.com/rclone/rclone/cmd/mount2" // Alternative FUSE mount
configPkg "github.com/sirrobot01/decypharr/internal/config"
)
// getMountFn locates the first usable FUSE mount implementation, probing the
// default method followed by "mount", "cmount" and "mount2". It returns an
// error when no mount backend is linked into the binary.
func getMountFn() (mountlib.MountFn, error) {
	candidates := []string{"", "mount", "cmount", "mount2"}
	for _, name := range candidates {
		if _, fn := mountlib.ResolveMountMethod(name); fn != nil {
			return fn, nil
		}
	}
	return nil, errors.New("no suitable mount function found")
}
// Mount manages a single in-process rclone FUSE mount of a provider's
// WebDAV endpoint at a local path.
type Mount struct {
	Provider   string // debrid provider name; doubles as the rclone remote name
	LocalPath  string // local directory the remote is mounted at
	WebDAVURL  string // provider-specific WebDAV URL (base URL joined with provider)
	mountPoint *mountlib.MountPoint
	cancel     context.CancelFunc
	mounted    atomic.Bool // set true only after performMount succeeds
	logger     zerolog.Logger
}

// NewMount builds a Mount for provider under cfg.Rclone.MountPath.
// It returns nil when no FUSE mount backend is available on this system,
// so callers MUST nil-check the result before use.
func NewMount(provider, webdavURL string) *Mount {
	cfg := configPkg.Get()
	_logger := logger.New("mount-" + provider)
	mountPath := filepath.Join(cfg.Rclone.MountPath, provider)
	// Prefer url.JoinPath; fall back to naive concatenation if joining fails.
	_url, err := url.JoinPath(webdavURL, provider)
	if err != nil {
		_url = fmt.Sprintf("%s/%s", webdavURL, provider)
	}
	// Get mount function to validate if FUSE is available
	mountFn, err := getMountFn()
	if err != nil || mountFn == nil {
		_logger.Warn().Err(err).Msgf("Mount function not available for %s, using WebDAV URL %s", provider, _url)
		return nil
	}
	return &Mount{
		Provider:  provider,
		LocalPath: mountPath,
		WebDAVURL: _url,
		logger:    _logger,
	}
}
// Mount prepares the mount point and starts mounting asynchronously.
// It creates the local directory, cleans up a busy/stale mount point,
// registers the provider's rclone config and launches performMount on a
// goroutine. NOTE: a nil return means the mount was STARTED, not that it
// succeeded — check IsMounted() or the logs for the final outcome.
func (m *Mount) Mount(ctx context.Context) error {
	if m.mounted.Load() {
		m.logger.Info().Msgf("Mount %s is already mounted at %s", m.Provider, m.LocalPath)
		return nil
	}
	if err := os.MkdirAll(m.LocalPath, 0755); err != nil && !os.IsExist(err) {
		return fmt.Errorf("failed to create mount directory %s: %w", m.LocalPath, err)
	}
	// Check if the mount point is already busy/mounted
	if m.isMountBusy() {
		m.logger.Warn().Msgf("Mount point %s appears to be busy, attempting cleanup", m.LocalPath)
		if err := m.forceUnmount(); err != nil {
			m.logger.Error().Err(err).Msgf("Failed to cleanup busy mount point %s", m.LocalPath)
			return fmt.Errorf("mount point %s is busy and cleanup failed: %w", m.LocalPath, err)
		}
		m.logger.Info().Msgf("Successfully cleaned up busy mount point %s", m.LocalPath)
	}
	// Create mount context; cancelling it ends the goroutine below.
	mountCtx, cancel := context.WithCancel(ctx)
	m.cancel = cancel
	if err := setRcloneConfig(m.Provider, m.WebDAVURL); err != nil {
		return fmt.Errorf("failed to set rclone config: %w", err)
	}
	// Get the mount function - try different mount methods
	mountFn, err := getMountFn()
	if err != nil {
		return fmt.Errorf("failed to get mount function for %s: %w", m.Provider, err)
	}
	go func() {
		if err := m.performMount(mountCtx, mountFn); err != nil {
			// Failure is only logged here; callers see success from Mount().
			m.logger.Error().Err(err).Msgf("Failed to mount %s at %s", m.Provider, m.LocalPath)
			return
		}
		m.mounted.Store(true)
		m.logger.Info().Msgf("Successfully mounted %s WebDAV at %s", m.Provider, m.LocalPath)
		<-mountCtx.Done() // Wait for context cancellation
	}()
	m.logger.Info().Msgf("Mount process started for %s at %s", m.Provider, m.LocalPath)
	return nil
}
// setRcloneConfig registers (or overwrites) a WebDAV remote named configName
// in rclone's in-process config so fs.NewFs can later resolve "<configName>:".
// FileSetValue has no error return, so this function always returns nil.
func setRcloneConfig(configName, webdavURL string) error {
	// Set configuration in rclone's config system using FileSetValue
	config.FileSetValue(configName, "type", "webdav")
	config.FileSetValue(configName, "url", webdavURL)
	config.FileSetValue(configName, "vendor", "other")
	// pacer_min_sleep=0 disables request pacing against the local WebDAV server.
	config.FileSetValue(configName, "pacer_min_sleep", "0")
	return nil
}
// performMount creates the rclone filesystem for this provider, translates
// the application's Rclone config into VFS and mount options, and performs
// the actual mount via mountlib. Invalid duration/size strings in the config
// are silently ignored and rclone's defaults are kept. Runs on the goroutine
// started by Mount().
func (m *Mount) performMount(ctx context.Context, mountfn mountlib.MountFn) error {
	// Create filesystem from config
	fsrc, err := fs.NewFs(ctx, fmt.Sprintf("%s:", m.Provider))
	if err != nil {
		return fmt.Errorf("failed to create filesystem: %w", err)
	}
	// Get global rclone config
	rcloneOpt := configPkg.Get().Rclone
	// Parse cache mode; unrecognized values fall back to "off".
	var cacheMode vfscommon.CacheMode
	switch rcloneOpt.VfsCacheMode {
	case "off":
		cacheMode = vfscommon.CacheModeOff
	case "minimal":
		cacheMode = vfscommon.CacheModeMinimal
	case "writes":
		cacheMode = vfscommon.CacheModeWrites
	case "full":
		cacheMode = vfscommon.CacheModeFull
	default:
		cacheMode = vfscommon.CacheModeOff
	}
	vfsOpt := &vfscommon.Options{}
	vfsOpt.Init() // Initialize VFS options with default values
	vfsOpt.CacheMode = cacheMode
	// Set VFS options based on rclone configuration
	if rcloneOpt.NoChecksum {
		vfsOpt.NoChecksum = rcloneOpt.NoChecksum
	}
	if rcloneOpt.NoModTime {
		vfsOpt.NoModTime = rcloneOpt.NoModTime
	}
	if rcloneOpt.UID != 0 {
		vfsOpt.UID = rcloneOpt.UID
	}
	if rcloneOpt.GID != 0 {
		vfsOpt.GID = rcloneOpt.GID
	}
	if rcloneOpt.Umask != "" {
		var umask vfscommon.FileMode
		if err := umask.Set(rcloneOpt.Umask); err == nil {
			vfsOpt.Umask = umask
		}
	}
	// Parse duration strings (parse failures keep the defaults).
	if rcloneOpt.DirCacheTime != "" {
		if dirCacheTime, err := time.ParseDuration(rcloneOpt.DirCacheTime); err == nil {
			vfsOpt.DirCacheTime = fs.Duration(dirCacheTime)
		}
	}
	if rcloneOpt.VfsCachePollInterval != "" {
		if vfsCachePollInterval, err := time.ParseDuration(rcloneOpt.VfsCachePollInterval); err == nil {
			vfsOpt.CachePollInterval = fs.Duration(vfsCachePollInterval)
		}
	}
	if rcloneOpt.VfsCacheMaxAge != "" {
		if vfsCacheMaxAge, err := time.ParseDuration(rcloneOpt.VfsCacheMaxAge); err == nil {
			vfsOpt.CacheMaxAge = fs.Duration(vfsCacheMaxAge)
		}
	}
	if rcloneOpt.VfsReadChunkSizeLimit != "" {
		var chunkSizeLimit fs.SizeSuffix
		if err := chunkSizeLimit.Set(rcloneOpt.VfsReadChunkSizeLimit); err == nil {
			vfsOpt.ChunkSizeLimit = chunkSizeLimit
		}
	}
	if rcloneOpt.VfsReadAhead != "" {
		var readAhead fs.SizeSuffix
		if err := readAhead.Set(rcloneOpt.VfsReadAhead); err == nil {
			vfsOpt.ReadAhead = readAhead
		}
	}
	if rcloneOpt.VfsReadChunkSize != "" {
		var chunkSize fs.SizeSuffix
		if err := chunkSize.Set(rcloneOpt.VfsReadChunkSize); err == nil {
			vfsOpt.ChunkSize = chunkSize
		}
	}
	// Parse and set buffer size globally for rclone
	if rcloneOpt.BufferSize != "" {
		var bufferSize fs.SizeSuffix
		if err := bufferSize.Set(rcloneOpt.BufferSize); err == nil {
			fs.GetConfig(ctx).BufferSize = bufferSize
		}
	}
	// NOTE: these mutate rclone's global config for this context, affecting
	// all filesystems created from it, not just this mount.
	fs.GetConfig(ctx).UseMmap = true
	if rcloneOpt.VfsCacheMaxSize != "" {
		var cacheMaxSize fs.SizeSuffix
		if err := cacheMaxSize.Set(rcloneOpt.VfsCacheMaxSize); err == nil {
			vfsOpt.CacheMaxSize = cacheMaxSize
		}
	}
	// Create mount options using global config
	mountOpt := &mountlib.Options{
		DebugFUSE:     false,
		AllowNonEmpty: true,
		AllowOther:    true,
		Daemon:        false,
		AsyncRead:     true,
		DeviceName:    fmt.Sprintf("decypharr-%s", m.Provider),
		VolumeName:    fmt.Sprintf("decypharr-%s", m.Provider),
	}
	if rcloneOpt.AttrTimeout != "" {
		if attrTimeout, err := time.ParseDuration(rcloneOpt.AttrTimeout); err == nil {
			mountOpt.AttrTimeout = fs.Duration(attrTimeout)
		}
	}
	// Set cache dir (per-provider subdirectory so providers don't collide).
	if rcloneOpt.CacheDir != "" {
		cacheDir := filepath.Join(rcloneOpt.CacheDir, m.Provider)
		if err := os.MkdirAll(cacheDir, 0755); err != nil {
			// Log error but continue
			m.logger.Error().Err(err).Msgf("Failed to create cache directory %s, using default cache", cacheDir)
		}
		if err := config.SetCacheDir(cacheDir); err != nil {
			// Log error but continue
			m.logger.Error().Err(err).Msgf("Failed to set cache directory %s, using default cache", cacheDir)
		}
	}
	// Create mount point using rclone's internal mounting
	m.mountPoint = mountlib.NewMountPoint(mountfn, m.LocalPath, fsrc, mountOpt, vfsOpt)
	// Start the mount
	_, err = m.mountPoint.Mount()
	if err != nil {
		// Cleanup mount point if it failed
		if m.mountPoint != nil && m.mountPoint.UnmountFn != nil {
			if unmountErr := m.Unmount(); unmountErr != nil {
				m.logger.Error().Err(unmountErr).Msgf("Failed to cleanup mount point %s after mount failure", m.LocalPath)
			} else {
				m.logger.Info().Msgf("Successfully cleaned up mount point %s after mount failure", m.LocalPath)
			}
		}
		return fmt.Errorf("failed to mount %s at %s: %w", m.Provider, m.LocalPath, err)
	}
	return nil
}
// Unmount tears down the FUSE mount for this provider: it shuts down the
// VFS, unmounts via mountlib and falls back to a system-level force unmount
// when that fails. Safe to call when not mounted.
func (m *Mount) Unmount() error {
	if !m.mounted.Load() {
		m.logger.Info().Msgf("Mount %s is not mounted, skipping unmount", m.Provider)
		return nil
	}
	m.mounted.Store(false)
	// BUG FIX: the nil check must come before any use of m.mountPoint.
	// Previously m.mountPoint.VFS.Shutdown() ran first and could panic on a
	// nil mountPoint.
	if m.mountPoint == nil || m.mountPoint.UnmountFn == nil {
		m.logger.Warn().Msgf("Mount point for provider %s is nil or unmount function is not set, skipping unmount", m.Provider)
		return nil
	}
	m.logger.Debug().Msgf("Shutting down VFS for provider %s", m.Provider)
	if m.mountPoint.VFS != nil {
		m.mountPoint.VFS.Shutdown()
	}
	if err := m.mountPoint.Unmount(); err != nil {
		// Try to force unmount if normal unmount fails
		if err := m.forceUnmount(); err != nil {
			m.logger.Error().Err(err).Msgf("Failed to force unmount %s at %s", m.Provider, m.LocalPath)
			return fmt.Errorf("failed to unmount %s at %s: %w", m.Provider, m.LocalPath, err)
		}
	}
	return nil
}
// forceUnmount tries system-level unmount commands in order of preference:
// plain umount, lazy umount, then fusermount and fusermount3 with lazy
// unmount. Only supported on Unix-like platforms.
func (m *Mount) forceUnmount() error {
	switch runtime.GOOS {
	case "linux", "darwin", "freebsd", "openbsd":
		// Table-driven attempts; the description is used in the success log.
		// BUG FIX: the plain-fusermount attempt previously logged
		// "using fusermount3", which was misleading.
		attempts := []struct {
			desc string
			args []string
		}{
			{"unmounted", []string{"umount", m.LocalPath}},
			{"lazy unmounted", []string{"umount", "-l", m.LocalPath}},
			{"unmounted using fusermount", []string{"fusermount", "-uz", m.LocalPath}},
			{"unmounted using fusermount3", []string{"fusermount3", "-uz", m.LocalPath}},
		}
		for _, a := range attempts {
			if err := m.tryUnmount(a.args[0], a.args[1:]...); err == nil {
				m.logger.Info().Msgf("Successfully %s %s", a.desc, m.LocalPath)
				return nil
			}
		}
		return fmt.Errorf("all unmount attempts failed for %s", m.LocalPath)
	default:
		return fmt.Errorf("force unmount not supported on %s", runtime.GOOS)
	}
}
// tryUnmount runs a single external unmount command and returns its error.
func (m *Mount) tryUnmount(command string, args ...string) error {
	return exec.Command(command, args...).Run()
}
// isMountBusy reports whether the local path already appears in the output
// of the system `mount` command. Unsupported platforms report false.
func (m *Mount) isMountBusy() bool {
	switch runtime.GOOS {
	case "linux", "darwin", "freebsd", "openbsd":
		// Check if the mount point is listed in the system mount table.
		out, err := exec.Command("mount").Output()
		if err != nil {
			return false
		}
		return strings.Contains(string(out), m.LocalPath)
	default:
		return false
	}
}
// IsMounted reports whether the mount is active and its VFS is usable.
func (m *Mount) IsMounted() bool {
	if !m.mounted.Load() {
		return false
	}
	return m.mountPoint != nil && m.mountPoint.VFS != nil
}
// RefreshDir forces the VFS to forget and re-read the given directories so
// newly added remote content becomes visible immediately.
func (m *Mount) RefreshDir(dirs []string) error {
	if m.IsMounted() {
		// Use atomic forget-and-refresh to avoid race conditions
		return m.forceRefreshVFS(dirs)
	}
	return fmt.Errorf("provider %s not properly mounted. Skipping refreshes", m.Provider)
}
// forceRefreshVFS atomically forgets and refreshes VFS directories to ensure
// immediate visibility. With an empty dirs slice it refreshes the root;
// otherwise each directory is resolved, forgotten and re-read, and per-dir
// failures are collected and joined into a single error.
func (m *Mount) forceRefreshVFS(dirs []string) error {
	vfsInstance := m.mountPoint.VFS
	root, err := vfsInstance.Root()
	if err != nil {
		return fmt.Errorf("failed to get VFS root for %s: %w", m.Provider, err)
	}
	// getDir walks path segment-by-segment from the root and returns the
	// *vfs.Dir node, or vfs.EINVAL when the path resolves to a non-directory.
	// NOTE(review): this closure reuses the outer `err`, and if an
	// intermediate segment is a file the remaining segments are silently
	// skipped before the final type check rejects it — confirm intended.
	getDir := func(path string) (*vfs.Dir, error) {
		path = strings.Trim(path, "/")
		if path == "" {
			return root, nil
		}
		segments := strings.Split(path, "/")
		var node vfs.Node = root
		for _, s := range segments {
			if dir, ok := node.(*vfs.Dir); ok {
				node, err = dir.Stat(s)
				if err != nil {
					return nil, err
				}
			}
		}
		if dir, ok := node.(*vfs.Dir); ok {
			return dir, nil
		}
		return nil, vfs.EINVAL
	}
	// If no specific directories provided, work with root
	if len(dirs) == 0 {
		// Atomically forget and refresh root
		root.ForgetAll()
		if _, err := root.ReadDirAll(); err != nil {
			return fmt.Errorf("failed to force-refresh root for %s: %w", m.Provider, err)
		}
		return nil
	}
	var errs []error
	// Process each directory atomically
	for _, dir := range dirs {
		if dir != "" {
			dir = strings.Trim(dir, "/")
			// Get the directory handle
			vfsDir, err := getDir(dir)
			if err != nil {
				errs = append(errs, fmt.Errorf("failed to find directory '%s' for force-refresh in %s: %w", dir, m.Provider, err))
				continue
			}
			// Atomically forget and refresh this specific directory
			vfsDir.ForgetAll()
			if _, err := vfsDir.ReadDirAll(); err != nil {
				errs = append(errs, fmt.Errorf("failed to force-refresh directory '%s' in %s: %w", dir, m.Provider, err))
			}
		}
	}
	if len(errs) > 0 {
		return errors.Join(errs...)
	}
	return nil
}
+413
View File
@@ -0,0 +1,413 @@
package rclone
import (
"fmt"
"os"
"os/exec"
"path/filepath"
"time"
"github.com/sirrobot01/decypharr/internal/config"
)
// Mount creates a mount using the rclone RC API with retry logic.
// It is a thin wrapper around mountWithRetry with up to 3 retries.
func (m *Manager) Mount(provider, webdavURL string) error {
	return m.mountWithRetry(provider, webdavURL, 3)
}
// mountWithRetry attempts to mount, retrying up to maxRetries additional
// times with a linear backoff (2s, 4s, ...). It first waits up to 30s for
// the RC server to become ready and returns the LAST mount error when every
// attempt fails (previously the underlying error was discarded).
func (m *Manager) mountWithRetry(provider, webdavURL string, maxRetries int) error {
	if !m.IsReady() {
		if err := m.WaitForReady(30 * time.Second); err != nil {
			return fmt.Errorf("rclone RC server not ready: %w", err)
		}
	}
	var lastErr error
	for attempt := 0; attempt <= maxRetries; attempt++ {
		if attempt > 0 {
			// Wait before retry (linear backoff).
			wait := time.Duration(attempt*2) * time.Second
			m.logger.Debug().
				Int("attempt", attempt).
				Str("provider", provider).
				Msg("Retrying mount operation")
			time.Sleep(wait)
		}
		if err := m.performMount(provider, webdavURL); err != nil {
			lastErr = err
			m.logger.Error().
				Err(err).
				Str("provider", provider).
				Int("attempt", attempt+1).
				Msg("Mount attempt failed")
			continue
		}
		return nil // Success
	}
	// Wrap the last failure so callers can see why mounting ultimately failed.
	return fmt.Errorf("mount failed for %s: %w", provider, lastErr)
}
// performMount performs a single mount attempt via the rclone RC API.
// It ensures the mount directory exists, registers a WebDAV remote named
// "decypharr-<provider>", issues a mount/mount request with the configured
// mount/VFS/cache options and records the resulting MountInfo on success.
func (m *Manager) performMount(provider, webdavURL string) error {
	cfg := config.Get()
	mountPath := filepath.Join(cfg.Rclone.MountPath, provider)
	cacheDir := ""
	if cfg.Rclone.CacheDir != "" {
		cacheDir = filepath.Join(cfg.Rclone.CacheDir, provider)
	}
	// Create mount directory
	if err := os.MkdirAll(mountPath, 0755); err != nil {
		return fmt.Errorf("failed to create mount directory %s: %w", mountPath, err)
	}
	// Check if already mounted
	m.mountsMutex.RLock()
	existingMount, exists := m.mounts[provider]
	m.mountsMutex.RUnlock()
	if exists && existingMount.Mounted {
		m.logger.Info().Str("provider", provider).Str("path", mountPath).Msg("Already mounted")
		return nil
	}
	// Clean up any stale mount first
	if exists && !existingMount.Mounted {
		m.forceUnmountPath(mountPath)
	}
	// Create rclone config for this provider
	configName := fmt.Sprintf("decypharr-%s", provider)
	if err := m.createConfig(configName, webdavURL); err != nil {
		return fmt.Errorf("failed to create rclone config: %w", err)
	}
	// Build mount options up front — avoids the repeated (and unchecked)
	// mountArgs["mountOpt"].(map[string]interface{}) type assertions the
	// previous version performed.
	mountOpt := map[string]interface{}{
		"AllowNonEmpty": true,
		"AllowOther":    true,
		"DebugFUSE":     false,
		"DeviceName":    fmt.Sprintf("decypharr-%s", provider),
		"VolumeName":    fmt.Sprintf("decypharr-%s", provider),
	}
	if cfg.Rclone.UID != 0 {
		mountOpt["UID"] = cfg.Rclone.UID
	}
	if cfg.Rclone.GID != 0 {
		mountOpt["GID"] = cfg.Rclone.GID
	}
	if cfg.Rclone.AttrTimeout != "" {
		// Only forward a valid duration; invalid values keep rclone's default.
		if attrTimeout, err := time.ParseDuration(cfg.Rclone.AttrTimeout); err == nil {
			mountOpt["AttrTimeout"] = attrTimeout.String()
		}
	}
	// Prepare mount arguments
	mountArgs := map[string]interface{}{
		"fs":         fmt.Sprintf("%s:", configName),
		"mountPoint": mountPath,
		"mountType":  "mount", // Use standard FUSE mount
		"mountOpt":   mountOpt,
	}
	configOpts := map[string]interface{}{
		"BufferSize": cfg.Rclone.BufferSize,
	}
	if cacheDir != "" {
		// Create cache directory if specified; failure is non-fatal and
		// rclone falls back to its default cache location.
		if err := os.MkdirAll(cacheDir, 0755); err != nil {
			m.logger.Warn().Str("cacheDir", cacheDir).Msg("Failed to create cache directory")
		}
		configOpts["CacheDir"] = cacheDir
	}
	mountArgs["_config"] = configOpts
	// Add VFS options if caching is enabled
	if cfg.Rclone.VfsCacheMode != "off" {
		vfsOpt := map[string]interface{}{
			"CacheMode": cfg.Rclone.VfsCacheMode,
		}
		if cfg.Rclone.VfsCacheMaxAge != "" {
			vfsOpt["CacheMaxAge"] = cfg.Rclone.VfsCacheMaxAge
		}
		if cfg.Rclone.VfsCacheMaxSize != "" {
			vfsOpt["CacheMaxSize"] = cfg.Rclone.VfsCacheMaxSize
		}
		if cfg.Rclone.VfsCachePollInterval != "" {
			vfsOpt["CachePollInterval"] = cfg.Rclone.VfsCachePollInterval
		}
		if cfg.Rclone.VfsReadChunkSize != "" {
			vfsOpt["ChunkSize"] = cfg.Rclone.VfsReadChunkSize
		}
		if cfg.Rclone.VfsReadAhead != "" {
			vfsOpt["ReadAhead"] = cfg.Rclone.VfsReadAhead
		}
		if cfg.Rclone.NoChecksum {
			vfsOpt["NoChecksum"] = cfg.Rclone.NoChecksum
		}
		if cfg.Rclone.NoModTime {
			vfsOpt["NoModTime"] = cfg.Rclone.NoModTime
		}
		mountArgs["vfsOpt"] = vfsOpt
	}
	// Make the mount request
	req := RCRequest{
		Command: "mount/mount",
		Args:    mountArgs,
	}
	if _, err := m.makeRequest(req); err != nil {
		// Clean up mount point on failure
		m.forceUnmountPath(mountPath)
		return fmt.Errorf("failed to create mount for %s: %w", provider, err)
	}
	// Record the successful mount so health checks and unmounts can find it.
	mountInfo := &MountInfo{
		Provider:   provider,
		LocalPath:  mountPath,
		WebDAVURL:  webdavURL,
		Mounted:    true,
		MountedAt:  time.Now().Format(time.RFC3339),
		ConfigName: configName,
	}
	m.mountsMutex.Lock()
	m.mounts[provider] = mountInfo
	m.mountsMutex.Unlock()
	return nil
}
// Unmount unmounts a specific provider. Exported wrapper around unmount.
func (m *Manager) Unmount(provider string) error {
	return m.unmount(provider)
}
// unmount is the internal unmount function. It tries an RC "mount/unmount"
// first and falls back to system-level force unmount when the RC call fails
// OR could not be attempted because the server is not ready. The mount is
// marked unmounted in either case, recording any RC error.
func (m *Manager) unmount(provider string) error {
	m.mountsMutex.RLock()
	mountInfo, exists := m.mounts[provider]
	m.mountsMutex.RUnlock()
	if !exists || !mountInfo.Mounted {
		m.logger.Info().Str("provider", provider).Msg("Mount not found or already unmounted")
		return nil
	}
	m.logger.Info().Str("provider", provider).Str("path", mountInfo.LocalPath).Msg("Unmounting")
	// Try RC unmount first — only possible while the server is responsive.
	var rcErr error
	rcAttempted := false
	if m.IsReady() {
		req := RCRequest{
			Command: "mount/unmount",
			Args: map[string]interface{}{
				"mountPoint": mountInfo.LocalPath,
			},
		}
		rcAttempted = true
		_, rcErr = m.makeRequest(req)
	}
	// BUG FIX: previously a non-ready server left rcErr nil, so NEITHER the
	// RC unmount nor the force unmount ran, yet the mount was marked
	// unmounted. Force-unmount whenever the RC path failed or was skipped.
	if !rcAttempted || rcErr != nil {
		m.logger.Warn().Err(rcErr).Str("provider", provider).Msg("RC unmount failed, trying force unmount")
		if err := m.forceUnmountPath(mountInfo.LocalPath); err != nil {
			m.logger.Error().Err(err).Str("provider", provider).Msg("Force unmount failed")
			// Don't return error here, update the state anyway
		}
	}
	// Update mount info
	m.mountsMutex.Lock()
	if info, exists := m.mounts[provider]; exists {
		info.Mounted = false
		info.Error = ""
		if rcErr != nil {
			info.Error = rcErr.Error()
		}
	}
	m.mountsMutex.Unlock()
	m.logger.Info().Str("provider", provider).Msg("Unmount completed")
	return nil
}
// UnmountAll unmounts every currently-mounted provider, returning the last
// error encountered (nil when all succeed).
func (m *Manager) UnmountAll() error {
	// Snapshot the active providers so unmount runs without holding the lock.
	m.mountsMutex.RLock()
	var active []string
	for provider, mount := range m.mounts {
		if mount.Mounted {
			active = append(active, provider)
		}
	}
	m.mountsMutex.RUnlock()
	var lastError error
	for _, provider := range active {
		err := m.unmount(provider)
		if err == nil {
			continue
		}
		lastError = err
		m.logger.Error().Err(err).Str("provider", provider).Msg("Failed to unmount")
	}
	return lastError
}
// GetMountInfo returns a copy of the mount state for provider and whether
// the provider is known. The copy keeps callers from mutating shared state.
func (m *Manager) GetMountInfo(provider string) (*MountInfo, bool) {
	m.mountsMutex.RLock()
	defer m.mountsMutex.RUnlock()
	if info, exists := m.mounts[provider]; exists {
		snapshot := *info
		return &snapshot, true
	}
	return nil, false
}
// GetAllMounts returns copies of every known mount keyed by provider name.
func (m *Manager) GetAllMounts() map[string]*MountInfo {
	m.mountsMutex.RLock()
	defer m.mountsMutex.RUnlock()
	snapshot := make(map[string]*MountInfo, len(m.mounts))
	for provider, info := range m.mounts {
		// Copy each entry to avoid races with concurrent updates.
		copied := *info
		snapshot[provider] = &copied
	}
	return snapshot
}
// IsMounted reports whether the given provider currently has an active mount.
func (m *Manager) IsMounted(provider string) bool {
	if info, exists := m.GetMountInfo(provider); exists {
		return info.Mounted
	}
	return false
}
// RefreshDir forgets then re-reads directories in the provider's VFS cache
// via the RC "vfs/forget" and "vfs/refresh" commands. With an empty dirs
// slice the root is refreshed.
func (m *Manager) RefreshDir(provider string, dirs []string) error {
	if !m.IsReady() {
		return fmt.Errorf("rclone RC server not ready")
	}
	mountInfo, exists := m.GetMountInfo(provider)
	if !exists || !mountInfo.Mounted {
		return fmt.Errorf("provider %s not mounted", provider)
	}
	// If no specific directories provided, refresh root
	if len(dirs) == 0 {
		dirs = []string{"/"}
	}
	args := map[string]interface{}{
		"fs": fmt.Sprintf("decypharr-%s:", provider),
	}
	// Number only the non-empty entries: rclone expects the keys "dir",
	// "dir2", "dir3", ... with no gaps. BUG FIX: the previous version keyed
	// on the slice index, so an empty dirs[0] produced "dir2" without "dir".
	n := 0
	for _, dir := range dirs {
		if dir == "" {
			continue
		}
		n++
		if n == 1 {
			args["dir"] = dir
		} else {
			args[fmt.Sprintf("dir%d", n)] = dir
		}
	}
	// Forget first so refresh re-reads from the remote.
	for _, command := range []string{"vfs/forget", "vfs/refresh"} {
		req := RCRequest{
			Command: command,
			Args:    args,
		}
		if _, err := m.makeRequest(req); err != nil {
			m.logger.Error().Err(err).
				Str("provider", provider).
				Msg("Failed to refresh directory")
			// %v: dirs is a []string, which %s renders awkwardly.
			return fmt.Errorf("failed to refresh directory %v for provider %s: %w", dirs, provider, err)
		}
	}
	return nil
}
// createConfig registers a WebDAV remote for the provider via the RC
// "config/create" command.
func (m *Manager) createConfig(configName, webdavURL string) error {
	params := map[string]interface{}{
		"url":             webdavURL,
		"vendor":          "other",
		"pacer_min_sleep": "0",
	}
	req := RCRequest{
		Command: "config/create",
		Args: map[string]interface{}{
			"name":       configName,
			"type":       "webdav",
			"parameters": params,
		},
	}
	if _, err := m.makeRequest(req); err != nil {
		return fmt.Errorf("failed to create config %s: %w", configName, err)
	}
	m.logger.Trace().
		Str("config_name", configName).
		Str("webdav_url", webdavURL).
		Msg("Rclone config created")
	return nil
}
// forceUnmountPath attempts to force unmount a path using system commands,
// trying plain umount, lazy umount, fusermount and fusermount3 in turn.
func (m *Manager) forceUnmountPath(mountPath string) error {
	for _, command := range [][]string{
		{"umount", mountPath},
		{"umount", "-l", mountPath}, // lazy unmount
		{"fusermount", "-uz", mountPath},
		{"fusermount3", "-uz", mountPath},
	} {
		if err := m.tryUnmountCommand(command...); err != nil {
			continue
		}
		m.logger.Info().
			Strs("command", command).
			Str("path", mountPath).
			Msg("Successfully unmounted using system command")
		return nil
	}
	return fmt.Errorf("all force unmount attempts failed for %s", mountPath)
}
// tryUnmountCommand runs a single unmount command under the manager's
// context so it is cancelled when the manager shuts down.
func (m *Manager) tryUnmountCommand(args ...string) error {
	if len(args) == 0 {
		return fmt.Errorf("no command provided")
	}
	return exec.CommandContext(m.ctx, args[0], args[1:]...).Run()
}
+140
View File
@@ -0,0 +1,140 @@
package rclone
import (
"context"
"fmt"
"time"
)
// HealthCheck performs comprehensive health checks on the rclone system:
// server started, server ready, server responding to pings, and every
// recorded mount passing its health probe.
func (m *Manager) HealthCheck() error {
	// serverStarted is written under m.mu in Start(); read it under the same
	// lock to avoid a data race (previously read unlocked).
	m.mu.RLock()
	started := m.serverStarted
	m.mu.RUnlock()
	if !started {
		return fmt.Errorf("rclone RC server is not started")
	}
	if !m.IsReady() {
		return fmt.Errorf("rclone RC server is not ready")
	}
	// Check if we can communicate with the server
	if !m.pingServer() {
		return fmt.Errorf("rclone RC server is not responding")
	}
	// Check mounts health
	m.mountsMutex.RLock()
	unhealthyMounts := make([]string, 0)
	for provider, mount := range m.mounts {
		if mount.Mounted && !m.checkMountHealth(provider) {
			unhealthyMounts = append(unhealthyMounts, provider)
		}
	}
	m.mountsMutex.RUnlock()
	if len(unhealthyMounts) > 0 {
		return fmt.Errorf("unhealthy mounts detected: %v", unhealthyMounts)
	}
	return nil
}
// checkMountHealth probes a mount by listing the remote's root directory
// through the RC API; any error marks the mount unhealthy.
func (m *Manager) checkMountHealth(provider string) bool {
	req := RCRequest{
		Command: "operations/list",
		Args: map[string]interface{}{
			"fs":     fmt.Sprintf("decypharr-%s:", provider),
			"remote": "/",
		},
	}
	if _, err := m.makeRequest(req); err != nil {
		return false
	}
	return true
}
// RecoverMount attempts to recover a failed mount by unmounting cleanly,
// pausing briefly, then remounting against the recorded WebDAV URL.
func (m *Manager) RecoverMount(provider string) error {
	m.mountsMutex.RLock()
	mountInfo, exists := m.mounts[provider]
	m.mountsMutex.RUnlock()
	if !exists {
		return fmt.Errorf("mount for provider %s does not exist", provider)
	}
	m.logger.Warn().Str("provider", provider).Msg("Attempting to recover mount")
	// Best-effort clean unmount; recovery proceeds even when it fails.
	if err := m.unmount(provider); err != nil {
		m.logger.Error().Err(err).Str("provider", provider).Msg("Failed to unmount during recovery")
	}
	// Give the kernel/FUSE a moment to release the mount point.
	time.Sleep(2 * time.Second)
	if err := m.Mount(provider, mountInfo.WebDAVURL); err != nil {
		return fmt.Errorf("failed to recover mount for %s: %w", provider, err)
	}
	m.logger.Info().Str("provider", provider).Msg("Successfully recovered mount")
	return nil
}
// MonitorMounts continuously monitors mount health every 30 seconds and
// attempts recovery of unhealthy mounts until ctx is cancelled. It is a
// no-op when the RC server was never started.
func (m *Manager) MonitorMounts(ctx context.Context) {
	// serverStarted is written under m.mu in Start(); read it under the same
	// lock to avoid a data race (previously read unlocked).
	m.mu.RLock()
	started := m.serverStarted
	m.mu.RUnlock()
	if !started {
		return
	}
	ticker := time.NewTicker(30 * time.Second) // Check every 30 seconds
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			m.logger.Debug().Msg("Mount monitoring stopped")
			return
		case <-ticker.C:
			m.performMountHealthCheck()
		}
	}
}
// performMountHealthCheck probes every active mount and kicks off
// asynchronous recovery for any that fail their health check.
func (m *Manager) performMountHealthCheck() {
	if !m.IsReady() {
		return
	}
	// Snapshot the active providers so probing runs without holding the lock.
	m.mountsMutex.RLock()
	var active []string
	for provider, mount := range m.mounts {
		if mount.Mounted {
			active = append(active, provider)
		}
	}
	m.mountsMutex.RUnlock()
	for _, provider := range active {
		if m.checkMountHealth(provider) {
			continue
		}
		m.logger.Warn().Str("provider", provider).Msg("Mount health check failed, attempting recovery")
		// Mark the mount as unhealthy before recovery starts.
		m.mountsMutex.Lock()
		if mount, exists := m.mounts[provider]; exists {
			mount.Error = "Health check failed"
			mount.Mounted = false
		}
		m.mountsMutex.Unlock()
		// Recover in the background so one slow mount doesn't block the rest.
		go func(provider string) {
			if err := m.RecoverMount(provider); err != nil {
				m.logger.Error().Err(err).Str("provider", provider).Msg("Failed to recover mount")
			}
		}(provider)
	}
}
+43
View File
@@ -0,0 +1,43 @@
//go:build !windows
package rclone
import (
"errors"
"os/exec"
"syscall"
)
// WasHardTerminated reports true iff the process was ended by SIGKILL or SIGTERM.
func WasHardTerminated(err error) bool {
	var ee *exec.ExitError
	if !errors.As(err, &ee) {
		return false
	}
	status, ok := ee.Sys().(syscall.WaitStatus)
	if !ok || !status.Signaled() {
		return false
	}
	switch status.Signal() {
	case syscall.SIGKILL, syscall.SIGTERM:
		return true
	default:
		return false
	}
}
// ExitCode returns the numeric exit code when available. Processes killed by
// a signal report the conventional shell code 128 + signal number.
func ExitCode(err error) (int, bool) {
	var ee *exec.ExitError
	if !errors.As(err, &ee) {
		return 0, false
	}
	status, ok := ee.Sys().(syscall.WaitStatus)
	switch {
	case !ok:
		return 0, false
	case status.Exited():
		return status.ExitStatus(), true
	case status.Signaled():
		// Shells report "killed by signal N" as 128+N.
		return 128 + int(status.Signal()), true
	default:
		return 0, false
	}
}
+35
View File
@@ -0,0 +1,35 @@
//go:build windows
package rclone
import (
"errors"
"os/exec"
"syscall"
)
// WasHardTerminated reports whether the process ended unsuccessfully.
// Windows has no POSIX signals, so any non-zero exit status counts as a
// "hard" termination here.
func WasHardTerminated(err error) bool {
	var ee *exec.ExitError
	if !errors.As(err, &ee) {
		return false
	}
	ws, ok := ee.Sys().(syscall.WaitStatus)
	if !ok {
		return false
	}
	// BUG FIX: on Windows syscall.WaitStatus has an ExitCode FIELD, not an
	// ExitCode() method — ws.ExitCode() did not compile for windows builds.
	// Use the ExitStatus() accessor, which exists on all platforms.
	return ws.ExitStatus() != 0
}
// ExitCode returns the process exit code when available.
func ExitCode(err error) (int, bool) {
	var ee *exec.ExitError
	if !errors.As(err, &ee) {
		return 0, false
	}
	ws, ok := ee.Sys().(syscall.WaitStatus)
	if !ok {
		return 0, false
	}
	// BUG FIX: ws.ExitCode() does not compile on Windows (ExitCode is a
	// struct field there); ExitStatus() is the portable accessor.
	return ws.ExitStatus(), true
}
+377
View File
@@ -0,0 +1,377 @@
package rclone
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"net/http"
"os"
"os/exec"
"path/filepath"
"sync"
"time"
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/logger"
)
// Manager handles the rclone RC server and provides mount operations
// against it over HTTP.
type Manager struct {
	cmd           *exec.Cmd // the running `rclone rcd` process
	rcPort        string    // TCP port the RC server listens on
	rcUser        string    // RC auth user — appears unused here (server runs with --rc-no-auth); TODO confirm
	rcPass        string    // RC auth password — appears unused here; TODO confirm
	configDir     string    // directory holding rclone.conf
	mounts        map[string]*MountInfo // provider -> mount state; guarded by mountsMutex
	mountsMutex   sync.RWMutex
	logger        zerolog.Logger
	ctx           context.Context // manager lifetime context
	cancel        context.CancelFunc
	httpClient    *http.Client  // client for RC API calls (30s timeout)
	serverReady   chan struct{} // signals that the RC server became reachable
	serverStarted bool          // guarded by mu
	mu            sync.RWMutex
}

// MountInfo records the state of one provider's mount as seen by the Manager.
type MountInfo struct {
	Provider   string `json:"provider"`
	LocalPath  string `json:"local_path"`
	WebDAVURL  string `json:"webdav_url"`
	Mounted    bool   `json:"mounted"`
	MountedAt  string `json:"mounted_at,omitempty"` // RFC3339 timestamp of the last successful mount
	ConfigName string `json:"config_name"`          // rclone remote name ("decypharr-<provider>")
	Error      string `json:"error,omitempty"`      // last unmount/health error, if any
}

// RCRequest is a single rclone remote-control API call.
type RCRequest struct {
	Command string                 `json:"command"`
	Args    map[string]interface{} `json:"args,omitempty"`
}

// RCResponse is the generic result envelope of an RC call.
type RCResponse struct {
	Result interface{} `json:"result,omitempty"`
	Error  string      `json:"error,omitempty"`
}

// CoreStatsResponse bundles transfer and core statistics reported by rclone.
type CoreStatsResponse struct {
	TransferStats map[string]interface{} `json:"transferStats"`
	CoreStats     map[string]interface{} `json:"coreStats"`
}
// NewManager creates a new rclone RC manager
func NewManager() *Manager {
	cfg := config.Get()
	log := logger.New("rclone")

	// Make sure the directory that will hold rclone.conf exists before the
	// server is ever started.
	confDir := filepath.Join(cfg.Path, "rclone")
	if err := os.MkdirAll(confDir, 0755); err != nil {
		log.Error().Err(err).Msg("Failed to create rclone config directory")
	}

	ctx, cancel := context.WithCancel(context.Background())
	return &Manager{
		rcPort:      "5572",
		configDir:   confDir,
		mounts:      make(map[string]*MountInfo),
		logger:      log,
		ctx:         ctx,
		cancel:      cancel,
		httpClient:  &http.Client{Timeout: 30 * time.Second},
		serverReady: make(chan struct{}),
	}
}
// Start starts the rclone RC server
// It launches `rclone rcd` as a child process and monitors it from a
// background goroutine. It is a no-op when the server is already running
// or rclone is disabled in the configuration; the returned error only
// covers launch failures — runtime exits are logged by the monitor.
func (m *Manager) Start(ctx context.Context) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	if m.serverStarted {
		return nil
	}
	cfg := config.Get()
	if !cfg.Rclone.Enabled {
		m.logger.Info().Msg("Rclone is disabled, skipping RC server startup")
		return nil
	}
	args := []string{
		"rcd",
		"--rc-addr", ":" + m.rcPort,
		"--rc-no-auth", // We'll handle auth at the application level
		"--config", filepath.Join(m.configDir, "rclone.conf"),
		"--log-level", "INFO",
	}
	m.cmd = exec.CommandContext(ctx, "rclone", args...)
	m.cmd.Dir = m.configDir
	// Capture output for debugging
	// NOTE(review): stdout/stderr are captured but never read or logged
	// anywhere — either surface them on error or drop the capture.
	var stdout, stderr bytes.Buffer
	m.cmd.Stdout = &stdout
	m.cmd.Stderr = &stderr
	if err := m.cmd.Start(); err != nil {
		return fmt.Errorf("failed to start rclone RC server: %w", err)
	}
	m.serverStarted = true
	// Wait for server to be ready in a goroutine
	go func() {
		defer func() {
			if r := recover(); r != nil {
				m.logger.Error().Interface("panic", r).Msg("Panic in rclone RC server monitor")
			}
		}()
		// waitForServer gives up after ~30 attempts, but serverReady is
		// closed either way — so IsReady() can report true even when the
		// server never answered. NOTE(review): confirm this is intended.
		m.waitForServer()
		close(m.serverReady)
		// Start mount monitoring once server is ready
		go func() {
			defer func() {
				if r := recover(); r != nil {
					m.logger.Error().Interface("panic", r).Msg("Panic in mount monitor")
				}
			}()
			m.MonitorMounts(ctx)
		}()
		// Wait for command to finish and log output
		// NOTE(review): Stop() also calls m.cmd.Wait(); os/exec allows only
		// one Wait per Cmd, so whichever call runs second gets a spurious
		// error. Worth consolidating the two waits.
		err := m.cmd.Wait()
		switch {
		case err == nil:
			m.logger.Info().Msg("Rclone RC server exited normally")
		case errors.Is(err, context.Canceled):
			m.logger.Info().Msg("Rclone RC server terminated: context canceled")
		case WasHardTerminated(err): // SIGKILL on *nix; non-zero exit on Windows
			m.logger.Info().Msg("Rclone RC server hard-terminated")
		default:
			if code, ok := ExitCode(err); ok {
				m.logger.Debug().Int("exit_code", code).Err(err).
					Msg("Rclone RC server error")
			} else {
				m.logger.Debug().Err(err).Msg("Rclone RC server error (no exit code)")
			}
		}
	}()
	return nil
}
// Stop stops the rclone RC server and unmounts all mounts
// Shutdown order: unmount every active mount in parallel (30s budget),
// cancel the manager context (aborts in-flight RC HTTP requests), signal
// the rclone process (interrupt first, kill as fallback), then remove any
// now-empty mount directories. Safe to call when the server never started.
func (m *Manager) Stop() error {
	m.mu.Lock()
	defer m.mu.Unlock()
	if !m.serverStarted {
		return nil
	}
	m.logger.Info().Msg("Stopping rclone RC server")
	// Unmount all mounts first. Snapshot the list under the read lock so
	// the mutex is not held during the slow unmount calls.
	m.mountsMutex.RLock()
	mountList := make([]*MountInfo, 0, len(m.mounts))
	for _, mount := range m.mounts {
		if mount.Mounted {
			mountList = append(mountList, mount)
		}
	}
	m.mountsMutex.RUnlock()
	// Unmount in parallel
	var wg sync.WaitGroup
	for _, mount := range mountList {
		wg.Add(1)
		go func(mount *MountInfo) {
			defer wg.Done()
			if err := m.unmount(mount.Provider); err != nil {
				m.logger.Error().Err(err).Str("provider", mount.Provider).Msg("Failed to unmount during shutdown")
			}
		}(mount)
	}
	// Wait for unmounts with timeout
	done := make(chan struct{})
	go func() {
		wg.Wait()
		close(done)
	}()
	select {
	case <-done:
		m.logger.Info().Msg("All mounts unmounted successfully")
	case <-time.After(30 * time.Second):
		m.logger.Warn().Msg("Timeout waiting for mounts to unmount, proceeding with shutdown")
	}
	// Cancel context and stop process
	m.cancel()
	if m.cmd != nil && m.cmd.Process != nil {
		// Try graceful shutdown first
		if err := m.cmd.Process.Signal(os.Interrupt); err != nil {
			m.logger.Warn().Err(err).Msg("Failed to send interrupt signal, using kill")
			if killErr := m.cmd.Process.Kill(); killErr != nil {
				m.logger.Error().Err(killErr).Msg("Failed to kill rclone process")
				return killErr
			}
		}
		// Wait for process to exit with timeout
		// NOTE(review): the monitor goroutine in Start() also calls
		// m.cmd.Wait(); os/exec allows only one Wait per Cmd, so this
		// second call may return a spurious "Wait was already called" error.
		done := make(chan error, 1)
		go func() {
			done <- m.cmd.Wait()
		}()
		select {
		case err := <-done:
			if err != nil && !errors.Is(err, context.Canceled) && !WasHardTerminated(err) {
				m.logger.Warn().Err(err).Msg("Rclone process exited with error")
			}
		case <-time.After(10 * time.Second):
			m.logger.Warn().Msg("Timeout waiting for rclone to exit, force killing")
			if err := m.cmd.Process.Kill(); err != nil {
				m.logger.Error().Err(err).Msg("Failed to force kill rclone process")
				return err
			}
			// Wait a bit more for the kill to take effect
			select {
			case <-done:
				m.logger.Info().Msg("Rclone process killed successfully")
			case <-time.After(5 * time.Second):
				m.logger.Error().Msg("Process may still be running after kill")
			}
		}
	}
	// Clean up any remaining mount directories
	cfg := config.Get()
	if cfg.Rclone.MountPath != "" {
		m.cleanupMountDirectories(cfg.Rclone.MountPath)
	}
	m.serverStarted = false
	m.logger.Info().Msg("Rclone RC server stopped")
	return nil
}
// cleanupMountDirectories removes empty mount directories
// (the string parameter is accepted for interface stability but unused;
// paths come from the tracked mounts instead).
func (m *Manager) cleanupMountDirectories(_ string) {
	m.mountsMutex.RLock()
	defer m.mountsMutex.RUnlock()
	for _, info := range m.mounts {
		if info.LocalPath == "" {
			continue
		}
		// os.Remove only succeeds on an empty directory; a failure simply
		// means the directory is non-empty (or already gone), which is fine
		// and intentionally not logged.
		if err := os.Remove(info.LocalPath); err == nil {
			m.logger.Debug().Str("path", info.LocalPath).Msg("Removed empty mount directory")
		}
	}
}
// waitForServer polls the RC server once per second until it responds,
// the manager context is canceled, or ~30 attempts are exhausted. Unlike a
// bare time.Sleep loop, the wait itself is interruptible, so shutdown is
// not delayed by up to a second after cancellation.
func (m *Manager) waitForServer() {
	const maxAttempts = 30
	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()
	for i := 0; i < maxAttempts; i++ {
		if m.ctx.Err() != nil {
			return
		}
		if m.pingServer() {
			m.logger.Info().Msg("Rclone RC server is ready")
			return
		}
		// Wait for the next tick, but bail out immediately on cancellation.
		select {
		case <-m.ctx.Done():
			return
		case <-ticker.C:
		}
	}
	m.logger.Error().Msg("Rclone RC server not responding - mount operations will be disabled")
}
// pingServer checks if the RC server is responding
// (a successful core/version call is treated as proof of life).
func (m *Manager) pingServer() bool {
	if _, err := m.makeRequest(RCRequest{Command: "core/version"}); err != nil {
		return false
	}
	return true
}
// makeRequest posts an RC command to the local rclone server and decodes
// the JSON reply. The command name becomes the URL path and Args the JSON
// body. An "error" field in the body takes precedence over the HTTP status
// so rclone's own message is surfaced when both indicate failure.
func (m *Manager) makeRequest(req RCRequest) (*RCResponse, error) {
	payload, err := json.Marshal(req.Args)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal request: %w", err)
	}

	endpoint := fmt.Sprintf("http://localhost:%s/%s", m.rcPort, req.Command)
	httpReq, err := http.NewRequestWithContext(m.ctx, http.MethodPost, endpoint, bytes.NewReader(payload))
	if err != nil {
		return nil, fmt.Errorf("failed to create request: %w", err)
	}
	httpReq.Header.Set("Content-Type", "application/json")

	resp, err := m.httpClient.Do(httpReq)
	if err != nil {
		return nil, fmt.Errorf("failed to make request: %w", err)
	}
	defer func() {
		if cerr := resp.Body.Close(); cerr != nil {
			m.logger.Debug().Err(cerr).Msg("Failed to close response body")
		}
	}()

	var rcResp RCResponse
	if err = json.NewDecoder(resp.Body).Decode(&rcResp); err != nil {
		return nil, fmt.Errorf("failed to decode response: %w", err)
	}
	if rcResp.Error != "" {
		return nil, fmt.Errorf("rclone error: %s", rcResp.Error)
	}
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("unexpected status code: %d - %s", resp.StatusCode, http.StatusText(resp.StatusCode))
	}
	return &rcResp, nil
}
// IsReady returns true if the RC server is ready
// (non-blocking check on the serverReady channel).
func (m *Manager) IsReady() bool {
	select {
	case <-m.serverReady:
		return true
	default:
	}
	return false
}
// WaitForReady waits for the RC server to be ready
// It blocks until the server signals readiness, the timeout elapses, or
// the manager context is canceled — whichever happens first.
func (m *Manager) WaitForReady(timeout time.Duration) error {
	timer := time.NewTimer(timeout)
	defer timer.Stop()
	select {
	case <-m.serverReady:
		return nil
	case <-m.ctx.Done():
		return m.ctx.Err()
	case <-timer.C:
		return fmt.Errorf("timeout waiting for rclone RC server to be ready")
	}
}
// GetLogger returns the manager's zerolog logger so dependent components
// (e.g. Mount) can share it.
// NOTE(review): Go convention would name this Logger(), but renaming would
// break existing callers, so the Get prefix is kept.
func (m *Manager) GetLogger() zerolog.Logger {
	return m.logger
}
+120
View File
@@ -0,0 +1,120 @@
package rclone
import (
"context"
"fmt"
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/config"
"net/url"
"path/filepath"
)
// Mount represents a mount using the rclone RC client
type Mount struct {
	Provider  string // debrid provider name; also used as the rclone remote name
	LocalPath string // local filesystem path where the remote is mounted
	WebDAVURL string // full WebDAV URL of the provider endpoint (base URL + provider)
	logger    zerolog.Logger
	rcManager *Manager // RC server manager that performs the actual mount operations
}
// NewMount creates a new RC-based mount
// The remote URL is <webdavURL>/<provider>; plain string concatenation is
// used as a fallback if URL joining fails.
func NewMount(provider, webdavURL string, rcManager *Manager) *Mount {
	cfg := config.Get()

	remote, err := url.JoinPath(webdavURL, provider)
	if err != nil {
		remote = fmt.Sprintf("%s/%s", webdavURL, provider)
	}

	return &Mount{
		Provider:  provider,
		LocalPath: filepath.Join(cfg.Rclone.MountPath, provider),
		WebDAVURL: remote,
		rcManager: rcManager,
		logger:    rcManager.GetLogger(),
	}
}
// Mount creates the mount using rclone RC. It is idempotent: if the
// provider is already mounted it returns nil immediately.
// NOTE(review): ctx is currently unused — the RC manager call is
// synchronous; the parameter is kept for interface stability.
func (m *Mount) Mount(ctx context.Context) error {
	if m.rcManager == nil {
		return fmt.Errorf("rclone manager is not available")
	}
	// Check if already mounted
	if m.rcManager.IsMounted(m.Provider) {
		m.logger.Info().Msgf("Mount %s is already mounted at %s", m.Provider, m.LocalPath)
		return nil
	}
	m.logger.Info().
		Str("provider", m.Provider).
		Str("webdav_url", m.WebDAVURL).
		Str("mount_path", m.LocalPath).
		Msg("Creating mount via RC")
	if err := m.rcManager.Mount(m.Provider, m.WebDAVURL); err != nil {
		// Preserve the underlying cause instead of discarding it: the
		// original dropped err from both the log entry and the returned
		// error, leaving callers with no diagnostic detail.
		m.logger.Error().Err(err).Str("provider", m.Provider).Msg("Mount operation failed")
		return fmt.Errorf("mount failed for %s: %w", m.Provider, err)
	}
	m.logger.Info().Msgf("Successfully mounted %s WebDAV at %s via RC", m.Provider, m.LocalPath)
	return nil
}
// Unmount removes the mount using rclone RC
// A missing manager or an already-unmounted provider is treated as
// success (logged, nil returned).
func (m *Mount) Unmount() error {
	switch {
	case m.rcManager == nil:
		m.logger.Warn().Msg("Rclone manager is not available, skipping unmount")
		return nil
	case !m.rcManager.IsMounted(m.Provider):
		m.logger.Info().Msgf("Mount %s is not mounted, skipping unmount", m.Provider)
		return nil
	}

	m.logger.Info().Str("provider", m.Provider).Msg("Unmounting via RC")
	if err := m.rcManager.Unmount(m.Provider); err != nil {
		return fmt.Errorf("failed to unmount %s via RC: %w", m.Provider, err)
	}
	m.logger.Info().Msgf("Successfully unmounted %s", m.Provider)
	return nil
}
// IsMounted checks if the mount is active via RC
// (false when no RC manager is attached).
func (m *Mount) IsMounted() bool {
	return m.rcManager != nil && m.rcManager.IsMounted(m.Provider)
}
// RefreshDir refreshes directories in the mount
// It requires both an attached RC manager and an active mount.
func (m *Mount) RefreshDir(dirs []string) error {
	switch {
	case m.rcManager == nil:
		return fmt.Errorf("rclone manager is not available")
	case !m.IsMounted():
		return fmt.Errorf("provider %s not properly mounted. Skipping refreshes", m.Provider)
	}
	if err := m.rcManager.RefreshDir(m.Provider, dirs); err != nil {
		return fmt.Errorf("failed to refresh directories for %s: %w", m.Provider, err)
	}
	return nil
}
// GetMountInfo returns mount information for this mount's provider.
// The second return is false when no RC manager is attached or the
// manager has no record of the provider.
func (m *Mount) GetMountInfo() (*MountInfo, bool) {
	if m.rcManager == nil {
		return nil, false
	}
	return m.rcManager.GetMountInfo(m.Provider)
}
+136
View File
@@ -0,0 +1,136 @@
package rclone
import (
"encoding/json"
"fmt"
)
// Stats represents rclone statistics
// as aggregated by Manager.GetStats for the web UI/stats endpoint.
type Stats struct {
	CoreStats     map[string]interface{} `json:"coreStats"`     // decoded from the core/stats RC call
	TransferStats map[string]interface{} `json:"transferStats"` // per-transfer stats from the same call
	MountStats    map[string]*MountInfo  `json:"mountStats"`    // mounts currently tracked by the manager
}
// GetStats retrieves statistics from the rclone RC server.
// Core/transfer stats are best-effort: if the core/stats payload cannot be
// re-decoded into CoreStatsResponse the failure is logged (instead of being
// silently ignored, as before) and those fields are left nil; mount stats
// are always populated from the manager's own state.
// NOTE(review): rclone's core/stats normally returns a flat object (bytes,
// speed, transferring, ...) — confirm CoreStatsResponse's nested keys match.
func (m *Manager) GetStats() (*Stats, error) {
	if !m.IsReady() {
		return nil, fmt.Errorf("rclone RC server not ready")
	}
	resp, err := m.makeRequest(RCRequest{Command: "core/stats"})
	if err != nil {
		return nil, fmt.Errorf("failed to get rclone stats: %w", err)
	}
	// Re-marshal the generic result and decode it into the typed struct.
	var coreStatsResp CoreStatsResponse
	if respBytes, err := json.Marshal(resp.Result); err != nil {
		m.logger.Debug().Err(err).Msg("Failed to re-marshal core stats result")
	} else if err := json.Unmarshal(respBytes, &coreStatsResp); err != nil {
		m.logger.Debug().Err(err).Msg("Failed to decode core stats response")
	}
	return &Stats{
		CoreStats:     coreStatsResp.CoreStats,
		TransferStats: coreStatsResp.TransferStats,
		MountStats:    m.GetAllMounts(),
	}, nil
}
// GetMemoryUsage returns memory usage statistics
// via the core/memstats RC call.
func (m *Manager) GetMemoryUsage() (map[string]interface{}, error) {
	if !m.IsReady() {
		return nil, fmt.Errorf("rclone RC server not ready")
	}
	resp, err := m.makeRequest(RCRequest{Command: "core/memstats"})
	if err != nil {
		return nil, fmt.Errorf("failed to get memory stats: %w", err)
	}
	memStats, ok := resp.Result.(map[string]interface{})
	if !ok {
		return nil, fmt.Errorf("invalid memory stats response")
	}
	return memStats, nil
}
// GetBandwidthStats returns bandwidth usage for all transfers
// via core/bwlimit. Failures are deliberately swallowed: bandwidth stats
// may simply be unavailable, so an empty map is returned instead of an error.
func (m *Manager) GetBandwidthStats() (map[string]interface{}, error) {
	if !m.IsReady() {
		return nil, fmt.Errorf("rclone RC server not ready")
	}
	resp, err := m.makeRequest(RCRequest{Command: "core/bwlimit"})
	if err == nil {
		if bwStats, ok := resp.Result.(map[string]interface{}); ok {
			return bwStats, nil
		}
	}
	return map[string]interface{}{}, nil
}
// GetVersion returns rclone version information
// via the core/version RC call.
func (m *Manager) GetVersion() (map[string]interface{}, error) {
	if !m.IsReady() {
		return nil, fmt.Errorf("rclone RC server not ready")
	}
	resp, err := m.makeRequest(RCRequest{Command: "core/version"})
	if err != nil {
		return nil, fmt.Errorf("failed to get version: %w", err)
	}
	version, ok := resp.Result.(map[string]interface{})
	if !ok {
		return nil, fmt.Errorf("invalid version response")
	}
	return version, nil
}
// GetConfigDump returns the current rclone configuration
// via the config/dump RC call.
func (m *Manager) GetConfigDump() (map[string]interface{}, error) {
	if !m.IsReady() {
		return nil, fmt.Errorf("rclone RC server not ready")
	}
	resp, err := m.makeRequest(RCRequest{Command: "config/dump"})
	if err != nil {
		return nil, fmt.Errorf("failed to get config dump: %w", err)
	}
	dump, ok := resp.Result.(map[string]interface{})
	if !ok {
		return nil, fmt.Errorf("invalid config dump response")
	}
	return dump, nil
}
+40
View File
@@ -118,5 +118,45 @@ func (s *Server) handleStats(w http.ResponseWriter, r *http.Request) {
profiles = append(profiles, profile)
}
stats["debrids"] = profiles
// Add rclone stats if available
if rcManager := store.Get().RcloneManager(); rcManager != nil && rcManager.IsReady() {
if rcStats, err := rcManager.GetStats(); err == nil {
stats["rclone"] = map[string]interface{}{
"enabled": true,
"server_ready": rcManager.IsReady(),
"core_stats": rcStats.CoreStats,
"transfer_stats": rcStats.TransferStats,
"mount_stats": rcStats.MountStats,
}
// Add memory usage
if memStats, err := rcManager.GetMemoryUsage(); err == nil {
stats["rclone"].(map[string]interface{})["memory_stats"] = memStats
}
// Add version info
if version, err := rcManager.GetVersion(); err == nil {
stats["rclone"].(map[string]interface{})["version"] = version
}
// Add bandwidth stats
if bwStats, err := rcManager.GetBandwidthStats(); err == nil {
stats["rclone"].(map[string]interface{})["bandwidth_stats"] = bwStats
}
} else {
stats["rclone"] = map[string]interface{}{
"enabled": true,
"server_ready": rcManager.IsReady(),
"error": err.Error(),
}
}
} else {
stats["rclone"] = map[string]interface{}{
"enabled": false,
"server_ready": false,
}
}
request.JSONResponse(w, stats, http.StatusOK)
}
+20 -2
View File
@@ -8,6 +8,7 @@ import (
"github.com/sirrobot01/decypharr/internal/logger"
"github.com/sirrobot01/decypharr/pkg/arr"
"github.com/sirrobot01/decypharr/pkg/debrid"
"github.com/sirrobot01/decypharr/pkg/rclone"
"github.com/sirrobot01/decypharr/pkg/repair"
"sync"
"time"
@@ -17,6 +18,7 @@ type Store struct {
repair *repair.Repair
arr *arr.Storage
debrid *debrid.Storage
rcloneManager *rclone.Manager
importsQueue *ImportQueue // Queued import requests(probably from too_many_active_downloads)
torrents *TorrentStorage
logger zerolog.Logger
@@ -34,15 +36,24 @@ var (
// Get returns the singleton instance
func Get() *Store {
once.Do(func() {
arrs := arr.NewStorage()
deb := debrid.NewStorage()
cfg := config.Get()
qbitCfg := cfg.QBitTorrent
// Create rclone manager if enabled
var rcManager *rclone.Manager
if cfg.Rclone.Enabled {
rcManager = rclone.NewManager()
}
// Create services with dependencies
arrs := arr.NewStorage()
deb := debrid.NewStorage(rcManager)
instance = &Store{
repair: repair.New(arrs, deb),
arr: arrs,
debrid: deb,
rcloneManager: rcManager,
torrents: newTorrentStorage(cfg.TorrentsFile()),
logger: logger.Default(), // Use default logger [decypharr]
refreshInterval: time.Duration(cmp.Or(qbitCfg.RefreshInterval, 10)) * time.Minute,
@@ -66,6 +77,10 @@ func Reset() {
instance.debrid.Reset()
}
if instance.rcloneManager != nil {
instance.rcloneManager.Stop()
}
if instance.importsQueue != nil {
instance.importsQueue.Close()
}
@@ -90,3 +105,6 @@ func (s *Store) Repair() *repair.Repair {
func (s *Store) Torrents() *TorrentStorage {
return s.torrents
}
func (s *Store) RcloneManager() *rclone.Manager {
return s.rcloneManager
}