Migrate to full rclone rcd
This commit is contained in:
@@ -0,0 +1,413 @@
|
||||
package rclone
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"github.com/sirrobot01/decypharr/internal/config"
|
||||
)
|
||||
|
||||
// Mount creates a mount using the rclone RC API with retry logic
|
||||
func (m *Manager) Mount(provider, webdavURL string) error {
|
||||
return m.mountWithRetry(provider, webdavURL, 3)
|
||||
}
|
||||
|
||||
// mountWithRetry attempts to mount with retry logic
|
||||
func (m *Manager) mountWithRetry(provider, webdavURL string, maxRetries int) error {
|
||||
if !m.IsReady() {
|
||||
if err := m.WaitForReady(30 * time.Second); err != nil {
|
||||
return fmt.Errorf("rclone RC server not ready: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
for attempt := 0; attempt <= maxRetries; attempt++ {
|
||||
if attempt > 0 {
|
||||
// Wait before retry
|
||||
wait := time.Duration(attempt*2) * time.Second
|
||||
m.logger.Debug().
|
||||
Int("attempt", attempt).
|
||||
Str("provider", provider).
|
||||
Msg("Retrying mount operation")
|
||||
time.Sleep(wait)
|
||||
}
|
||||
|
||||
if err := m.performMount(provider, webdavURL); err != nil {
|
||||
m.logger.Error().
|
||||
Err(err).
|
||||
Str("provider", provider).
|
||||
Int("attempt", attempt+1).
|
||||
Msg("Mount attempt failed")
|
||||
continue
|
||||
}
|
||||
|
||||
return nil // Success
|
||||
}
|
||||
return fmt.Errorf("mount failed for %s", provider)
|
||||
}
|
||||
|
||||
// performMount performs a single mount attempt: it ensures the mount
// directory exists, skips work when the provider is already mounted, clears
// any stale mount, registers a WebDAV remote, and issues a "mount/mount" RC
// command assembled from the rclone configuration (cache, VFS, and FUSE
// options). On success the mount is recorded in m.mounts.
func (m *Manager) performMount(provider, webdavURL string) error {
	cfg := config.Get()
	mountPath := filepath.Join(cfg.Rclone.MountPath, provider)
	cacheDir := ""
	if cfg.Rclone.CacheDir != "" {
		// Per-provider cache directory under the configured cache root.
		cacheDir = filepath.Join(cfg.Rclone.CacheDir, provider)
	}

	// Create mount directory
	if err := os.MkdirAll(mountPath, 0755); err != nil {
		return fmt.Errorf("failed to create mount directory %s: %w", mountPath, err)
	}

	// Check if already mounted
	m.mountsMutex.RLock()
	existingMount, exists := m.mounts[provider]
	m.mountsMutex.RUnlock()

	if exists && existingMount.Mounted {
		m.logger.Info().Str("provider", provider).Str("path", mountPath).Msg("Already mounted")
		return nil
	}

	// Clean up any stale mount first (best-effort; the error is deliberately
	// ignored since the path may simply not be mounted at all).
	if exists && !existingMount.Mounted {
		m.forceUnmountPath(mountPath)
	}

	// Create rclone config for this provider
	configName := fmt.Sprintf("decypharr-%s", provider)
	if err := m.createConfig(configName, webdavURL); err != nil {
		return fmt.Errorf("failed to create rclone config: %w", err)
	}

	// Prepare mount arguments
	mountArgs := map[string]interface{}{
		"fs":         fmt.Sprintf("%s:", configName),
		"mountPoint": mountPath,
		"mountType":  "mount", // Use standard FUSE mount
		"mountOpt": map[string]interface{}{
			"AllowNonEmpty": true,
			"AllowOther":    true,
			"DebugFUSE":     false,
			"DeviceName":    fmt.Sprintf("decypharr-%s", provider),
			"VolumeName":    fmt.Sprintf("decypharr-%s", provider),
		},
	}

	configOpts := map[string]interface{}{
		"BufferSize": cfg.Rclone.BufferSize,
	}

	if cacheDir != "" {
		// Create cache directory if specified
		if err := os.MkdirAll(cacheDir, 0755); err != nil {
			// Non-fatal: the mount can proceed without the cache directory.
			m.logger.Warn().Str("cacheDir", cacheDir).Msg("Failed to create cache directory")
		}
		configOpts["CacheDir"] = cacheDir
	}

	mountArgs["_config"] = configOpts

	// Add VFS options if caching is enabled
	if cfg.Rclone.VfsCacheMode != "off" {
		vfsOpt := map[string]interface{}{
			"CacheMode": cfg.Rclone.VfsCacheMode,
		}

		// Only forward options the user actually configured; rclone applies
		// its own defaults for the rest.
		if cfg.Rclone.VfsCacheMaxAge != "" {
			vfsOpt["CacheMaxAge"] = cfg.Rclone.VfsCacheMaxAge
		}
		if cfg.Rclone.VfsCacheMaxSize != "" {
			vfsOpt["CacheMaxSize"] = cfg.Rclone.VfsCacheMaxSize
		}
		if cfg.Rclone.VfsCachePollInterval != "" {
			vfsOpt["CachePollInterval"] = cfg.Rclone.VfsCachePollInterval
		}
		if cfg.Rclone.VfsReadChunkSize != "" {
			vfsOpt["ChunkSize"] = cfg.Rclone.VfsReadChunkSize
		}
		if cfg.Rclone.VfsReadAhead != "" {
			vfsOpt["ReadAhead"] = cfg.Rclone.VfsReadAhead
		}
		if cfg.Rclone.NoChecksum {
			vfsOpt["NoChecksum"] = cfg.Rclone.NoChecksum
		}
		if cfg.Rclone.NoModTime {
			vfsOpt["NoModTime"] = cfg.Rclone.NoModTime
		}

		mountArgs["vfsOpt"] = vfsOpt
	}

	// Add mount options based on configuration
	if cfg.Rclone.UID != 0 {
		mountArgs["mountOpt"].(map[string]interface{})["UID"] = cfg.Rclone.UID
	}
	if cfg.Rclone.GID != 0 {
		mountArgs["mountOpt"].(map[string]interface{})["GID"] = cfg.Rclone.GID
	}
	if cfg.Rclone.AttrTimeout != "" {
		// An unparseable duration is silently skipped; rclone's default applies.
		if attrTimeout, err := time.ParseDuration(cfg.Rclone.AttrTimeout); err == nil {
			mountArgs["mountOpt"].(map[string]interface{})["AttrTimeout"] = attrTimeout.String()
		}
	}
	// Make the mount request
	req := RCRequest{
		Command: "mount/mount",
		Args:    mountArgs,
	}

	_, err := m.makeRequest(req)
	if err != nil {
		// Clean up mount point on failure
		m.forceUnmountPath(mountPath)
		return fmt.Errorf("failed to create mount for %s: %w", provider, err)
	}

	// Store mount info
	mountInfo := &MountInfo{
		Provider:   provider,
		LocalPath:  mountPath,
		WebDAVURL:  webdavURL,
		Mounted:    true,
		MountedAt:  time.Now().Format(time.RFC3339),
		ConfigName: configName,
	}

	m.mountsMutex.Lock()
	m.mounts[provider] = mountInfo
	m.mountsMutex.Unlock()

	return nil
}
|
||||
|
||||
// Unmount unmounts a specific provider
|
||||
func (m *Manager) Unmount(provider string) error {
|
||||
return m.unmount(provider)
|
||||
}
|
||||
|
||||
// unmount is the internal unmount function
|
||||
func (m *Manager) unmount(provider string) error {
|
||||
m.mountsMutex.RLock()
|
||||
mountInfo, exists := m.mounts[provider]
|
||||
m.mountsMutex.RUnlock()
|
||||
|
||||
if !exists || !mountInfo.Mounted {
|
||||
m.logger.Info().Str("provider", provider).Msg("Mount not found or already unmounted")
|
||||
return nil
|
||||
}
|
||||
|
||||
m.logger.Info().Str("provider", provider).Str("path", mountInfo.LocalPath).Msg("Unmounting")
|
||||
|
||||
// Try RC unmount first
|
||||
req := RCRequest{
|
||||
Command: "mount/unmount",
|
||||
Args: map[string]interface{}{
|
||||
"mountPoint": mountInfo.LocalPath,
|
||||
},
|
||||
}
|
||||
|
||||
var rcErr error
|
||||
if m.IsReady() {
|
||||
_, rcErr = m.makeRequest(req)
|
||||
}
|
||||
|
||||
// If RC unmount fails or server is not ready, try force unmount
|
||||
if rcErr != nil {
|
||||
m.logger.Warn().Err(rcErr).Str("provider", provider).Msg("RC unmount failed, trying force unmount")
|
||||
if err := m.forceUnmountPath(mountInfo.LocalPath); err != nil {
|
||||
m.logger.Error().Err(err).Str("provider", provider).Msg("Force unmount failed")
|
||||
// Don't return error here, update the state anyway
|
||||
}
|
||||
}
|
||||
|
||||
// Update mount info
|
||||
m.mountsMutex.Lock()
|
||||
if info, exists := m.mounts[provider]; exists {
|
||||
info.Mounted = false
|
||||
info.Error = ""
|
||||
if rcErr != nil {
|
||||
info.Error = rcErr.Error()
|
||||
}
|
||||
}
|
||||
m.mountsMutex.Unlock()
|
||||
|
||||
m.logger.Info().Str("provider", provider).Msg("Unmount completed")
|
||||
return nil
|
||||
}
|
||||
|
||||
// UnmountAll unmounts all mounts
|
||||
func (m *Manager) UnmountAll() error {
|
||||
m.mountsMutex.RLock()
|
||||
providers := make([]string, 0, len(m.mounts))
|
||||
for provider, mount := range m.mounts {
|
||||
if mount.Mounted {
|
||||
providers = append(providers, provider)
|
||||
}
|
||||
}
|
||||
m.mountsMutex.RUnlock()
|
||||
|
||||
var lastError error
|
||||
for _, provider := range providers {
|
||||
if err := m.unmount(provider); err != nil {
|
||||
lastError = err
|
||||
m.logger.Error().Err(err).Str("provider", provider).Msg("Failed to unmount")
|
||||
}
|
||||
}
|
||||
|
||||
return lastError
|
||||
}
|
||||
|
||||
// GetMountInfo returns information about a specific mount
|
||||
func (m *Manager) GetMountInfo(provider string) (*MountInfo, bool) {
|
||||
m.mountsMutex.RLock()
|
||||
defer m.mountsMutex.RUnlock()
|
||||
|
||||
info, exists := m.mounts[provider]
|
||||
if !exists {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// Create a copy to avoid race conditions
|
||||
mountInfo := *info
|
||||
return &mountInfo, true
|
||||
}
|
||||
|
||||
// GetAllMounts returns information about all mounts
|
||||
func (m *Manager) GetAllMounts() map[string]*MountInfo {
|
||||
m.mountsMutex.RLock()
|
||||
defer m.mountsMutex.RUnlock()
|
||||
|
||||
result := make(map[string]*MountInfo, len(m.mounts))
|
||||
for provider, info := range m.mounts {
|
||||
// Create a copy to avoid race conditions
|
||||
mountInfo := *info
|
||||
result[provider] = &mountInfo
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
// IsMounted checks if a provider is mounted
|
||||
func (m *Manager) IsMounted(provider string) bool {
|
||||
info, exists := m.GetMountInfo(provider)
|
||||
return exists && info.Mounted
|
||||
}
|
||||
|
||||
// RefreshDir refreshes directories in the VFS cache
|
||||
func (m *Manager) RefreshDir(provider string, dirs []string) error {
|
||||
if !m.IsReady() {
|
||||
return fmt.Errorf("rclone RC server not ready")
|
||||
}
|
||||
|
||||
mountInfo, exists := m.GetMountInfo(provider)
|
||||
if !exists || !mountInfo.Mounted {
|
||||
return fmt.Errorf("provider %s not mounted", provider)
|
||||
}
|
||||
|
||||
// If no specific directories provided, refresh root
|
||||
if len(dirs) == 0 {
|
||||
dirs = []string{"/"}
|
||||
}
|
||||
args := map[string]interface{}{
|
||||
"fs": fmt.Sprintf("decypharr-%s:", provider),
|
||||
}
|
||||
for i, dir := range dirs {
|
||||
if dir != "" {
|
||||
if i == 0 {
|
||||
args["dir"] = dir
|
||||
} else {
|
||||
args[fmt.Sprintf("dir%d", i+1)] = dir
|
||||
}
|
||||
}
|
||||
}
|
||||
req := RCRequest{
|
||||
Command: "vfs/forget",
|
||||
Args: args,
|
||||
}
|
||||
|
||||
_, err := m.makeRequest(req)
|
||||
if err != nil {
|
||||
m.logger.Error().Err(err).
|
||||
Str("provider", provider).
|
||||
Msg("Failed to refresh directory")
|
||||
return fmt.Errorf("failed to refresh directory %s for provider %s: %w", dirs, provider, err)
|
||||
}
|
||||
|
||||
req = RCRequest{
|
||||
Command: "vfs/refresh",
|
||||
Args: args,
|
||||
}
|
||||
|
||||
_, err = m.makeRequest(req)
|
||||
if err != nil {
|
||||
m.logger.Error().Err(err).
|
||||
Str("provider", provider).
|
||||
Msg("Failed to refresh directory")
|
||||
return fmt.Errorf("failed to refresh directory %s for provider %s: %w", dirs, provider, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// createConfig creates an rclone config entry for the provider
|
||||
func (m *Manager) createConfig(configName, webdavURL string) error {
|
||||
req := RCRequest{
|
||||
Command: "config/create",
|
||||
Args: map[string]interface{}{
|
||||
"name": configName,
|
||||
"type": "webdav",
|
||||
"parameters": map[string]interface{}{
|
||||
"url": webdavURL,
|
||||
"vendor": "other",
|
||||
"pacer_min_sleep": "0",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
_, err := m.makeRequest(req)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create config %s: %w", configName, err)
|
||||
}
|
||||
|
||||
m.logger.Trace().
|
||||
Str("config_name", configName).
|
||||
Str("webdav_url", webdavURL).
|
||||
Msg("Rclone config created")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// forceUnmountPath attempts to force unmount a path using system commands
|
||||
func (m *Manager) forceUnmountPath(mountPath string) error {
|
||||
methods := [][]string{
|
||||
{"umount", mountPath},
|
||||
{"umount", "-l", mountPath}, // lazy unmount
|
||||
{"fusermount", "-uz", mountPath},
|
||||
{"fusermount3", "-uz", mountPath},
|
||||
}
|
||||
|
||||
for _, method := range methods {
|
||||
if err := m.tryUnmountCommand(method...); err == nil {
|
||||
m.logger.Info().
|
||||
Strs("command", method).
|
||||
Str("path", mountPath).
|
||||
Msg("Successfully unmounted using system command")
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
return fmt.Errorf("all force unmount attempts failed for %s", mountPath)
|
||||
}
|
||||
|
||||
// tryUnmountCommand tries to run an unmount command
|
||||
func (m *Manager) tryUnmountCommand(args ...string) error {
|
||||
if len(args) == 0 {
|
||||
return fmt.Errorf("no command provided")
|
||||
}
|
||||
|
||||
cmd := exec.CommandContext(m.ctx, args[0], args[1:]...)
|
||||
return cmd.Run()
|
||||
}
|
||||
@@ -0,0 +1,140 @@
|
||||
package rclone
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
)
|
||||
|
||||
// HealthCheck performs comprehensive health checks on the rclone system
|
||||
func (m *Manager) HealthCheck() error {
|
||||
if !m.serverStarted {
|
||||
return fmt.Errorf("rclone RC server is not started")
|
||||
}
|
||||
|
||||
if !m.IsReady() {
|
||||
return fmt.Errorf("rclone RC server is not ready")
|
||||
}
|
||||
|
||||
// Check if we can communicate with the server
|
||||
if !m.pingServer() {
|
||||
return fmt.Errorf("rclone RC server is not responding")
|
||||
}
|
||||
|
||||
// Check mounts health
|
||||
m.mountsMutex.RLock()
|
||||
unhealthyMounts := make([]string, 0)
|
||||
for provider, mount := range m.mounts {
|
||||
if mount.Mounted && !m.checkMountHealth(provider) {
|
||||
unhealthyMounts = append(unhealthyMounts, provider)
|
||||
}
|
||||
}
|
||||
m.mountsMutex.RUnlock()
|
||||
|
||||
if len(unhealthyMounts) > 0 {
|
||||
return fmt.Errorf("unhealthy mounts detected: %v", unhealthyMounts)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// checkMountHealth probes a mount by asking the RC server to list the root of
// the provider's remote ("operations/list" on "decypharr-<provider>:"). A
// successful response counts as healthy.
// NOTE(review): this exercises the remote backend through the RC API, not the
// local FUSE mount point — a wedged FUSE mount with a reachable backend would
// still pass; confirm that is the intended semantics.
func (m *Manager) checkMountHealth(provider string) bool {
	// Try to list the root directory of the mount
	req := RCRequest{
		Command: "operations/list",
		Args: map[string]interface{}{
			"fs":     fmt.Sprintf("decypharr-%s:", provider),
			"remote": "/",
		},
	}

	_, err := m.makeRequest(req)
	return err == nil
}
|
||||
|
||||
// RecoverMount attempts to recover a failed mount by unmounting the provider,
// waiting briefly, and remounting it with the stored WebDAV URL (which goes
// through Mount's full retry logic).
func (m *Manager) RecoverMount(provider string) error {
	m.mountsMutex.RLock()
	mountInfo, exists := m.mounts[provider]
	m.mountsMutex.RUnlock()

	if !exists {
		return fmt.Errorf("mount for provider %s does not exist", provider)
	}

	m.logger.Warn().Str("provider", provider).Msg("Attempting to recover mount")

	// First try to unmount cleanly
	// NOTE(review): unmount() returns early when the mount is already flagged
	// as not Mounted — which performMountHealthCheck sets just before calling
	// this — so a stale FUSE mount may not actually be detached here; verify.
	if err := m.unmount(provider); err != nil {
		m.logger.Error().Err(err).Str("provider", provider).Msg("Failed to unmount during recovery")
	}

	// Wait a moment for the unmount to settle before remounting.
	time.Sleep(2 * time.Second)

	// Try to remount
	if err := m.Mount(provider, mountInfo.WebDAVURL); err != nil {
		return fmt.Errorf("failed to recover mount for %s: %w", provider, err)
	}

	m.logger.Info().Str("provider", provider).Msg("Successfully recovered mount")
	return nil
}
|
||||
|
||||
// MonitorMounts continuously monitors mount health and attempts recovery
|
||||
func (m *Manager) MonitorMounts(ctx context.Context) {
|
||||
if !m.serverStarted {
|
||||
return
|
||||
}
|
||||
|
||||
ticker := time.NewTicker(30 * time.Second) // Check every 30 seconds
|
||||
defer ticker.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
m.logger.Debug().Msg("Mount monitoring stopped")
|
||||
return
|
||||
case <-ticker.C:
|
||||
m.performMountHealthCheck()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// performMountHealthCheck probes every mounted provider and, for each one
// that fails the probe, flags it as unmounted with an error message and
// spawns a background recovery attempt via RecoverMount.
func (m *Manager) performMountHealthCheck() {
	if !m.IsReady() {
		return
	}

	// Snapshot the mounted providers so the probes run without holding the lock.
	m.mountsMutex.RLock()
	providers := make([]string, 0, len(m.mounts))
	for provider, mount := range m.mounts {
		if mount.Mounted {
			providers = append(providers, provider)
		}
	}
	m.mountsMutex.RUnlock()

	for _, provider := range providers {
		if !m.checkMountHealth(provider) {
			m.logger.Warn().Str("provider", provider).Msg("Mount health check failed, attempting recovery")

			// Mark mount as unhealthy
			m.mountsMutex.Lock()
			if mount, exists := m.mounts[provider]; exists {
				mount.Error = "Health check failed"
				mount.Mounted = false
			}
			m.mountsMutex.Unlock()

			// Attempt recovery in the background; RecoverMount logs its own
			// progress.
			// NOTE(review): this goroutine is not tied to any context and
			// RecoverMount sleeps and retries — confirm it cannot linger past
			// shutdown.
			go func(provider string) {
				if err := m.RecoverMount(provider); err != nil {
					m.logger.Error().Err(err).Str("provider", provider).Msg("Failed to recover mount")
				}
			}(provider)
		}
	}
}
|
||||
@@ -0,0 +1,43 @@
|
||||
//go:build !windows
|
||||
|
||||
package rclone
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"os/exec"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
// WasHardTerminated reports true iff the process was ended by SIGKILL or SIGTERM.
|
||||
func WasHardTerminated(err error) bool {
|
||||
var ee *exec.ExitError
|
||||
if !errors.As(err, &ee) {
|
||||
return false
|
||||
}
|
||||
ws, ok := ee.Sys().(syscall.WaitStatus)
|
||||
if !ok || !ws.Signaled() {
|
||||
return false
|
||||
}
|
||||
sig := ws.Signal()
|
||||
return sig == syscall.SIGKILL || sig == syscall.SIGTERM
|
||||
}
|
||||
|
||||
// ExitCode returns the numeric exit code when available.
|
||||
func ExitCode(err error) (int, bool) {
|
||||
var ee *exec.ExitError
|
||||
if !errors.As(err, &ee) {
|
||||
return 0, false
|
||||
}
|
||||
ws, ok := ee.Sys().(syscall.WaitStatus)
|
||||
if !ok {
|
||||
return 0, false
|
||||
}
|
||||
if ws.Exited() {
|
||||
return ws.ExitStatus(), true
|
||||
}
|
||||
// Conventional shell “killed by signal” code is 128 + signal.
|
||||
if ws.Signaled() {
|
||||
return 128 + int(ws.Signal()), true
|
||||
}
|
||||
return 0, false
|
||||
}
|
||||
@@ -0,0 +1,35 @@
|
||||
//go:build windows
|
||||
|
||||
package rclone
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"os/exec"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
func WasHardTerminated(err error) bool {
|
||||
var ee *exec.ExitError
|
||||
if !errors.As(err, &ee) {
|
||||
return false
|
||||
}
|
||||
ws, ok := ee.Sys().(syscall.WaitStatus)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
// No Signaled() on Windows; consider "hard terminated" if not success.
|
||||
return ws.ExitCode() != 0
|
||||
}
|
||||
|
||||
// ExitCode returns the process exit code when available.
|
||||
func ExitCode(err error) (int, bool) {
|
||||
var ee *exec.ExitError
|
||||
if !errors.As(err, &ee) {
|
||||
return 0, false
|
||||
}
|
||||
ws, ok := ee.Sys().(syscall.WaitStatus)
|
||||
if !ok {
|
||||
return 0, false
|
||||
}
|
||||
return ws.ExitCode(), true
|
||||
}
|
||||
@@ -0,0 +1,377 @@
|
||||
package rclone
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/sirrobot01/decypharr/internal/config"
|
||||
"github.com/sirrobot01/decypharr/internal/logger"
|
||||
)
|
||||
|
||||
// Manager handles the rclone RC server and provides mount operations
type Manager struct {
	cmd           *exec.Cmd             // the running "rclone rcd" child process
	rcPort        string                // TCP port the RC server listens on (default 5572)
	rcUser        string                // RC basic-auth user — appears unused; server runs with --rc-no-auth (TODO confirm)
	rcPass        string                // RC basic-auth password — appears unused (TODO confirm)
	configDir     string                // directory holding rclone.conf
	mounts        map[string]*MountInfo // mount state keyed by provider name; guarded by mountsMutex
	mountsMutex   sync.RWMutex          // guards mounts
	logger        zerolog.Logger        // component logger tagged "rclone"
	ctx           context.Context       // manager lifetime; cancelled by Stop
	cancel        context.CancelFunc    // cancels ctx
	httpClient    *http.Client          // client for RC HTTP requests (30s timeout)
	serverReady   chan struct{}         // closed once the RC server first responds
	serverStarted bool                  // whether the rcd process has been started; guarded by mu
	mu            sync.RWMutex          // guards serverStarted and the start/stop lifecycle
}
|
||||
|
||||
// MountInfo is the recorded state of one provider mount, as tracked by the
// Manager and serialized for status reporting.
type MountInfo struct {
	Provider   string `json:"provider"`             // provider name (also the key in Manager.mounts)
	LocalPath  string `json:"local_path"`           // local FUSE mount point
	WebDAVURL  string `json:"webdav_url"`           // WebDAV endpoint backing the mount
	Mounted    bool   `json:"mounted"`              // whether the mount is currently believed active
	MountedAt  string `json:"mounted_at,omitempty"` // RFC3339 timestamp of the last successful mount
	ConfigName string `json:"config_name"`          // rclone remote name ("decypharr-<provider>")
	Error      string `json:"error,omitempty"`      // last unmount/health-check error, if any
}
|
||||
|
||||
// RCRequest is a command sent to the rclone RC HTTP API. Command is the
// endpoint path (e.g. "mount/mount"); Args is marshalled as the JSON body.
type RCRequest struct {
	Command string                 `json:"command"`
	Args    map[string]interface{} `json:"args,omitempty"`
}

// RCResponse is the decoded JSON reply from the RC API. Error is non-empty
// when rclone reports a failure.
type RCResponse struct {
	Result interface{} `json:"result,omitempty"`
	Error  string      `json:"error,omitempty"`
}

// CoreStatsResponse mirrors the shape of an rclone stats reply.
// NOTE(review): not referenced anywhere in this file — confirm it is used
// elsewhere before keeping it.
type CoreStatsResponse struct {
	TransferStats map[string]interface{} `json:"transferStats"`
	CoreStats     map[string]interface{} `json:"coreStats"`
}
|
||||
|
||||
// NewManager creates a new rclone RC manager with the default RC port (5572),
// a config directory under <cfg.Path>/rclone, and a 30-second HTTP client.
// The RC server itself is not started here; call Start for that.
func NewManager() *Manager {
	cfg := config.Get()

	rcPort := "5572"
	configDir := filepath.Join(cfg.Path, "rclone")

	// Ensure config directory exists. Failure is logged but not fatal —
	// later operations on the directory will surface the problem again.
	if err := os.MkdirAll(configDir, 0755); err != nil {
		_logger := logger.New("rclone")
		_logger.Error().Err(err).Msg("Failed to create rclone config directory")
	}

	ctx, cancel := context.WithCancel(context.Background())

	return &Manager{
		rcPort:      rcPort,
		configDir:   configDir,
		mounts:      make(map[string]*MountInfo),
		logger:      logger.New("rclone"),
		ctx:         ctx,
		cancel:      cancel,
		httpClient:  &http.Client{Timeout: 30 * time.Second},
		serverReady: make(chan struct{}),
	}
}
|
||||
|
||||
// Start launches the rclone "rcd" daemon as a child process and begins
// monitoring it. It is a no-op when the server is already started or when
// rclone is disabled in the configuration. The method returns right after the
// process is spawned; readiness is signalled asynchronously by closing
// serverReady (see IsReady / WaitForReady).
func (m *Manager) Start(ctx context.Context) error {
	m.mu.Lock()
	defer m.mu.Unlock()

	if m.serverStarted {
		return nil
	}

	cfg := config.Get()
	if !cfg.Rclone.Enabled {
		m.logger.Info().Msg("Rclone is disabled, skipping RC server startup")
		return nil
	}

	args := []string{
		"rcd",
		"--rc-addr", ":" + m.rcPort,
		"--rc-no-auth", // We'll handle auth at the application level
		"--config", filepath.Join(m.configDir, "rclone.conf"),
		"--log-level", "INFO",
	}
	m.cmd = exec.CommandContext(ctx, "rclone", args...)
	m.cmd.Dir = m.configDir

	// Capture output for debugging
	// NOTE(review): stdout/stderr are buffered here but never read or logged
	// anywhere visible in this file — confirm they are consumed elsewhere or
	// consider logging them on abnormal exit.
	var stdout, stderr bytes.Buffer
	m.cmd.Stdout = &stdout
	m.cmd.Stderr = &stderr

	if err := m.cmd.Start(); err != nil {
		return fmt.Errorf("failed to start rclone RC server: %w", err)
	}

	m.serverStarted = true

	// Wait for server to be ready in a goroutine
	go func() {
		defer func() {
			if r := recover(); r != nil {
				m.logger.Error().Interface("panic", r).Msg("Panic in rclone RC server monitor")
			}
		}()

		// Poll until the RC endpoint answers (or give up after ~30 tries),
		// then signal readiness to IsReady/WaitForReady.
		m.waitForServer()
		close(m.serverReady)

		// Start mount monitoring once server is ready
		go func() {
			defer func() {
				if r := recover(); r != nil {
					m.logger.Error().Interface("panic", r).Msg("Panic in mount monitor")
				}
			}()
			m.MonitorMounts(ctx)
		}()

		// Wait for command to finish and log output
		// NOTE(review): Stop() also calls m.cmd.Wait(); os/exec documents Wait
		// as single-use per command — confirm both callers cannot race on the
		// same process.
		err := m.cmd.Wait()
		switch {
		case err == nil:
			m.logger.Info().Msg("Rclone RC server exited normally")

		case errors.Is(err, context.Canceled):
			m.logger.Info().Msg("Rclone RC server terminated: context canceled")

		case WasHardTerminated(err): // SIGKILL on *nix; non-zero exit on Windows
			m.logger.Info().Msg("Rclone RC server hard-terminated")

		default:
			if code, ok := ExitCode(err); ok {
				m.logger.Debug().Int("exit_code", code).Err(err).
					Msg("Rclone RC server error")
			} else {
				m.logger.Debug().Err(err).Msg("Rclone RC server error (no exit code)")
			}
		}
	}()
	return nil
}
|
||||
|
||||
// Stop shuts the rclone RC server down: it unmounts all active mounts in
// parallel (with a 30s budget), cancels the manager context, asks the rcd
// process to exit (interrupt first, kill as fallback), and finally removes
// empty mount directories. It is safe to call when the server never started.
func (m *Manager) Stop() error {
	m.mu.Lock()
	defer m.mu.Unlock()

	if !m.serverStarted {
		return nil
	}

	m.logger.Info().Msg("Stopping rclone RC server")

	// Unmount all mounts first
	m.mountsMutex.RLock()
	mountList := make([]*MountInfo, 0, len(m.mounts))
	for _, mount := range m.mounts {
		if mount.Mounted {
			mountList = append(mountList, mount)
		}
	}
	m.mountsMutex.RUnlock()

	// Unmount in parallel
	var wg sync.WaitGroup
	for _, mount := range mountList {
		wg.Add(1)
		go func(mount *MountInfo) {
			defer wg.Done()
			if err := m.unmount(mount.Provider); err != nil {
				m.logger.Error().Err(err).Str("provider", mount.Provider).Msg("Failed to unmount during shutdown")
			}
		}(mount)
	}

	// Wait for unmounts with timeout
	done := make(chan struct{})
	go func() {
		wg.Wait()
		close(done)
	}()

	select {
	case <-done:
		m.logger.Info().Msg("All mounts unmounted successfully")
	case <-time.After(30 * time.Second):
		m.logger.Warn().Msg("Timeout waiting for mounts to unmount, proceeding with shutdown")
	}

	// Cancel context and stop process
	m.cancel()

	if m.cmd != nil && m.cmd.Process != nil {
		// Try graceful shutdown first
		if err := m.cmd.Process.Signal(os.Interrupt); err != nil {
			m.logger.Warn().Err(err).Msg("Failed to send interrupt signal, using kill")
			if killErr := m.cmd.Process.Kill(); killErr != nil {
				m.logger.Error().Err(killErr).Msg("Failed to kill rclone process")
				return killErr
			}
		}

		// Wait for process to exit with timeout
		// NOTE(review): Start's monitor goroutine also calls m.cmd.Wait();
		// os/exec documents Wait as single-use per command — confirm the two
		// callers cannot both run for the same process.
		done := make(chan error, 1)
		go func() {
			done <- m.cmd.Wait()
		}()

		select {
		case err := <-done:
			if err != nil && !errors.Is(err, context.Canceled) && !WasHardTerminated(err) {
				m.logger.Warn().Err(err).Msg("Rclone process exited with error")
			}
		case <-time.After(10 * time.Second):
			m.logger.Warn().Msg("Timeout waiting for rclone to exit, force killing")
			if err := m.cmd.Process.Kill(); err != nil {
				m.logger.Error().Err(err).Msg("Failed to force kill rclone process")
				return err
			}
			// Wait a bit more for the kill to take effect
			select {
			case <-done:
				m.logger.Info().Msg("Rclone process killed successfully")
			case <-time.After(5 * time.Second):
				m.logger.Error().Msg("Process may still be running after kill")
			}
		}
	}

	// Clean up any remaining mount directories
	cfg := config.Get()
	if cfg.Rclone.MountPath != "" {
		m.cleanupMountDirectories(cfg.Rclone.MountPath)
	}

	m.serverStarted = false
	m.logger.Info().Msg("Rclone RC server stopped")
	return nil
}
|
||||
|
||||
// cleanupMountDirectories removes empty mount directories
|
||||
func (m *Manager) cleanupMountDirectories(_ string) {
|
||||
m.mountsMutex.RLock()
|
||||
defer m.mountsMutex.RUnlock()
|
||||
|
||||
for _, mount := range m.mounts {
|
||||
if mount.LocalPath != "" {
|
||||
// Try to remove the directory if it's empty
|
||||
if err := os.Remove(mount.LocalPath); err == nil {
|
||||
m.logger.Debug().Str("path", mount.LocalPath).Msg("Removed empty mount directory")
|
||||
}
|
||||
// Don't log errors here as the directory might not be empty, which is fine
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// waitForServer waits for the RC server to become available
|
||||
func (m *Manager) waitForServer() {
|
||||
maxAttempts := 30
|
||||
for i := 0; i < maxAttempts; i++ {
|
||||
if m.ctx.Err() != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if m.pingServer() {
|
||||
m.logger.Info().Msg("Rclone RC server is ready")
|
||||
return
|
||||
}
|
||||
|
||||
time.Sleep(time.Second)
|
||||
}
|
||||
|
||||
m.logger.Error().Msg("Rclone RC server not responding - mount operations will be disabled")
|
||||
}
|
||||
|
||||
// pingServer checks if the RC server is responding
|
||||
func (m *Manager) pingServer() bool {
|
||||
req := RCRequest{Command: "core/version"}
|
||||
_, err := m.makeRequest(req)
|
||||
return err == nil
|
||||
}
|
||||
|
||||
// makeRequest makes a request to the rclone RC server
|
||||
func (m *Manager) makeRequest(req RCRequest) (*RCResponse, error) {
|
||||
reqBody, err := json.Marshal(req.Args)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to marshal request: %w", err)
|
||||
}
|
||||
|
||||
url := fmt.Sprintf("http://localhost:%s/%s", m.rcPort, req.Command)
|
||||
httpReq, err := http.NewRequestWithContext(m.ctx, "POST", url, bytes.NewBuffer(reqBody))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create request: %w", err)
|
||||
}
|
||||
|
||||
httpReq.Header.Set("Content-Type", "application/json")
|
||||
|
||||
resp, err := m.httpClient.Do(httpReq)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to make request: %w", err)
|
||||
}
|
||||
defer func() {
|
||||
if err := resp.Body.Close(); err != nil {
|
||||
m.logger.Debug().Err(err).Msg("Failed to close response body")
|
||||
}
|
||||
}()
|
||||
|
||||
var rcResp RCResponse
|
||||
if err := json.NewDecoder(resp.Body).Decode(&rcResp); err != nil {
|
||||
return nil, fmt.Errorf("failed to decode response: %w", err)
|
||||
}
|
||||
|
||||
if rcResp.Error != "" {
|
||||
return nil, fmt.Errorf("rclone error: %s", rcResp.Error)
|
||||
}
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return nil, fmt.Errorf("unexpected status code: %d - %s", resp.StatusCode, http.StatusText(resp.StatusCode))
|
||||
}
|
||||
|
||||
return &rcResp, nil
|
||||
}
|
||||
|
||||
// IsReady returns true if the RC server is ready
|
||||
func (m *Manager) IsReady() bool {
|
||||
select {
|
||||
case <-m.serverReady:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// WaitForReady waits for the RC server to be ready
|
||||
func (m *Manager) WaitForReady(timeout time.Duration) error {
|
||||
select {
|
||||
case <-m.serverReady:
|
||||
return nil
|
||||
case <-time.After(timeout):
|
||||
return fmt.Errorf("timeout waiting for rclone RC server to be ready")
|
||||
case <-m.ctx.Done():
|
||||
return m.ctx.Err()
|
||||
}
|
||||
}
|
||||
|
||||
// GetLogger returns the manager's zerolog logger so related components can
// share the same "rclone"-tagged log output.
func (m *Manager) GetLogger() zerolog.Logger {
	return m.logger
}
|
||||
@@ -0,0 +1,120 @@
|
||||
package rclone
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/sirrobot01/decypharr/internal/config"
|
||||
"net/url"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
// Mount represents a mount using the rclone RC client
type Mount struct {
	Provider  string         // provider name; also appended to the WebDAV URL path
	LocalPath string         // local directory where the share is mounted
	WebDAVURL string         // provider-specific WebDAV endpoint
	logger    zerolog.Logger // shared with the RC manager
	rcManager *Manager       // performs the actual RC mount/unmount operations
}
|
||||
|
||||
// NewMount creates a new RC-based mount
|
||||
func NewMount(provider, webdavURL string, rcManager *Manager) *Mount {
|
||||
cfg := config.Get()
|
||||
mountPath := filepath.Join(cfg.Rclone.MountPath, provider)
|
||||
|
||||
_url, err := url.JoinPath(webdavURL, provider)
|
||||
if err != nil {
|
||||
_url = fmt.Sprintf("%s/%s", webdavURL, provider)
|
||||
}
|
||||
|
||||
return &Mount{
|
||||
Provider: provider,
|
||||
LocalPath: mountPath,
|
||||
WebDAVURL: _url,
|
||||
rcManager: rcManager,
|
||||
logger: rcManager.GetLogger(),
|
||||
}
|
||||
}
|
||||
|
||||
// Mount creates the mount using rclone RC
|
||||
func (m *Mount) Mount(ctx context.Context) error {
|
||||
if m.rcManager == nil {
|
||||
return fmt.Errorf("rclone manager is not available")
|
||||
}
|
||||
|
||||
// Check if already mounted
|
||||
if m.rcManager.IsMounted(m.Provider) {
|
||||
m.logger.Info().Msgf("Mount %s is already mounted at %s", m.Provider, m.LocalPath)
|
||||
return nil
|
||||
}
|
||||
|
||||
m.logger.Info().
|
||||
Str("provider", m.Provider).
|
||||
Str("webdav_url", m.WebDAVURL).
|
||||
Str("mount_path", m.LocalPath).
|
||||
Msg("Creating mount via RC")
|
||||
|
||||
if err := m.rcManager.Mount(m.Provider, m.WebDAVURL); err != nil {
|
||||
m.logger.Error().Str("provider", m.Provider).Msg("Mount operation failed")
|
||||
return fmt.Errorf("mount failed for %s", m.Provider)
|
||||
}
|
||||
|
||||
m.logger.Info().Msgf("Successfully mounted %s WebDAV at %s via RC", m.Provider, m.LocalPath)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Unmount removes the mount using rclone RC
|
||||
func (m *Mount) Unmount() error {
|
||||
if m.rcManager == nil {
|
||||
m.logger.Warn().Msg("Rclone manager is not available, skipping unmount")
|
||||
return nil
|
||||
}
|
||||
|
||||
if !m.rcManager.IsMounted(m.Provider) {
|
||||
m.logger.Info().Msgf("Mount %s is not mounted, skipping unmount", m.Provider)
|
||||
return nil
|
||||
}
|
||||
|
||||
m.logger.Info().Str("provider", m.Provider).Msg("Unmounting via RC")
|
||||
|
||||
if err := m.rcManager.Unmount(m.Provider); err != nil {
|
||||
return fmt.Errorf("failed to unmount %s via RC: %w", m.Provider, err)
|
||||
}
|
||||
|
||||
m.logger.Info().Msgf("Successfully unmounted %s", m.Provider)
|
||||
return nil
|
||||
}
|
||||
|
||||
// IsMounted checks if the mount is active via RC
|
||||
func (m *Mount) IsMounted() bool {
|
||||
if m.rcManager == nil {
|
||||
return false
|
||||
}
|
||||
return m.rcManager.IsMounted(m.Provider)
|
||||
}
|
||||
|
||||
// RefreshDir refreshes directories in the mount
|
||||
func (m *Mount) RefreshDir(dirs []string) error {
|
||||
if m.rcManager == nil {
|
||||
return fmt.Errorf("rclone manager is not available")
|
||||
}
|
||||
|
||||
if !m.IsMounted() {
|
||||
return fmt.Errorf("provider %s not properly mounted. Skipping refreshes", m.Provider)
|
||||
}
|
||||
|
||||
if err := m.rcManager.RefreshDir(m.Provider, dirs); err != nil {
|
||||
return fmt.Errorf("failed to refresh directories for %s: %w", m.Provider, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetMountInfo returns mount information
|
||||
func (m *Mount) GetMountInfo() (*MountInfo, bool) {
|
||||
if m.rcManager == nil {
|
||||
return nil, false
|
||||
}
|
||||
return m.rcManager.GetMountInfo(m.Provider)
|
||||
}
|
||||
@@ -0,0 +1,136 @@
|
||||
package rclone
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// Stats represents rclone statistics: the generic core/stats payload
// from the RC API combined with the manager's per-provider mount info.
type Stats struct {
	CoreStats     map[string]interface{} `json:"coreStats"`     // copied from CoreStatsResponse; presumably raw core/stats fields — verify against that type
	TransferStats map[string]interface{} `json:"transferStats"` // copied from CoreStatsResponse; presumably transfer counters — verify against that type
	MountStats    map[string]*MountInfo  `json:"mountStats"`    // per-provider mount info from Manager.GetAllMounts
}
|
||||
|
||||
// GetStats retrieves statistics from the rclone RC server
|
||||
func (m *Manager) GetStats() (*Stats, error) {
|
||||
if !m.IsReady() {
|
||||
return nil, fmt.Errorf("rclone RC server not ready")
|
||||
}
|
||||
|
||||
// Get core stats
|
||||
req := RCRequest{
|
||||
Command: "core/stats",
|
||||
}
|
||||
|
||||
resp, err := m.makeRequest(req)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get rclone stats: %w", err)
|
||||
}
|
||||
|
||||
// Parse the response
|
||||
var coreStatsResp CoreStatsResponse
|
||||
if respBytes, err := json.Marshal(resp.Result); err == nil {
|
||||
json.Unmarshal(respBytes, &coreStatsResp)
|
||||
}
|
||||
|
||||
// Get mount stats
|
||||
mountStats := m.GetAllMounts()
|
||||
|
||||
stats := &Stats{
|
||||
CoreStats: coreStatsResp.CoreStats,
|
||||
TransferStats: coreStatsResp.TransferStats,
|
||||
MountStats: mountStats,
|
||||
}
|
||||
|
||||
return stats, nil
|
||||
}
|
||||
|
||||
// GetMemoryUsage returns memory usage statistics
|
||||
func (m *Manager) GetMemoryUsage() (map[string]interface{}, error) {
|
||||
if !m.IsReady() {
|
||||
return nil, fmt.Errorf("rclone RC server not ready")
|
||||
}
|
||||
|
||||
req := RCRequest{
|
||||
Command: "core/memstats",
|
||||
}
|
||||
|
||||
resp, err := m.makeRequest(req)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get memory stats: %w", err)
|
||||
}
|
||||
|
||||
if memStats, ok := resp.Result.(map[string]interface{}); ok {
|
||||
return memStats, nil
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("invalid memory stats response")
|
||||
}
|
||||
|
||||
// GetBandwidthStats returns bandwidth usage for all transfers
|
||||
func (m *Manager) GetBandwidthStats() (map[string]interface{}, error) {
|
||||
if !m.IsReady() {
|
||||
return nil, fmt.Errorf("rclone RC server not ready")
|
||||
}
|
||||
|
||||
req := RCRequest{
|
||||
Command: "core/bwlimit",
|
||||
}
|
||||
|
||||
resp, err := m.makeRequest(req)
|
||||
if err != nil {
|
||||
// Bandwidth stats might not be available, return empty
|
||||
return map[string]interface{}{}, nil
|
||||
}
|
||||
|
||||
if bwStats, ok := resp.Result.(map[string]interface{}); ok {
|
||||
return bwStats, nil
|
||||
}
|
||||
|
||||
return map[string]interface{}{}, nil
|
||||
}
|
||||
|
||||
// GetVersion returns rclone version information
|
||||
func (m *Manager) GetVersion() (map[string]interface{}, error) {
|
||||
if !m.IsReady() {
|
||||
return nil, fmt.Errorf("rclone RC server not ready")
|
||||
}
|
||||
|
||||
req := RCRequest{
|
||||
Command: "core/version",
|
||||
}
|
||||
|
||||
resp, err := m.makeRequest(req)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get version: %w", err)
|
||||
}
|
||||
|
||||
if version, ok := resp.Result.(map[string]interface{}); ok {
|
||||
return version, nil
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("invalid version response")
|
||||
}
|
||||
|
||||
// GetConfigDump returns the current rclone configuration
|
||||
func (m *Manager) GetConfigDump() (map[string]interface{}, error) {
|
||||
if !m.IsReady() {
|
||||
return nil, fmt.Errorf("rclone RC server not ready")
|
||||
}
|
||||
|
||||
req := RCRequest{
|
||||
Command: "config/dump",
|
||||
}
|
||||
|
||||
resp, err := m.makeRequest(req)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get config dump: %w", err)
|
||||
}
|
||||
|
||||
if config, ok := resp.Result.(map[string]interface{}); ok {
|
||||
return config, nil
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("invalid config dump response")
|
||||
}
|
||||
Reference in New Issue
Block a user