Fix issues with rclone setup

This commit is contained in:
Mukhtar Akere
2025-08-07 05:31:07 +01:00
parent c620ba3d56
commit eba24c9d63
9 changed files with 108 additions and 105 deletions

View File

@@ -59,7 +59,7 @@ ENV LOG_PATH=/app/logs
EXPOSE 8282
VOLUME ["/app"]
HEALTHCHECK --interval=3s --retries=10 CMD ["/usr/bin/healthcheck", "--config", "/app", "--basic"]
HEALTHCHECK --interval=10s --retries=10 CMD ["/usr/bin/healthcheck", "--config", "/app", "--basic"]
ENTRYPOINT ["/entrypoint.sh"]
CMD ["/usr/bin/decypharr", "--config", "/app"]

View File

@@ -103,8 +103,9 @@ type Rclone struct {
BufferSize string `json:"buffer_size,omitempty"` // Buffer size for reading files (default 16M)
// File system settings
UID uint32 `json:"uid,omitempty"` // User ID for mounted files
GID uint32 `json:"gid,omitempty"` // Group ID for mounted files
UID uint32 `json:"uid,omitempty"` // User ID for mounted files
GID uint32 `json:"gid,omitempty"` // Group ID for mounted files
Umask string `json:"umask,omitempty"`
// Timeout settings
AttrTimeout string `json:"attr_timeout,omitempty"` // Attribute cache timeout (default 1s)
@@ -338,7 +339,7 @@ func (c *Config) updateDebrid(d Debrid) Debrid {
}
if d.TorrentsRefreshInterval == "" {
d.TorrentsRefreshInterval = cmp.Or(c.WebDav.TorrentsRefreshInterval, "15s") // 15 seconds
d.TorrentsRefreshInterval = cmp.Or(c.WebDav.TorrentsRefreshInterval, "45s") // 45 seconds
}
if d.WebDav.DownloadLinksRefreshInterval == "" {
d.DownloadLinksRefreshInterval = cmp.Or(c.WebDav.DownloadLinksRefreshInterval, "40m") // 40 minutes
@@ -399,8 +400,8 @@ func (c *Config) setDefaults() {
c.Repair.Strategy = RepairStrategyPerTorrent
}
// Rclone defaults
if c.Rclone.Enabled {
c.Rclone.MountPath = cmp.Or(c.Rclone.MountPath, filepath.Join(c.Path, "mounts"))
c.Rclone.VfsCacheMode = cmp.Or(c.Rclone.VfsCacheMode, "off")
if c.Rclone.UID == 0 {
c.Rclone.UID = uint32(os.Getuid())
@@ -414,12 +415,8 @@ func (c *Config) setDefaults() {
}
}
if c.Rclone.VfsCacheMode != "off" {
c.Rclone.VfsCachePollInterval = cmp.Or(c.Rclone.VfsCachePollInterval, "1m")
c.Rclone.VfsReadChunkSizeLimit = cmp.Or(c.Rclone.VfsReadChunkSizeLimit, "off")
c.Rclone.VfsCachePollInterval = cmp.Or(c.Rclone.VfsCachePollInterval, "1m") // Clean cache every minute
}
c.Rclone.AttrTimeout = cmp.Or(c.Rclone.AttrTimeout, "10s")
c.Rclone.DirCacheTime = cmp.Or(c.Rclone.DirCacheTime, "5m")
}
// Load the auth file

11
main.go
View File

@@ -6,6 +6,8 @@ import (
"github.com/sirrobot01/decypharr/cmd/decypharr"
"github.com/sirrobot01/decypharr/internal/config"
"log"
"net/http"
_ "net/http/pprof"
"os"
"os/signal"
"runtime/debug"
@@ -25,6 +27,15 @@ func main() {
config.SetConfigPath(configPath)
config.Get()
if os.Getenv("ENABLE_PPROF") == "true" {
go func() {
log.Println("Starting pprof server on :6060")
if err := http.ListenAndServe(":6060", nil); err != nil {
log.Printf("pprof server error: %v", err)
}
}()
}
// Create a context canceled on SIGINT/SIGTERM
ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
defer stop()

View File

@@ -134,7 +134,7 @@ func (c *Cache) refreshRclone() error {
dirs = []string{"__all__"}
}
if c.mounter != nil {
return c.mounter.Refresh(dirs)
return c.mounter.RefreshDir(dirs)
} else {
return c.refreshRcloneWithRC(dirs)
}

View File

@@ -9,6 +9,9 @@ import (
func (c *Cache) StartSchedule(ctx context.Context) error {
// For now, we just want to refresh the listing and download links
// Stop any existing jobs before starting new ones
c.scheduler.RemoveByTags("decypharr")
// Schedule download link refresh job
if jd, err := utils.ConvertToJobDef(c.downloadLinksRefreshInterval); err != nil {
c.logger.Error().Err(err).Msg("Failed to convert download link refresh interval to job definition")
@@ -16,7 +19,7 @@ func (c *Cache) StartSchedule(ctx context.Context) error {
// Schedule the job
if _, err := c.scheduler.NewJob(jd, gocron.NewTask(func() {
c.refreshDownloadLinks(ctx)
}), gocron.WithContext(ctx)); err != nil {
}), gocron.WithContext(ctx), gocron.WithTags("decypharr")); err != nil {
c.logger.Error().Err(err).Msg("Failed to create download link refresh job")
} else {
c.logger.Debug().Msgf("Download link refresh job scheduled for every %s", c.downloadLinksRefreshInterval)
@@ -30,7 +33,7 @@ func (c *Cache) StartSchedule(ctx context.Context) error {
// Schedule the job
if _, err := c.scheduler.NewJob(jd, gocron.NewTask(func() {
c.refreshTorrents(ctx)
}), gocron.WithContext(ctx)); err != nil {
}), gocron.WithContext(ctx), gocron.WithTags("decypharr")); err != nil {
c.logger.Error().Err(err).Msg("Failed to create torrent refresh job")
} else {
c.logger.Debug().Msgf("Torrent refresh job scheduled for every %s", c.torrentRefreshInterval)
@@ -46,7 +49,7 @@ func (c *Cache) StartSchedule(ctx context.Context) error {
// Schedule the job
if _, err := c.cetScheduler.NewJob(jd, gocron.NewTask(func() {
c.resetInvalidLinks(ctx)
}), gocron.WithContext(ctx)); err != nil {
}), gocron.WithContext(ctx), gocron.WithTags("decypharr")); err != nil {
c.logger.Error().Err(err).Msg("Failed to create link reset job")
} else {
c.logger.Debug().Msgf("Link reset job scheduled for every midnight, CET")

View File

@@ -48,7 +48,6 @@ type Mount struct {
LocalPath string
WebDAVURL string
mountPoint *mountlib.MountPoint
vfs *vfs.VFS
cancel context.CancelFunc
mounted atomic.Bool
logger zerolog.Logger
@@ -105,13 +104,6 @@ func (m *Mount) Mount(ctx context.Context) error {
return fmt.Errorf("failed to set rclone config: %w", err)
}
// Check if fusermount3 is available
if _, err := exec.LookPath("fusermount3"); err != nil {
m.logger.Info().Msgf("FUSE mounting not available (fusermount3 not found). Files accessible via WebDAV at %s", m.WebDAVURL)
m.mounted.Store(true) // Mark as "mounted" for WebDAV access
return nil
}
// Get the mount function - try different mount methods
mountFn, err := getMountFn()
if err != nil {
@@ -125,9 +117,7 @@ func (m *Mount) Mount(ctx context.Context) error {
}
m.mounted.Store(true)
m.logger.Info().Msgf("Successfully mounted %s WebDAV at %s", m.Provider, m.LocalPath)
// Wait for context cancellation
<-mountCtx.Done()
<-mountCtx.Done() // Wait for context cancellation
}()
m.logger.Info().Msgf("Mount process started for %s at %s", m.Provider, m.LocalPath)
return nil
@@ -138,6 +128,7 @@ func setRcloneConfig(configName, webdavURL string) error {
config.FileSetValue(configName, "type", "webdav")
config.FileSetValue(configName, "url", webdavURL)
config.FileSetValue(configName, "vendor", "other")
config.FileSetValue(configName, "pacer_min_sleep", "0")
return nil
}
@@ -149,8 +140,7 @@ func (m *Mount) performMount(ctx context.Context, mountfn mountlib.MountFn) erro
}
// Get global rclone config
cfg := configPkg.Get()
rcloneOpt := &cfg.Rclone
rcloneOpt := configPkg.Get().Rclone
// Parse cache mode
var cacheMode vfscommon.CacheMode
@@ -167,12 +157,30 @@ func (m *Mount) performMount(ctx context.Context, mountfn mountlib.MountFn) erro
cacheMode = vfscommon.CacheModeOff
}
vfsOpt := &vfscommon.Options{
NoModTime: rcloneOpt.NoModTime,
NoChecksum: rcloneOpt.NoChecksum,
CacheMode: cacheMode,
UID: rcloneOpt.UID,
GID: rcloneOpt.GID,
vfsOpt := &vfscommon.Options{}
vfsOpt.Init() // Initialize VFS options with default values
vfsOpt.CacheMode = cacheMode
// Set VFS options based on rclone configuration
if rcloneOpt.NoChecksum {
vfsOpt.NoChecksum = rcloneOpt.NoChecksum
}
if rcloneOpt.NoModTime {
vfsOpt.NoModTime = rcloneOpt.NoModTime
}
if rcloneOpt.UID != 0 {
vfsOpt.UID = rcloneOpt.UID
}
if rcloneOpt.GID != 0 {
vfsOpt.GID = rcloneOpt.GID
}
if rcloneOpt.Umask != "" {
var umask vfscommon.FileMode
if err := umask.Set(rcloneOpt.Umask); err == nil {
vfsOpt.Umask = umask
}
}
// Parse duration strings
@@ -223,6 +231,8 @@ func (m *Mount) performMount(ctx context.Context, mountfn mountlib.MountFn) erro
}
}
fs.GetConfig(ctx).UseMmap = true
if rcloneOpt.VfsCacheMaxSize != "" {
var cacheMaxSize fs.SizeSuffix
if err := cacheMaxSize.Set(rcloneOpt.VfsCacheMaxSize); err == nil {
@@ -236,6 +246,7 @@ func (m *Mount) performMount(ctx context.Context, mountfn mountlib.MountFn) erro
AllowNonEmpty: true,
AllowOther: true,
Daemon: false,
AsyncRead: true,
DeviceName: fmt.Sprintf("decypharr-%s", m.Provider),
VolumeName: fmt.Sprintf("decypharr-%s", m.Provider),
}
@@ -258,20 +269,14 @@ func (m *Mount) performMount(ctx context.Context, mountfn mountlib.MountFn) erro
m.logger.Error().Err(err).Msgf("Failed to set cache directory %s, using default cache", cacheDir)
}
}
// Create VFS instance
vfsInstance := vfs.New(fsrc, vfsOpt)
m.vfs = vfsInstance
// Create mount point using rclone's internal mounting
mountPoint := mountlib.NewMountPoint(mountfn, m.LocalPath, fsrc, mountOpt, vfsOpt)
m.mountPoint = mountPoint
m.mountPoint = mountlib.NewMountPoint(mountfn, m.LocalPath, fsrc, mountOpt, vfsOpt)
// Start the mount
_, err = mountPoint.Mount()
_, err = m.mountPoint.Mount()
if err != nil {
// Cleanup mount point if it failed
if mountPoint != nil && mountPoint.UnmountFn != nil {
if m.mountPoint != nil && m.mountPoint.UnmountFn != nil {
if unmountErr := m.Unmount(); unmountErr != nil {
m.logger.Error().Err(unmountErr).Msgf("Failed to cleanup mount point %s after mount failure", m.LocalPath)
} else {
@@ -292,12 +297,8 @@ func (m *Mount) Unmount() error {
m.mounted.Store(false)
if m.vfs != nil {
m.logger.Debug().Msgf("Shutting down VFS for provider %s", m.Provider)
m.vfs.Shutdown()
} else {
m.logger.Warn().Msgf("VFS instance for provider %s is nil, skipping shutdown", m.Provider)
}
m.logger.Debug().Msgf("Shutting down VFS for provider %s", m.Provider)
m.mountPoint.VFS.Shutdown()
if m.mountPoint == nil || m.mountPoint.UnmountFn == nil {
m.logger.Warn().Msgf("Mount point for provider %s is nil or unmount function is not set, skipping unmount", m.Provider)
return nil
@@ -364,34 +365,31 @@ func (m *Mount) isMountBusy() bool {
}
func (m *Mount) IsMounted() bool {
return m.mounted.Load() && m.mountPoint != nil
return m.mounted.Load() && m.mountPoint != nil && m.mountPoint.VFS != nil
}
func (m *Mount) Refresh(dirs []string) error {
func (m *Mount) RefreshDir(dirs []string) error {
if !m.IsMounted() {
return fmt.Errorf("provider %s not properly mounted. Skipping refreshes", m.Provider)
}
if !m.mounted.Load() || m.vfs == nil {
return fmt.Errorf("provider %s not properly mounted", m.Provider)
}
// Forget the directories first
if err := m.ForgetVFS(dirs); err != nil {
return fmt.Errorf("failed to forget VFS directories for %s: %w", m.Provider, err)
}
//Then refresh the directories
if err := m.RefreshVFS(dirs); err != nil {
return fmt.Errorf("failed to refresh VFS directories for %s: %w", m.Provider, err)
}
return nil
// Use atomic forget-and-refresh to avoid race conditions
return m.forceRefreshVFS(dirs)
}
func (m *Mount) RefreshVFS(dirs []string) error {
root, err := m.vfs.Root()
// forceRefreshVFS atomically forgets and refreshes VFS directories to ensure immediate visibility
func (m *Mount) forceRefreshVFS(dirs []string) error {
vfsInstance := m.mountPoint.VFS
root, err := vfsInstance.Root()
if err != nil {
return fmt.Errorf("failed to get VFS root for %s: %w", m.Provider, err)
}
getDir := func(path string) (*vfs.Dir, error) {
path = strings.Trim(path, "/")
if path == "" {
return root, nil
}
segments := strings.Split(path, "/")
var node vfs.Node = root
for _, s := range segments {
@@ -408,62 +406,38 @@ func (m *Mount) RefreshVFS(dirs []string) error {
return nil, vfs.EINVAL
}
// If no specific directories provided, refresh root
// If no specific directories provided, work with root
if len(dirs) == 0 {
// Atomically forget and refresh root
root.ForgetAll()
if _, err := root.ReadDirAll(); err != nil {
return err
}
return nil
}
if len(dirs) == 1 {
vfsDir, err := getDir(dirs[0])
if err != nil {
return fmt.Errorf("failed to find directory '%s' for refresh in %s: %w", dirs[0], m.Provider, err)
}
if _, err := vfsDir.ReadDirAll(); err != nil {
return fmt.Errorf("failed to refresh directory '%s' in %s: %w", dirs[0], m.Provider, err)
return fmt.Errorf("failed to force-refresh root for %s: %w", m.Provider, err)
}
return nil
}
var errs []error
// Refresh specific directories
// Process each directory atomically
for _, dir := range dirs {
if dir != "" {
// Clean the directory path
dir = strings.Trim(dir, "/")
// Get the directory handle
vfsDir, err := getDir(dir)
if err != nil {
errs = append(errs, fmt.Errorf("failed to find directory '%s' for refresh in %s: %w", dir, m.Provider, err))
errs = append(errs, fmt.Errorf("failed to find directory '%s' for force-refresh in %s: %w", dir, m.Provider, err))
continue
}
// Atomically forget and refresh this specific directory
vfsDir.ForgetAll()
if _, err := vfsDir.ReadDirAll(); err != nil {
errs = append(errs, fmt.Errorf("failed to refresh directory '%s' in %s: %w", dir, m.Provider, err))
errs = append(errs, fmt.Errorf("failed to force-refresh directory '%s' in %s: %w", dir, m.Provider, err))
}
}
}
if len(errs) > 0 {
return errors.Join(errs...)
}
return nil
}
func (m *Mount) ForgetVFS(dirs []string) error {
// Get root directory
root, err := m.vfs.Root()
if err != nil {
return fmt.Errorf("failed to get VFS root for %s: %w", m.Provider, err)
}
// Forget specific directories
for _, dir := range dirs {
if dir != "" {
// Clean the directory path
dir = strings.Trim(dir, "/")
// Forget the directory from cache
root.ForgetPath(dir, fs.EntryDirectory)
}
}
return nil
}

File diff suppressed because one or more lines are too long

View File

@@ -148,7 +148,7 @@ class ConfigManager {
const fields = [
'enabled', 'mount_path', 'cache_dir', 'vfs_cache_mode', 'vfs_cache_max_size', 'vfs_cache_max_age',
'vfs_cache_poll_interval', 'vfs_read_chunk_size', 'vfs_read_chunk_size_limit', 'buffer_size',
'uid', 'gid', 'vfs_read_ahead', 'attr_timeout', 'dir_cache_time', 'poll_interval',
'uid', 'gid', 'vfs_read_ahead', 'attr_timeout', 'dir_cache_time', 'poll_interval', 'umask',
'no_modtime', 'no_checksum'
];
@@ -1236,6 +1236,7 @@ class ConfigManager {
vfs_read_chunk_size_limit: getElementValue('vfs_read_chunk_size_limit', 'off'),
uid: getElementValue('uid', 0),
gid: getElementValue('gid', 0),
umask: getElementValue('umask', ''),
vfs_read_ahead: getElementValue('vfs_read_ahead', '128k'),
attr_timeout: getElementValue('attr_timeout', '1s'),
dir_cache_time: getElementValue('dir_cache_time', '5m'),

View File

@@ -385,6 +385,15 @@
<span class="label-text-alt">Group ID for mounted files (0 = current group)</span>
</div>
</div>
<div class="form-control">
<label class="label" for="rclone.umask">
<span class="label-text font-medium">Umask</span>
</label>
<input type="text" class="input input-bordered" name="rclone.umask" id="rclone.umask" placeholder="0022">
<div class="label">
<span class="label-text-alt">File permission mask applied to mounted files (e.g. 0022)</span>
</div>
</div>
<div class="form-control">
<label class="label" for="rclone.buffer_size">
<span class="label-text font-medium">Buffer Size</span>
@@ -394,6 +403,15 @@
<span class="label-text-alt">Buffer Size (this caches to memory — be wary!)</span>
</div>
</div>
<div class="form-control">
<label class="label" for="rclone.attr_timeout">
<span class="label-text font-medium">Attribute Caching Timeout</span>
</label>
<input type="text" class="input input-bordered" name="rclone.attr_timeout" id="rclone.attr_timeout" placeholder="1s">
<div class="label">
<span class="label-text-alt">How long the kernel caches the attributes (size, modification time, etc.)</span>
</div>
</div>
</div>
</div>
</div>
@@ -497,7 +515,6 @@
<span class="label-text-alt">How often VFS cache dir gets cleaned</span>
</div>
</div>
</div>
</div>
</div>