Per-share independent mounts: each share gets its own rclone process

Replace the hierarchical single-mount design with independent mounts:
each [[shares]] entry is a (name, remote_path, mount_point) triplet
with its own rclone FUSE mount process and dedicated RC API port
(5572 + index). Remove top-level connection.remote_path and [mount]
section. Auto-warmup now runs in a background thread to avoid blocking
the supervision loop.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
grabbit 2026-02-18 12:32:18 +08:00
parent 46e592c3a4
commit 08f8fc4667
18 changed files with 1600 additions and 474 deletions

View File

@ -1,42 +1,51 @@
//! `warpgate bwlimit` — view or adjust bandwidth limits at runtime.
use anyhow::{Context, Result};
use anyhow::Result;
use crate::config::Config;
use crate::rclone::rc;
pub fn run(_config: &Config, up: Option<&str>, down: Option<&str>) -> Result<()> {
let result = rc::bwlimit(up, down).context("Failed to call rclone bwlimit API")?;
pub fn run(config: &Config, up: Option<&str>, down: Option<&str>) -> Result<()> {
if up.is_none() && down.is_none() {
println!("Current bandwidth limits:");
} else {
println!("Updated bandwidth limits:");
println!("Updating bandwidth limits:");
}
// rclone core/bwlimit returns { "bytesPerSecond": N, "bytesPerSecondTx": N, "bytesPerSecondRx": N }
// A value of -1 means unlimited.
let has_fields = result.get("bytesPerSecondTx").is_some();
for (i, share) in config.shares.iter().enumerate() {
let port = config.rc_port(i);
let result = match rc::bwlimit(port, up, down) {
Ok(r) => r,
Err(e) => {
eprintln!(" [{}] unreachable — {}", share.name, e);
continue;
}
};
print!(" [{}] ", share.name);
let has_fields = result.get("bytesPerSecondTx").is_some();
if has_fields {
if let Some(tx) = result.get("bytesPerSecondTx").and_then(|v| v.as_i64()) {
if tx < 0 {
println!(" Upload: unlimited");
print!("Up: unlimited");
} else {
println!(" Upload: {}/s", format_bytes(tx as u64));
print!("Up: {}/s", format_bytes(tx as u64));
}
}
if let Some(rx) = result.get("bytesPerSecondRx").and_then(|v| v.as_i64()) {
if rx < 0 {
println!(" Download: unlimited");
println!(", Down: unlimited");
} else {
println!(" Download: {}/s", format_bytes(rx as u64));
}
println!(", Down: {}/s", format_bytes(rx as u64));
}
} else {
println!();
}
} else {
// Fallback: print raw response
println!("{}", serde_json::to_string_pretty(&result)?);
}
}
Ok(())
}
@ -84,7 +93,7 @@ mod tests {
#[test]
fn test_format_bytes_mixed() {
assert_eq!(format_bytes(10485760), "10.0 MiB"); // 10 MiB
assert_eq!(format_bytes(52428800), "50.0 MiB"); // 50 MiB
assert_eq!(format_bytes(10485760), "10.0 MiB");
assert_eq!(format_bytes(52428800), "50.0 MiB");
}
}

View File

@ -1,28 +1,36 @@
//! `warpgate cache-list` and `warpgate cache-clean` commands.
use anyhow::{Context, Result};
use anyhow::Result;
use crate::config::Config;
use crate::rclone::rc;
/// List cached files via rclone RC API.
pub fn list(_config: &Config) -> Result<()> {
let result = rc::vfs_list("/").context("Failed to list VFS cache")?;
/// List cached files via rclone RC API (aggregated across all shares).
pub fn list(config: &Config) -> Result<()> {
for (i, share) in config.shares.iter().enumerate() {
let port = config.rc_port(i);
println!("=== {} ===", share.name);
let result = match rc::vfs_list(port, "/") {
Ok(r) => r,
Err(e) => {
eprintln!(" Could not list cache for '{}': {}", share.name, e);
continue;
}
};
// vfs/list may return an array directly or { "list": [...] }
let entries = if let Some(arr) = result.as_array() {
arr.as_slice()
} else if let Some(list) = result.get("list").and_then(|v| v.as_array()) {
list.as_slice()
} else {
// Unknown format — print raw JSON
println!("{}", serde_json::to_string_pretty(&result)?);
return Ok(());
continue;
};
if entries.is_empty() {
println!("Cache is empty.");
return Ok(());
println!(" Cache is empty.");
continue;
}
println!("{:<10} PATH", "SIZE");
@ -42,30 +50,44 @@ pub fn list(_config: &Config) -> Result<()> {
println!("{:<10} {}", format_bytes(size), name);
}
}
println!();
}
Ok(())
}
/// Clean cached files (only clean files, never dirty).
pub fn clean(_config: &Config, all: bool) -> Result<()> {
pub fn clean(config: &Config, all: bool) -> Result<()> {
if all {
println!("Clearing VFS directory cache...");
rc::vfs_forget("/").context("Failed to clear VFS cache")?;
println!("Done. VFS directory cache cleared.");
println!("Clearing VFS directory cache for all shares...");
for (i, share) in config.shares.iter().enumerate() {
let port = config.rc_port(i);
match rc::vfs_forget(port, "/") {
Ok(()) => println!(" {}: cleared", share.name),
Err(e) => eprintln!(" {}: failed — {}", share.name, e),
}
}
println!("Done.");
} else {
println!("Current cache status:");
match rc::vfs_stats() {
for (i, share) in config.shares.iter().enumerate() {
let port = config.rc_port(i);
print!(" [{}] ", share.name);
match rc::vfs_stats(port) {
Ok(vfs) => {
if let Some(dc) = vfs.disk_cache {
println!(" Used: {}", format_bytes(dc.bytes_used));
println!(" Uploading: {}", dc.uploads_in_progress);
println!(" Queued: {}", dc.uploads_queued);
if dc.uploads_in_progress > 0 || dc.uploads_queued > 0 {
println!("\n Dirty files exist — only synced files are safe to clean.");
println!(
"Used: {}, Uploading: {}, Queued: {}",
format_bytes(dc.bytes_used),
dc.uploads_in_progress,
dc.uploads_queued
);
} else {
println!("no cache stats");
}
}
Err(e) => println!("unreachable — {}", e),
}
Err(e) => eprintln!(" Could not fetch cache stats: {}", e),
}
println!("\nRun with --all to clear the directory cache.");
}

View File

@ -13,9 +13,10 @@ const TEST_SIZE: usize = 10 * 1024 * 1024; // 10 MiB
pub fn run(config: &Config) -> Result<()> {
let tmp_local = std::env::temp_dir().join("warpgate-speedtest");
// Use the first share's remote_path for the speed test
let remote_path = format!(
"nas:{}/.warpgate-speedtest",
config.connection.remote_path
config.shares[0].remote_path
);
// Create a 10 MiB test file

View File

@ -6,49 +6,82 @@ use crate::config::Config;
use crate::rclone::{mount, rc};
pub fn run(config: &Config) -> Result<()> {
// Check mount status
let mounted = match mount::is_mounted(config) {
// Check mount status for each share
let mut any_mounted = false;
for share in &config.shares {
let mounted = match mount::is_mounted(&share.mount_point) {
Ok(m) => m,
Err(e) => {
eprintln!("Warning: could not check mount status: {}", e);
eprintln!("Warning: could not check mount for '{}': {}", share.name, e);
false
}
};
let ro_tag = if share.read_only { " (ro)" } else { "" };
if mounted {
println!("Mount: UP ({})", config.mount.point.display());
println!(
"Mount: UP {} → {}{}",
share.mount_point.display(),
share.name,
ro_tag
);
any_mounted = true;
} else {
println!("Mount: DOWN");
println!("\nrclone VFS mount is not active.");
println!("Start with: systemctl start warpgate-mount");
println!("Mount: DOWN {}{}", share.name, ro_tag);
}
}
if !any_mounted {
println!("\nNo rclone VFS mounts are active.");
println!("Start with: systemctl start warpgate");
return Ok(());
}
// Transfer stats from rclone RC API
match rc::core_stats() {
Ok(stats) => {
println!("Speed: {}/s", format_bytes(stats.speed as u64));
println!("Moved: {}", format_bytes(stats.bytes));
println!("Active: {} transfers", stats.transfers);
println!("Errors: {}", stats.errors);
// Aggregate stats from all share RC ports
let mut total_bytes = 0u64;
let mut total_speed = 0.0f64;
let mut total_transfers = 0u64;
let mut total_errors = 0u64;
let mut total_cache_used = 0u64;
let mut total_uploading = 0u64;
let mut total_queued = 0u64;
let mut total_errored = 0u64;
let mut rc_reachable = false;
for (i, _share) in config.shares.iter().enumerate() {
let port = config.rc_port(i);
if let Ok(stats) = rc::core_stats(port) {
rc_reachable = true;
total_bytes += stats.bytes;
total_speed += stats.speed;
total_transfers += stats.transfers;
total_errors += stats.errors;
}
if let Ok(vfs) = rc::vfs_stats(port) {
if let Some(dc) = vfs.disk_cache {
total_cache_used += dc.bytes_used;
total_uploading += dc.uploads_in_progress;
total_queued += dc.uploads_queued;
total_errored += dc.errored_files;
}
Err(e) => {
eprintln!("Could not reach rclone RC API: {}", e);
}
}
// VFS cache stats (RC connection error already reported above)
if let Ok(vfs) = rc::vfs_stats() {
if let Some(dc) = vfs.disk_cache {
println!("Cache: {}", format_bytes(dc.bytes_used));
if rc_reachable {
println!("Speed: {}/s", format_bytes(total_speed as u64));
println!("Moved: {}", format_bytes(total_bytes));
println!("Active: {} transfers", total_transfers);
println!("Errors: {}", total_errors);
println!("Cache: {}", format_bytes(total_cache_used));
println!(
"Dirty: {} uploading, {} queued",
dc.uploads_in_progress, dc.uploads_queued
total_uploading, total_queued
);
if dc.errored_files > 0 {
println!("Errored: {} files", dc.errored_files);
}
if total_errored > 0 {
println!("Errored: {} files", total_errored);
}
} else {
eprintln!("Could not reach any rclone RC API.");
}
Ok(())

View File

@ -1,8 +1,7 @@
//! `warpgate warmup` — pre-cache a remote directory to local SSD.
//!
//! Lists files via `rclone lsf`, then reads each through the FUSE mount
//! to trigger VFS caching. This ensures files land in the rclone VFS
//! SSD cache rather than being downloaded to a throwaway temp directory.
//! to trigger VFS caching.
use std::io;
use std::process::Command;
@ -12,9 +11,13 @@ use anyhow::{Context, Result};
use crate::config::Config;
use crate::rclone::config as rclone_config;
pub fn run(config: &Config, path: &str, newer_than: Option<&str>) -> Result<()> {
let warmup_path = config.mount.point.join(path);
let remote_src = format!("nas:{}/{}", config.connection.remote_path, path);
pub fn run(config: &Config, share_name: &str, path: &str, newer_than: Option<&str>) -> Result<()> {
let share = config
.find_share(share_name)
.with_context(|| format!("Share '{}' not found in config", share_name))?;
let warmup_path = share.mount_point.join(path);
let remote_src = format!("nas:{}/{}", share.remote_path, path);
println!("Warming up: {remote_src}");
println!(" via mount: {}", warmup_path.display());
@ -63,7 +66,7 @@ pub fn run(config: &Config, path: &str, newer_than: Option<&str>) -> Result<()>
let mut errors = 0usize;
for file in &files {
if is_cached(config, path, file) {
if is_cached(config, &share.remote_path, path, file) {
skipped += 1;
continue;
}
@ -71,7 +74,6 @@ pub fn run(config: &Config, path: &str, newer_than: Option<&str>) -> Result<()>
let full_path = warmup_path.join(file);
match std::fs::File::open(&full_path) {
Ok(mut f) => {
// Stream-read through FUSE mount → populates VFS cache
if let Err(e) = io::copy(&mut f, &mut io::sink()) {
eprintln!(" Warning: read failed: {file}: {e}");
errors += 1;
@ -95,16 +97,13 @@ pub fn run(config: &Config, path: &str, newer_than: Option<&str>) -> Result<()>
}
/// Check if a file is already in the rclone VFS cache.
///
/// `warmup_path` is the subdir passed to `warpgate warmup` (e.g. "Image/2026").
/// `relative_path` is the filename from `rclone lsf` (relative to warmup_path).
fn is_cached(config: &Config, warmup_path: &str, relative_path: &str) -> bool {
fn is_cached(config: &Config, remote_path: &str, warmup_path: &str, relative_path: &str) -> bool {
let cache_path = config
.cache
.dir
.join("vfs")
.join("nas")
.join(config.connection.remote_path.trim_start_matches('/'))
.join(remote_path.trim_start_matches('/'))
.join(warmup_path)
.join(relative_path);
cache_path.exists()
@ -120,7 +119,6 @@ mod tests {
[connection]
nas_host = "10.0.0.1"
nas_user = "admin"
remote_path = "/photos"
[cache]
dir = "/tmp/warpgate-test-cache"
@ -130,7 +128,11 @@ dir = "/tmp/warpgate-test-cache"
[writeback]
[directory_cache]
[protocols]
[mount]
[[shares]]
name = "photos"
remote_path = "/photos"
mount_point = "/mnt/photos"
"#,
)
.unwrap()
@ -139,35 +141,31 @@ dir = "/tmp/warpgate-test-cache"
#[test]
fn test_is_cached_nonexistent_file() {
let config = test_config();
// File doesn't exist on disk, so should return false
assert!(!is_cached(&config, "2024", "IMG_001.jpg"));
assert!(!is_cached(&config, "/photos", "2024", "IMG_001.jpg"));
}
#[test]
fn test_is_cached_deep_path() {
let config = test_config();
assert!(!is_cached(&config, "Images/2024/January", "photo.cr3"));
assert!(!is_cached(&config, "/photos", "Images/2024/January", "photo.cr3"));
}
#[test]
fn test_is_cached_path_construction() {
// Verify the path is constructed correctly by checking the expected
// cache path: cache_dir/vfs/nas/<remote_path_trimmed>/<warmup>/<file>
let config = test_config();
let expected = std::path::PathBuf::from("/tmp/warpgate-test-cache")
.join("vfs")
.join("nas")
.join("photos") // "/photos" trimmed of leading /
.join("photos")
.join("2024")
.join("IMG_001.jpg");
// Reconstruct the same logic as is_cached
let cache_path = config
.cache
.dir
.join("vfs")
.join("nas")
.join(config.connection.remote_path.trim_start_matches('/'))
.join("photos")
.join("2024")
.join("IMG_001.jpg");
@ -176,21 +174,19 @@ dir = "/tmp/warpgate-test-cache"
#[test]
fn test_is_cached_remote_path_trimming() {
let mut config = test_config();
config.connection.remote_path = "/volume1/photos".into();
let config = test_config();
let remote_path = "/volume1/photos";
let cache_path = config
.cache
.dir
.join("vfs")
.join("nas")
.join(config.connection.remote_path.trim_start_matches('/'))
.join(remote_path.trim_start_matches('/'))
.join("2024")
.join("file.jpg");
// The leading "/" is stripped, so "nas" is followed by "volume1" (not "/volume1")
assert!(cache_path.to_string_lossy().contains("nas/volume1/photos"));
// No double slash from unstripped leading /
assert!(!cache_path.to_string_lossy().contains("nas//volume1"));
}
}

View File

@ -11,6 +11,9 @@ use serde::{Deserialize, Serialize};
/// Default config file path.
pub const DEFAULT_CONFIG_PATH: &str = "/etc/warpgate/config.toml";
/// Base RC API port. Each share gets `RC_BASE_PORT + share_index`.
pub const RC_BASE_PORT: u16 = 5572;
/// Top-level configuration.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Config {
@ -21,9 +24,11 @@ pub struct Config {
pub writeback: WritebackConfig,
pub directory_cache: DirectoryCacheConfig,
pub protocols: ProtocolsConfig,
pub mount: MountConfig,
#[serde(default)]
pub warmup: WarmupConfig,
#[serde(default)]
pub smb_auth: SmbAuthConfig,
pub shares: Vec<ShareConfig>,
}
/// SFTP connection to remote NAS.
@ -39,8 +44,6 @@ pub struct ConnectionConfig {
/// Path to SSH private key.
#[serde(default)]
pub nas_key_file: Option<String>,
/// Target path on NAS.
pub remote_path: String,
/// SFTP port.
#[serde(default = "default_sftp_port")]
pub sftp_port: u16,
@ -135,14 +138,6 @@ pub struct ProtocolsConfig {
pub webdav_port: u16,
}
/// FUSE mount point configuration.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MountConfig {
/// FUSE mount point path.
#[serde(default = "default_mount_point")]
pub point: PathBuf,
}
/// Warmup configuration — auto-cache paths on startup.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct WarmupConfig {
@ -166,12 +161,45 @@ impl Default for WarmupConfig {
/// A single warmup rule specifying a path to pre-cache.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct WarmupRule {
/// Path relative to remote_path.
/// Name of the share this rule applies to.
pub share: String,
/// Path relative to the share's remote_path.
pub path: String,
/// Only cache files newer than this (e.g. "7d", "24h").
pub newer_than: Option<String>,
}
/// Optional SMB user authentication (instead of guest access).
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct SmbAuthConfig {
/// Enable SMB user authentication.
#[serde(default)]
pub enabled: bool,
/// SMB username (defaults to connection.nas_user if unset).
#[serde(default)]
pub username: Option<String>,
/// Dedicated SMB password (takes precedence over reuse_nas_pass).
#[serde(default)]
pub smb_pass: Option<String>,
/// Reuse connection.nas_pass as the SMB password.
#[serde(default)]
pub reuse_nas_pass: bool,
}
/// A single share exported as SMB/NFS, each with its own rclone mount.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ShareConfig {
/// SMB/NFS share name.
pub name: String,
/// Absolute path on the remote NAS (e.g. "/volume1/photos").
pub remote_path: String,
/// Local FUSE mount point (e.g. "/mnt/photos").
pub mount_point: PathBuf,
/// Export as read-only.
#[serde(default)]
pub read_only: bool,
}
// --- Default value functions ---
fn default_sftp_port() -> u16 {
@ -222,10 +250,6 @@ fn default_nfs_network() -> String {
fn default_webdav_port() -> u16 {
8080
}
fn default_mount_point() -> PathBuf {
PathBuf::from("/mnt/nas-photos")
}
impl Config {
/// Load config from a TOML file.
pub fn load(path: &Path) -> Result<Self> {
@ -233,6 +257,7 @@ impl Config {
.with_context(|| format!("Failed to read config file: {}", path.display()))?;
let config: Config =
toml::from_str(&content).with_context(|| "Failed to parse config TOML")?;
config.validate()?;
Ok(config)
}
@ -241,6 +266,110 @@ impl Config {
include_str!("../templates/config.toml.default")
.to_string()
}
/// Find a share by name.
pub fn find_share(&self, name: &str) -> Option<&ShareConfig> {
self.shares.iter().find(|s| s.name == name)
}
/// Return the RC API port for a given share index.
pub fn rc_port(&self, share_index: usize) -> u16 {
RC_BASE_PORT + share_index as u16
}
/// Effective SMB username. Falls back to `connection.nas_user`.
pub fn smb_username(&self) -> &str {
self.smb_auth
.username
.as_deref()
.unwrap_or(&self.connection.nas_user)
}
/// Resolve the SMB password. Returns `None` when auth is disabled.
/// Returns an error if auth is enabled but no password can be resolved.
pub fn smb_password(&self) -> Result<Option<String>> {
if !self.smb_auth.enabled {
return Ok(None);
}
// Dedicated smb_pass takes precedence
if let Some(ref pass) = self.smb_auth.smb_pass {
return Ok(Some(pass.clone()));
}
// Fallback: reuse NAS password
if self.smb_auth.reuse_nas_pass {
if let Some(ref pass) = self.connection.nas_pass {
return Ok(Some(pass.clone()));
}
anyhow::bail!(
"smb_auth.reuse_nas_pass is true but connection.nas_pass is not set"
);
}
anyhow::bail!(
"smb_auth is enabled but no password configured (set smb_pass or reuse_nas_pass)"
);
}
/// Validate configuration invariants.
pub fn validate(&self) -> Result<()> {
// At least one share required
if self.shares.is_empty() {
anyhow::bail!("At least one [[shares]] entry is required");
}
let mut seen_names = std::collections::HashSet::new();
let mut seen_mounts = std::collections::HashSet::new();
for (i, share) in self.shares.iter().enumerate() {
if share.name.is_empty() {
anyhow::bail!("shares[{}]: name must not be empty", i);
}
if !seen_names.insert(&share.name) {
anyhow::bail!("shares[{}]: duplicate share name '{}'", i, share.name);
}
if !share.remote_path.starts_with('/') {
anyhow::bail!(
"shares[{}]: remote_path '{}' must start with '/'",
i,
share.remote_path
);
}
if !share.mount_point.is_absolute() {
anyhow::bail!(
"shares[{}]: mount_point '{}' must be an absolute path",
i,
share.mount_point.display()
);
}
if !seen_mounts.insert(&share.mount_point) {
anyhow::bail!(
"shares[{}]: duplicate mount_point '{}'",
i,
share.mount_point.display()
);
}
}
// Validate warmup rules reference existing shares
for (i, rule) in self.warmup.rules.iter().enumerate() {
if self.find_share(&rule.share).is_none() {
anyhow::bail!(
"warmup.rules[{}]: share '{}' does not exist",
i,
rule.share
);
}
}
// Validate SMB auth password resolution
if self.smb_auth.enabled {
self.smb_password()?;
}
Ok(())
}
}
#[cfg(test)]
@ -252,7 +381,6 @@ mod tests {
[connection]
nas_host = "10.0.0.1"
nas_user = "admin"
remote_path = "/photos"
[cache]
dir = "/tmp/cache"
@ -262,7 +390,11 @@ dir = "/tmp/cache"
[writeback]
[directory_cache]
[protocols]
[mount]
[[shares]]
name = "photos"
remote_path = "/photos"
mount_point = "/mnt/photos"
"#
}
@ -270,50 +402,43 @@ dir = "/tmp/cache"
fn test_config_load_minimal_defaults() {
let config: Config = toml::from_str(minimal_toml()).unwrap();
// Connection defaults
assert_eq!(config.connection.nas_host, "10.0.0.1");
assert_eq!(config.connection.nas_user, "admin");
assert_eq!(config.connection.remote_path, "/photos");
assert_eq!(config.connection.sftp_port, 22);
assert_eq!(config.connection.sftp_connections, 8);
assert!(config.connection.nas_pass.is_none());
assert!(config.connection.nas_key_file.is_none());
// Cache defaults
assert_eq!(config.cache.dir, PathBuf::from("/tmp/cache"));
assert_eq!(config.cache.max_size, "200G");
assert_eq!(config.cache.max_age, "720h");
assert_eq!(config.cache.min_free, "10G");
// Read defaults
assert_eq!(config.read.chunk_size, "256M");
assert_eq!(config.read.chunk_limit, "1G");
assert_eq!(config.read.read_ahead, "512M");
assert_eq!(config.read.buffer_size, "256M");
// Bandwidth defaults
assert_eq!(config.bandwidth.limit_up, "0");
assert_eq!(config.bandwidth.limit_down, "0");
assert!(config.bandwidth.adaptive);
// Writeback defaults
assert_eq!(config.writeback.write_back, "5s");
assert_eq!(config.writeback.transfers, 4);
// Directory cache default
assert_eq!(config.directory_cache.cache_time, "1h");
// Protocol defaults
assert!(config.protocols.enable_smb);
assert!(!config.protocols.enable_nfs);
assert!(!config.protocols.enable_webdav);
assert_eq!(config.protocols.nfs_allowed_network, "192.168.0.0/24");
assert_eq!(config.protocols.webdav_port, 8080);
// Mount default
assert_eq!(config.mount.point, PathBuf::from("/mnt/nas-photos"));
assert_eq!(config.shares.len(), 1);
assert_eq!(config.shares[0].name, "photos");
assert_eq!(config.shares[0].remote_path, "/photos");
assert_eq!(config.shares[0].mount_point, PathBuf::from("/mnt/photos"));
// Warmup default
assert!(config.warmup.auto);
assert!(config.warmup.rules.is_empty());
}
@ -326,7 +451,6 @@ nas_host = "192.168.1.100"
nas_user = "photographer"
nas_pass = "secret123"
nas_key_file = "/root/.ssh/id_rsa"
remote_path = "/volume1/photos"
sftp_port = 2222
sftp_connections = 16
@ -361,13 +485,21 @@ enable_webdav = true
nfs_allowed_network = "10.0.0.0/8"
webdav_port = 9090
[mount]
point = "/mnt/nas"
[[shares]]
name = "photos"
remote_path = "/volume1/photos"
mount_point = "/mnt/photos"
[[shares]]
name = "projects"
remote_path = "/volume1/projects"
mount_point = "/mnt/projects"
[warmup]
auto = false
[[warmup.rules]]
share = "photos"
path = "2024"
newer_than = "7d"
"#;
@ -380,7 +512,6 @@ newer_than = "7d"
config.connection.nas_key_file.as_deref(),
Some("/root/.ssh/id_rsa")
);
assert_eq!(config.connection.remote_path, "/volume1/photos");
assert_eq!(config.connection.sftp_port, 2222);
assert_eq!(config.connection.sftp_connections, 16);
@ -404,10 +535,13 @@ newer_than = "7d"
assert!(config.protocols.enable_webdav);
assert_eq!(config.protocols.webdav_port, 9090);
assert_eq!(config.mount.point, PathBuf::from("/mnt/nas"));
assert_eq!(config.shares.len(), 2);
assert_eq!(config.shares[0].remote_path, "/volume1/photos");
assert_eq!(config.shares[0].mount_point, PathBuf::from("/mnt/photos"));
assert!(!config.warmup.auto);
assert_eq!(config.warmup.rules.len(), 1);
assert_eq!(config.warmup.rules[0].share, "photos");
assert_eq!(config.warmup.rules[0].path, "2024");
assert_eq!(config.warmup.rules[0].newer_than.as_deref(), Some("7d"));
}
@ -417,7 +551,6 @@ newer_than = "7d"
let toml_str = r#"
[connection]
nas_user = "admin"
remote_path = "/photos"
[cache]
dir = "/tmp/cache"
@ -427,7 +560,11 @@ dir = "/tmp/cache"
[writeback]
[directory_cache]
[protocols]
[mount]
[[shares]]
name = "photos"
remote_path = "/photos"
mount_point = "/mnt/photos"
"#;
let result = toml::from_str::<Config>(toml_str);
assert!(result.is_err());
@ -450,7 +587,6 @@ dir = "/tmp/cache"
[connection]
nas_host = "10.0.0.1"
nas_user = "admin"
remote_path = "/photos"
sftp_connections = 999
[cache]
@ -462,7 +598,11 @@ max_size = "999T"
[writeback]
[directory_cache]
[protocols]
[mount]
[[shares]]
name = "photos"
remote_path = "/photos"
mount_point = "/mnt/photos"
"#;
let config: Config = toml::from_str(toml_str).unwrap();
assert_eq!(config.connection.sftp_connections, 999);
@ -475,14 +615,17 @@ max_size = "999T"
[connection]
nas_host = "10.0.0.1"
nas_user = "admin"
remote_path = "/photos"
[read]
[bandwidth]
[writeback]
[directory_cache]
[protocols]
[mount]
[[shares]]
name = "photos"
remote_path = "/photos"
mount_point = "/mnt/photos"
"#;
let result = toml::from_str::<Config>(toml_str);
assert!(result.is_err());
@ -578,11 +721,6 @@ remote_path = "/photos"
assert_eq!(default_webdav_port(), 8080);
}
#[test]
fn test_default_mount_point() {
assert_eq!(default_mount_point(), PathBuf::from("/mnt/nas-photos"));
}
#[test]
fn test_warmup_config_default() {
let wc = WarmupConfig::default();
@ -593,10 +731,12 @@ remote_path = "/photos"
#[test]
fn test_warmup_rule_deserialization() {
let toml_str = r#"
share = "photos"
path = "Images/2024"
newer_than = "7d"
"#;
let rule: WarmupRule = toml::from_str(toml_str).unwrap();
assert_eq!(rule.share, "photos");
assert_eq!(rule.path, "Images/2024");
assert_eq!(rule.newer_than.as_deref(), Some("7d"));
}
@ -604,6 +744,7 @@ newer_than = "7d"
#[test]
fn test_warmup_rule_without_newer_than() {
let toml_str = r#"
share = "photos"
path = "Images/2024"
"#;
let rule: WarmupRule = toml::from_str(toml_str).unwrap();
@ -615,4 +756,436 @@ path = "Images/2024"
fn test_default_config_path() {
assert_eq!(DEFAULT_CONFIG_PATH, "/etc/warpgate/config.toml");
}
#[test]
fn test_smb_auth_default_disabled() {
let auth = SmbAuthConfig::default();
assert!(!auth.enabled);
assert!(auth.username.is_none());
assert!(auth.smb_pass.is_none());
assert!(!auth.reuse_nas_pass);
}
#[test]
fn test_config_with_smb_auth_and_shares() {
let toml_str = r#"
[connection]
nas_host = "10.0.0.1"
nas_user = "admin"
nas_pass = "secret"
[cache]
dir = "/tmp/cache"
[read]
[bandwidth]
[writeback]
[directory_cache]
[protocols]
[smb_auth]
enabled = true
username = "photographer"
smb_pass = "my-password"
[[shares]]
name = "photos"
remote_path = "/volume1/photos"
mount_point = "/mnt/photos"
[[shares]]
name = "projects"
remote_path = "/volume1/projects"
mount_point = "/mnt/projects"
[[shares]]
name = "backups"
remote_path = "/volume1/backups"
mount_point = "/mnt/backups"
read_only = true
"#;
let config: Config = toml::from_str(toml_str).unwrap();
assert!(config.smb_auth.enabled);
assert_eq!(config.smb_auth.username.as_deref(), Some("photographer"));
assert_eq!(config.smb_auth.smb_pass.as_deref(), Some("my-password"));
assert_eq!(config.shares.len(), 3);
assert_eq!(config.shares[0].name, "photos");
assert_eq!(config.shares[0].remote_path, "/volume1/photos");
assert_eq!(config.shares[0].mount_point, PathBuf::from("/mnt/photos"));
assert!(!config.shares[0].read_only);
assert_eq!(config.shares[2].name, "backups");
assert!(config.shares[2].read_only);
}
#[test]
fn test_find_share() {
let config: Config = toml::from_str(minimal_toml()).unwrap();
assert!(config.find_share("photos").is_some());
assert!(config.find_share("nonexistent").is_none());
}
#[test]
fn test_rc_port() {
let config: Config = toml::from_str(minimal_toml()).unwrap();
assert_eq!(config.rc_port(0), 5572);
assert_eq!(config.rc_port(1), 5573);
assert_eq!(config.rc_port(2), 5574);
}
#[test]
fn test_smb_username_fallback() {
let config: Config = toml::from_str(minimal_toml()).unwrap();
assert_eq!(config.smb_username(), "admin");
}
#[test]
fn test_smb_username_explicit() {
let toml_str = r#"
[connection]
nas_host = "10.0.0.1"
nas_user = "admin"
[cache]
dir = "/tmp/cache"
[read]
[bandwidth]
[writeback]
[directory_cache]
[protocols]
[smb_auth]
enabled = true
username = "smbuser"
smb_pass = "pass"
[[shares]]
name = "photos"
remote_path = "/photos"
mount_point = "/mnt/photos"
"#;
let config: Config = toml::from_str(toml_str).unwrap();
assert_eq!(config.smb_username(), "smbuser");
}
#[test]
fn test_smb_password_disabled() {
let config: Config = toml::from_str(minimal_toml()).unwrap();
assert!(config.smb_password().unwrap().is_none());
}
#[test]
fn test_smb_password_dedicated() {
let toml_str = r#"
[connection]
nas_host = "10.0.0.1"
nas_user = "admin"
[cache]
dir = "/tmp/cache"
[read]
[bandwidth]
[writeback]
[directory_cache]
[protocols]
[smb_auth]
enabled = true
smb_pass = "dedicated-pass"
[[shares]]
name = "photos"
remote_path = "/photos"
mount_point = "/mnt/photos"
"#;
let config: Config = toml::from_str(toml_str).unwrap();
assert_eq!(config.smb_password().unwrap(), Some("dedicated-pass".into()));
}
#[test]
fn test_smb_password_reuse_nas_pass() {
let toml_str = r#"
[connection]
nas_host = "10.0.0.1"
nas_user = "admin"
nas_pass = "nas-secret"
[cache]
dir = "/tmp/cache"
[read]
[bandwidth]
[writeback]
[directory_cache]
[protocols]
[smb_auth]
enabled = true
reuse_nas_pass = true
[[shares]]
name = "photos"
remote_path = "/photos"
mount_point = "/mnt/photos"
"#;
let config: Config = toml::from_str(toml_str).unwrap();
assert_eq!(config.smb_password().unwrap(), Some("nas-secret".into()));
}
#[test]
fn test_smb_password_reuse_but_nas_pass_missing() {
let toml_str = r#"
[connection]
nas_host = "10.0.0.1"
nas_user = "admin"
[cache]
dir = "/tmp/cache"
[read]
[bandwidth]
[writeback]
[directory_cache]
[protocols]
[smb_auth]
enabled = true
reuse_nas_pass = true
[[shares]]
name = "photos"
remote_path = "/photos"
mount_point = "/mnt/photos"
"#;
let config: Config = toml::from_str(toml_str).unwrap();
assert!(config.validate().is_err());
}
#[test]
fn test_validate_no_shares() {
let toml_str = r#"
[connection]
nas_host = "10.0.0.1"
nas_user = "admin"
[cache]
dir = "/tmp/cache"
[read]
[bandwidth]
[writeback]
[directory_cache]
[protocols]
"#;
// This should fail to parse because shares is required and non-optional
let result = toml::from_str::<Config>(toml_str);
// If it parses with empty vec, validate should catch it
if let Ok(config) = result {
let err = config.validate().unwrap_err().to_string();
assert!(err.contains("At least one"), "got: {err}");
}
}
#[test]
fn test_validate_duplicate_share_name() {
let toml_str = r#"
[connection]
nas_host = "10.0.0.1"
nas_user = "admin"
[cache]
dir = "/tmp/cache"
[read]
[bandwidth]
[writeback]
[directory_cache]
[protocols]
[[shares]]
name = "photos"
remote_path = "/volume1/photos"
mount_point = "/mnt/photos"
[[shares]]
name = "photos"
remote_path = "/volume1/other"
mount_point = "/mnt/other"
"#;
let config: Config = toml::from_str(toml_str).unwrap();
let err = config.validate().unwrap_err().to_string();
assert!(err.contains("duplicate share name"), "got: {err}");
}
#[test]
fn test_validate_empty_share_name() {
let toml_str = r#"
[connection]
nas_host = "10.0.0.1"
nas_user = "admin"
[cache]
dir = "/tmp/cache"
[read]
[bandwidth]
[writeback]
[directory_cache]
[protocols]
[[shares]]
name = ""
remote_path = "/photos"
mount_point = "/mnt/photos"
"#;
let config: Config = toml::from_str(toml_str).unwrap();
let err = config.validate().unwrap_err().to_string();
assert!(err.contains("name must not be empty"), "got: {err}");
}
#[test]
fn test_validate_relative_remote_path() {
let toml_str = r#"
[connection]
nas_host = "10.0.0.1"
nas_user = "admin"
[cache]
dir = "/tmp/cache"
[read]
[bandwidth]
[writeback]
[directory_cache]
[protocols]
[[shares]]
name = "photos"
remote_path = "photos"
mount_point = "/mnt/photos"
"#;
let config: Config = toml::from_str(toml_str).unwrap();
let err = config.validate().unwrap_err().to_string();
assert!(err.contains("must start with '/'"), "got: {err}");
}
#[test]
fn test_validate_relative_mount_point() {
let toml_str = r#"
[connection]
nas_host = "10.0.0.1"
nas_user = "admin"
[cache]
dir = "/tmp/cache"
[read]
[bandwidth]
[writeback]
[directory_cache]
[protocols]
[[shares]]
name = "photos"
remote_path = "/photos"
mount_point = "mnt/photos"
"#;
let config: Config = toml::from_str(toml_str).unwrap();
let err = config.validate().unwrap_err().to_string();
assert!(err.contains("must be an absolute path"), "got: {err}");
}
#[test]
fn test_validate_duplicate_mount_point() {
    // Two distinct shares cannot mount at the same directory.
    let raw = r#"
[connection]
nas_host = "10.0.0.1"
nas_user = "admin"
[cache]
dir = "/tmp/cache"
[read]
[bandwidth]
[writeback]
[directory_cache]
[protocols]
[[shares]]
name = "photos"
remote_path = "/volume1/photos"
mount_point = "/mnt/data"
[[shares]]
name = "videos"
remote_path = "/volume1/videos"
mount_point = "/mnt/data"
"#;
    let err = toml::from_str::<Config>(raw)
        .unwrap()
        .validate()
        .unwrap_err()
        .to_string();
    assert!(err.contains("duplicate mount_point"), "got: {err}");
}
#[test]
fn test_validate_warmup_bad_share_ref() {
    // A warmup rule referencing an undeclared share name must be rejected.
    let raw = r#"
[connection]
nas_host = "10.0.0.1"
nas_user = "admin"
[cache]
dir = "/tmp/cache"
[read]
[bandwidth]
[writeback]
[directory_cache]
[protocols]
[[shares]]
name = "photos"
remote_path = "/photos"
mount_point = "/mnt/photos"
[warmup]
auto = true
[[warmup.rules]]
share = "nonexistent"
path = "2024"
"#;
    let err = toml::from_str::<Config>(raw)
        .unwrap()
        .validate()
        .unwrap_err()
        .to_string();
    assert!(err.contains("does not exist"), "got: {err}");
}
#[test]
fn test_validate_smb_auth_enabled_no_password() {
    // smb_auth.enabled with no password source configured must fail validation.
    let raw = r#"
[connection]
nas_host = "10.0.0.1"
nas_user = "admin"
[cache]
dir = "/tmp/cache"
[read]
[bandwidth]
[writeback]
[directory_cache]
[protocols]
[smb_auth]
enabled = true
[[shares]]
name = "photos"
remote_path = "/photos"
mount_point = "/mnt/photos"
"#;
    let parsed: Config = toml::from_str(raw).unwrap();
    let outcome = parsed.validate();
    assert!(outcome.is_err());
}
}

View File

@ -35,6 +35,11 @@ pub fn run(config: &Config) -> Result<()> {
println!("Generating service configs...");
if config.protocols.enable_smb {
samba::write_config(config)?;
// Set up SMB user authentication if enabled
if config.smb_auth.enabled {
println!("Setting up SMB user authentication...");
samba::setup_user(config)?;
}
}
if config.protocols.enable_nfs {
nfs::write_config(config)?;

View File

@ -42,7 +42,10 @@ enum Commands {
},
/// Pre-cache a remote directory to local SSD.
Warmup {
/// Remote path to warm up (relative to NAS remote_path).
/// Name of the share to warm up.
#[arg(long)]
share: String,
/// Path within the share to warm up.
path: String,
/// Only files newer than this duration (e.g. "7d", "24h").
#[arg(long)]
@ -96,8 +99,8 @@ fn main() -> Result<()> {
Commands::Status => cli::status::run(&config),
Commands::CacheList => cli::cache::list(&config),
Commands::CacheClean { all } => cli::cache::clean(&config, all),
Commands::Warmup { path, newer_than } => {
cli::warmup::run(&config, &path, newer_than.as_deref())
Commands::Warmup { share, path, newer_than } => {
cli::warmup::run(&config, &share, &path, newer_than.as_deref())
}
Commands::Bwlimit { up, down } => {
cli::bwlimit::run(&config, up.as_deref(), down.as_deref())

View File

@ -82,7 +82,6 @@ mod tests {
[connection]
nas_host = "10.0.0.1"
nas_user = "admin"
remote_path = "/photos"
[cache]
dir = "/tmp/cache"
@ -92,7 +91,11 @@ dir = "/tmp/cache"
[writeback]
[directory_cache]
[protocols]
[mount]
[[shares]]
name = "photos"
remote_path = "/photos"
mount_point = "/mnt/photos"
"#,
)
.unwrap()

View File

@ -1,22 +1,23 @@
//! Manage rclone VFS FUSE mount lifecycle.
use std::path::Path;
use anyhow::{Context, Result};
use crate::config::Config;
use crate::config::{Config, ShareConfig};
use super::config::RCLONE_CONF_PATH;
/// Build the full `rclone mount` command-line arguments from config.
/// Build the full `rclone mount` command-line arguments for a single share.
///
/// Returns a `Vec<String>` starting with `"mount"` followed by the remote
/// source, mount point, and all VFS/cache flags derived from config.
pub fn build_mount_args(config: &Config) -> Vec<String> {
/// Each share gets its own rclone mount process with a dedicated RC port.
pub fn build_mount_args(config: &Config, share: &ShareConfig, rc_port: u16) -> Vec<String> {
let mut args = Vec::new();
// Subcommand and source:dest
args.push("mount".into());
args.push(format!("nas:{}", config.connection.remote_path));
args.push(config.mount.point.display().to_string());
args.push(format!("nas:{}", share.remote_path));
args.push(share.mount_point.display().to_string());
// Point to our generated rclone.conf
args.push("--config".into());
@ -76,8 +77,10 @@ pub fn build_mount_args(config: &Config) -> Vec<String> {
args.push(bw);
}
// Enable rclone RC API on default port
// Enable rclone RC API on per-share port
args.push("--rc".into());
args.push("--rc-addr".into());
args.push(format!("127.0.0.1:{rc_port}"));
// Allow non-root users to access the FUSE mount (requires user_allow_other in /etc/fuse.conf)
args.push("--allow-other".into());
@ -115,11 +118,31 @@ fn rclone_supports_min_free_space() -> bool {
}
/// Build the rclone mount command as a string (for systemd ExecStart).
pub fn build_mount_command(config: &Config) -> String {
let args = build_mount_args(config);
pub fn build_mount_command(config: &Config, share: &ShareConfig, rc_port: u16) -> String {
let args = build_mount_args(config, share, rc_port);
format!("/usr/bin/rclone {}", args.join(" "))
}
/// Check if a FUSE mount is currently active at the given mount point.
pub fn is_mounted(mount_point: &Path) -> Result<bool> {
let mp_str = mount_point.display().to_string();
let content = std::fs::read_to_string("/proc/mounts")
.with_context(|| "Failed to read /proc/mounts")?;
for line in content.lines() {
// /proc/mounts format: device mountpoint fstype options dump pass
let mut fields = line.split_whitespace();
let _device = fields.next();
if let Some(mp) = fields.next()
&& mp == mp_str {
return Ok(true);
}
}
Ok(false)
}
#[cfg(test)]
mod tests {
use super::*;
@ -130,7 +153,6 @@ mod tests {
[connection]
nas_host = "10.0.0.1"
nas_user = "admin"
remote_path = "/photos"
[cache]
dir = "/tmp/cache"
@ -140,7 +162,11 @@ dir = "/tmp/cache"
[writeback]
[directory_cache]
[protocols]
[mount]
[[shares]]
name = "photos"
remote_path = "/photos"
mount_point = "/mnt/photos"
"#,
)
.unwrap()
@ -180,11 +206,12 @@ dir = "/tmp/cache"
#[test]
fn test_build_mount_args_contains_essentials() {
let config = test_config();
let args = build_mount_args(&config);
let share = &config.shares[0];
let args = build_mount_args(&config, share, 5572);
assert_eq!(args[0], "mount");
assert_eq!(args[1], "nas:/photos");
assert_eq!(args[2], "/mnt/nas-photos");
assert_eq!(args[2], "/mnt/photos");
assert!(args.contains(&"--config".to_string()));
assert!(args.contains(&RCLONE_CONF_PATH.to_string()));
@ -204,14 +231,16 @@ dir = "/tmp/cache"
assert!(args.contains(&"--transfers".to_string()));
assert!(args.contains(&"4".to_string()));
assert!(args.contains(&"--rc".to_string()));
assert!(args.contains(&"--rc-addr".to_string()));
assert!(args.contains(&"127.0.0.1:5572".to_string()));
assert!(args.contains(&"--allow-other".to_string()));
}
#[test]
fn test_build_mount_args_no_bwlimit_when_unlimited() {
let config = test_config();
let args = build_mount_args(&config);
// Default bandwidth is "0" for both, so --bwlimit should NOT be present
let share = &config.shares[0];
let args = build_mount_args(&config, share, 5572);
assert!(!args.contains(&"--bwlimit".to_string()));
}
@ -220,7 +249,8 @@ dir = "/tmp/cache"
let mut config = test_config();
config.bandwidth.limit_up = "10M".into();
config.bandwidth.limit_down = "50M".into();
let args = build_mount_args(&config);
let share = &config.shares[0];
let args = build_mount_args(&config, share, 5572);
assert!(args.contains(&"--bwlimit".to_string()));
assert!(args.contains(&"10M:50M".to_string()));
}
@ -228,44 +258,39 @@ dir = "/tmp/cache"
#[test]
fn test_build_mount_command_format() {
let config = test_config();
let cmd = build_mount_command(&config);
let share = &config.shares[0];
let cmd = build_mount_command(&config, share, 5572);
assert!(cmd.starts_with("/usr/bin/rclone mount"));
assert!(cmd.contains("nas:/photos"));
assert!(cmd.contains("/mnt/nas-photos"));
assert!(cmd.contains("/mnt/photos"));
}
#[test]
fn test_build_mount_args_custom_config() {
let mut config = test_config();
config.connection.remote_path = "/volume1/media".into();
config.mount.point = std::path::PathBuf::from("/mnt/media");
config.shares[0].remote_path = "/volume1/media".into();
config.shares[0].mount_point = std::path::PathBuf::from("/mnt/media");
config.cache.dir = std::path::PathBuf::from("/ssd/cache");
config.writeback.transfers = 16;
let args = build_mount_args(&config);
let share = &config.shares[0];
let args = build_mount_args(&config, share, 5573);
assert_eq!(args[1], "nas:/volume1/media");
assert_eq!(args[2], "/mnt/media");
assert!(args.contains(&"/ssd/cache".to_string()));
assert!(args.contains(&"16".to_string()));
assert!(args.contains(&"127.0.0.1:5573".to_string()));
}
#[test]
fn test_build_mount_args_different_rc_ports() {
    let config = test_config();
    let share = &config.shares[0];
    // Same share, two ports: each arg list must carry its own --rc-addr value.
    for port in [5572u16, 5573] {
        let args = build_mount_args(&config, share, port);
        assert!(args.contains(&format!("127.0.0.1:{port}")));
    }
}
}
/// Check if the FUSE mount is currently active by inspecting `/proc/mounts`.
pub fn is_mounted(config: &Config) -> Result<bool> {
let mount_point = config.mount.point.display().to_string();
let content = std::fs::read_to_string("/proc/mounts")
.with_context(|| "Failed to read /proc/mounts")?;
for line in content.lines() {
// /proc/mounts format: device mountpoint fstype options dump pass
let mut fields = line.split_whitespace();
let _device = fields.next();
if let Some(mp) = fields.next()
&& mp == mount_point {
return Ok(true);
}
}
Ok(false)
}

View File

@ -1,14 +1,12 @@
//! rclone RC (Remote Control) API client.
//!
//! rclone exposes an HTTP API on localhost when started with `--rc`.
//! Each share's rclone instance listens on a different port.
//! This module calls those endpoints for runtime status and control.
use anyhow::{Context, Result};
use serde::Deserialize;
/// Default rclone RC API address.
pub const RC_ADDR: &str = "http://127.0.0.1:5572";
/// Response from `core/stats`.
#[derive(Debug, Deserialize)]
pub struct CoreStats {
@ -39,9 +37,14 @@ pub struct DiskCacheStats {
pub uploads_queued: u64,
}
/// Build the base URL of the rclone RC HTTP API listening on `port`.
/// Always loopback — the RC endpoint is never exposed beyond localhost.
fn rc_addr(port: u16) -> String {
    let mut url = String::from("http://127.0.0.1:");
    url.push_str(&port.to_string());
    url
}
/// Call `core/stats` — transfer statistics.
pub fn core_stats() -> Result<CoreStats> {
let stats: CoreStats = ureq::post(format!("{RC_ADDR}/core/stats"))
pub fn core_stats(port: u16) -> Result<CoreStats> {
let addr = rc_addr(port);
let stats: CoreStats = ureq::post(format!("{addr}/core/stats"))
.send_json(serde_json::json!({}))?
.body_mut()
.read_json()
@ -50,8 +53,9 @@ pub fn core_stats() -> Result<CoreStats> {
}
/// Call `vfs/stats` — VFS cache statistics.
pub fn vfs_stats() -> Result<VfsStats> {
let stats: VfsStats = ureq::post(format!("{RC_ADDR}/vfs/stats"))
pub fn vfs_stats(port: u16) -> Result<VfsStats> {
let addr = rc_addr(port);
let stats: VfsStats = ureq::post(format!("{addr}/vfs/stats"))
.send_json(serde_json::json!({}))?
.body_mut()
.read_json()
@ -60,8 +64,9 @@ pub fn vfs_stats() -> Result<VfsStats> {
}
/// Call `vfs/list` — list active VFS instances.
pub fn vfs_list(dir: &str) -> Result<serde_json::Value> {
let value: serde_json::Value = ureq::post(format!("{RC_ADDR}/vfs/list"))
pub fn vfs_list(port: u16, dir: &str) -> Result<serde_json::Value> {
let addr = rc_addr(port);
let value: serde_json::Value = ureq::post(format!("{addr}/vfs/list"))
.send_json(serde_json::json!({ "dir": dir }))?
.body_mut()
.read_json()
@ -70,8 +75,9 @@ pub fn vfs_list(dir: &str) -> Result<serde_json::Value> {
}
/// Call `vfs/forget` — force directory cache refresh.
pub fn vfs_forget(dir: &str) -> Result<()> {
ureq::post(format!("{RC_ADDR}/vfs/forget"))
pub fn vfs_forget(port: u16, dir: &str) -> Result<()> {
let addr = rc_addr(port);
ureq::post(format!("{addr}/vfs/forget"))
.send_json(serde_json::json!({ "dir": dir }))?;
Ok(())
}
@ -80,7 +86,8 @@ pub fn vfs_forget(dir: &str) -> Result<()> {
///
/// If both `upload` and `download` are `None`, returns current limits.
/// Otherwise sets new limits using rclone's `UP:DOWN` rate format.
pub fn bwlimit(upload: Option<&str>, download: Option<&str>) -> Result<serde_json::Value> {
pub fn bwlimit(port: u16, upload: Option<&str>, download: Option<&str>) -> Result<serde_json::Value> {
let addr = rc_addr(port);
let body = match (upload, download) {
(None, None) => serde_json::json!({}),
(up, down) => {
@ -93,7 +100,7 @@ pub fn bwlimit(upload: Option<&str>, download: Option<&str>) -> Result<serde_jso
}
};
let value: serde_json::Value = ureq::post(format!("{RC_ADDR}/core/bwlimit"))
let value: serde_json::Value = ureq::post(format!("{addr}/core/bwlimit"))
.send_json(&body)?
.body_mut()
.read_json()
@ -204,8 +211,14 @@ mod tests {
}
#[test]
fn test_rc_addr_constant() {
assert_eq!(RC_ADDR, "http://127.0.0.1:5572");
fn test_rc_base_port_constant() {
assert_eq!(crate::config::RC_BASE_PORT, 5572);
}
#[test]
fn test_rc_addr_formatting() {
    // The port maps directly into the loopback URL.
    let cases = [
        (5572u16, "http://127.0.0.1:5572"),
        (5573, "http://127.0.0.1:5573"),
    ];
    for (port, expected) in cases {
        assert_eq!(rc_addr(port), expected);
    }
}
#[test]

View File

@ -1,5 +1,6 @@
//! Generate NFS export configuration.
use std::fmt::Write as _;
use std::fs;
use std::path::Path;
@ -10,24 +11,28 @@ use crate::config::Config;
/// Default output path for NFS exports.
pub const EXPORTS_PATH: &str = "/etc/exports.d/warpgate.exports";
/// Generate NFS exports entry for the FUSE mount point.
/// Generate NFS exports entries.
///
/// Produces a line like:
/// ```text
/// /mnt/nas-photos 192.168.0.0/24(rw,sync,no_subtree_check,fsid=1)
/// ```
/// `fsid=1` is required for FUSE-backed mounts because the kernel cannot
/// derive a stable fsid from the device number.
/// Each share gets its own export line with a unique `fsid` (1, 2, 3, ...),
/// required because FUSE-backed mounts don't have stable device numbers.
pub fn generate(config: &Config) -> Result<String> {
let mount_point = config.mount.point.display();
let network = &config.protocols.nfs_allowed_network;
let mut content = String::new();
let line = format!(
"# Generated by Warpgate — do not edit manually.\n\
{mount_point} {network}(rw,sync,no_subtree_check,fsid=1)\n"
);
writeln!(content, "# Generated by Warpgate — do not edit manually.")?;
Ok(line)
for (i, share) in config.shares.iter().enumerate() {
let rw_flag = if share.read_only { "ro" } else { "rw" };
let fsid = i + 1;
writeln!(
content,
"{} {network}({rw_flag},sync,no_subtree_check,fsid={fsid})",
share.mount_point.display()
)?;
}
Ok(content)
}
/// Write exports file to disk.
@ -56,7 +61,6 @@ mod tests {
[connection]
nas_host = "10.0.0.1"
nas_user = "admin"
remote_path = "/photos"
[cache]
dir = "/tmp/cache"
@ -66,7 +70,49 @@ dir = "/tmp/cache"
[writeback]
[directory_cache]
[protocols]
[mount]
[[shares]]
name = "photos"
remote_path = "/photos"
mount_point = "/mnt/photos"
"#,
)
.unwrap()
}
fn test_config_with_shares() -> Config {
toml::from_str(
r#"
[connection]
nas_host = "10.0.0.1"
nas_user = "admin"
[cache]
dir = "/tmp/cache"
[read]
[bandwidth]
[writeback]
[directory_cache]
[protocols]
nfs_allowed_network = "192.168.0.0/24"
[[shares]]
name = "photos"
remote_path = "/volume1/photos"
mount_point = "/mnt/photos"
[[shares]]
name = "projects"
remote_path = "/volume1/projects"
mount_point = "/mnt/projects"
[[shares]]
name = "backups"
remote_path = "/volume1/backups"
mount_point = "/mnt/backups"
read_only = true
"#,
)
.unwrap()
@ -77,7 +123,7 @@ dir = "/tmp/cache"
let config = test_config();
let content = generate(&config).unwrap();
assert!(content.contains("/mnt/nas-photos"));
assert!(content.contains("/mnt/photos"));
assert!(content.contains("192.168.0.0/24"));
assert!(content.contains("rw,sync,no_subtree_check,fsid=1"));
}
@ -91,17 +137,54 @@ dir = "/tmp/cache"
assert!(content.contains("10.0.0.0/8"));
}
#[test]
fn test_generate_exports_custom_mount() {
let mut config = test_config();
config.mount.point = std::path::PathBuf::from("/mnt/media");
let content = generate(&config).unwrap();
assert!(content.contains("/mnt/media"));
}
#[test]
fn test_exports_path_constant() {
    // Pin the generated-exports location so packaging stays in sync.
    assert_eq!("/etc/exports.d/warpgate.exports", EXPORTS_PATH);
}
#[test]
fn test_generate_multi_export() {
    let config = test_config_with_shares();
    let content = generate(&config).unwrap();
    // Every configured share's mount point must appear in the exports file.
    for mount in ["/mnt/photos", "/mnt/projects", "/mnt/backups"] {
        assert!(content.contains(mount));
    }
}
#[test]
fn test_generate_unique_fsid() {
    let config = test_config_with_shares();
    let content = generate(&config).unwrap();
    // fsid is 1-based and unique per share (required for FUSE-backed exports).
    for fsid in 1..=3 {
        assert!(content.contains(&format!("fsid={fsid}")));
    }
}
#[test]
fn test_generate_read_only_export() {
    let config = test_config_with_shares();
    let content = generate(&config).unwrap();
    // Locate a share's export line by substring.
    let line_for = |needle: &str| content.lines().find(|l| l.contains(needle)).unwrap();
    // backups has read_only = true, so its flag is "ro"
    assert!(line_for("backups").contains("(ro,"));
    // photos keeps the default and exports "rw"
    assert!(line_for("photos").contains("(rw,"));
}
#[test]
fn test_generate_single_share() {
    let config = test_config();
    let content = generate(&config).unwrap();
    // Skip the header comment and blank lines; exactly one export remains.
    let mut exports = content
        .lines()
        .filter(|l| !l.starts_with('#') && !l.is_empty());
    let only = exports.next().unwrap();
    assert!(exports.next().is_none());
    assert!(only.contains("/mnt/photos"));
    assert!(only.contains("fsid=1"));
}
}

View File

@ -3,6 +3,7 @@
use std::fmt::Write as _;
use std::fs;
use std::path::Path;
use std::process::{Command, Stdio};
use anyhow::{Context, Result};
@ -11,10 +12,12 @@ use crate::config::Config;
/// Default output path for generated smb.conf.
pub const SMB_CONF_PATH: &str = "/etc/samba/smb.conf";
/// Generate smb.conf content that shares the rclone FUSE mount point.
/// Generate smb.conf content that shares rclone FUSE mount directories.
///
/// Each share points directly at its own mount_point (independent rclone mount).
/// When `smb_auth` is enabled, uses `security = user` with a dedicated
/// valid user. Otherwise falls back to guest access (`map to guest = Bad User`).
pub fn generate(config: &Config) -> Result<String> {
let mount_point = config.mount.point.display();
let mut conf = String::new();
// [global] section
@ -27,8 +30,18 @@ pub fn generate(config: &Config) -> Result<String> {
writeln!(conf, " # Require SMB2+ (disable insecure SMB1)")?;
writeln!(conf, " server min protocol = SMB2_02")?;
writeln!(conf)?;
if config.smb_auth.enabled {
let username = config.smb_username();
writeln!(conf, " # User authentication")?;
writeln!(conf, " security = user")?;
writeln!(conf, " map to guest = Never")?;
writeln!(conf, " valid users = {username}")?;
} else {
writeln!(conf, " # Guest / map-to-guest for simple setups")?;
writeln!(conf, " map to guest = Bad User")?;
}
writeln!(conf)?;
writeln!(conf, " # Logging")?;
writeln!(conf, " log file = /var/log/samba/log.%m")?;
@ -41,22 +54,29 @@ pub fn generate(config: &Config) -> Result<String> {
writeln!(conf, " disable spoolss = yes")?;
writeln!(conf)?;
// Share name derived from mount point directory name
let share_name = config
.mount
.point
.file_name()
.map(|n| n.to_string_lossy())
.unwrap_or("warpgate".into());
writeln!(conf, "[{share_name}]")?;
// Share sections — each share points at its own mount_point
for share in &config.shares {
writeln!(conf, "[{}]", share.name)?;
writeln!(conf, " comment = Warpgate cached NAS share")?;
writeln!(conf, " path = {mount_point}")?;
writeln!(conf, " path = {}", share.mount_point.display())?;
writeln!(conf, " browseable = yes")?;
writeln!(conf, " read only = no")?;
writeln!(
conf,
" read only = {}",
if share.read_only { "yes" } else { "no" }
)?;
if config.smb_auth.enabled {
writeln!(conf, " guest ok = no")?;
} else {
writeln!(conf, " guest ok = yes")?;
}
writeln!(conf, " force user = root")?;
writeln!(conf, " create mask = 0644")?;
writeln!(conf, " directory mask = 0755")?;
writeln!(conf)?;
}
Ok(conf)
}
@ -77,6 +97,69 @@ pub fn write_config(config: &Config) -> Result<()> {
Ok(())
}
/// Create the system user and set the Samba password.
///
/// 1. Check if the user exists (`id <username>`)
/// 2. Create if missing (`useradd --system --no-create-home --shell /usr/sbin/nologin`)
/// 3. Set Samba password (`smbpasswd -a -s` via stdin)
///
/// No-op when `smb_auth` is disabled, so callers may invoke this
/// unconditionally.
///
/// # Errors
///
/// Fails if no password can be resolved from config, or if `id`, `useradd`,
/// or `smbpasswd` cannot be spawned or exits non-zero (a failing `id` is not
/// an error — it just means the user must be created).
pub fn setup_user(config: &Config) -> Result<()> {
    if !config.smb_auth.enabled {
        return Ok(());
    }
    let username = config.smb_username();
    // Enabled-but-unresolvable password is a hard error, not a silent skip.
    let password = config
        .smb_password()?
        .context("SMB auth enabled but no password resolved")?;
    // Check if system user exists — only the exit status of `id` matters,
    // so its output is discarded.
    let exists = Command::new("id")
        .arg(username)
        .stdout(Stdio::null())
        .stderr(Stdio::null())
        .status()
        .context("Failed to run 'id' command")?
        .success();
    if !exists {
        println!(" Creating system user '{username}'...");
        // System account with no home dir and no login shell: it exists only
        // to give Samba a Unix identity to map to.
        let status = Command::new("useradd")
            .args([
                "--system",
                "--no-create-home",
                "--shell",
                "/usr/sbin/nologin",
                username,
            ])
            .status()
            .context("Failed to run useradd")?;
        if !status.success() {
            anyhow::bail!("useradd failed for user '{username}': {status}");
        }
    }
    // Set Samba password via stdin (`-s` reads it non-interactively; `-a`
    // adds the entry to Samba's password database).
    println!(" Setting Samba password for '{username}'...");
    let mut child = Command::new("smbpasswd")
        .args(["-a", "-s", username])
        .stdin(Stdio::piped())
        .spawn()
        .context("Failed to spawn smbpasswd")?;
    if let Some(ref mut stdin) = child.stdin {
        use std::io::Write;
        // smbpasswd -s expects password twice on stdin
        write!(stdin, "{password}\n{password}\n")?;
    }
    // Child::wait closes our handle to the child's stdin before blocking
    // (documented std behavior), so smbpasswd sees EOF after the two lines.
    let status = child.wait().context("Failed to wait for smbpasswd")?;
    if !status.success() {
        anyhow::bail!("smbpasswd failed for user '{username}': {status}");
    }
    Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
@ -87,7 +170,6 @@ mod tests {
[connection]
nas_host = "10.0.0.1"
nas_user = "admin"
remote_path = "/photos"
[cache]
dir = "/tmp/cache"
@ -97,7 +179,77 @@ dir = "/tmp/cache"
[writeback]
[directory_cache]
[protocols]
[mount]
[[shares]]
name = "photos"
remote_path = "/photos"
mount_point = "/mnt/photos"
"#,
)
.unwrap()
}
fn test_config_with_shares() -> Config {
toml::from_str(
r#"
[connection]
nas_host = "10.0.0.1"
nas_user = "admin"
[cache]
dir = "/tmp/cache"
[read]
[bandwidth]
[writeback]
[directory_cache]
[protocols]
[[shares]]
name = "photos"
remote_path = "/volume1/photos"
mount_point = "/mnt/photos"
[[shares]]
name = "projects"
remote_path = "/volume1/projects"
mount_point = "/mnt/projects"
[[shares]]
name = "backups"
remote_path = "/volume1/backups"
mount_point = "/mnt/backups"
read_only = true
"#,
)
.unwrap()
}
fn test_config_with_auth() -> Config {
toml::from_str(
r#"
[connection]
nas_host = "10.0.0.1"
nas_user = "admin"
[cache]
dir = "/tmp/cache"
[read]
[bandwidth]
[writeback]
[directory_cache]
[protocols]
[smb_auth]
enabled = true
username = "photographer"
smb_pass = "my-password"
[[shares]]
name = "photos"
remote_path = "/volume1/photos"
mount_point = "/mnt/photos"
"#,
)
.unwrap()
@ -120,27 +272,74 @@ dir = "/tmp/cache"
let config = test_config();
let content = generate(&config).unwrap();
// Share name derived from mount point dir name "nas-photos"
assert!(content.contains("[nas-photos]"));
assert!(content.contains("path = /mnt/nas-photos"));
assert!(content.contains("[photos]"));
assert!(content.contains("path = /mnt/photos"));
assert!(content.contains("browseable = yes"));
assert!(content.contains("read only = no"));
assert!(content.contains("guest ok = yes"));
assert!(content.contains("force user = root"));
}
#[test]
fn test_generate_smb_conf_custom_mount() {
let mut config = test_config();
config.mount.point = std::path::PathBuf::from("/mnt/my-nas");
let content = generate(&config).unwrap();
assert!(content.contains("[my-nas]"));
assert!(content.contains("path = /mnt/my-nas"));
}
#[test]
fn test_smb_conf_path_constant() {
    // Samba's canonical configuration location.
    assert_eq!("/etc/samba/smb.conf", SMB_CONF_PATH);
}
#[test]
fn test_generate_multi_share() {
    let config = test_config_with_shares();
    let content = generate(&config).unwrap();
    // Each share gets its own [section] pointing at its own mount point.
    let expected = [
        ("[photos]", "path = /mnt/photos"),
        ("[projects]", "path = /mnt/projects"),
        ("[backups]", "path = /mnt/backups"),
    ];
    for (section, path) in expected {
        assert!(content.contains(section));
        assert!(content.contains(path));
    }
}
#[test]
fn test_generate_read_only_share() {
    let config = test_config_with_shares();
    let content = generate(&config).unwrap();
    // Everything after "[backups]" (read_only = true) must mark it read only.
    let after_backups = content.split("[backups]").nth(1).unwrap();
    assert!(after_backups.contains("read only = yes"));
    // The [photos] section body ends at the next '[' — it stays writable.
    let after_photos = content.split("[photos]").nth(1).unwrap();
    let photos_body = after_photos.split('[').next().unwrap();
    assert!(photos_body.contains("read only = no"));
}
#[test]
fn test_generate_auth_mode() {
    let config = test_config_with_auth();
    let content = generate(&config).unwrap();
    // [global] switches to user security with the configured account.
    let globals = [
        "security = user",
        "map to guest = Never",
        "valid users = photographer",
    ];
    for expected in globals {
        assert!(content.contains(expected));
    }
    // Shares must refuse guests when auth is enabled.
    assert!(content.contains("guest ok = no"));
    assert!(!content.contains("guest ok = yes"));
}
#[test]
fn test_generate_guest_mode() {
    let config = test_config();
    let content = generate(&config).unwrap();
    // Without smb_auth, guest mapping is active...
    for expected in ["map to guest = Bad User", "guest ok = yes"] {
        assert!(content.contains(expected));
    }
    // ...and user security never appears.
    assert!(!content.contains("security = user"));
}
}

View File

@ -72,7 +72,6 @@ mod tests {
[connection]
nas_host = "10.0.0.1"
nas_user = "admin"
remote_path = "/photos"
[cache]
dir = "/tmp/cache"
@ -82,7 +81,11 @@ dir = "/tmp/cache"
[writeback]
[directory_cache]
[protocols]
[mount]
[[shares]]
name = "photos"
remote_path = "/photos"
mount_point = "/mnt/photos"
"#,
)
.unwrap()

View File

@ -3,8 +3,11 @@
use crate::config::Config;
/// Build the `rclone serve webdav` command arguments.
///
/// Uses the first share's mount_point as the serve directory.
/// Multi-share WebDAV support is future work.
pub fn build_serve_args(config: &Config) -> Vec<String> {
let mount_point = config.mount.point.display().to_string();
let mount_point = config.shares[0].mount_point.display().to_string();
let addr = format!("0.0.0.0:{}", config.protocols.webdav_port);
vec![
@ -33,7 +36,6 @@ mod tests {
[connection]
nas_host = "10.0.0.1"
nas_user = "admin"
remote_path = "/photos"
[cache]
dir = "/tmp/cache"
@ -43,7 +45,11 @@ dir = "/tmp/cache"
[writeback]
[directory_cache]
[protocols]
[mount]
[[shares]]
name = "photos"
remote_path = "/photos"
mount_point = "/mnt/photos"
"#,
)
.unwrap()
@ -56,7 +62,7 @@ dir = "/tmp/cache"
assert_eq!(args[0], "serve");
assert_eq!(args[1], "webdav");
assert_eq!(args[2], "/mnt/nas-photos");
assert_eq!(args[2], "/mnt/photos");
assert_eq!(args[3], "--addr");
assert_eq!(args[4], "0.0.0.0:8080");
assert_eq!(args[5], "--read-only=false");
@ -72,12 +78,36 @@ dir = "/tmp/cache"
}
#[test]
fn test_build_serve_args_custom_mount() {
let mut config = test_config();
config.mount.point = std::path::PathBuf::from("/mnt/media");
let args = build_serve_args(&config);
fn test_build_serve_args_uses_first_share() {
let config: Config = toml::from_str(
r#"
[connection]
nas_host = "10.0.0.1"
nas_user = "admin"
assert_eq!(args[2], "/mnt/media");
[cache]
dir = "/tmp/cache"
[read]
[bandwidth]
[writeback]
[directory_cache]
[protocols]
[[shares]]
name = "photos"
remote_path = "/volume1/photos"
mount_point = "/mnt/photos"
[[shares]]
name = "videos"
remote_path = "/volume1/videos"
mount_point = "/mnt/videos"
"#,
)
.unwrap();
let args = build_serve_args(&config);
assert_eq!(args[2], "/mnt/photos");
}
#[test]
@ -86,7 +116,7 @@ dir = "/tmp/cache"
let cmd = build_serve_command(&config);
assert!(cmd.starts_with("/usr/bin/rclone serve webdav"));
assert!(cmd.contains("/mnt/nas-photos"));
assert!(cmd.contains("/mnt/photos"));
assert!(cmd.contains("--addr"));
assert!(cmd.contains("0.0.0.0:8080"));
}

View File

@ -1,8 +1,7 @@
//! `warpgate run` — single-process supervisor for all services.
//!
//! Manages rclone mount + protocol services in one process tree with
//! coordinated startup and shutdown. Designed to run as a systemd unit
//! or standalone (Docker-friendly).
//! Manages rclone mount processes (one per share) + protocol services in one
//! process tree with coordinated startup and shutdown.
use std::os::unix::process::CommandExt;
use std::process::{Child, Command};
@ -63,6 +62,12 @@ impl RestartTracker {
}
}
/// A named rclone mount child process for a single share.
struct MountChild {
    // Share name (from config) — used to identify this mount in error messages.
    name: String,
    // Handle to the spawned `rclone mount` process backing this share.
    child: Child,
}
/// Child processes for protocol servers managed by the supervisor.
///
/// Implements `Drop` to kill any spawned children — prevents orphaned
@ -96,56 +101,71 @@ pub fn run(config: &Config) -> Result<()> {
println!("Preflight checks...");
preflight(config)?;
// Phase 2: Start rclone mount and wait for it to become ready
println!("Starting rclone mount...");
let mut mount_child = start_and_wait_mount(config, &shutdown)?;
println!("Mount ready at {}", config.mount.point.display());
// Phase 2: Start rclone mounts (one per share) and wait for all to become ready
println!("Starting rclone mounts...");
let mut mount_children = start_and_wait_mounts(config, &shutdown)?;
for share in &config.shares {
println!(" Mount ready at {}", share.mount_point.display());
}
// Phase 3: Start protocol services
if shutdown.load(Ordering::SeqCst) {
println!("Shutdown signal received during mount.");
let _ = mount_child.kill();
let _ = mount_child.wait();
for mc in &mut mount_children {
let _ = mc.child.kill();
let _ = mc.child.wait();
}
return Ok(());
}
println!("Starting protocol services...");
let mut protocols = start_protocols(config)?;
// Phase 3.5: Auto-warmup (non-blocking, best-effort)
// Phase 3.5: Auto-warmup in background thread (non-blocking)
if !config.warmup.rules.is_empty() && config.warmup.auto {
println!("Running auto-warmup...");
for rule in &config.warmup.rules {
if shutdown.load(Ordering::SeqCst) {
let warmup_config = config.clone();
let warmup_shutdown = Arc::clone(&shutdown);
thread::spawn(move || {
println!("Auto-warmup started (background)...");
for rule in &warmup_config.warmup.rules {
if warmup_shutdown.load(Ordering::SeqCst) {
println!("Auto-warmup interrupted by shutdown.");
break;
}
if let Err(e) =
crate::cli::warmup::run(config, &rule.path, rule.newer_than.as_deref())
{
if let Err(e) = crate::cli::warmup::run(
&warmup_config,
&rule.share,
&rule.path,
rule.newer_than.as_deref(),
) {
eprintln!("Warmup warning: {e}");
}
}
println!("Auto-warmup complete.");
});
}
// Phase 4: Supervision loop
println!("Supervision active. Press Ctrl+C to stop.");
let result = supervise(config, &mut mount_child, &mut protocols, Arc::clone(&shutdown));
let result = supervise(config, &mut mount_children, &mut protocols, Arc::clone(&shutdown));
// Phase 5: Teardown (always runs)
println!("Shutting down...");
shutdown_services(config, &mut mount_child, &mut protocols);
shutdown_services(config, &mut mount_children, &mut protocols);
result
}
/// Write configs and create directories. Reuses existing modules.
/// Write configs and create directories.
fn preflight(config: &Config) -> Result<()> {
// Ensure mount point exists
std::fs::create_dir_all(&config.mount.point).with_context(|| {
// Ensure mount points exist for each share
for share in &config.shares {
std::fs::create_dir_all(&share.mount_point).with_context(|| {
format!(
"Failed to create mount point: {}",
config.mount.point.display()
share.mount_point.display()
)
})?;
}
// Ensure cache directory exists
std::fs::create_dir_all(&config.cache.dir).with_context(|| {
@ -161,6 +181,9 @@ fn preflight(config: &Config) -> Result<()> {
// Generate protocol configs
if config.protocols.enable_smb {
samba::write_config(config)?;
if config.smb_auth.enabled {
samba::setup_user(config)?;
}
}
if config.protocols.enable_nfs {
nfs::write_config(config)?;
@ -169,57 +192,99 @@ fn preflight(config: &Config) -> Result<()> {
Ok(())
}
/// Spawn rclone mount process and poll until the FUSE mount appears.
fn start_and_wait_mount(config: &Config, shutdown: &AtomicBool) -> Result<Child> {
let args = build_mount_args(config);
/// Spawn rclone mount processes for all shares and poll until each FUSE mount appears.
fn start_and_wait_mounts(config: &Config, shutdown: &AtomicBool) -> Result<Vec<MountChild>> {
let mut children = Vec::new();
let mut child = Command::new("rclone")
for (i, share) in config.shares.iter().enumerate() {
let rc_port = config.rc_port(i);
let args = build_mount_args(config, share, rc_port);
let child = Command::new("rclone")
.args(&args)
.process_group(0) // isolate from terminal SIGINT
.process_group(0)
.spawn()
.context("Failed to spawn rclone mount")?;
.with_context(|| format!("Failed to spawn rclone mount for share '{}'", share.name))?;
// Poll for mount readiness
children.push(MountChild {
name: share.name.clone(),
child,
});
}
// Poll for all mounts to become ready
let deadline = Instant::now() + MOUNT_TIMEOUT;
let mut ready = vec![false; config.shares.len()];
loop {
// Check for shutdown signal (e.g. Ctrl+C during mount wait)
if shutdown.load(Ordering::SeqCst) {
let _ = child.kill();
let _ = child.wait();
anyhow::bail!("Interrupted while waiting for mount");
for mc in &mut children {
let _ = mc.child.kill();
let _ = mc.child.wait();
}
anyhow::bail!("Interrupted while waiting for mounts");
}
if Instant::now() > deadline {
let _ = child.kill();
let _ = child.wait();
for mc in &mut children {
let _ = mc.child.kill();
let _ = mc.child.wait();
}
let pending: Vec<&str> = config.shares.iter()
.zip(ready.iter())
.filter(|(_, r)| !**r)
.map(|(s, _)| s.name.as_str())
.collect();
anyhow::bail!(
"Timed out waiting for mount at {} ({}s)",
config.mount.point.display(),
MOUNT_TIMEOUT.as_secs()
"Timed out waiting for mounts ({}s). Still pending: {}",
MOUNT_TIMEOUT.as_secs(),
pending.join(", ")
);
}
// Detect early rclone exit (e.g. bad config, auth failure)
match child.try_wait() {
Ok(Some(status)) => {
anyhow::bail!("rclone mount exited immediately ({status}). Check remote/auth config.");
// Check for early exits
for (i, mc) in children.iter_mut().enumerate() {
if ready[i] {
continue;
}
Ok(None) => {} // still running, good
match mc.child.try_wait() {
Ok(Some(status)) => {
anyhow::bail!(
"rclone mount for '{}' exited immediately ({status}). Check remote/auth config.",
mc.name
);
}
Ok(None) => {}
Err(e) => {
anyhow::bail!("Failed to check rclone mount status: {e}");
anyhow::bail!("Failed to check rclone mount status for '{}': {e}", mc.name);
}
}
}
match is_mounted(config) {
Ok(true) => break,
Ok(false) => {}
Err(e) => eprintln!("Warning: mount check failed: {e}"),
// Check mount readiness
let mut all_ready = true;
for (i, share) in config.shares.iter().enumerate() {
if ready[i] {
continue;
}
match is_mounted(&share.mount_point) {
Ok(true) => ready[i] = true,
Ok(false) => all_ready = false,
Err(e) => {
eprintln!("Warning: mount check failed for '{}': {e}", share.name);
all_ready = false;
}
}
}
if all_ready {
break;
}
thread::sleep(Duration::from_millis(500));
}
Ok(child)
Ok(children)
}
/// Spawn smbd as a foreground child process.
@ -233,10 +298,6 @@ fn spawn_smbd() -> Result<Child> {
}
/// Start protocol services after the mount is ready.
///
/// - SMB: spawn `smbd -F` as a child process
/// - NFS: `exportfs -ra`
/// - WebDAV: spawn `rclone serve webdav` as a child process
fn start_protocols(config: &Config) -> Result<ProtocolChildren> {
let smbd = if config.protocols.enable_smb {
let child = spawn_smbd()?;
@ -280,12 +341,11 @@ fn spawn_webdav(config: &Config) -> Result<Child> {
/// Main supervision loop. Polls child processes every 2s.
///
/// - If rclone mount dies → full shutdown (data safety: dirty files may be in flight).
/// - If smbd/WebDAV dies → restart up to 3 times (counter resets after 5 min stable).
/// - Checks shutdown flag set by signal handler.
/// - If any rclone mount dies → full shutdown (data safety).
/// - If smbd/WebDAV dies → restart up to 3 times.
fn supervise(
config: &Config,
mount: &mut Child,
mounts: &mut Vec<MountChild>,
protocols: &mut ProtocolChildren,
shutdown: Arc<AtomicBool>,
) -> Result<()> {
@ -293,23 +353,25 @@ fn supervise(
let mut webdav_tracker = RestartTracker::new();
loop {
// Check for shutdown signal
if shutdown.load(Ordering::SeqCst) {
println!("Shutdown signal received.");
return Ok(());
}
// Check rclone mount process
match mount.try_wait() {
// Check all rclone mount processes
for mc in mounts.iter_mut() {
match mc.child.try_wait() {
Ok(Some(status)) => {
anyhow::bail!(
"rclone mount exited unexpectedly ({}). Initiating full shutdown for data safety.",
"rclone mount for '{}' exited unexpectedly ({}). Initiating full shutdown for data safety.",
mc.name,
status
);
}
Ok(None) => {} // still running
Ok(None) => {}
Err(e) => {
anyhow::bail!("Failed to check rclone mount status: {e}");
anyhow::bail!("Failed to check rclone mount status for '{}': {e}", mc.name);
}
}
}
@ -340,7 +402,7 @@ fn supervise(
protocols.smbd = None;
}
}
Ok(None) => {} // still running
Ok(None) => {}
Err(e) => eprintln!("Warning: failed to check smbd status: {e}"),
}
}
@ -372,7 +434,7 @@ fn supervise(
protocols.webdav = None;
}
}
Ok(None) => {} // still running
Ok(None) => {}
Err(e) => eprintln!("Warning: failed to check WebDAV status: {e}"),
}
}
@ -382,10 +444,6 @@ fn supervise(
}
/// Send SIGTERM, wait up to `SIGTERM_GRACE`, then SIGKILL if still alive.
///
/// smbd forks worker processes per client connection — SIGTERM lets
/// the parent signal its children to exit cleanly. SIGKILL would
/// orphan those workers.
fn graceful_kill(child: &mut Child) {
let pid = child.id() as i32;
// SAFETY: sending a signal to a known child PID is safe.
@ -394,7 +452,7 @@ fn graceful_kill(child: &mut Child) {
let deadline = Instant::now() + SIGTERM_GRACE;
loop {
match child.try_wait() {
Ok(Some(_)) => return, // exited cleanly
Ok(Some(_)) => return,
Ok(None) => {}
Err(_) => break,
}
@ -404,23 +462,19 @@ fn graceful_kill(child: &mut Child) {
thread::sleep(Duration::from_millis(100));
}
// Still alive after grace period — escalate
let _ = child.kill(); // SIGKILL
let _ = child.kill();
let _ = child.wait();
}
/// Wait for rclone VFS write-back queue to drain.
///
/// Polls `vfs/stats` every 2s. Exits when uploads_in_progress + uploads_queued
/// reaches 0, or after 5 minutes (safety cap to avoid hanging forever).
fn wait_writeback_drain() {
/// Wait for rclone VFS write-back queue to drain on a specific RC port.
fn wait_writeback_drain(port: u16) {
use crate::rclone::rc;
let deadline = Instant::now() + WRITEBACK_DRAIN_TIMEOUT;
let mut first = true;
loop {
match rc::vfs_stats() {
match rc::vfs_stats(port) {
Ok(vfs) => {
if let Some(dc) = &vfs.disk_cache {
let pending = dc.uploads_in_progress + dc.uploads_queued;
@ -439,10 +493,10 @@ fn wait_writeback_drain() {
eprint!("\r Write-back: {pending} files remaining... ");
}
} else {
return; // no cache info → nothing to wait for
return;
}
}
Err(_) => return, // RC API unavailable → rclone already gone
Err(_) => return,
}
if Instant::now() > deadline {
@ -458,6 +512,56 @@ fn wait_writeback_drain() {
}
}
/// Reverse-order teardown of all services.
///
/// Order matters for data safety: protocol frontends are stopped first
/// (SMB, NFS, WebDAV) so no new writes can arrive through the mounts,
/// then each share's VFS write-back queue is drained, then the FUSE
/// mounts are lazily detached, and only then are the rclone processes
/// themselves terminated.
fn shutdown_services(config: &Config, mounts: &mut [MountChild], protocols: &mut ProtocolChildren) {
    // Stop SMB
    if let Some(child) = &mut protocols.smbd {
        graceful_kill(child);
        println!(" SMB: stopped");
    }

    // Unexport NFS
    if config.protocols.enable_nfs {
        let _ = Command::new("exportfs").arg("-ua").status();
        println!(" NFS: unexported");
    }

    // Kill WebDAV
    if let Some(child) = &mut protocols.webdav {
        graceful_kill(child);
        println!(" WebDAV: stopped");
    }

    // Wait for write-back queues to drain on each share's RC port before
    // unmounting, so dirty cached files are flushed to the remote.
    for i in 0..config.shares.len() {
        wait_writeback_drain(config.rc_port(i));
    }

    // Lazy unmount each share's FUSE mount (skip shares rclone already
    // unmounted on signal). Try fusermount3 first, fall back to the
    // legacy fusermount binary; `-z` detaches even if the mount is busy.
    for share in &config.shares {
        if is_mounted(&share.mount_point).unwrap_or(false) {
            let mp = share.mount_point.display().to_string();
            let unmounted = Command::new("fusermount3")
                .args(["-uz", &mp])
                .status()
                .map(|s| s.success())
                .unwrap_or(false);
            if !unmounted {
                let _ = Command::new("fusermount")
                    .args(["-uz", &mp])
                    .status();
            }
        }
    }
    println!(" FUSE: unmounted");

    // Gracefully stop all rclone mount processes (SIGTERM, then SIGKILL
    // after the grace period — see graceful_kill).
    for mc in mounts.iter_mut() {
        graceful_kill(&mut mc.child);
    }
    println!(" rclone: stopped");
}
#[cfg(test)]
mod tests {
use super::*;
@ -533,49 +637,3 @@ mod tests {
assert_eq!(WRITEBACK_POLL_INTERVAL, Duration::from_secs(2));
}
}
/// Reverse-order teardown of all services.
///
/// Order: stop smbd → unexport NFS → kill WebDAV → unmount FUSE → kill rclone.
fn shutdown_services(config: &Config, mount: &mut Child, protocols: &mut ProtocolChildren) {
    // Protocol frontends go down first so no new writes arrive.
    if let Some(smbd) = protocols.smbd.as_mut() {
        graceful_kill(smbd);
        println!(" SMB: stopped");
    }
    if config.protocols.enable_nfs {
        let _ = Command::new("exportfs").arg("-ua").status();
        println!(" NFS: unexported");
    }
    if let Some(webdav) = protocols.webdav.as_mut() {
        graceful_kill(webdav);
        println!(" WebDAV: stopped");
    }

    // Flush the VFS write-back queue before touching the mount.
    wait_writeback_drain();

    // Lazily detach the FUSE mount unless rclone already unmounted it
    // (e.g. on signal). fusermount3 first, legacy fusermount as fallback.
    if matches!(is_mounted(config), Ok(true)) {
        let target = config.mount.point.display().to_string();
        let detached = match Command::new("fusermount3").args(["-uz", &target]).status() {
            Ok(status) => status.success(),
            Err(_) => false,
        };
        if !detached {
            let _ = Command::new("fusermount").args(["-uz", &target]).status();
        }
    }
    println!(" FUSE: unmounted");

    // Finally stop the rclone mount process itself.
    graceful_kill(mount);
    println!(" rclone: stopped");
}

View File

@ -10,8 +10,6 @@ nas_user = "admin"
# nas_pass = "your-password"
# Path to SSH private key (recommended)
# nas_key_file = "/root/.ssh/id_ed25519"
# Target directory on NAS
remote_path = "/volume1/photos"
# SFTP port
sftp_port = 22
# SFTP connection pool size
@ -67,17 +65,45 @@ nfs_allowed_network = "192.168.0.0/24"
# WebDAV listen port
webdav_port = 8080
[mount]
# FUSE mount point (all protocols share this)
point = "/mnt/nas-photos"
# --- Optional: SMB user authentication ---
# By default, SMB shares use guest access (no password).
# Enable smb_auth for password-protected access.
#
# [smb_auth]
# enabled = true
# username = "photographer" # defaults to connection.nas_user
# smb_pass = "my-password" # option 1: dedicated password
# reuse_nas_pass = true # option 2: reuse connection.nas_pass
# --- Shares ---
# Each share maps a remote NAS path to a local mount point.
# Each share gets its own rclone mount process and an independent FUSE mount.
[[shares]]
name = "photos"
remote_path = "/volume1/photos"
mount_point = "/mnt/photos"
# [[shares]]
# name = "projects"
# remote_path = "/volume1/projects"
# mount_point = "/mnt/projects"
#
# [[shares]]
# name = "backups"
# remote_path = "/volume1/backups"
# mount_point = "/mnt/backups"
# read_only = true
[warmup]
# Auto-warmup configured paths on startup
auto = true
# [[warmup.rules]]
# share = "photos"
# path = "2024"
# newer_than = "30d"
#
# [[warmup.rules]]
# share = "photos"
# path = "Lightroom/Catalog"

View File

@ -19,7 +19,6 @@ _gen_config() {
local nas_host="${MOCK_NAS_IP:-10.99.0.2}"
local nas_user="root"
local nas_key_file="${TEST_SSH_KEY:-$TEST_DIR/test_key}"
local remote_path="/"
local sftp_port="22"
local sftp_connections="4"
@ -48,11 +47,20 @@ _gen_config() {
local nfs_allowed_network="10.99.0.0/24"
local webdav_port="8080"
local mount_point="${TEST_MOUNT:-$TEST_DIR/mnt}"
local warmup_auto="false"
local warmup_rules=""
local smb_auth_enabled="false"
local smb_auth_username=""
local smb_auth_smb_pass=""
local smb_auth_reuse_nas_pass="false"
# Default share: single share at /
local share_name="${TEST_SHARE_NAME:-data}"
local share_remote_path="${TEST_SHARE_REMOTE_PATH:-/}"
local share_mount_point="${TEST_MOUNT:-$TEST_DIR/mnt}"
local shares_config=""
# Apply overrides
for override in "$@"; do
local key="${override%%=*}"
@ -62,7 +70,6 @@ _gen_config() {
connection.nas_host|nas_host) nas_host="$value" ;;
connection.nas_user|nas_user) nas_user="$value" ;;
connection.nas_key_file|nas_key_file) nas_key_file="$value" ;;
connection.remote_path|remote_path) remote_path="$value" ;;
connection.sftp_port|sftp_port) sftp_port="$value" ;;
connection.sftp_connections|sftp_connections) sftp_connections="$value" ;;
cache.dir|cache_dir) cache_dir="$value" ;;
@ -84,9 +91,16 @@ _gen_config() {
protocols.enable_webdav|enable_webdav) enable_webdav="$value" ;;
protocols.nfs_allowed_network|nfs_allowed_network) nfs_allowed_network="$value" ;;
protocols.webdav_port|webdav_port) webdav_port="$value" ;;
mount.point|mount_point) mount_point="$value" ;;
warmup.auto|warmup_auto) warmup_auto="$value" ;;
warmup.rules) warmup_rules="$value" ;;
smb_auth.enabled|smb_auth_enabled) smb_auth_enabled="$value" ;;
smb_auth.username|smb_auth_username) smb_auth_username="$value" ;;
smb_auth.smb_pass|smb_auth_smb_pass) smb_auth_smb_pass="$value" ;;
smb_auth.reuse_nas_pass|smb_auth_reuse_nas_pass) smb_auth_reuse_nas_pass="$value" ;;
share.name|share_name) share_name="$value" ;;
share.remote_path|share_remote_path) share_remote_path="$value" ;;
share.mount_point|share_mount_point) share_mount_point="$value" ;;
shares) shares_config="$value" ;;
*) echo "WARNING: unknown config override: $key" >&2 ;;
esac
done
@ -96,7 +110,6 @@ _gen_config() {
nas_host = "$nas_host"
nas_user = "$nas_user"
nas_key_file = "$nas_key_file"
remote_path = "$remote_path"
sftp_port = $sftp_port
sftp_connections = $sftp_connections
@ -131,13 +144,42 @@ enable_webdav = $enable_webdav
nfs_allowed_network = "$nfs_allowed_network"
webdav_port = $webdav_port
[mount]
point = "$mount_point"
[warmup]
auto = $warmup_auto
CONFIG_EOF
# Append smb_auth section if enabled
if [[ "$smb_auth_enabled" == "true" ]]; then
cat >> "$config_file" <<SMB_AUTH_EOF
[smb_auth]
enabled = true
SMB_AUTH_EOF
if [[ -n "$smb_auth_username" ]]; then
echo "username = \"$smb_auth_username\"" >> "$config_file"
fi
if [[ -n "$smb_auth_smb_pass" ]]; then
echo "smb_pass = \"$smb_auth_smb_pass\"" >> "$config_file"
fi
if [[ "$smb_auth_reuse_nas_pass" == "true" ]]; then
echo "reuse_nas_pass = true" >> "$config_file"
fi
fi
# Append shares config — use override or default single share
if [[ -n "$shares_config" ]]; then
echo "" >> "$config_file"
echo "$shares_config" >> "$config_file"
else
cat >> "$config_file" <<SHARES_EOF
[[shares]]
name = "$share_name"
remote_path = "$share_remote_path"
mount_point = "$share_mount_point"
SHARES_EOF
fi
# Append warmup rules if specified
if [[ -n "$warmup_rules" ]]; then
echo "" >> "$config_file"
@ -156,13 +198,14 @@ _gen_minimal_config() {
nas_host = "${MOCK_NAS_IP:-10.99.0.2}"
nas_user = "root"
nas_key_file = "${TEST_SSH_KEY:-$TEST_DIR/test_key}"
remote_path = "/"
[cache]
dir = "${CACHE_DIR:-$TEST_DIR/cache}"
[mount]
point = "${TEST_MOUNT:-$TEST_DIR/mnt}"
[[shares]]
name = "data"
remote_path = "/"
mount_point = "${TEST_MOUNT:-$TEST_DIR/mnt}"
CONFIG_EOF
export TEST_CONFIG="$config_file"
@ -179,13 +222,14 @@ _gen_broken_config() {
cat > "$config_file" <<CONFIG_EOF
[connection]
nas_user = "root"
remote_path = "/"
[cache]
dir = "/tmp/cache"
[mount]
point = "/tmp/mnt"
[[shares]]
name = "data"
remote_path = "/"
mount_point = "/tmp/mnt"
CONFIG_EOF
;;
bad_toml)