diff --git a/src/cli/bwlimit.rs b/src/cli/bwlimit.rs
index 4371b8e..cc9a15b 100644
--- a/src/cli/bwlimit.rs
+++ b/src/cli/bwlimit.rs
@@ -1,41 +1,50 @@
//! `warpgate bwlimit` — view or adjust bandwidth limits at runtime.
-use anyhow::{Context, Result};
+use anyhow::Result;
use crate::config::Config;
use crate::rclone::rc;
-pub fn run(_config: &Config, up: Option<&str>, down: Option<&str>) -> Result<()> {
- let result = rc::bwlimit(up, down).context("Failed to call rclone bwlimit API")?;
-
+pub fn run(config: &Config, up: Option<&str>, down: Option<&str>) -> Result<()> {
if up.is_none() && down.is_none() {
println!("Current bandwidth limits:");
} else {
- println!("Updated bandwidth limits:");
+ println!("Updating bandwidth limits:");
}
- // rclone core/bwlimit returns { "bytesPerSecond": N, "bytesPerSecondTx": N, "bytesPerSecondRx": N }
- // A value of -1 means unlimited.
- let has_fields = result.get("bytesPerSecondTx").is_some();
+ for (i, share) in config.shares.iter().enumerate() {
+ let port = config.rc_port(i);
+ let result = match rc::bwlimit(port, up, down) {
+ Ok(r) => r,
+ Err(e) => {
+ eprintln!(" [{}] unreachable — {}", share.name, e);
+ continue;
+ }
+ };
- if has_fields {
- if let Some(tx) = result.get("bytesPerSecondTx").and_then(|v| v.as_i64()) {
- if tx < 0 {
- println!(" Upload: unlimited");
- } else {
- println!(" Upload: {}/s", format_bytes(tx as u64));
+ print!(" [{}] ", share.name);
+
+ let has_fields = result.get("bytesPerSecondTx").is_some();
+ if has_fields {
+ if let Some(tx) = result.get("bytesPerSecondTx").and_then(|v| v.as_i64()) {
+ if tx < 0 {
+ print!("Up: unlimited");
+ } else {
+ print!("Up: {}/s", format_bytes(tx as u64));
+ }
}
- }
- if let Some(rx) = result.get("bytesPerSecondRx").and_then(|v| v.as_i64()) {
- if rx < 0 {
- println!(" Download: unlimited");
+ if let Some(rx) = result.get("bytesPerSecondRx").and_then(|v| v.as_i64()) {
+ if rx < 0 {
+ println!(", Down: unlimited");
+ } else {
+ println!(", Down: {}/s", format_bytes(rx as u64));
+ }
} else {
- println!(" Download: {}/s", format_bytes(rx as u64));
+ println!();
}
+ } else {
+ println!("{}", serde_json::to_string_pretty(&result)?);
}
- } else {
- // Fallback: print raw response
- println!("{}", serde_json::to_string_pretty(&result)?);
}
Ok(())
@@ -84,7 +93,7 @@ mod tests {
#[test]
fn test_format_bytes_mixed() {
- assert_eq!(format_bytes(10485760), "10.0 MiB"); // 10 MiB
- assert_eq!(format_bytes(52428800), "50.0 MiB"); // 50 MiB
+ assert_eq!(format_bytes(10485760), "10.0 MiB");
+ assert_eq!(format_bytes(52428800), "50.0 MiB");
}
}
diff --git a/src/cli/cache.rs b/src/cli/cache.rs
index 5f05c05..5d64eb0 100644
--- a/src/cli/cache.rs
+++ b/src/cli/cache.rs
@@ -1,71 +1,93 @@
//! `warpgate cache-list` and `warpgate cache-clean` commands.
-use anyhow::{Context, Result};
+use anyhow::Result;
use crate::config::Config;
use crate::rclone::rc;
-/// List cached files via rclone RC API.
-pub fn list(_config: &Config) -> Result<()> {
- let result = rc::vfs_list("/").context("Failed to list VFS cache")?;
+/// List cached files via rclone RC API (aggregated across all shares).
+pub fn list(config: &Config) -> Result<()> {
+ for (i, share) in config.shares.iter().enumerate() {
+ let port = config.rc_port(i);
+ println!("=== {} ===", share.name);
- // vfs/list may return an array directly or { "list": [...] }
- let entries = if let Some(arr) = result.as_array() {
- arr.as_slice()
- } else if let Some(list) = result.get("list").and_then(|v| v.as_array()) {
- list.as_slice()
- } else {
- // Unknown format — print raw JSON
- println!("{}", serde_json::to_string_pretty(&result)?);
- return Ok(());
- };
+ let result = match rc::vfs_list(port, "/") {
+ Ok(r) => r,
+ Err(e) => {
+ eprintln!(" Could not list cache for '{}': {}", share.name, e);
+ continue;
+ }
+ };
- if entries.is_empty() {
- println!("Cache is empty.");
- return Ok(());
- }
-
- println!("{:<10} PATH", "SIZE");
- println!("{}", "-".repeat(60));
-
- for entry in entries {
- let name = entry.get("Name").and_then(|v| v.as_str()).unwrap_or("?");
- let size = entry.get("Size").and_then(|v| v.as_u64()).unwrap_or(0);
- let is_dir = entry
- .get("IsDir")
- .and_then(|v| v.as_bool())
- .unwrap_or(false);
-
- if is_dir {
-            println!("{:<10} {}/", "", name);
+ let entries = if let Some(arr) = result.as_array() {
+ arr.as_slice()
+ } else if let Some(list) = result.get("list").and_then(|v| v.as_array()) {
+ list.as_slice()
} else {
- println!("{:<10} {}", format_bytes(size), name);
+ println!("{}", serde_json::to_string_pretty(&result)?);
+ continue;
+ };
+
+ if entries.is_empty() {
+ println!(" Cache is empty.");
+ continue;
}
+
+ println!("{:<10} PATH", "SIZE");
+ println!("{}", "-".repeat(60));
+
+ for entry in entries {
+ let name = entry.get("Name").and_then(|v| v.as_str()).unwrap_or("?");
+ let size = entry.get("Size").and_then(|v| v.as_u64()).unwrap_or(0);
+ let is_dir = entry
+ .get("IsDir")
+ .and_then(|v| v.as_bool())
+ .unwrap_or(false);
+
+ if is_dir {
+ println!("{:<10} {}/", "", name);
+ } else {
+ println!("{:<10} {}", format_bytes(size), name);
+ }
+ }
+ println!();
}
Ok(())
}
/// Clean cached files (only clean files, never dirty).
-pub fn clean(_config: &Config, all: bool) -> Result<()> {
+pub fn clean(config: &Config, all: bool) -> Result<()> {
if all {
- println!("Clearing VFS directory cache...");
- rc::vfs_forget("/").context("Failed to clear VFS cache")?;
- println!("Done. VFS directory cache cleared.");
+ println!("Clearing VFS directory cache for all shares...");
+ for (i, share) in config.shares.iter().enumerate() {
+ let port = config.rc_port(i);
+ match rc::vfs_forget(port, "/") {
+ Ok(()) => println!(" {}: cleared", share.name),
+ Err(e) => eprintln!(" {}: failed — {}", share.name, e),
+ }
+ }
+ println!("Done.");
} else {
println!("Current cache status:");
- match rc::vfs_stats() {
- Ok(vfs) => {
- if let Some(dc) = vfs.disk_cache {
- println!(" Used: {}", format_bytes(dc.bytes_used));
- println!(" Uploading: {}", dc.uploads_in_progress);
- println!(" Queued: {}", dc.uploads_queued);
- if dc.uploads_in_progress > 0 || dc.uploads_queued > 0 {
- println!("\n Dirty files exist — only synced files are safe to clean.");
+ for (i, share) in config.shares.iter().enumerate() {
+ let port = config.rc_port(i);
+ print!(" [{}] ", share.name);
+ match rc::vfs_stats(port) {
+ Ok(vfs) => {
+ if let Some(dc) = vfs.disk_cache {
+ println!(
+ "Used: {}, Uploading: {}, Queued: {}",
+ format_bytes(dc.bytes_used),
+ dc.uploads_in_progress,
+ dc.uploads_queued
+ );
+ } else {
+ println!("no cache stats");
}
}
+ Err(e) => println!("unreachable — {}", e),
}
- Err(e) => eprintln!(" Could not fetch cache stats: {}", e),
}
println!("\nRun with --all to clear the directory cache.");
}
diff --git a/src/cli/speed_test.rs b/src/cli/speed_test.rs
index d70099f..5edcaf8 100644
--- a/src/cli/speed_test.rs
+++ b/src/cli/speed_test.rs
@@ -13,9 +13,10 @@ const TEST_SIZE: usize = 10 * 1024 * 1024; // 10 MiB
pub fn run(config: &Config) -> Result<()> {
let tmp_local = std::env::temp_dir().join("warpgate-speedtest");
+ // Use the first share's remote_path for the speed test
let remote_path = format!(
"nas:{}/.warpgate-speedtest",
- config.connection.remote_path
+ config.shares[0].remote_path
);
// Create a 10 MiB test file
diff --git a/src/cli/status.rs b/src/cli/status.rs
index 5c983e8..e53432c 100644
--- a/src/cli/status.rs
+++ b/src/cli/status.rs
@@ -6,49 +6,82 @@ use crate::config::Config;
use crate::rclone::{mount, rc};
pub fn run(config: &Config) -> Result<()> {
- // Check mount status
- let mounted = match mount::is_mounted(config) {
- Ok(m) => m,
- Err(e) => {
- eprintln!("Warning: could not check mount status: {}", e);
- false
- }
- };
+ // Check mount status for each share
+ let mut any_mounted = false;
+ for share in &config.shares {
+ let mounted = match mount::is_mounted(&share.mount_point) {
+ Ok(m) => m,
+ Err(e) => {
+ eprintln!("Warning: could not check mount for '{}': {}", share.name, e);
+ false
+ }
+ };
- if mounted {
- println!("Mount: UP ({})", config.mount.point.display());
- } else {
- println!("Mount: DOWN");
- println!("\nrclone VFS mount is not active.");
- println!("Start with: systemctl start warpgate-mount");
+ let ro_tag = if share.read_only { " (ro)" } else { "" };
+ if mounted {
+ println!(
+ "Mount: UP {} → {}{}",
+ share.mount_point.display(),
+ share.name,
+ ro_tag
+ );
+ any_mounted = true;
+ } else {
+ println!("Mount: DOWN {}{}", share.name, ro_tag);
+ }
+ }
+
+ if !any_mounted {
+ println!("\nNo rclone VFS mounts are active.");
+ println!("Start with: systemctl start warpgate");
return Ok(());
}
- // Transfer stats from rclone RC API
- match rc::core_stats() {
- Ok(stats) => {
- println!("Speed: {}/s", format_bytes(stats.speed as u64));
- println!("Moved: {}", format_bytes(stats.bytes));
- println!("Active: {} transfers", stats.transfers);
- println!("Errors: {}", stats.errors);
+ // Aggregate stats from all share RC ports
+ let mut total_bytes = 0u64;
+ let mut total_speed = 0.0f64;
+ let mut total_transfers = 0u64;
+ let mut total_errors = 0u64;
+ let mut total_cache_used = 0u64;
+ let mut total_uploading = 0u64;
+ let mut total_queued = 0u64;
+ let mut total_errored = 0u64;
+ let mut rc_reachable = false;
+
+ for (i, _share) in config.shares.iter().enumerate() {
+ let port = config.rc_port(i);
+ if let Ok(stats) = rc::core_stats(port) {
+ rc_reachable = true;
+ total_bytes += stats.bytes;
+ total_speed += stats.speed;
+ total_transfers += stats.transfers;
+ total_errors += stats.errors;
}
- Err(e) => {
- eprintln!("Could not reach rclone RC API: {}", e);
+ if let Ok(vfs) = rc::vfs_stats(port) {
+ if let Some(dc) = vfs.disk_cache {
+ total_cache_used += dc.bytes_used;
+ total_uploading += dc.uploads_in_progress;
+ total_queued += dc.uploads_queued;
+ total_errored += dc.errored_files;
+ }
}
}
- // VFS cache stats (RC connection error already reported above)
- if let Ok(vfs) = rc::vfs_stats() {
- if let Some(dc) = vfs.disk_cache {
- println!("Cache: {}", format_bytes(dc.bytes_used));
- println!(
- "Dirty: {} uploading, {} queued",
- dc.uploads_in_progress, dc.uploads_queued
- );
- if dc.errored_files > 0 {
- println!("Errored: {} files", dc.errored_files);
- }
+ if rc_reachable {
+ println!("Speed: {}/s", format_bytes(total_speed as u64));
+ println!("Moved: {}", format_bytes(total_bytes));
+ println!("Active: {} transfers", total_transfers);
+ println!("Errors: {}", total_errors);
+ println!("Cache: {}", format_bytes(total_cache_used));
+ println!(
+ "Dirty: {} uploading, {} queued",
+ total_uploading, total_queued
+ );
+ if total_errored > 0 {
+ println!("Errored: {} files", total_errored);
}
+ } else {
+ eprintln!("Could not reach any rclone RC API.");
}
Ok(())
diff --git a/src/cli/warmup.rs b/src/cli/warmup.rs
index b4f3d7b..83f25ee 100644
--- a/src/cli/warmup.rs
+++ b/src/cli/warmup.rs
@@ -1,8 +1,7 @@
//! `warpgate warmup` — pre-cache a remote directory to local SSD.
//!
//! Lists files via `rclone lsf`, then reads each through the FUSE mount
-//! to trigger VFS caching. This ensures files land in the rclone VFS
-//! SSD cache rather than being downloaded to a throwaway temp directory.
+//! to trigger VFS caching.
use std::io;
use std::process::Command;
@@ -12,9 +11,13 @@ use anyhow::{Context, Result};
use crate::config::Config;
use crate::rclone::config as rclone_config;
-pub fn run(config: &Config, path: &str, newer_than: Option<&str>) -> Result<()> {
- let warmup_path = config.mount.point.join(path);
- let remote_src = format!("nas:{}/{}", config.connection.remote_path, path);
+pub fn run(config: &Config, share_name: &str, path: &str, newer_than: Option<&str>) -> Result<()> {
+ let share = config
+ .find_share(share_name)
+ .with_context(|| format!("Share '{}' not found in config", share_name))?;
+
+ let warmup_path = share.mount_point.join(path);
+ let remote_src = format!("nas:{}/{}", share.remote_path, path);
println!("Warming up: {remote_src}");
println!(" via mount: {}", warmup_path.display());
@@ -63,7 +66,7 @@ pub fn run(config: &Config, path: &str, newer_than: Option<&str>) -> Result<()>
let mut errors = 0usize;
for file in &files {
- if is_cached(config, path, file) {
+ if is_cached(config, &share.remote_path, path, file) {
skipped += 1;
continue;
}
@@ -71,7 +74,6 @@ pub fn run(config: &Config, path: &str, newer_than: Option<&str>) -> Result<()>
let full_path = warmup_path.join(file);
match std::fs::File::open(&full_path) {
Ok(mut f) => {
- // Stream-read through FUSE mount → populates VFS cache
if let Err(e) = io::copy(&mut f, &mut io::sink()) {
eprintln!(" Warning: read failed: {file}: {e}");
errors += 1;
@@ -95,16 +97,13 @@ pub fn run(config: &Config, path: &str, newer_than: Option<&str>) -> Result<()>
}
/// Check if a file is already in the rclone VFS cache.
-///
-/// `warmup_path` is the subdir passed to `warpgate warmup` (e.g. "Image/2026").
-/// `relative_path` is the filename from `rclone lsf` (relative to warmup_path).
-fn is_cached(config: &Config, warmup_path: &str, relative_path: &str) -> bool {
+fn is_cached(config: &Config, remote_path: &str, warmup_path: &str, relative_path: &str) -> bool {
let cache_path = config
.cache
.dir
.join("vfs")
.join("nas")
- .join(config.connection.remote_path.trim_start_matches('/'))
+ .join(remote_path.trim_start_matches('/'))
.join(warmup_path)
.join(relative_path);
cache_path.exists()
@@ -120,7 +119,6 @@ mod tests {
[connection]
nas_host = "10.0.0.1"
nas_user = "admin"
-remote_path = "/photos"
[cache]
dir = "/tmp/warpgate-test-cache"
@@ -130,7 +128,11 @@ dir = "/tmp/warpgate-test-cache"
[writeback]
[directory_cache]
[protocols]
-[mount]
+
+[[shares]]
+name = "photos"
+remote_path = "/photos"
+mount_point = "/mnt/photos"
"#,
)
.unwrap()
@@ -139,35 +141,31 @@ dir = "/tmp/warpgate-test-cache"
#[test]
fn test_is_cached_nonexistent_file() {
let config = test_config();
- // File doesn't exist on disk, so should return false
- assert!(!is_cached(&config, "2024", "IMG_001.jpg"));
+ assert!(!is_cached(&config, "/photos", "2024", "IMG_001.jpg"));
}
#[test]
fn test_is_cached_deep_path() {
let config = test_config();
- assert!(!is_cached(&config, "Images/2024/January", "photo.cr3"));
+ assert!(!is_cached(&config, "/photos", "Images/2024/January", "photo.cr3"));
}
#[test]
fn test_is_cached_path_construction() {
- // Verify the path is constructed correctly by checking the expected
-    // cache path: cache_dir/vfs/nas/<remote_path>/<warmup_path>/<relative_path>
let config = test_config();
let expected = std::path::PathBuf::from("/tmp/warpgate-test-cache")
.join("vfs")
.join("nas")
- .join("photos") // "/photos" trimmed of leading /
+ .join("photos")
.join("2024")
.join("IMG_001.jpg");
- // Reconstruct the same logic as is_cached
let cache_path = config
.cache
.dir
.join("vfs")
.join("nas")
- .join(config.connection.remote_path.trim_start_matches('/'))
+ .join("photos")
.join("2024")
.join("IMG_001.jpg");
@@ -176,21 +174,19 @@ dir = "/tmp/warpgate-test-cache"
#[test]
fn test_is_cached_remote_path_trimming() {
- let mut config = test_config();
- config.connection.remote_path = "/volume1/photos".into();
+ let config = test_config();
+ let remote_path = "/volume1/photos";
let cache_path = config
.cache
.dir
.join("vfs")
.join("nas")
- .join(config.connection.remote_path.trim_start_matches('/'))
+ .join(remote_path.trim_start_matches('/'))
.join("2024")
.join("file.jpg");
- // The leading "/" is stripped, so "nas" is followed by "volume1" (not "/volume1")
assert!(cache_path.to_string_lossy().contains("nas/volume1/photos"));
- // No double slash from unstripped leading /
assert!(!cache_path.to_string_lossy().contains("nas//volume1"));
}
}
diff --git a/src/config.rs b/src/config.rs
index b182ca4..9a0d405 100644
--- a/src/config.rs
+++ b/src/config.rs
@@ -11,6 +11,9 @@ use serde::{Deserialize, Serialize};
/// Default config file path.
pub const DEFAULT_CONFIG_PATH: &str = "/etc/warpgate/config.toml";
+/// Base RC API port. Each share gets `RC_BASE_PORT + share_index`.
+pub const RC_BASE_PORT: u16 = 5572;
+
/// Top-level configuration.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Config {
@@ -21,9 +24,11 @@ pub struct Config {
pub writeback: WritebackConfig,
pub directory_cache: DirectoryCacheConfig,
pub protocols: ProtocolsConfig,
- pub mount: MountConfig,
#[serde(default)]
pub warmup: WarmupConfig,
+ #[serde(default)]
+ pub smb_auth: SmbAuthConfig,
+    pub shares: Vec<ShareConfig>,
}
/// SFTP connection to remote NAS.
@@ -39,8 +44,6 @@ pub struct ConnectionConfig {
/// Path to SSH private key.
#[serde(default)]
    pub nas_key_file: Option<PathBuf>,
- /// Target path on NAS.
- pub remote_path: String,
/// SFTP port.
#[serde(default = "default_sftp_port")]
pub sftp_port: u16,
@@ -135,14 +138,6 @@ pub struct ProtocolsConfig {
pub webdav_port: u16,
}
-/// FUSE mount point configuration.
-#[derive(Debug, Clone, Serialize, Deserialize)]
-pub struct MountConfig {
- /// FUSE mount point path.
- #[serde(default = "default_mount_point")]
- pub point: PathBuf,
-}
-
/// Warmup configuration — auto-cache paths on startup.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct WarmupConfig {
@@ -166,12 +161,45 @@ impl Default for WarmupConfig {
/// A single warmup rule specifying a path to pre-cache.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct WarmupRule {
- /// Path relative to remote_path.
+ /// Name of the share this rule applies to.
+ pub share: String,
+ /// Path relative to the share's remote_path.
pub path: String,
/// Only cache files newer than this (e.g. "7d", "24h").
    pub newer_than: Option<String>,
}
+/// Optional SMB user authentication (instead of guest access).
+#[derive(Debug, Clone, Serialize, Deserialize, Default)]
+pub struct SmbAuthConfig {
+ /// Enable SMB user authentication.
+ #[serde(default)]
+ pub enabled: bool,
+ /// SMB username (defaults to connection.nas_user if unset).
+ #[serde(default)]
+    pub username: Option<String>,
+ /// Dedicated SMB password (takes precedence over reuse_nas_pass).
+ #[serde(default)]
+    pub smb_pass: Option<String>,
+ /// Reuse connection.nas_pass as the SMB password.
+ #[serde(default)]
+ pub reuse_nas_pass: bool,
+}
+
+/// A single share exported as SMB/NFS, each with its own rclone mount.
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct ShareConfig {
+ /// SMB/NFS share name.
+ pub name: String,
+ /// Absolute path on the remote NAS (e.g. "/volume1/photos").
+ pub remote_path: String,
+ /// Local FUSE mount point (e.g. "/mnt/photos").
+ pub mount_point: PathBuf,
+ /// Export as read-only.
+ #[serde(default)]
+ pub read_only: bool,
+}
+
// --- Default value functions ---
fn default_sftp_port() -> u16 {
@@ -222,10 +250,6 @@ fn default_nfs_network() -> String {
fn default_webdav_port() -> u16 {
8080
}
-fn default_mount_point() -> PathBuf {
- PathBuf::from("/mnt/nas-photos")
-}
-
impl Config {
/// Load config from a TOML file.
    pub fn load(path: &Path) -> Result<Self> {
@@ -233,6 +257,7 @@ impl Config {
.with_context(|| format!("Failed to read config file: {}", path.display()))?;
let config: Config =
toml::from_str(&content).with_context(|| "Failed to parse config TOML")?;
+ config.validate()?;
Ok(config)
}
@@ -241,6 +266,110 @@ impl Config {
include_str!("../templates/config.toml.default")
.to_string()
}
+
+ /// Find a share by name.
+ pub fn find_share(&self, name: &str) -> Option<&ShareConfig> {
+ self.shares.iter().find(|s| s.name == name)
+ }
+
+ /// Return the RC API port for a given share index.
+ pub fn rc_port(&self, share_index: usize) -> u16 {
+ RC_BASE_PORT + share_index as u16
+ }
+
+ /// Effective SMB username. Falls back to `connection.nas_user`.
+ pub fn smb_username(&self) -> &str {
+ self.smb_auth
+ .username
+ .as_deref()
+ .unwrap_or(&self.connection.nas_user)
+ }
+
+ /// Resolve the SMB password. Returns `None` when auth is disabled.
+ /// Returns an error if auth is enabled but no password can be resolved.
+ pub fn smb_password(&self) -> Result