Fix warmup to use VFS cache, dynamic SMB share name, smbd long flags

- warmup: read files through FUSE mount instead of rclone copy to temp
  dir. Files now actually land in rclone VFS SSD cache.
- samba: derive share name from mount point dir name instead of
  hardcoded [nas-photos] (e.g. /mnt/projects → [projects])
- supervisor: use smbd long flags (--foreground, --debug-stdout,
  --no-process-group, --configfile) for compatibility with Samba 4.19

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
grabbit 2026-02-18 00:38:42 +08:00
parent 5d8bf52ae9
commit 9b37c88cd5
3 changed files with 73 additions and 24 deletions

View File

@@ -1,5 +1,10 @@
//! `warpgate warmup` — pre-cache a remote directory to local SSD. //! `warpgate warmup` — pre-cache a remote directory to local SSD.
//!
//! Lists files via `rclone lsf`, then reads each through the FUSE mount
//! to trigger VFS caching. This ensures files land in the rclone VFS
//! SSD cache rather than being downloaded to a throwaway temp directory.
use std::io;
use std::process::Command; use std::process::Command;
use anyhow::{Context, Result}; use anyhow::{Context, Result};
@@ -8,38 +13,75 @@ use crate::config::Config;
use crate::rclone::config as rclone_config; use crate::rclone::config as rclone_config;
pub fn run(config: &Config, path: &str, newer_than: Option<&str>) -> Result<()> { pub fn run(config: &Config, path: &str, newer_than: Option<&str>) -> Result<()> {
let warmup_path = config.mount.point.join(path);
let remote_src = format!("nas:{}/{}", config.connection.remote_path, path); let remote_src = format!("nas:{}/{}", config.connection.remote_path, path);
let local_dest = std::env::temp_dir().join("warpgate-warmup");
println!("Warming up: {}", remote_src); println!("Warming up: {remote_src}");
println!(" via mount: {}", warmup_path.display());
// Create temp destination for downloaded files if !warmup_path.exists() {
std::fs::create_dir_all(&local_dest) anyhow::bail!(
.context("Failed to create temp directory for warmup")?; "Path not found on mount: {}. Is the mount running?",
warmup_path.display()
);
}
// List files on remote (supports --max-age for newer_than filter)
let mut cmd = Command::new("rclone"); let mut cmd = Command::new("rclone");
cmd.arg("copy") cmd.arg("lsf")
.arg("--config") .arg("--config")
.arg(rclone_config::RCLONE_CONF_PATH) .arg(rclone_config::RCLONE_CONF_PATH)
.arg(&remote_src) .arg("--recursive")
.arg(&local_dest) .arg("--files-only")
.arg("--no-traverse") .arg(&remote_src);
.arg("--progress");
if let Some(age) = newer_than { if let Some(age) = newer_than {
cmd.arg("--max-age").arg(age); cmd.arg("--max-age").arg(age);
} }
println!("Downloading from remote NAS..."); let output = cmd.output().context("Failed to run rclone lsf")?;
let status = cmd.status().context("Failed to run rclone copy")?; if !output.status.success() {
anyhow::bail!(
// Clean up temp directory "rclone lsf failed: {}",
let _ = std::fs::remove_dir_all(&local_dest); String::from_utf8_lossy(&output.stderr).trim()
);
if status.success() {
println!("Warmup complete.");
Ok(())
} else {
anyhow::bail!("rclone copy exited with status {}", status);
} }
let file_list = String::from_utf8_lossy(&output.stdout);
let files: Vec<&str> = file_list.lines().filter(|l| !l.is_empty()).collect();
let total = files.len();
if total == 0 {
println!("No files matched.");
return Ok(());
}
println!("Found {total} files to cache.");
let mut cached = 0usize;
let mut errors = 0usize;
for file in &files {
let full_path = warmup_path.join(file);
match std::fs::File::open(&full_path) {
Ok(mut f) => {
// Stream-read through FUSE mount → populates VFS cache
if let Err(e) = io::copy(&mut f, &mut io::sink()) {
eprintln!(" Warning: read failed: {file}: {e}");
errors += 1;
} else {
cached += 1;
eprint!("\r Cached {cached}/{total}");
}
}
Err(e) => {
eprintln!(" Warning: open failed: {file}: {e}");
errors += 1;
}
}
}
eprintln!();
println!("Warmup complete: {cached} cached, {errors} errors.");
Ok(())
} }

View File

@@ -41,8 +41,14 @@ pub fn generate(config: &Config) -> Result<String> {
writeln!(conf, " disable spoolss = yes")?; writeln!(conf, " disable spoolss = yes")?;
writeln!(conf)?; writeln!(conf)?;
// [nas-photos] share section // Share name derived from mount point directory name
writeln!(conf, "[nas-photos]")?; let share_name = config
.mount
.point
.file_name()
.map(|n| n.to_string_lossy())
.unwrap_or("warpgate".into());
writeln!(conf, "[{share_name}]")?;
writeln!(conf, " comment = Warpgate cached NAS share")?; writeln!(conf, " comment = Warpgate cached NAS share")?;
writeln!(conf, " path = {mount_point}")?; writeln!(conf, " path = {mount_point}")?;
writeln!(conf, " browseable = yes")?; writeln!(conf, " browseable = yes")?;

View File

@@ -204,7 +204,8 @@ fn start_and_wait_mount(config: &Config, shutdown: &AtomicBool) -> Result<Child>
/// Spawn smbd as a foreground child process. /// Spawn smbd as a foreground child process.
fn spawn_smbd() -> Result<Child> { fn spawn_smbd() -> Result<Child> {
Command::new("smbd") Command::new("smbd")
.args(["-F", "-S", "-N", "-s", samba::SMB_CONF_PATH]) .args(["--foreground", "--debug-stdout", "--no-process-group",
"--configfile", samba::SMB_CONF_PATH])
.spawn() .spawn()
.context("Failed to spawn smbd") .context("Failed to spawn smbd")
} }