Re-trigger warmup on config reload and add per-share warmup status tracking

Warmup config changes via the web UI now actually run warmup without requiring
a daemon restart. Adds generation-based warmup tracking with progress reporting
across CLI status, JSON API, SSE live updates, and web UI badges/detail panels.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
grabbit 2026-02-18 19:13:04 +08:00
parent 6bb7ec4d27
commit 2432f83914
11 changed files with 641 additions and 50 deletions

View File

@ -28,6 +28,12 @@ struct ApiShare {
speed: f64, speed: f64,
transfers: u64, transfers: u64,
errors: u64, errors: u64,
#[serde(default)]
warmup_state: Option<String>,
#[serde(default)]
warmup_done: Option<usize>,
#[serde(default)]
warmup_total: Option<usize>,
} }
pub fn run(config: &Config) -> Result<()> { pub fn run(config: &Config) -> Result<()> {
@ -52,10 +58,23 @@ fn print_api_status(api: &ApiStatus) -> Result<()> {
let mut any_active = false; let mut any_active = false;
for share in &api.shares { for share in &api.shares {
// Build warmup suffix
let warmup_suffix = match share.warmup_state.as_deref() {
Some("running") => {
let done = share.warmup_done.unwrap_or(0);
let total = share.warmup_total.unwrap_or(0);
format!("\tWarmup [{done}/{total}]")
}
Some("pending") => "\tWarmup...".to_string(),
Some("complete") => "\tWarmup done".to_string(),
Some("failed") => "\tWarmup FAILED".to_string(),
_ => String::new(),
};
match share.health.as_str() { match share.health.as_str() {
"OK" => { "OK" => {
if share.mounted { if share.mounted {
println!("Mount: OK {}", share.name); println!("Mount: OK {}{}", share.name, warmup_suffix);
any_active = true; any_active = true;
} else { } else {
println!("Mount: DOWN {} — mount lost", share.name); println!("Mount: DOWN {} — mount lost", share.name);

View File

@ -5,10 +5,13 @@
use std::io; use std::io;
use std::process::Command; use std::process::Command;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, RwLock};
use anyhow::{Context, Result}; use anyhow::{Context, Result};
use crate::config::Config; use crate::config::Config;
use crate::daemon::{DaemonStatus, WarmupRuleState};
use crate::rclone::config as rclone_config; use crate::rclone::config as rclone_config;
pub fn run(config: &Config, share_name: &str, path: &str, newer_than: Option<&str>) -> Result<()> { pub fn run(config: &Config, share_name: &str, path: &str, newer_than: Option<&str>) -> Result<()> {
@ -96,6 +99,165 @@ pub fn run(config: &Config, share_name: &str, path: &str, newer_than: Option<&st
Ok(()) Ok(())
} }
/// Like `run()` but reports progress into `shared_status.warmup[rule_index]`.
///
/// Checks `shutdown` and `generation` before each file to allow early exit
/// when the daemon is stopping or a new warmup generation supersedes this one.
pub fn run_tracked(
config: &Config,
share_name: &str,
path: &str,
newer_than: Option<&str>,
shared_status: &Arc<RwLock<DaemonStatus>>,
rule_index: usize,
generation: u64,
shutdown: &AtomicBool,
) -> Result<()> {
let share = config
.find_share(share_name)
.with_context(|| format!("Share '{}' not found in config", share_name))?;
let warmup_path = share.mount_point.join(path);
let remote_src = format!("{}:{}/{}", share.connection, share.remote_path, path);
// Mark as Listing
{
let mut status = shared_status.write().unwrap();
if status.warmup_generation != generation {
return Ok(());
}
if let Some(rs) = status.warmup.get_mut(rule_index) {
rs.state = WarmupRuleState::Listing;
}
}
if !warmup_path.exists() {
let msg = format!("Path not found on mount: {}", warmup_path.display());
let mut status = shared_status.write().unwrap();
if let Some(rs) = status.warmup.get_mut(rule_index) {
rs.state = WarmupRuleState::Failed(msg.clone());
}
anyhow::bail!("{msg}");
}
// List files on remote
let mut cmd = Command::new("rclone");
cmd.arg("lsf")
.arg("--config")
.arg(rclone_config::RCLONE_CONF_PATH)
.arg("--recursive")
.arg("--files-only")
.arg(&remote_src);
if let Some(age) = newer_than {
cmd.arg("--max-age").arg(age);
}
let output = match cmd.output() {
Ok(o) => o,
Err(e) => {
let msg = format!("Failed to run rclone lsf: {e}");
let mut status = shared_status.write().unwrap();
if let Some(rs) = status.warmup.get_mut(rule_index) {
rs.state = WarmupRuleState::Failed(msg.clone());
}
anyhow::bail!("{msg}");
}
};
if !output.status.success() {
let msg = format!(
"rclone lsf failed: {}",
String::from_utf8_lossy(&output.stderr).trim()
);
let mut status = shared_status.write().unwrap();
if let Some(rs) = status.warmup.get_mut(rule_index) {
rs.state = WarmupRuleState::Failed(msg.clone());
}
anyhow::bail!("{msg}");
}
let file_list = String::from_utf8_lossy(&output.stdout);
let files: Vec<&str> = file_list.lines().filter(|l| !l.is_empty()).collect();
let total = files.len();
// Update total and transition to Caching
{
let mut status = shared_status.write().unwrap();
if status.warmup_generation != generation {
return Ok(());
}
if let Some(rs) = status.warmup.get_mut(rule_index) {
rs.total_files = total;
rs.state = if total == 0 {
WarmupRuleState::Complete
} else {
WarmupRuleState::Caching
};
}
}
if total == 0 {
return Ok(());
}
for file in &files {
// Check shutdown / generation before each file
if shutdown.load(Ordering::SeqCst) {
return Ok(());
}
{
let status = shared_status.read().unwrap();
if status.warmup_generation != generation {
return Ok(());
}
}
if is_cached(config, &share.connection, &share.remote_path, path, file) {
let mut status = shared_status.write().unwrap();
if let Some(rs) = status.warmup.get_mut(rule_index) {
rs.skipped += 1;
}
continue;
}
let full_path = warmup_path.join(file);
match std::fs::File::open(&full_path) {
Ok(mut f) => {
if let Err(_e) = io::copy(&mut f, &mut io::sink()) {
let mut status = shared_status.write().unwrap();
if let Some(rs) = status.warmup.get_mut(rule_index) {
rs.errors += 1;
}
} else {
let mut status = shared_status.write().unwrap();
if let Some(rs) = status.warmup.get_mut(rule_index) {
rs.cached += 1;
}
}
}
Err(_e) => {
let mut status = shared_status.write().unwrap();
if let Some(rs) = status.warmup.get_mut(rule_index) {
rs.errors += 1;
}
}
}
}
// Mark complete
{
let mut status = shared_status.write().unwrap();
if status.warmup_generation == generation {
if let Some(rs) = status.warmup.get_mut(rule_index) {
rs.state = WarmupRuleState::Complete;
}
}
}
Ok(())
}
/// Check if a file is already in the rclone VFS cache. /// Check if a file is already in the rclone VFS cache.
fn is_cached(config: &Config, connection: &str, remote_path: &str, warmup_path: &str, relative_path: &str) -> bool { fn is_cached(config: &Config, connection: &str, remote_path: &str, warmup_path: &str, relative_path: &str) -> bool {
let cache_path = config let cache_path = config

View File

@ -28,6 +28,8 @@ pub struct ConfigDiff {
pub connections_modified: Vec<String>, pub connections_modified: Vec<String>,
/// Tier D: global settings changed (cache, read, writeback, directory_cache). /// Tier D: global settings changed (cache, read, writeback, directory_cache).
pub global_changed: bool, pub global_changed: bool,
/// Warmup settings changed (no restart needed, just update in-memory config).
pub warmup_changed: bool,
} }
impl ConfigDiff { impl ConfigDiff {
@ -42,6 +44,7 @@ impl ConfigDiff {
&& self.connections_removed.is_empty() && self.connections_removed.is_empty()
&& self.connections_modified.is_empty() && self.connections_modified.is_empty()
&& !self.global_changed && !self.global_changed
&& !self.warmup_changed
} }
/// Returns the highest tier of change detected. /// Returns the highest tier of change detected.
@ -98,6 +101,9 @@ impl ConfigDiff {
if self.bandwidth_changed { if self.bandwidth_changed {
parts.push("bandwidth limits changed".to_string()); parts.push("bandwidth limits changed".to_string());
} }
if self.warmup_changed {
parts.push("warmup settings changed".to_string());
}
if parts.is_empty() { if parts.is_empty() {
"no changes detected".to_string() "no changes detected".to_string()
} else { } else {
@ -234,6 +240,16 @@ pub fn diff(old: &Config, new: &Config) -> ConfigDiff {
d.shares_modified = modified_set.into_iter().collect(); d.shares_modified = modified_set.into_iter().collect();
// Warmup changes (no restart needed)
d.warmup_changed = old.warmup.auto != new.warmup.auto
|| old.warmup.rules.len() != new.warmup.rules.len()
|| old
.warmup
.rules
.iter()
.zip(new.warmup.rules.iter())
.any(|(o, n)| o.share != n.share || o.path != n.path || o.newer_than != n.newer_than);
d d
} }
@ -481,6 +497,25 @@ mount_point = "/mnt/photos"
assert!(summary.contains("bandwidth")); assert!(summary.contains("bandwidth"));
} }
#[test]
fn test_warmup_change() {
    let old = minimal_config();
    let mut new = old.clone();
    // Enable auto-warmup and add one rule — both count as warmup changes.
    new.warmup.rules.push(crate::config::WarmupRule {
        newer_than: Some("7d".to_string()),
        path: "/2024".to_string(),
        share: "photos".to_string(),
    });
    new.warmup.auto = true;

    let result = diff(&old, &new);
    assert!(result.warmup_changed);
    assert!(!result.global_changed);
    assert!(!result.is_empty());
    assert!(result.summary().contains("warmup"));
    // A warmup-only change is applied live; nothing needs a restart.
    assert_eq!(result.highest_tier(), ChangeTier::None);
}
#[test] #[test]
fn test_tier_ordering() { fn test_tier_ordering() {
assert!(ChangeTier::None < ChangeTier::Live); assert!(ChangeTier::None < ChangeTier::Live);

View File

@ -102,6 +102,11 @@ pub struct DaemonStatus {
pub webdav_running: bool, pub webdav_running: bool,
/// Whether NFS exports are active. /// Whether NFS exports are active.
pub nfs_exported: bool, pub nfs_exported: bool,
/// Per-rule warmup status (populated when warmup is triggered).
pub warmup: Vec<WarmupRuleStatus>,
/// Generation counter — incremented each time warmup is (re)started.
/// Workers check this to detect when they've been superseded.
pub warmup_generation: u64,
} }
impl DaemonStatus { impl DaemonStatus {
@ -127,9 +132,52 @@ impl DaemonStatus {
smbd_running: false, smbd_running: false,
webdav_running: false, webdav_running: false,
nfs_exported: false, nfs_exported: false,
warmup: Vec::new(),
warmup_generation: 0,
} }
} }
/// Aggregate warmup summary for a share.
///
/// Returns `(state_label, done_count, total_count)` where state_label is
/// one of "none", "pending", "running", "complete", "failed".
pub fn warmup_summary_for(&self, share_name: &str) -> (&str, usize, usize) {
let rules: Vec<&WarmupRuleStatus> = self
.warmup
.iter()
.filter(|r| r.share == share_name)
.collect();
if rules.is_empty() {
return ("none", 0, 0);
}
let total: usize = rules.iter().map(|r| r.total_files).sum();
let done: usize = rules.iter().map(|r| r.cached + r.skipped).sum();
let any_failed = rules
.iter()
.any(|r| matches!(r.state, WarmupRuleState::Failed(_)));
let any_running = rules
.iter()
.any(|r| matches!(r.state, WarmupRuleState::Listing | WarmupRuleState::Caching));
let all_complete = rules
.iter()
.all(|r| matches!(r.state, WarmupRuleState::Complete));
let label = if any_failed {
"failed"
} else if any_running {
"running"
} else if all_complete {
"complete"
} else {
"pending"
};
(label, done, total)
}
/// Format uptime as a human-readable string. /// Format uptime as a human-readable string.
pub fn uptime_string(&self) -> String { pub fn uptime_string(&self) -> String {
let secs = self.started_at.elapsed().as_secs(); let secs = self.started_at.elapsed().as_secs();
@ -243,6 +291,29 @@ pub enum ShareHealth {
Failed(String), Failed(String),
} }
/// Per-rule warmup progress, updated by the warmup worker thread.
///
/// One entry exists per configured warmup rule; `share`/`path`/`newer_than`
/// mirror the rule's config so the UI can display it without a config lookup.
#[derive(Clone, Debug, serde::Serialize)]
pub struct WarmupRuleStatus {
// Name of the share this rule targets.
pub share: String,
// Warmup path relative to the share root.
pub path: String,
// Optional age filter (e.g. "7d") passed to rclone as --max-age.
pub newer_than: Option<String>,
// Current position in the rule's state machine.
pub state: WarmupRuleState,
// Number of files the remote listing returned for this rule.
pub total_files: usize,
// Files skipped because they were already in the VFS cache.
pub skipped: usize,
// Files successfully read through the mount (now cached).
pub cached: usize,
// Files that failed to open or read.
pub errors: usize,
}
/// State machine for a single warmup rule.
///
/// Transitions: Pending -> Listing -> Caching -> Complete, with Failed
/// reachable from Listing (lsf errors) or path checks; a rule with zero
/// matching files goes straight from Listing to Complete.
#[derive(Clone, Debug, PartialEq, serde::Serialize)]
pub enum WarmupRuleState {
// Queued; the worker has not reached this rule yet.
Pending,
// Running `rclone lsf` to enumerate files.
Listing,
// Reading files through the mount to populate the cache.
Caching,
// All files processed (or nothing matched).
Complete,
// Aborted with the contained error message.
Failed(String),
}
/// Commands sent from the web server (or CLI) to the supervisor. /// Commands sent from the web server (or CLI) to the supervisor.
pub enum SupervisorCmd { pub enum SupervisorCmd {
/// Apply a new configuration (triggers tiered reload). /// Apply a new configuration (triggers tiered reload).
@ -278,6 +349,8 @@ mod tests {
assert!(!status.smbd_running); assert!(!status.smbd_running);
assert!(!status.webdav_running); assert!(!status.webdav_running);
assert!(!status.nfs_exported); assert!(!status.nfs_exported);
assert!(status.warmup.is_empty());
assert_eq!(status.warmup_generation, 0);
} }
#[test] #[test]
@ -352,4 +425,85 @@ mod tests {
assert_eq!(share.health_message(), Some("remote path not found")); assert_eq!(share.health_message(), Some("remote path not found"));
assert!(!share.is_healthy()); assert!(!share.is_healthy());
} }
#[test]
fn test_warmup_summary_no_rules() {
    // With no warmup entries at all, the summary reports "none" and zeros.
    let status = DaemonStatus::new(&["photos".to_string()]);
    assert_eq!(status.warmup_summary_for("photos"), ("none", 0, 0));
}
#[test]
fn test_warmup_summary_pending() {
    let mut ds = DaemonStatus::new(&["photos".to_string()]);
    // A single untouched rule should surface as "pending".
    let rule = WarmupRuleStatus {
        state: WarmupRuleState::Pending,
        share: "photos".to_string(),
        path: "/2024".to_string(),
        newer_than: None,
        total_files: 0,
        cached: 0,
        skipped: 0,
        errors: 0,
    };
    ds.warmup.push(rule);
    let (label, _, _) = ds.warmup_summary_for("photos");
    assert_eq!(label, "pending");
}
#[test]
fn test_warmup_summary_running() {
    let mut ds = DaemonStatus::new(&["photos".to_string()]);
    // Mid-caching: 40 cached + 10 skipped = 50 done out of 100.
    let rule = WarmupRuleStatus {
        state: WarmupRuleState::Caching,
        share: "photos".to_string(),
        path: "/2024".to_string(),
        newer_than: None,
        total_files: 100,
        cached: 40,
        skipped: 10,
        errors: 0,
    };
    ds.warmup.push(rule);
    assert_eq!(ds.warmup_summary_for("photos"), ("running", 50, 100));
}
#[test]
fn test_warmup_summary_complete() {
    let mut ds = DaemonStatus::new(&["photos".to_string()]);
    // Finished rule: every file either cached (70) or skipped (30).
    let rule = WarmupRuleStatus {
        state: WarmupRuleState::Complete,
        share: "photos".to_string(),
        path: "/2024".to_string(),
        newer_than: None,
        total_files: 100,
        cached: 70,
        skipped: 30,
        errors: 0,
    };
    ds.warmup.push(rule);
    assert_eq!(ds.warmup_summary_for("photos"), ("complete", 100, 100));
}
#[test]
fn test_warmup_summary_wrong_share() {
    let mut ds = DaemonStatus::new(&["photos".to_string()]);
    // Rules for other shares must not leak into this share's summary.
    let rule = WarmupRuleStatus {
        state: WarmupRuleState::Caching,
        share: "photos".to_string(),
        path: "/2024".to_string(),
        newer_than: None,
        total_files: 50,
        cached: 10,
        skipped: 0,
        errors: 0,
    };
    ds.warmup.push(rule);
    let (label, _, _) = ds.warmup_summary_for("videos");
    assert_eq!(label, "none");
}
} }

View File

@ -17,7 +17,7 @@ use anyhow::{Context, Result};
use crate::config::Config; use crate::config::Config;
use crate::config_diff::{self, ChangeTier}; use crate::config_diff::{self, ChangeTier};
use crate::daemon::{DaemonStatus, ShareHealth, SupervisorCmd}; use crate::daemon::{DaemonStatus, ShareHealth, SupervisorCmd, WarmupRuleState, WarmupRuleStatus};
use crate::rclone::mount::{build_mount_args, is_mounted}; use crate::rclone::mount::{build_mount_args, is_mounted};
use crate::rclone::rc; use crate::rclone::rc;
use crate::services::{nfs, samba, webdav}; use crate::services::{nfs, samba, webdav};
@ -196,28 +196,7 @@ pub fn run(config: &Config, config_path: PathBuf) -> Result<()> {
} }
// Phase 3.5: Auto-warmup in background thread (non-blocking) // Phase 3.5: Auto-warmup in background thread (non-blocking)
if !config.warmup.rules.is_empty() && config.warmup.auto { spawn_warmup(config, &shared_status, &shutdown);
let warmup_config = config.clone();
let warmup_shutdown = Arc::clone(&shutdown);
thread::spawn(move || {
println!("Auto-warmup started (background)...");
for rule in &warmup_config.warmup.rules {
if warmup_shutdown.load(Ordering::SeqCst) {
println!("Auto-warmup interrupted by shutdown.");
break;
}
if let Err(e) = crate::cli::warmup::run(
&warmup_config,
&rule.share,
&rule.path,
rule.newer_than.as_deref(),
) {
eprintln!("Warmup warning: {e}");
}
}
println!("Auto-warmup complete.");
});
}
// Phase 4: Supervision loop with command channel // Phase 4: Supervision loop with command channel
println!("Supervision active. Web UI at http://localhost:8090. Press Ctrl+C to stop."); println!("Supervision active. Web UI at http://localhost:8090. Press Ctrl+C to stop.");
@ -239,6 +218,78 @@ pub fn run(config: &Config, config_path: PathBuf) -> Result<()> {
result result
} }
/// Spawn a background warmup thread for all configured warmup rules.
///
/// Increments the warmup generation counter so any previous warmup thread
/// will detect the change and exit. Each rule is processed sequentially
/// with progress reported into `shared_status.warmup`.
///
/// No-op when warmup is disabled or no rules are configured. Called both at
/// daemon startup and again from `handle_reload` when warmup settings change.
fn spawn_warmup(
config: &Config,
shared_status: &Arc<RwLock<DaemonStatus>>,
shutdown: &Arc<AtomicBool>,
) {
if config.warmup.rules.is_empty() || !config.warmup.auto {
return;
}
// Pre-populate warmup status entries and bump generation
// Done in one write-lock scope so readers never see the new rule list
// paired with the old generation; any older worker thread that reads the
// bumped counter will stop itself.
let generation = {
let mut status = shared_status.write().unwrap();
status.warmup_generation += 1;
status.warmup = config
.warmup
.rules
.iter()
.map(|rule| WarmupRuleStatus {
share: rule.share.clone(),
path: rule.path.clone(),
newer_than: rule.newer_than.clone(),
state: WarmupRuleState::Pending,
total_files: 0,
skipped: 0,
cached: 0,
errors: 0,
})
.collect();
// Hand the new generation to the worker for its staleness checks.
status.warmup_generation
};
let warmup_config = config.clone();
let warmup_status = Arc::clone(shared_status);
let warmup_shutdown = Arc::clone(shutdown);
thread::spawn(move || {
println!("Auto-warmup started (background, generation {generation})...");
// Rules run one after another; index i addresses warmup[i] in status.
for (i, rule) in warmup_config.warmup.rules.iter().enumerate() {
if warmup_shutdown.load(Ordering::SeqCst) {
println!("Auto-warmup interrupted by shutdown.");
break;
}
// Check if our generation is still current
// (a config reload may have spawned a replacement thread).
{
let status = warmup_status.read().unwrap();
if status.warmup_generation != generation {
println!("Auto-warmup superseded by newer generation.");
return;
}
}
// run_tracked records per-rule failures in status; a warmup error
// is deliberately non-fatal so remaining rules still run.
if let Err(e) = crate::cli::warmup::run_tracked(
&warmup_config,
&rule.share,
&rule.path,
rule.newer_than.as_deref(),
&warmup_status,
i,
generation,
&warmup_shutdown,
) {
eprintln!("Warmup warning: {e}");
}
}
println!("Auto-warmup complete.");
});
}
/// Write rclone config and create directories (protocol configs generated after probe). /// Write rclone config and create directories (protocol configs generated after probe).
fn preflight(config: &Config) -> Result<()> { fn preflight(config: &Config) -> Result<()> {
// Ensure mount points exist for each share // Ensure mount points exist for each share
@ -535,6 +586,7 @@ fn supervise(
&mut smbd_tracker, &mut smbd_tracker,
&mut webdav_tracker, &mut webdav_tracker,
new_config, new_config,
&shutdown,
)?; )?;
println!("Config reload complete."); println!("Config reload complete.");
} }
@ -642,6 +694,9 @@ fn supervise(
} }
/// Poll RC API for each share and update the shared DaemonStatus. /// Poll RC API for each share and update the shared DaemonStatus.
///
/// Matches mounts to status entries by name (not index) so the mapping
/// stays correct after dynamic PerShare add/remove/modify reloads.
fn update_status( fn update_status(
shared_status: &Arc<RwLock<DaemonStatus>>, shared_status: &Arc<RwLock<DaemonStatus>>,
mounts: &[MountChild], mounts: &[MountChild],
@ -650,17 +705,17 @@ fn update_status(
) { ) {
let mut status = shared_status.write().unwrap(); let mut status = shared_status.write().unwrap();
// Update per-share stats from RC API // Update per-share stats from RC API — match by name, not index
for (i, mc) in mounts.iter().enumerate() { for mc in mounts.iter() {
if let Some(ss) = status.shares.get_mut(i) { let mount_point = config
ss.mounted = is_mounted( .shares
&config .iter()
.shares .find(|s| s.name == mc.name)
.get(i) .map(|s| s.mount_point.clone())
.map(|s| s.mount_point.clone()) .unwrap_or_default();
.unwrap_or_default(),
) if let Some(ss) = status.shares.iter_mut().find(|s| s.name == mc.name) {
.unwrap_or(false); ss.mounted = is_mounted(&mount_point).unwrap_or(false);
ss.rc_port = mc.rc_port; ss.rc_port = mc.rc_port;
// Fetch VFS stats (cache info, dirty files) // Fetch VFS stats (cache info, dirty files)
@ -706,6 +761,7 @@ fn handle_reload(
smbd_tracker: &mut RestartTracker, smbd_tracker: &mut RestartTracker,
webdav_tracker: &mut RestartTracker, webdav_tracker: &mut RestartTracker,
new_config: Config, new_config: Config,
shutdown: &Arc<AtomicBool>,
) -> Result<()> { ) -> Result<()> {
let old_config = shared_config.read().unwrap().clone(); let old_config = shared_config.read().unwrap().clone();
let diff = config_diff::diff(&old_config, &new_config); let diff = config_diff::diff(&old_config, &new_config);
@ -880,7 +936,14 @@ fn handle_reload(
errors: existing.map(|e| e.errors).unwrap_or(0), errors: existing.map(|e| e.errors).unwrap_or(0),
health: existing health: existing
.map(|e| e.health.clone()) .map(|e| e.health.clone())
.unwrap_or(ShareHealth::Pending), .unwrap_or_else(|| {
// New share: if mount succeeded, it's healthy
if mounts.iter().any(|mc| mc.name == s.name) {
ShareHealth::Healthy
} else {
ShareHealth::Pending
}
}),
} }
}) })
.collect(); .collect();
@ -890,6 +953,12 @@ fn handle_reload(
status.nfs_exported = new_config.protocols.enable_nfs; status.nfs_exported = new_config.protocols.enable_nfs;
} }
// Re-trigger warmup if settings changed
if diff.warmup_changed {
println!(" Warmup settings changed, re-triggering...");
spawn_warmup(&new_config, shared_status, shutdown);
}
Ok(()) Ok(())
} }

View File

@ -31,6 +31,19 @@ struct StatusResponse {
smbd_running: bool, smbd_running: bool,
webdav_running: bool, webdav_running: bool,
nfs_exported: bool, nfs_exported: bool,
warmup: Vec<WarmupRuleStatusResponse>,
}
/// JSON wire form of `daemon::WarmupRuleStatus` for the status API;
/// the state enum is flattened to a lowercase string label.
#[derive(Serialize)]
struct WarmupRuleStatusResponse {
// Share this rule belongs to.
share: String,
// Warmup path relative to the share root.
path: String,
// Optional age filter from the rule config.
newer_than: Option<String>,
// One of "pending", "listing", "caching", "complete", "failed".
state: String,
// Files found by the remote listing.
total_files: usize,
// Files already cached and skipped.
skipped: usize,
// Files newly pulled into the cache.
cached: usize,
// Files that failed to read.
errors: usize,
} }
#[derive(Serialize)] #[derive(Serialize)]
@ -48,6 +61,9 @@ struct ShareStatusResponse {
errors: u64, errors: u64,
health: String, health: String,
health_message: Option<String>, health_message: Option<String>,
warmup_state: String,
warmup_done: usize,
warmup_total: usize,
} }
async fn get_status(State(state): State<SharedState>) -> Json<StatusResponse> { async fn get_status(State(state): State<SharedState>) -> Json<StatusResponse> {
@ -57,25 +73,55 @@ async fn get_status(State(state): State<SharedState>) -> Json<StatusResponse> {
shares: status shares: status
.shares .shares
.iter() .iter()
.map(|s| ShareStatusResponse { .map(|s| {
name: s.name.clone(), let (warmup_state, warmup_done, warmup_total) =
mounted: s.mounted, status.warmup_summary_for(&s.name);
rc_port: s.rc_port, ShareStatusResponse {
cache_bytes: s.cache_bytes, name: s.name.clone(),
cache_display: s.cache_display(), mounted: s.mounted,
dirty_count: s.dirty_count, rc_port: s.rc_port,
errored_files: s.errored_files, cache_bytes: s.cache_bytes,
speed: s.speed, cache_display: s.cache_display(),
speed_display: s.speed_display(), dirty_count: s.dirty_count,
transfers: s.transfers, errored_files: s.errored_files,
errors: s.errors, speed: s.speed,
health: s.health_label().to_string(), speed_display: s.speed_display(),
health_message: s.health_message().map(|m| m.to_string()), transfers: s.transfers,
errors: s.errors,
health: s.health_label().to_string(),
health_message: s.health_message().map(|m| m.to_string()),
warmup_state: warmup_state.to_string(),
warmup_done,
warmup_total,
}
}) })
.collect(), .collect(),
smbd_running: status.smbd_running, smbd_running: status.smbd_running,
webdav_running: status.webdav_running, webdav_running: status.webdav_running,
nfs_exported: status.nfs_exported, nfs_exported: status.nfs_exported,
warmup: status
.warmup
.iter()
.map(|r| {
let state_str = match &r.state {
crate::daemon::WarmupRuleState::Pending => "pending",
crate::daemon::WarmupRuleState::Listing => "listing",
crate::daemon::WarmupRuleState::Caching => "caching",
crate::daemon::WarmupRuleState::Complete => "complete",
crate::daemon::WarmupRuleState::Failed(_) => "failed",
};
WarmupRuleStatusResponse {
share: r.share.clone(),
path: r.path.clone(),
newer_than: r.newer_than.clone(),
state: state_str.to_string(),
total_files: r.total_files,
skipped: r.skipped,
cached: r.cached,
errors: r.errors,
}
})
.collect(),
}) })
} }
@ -90,6 +136,7 @@ async fn get_share_status(
.iter() .iter()
.find(|s| s.name == share_name) .find(|s| s.name == share_name)
.ok_or(StatusCode::NOT_FOUND)?; .ok_or(StatusCode::NOT_FOUND)?;
let (warmup_state, warmup_done, warmup_total) = status.warmup_summary_for(&share.name);
Ok(Json(ShareStatusResponse { Ok(Json(ShareStatusResponse {
name: share.name.clone(), name: share.name.clone(),
mounted: share.mounted, mounted: share.mounted,
@ -104,6 +151,9 @@ async fn get_share_status(
errors: share.errors, errors: share.errors,
health: share.health_label().to_string(), health: share.health_label().to_string(),
health_message: share.health_message().map(|m| m.to_string()), health_message: share.health_message().map(|m| m.to_string()),
warmup_state: warmup_state.to_string(),
warmup_done,
warmup_total,
})) }))
} }

View File

@ -46,6 +46,9 @@ struct ShareView {
read_only: bool, read_only: bool,
health: String, health: String,
health_message: String, health_message: String,
warmup_state: String,
warmup_done: usize,
warmup_total: usize,
} }
/// Extended share view for the shares table with all detail fields. /// Extended share view for the shares table with all detail fields.
@ -66,6 +69,23 @@ struct ShareDetailView {
errors: u64, errors: u64,
health: String, health: String,
health_message: String, health_message: String,
warmup_state: String,
warmup_done: usize,
warmup_total: usize,
warmup_rules: Vec<WarmupRuleView>,
}
/// View model for a single warmup rule in the shares detail panel.
/// Populated from `status.warmup` filtered to the share being rendered.
#[allow(dead_code)]
struct WarmupRuleView {
// Warmup path shown in the rules table.
path: String,
// Age filter; empty string when the rule has no filter.
newer_than: String,
// Lowercase state label rendered inside the badge.
state: String,
// CSS badge suffix ("warn", "warmup", "ok", "error") for badge-{class}.
badge_class: String,
// Files found by the remote listing.
total_files: usize,
// Files already cached and skipped.
skipped: usize,
// Files newly pulled into the cache.
cached: usize,
// Files that failed to read.
errors: usize,
} }
/// Build compact share views from status + config. /// Build compact share views from status + config.
@ -75,6 +95,8 @@ fn build_share_views(status: &DaemonStatus, config: &Config) -> Vec<ShareView> {
.iter() .iter()
.map(|s| { .map(|s| {
let sc = config.find_share(&s.name); let sc = config.find_share(&s.name);
let (warmup_state, warmup_done, warmup_total) =
status.warmup_summary_for(&s.name);
ShareView { ShareView {
name: s.name.clone(), name: s.name.clone(),
connection: sc.map(|c| c.connection.clone()).unwrap_or_default(), connection: sc.map(|c| c.connection.clone()).unwrap_or_default(),
@ -88,6 +110,9 @@ fn build_share_views(status: &DaemonStatus, config: &Config) -> Vec<ShareView> {
read_only: sc.map(|c| c.read_only).unwrap_or(false), read_only: sc.map(|c| c.read_only).unwrap_or(false),
health: s.health_label().to_string(), health: s.health_label().to_string(),
health_message: s.health_message().unwrap_or("").to_string(), health_message: s.health_message().unwrap_or("").to_string(),
warmup_state: warmup_state.to_string(),
warmup_done,
warmup_total,
} }
}) })
.collect() .collect()
@ -100,6 +125,35 @@ fn build_share_detail_views(status: &DaemonStatus, config: &Config) -> Vec<Share
.iter() .iter()
.map(|s| { .map(|s| {
let sc = config.find_share(&s.name); let sc = config.find_share(&s.name);
let (warmup_state, warmup_done, warmup_total) =
status.warmup_summary_for(&s.name);
// Build per-rule views for this share
let warmup_rules: Vec<WarmupRuleView> = status
.warmup
.iter()
.filter(|r| r.share == s.name)
.map(|r| {
let (state_str, badge_class) = match &r.state {
crate::daemon::WarmupRuleState::Pending => ("pending", "warn"),
crate::daemon::WarmupRuleState::Listing => ("listing", "warmup"),
crate::daemon::WarmupRuleState::Caching => ("caching", "warmup"),
crate::daemon::WarmupRuleState::Complete => ("complete", "ok"),
crate::daemon::WarmupRuleState::Failed(_) => ("failed", "error"),
};
WarmupRuleView {
path: r.path.clone(),
newer_than: r.newer_than.clone().unwrap_or_default(),
state: state_str.to_string(),
badge_class: badge_class.to_string(),
total_files: r.total_files,
skipped: r.skipped,
cached: r.cached,
errors: r.errors,
}
})
.collect();
ShareDetailView { ShareDetailView {
name: s.name.clone(), name: s.name.clone(),
connection: sc.map(|c| c.connection.clone()).unwrap_or_default(), connection: sc.map(|c| c.connection.clone()).unwrap_or_default(),
@ -118,6 +172,10 @@ fn build_share_detail_views(status: &DaemonStatus, config: &Config) -> Vec<Share
errors: s.errors, errors: s.errors,
health: s.health_label().to_string(), health: s.health_label().to_string(),
health_message: s.health_message().unwrap_or("").to_string(), health_message: s.health_message().unwrap_or("").to_string(),
warmup_state: warmup_state.to_string(),
warmup_done,
warmup_total,
warmup_rules,
} }
}) })
.collect() .collect()

View File

@ -58,6 +58,8 @@ fn render_sse_payload(
.iter() .iter()
.map(|s| { .map(|s| {
let share_config = config.find_share(&s.name); let share_config = config.find_share(&s.name);
let (warmup_state, warmup_done, warmup_total) =
status.warmup_summary_for(&s.name);
SseShareView { SseShareView {
name: s.name.clone(), name: s.name.clone(),
connection: share_config connection: share_config
@ -80,6 +82,9 @@ fn render_sse_payload(
health: s.health_label().to_string(), health: s.health_label().to_string(),
health_message: s.health_message().unwrap_or("").to_string(), health_message: s.health_message().unwrap_or("").to_string(),
rc_port: s.rc_port, rc_port: s.rc_port,
warmup_state: warmup_state.to_string(),
warmup_done,
warmup_total,
} }
}) })
.collect(); .collect();
@ -169,6 +174,9 @@ pub struct SseShareView {
pub health: String, pub health: String,
pub health_message: String, pub health_message: String,
pub rc_port: u16, pub rc_port: u16,
pub warmup_state: String,
pub warmup_done: usize,
pub warmup_total: usize,
} }
#[derive(Template)] #[derive(Template)]

View File

@ -156,6 +156,7 @@ a:hover { text-decoration: underline; }
.badge-error { background: rgba(248,113,113,0.15); color: var(--red); } .badge-error { background: rgba(248,113,113,0.15); color: var(--red); }
.badge-ro { background: rgba(251,191,36,0.15); color: var(--yellow); } .badge-ro { background: rgba(251,191,36,0.15); color: var(--yellow); }
.badge-warn { background: rgba(251,191,36,0.15); color: var(--yellow); } .badge-warn { background: rgba(251,191,36,0.15); color: var(--yellow); }
.badge-warmup { background: rgba(251,191,36,0.15); color: var(--yellow); }
/* ─── Stats row (inside share cards) ──────────────────── */ /* ─── Stats row (inside share cards) ──────────────────── */

View File

@ -19,6 +19,13 @@
{% if share.read_only %} {% if share.read_only %}
<span class="badge badge-ro">RO</span> <span class="badge badge-ro">RO</span>
{% endif %} {% endif %}
{% if share.warmup_state == "running" %}
<span class="badge badge-warmup">WARMUP {{ share.warmup_done }}/{{ share.warmup_total }}</span>
{% elif share.warmup_state == "pending" %}
<span class="badge badge-warmup">WARMUP...</span>
{% elif share.warmup_state == "complete" %}
<span class="badge badge-ok">WARMED</span>
{% endif %}
</div> </div>
</div> </div>
<div class="stats"> <div class="stats">

View File

@ -28,6 +28,13 @@
{% if share.read_only %} {% if share.read_only %}
<span class="badge badge-ro">RO</span> <span class="badge badge-ro">RO</span>
{% endif %} {% endif %}
{% if share.warmup_state == "running" %}
<span class="badge badge-warmup">WARMUP {{ share.warmup_done }}/{{ share.warmup_total }}</span>
{% elif share.warmup_state == "pending" %}
<span class="badge badge-warmup">WARMUP...</span>
{% elif share.warmup_state == "complete" %}
<span class="badge badge-ok">WARMED</span>
{% endif %}
</td> </td>
<td class="mono">{{ share.mount_point }}</td> <td class="mono">{{ share.mount_point }}</td>
<td>{{ share.cache_display }}</td> <td>{{ share.cache_display }}</td>
@ -70,6 +77,27 @@
<tr><td>Mounted</td><td>{% if share.mounted %}Yes{% else %}No{% endif %}</td></tr> <tr><td>Mounted</td><td>{% if share.mounted %}Yes{% else %}No{% endif %}</td></tr>
<tr><td>Read-Only</td><td>{% if share.read_only %}Yes{% else %}No{% endif %}</td></tr> <tr><td>Read-Only</td><td>{% if share.read_only %}Yes{% else %}No{% endif %}</td></tr>
</table> </table>
{% if !share.warmup_rules.is_empty() %}
<h4 style="margin-top:1rem;margin-bottom:0.5rem;font-size:0.95em">Warmup Rules</h4>
<table class="info-table">
<thead><tr>
<td style="font-weight:600;color:var(--text-muted)">Path</td>
<td style="font-weight:600;color:var(--text-muted)">Filter</td>
<td style="font-weight:600;color:var(--text-muted)">State</td>
<td style="font-weight:600;color:var(--text-muted)">Progress</td>
</tr></thead>
<tbody>
{% for rule in share.warmup_rules %}
<tr>
<td class="mono">{{ rule.path }}</td>
<td>{% if rule.newer_than.is_empty() %}-{% else %}{{ rule.newer_than }}{% endif %}</td>
<td><span class="badge badge-{{ rule.badge_class }}">{{ rule.state }}</span></td>
<td>{{ rule.cached + rule.skipped }}/{{ rule.total_files }}</td>
</tr>
{% endfor %}
</tbody>
</table>
{% endif %}
</div> </div>
</td> </td>
</tr> </tr>