diff --git a/src/cli/status.rs b/src/cli/status.rs index f0bacc4..066fc98 100644 --- a/src/cli/status.rs +++ b/src/cli/status.rs @@ -28,6 +28,12 @@ struct ApiShare { speed: f64, transfers: u64, errors: u64, + #[serde(default)] + warmup_state: Option<String>, + #[serde(default)] + warmup_done: Option<u64>, + #[serde(default)] + warmup_total: Option<u64>, } pub fn run(config: &Config) -> Result<()> { @@ -52,10 +58,23 @@ fn print_api_status(api: &ApiStatus) -> Result<()> { let mut any_active = false; for share in &api.shares { + // Build warmup suffix + let warmup_suffix = match share.warmup_state.as_deref() { + Some("running") => { + let done = share.warmup_done.unwrap_or(0); + let total = share.warmup_total.unwrap_or(0); + format!("\tWarmup [{done}/{total}]") + } + Some("pending") => "\tWarmup...".to_string(), + Some("complete") => "\tWarmup done".to_string(), + Some("failed") => "\tWarmup FAILED".to_string(), + _ => String::new(), + }; + match share.health.as_str() { "OK" => { if share.mounted { - println!("Mount: OK {}", share.name); + println!("Mount: OK {}{}", share.name, warmup_suffix); any_active = true; } else { println!("Mount: DOWN {} — mount lost", share.name); diff --git a/src/cli/warmup.rs b/src/cli/warmup.rs index a7dc9fc..07d07b5 100644 --- a/src/cli/warmup.rs +++ b/src/cli/warmup.rs @@ -5,10 +5,13 @@ use std::io; use std::process::Command; +use std::sync::atomic::{AtomicBool, Ordering}; +use std::sync::{Arc, RwLock}; use anyhow::{Context, Result}; use crate::config::Config; +use crate::daemon::{DaemonStatus, WarmupRuleState}; use crate::rclone::config as rclone_config; pub fn run(config: &Config, share_name: &str, path: &str, newer_than: Option<&str>) -> Result<()> { @@ -96,6 +99,165 @@ pub fn run(config: &Config, share_name: &str, path: &str, newer_than: Option<&st Ok(()) } +/// Like `run()` but reports progress into `shared_status.warmup[rule_index]`. 
+/// +/// Checks `shutdown` and `generation` before each file to allow early exit +/// when the daemon is stopping or a new warmup generation supersedes this one. +pub fn run_tracked( + config: &Config, + share_name: &str, + path: &str, + newer_than: Option<&str>, + shared_status: &Arc<RwLock<DaemonStatus>>, + rule_index: usize, + generation: u64, + shutdown: &AtomicBool, +) -> Result<()> { + let share = config + .find_share(share_name) + .with_context(|| format!("Share '{}' not found in config", share_name))?; + + let warmup_path = share.mount_point.join(path); + let remote_src = format!("{}:{}/{}", share.connection, share.remote_path, path); + + // Mark as Listing + { + let mut status = shared_status.write().unwrap(); + if status.warmup_generation != generation { + return Ok(()); + } + if let Some(rs) = status.warmup.get_mut(rule_index) { + rs.state = WarmupRuleState::Listing; + } + } + + if !warmup_path.exists() { + let msg = format!("Path not found on mount: {}", warmup_path.display()); + let mut status = shared_status.write().unwrap(); + if let Some(rs) = status.warmup.get_mut(rule_index) { + rs.state = WarmupRuleState::Failed(msg.clone()); + } + anyhow::bail!("{msg}"); + } + + // List files on remote + let mut cmd = Command::new("rclone"); + cmd.arg("lsf") + .arg("--config") + .arg(rclone_config::RCLONE_CONF_PATH) + .arg("--recursive") + .arg("--files-only") + .arg(&remote_src); + + if let Some(age) = newer_than { + cmd.arg("--max-age").arg(age); + } + + let output = match cmd.output() { + Ok(o) => o, + Err(e) => { + let msg = format!("Failed to run rclone lsf: {e}"); + let mut status = shared_status.write().unwrap(); + if let Some(rs) = status.warmup.get_mut(rule_index) { + rs.state = WarmupRuleState::Failed(msg.clone()); + } + anyhow::bail!("{msg}"); + } + }; + + if !output.status.success() { + let msg = format!( + "rclone lsf failed: {}", + String::from_utf8_lossy(&output.stderr).trim() + ); + let mut status = shared_status.write().unwrap(); + if let Some(rs) = 
status.warmup.get_mut(rule_index) { + rs.state = WarmupRuleState::Failed(msg.clone()); + } + anyhow::bail!("{msg}"); + } + + let file_list = String::from_utf8_lossy(&output.stdout); + let files: Vec<&str> = file_list.lines().filter(|l| !l.is_empty()).collect(); + let total = files.len(); + + // Update total and transition to Caching + { + let mut status = shared_status.write().unwrap(); + if status.warmup_generation != generation { + return Ok(()); + } + if let Some(rs) = status.warmup.get_mut(rule_index) { + rs.total_files = total; + rs.state = if total == 0 { + WarmupRuleState::Complete + } else { + WarmupRuleState::Caching + }; + } + } + + if total == 0 { + return Ok(()); + } + + for file in &files { + // Check shutdown / generation before each file + if shutdown.load(Ordering::SeqCst) { + return Ok(()); + } + { + let status = shared_status.read().unwrap(); + if status.warmup_generation != generation { + return Ok(()); + } + } + + if is_cached(config, &share.connection, &share.remote_path, path, file) { + let mut status = shared_status.write().unwrap(); + if let Some(rs) = status.warmup.get_mut(rule_index) { + rs.skipped += 1; + } + continue; + } + + let full_path = warmup_path.join(file); + match std::fs::File::open(&full_path) { + Ok(mut f) => { + if let Err(_e) = io::copy(&mut f, &mut io::sink()) { + let mut status = shared_status.write().unwrap(); + if let Some(rs) = status.warmup.get_mut(rule_index) { + rs.errors += 1; + } + } else { + let mut status = shared_status.write().unwrap(); + if let Some(rs) = status.warmup.get_mut(rule_index) { + rs.cached += 1; + } + } + } + Err(_e) => { + let mut status = shared_status.write().unwrap(); + if let Some(rs) = status.warmup.get_mut(rule_index) { + rs.errors += 1; + } + } + } + } + + // Mark complete + { + let mut status = shared_status.write().unwrap(); + if status.warmup_generation == generation { + if let Some(rs) = status.warmup.get_mut(rule_index) { + rs.state = WarmupRuleState::Complete; + } + } + } + + Ok(()) 
+} + /// Check if a file is already in the rclone VFS cache. fn is_cached(config: &Config, connection: &str, remote_path: &str, warmup_path: &str, relative_path: &str) -> bool { let cache_path = config diff --git a/src/config_diff.rs b/src/config_diff.rs index 231c78e..72c9488 100644 --- a/src/config_diff.rs +++ b/src/config_diff.rs @@ -28,6 +28,8 @@ pub struct ConfigDiff { pub connections_modified: Vec<String>, /// Tier D: global settings changed (cache, read, writeback, directory_cache). pub global_changed: bool, + /// Warmup settings changed (no restart needed, just update in-memory config). + pub warmup_changed: bool, } impl ConfigDiff { @@ -42,6 +44,7 @@ impl ConfigDiff { && self.connections_removed.is_empty() && self.connections_modified.is_empty() && !self.global_changed + && !self.warmup_changed } /// Returns the highest tier of change detected. @@ -98,6 +101,9 @@ impl ConfigDiff { if self.bandwidth_changed { parts.push("bandwidth limits changed".to_string()); } + if self.warmup_changed { + parts.push("warmup settings changed".to_string()); + } if parts.is_empty() { "no changes detected".to_string() } else { @@ -234,6 +240,16 @@ pub fn diff(old: &Config, new: &Config) -> ConfigDiff { d.shares_modified = modified_set.into_iter().collect(); + // Warmup changes (no restart needed) + d.warmup_changed = old.warmup.auto != new.warmup.auto + || old.warmup.rules.len() != new.warmup.rules.len() + || old + .warmup + .rules + .iter() + .zip(new.warmup.rules.iter()) + .any(|(o, n)| o.share != n.share || o.path != n.path || o.newer_than != n.newer_than); + d } @@ -481,6 +497,25 @@ mount_point = "/mnt/photos" assert!(summary.contains("bandwidth")); } + #[test] + fn test_warmup_change() { + let old = minimal_config(); + let mut new = old.clone(); + new.warmup.auto = true; + new.warmup.rules.push(crate::config::WarmupRule { + share: "photos".to_string(), + path: "/2024".to_string(), + newer_than: Some("7d".to_string()), + }); + let d = diff(&old, &new); + 
assert!(d.warmup_changed); + assert!(!d.global_changed); + // Warmup-only changes need no restart + assert_eq!(d.highest_tier(), ChangeTier::None); + assert!(!d.is_empty()); + assert!(d.summary().contains("warmup")); + } + #[test] fn test_tier_ordering() { assert!(ChangeTier::None < ChangeTier::Live); diff --git a/src/daemon.rs b/src/daemon.rs index cbbe8f0..bbbcf42 100644 --- a/src/daemon.rs +++ b/src/daemon.rs @@ -102,6 +102,11 @@ pub struct DaemonStatus { pub webdav_running: bool, /// Whether NFS exports are active. pub nfs_exported: bool, + /// Per-rule warmup status (populated when warmup is triggered). + pub warmup: Vec<WarmupRuleStatus>, + /// Generation counter — incremented each time warmup is (re)started. + /// Workers check this to detect when they've been superseded. + pub warmup_generation: u64, } impl DaemonStatus { @@ -127,9 +132,52 @@ impl DaemonStatus { smbd_running: false, webdav_running: false, nfs_exported: false, + warmup: Vec::new(), + warmup_generation: 0, } } + /// Aggregate warmup summary for a share. + /// + /// Returns `(state_label, done_count, total_count)` where state_label is + /// one of "none", "pending", "running", "complete", "failed". 
+ pub fn warmup_summary_for(&self, share_name: &str) -> (&str, usize, usize) { + let rules: Vec<&WarmupRuleStatus> = self + .warmup + .iter() + .filter(|r| r.share == share_name) + .collect(); + + if rules.is_empty() { + return ("none", 0, 0); + } + + let total: usize = rules.iter().map(|r| r.total_files).sum(); + let done: usize = rules.iter().map(|r| r.cached + r.skipped).sum(); + + let any_failed = rules + .iter() + .any(|r| matches!(r.state, WarmupRuleState::Failed(_))); + let any_running = rules + .iter() + .any(|r| matches!(r.state, WarmupRuleState::Listing | WarmupRuleState::Caching)); + let all_complete = rules + .iter() + .all(|r| matches!(r.state, WarmupRuleState::Complete)); + + let label = if any_failed { + "failed" + } else if any_running { + "running" + } else if all_complete { + "complete" + } else { + "pending" + }; + + (label, done, total) + } + /// Format uptime as a human-readable string. pub fn uptime_string(&self) -> String { let secs = self.started_at.elapsed().as_secs(); @@ -243,6 +291,29 @@ pub enum ShareHealth { Failed(String), } +/// Per-rule warmup progress, updated by the warmup worker thread. +#[derive(Clone, Debug, serde::Serialize)] +pub struct WarmupRuleStatus { + pub share: String, + pub path: String, + pub newer_than: Option<String>, + pub state: WarmupRuleState, + pub total_files: usize, + pub skipped: usize, + pub cached: usize, + pub errors: usize, +} + +/// State machine for a single warmup rule. +#[derive(Clone, Debug, PartialEq, serde::Serialize)] +pub enum WarmupRuleState { + Pending, + Listing, + Caching, + Complete, + Failed(String), +} + /// Commands sent from the web server (or CLI) to the supervisor. pub enum SupervisorCmd { /// Apply a new configuration (triggers tiered reload). 
@@ -278,6 +349,8 @@ mod tests { assert!(!status.smbd_running); assert!(!status.webdav_running); assert!(!status.nfs_exported); + assert!(status.warmup.is_empty()); + assert_eq!(status.warmup_generation, 0); } #[test] @@ -352,4 +425,85 @@ mod tests { assert_eq!(share.health_message(), Some("remote path not found")); assert!(!share.is_healthy()); } + + #[test] + fn test_warmup_summary_no_rules() { + let status = DaemonStatus::new(&["photos".to_string()]); + let (label, done, total) = status.warmup_summary_for("photos"); + assert_eq!(label, "none"); + assert_eq!(done, 0); + assert_eq!(total, 0); + } + + #[test] + fn test_warmup_summary_pending() { + let mut status = DaemonStatus::new(&["photos".to_string()]); + status.warmup.push(WarmupRuleStatus { + share: "photos".into(), + path: "/2024".into(), + newer_than: None, + state: WarmupRuleState::Pending, + total_files: 0, + skipped: 0, + cached: 0, + errors: 0, + }); + let (label, _, _) = status.warmup_summary_for("photos"); + assert_eq!(label, "pending"); + } + + #[test] + fn test_warmup_summary_running() { + let mut status = DaemonStatus::new(&["photos".to_string()]); + status.warmup.push(WarmupRuleStatus { + share: "photos".into(), + path: "/2024".into(), + newer_than: None, + state: WarmupRuleState::Caching, + total_files: 100, + skipped: 10, + cached: 40, + errors: 0, + }); + let (label, done, total) = status.warmup_summary_for("photos"); + assert_eq!(label, "running"); + assert_eq!(done, 50); + assert_eq!(total, 100); + } + + #[test] + fn test_warmup_summary_complete() { + let mut status = DaemonStatus::new(&["photos".to_string()]); + status.warmup.push(WarmupRuleStatus { + share: "photos".into(), + path: "/2024".into(), + newer_than: None, + state: WarmupRuleState::Complete, + total_files: 100, + skipped: 30, + cached: 70, + errors: 0, + }); + let (label, done, total) = status.warmup_summary_for("photos"); + assert_eq!(label, "complete"); + assert_eq!(done, 100); + assert_eq!(total, 100); + } + + #[test] + fn 
test_warmup_summary_wrong_share() { + let mut status = DaemonStatus::new(&["photos".to_string()]); + status.warmup.push(WarmupRuleStatus { + share: "photos".into(), + path: "/2024".into(), + newer_than: None, + state: WarmupRuleState::Caching, + total_files: 50, + skipped: 0, + cached: 10, + errors: 0, + }); + let (label, _, _) = status.warmup_summary_for("videos"); + assert_eq!(label, "none"); + } } diff --git a/src/supervisor.rs b/src/supervisor.rs index 1bc48df..4855409 100644 --- a/src/supervisor.rs +++ b/src/supervisor.rs @@ -17,7 +17,7 @@ use anyhow::{Context, Result}; use crate::config::Config; use crate::config_diff::{self, ChangeTier}; -use crate::daemon::{DaemonStatus, ShareHealth, SupervisorCmd}; +use crate::daemon::{DaemonStatus, ShareHealth, SupervisorCmd, WarmupRuleState, WarmupRuleStatus}; use crate::rclone::mount::{build_mount_args, is_mounted}; use crate::rclone::rc; use crate::services::{nfs, samba, webdav}; @@ -196,28 +196,7 @@ pub fn run(config: &Config, config_path: PathBuf) -> Result<()> { } // Phase 3.5: Auto-warmup in background thread (non-blocking) - if !config.warmup.rules.is_empty() && config.warmup.auto { - let warmup_config = config.clone(); - let warmup_shutdown = Arc::clone(&shutdown); - thread::spawn(move || { - println!("Auto-warmup started (background)..."); - for rule in &warmup_config.warmup.rules { - if warmup_shutdown.load(Ordering::SeqCst) { - println!("Auto-warmup interrupted by shutdown."); - break; - } - if let Err(e) = crate::cli::warmup::run( - &warmup_config, - &rule.share, - &rule.path, - rule.newer_than.as_deref(), - ) { - eprintln!("Warmup warning: {e}"); - } - } - println!("Auto-warmup complete."); - }); - } + spawn_warmup(config, &shared_status, &shutdown); // Phase 4: Supervision loop with command channel println!("Supervision active. Web UI at http://localhost:8090. 
Press Ctrl+C to stop."); @@ -239,6 +218,78 @@ pub fn run(config: &Config, config_path: PathBuf) -> Result<()> { result } +/// Spawn a background warmup thread for all configured warmup rules. +/// +/// Increments the warmup generation counter so any previous warmup thread +/// will detect the change and exit. Each rule is processed sequentially +/// with progress reported into `shared_status.warmup`. +fn spawn_warmup( + config: &Config, + shared_status: &Arc<RwLock<DaemonStatus>>, + shutdown: &Arc<AtomicBool>, +) { + if config.warmup.rules.is_empty() || !config.warmup.auto { + return; + } + + // Pre-populate warmup status entries and bump generation + let generation = { + let mut status = shared_status.write().unwrap(); + status.warmup_generation += 1; + status.warmup = config + .warmup + .rules + .iter() + .map(|rule| WarmupRuleStatus { + share: rule.share.clone(), + path: rule.path.clone(), + newer_than: rule.newer_than.clone(), + state: WarmupRuleState::Pending, + total_files: 0, + skipped: 0, + cached: 0, + errors: 0, + }) + .collect(); + status.warmup_generation + }; + + let warmup_config = config.clone(); + let warmup_status = Arc::clone(shared_status); + let warmup_shutdown = Arc::clone(shutdown); + + thread::spawn(move || { + println!("Auto-warmup started (background, generation {generation})..."); + for (i, rule) in warmup_config.warmup.rules.iter().enumerate() { + if warmup_shutdown.load(Ordering::SeqCst) { + println!("Auto-warmup interrupted by shutdown."); + break; + } + // Check if our generation is still current + { + let status = warmup_status.read().unwrap(); + if status.warmup_generation != generation { + println!("Auto-warmup superseded by newer generation."); + return; + } + } + if let Err(e) = crate::cli::warmup::run_tracked( + &warmup_config, + &rule.share, + &rule.path, + rule.newer_than.as_deref(), + &warmup_status, + i, + generation, + &warmup_shutdown, + ) { + eprintln!("Warmup warning: {e}"); + } + } + println!("Auto-warmup complete."); + }); +} + /// Write rclone config 
and create directories (protocol configs generated after probe). fn preflight(config: &Config) -> Result<()> { // Ensure mount points exist for each share @@ -535,6 +586,7 @@ fn supervise( &mut smbd_tracker, &mut webdav_tracker, new_config, + &shutdown, )?; println!("Config reload complete."); } @@ -642,6 +694,9 @@ fn supervise( } /// Poll RC API for each share and update the shared DaemonStatus. +/// +/// Matches mounts to status entries by name (not index) so the mapping +/// stays correct after dynamic PerShare add/remove/modify reloads. fn update_status( shared_status: &Arc<RwLock<DaemonStatus>>, mounts: &[MountChild], @@ -650,17 +705,17 @@ ) { let mut status = shared_status.write().unwrap(); - // Update per-share stats from RC API - for (i, mc) in mounts.iter().enumerate() { - if let Some(ss) = status.shares.get_mut(i) { - ss.mounted = is_mounted( - &config - .shares - .get(i) - .map(|s| s.mount_point.clone()) - .unwrap_or_default(), - ) - .unwrap_or(false); + // Update per-share stats from RC API — match by name, not index + for mc in mounts.iter() { + let mount_point = config + .shares + .iter() + .find(|s| s.name == mc.name) + .map(|s| s.mount_point.clone()) + .unwrap_or_default(); + + if let Some(ss) = status.shares.iter_mut().find(|s| s.name == mc.name) { + ss.mounted = is_mounted(&mount_point).unwrap_or(false); ss.rc_port = mc.rc_port; // Fetch VFS stats (cache info, dirty files) @@ -706,6 +761,7 @@ fn handle_reload( smbd_tracker: &mut RestartTracker, webdav_tracker: &mut RestartTracker, new_config: Config, + shutdown: &Arc<AtomicBool>, ) -> Result<()> { let old_config = shared_config.read().unwrap().clone(); let diff = config_diff::diff(&old_config, &new_config); @@ -880,7 +936,14 @@ fn handle_reload( errors: existing.map(|e| e.errors).unwrap_or(0), health: existing .map(|e| e.health.clone()) - .unwrap_or(ShareHealth::Pending), + .unwrap_or_else(|| { + // New share: if mount succeeded, it's healthy + if mounts.iter().any(|mc| mc.name == s.name) { + 
ShareHealth::Healthy + } else { + ShareHealth::Pending + } + }), } }) .collect(); @@ -890,6 +953,12 @@ status.nfs_exported = new_config.protocols.enable_nfs; } + // Re-trigger warmup if settings changed + if diff.warmup_changed { + println!(" Warmup settings changed, re-triggering..."); + spawn_warmup(&new_config, shared_status, shutdown); + } + Ok(()) } diff --git a/src/web/api.rs b/src/web/api.rs index 849da0e..e6dfdfc 100644 --- a/src/web/api.rs +++ b/src/web/api.rs @@ -31,6 +31,19 @@ struct StatusResponse { smbd_running: bool, webdav_running: bool, nfs_exported: bool, + warmup: Vec<WarmupRuleStatusResponse>, +} + +#[derive(Serialize)] +struct WarmupRuleStatusResponse { + share: String, + path: String, + newer_than: Option<String>, + state: String, + total_files: usize, + skipped: usize, + cached: usize, + errors: usize, } #[derive(Serialize)] @@ -48,6 +61,9 @@ struct ShareStatusResponse { errors: u64, health: String, health_message: Option<String>, + warmup_state: String, + warmup_done: usize, + warmup_total: usize, } async fn get_status(State(state): State<AppState>) -> Json<StatusResponse> { @@ -57,25 +73,55 @@ shares: status .shares .iter() - .map(|s| ShareStatusResponse { - name: s.name.clone(), - mounted: s.mounted, - rc_port: s.rc_port, - cache_bytes: s.cache_bytes, - cache_display: s.cache_display(), - dirty_count: s.dirty_count, - errored_files: s.errored_files, - speed: s.speed, - speed_display: s.speed_display(), - transfers: s.transfers, - errors: s.errors, - health: s.health_label().to_string(), - health_message: s.health_message().map(|m| m.to_string()), + .map(|s| { + let (warmup_state, warmup_done, warmup_total) = + status.warmup_summary_for(&s.name); + ShareStatusResponse { + name: s.name.clone(), + mounted: s.mounted, + rc_port: s.rc_port, + cache_bytes: s.cache_bytes, + cache_display: s.cache_display(), + dirty_count: s.dirty_count, + errored_files: s.errored_files, + speed: s.speed, + speed_display: s.speed_display(), + transfers: 
s.transfers, + errors: s.errors, + health: s.health_label().to_string(), + health_message: s.health_message().map(|m| m.to_string()), + warmup_state: warmup_state.to_string(), + warmup_done, + warmup_total, + } }) .collect(), smbd_running: status.smbd_running, webdav_running: status.webdav_running, nfs_exported: status.nfs_exported, + warmup: status + .warmup + .iter() + .map(|r| { + let state_str = match &r.state { + crate::daemon::WarmupRuleState::Pending => "pending", + crate::daemon::WarmupRuleState::Listing => "listing", + crate::daemon::WarmupRuleState::Caching => "caching", + crate::daemon::WarmupRuleState::Complete => "complete", + crate::daemon::WarmupRuleState::Failed(_) => "failed", + }; + WarmupRuleStatusResponse { + share: r.share.clone(), + path: r.path.clone(), + newer_than: r.newer_than.clone(), + state: state_str.to_string(), + total_files: r.total_files, + skipped: r.skipped, + cached: r.cached, + errors: r.errors, + } + }) + .collect(), }) } @@ -90,6 +136,7 @@ async fn get_share_status( .iter() .find(|s| s.name == share_name) .ok_or(StatusCode::NOT_FOUND)?; + let (warmup_state, warmup_done, warmup_total) = status.warmup_summary_for(&share.name); Ok(Json(ShareStatusResponse { name: share.name.clone(), mounted: share.mounted, @@ -104,6 +151,9 @@ async fn get_share_status( errors: share.errors, health: share.health_label().to_string(), health_message: share.health_message().map(|m| m.to_string()), + warmup_state: warmup_state.to_string(), + warmup_done, + warmup_total, })) } diff --git a/src/web/pages.rs b/src/web/pages.rs index 152c7a9..9fba7eb 100644 --- a/src/web/pages.rs +++ b/src/web/pages.rs @@ -46,6 +46,9 @@ struct ShareView { read_only: bool, health: String, health_message: String, + warmup_state: String, + warmup_done: usize, + warmup_total: usize, } /// Extended share view for the shares table with all detail fields. 
@@ -66,6 +69,23 @@ struct ShareDetailView { errors: u64, health: String, health_message: String, + warmup_state: String, + warmup_done: usize, + warmup_total: usize, + warmup_rules: Vec<WarmupRuleView>, +} + +/// View model for a single warmup rule in the shares detail panel. +#[allow(dead_code)] +struct WarmupRuleView { + path: String, + newer_than: String, + state: String, + badge_class: String, + total_files: usize, + skipped: usize, + cached: usize, + errors: usize, } /// Build compact share views from status + config. @@ -75,6 +95,8 @@ fn build_share_views(status: &DaemonStatus, config: &Config) -> Vec<ShareView> { .iter() .map(|s| { let sc = config.find_share(&s.name); + let (warmup_state, warmup_done, warmup_total) = + status.warmup_summary_for(&s.name); ShareView { name: s.name.clone(), connection: sc.map(|c| c.connection.clone()).unwrap_or_default(), @@ -88,6 +110,9 @@ fn build_share_views(status: &DaemonStatus, config: &Config) -> Vec<ShareView> { read_only: sc.map(|c| c.read_only).unwrap_or(false), health: s.health_label().to_string(), health_message: s.health_message().unwrap_or("").to_string(), + warmup_state: warmup_state.to_string(), + warmup_done, + warmup_total, } }) .collect() @@ -100,6 +125,35 @@ fn build_share_detail_views(status: &DaemonStatus, config: &Config) -> Vec<ShareDetailView> { + let warmup_rules: Vec<WarmupRuleView> = status + .warmup + .iter() + .filter(|r| r.share == s.name) + .map(|r| { + let (state_str, badge_class) = match &r.state { + crate::daemon::WarmupRuleState::Pending => ("pending", "warn"), + crate::daemon::WarmupRuleState::Listing => ("listing", "warmup"), + crate::daemon::WarmupRuleState::Caching => ("caching", "warmup"), + crate::daemon::WarmupRuleState::Complete => ("complete", "ok"), + crate::daemon::WarmupRuleState::Failed(_) => ("failed", "error"), + }; + WarmupRuleView { + path: r.path.clone(), + newer_than: r.newer_than.clone().unwrap_or_default(), + state: state_str.to_string(), + badge_class: badge_class.to_string(), + total_files: r.total_files, + skipped: r.skipped, + cached: r.cached, + errors: 
r.errors, + } + }) + .collect(); + ShareDetailView { name: s.name.clone(), connection: sc.map(|c| c.connection.clone()).unwrap_or_default(), @@ -118,6 +172,10 @@ fn build_share_detail_views(status: &DaemonStatus, config: &Config) -> VecRO {% endif %} + {% if share.warmup_state == "running" %} + WARMUP {{ share.warmup_done }}/{{ share.warmup_total }} + {% elif share.warmup_state == "pending" %} + WARMUP... + {% elif share.warmup_state == "complete" %} + WARMED + {% endif %}
diff --git a/templates/web/tabs/shares.html b/templates/web/tabs/shares.html index 934b9a7..03b3e4b 100644 --- a/templates/web/tabs/shares.html +++ b/templates/web/tabs/shares.html @@ -28,6 +28,13 @@ {% if share.read_only %} RO {% endif %} + {% if share.warmup_state == "running" %} + WARMUP {{ share.warmup_done }}/{{ share.warmup_total }} + {% elif share.warmup_state == "pending" %} + WARMUP... + {% elif share.warmup_state == "complete" %} + WARMED + {% endif %} {{ share.mount_point }} {{ share.cache_display }} @@ -70,6 +77,27 @@ Mounted{% if share.mounted %}Yes{% else %}No{% endif %} Read-Only{% if share.read_only %}Yes{% else %}No{% endif %} + {% if !share.warmup_rules.is_empty() %} +

Warmup Rules

+ + + + + + + + + {% for rule in share.warmup_rules %} + + + + + + + {% endfor %} + +
<tr><th>Path</th><th>Filter</th><th>State</th><th>Progress</th></tr>
<td>{{ rule.path }}</td><td>{% if rule.newer_than.is_empty() %}-{% else %}{{ rule.newer_than }}{% endif %}</td><td>{{ rule.state }}</td><td>{{ rule.cached + rule.skipped }}/{{ rule.total_files }}</td>
+ {% endif %}