Add periodic dir-refresh and per-share refresh status display

Introduces a ScheduledTask mechanism that periodically calls rclone RC
vfs/refresh to keep directory listing caches warm (no file downloads),
with two-level config (global default + per-share override). Adds
dir-refresh status badges and timestamps to the web UI shares tab and
CLI status output, following the same pattern as warmup/warmed.

- src/scheduler.rs: New generic ScheduledTask runner with generation-based
  cancellation and parse_interval() helper
- src/rclone/rc.rs: Add vfs_refresh() RC API call
- src/config.rs: Add DirRefreshConfig, per-share dir_refresh_interval
  override, effective_dir_refresh_interval() resolution method
- src/config_diff.rs: Track dir_refresh_changed for hot-reload
- src/daemon.rs: Track per-share last_dir_refresh timestamps (HashMap),
  add dir_refresh_ago_for() helper and format_ago()
- src/supervisor.rs: spawn_dir_refresh() per-share background threads,
  called on startup and config reload
- src/web/api.rs: Expose dir_refresh_active + last_dir_refresh_ago in
  ShareStatusResponse
- src/web/pages.rs: Populate dir_refresh_active + last_dir_refresh_ago
  in ShareView and ShareDetailView
- templates/web/tabs/shares.html: DIR-REFRESH badge (yellow=pending,
  green=N ago) in health column; Dir Refresh row in detail panel
- templates/web/tabs/config.html: Dir Refresh section and per-share
  interval field in interactive config editor
- src/cli/status.rs: Append Dir-Refresh suffix to mount status lines

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
grabbit 2026-02-19 10:54:08 +08:00
parent 15f915fbee
commit 74b0e72549
12 changed files with 557 additions and 50 deletions

View File

@ -34,6 +34,10 @@ struct ApiShare {
warmup_done: Option<usize>,
#[serde(default)]
warmup_total: Option<usize>,
#[serde(default)]
dir_refresh_active: Option<bool>,
#[serde(default)]
last_dir_refresh_ago: Option<String>,
}
pub fn run(config: &Config) -> Result<()> {
@ -71,10 +75,20 @@ fn print_api_status(api: &ApiStatus) -> Result<()> {
_ => String::new(),
};
// Build dir-refresh suffix
let dir_refresh_suffix = if share.dir_refresh_active == Some(true) {
match share.last_dir_refresh_ago.as_deref() {
Some(ago) => format!("\tDir-Refresh {ago}"),
None => "\tDir-Refresh pending...".to_string(),
}
} else {
String::new()
};
match share.health.as_str() {
"OK" => {
if share.mounted {
println!("Mount: OK {}{}", share.name, warmup_suffix);
println!("Mount: OK {}{}{}", share.name, warmup_suffix, dir_refresh_suffix);
any_active = true;
} else {
println!("Mount: DOWN {} — mount lost", share.name);

View File

@ -4,6 +4,7 @@
//! Environment variables can override config file values (prefixed with `WARPGATE_`).
use std::path::{Path, PathBuf};
use std::time::Duration;
use anyhow::{Context, Result};
use serde::{Deserialize, Serialize};
@ -28,6 +29,8 @@ pub struct Config {
pub warmup: WarmupConfig,
#[serde(default)]
pub smb_auth: SmbAuthConfig,
#[serde(default)]
pub dir_refresh: DirRefreshConfig,
pub shares: Vec<ShareConfig>,
}
@ -185,6 +188,31 @@ pub struct SmbAuthConfig {
pub smb_pass: Option<String>,
}
/// Directory listing cache refresh — actively re-fetches directory metadata
/// so clients see new remote files without waiting for cache expiry.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct DirRefreshConfig {
/// Enable periodic directory refresh (default: false).
#[serde(default)]
pub enabled: bool,
/// Refresh interval (e.g. "30m", "1h"). Parsed by `scheduler::parse_interval`.
#[serde(default = "default_dir_refresh_interval")]
pub interval: String,
/// Refresh subdirectories recursively.
#[serde(default = "default_true")]
pub recursive: bool,
}
impl Default for DirRefreshConfig {
fn default() -> Self {
Self {
enabled: false,
interval: "30m".into(),
recursive: true,
}
}
}
/// A single share exported as SMB/NFS, each with its own rclone mount.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ShareConfig {
@ -199,6 +227,10 @@ pub struct ShareConfig {
/// Export as read-only.
#[serde(default)]
pub read_only: bool,
/// Override the global `dir_refresh.interval` for this share.
/// `"0"` disables refresh for this share. `None` inherits the global setting.
#[serde(default)]
pub dir_refresh_interval: Option<String>,
}
// --- Default value functions ---
@ -245,6 +277,9 @@ fn default_transfers() -> u32 {
fn default_dir_cache_time() -> String {
"1h".into()
}
/// Serde default for `DirRefreshConfig::interval`: refresh every 30 minutes.
fn default_dir_refresh_interval() -> String {
    String::from("30m")
}
fn default_nfs_network() -> String {
"192.168.0.0/24".into()
}
@ -288,6 +323,23 @@ impl Config {
RC_BASE_PORT + share_index as u16
}
/// Effective directory refresh interval for a share.
///
/// Resolution order:
/// 1. Per-share `dir_refresh_interval` if set (`"0"` → `None`).
/// 2. Global `dir_refresh.enabled` + `dir_refresh.interval` otherwise.
pub fn effective_dir_refresh_interval(&self, share: &ShareConfig) -> Option<Duration> {
    match share.dir_refresh_interval.as_deref() {
        // A per-share override always wins; "0" (or unparseable input)
        // yields None, i.e. refresh disabled for this share.
        Some(override_interval) => crate::scheduler::parse_interval(override_interval),
        // No override: defer to the global setting, if enabled at all.
        None if self.dir_refresh.enabled => {
            crate::scheduler::parse_interval(&self.dir_refresh.interval)
        }
        None => None,
    }
}
/// Serialize config to human-readable TOML with section comments.
///
/// Unlike `toml::to_string_pretty`, this produces output that mirrors
@ -380,6 +432,14 @@ impl Config {
}
writeln!(out).unwrap();
// --- Dir Refresh ---
writeln!(out, "# --- Dir Refresh (change = no restart) ---").unwrap();
writeln!(out, "[dir_refresh]").unwrap();
writeln!(out, "enabled = {}", self.dir_refresh.enabled).unwrap();
writeln!(out, "interval = {:?}", self.dir_refresh.interval).unwrap();
writeln!(out, "recursive = {}", self.dir_refresh.recursive).unwrap();
writeln!(out).unwrap();
// --- Shares ---
writeln!(out, "# --- Shares (change = per-share restart) ---").unwrap();
for share in &self.shares {
@ -391,6 +451,9 @@ impl Config {
if share.read_only {
writeln!(out, "read_only = true").unwrap();
}
if let Some(ref interval) = share.dir_refresh_interval {
writeln!(out, "dir_refresh_interval = {:?}", interval).unwrap();
}
writeln!(out).unwrap();
}

View File

@ -30,6 +30,8 @@ pub struct ConfigDiff {
pub global_changed: bool,
/// Warmup settings changed (no restart needed, just update in-memory config).
pub warmup_changed: bool,
/// Dir-refresh settings changed (no restart needed, just re-spawn background threads).
pub dir_refresh_changed: bool,
}
impl ConfigDiff {
@ -45,6 +47,7 @@ impl ConfigDiff {
&& self.connections_modified.is_empty()
&& !self.global_changed
&& !self.warmup_changed
&& !self.dir_refresh_changed
}
/// Returns the highest tier of change detected.
@ -104,6 +107,9 @@ impl ConfigDiff {
if self.warmup_changed {
parts.push("warmup settings changed".to_string());
}
if self.dir_refresh_changed {
parts.push("dir-refresh settings changed".to_string());
}
if parts.is_empty() {
"no changes detected".to_string()
} else {
@ -250,6 +256,24 @@ pub fn diff(old: &Config, new: &Config) -> ConfigDiff {
.zip(new.warmup.rules.iter())
.any(|(o, n)| o.share != n.share || o.path != n.path || o.newer_than != n.newer_than);
// Dir-refresh changes (no restart needed, just re-spawn background threads)
let dir_refresh_global_changed = old.dir_refresh != new.dir_refresh;
let dir_refresh_per_share_changed = {
// Compare per-share overrides by (name → dir_refresh_interval) mapping
let old_map: std::collections::HashMap<&str, Option<&str>> = old
.shares
.iter()
.map(|s| (s.name.as_str(), s.dir_refresh_interval.as_deref()))
.collect();
let new_map: std::collections::HashMap<&str, Option<&str>> = new
.shares
.iter()
.map(|s| (s.name.as_str(), s.dir_refresh_interval.as_deref()))
.collect();
old_map != new_map
};
d.dir_refresh_changed = dir_refresh_global_changed || dir_refresh_per_share_changed;
d
}
@ -323,6 +347,7 @@ mount_point = "/mnt/photos"
remote_path: "/videos".to_string(),
mount_point: "/mnt/videos".into(),
read_only: false,
dir_refresh_interval: None,
});
let d = diff(&old, &new);
assert_eq!(d.shares_added, vec!["videos"]);
@ -340,6 +365,7 @@ mount_point = "/mnt/photos"
remote_path: "/videos".to_string(),
mount_point: "/mnt/videos".into(),
read_only: false,
dir_refresh_interval: None,
});
let d = diff(&old, &new);
assert_eq!(d.shares_removed, vec!["photos"]);

View File

@ -3,8 +3,9 @@
//! The supervisor owns all mutable state. The web server gets read-only access
//! to status via `Arc<RwLock<DaemonStatus>>` and sends commands via an mpsc channel.
use std::collections::VecDeque;
use std::collections::{HashMap, VecDeque};
use std::path::PathBuf;
use std::sync::atomic::AtomicU64;
use std::sync::mpsc;
use std::sync::{Arc, RwLock};
use std::time::{Instant, SystemTime, UNIX_EPOCH};
@ -107,6 +108,15 @@ pub struct DaemonStatus {
/// Generation counter — incremented each time warmup is (re)started.
/// Workers check this to detect when they've been superseded.
pub warmup_generation: u64,
/// Whether any dir-refresh threads are active.
pub dir_refresh_running: bool,
/// Timestamp of the last successful dir-refresh call, keyed by share name.
pub last_dir_refresh: HashMap<String, SystemTime>,
/// Generation counter for dir-refresh threads (incremented on each re-spawn).
pub dir_refresh_generation: u64,
/// Shared atomic for generation — dir-refresh threads hold a clone and check
/// this directly (without taking the RwLock) to detect supersession.
pub dir_refresh_gen_arc: Arc<AtomicU64>,
}
impl DaemonStatus {
@ -134,6 +144,10 @@ impl DaemonStatus {
nfs_exported: false,
warmup: Vec::new(),
warmup_generation: 0,
dir_refresh_running: false,
last_dir_refresh: HashMap::new(),
dir_refresh_generation: 0,
dir_refresh_gen_arc: Arc::new(AtomicU64::new(0)),
}
}
@ -178,6 +192,15 @@ impl DaemonStatus {
(label, done, total)
}
/// How long ago the last dir-refresh ran for this share.
///
/// Returns `None` if no refresh has completed yet for the share.
pub fn dir_refresh_ago_for(&self, share_name: &str) -> Option<String> {
    // `elapsed()` can fail if the system clock moved backwards; treat that
    // as zero elapsed time rather than propagating an error.
    self.last_dir_refresh
        .get(share_name)
        .map(|ts| format_ago(ts.elapsed().unwrap_or_default().as_secs()))
}
/// Format uptime as a human-readable string.
pub fn uptime_string(&self) -> String {
let secs = self.started_at.elapsed().as_secs();
@ -257,6 +280,17 @@ impl ShareStatus {
}
}
/// Format an elapsed-seconds count as "5s ago", "3m ago", "2h ago", "2d ago".
///
/// Each tier truncates (integer division), matching the coarse "ago"
/// style used in the UI badges and CLI status lines. The day tier keeps
/// long gaps readable ("3d ago" instead of "72h ago").
fn format_ago(secs: u64) -> String {
    if secs < 60 {
        format!("{secs}s ago")
    } else if secs < 3600 {
        format!("{}m ago", secs / 60)
    } else if secs < 86_400 {
        format!("{}h ago", secs / 3600)
    } else {
        format!("{}d ago", secs / 86_400)
    }
}
/// Format bytes as human-readable (e.g. "45.2 GiB").
fn format_bytes(bytes: u64) -> String {
const KIB: f64 = 1024.0;

View File

@ -4,6 +4,7 @@ mod config_diff;
mod daemon;
mod deploy;
mod rclone;
mod scheduler;
mod services;
mod supervisor;
mod web;

View File

@ -85,6 +85,17 @@ pub fn vfs_forget(port: u16, dir: &str) -> Result<()> {
Ok(())
}
/// Call `vfs/refresh` — refresh directory listing cache.
///
/// Triggers rclone to re-fetch directory metadata from the remote.
/// Does NOT download file contents — only refreshes directory entries.
pub fn vfs_refresh(port: u16, path: &str, recursive: bool) -> Result<()> {
    let url = format!("{}/vfs/refresh", rc_addr(port));
    let body = serde_json::json!({
        "dir": path,
        "recursive": recursive,
    });
    // Non-2xx responses surface as Err via `?`; the response body is ignored.
    ureq::post(url).send_json(body)?;
    Ok(())
}
/// Call `core/bwlimit` — get or set bandwidth limits.
///
/// If both `upload` and `download` are `None`, returns current limits.

202
src/scheduler.rs Normal file
View File

@ -0,0 +1,202 @@
//! Generic scheduled task runner with generation-based cancellation.
//!
//! Each `ScheduledTask` spawns a background thread that waits `interval`,
//! runs `work`, repeats — and exits cleanly when `shutdown` is set or the
//! generation counter advances past `generation`.
use std::sync::atomic::{AtomicBool, AtomicU64, Ordering};
use std::sync::Arc;
use std::thread;
use std::time::{Duration, Instant};
/// A named periodic task.
///
/// Consumed by [`ScheduledTask::spawn`], which runs a work closure on a
/// dedicated background thread every `interval` until cancelled.
pub struct ScheduledTask {
    /// Label used to prefix error output from the background thread.
    pub name: &'static str,
    /// Delay between consecutive runs of the work closure.
    pub interval: Duration,
}
impl ScheduledTask {
    /// Cancellation check granularity: how often the sleep loop wakes to check
    /// for shutdown / generation change. Kept short enough for responsive cancellation
    /// without meaningfully impacting CPU usage for long intervals (30m, 1h, etc.).
    const CHECK_INTERVAL: Duration = Duration::from_millis(100);

    /// Spawn a background thread that runs `work` every `interval`.
    ///
    /// The thread exits when:
    /// - `shutdown` is set to true, or
    /// - `gen_arc` no longer equals `generation` (superseded by a newer spawn).
    ///
    /// Errors returned by `work` are logged (prefixed with `name`) and the
    /// schedule continues — one failed run does not stop the task.
    pub fn spawn<F>(
        self,
        generation: u64,
        gen_arc: Arc<AtomicU64>,
        shutdown: Arc<AtomicBool>,
        work: F,
    ) where
        F: Fn() -> anyhow::Result<()> + Send + 'static,
    {
        thread::spawn(move || loop {
            let deadline = Instant::now() + self.interval;
            // Sleep in short increments so cancellation is detected quickly.
            // Sleeping min(remaining, CHECK_INTERVAL) means we never overshoot
            // the deadline by up to a whole CHECK_INTERVAL.
            loop {
                if shutdown.load(Ordering::SeqCst)
                    || gen_arc.load(Ordering::SeqCst) != generation
                {
                    return;
                }
                let remaining = deadline.saturating_duration_since(Instant::now());
                if remaining.is_zero() {
                    // Cancellation was checked at the top of this iteration,
                    // so it is safe to run the work now.
                    break;
                }
                thread::sleep(remaining.min(Self::CHECK_INTERVAL));
            }
            if let Err(e) = work() {
                eprintln!("[{}] error: {e}", self.name);
            }
        });
    }
}
/// Parse an interval string into a `Duration`.
///
/// Supported suffixes: `d` (days), `h` (hours), `m` (minutes), `s` (seconds).
/// A bare number is treated as seconds.
/// Returns `None` for `"0"`, `""`, or invalid input — callers treat `None`
/// as "refresh disabled".
pub fn parse_interval(s: &str) -> Option<Duration> {
    let s = s.trim();
    if s.is_empty() || s == "0" {
        return None;
    }
    let (num_str, multiplier) = if let Some(n) = s.strip_suffix('d') {
        (n, 86_400u64)
    } else if let Some(n) = s.strip_suffix('h') {
        (n, 3600u64)
    } else if let Some(n) = s.strip_suffix('m') {
        (n, 60u64)
    } else if let Some(n) = s.strip_suffix('s') {
        (n, 1u64)
    } else {
        // No suffix: interpret as seconds.
        (s, 1u64)
    };
    num_str.parse::<u64>().ok().and_then(|n| {
        // checked_mul guards against overflow on absurd inputs; a zero
        // quantity ("0m") still means "disabled".
        let secs = n.checked_mul(multiplier)?;
        if secs == 0 {
            None
        } else {
            Some(Duration::from_secs(secs))
        }
    })
}
#[cfg(test)]
mod tests {
    use super::*;

    // --- parse_interval: rejection cases ---

    #[test]
    fn test_parse_interval_zero() {
        // "0" is the documented "disabled" sentinel — must map to None.
        assert!(parse_interval("0").is_none());
    }

    #[test]
    fn test_parse_interval_empty() {
        // Empty and whitespace-only input is invalid (input is trimmed first).
        assert!(parse_interval("").is_none());
        assert!(parse_interval(" ").is_none());
    }

    // --- parse_interval: accepted units ---

    #[test]
    fn test_parse_interval_hours() {
        assert_eq!(parse_interval("1h"), Some(Duration::from_secs(3600)));
        assert_eq!(parse_interval("2h"), Some(Duration::from_secs(7200)));
    }

    #[test]
    fn test_parse_interval_minutes() {
        assert_eq!(parse_interval("30m"), Some(Duration::from_secs(1800)));
        assert_eq!(parse_interval("1m"), Some(Duration::from_secs(60)));
    }

    #[test]
    fn test_parse_interval_seconds() {
        assert_eq!(parse_interval("90s"), Some(Duration::from_secs(90)));
        assert_eq!(parse_interval("10s"), Some(Duration::from_secs(10)));
    }

    #[test]
    fn test_parse_interval_bare_number() {
        // No suffix means seconds.
        assert_eq!(parse_interval("60"), Some(Duration::from_secs(60)));
    }

    #[test]
    fn test_parse_interval_invalid() {
        // Non-numeric input and unknown suffixes are rejected.
        assert!(parse_interval("abc").is_none());
        assert!(parse_interval("10x").is_none());
    }

    #[test]
    fn test_parse_interval_zero_minutes() {
        // A zero quantity with a valid suffix still means "disabled".
        assert!(parse_interval("0m").is_none());
    }

    // --- ScheduledTask: cancellation behavior ---

    #[test]
    fn test_scheduled_task_exits_on_shutdown() {
        use std::sync::atomic::AtomicU64;
        use std::sync::Arc;

        let gen_arc = Arc::new(AtomicU64::new(1));
        let shutdown = Arc::new(AtomicBool::new(false));
        let called = Arc::new(AtomicBool::new(false));
        let called2 = Arc::clone(&called);
        let shutdown2 = Arc::clone(&shutdown);

        // Very short interval so the test completes quickly.
        ScheduledTask {
            name: "test",
            interval: Duration::from_millis(50),
        }
        .spawn(1, Arc::clone(&gen_arc), Arc::clone(&shutdown), move || {
            // The work closure itself requests shutdown, so the thread
            // runs at most a couple of times before exiting.
            called2.store(true, Ordering::SeqCst);
            shutdown2.store(true, Ordering::SeqCst);
            Ok(())
        });

        thread::sleep(Duration::from_millis(200));
        assert!(called.load(Ordering::SeqCst));
    }

    #[test]
    fn test_scheduled_task_exits_on_generation_change() {
        use std::sync::atomic::AtomicU64;
        use std::sync::Arc;

        let gen_arc = Arc::new(AtomicU64::new(1));
        let shutdown = Arc::new(AtomicBool::new(false));
        let call_count = Arc::new(std::sync::atomic::AtomicU32::new(0));
        let count2 = Arc::clone(&call_count);
        let gen2 = Arc::clone(&gen_arc);

        ScheduledTask {
            name: "test",
            interval: Duration::from_millis(50),
        }
        .spawn(1, Arc::clone(&gen_arc), Arc::clone(&shutdown), move || {
            count2.fetch_add(1, Ordering::SeqCst);
            // Bump generation to signal the thread should exit after this call.
            gen2.store(2, Ordering::SeqCst);
            Ok(())
        });

        thread::sleep(Duration::from_millis(300));
        // Should have run exactly once and then exited.
        assert_eq!(call_count.load(Ordering::SeqCst), 1);
        shutdown.store(true, Ordering::SeqCst);
    }
}

View File

@ -7,17 +7,18 @@
use std::os::unix::process::CommandExt;
use std::path::PathBuf;
use std::process::{Child, Command};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::atomic::{AtomicBool, AtomicU64, Ordering};
use std::sync::mpsc::{self, RecvTimeoutError};
use std::sync::{Arc, RwLock};
use std::thread;
use std::time::{Duration, Instant};
use std::time::{Duration, Instant, SystemTime};
use anyhow::{Context, Result};
use crate::config::Config;
use crate::config_diff::{self, ChangeTier};
use crate::daemon::{DaemonStatus, ShareHealth, SupervisorCmd, WarmupRuleState, WarmupRuleStatus};
use crate::scheduler::ScheduledTask;
use crate::rclone::mount::{build_mount_args, is_mounted};
use crate::rclone::rc;
use crate::services::{nfs, samba, webdav};
@ -197,6 +198,8 @@ pub fn run(config: &Config, config_path: PathBuf) -> Result<()> {
// Phase 3.5: Auto-warmup in background thread (non-blocking)
spawn_warmup(config, &shared_status, &shutdown);
// Phase 3.6: Dir-refresh background threads (non-blocking)
spawn_dir_refresh(config, &shared_status, &shutdown);
// Phase 4: Supervision loop with command channel
println!("Supervision active. Web UI at http://localhost:8090. Press Ctrl+C to stop.");
@ -293,6 +296,71 @@ fn spawn_warmup(
});
}
/// Spawn per-share background threads that periodically call `vfs/refresh` to
/// keep the rclone directory listing cache warm.
///
/// Bumps the dir-refresh generation counter so any previous threads detect
/// that they've been superseded and exit cleanly. Each share whose effective
/// interval is non-zero gets its own `ScheduledTask` thread.
///
/// Safe to call repeatedly (startup and on every config reload): the
/// generation bump below happens *before* the new threads are spawned, so
/// old and new threads never both believe they are current.
fn spawn_dir_refresh(
    config: &Config,
    shared_status: &Arc<RwLock<DaemonStatus>>,
    shutdown: &Arc<AtomicBool>,
) {
    // Quick check: skip entirely if no share will actually refresh.
    let any_active = config
        .shares
        .iter()
        .any(|s| config.effective_dir_refresh_interval(s).is_some());
    if !any_active {
        return;
    }

    // Bump generation and clone the shared Arc<AtomicU64> for threads.
    // Both the counter in DaemonStatus and the atomic are updated under the
    // write lock so readers never observe them out of sync.
    // NOTE(review): dir_refresh_running is set true here but nothing visible
    // in this block ever resets it to false — confirm intended lifecycle.
    let gen_arc: Arc<AtomicU64> = {
        let mut s = shared_status.write().unwrap();
        s.dir_refresh_generation += 1;
        s.dir_refresh_running = true;
        let g = s.dir_refresh_generation;
        s.dir_refresh_gen_arc.store(g, Ordering::SeqCst);
        Arc::clone(&s.dir_refresh_gen_arc)
    };
    // Snapshot the generation we just published; spawned threads exit when
    // the shared atomic no longer matches this value.
    let generation = gen_arc.load(Ordering::SeqCst);

    for (i, share) in config.shares.iter().enumerate() {
        // Per-share override resolution; None means refresh disabled here.
        let interval = match config.effective_dir_refresh_interval(share) {
            Some(d) => d,
            None => continue,
        };

        // Clone everything the 'static worker closure needs to own.
        let share_name = share.name.clone();
        let recursive = config.dir_refresh.recursive;
        let rc_port = config.rc_port(i);
        let status = Arc::clone(shared_status);
        let gen_arc2 = Arc::clone(&gen_arc);
        let sd = Arc::clone(shutdown);

        println!(
            " dir-refresh: scheduling '{}' every {}s",
            share_name,
            interval.as_secs()
        );

        ScheduledTask {
            name: "dir-refresh",
            interval,
        }
        .spawn(generation, gen_arc2, sd, move || {
            // Refresh from the mount root; errors propagate so ScheduledTask
            // logs them (with share context) and the schedule continues.
            rc::vfs_refresh(rc_port, "/", recursive)
                .with_context(|| format!("dir-refresh for '{share_name}'"))?;
            println!(" dir-refresh OK: {share_name}");
            // Record the success timestamp for the UI/CLI "N ago" display.
            let mut s = status.write().unwrap();
            s.last_dir_refresh.insert(share_name.clone(), SystemTime::now());
            Ok(())
        });
    }
}
/// Write rclone config and create directories (protocol configs generated after probe).
fn preflight(config: &Config) -> Result<()> {
// Ensure mount points exist for each share
@ -963,6 +1031,12 @@ fn handle_reload(
spawn_warmup(&new_config, shared_status, shutdown);
}
// Re-trigger dir-refresh if settings changed
if diff.dir_refresh_changed {
println!(" Dir-refresh settings changed, re-triggering...");
spawn_dir_refresh(&new_config, shared_status, shutdown);
}
Ok(())
}

View File

@ -10,6 +10,7 @@ use axum::routing::{get, post};
use axum::Router;
use serde::Serialize;
use crate::config::Config;
use crate::daemon::{LogEntry, SupervisorCmd};
use crate::web::SharedState;
@ -64,18 +65,22 @@ struct ShareStatusResponse {
warmup_state: String,
warmup_done: usize,
warmup_total: usize,
dir_refresh_active: bool,
last_dir_refresh_ago: Option<String>,
}
async fn get_status(State(state): State<SharedState>) -> Json<StatusResponse> {
let status = state.status.read().unwrap();
Json(StatusResponse {
uptime: status.uptime_string(),
shares: status
.shares
.iter()
.map(|s| {
let (warmup_state, warmup_done, warmup_total) =
status.warmup_summary_for(&s.name);
/// Build a `ShareStatusResponse` for one share, including dir-refresh fields.
fn share_to_response(
s: &crate::daemon::ShareStatus,
status: &crate::daemon::DaemonStatus,
config: &Config,
) -> ShareStatusResponse {
let (warmup_state, warmup_done, warmup_total) = status.warmup_summary_for(&s.name);
let dir_refresh_active = config
.find_share(&s.name)
.map(|sc| config.effective_dir_refresh_interval(sc).is_some())
.unwrap_or(false);
let last_dir_refresh_ago = status.dir_refresh_ago_for(&s.name);
ShareStatusResponse {
name: s.name.clone(),
mounted: s.mounted,
@ -93,9 +98,29 @@ async fn get_status(State(state): State<SharedState>) -> Json<StatusResponse> {
warmup_state: warmup_state.to_string(),
warmup_done,
warmup_total,
dir_refresh_active,
last_dir_refresh_ago,
}
})
.collect(),
}
/// Build all share status responses.
fn build_share_status_responses(
    status: &crate::daemon::DaemonStatus,
    config: &Config,
) -> Vec<ShareStatusResponse> {
    let mut responses = Vec::with_capacity(status.shares.len());
    for share in &status.shares {
        responses.push(share_to_response(share, status, config));
    }
    responses
}
async fn get_status(State(state): State<SharedState>) -> Json<StatusResponse> {
let status = state.status.read().unwrap();
let config = state.config.read().unwrap();
Json(StatusResponse {
uptime: status.uptime_string(),
shares: build_share_status_responses(&status, &config),
smbd_running: status.smbd_running,
webdav_running: status.webdav_running,
nfs_exported: status.nfs_exported,
@ -131,30 +156,13 @@ async fn get_share_status(
Path(share_name): Path<String>,
) -> Result<Json<ShareStatusResponse>, StatusCode> {
let status = state.status.read().unwrap();
let config = state.config.read().unwrap();
let share = status
.shares
.iter()
.find(|s| s.name == share_name)
.ok_or(StatusCode::NOT_FOUND)?;
let (warmup_state, warmup_done, warmup_total) = status.warmup_summary_for(&share.name);
Ok(Json(ShareStatusResponse {
name: share.name.clone(),
mounted: share.mounted,
rc_port: share.rc_port,
cache_bytes: share.cache_bytes,
cache_display: share.cache_display(),
dirty_count: share.dirty_count,
errored_files: share.errored_files,
speed: share.speed,
speed_display: share.speed_display(),
transfers: share.transfers,
errors: share.errors,
health: share.health_label().to_string(),
health_message: share.health_message().map(|m| m.to_string()),
warmup_state: warmup_state.to_string(),
warmup_done,
warmup_total,
}))
Ok(Json(share_to_response(share, &status, &config)))
}
/// GET /api/config — current config as JSON.

View File

@ -49,6 +49,8 @@ struct ShareView {
warmup_state: String,
warmup_done: usize,
warmup_total: usize,
dir_refresh_active: bool,
last_dir_refresh_ago: String,
}
/// Extended share view for the shares table with all detail fields.
@ -73,6 +75,8 @@ struct ShareDetailView {
warmup_done: usize,
warmup_total: usize,
warmup_rules: Vec<WarmupRuleView>,
dir_refresh_active: bool,
last_dir_refresh_ago: String,
}
/// View model for a single warmup rule in the shares detail panel.
@ -97,6 +101,10 @@ fn build_share_views(status: &DaemonStatus, config: &Config) -> Vec<ShareView> {
let sc = config.find_share(&s.name);
let (warmup_state, warmup_done, warmup_total) =
status.warmup_summary_for(&s.name);
let dir_refresh_active = sc
.map(|sc| config.effective_dir_refresh_interval(sc).is_some())
.unwrap_or(false);
let last_dir_refresh_ago = status.dir_refresh_ago_for(&s.name).unwrap_or_default();
ShareView {
name: s.name.clone(),
connection: sc.map(|c| c.connection.clone()).unwrap_or_default(),
@ -113,6 +121,8 @@ fn build_share_views(status: &DaemonStatus, config: &Config) -> Vec<ShareView> {
warmup_state: warmup_state.to_string(),
warmup_done,
warmup_total,
dir_refresh_active,
last_dir_refresh_ago,
}
})
.collect()
@ -127,6 +137,10 @@ fn build_share_detail_views(status: &DaemonStatus, config: &Config) -> Vec<Share
let sc = config.find_share(&s.name);
let (warmup_state, warmup_done, warmup_total) =
status.warmup_summary_for(&s.name);
let dir_refresh_active = sc
.map(|sc| config.effective_dir_refresh_interval(sc).is_some())
.unwrap_or(false);
let last_dir_refresh_ago = status.dir_refresh_ago_for(&s.name).unwrap_or_default();
// Build per-rule views for this share
let warmup_rules: Vec<WarmupRuleView> = status
@ -176,6 +190,8 @@ fn build_share_detail_views(status: &DaemonStatus, config: &Config) -> Vec<Share
warmup_done,
warmup_total,
warmup_rules,
dir_refresh_active,
last_dir_refresh_ago,
}
})
.collect()

View File

@ -18,6 +18,7 @@ function configEditorFn() {
protocols: false,
smb_auth: false,
warmup: false,
dir_refresh: false,
},
init() {
@ -41,6 +42,9 @@ function configEditorFn() {
for (const rule of config.warmup.rules) {
if (rule.newer_than == null) rule.newer_than = '';
}
for (const share of config.shares) {
if (share.dir_refresh_interval == null) share.dir_refresh_interval = '';
}
return config;
},
@ -56,6 +60,9 @@ function configEditorFn() {
for (const rule of c.warmup.rules) {
if (!rule.newer_than) rule.newer_than = null;
}
for (const share of c.shares) {
if (!share.dir_refresh_interval) share.dir_refresh_interval = null;
}
return c;
},
@ -73,7 +80,8 @@ function configEditorFn() {
connection: this.config.connections[0]?.name || '',
remote_path: '/',
mount_point: '/mnt/',
read_only: false
read_only: false,
dir_refresh_interval: ''
});
},
@ -225,6 +233,10 @@ if (window.Alpine) {
Read Only
</label>
</div>
<div class="field-row" style="margin-top:12px">
<label>Dir Refresh Interval</label>
<input type="text" x-model="share.dir_refresh_interval" placeholder='blank = global, "0" = disable, e.g. 10m' style="max-width:320px">
</div>
</div>
</template>
<button type="button" @click="addShare()" class="add-btn">+ Add Share</button>
@ -463,6 +475,42 @@ if (window.Alpine) {
</div>
</section>
<!-- ═══ Section: Dir Refresh ═══ -->
<section class="config-section">
<div class="section-header" @click="sections.dir_refresh = !sections.dir_refresh">
<h3>Dir Refresh <span class="tier-badge tier-none">No restart</span></h3>
<span class="chevron" x-text="sections.dir_refresh ? '▾' : '▸'"></span>
</div>
<div class="section-body" x-show="sections.dir_refresh" x-transition>
<div class="field-row">
<label class="toggle">
<input type="checkbox" x-model="config.dir_refresh.enabled">
<span class="slider"></span>
Enable Periodic Directory Refresh
</label>
</div>
<div x-show="config.dir_refresh.enabled" x-transition>
<div class="field-grid" style="margin-top:12px">
<div class="field-row">
<label>Interval</label>
<input type="text" x-model="config.dir_refresh.interval" placeholder="e.g. 30m, 1h" style="max-width:300px">
</div>
</div>
<div class="field-row" style="margin-top:12px">
<label class="toggle">
<input type="checkbox" x-model="config.dir_refresh.recursive">
<span class="slider"></span>
Recursive (include subdirectories)
</label>
</div>
</div>
<p style="font-size:0.82em;color:var(--text-muted);margin-top:10px">
Proactively refreshes directory listings so Finder/Explorer sees new files without waiting for cache expiry.
Per-share overrides can be set in the Shares section above.
</p>
</div>
</section>
<!-- ═══ Form Actions ═══ -->
<div class="form-actions" style="margin-top:24px">
<button type="button" @click="submitConfig()" class="btn btn-primary" :disabled="submitting">

View File

@ -35,6 +35,13 @@
{% elif share.warmup_state == "complete" %}
<span class="badge badge-ok">WARMED</span>
{% endif %}
{% if share.dir_refresh_active %}
{% if share.last_dir_refresh_ago.is_empty() %}
<span class="badge badge-warn">DIR-REFRESH...</span>
{% else %}
<span class="badge badge-ok">DIR-REFRESH {{ share.last_dir_refresh_ago }}</span>
{% endif %}
{% endif %}
</td>
<td class="mono">{{ share.mount_point }}</td>
<td>{{ share.cache_display }}</td>
@ -76,6 +83,9 @@
<tr><td>Total Errors</td><td>{{ share.errors }}</td></tr>
<tr><td>Mounted</td><td>{% if share.mounted %}Yes{% else %}No{% endif %}</td></tr>
<tr><td>Read-Only</td><td>{% if share.read_only %}Yes{% else %}No{% endif %}</td></tr>
{% if share.dir_refresh_active %}
<tr><td>Dir Refresh</td><td>{% if share.last_dir_refresh_ago.is_empty() %}pending{% else %}{{ share.last_dir_refresh_ago }}{% endif %}</td></tr>
{% endif %}
</table>
{% if !share.warmup_rules.is_empty() %}
<h4 style="margin-top:1rem;margin-bottom:0.5rem;font-size:0.95em">Warmup Rules</h4>