Add pre-mount remote path probe and per-share health status

Before mounting, probe each share's remote path with `rclone lsf`
(10s timeout, parallel execution). Failed shares are skipped — they
never get mounted or exposed to SMB/NFS/WebDAV — preventing the
silent hang that occurred when rclone mounted a nonexistent directory.

- ShareHealth enum: Pending → Probing → Healthy / Failed(reason)
- Supervisor: probe phase between preflight and mount, protocol
  configs generated after probe with only healthy shares
- Web UI: health-aware badges (OK/FAILED/PROBING/PENDING) with
  error messages on dashboard, status partial, and share detail
- JSON API: health + health_message fields on /api/status
- CLI: `warpgate status` queries daemon API first for tri-state
  display (OK/FAILED/DOWN), falls back to direct mount checks

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
grabbit 2026-02-18 15:28:56 +08:00
parent ba1cae7f75
commit 466ea5cfa8
10 changed files with 541 additions and 48 deletions

View File

@ -1,18 +1,130 @@
//! `warpgate status` — show service status, cache stats, write-back queue, bandwidth.
//!
//! Prefers querying the daemon's web API for complete health information.
//! Falls back to direct mount/RC checks when the daemon API is unreachable.
use anyhow::Result;
use serde::Deserialize;
use crate::config::Config;
use crate::daemon::DEFAULT_WEB_PORT;
use crate::rclone::{mount, rc};
/// JSON response from GET /api/status.
#[derive(Deserialize)]
struct ApiStatus {
/// Per-share status entries, one per share known to the daemon.
shares: Vec<ApiShare>,
}
#[derive(Deserialize)]
struct ApiShare {
/// Share name (matches the share's name in the daemon config).
name: String,
/// Whether the FUSE mount is currently up.
mounted: bool,
/// Probe health label: "PENDING", "PROBING", "OK", or "FAILED".
health: String,
/// Probe failure reason; None unless health is "FAILED".
health_message: Option<String>,
/// Bytes currently held in the local VFS cache.
cache_bytes: u64,
/// Number of dirty (not yet uploaded) cache entries.
dirty_count: u64,
/// Files whose upload has errored.
errored_files: u64,
/// Current transfer speed in bytes/sec.
speed: f64,
/// Number of in-flight transfers.
transfers: u64,
/// Cumulative error count reported by rclone.
errors: u64,
}
/// Show service status: per-share mount health, cache stats, and bandwidth.
///
/// Prefers the daemon's web API (which carries probe-health information);
/// falls back to direct mount/RC checks when the daemon is unreachable.
pub fn run(config: &Config) -> Result<()> {
    match try_daemon_api() {
        // Daemon reachable: render its health-aware status report.
        Some(api) => print_api_status(&api),
        // Daemon down: inspect mounts and RC ports directly.
        None => print_direct_status(config),
    }
}
/// Try to reach the daemon's web API; None if unreachable or unparsable.
fn try_daemon_api() -> Option<ApiStatus> {
    let endpoint = format!("http://127.0.0.1:{DEFAULT_WEB_PORT}/api/status");
    ureq::get(&endpoint)
        .call()
        .ok()
        .and_then(|response| response.into_body().read_json().ok())
}
/// Print status using the daemon API response (includes probe health).
///
/// Emits one "Mount: …" line per share, then aggregate transfer/cache
/// stats computed over healthy ("OK") shares only.
fn print_api_status(api: &ApiStatus) -> Result<()> {
    let mut any_active = false;
    for share in &api.shares {
        match share.health.as_str() {
            "OK" => {
                // Probe succeeded; report whether the mount itself is alive.
                if share.mounted {
                    println!("Mount: OK {}", share.name);
                } else {
                    println!("Mount: DOWN {} — mount lost", share.name);
                }
                any_active = true;
            }
            "FAILED" => {
                let msg = share
                    .health_message
                    .as_deref()
                    .unwrap_or("probe failed");
                // BUGFIX: name and reason were previously printed glued
                // together ("{}{}"); separate them like the DOWN line does.
                println!("Mount: FAILED {} — {}", share.name, msg);
            }
            "PROBING" => {
                println!("Mount: PROBING {}", share.name);
            }
            // Anything else (normally "PENDING") has not been probed yet.
            _ => {
                println!("Mount: PENDING {}", share.name);
            }
        }
    }
    if !any_active {
        println!("\nNo healthy mounts are active.");
        return Ok(());
    }
    // Aggregate stats over healthy shares only — failed/pending shares
    // have no live rclone instance behind them.
    let mut total_speed = 0.0f64;
    let mut total_cache = 0u64;
    let mut total_dirty = 0u64;
    let mut total_transfers = 0u64;
    let mut total_errors = 0u64;
    let mut total_errored = 0u64;
    for share in api.shares.iter().filter(|s| s.health == "OK") {
        total_speed += share.speed;
        total_cache += share.cache_bytes;
        total_dirty += share.dirty_count;
        total_transfers += share.transfers;
        total_errors += share.errors;
        total_errored += share.errored_files;
    }
    println!("Speed: {}/s", format_bytes(total_speed as u64));
    println!("Active: {} transfers", total_transfers);
    println!("Errors: {}", total_errors);
    println!("Cache: {}", format_bytes(total_cache));
    println!("Dirty: {}", total_dirty);
    if total_errored > 0 {
        println!("Errored: {} files", total_errored);
    }
    Ok(())
}
/// Fallback: check mounts and RC API directly (no daemon API).
fn print_direct_status(config: &Config) -> Result<()> {
let mut any_mounted = false;
for share in &config.shares {
let mounted = match mount::is_mounted(&share.mount_point) {
Ok(m) => m,
Err(e) => {
eprintln!("Warning: could not check mount for '{}': {}", share.name, e);
eprintln!(
"Warning: could not check mount for '{}': {}",
share.name, e
);
false
}
};
@ -20,14 +132,14 @@ pub fn run(config: &Config) -> Result<()> {
let ro_tag = if share.read_only { " (ro)" } else { "" };
if mounted {
println!(
"Mount: UP {} → {}{}",
share.mount_point.display(),
"Mount: OK {} → {}{}",
share.name,
share.mount_point.display(),
ro_tag
);
any_mounted = true;
} else {
println!("Mount: DOWN {}{}", share.name, ro_tag);
println!("Mount: DOWN {}{}", share.name, ro_tag);
}
}
@ -68,17 +180,17 @@ pub fn run(config: &Config) -> Result<()> {
}
if rc_reachable {
println!("Speed: {}/s", format_bytes(total_speed as u64));
println!("Moved: {}", format_bytes(total_bytes));
println!("Active: {} transfers", total_transfers);
println!("Errors: {}", total_errors);
println!("Cache: {}", format_bytes(total_cache_used));
println!("Speed: {}/s", format_bytes(total_speed as u64));
println!("Moved: {}", format_bytes(total_bytes));
println!("Active: {} transfers", total_transfers);
println!("Errors: {}", total_errors);
println!("Cache: {}", format_bytes(total_cache_used));
println!(
"Dirty: {} uploading, {} queued",
"Dirty: {} uploading, {} queued",
total_uploading, total_queued
);
if total_errored > 0 {
println!("Errored: {} files", total_errored);
println!("Errored: {} files", total_errored);
}
} else {
eprintln!("Could not reach any rclone RC API.");

View File

@ -56,6 +56,7 @@ impl DaemonStatus {
speed: 0.0,
transfers: 0,
errors: 0,
health: ShareHealth::Pending,
})
.collect(),
smbd_running: false,
@ -100,6 +101,8 @@ pub struct ShareStatus {
pub transfers: u64,
/// Cumulative error count.
pub errors: u64,
/// Pre-mount probe result.
pub health: ShareHealth,
}
impl ShareStatus {
@ -116,6 +119,29 @@ impl ShareStatus {
format!("{}/s", format_bytes(self.speed as u64))
}
}
/// Human-readable health label: "PENDING", "PROBING", "OK", or "FAILED".
pub fn health_label(&self) -> &str {
match &self.health {
ShareHealth::Pending => "PENDING",
ShareHealth::Probing => "PROBING",
ShareHealth::Healthy => "OK",
ShareHealth::Failed(_) => "FAILED",
}
}
/// Error message when health is Failed, None otherwise.
pub fn health_message(&self) -> Option<&str> {
    if let ShareHealth::Failed(msg) = &self.health {
        Some(msg.as_str())
    } else {
        None
    }
}
/// Whether the share is healthy (probe succeeded).
pub fn is_healthy(&self) -> bool {
    matches!(self.health, ShareHealth::Healthy)
}
}
/// Format bytes as human-readable (e.g. "45.2 GiB").
@ -139,6 +165,19 @@ fn format_bytes(bytes: u64) -> String {
}
}
/// Per-share health state from pre-mount probing.
///
/// Lifecycle: `Pending` → `Probing` → `Healthy` or `Failed(reason)`.
// `Eq` added: equality here is a true equivalence (String is Eq), and
// deriving PartialEq without Eq trips clippy::derive_partial_eq_without_eq.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum ShareHealth {
    /// Not yet probed (initial state).
    Pending,
    /// Probe in progress.
    Probing,
    /// Remote path verified, ready to mount.
    Healthy,
    /// Probe failed — share will not be mounted; the payload is the
    /// user-facing failure reason.
    Failed(String),
}
/// Commands sent from the web server (or CLI) to the supervisor.
pub enum SupervisorCmd {
/// Apply a new configuration (triggers tiered reload).
@ -195,6 +234,7 @@ mod tests {
speed: 2_200_000.0,
transfers: 2,
errors: 0,
health: ShareHealth::Healthy,
};
assert!(share.cache_display().contains("GiB"));
assert!(share.speed_display().contains("/s"));
@ -212,7 +252,39 @@ mod tests {
speed: 0.0,
transfers: 0,
errors: 0,
health: ShareHealth::Pending,
};
assert_eq!(share.speed_display(), "-");
}
// Walks one ShareStatus through every ShareHealth variant and checks the
// three accessors (health_label / health_message / is_healthy) at each step.
#[test]
fn test_share_health_labels() {
let mut share = ShareStatus {
name: "test".into(),
mounted: false,
rc_port: 0,
cache_bytes: 0,
dirty_count: 0,
errored_files: 0,
speed: 0.0,
transfers: 0,
errors: 0,
health: ShareHealth::Pending,
};
// Initial state: unprobed, not healthy, no message.
assert_eq!(share.health_label(), "PENDING");
assert!(share.health_message().is_none());
assert!(!share.is_healthy());
share.health = ShareHealth::Probing;
assert_eq!(share.health_label(), "PROBING");
share.health = ShareHealth::Healthy;
assert_eq!(share.health_label(), "OK");
assert!(share.is_healthy());
// Failed carries the probe error, surfaced via health_message().
share.health = ShareHealth::Failed("remote path not found".into());
assert_eq!(share.health_label(), "FAILED");
assert_eq!(share.health_message(), Some("remote path not found"));
assert!(!share.is_healthy());
}
}

View File

@ -1,3 +1,4 @@
pub mod config;
pub mod mount;
pub mod probe;
pub mod rc;

156
src/rclone/probe.rs Normal file
View File

@ -0,0 +1,156 @@
//! Pre-mount remote path probing using `rclone lsf`.
//!
//! Before mounting, we verify that each share's remote path actually exists.
//! This prevents rclone from mounting a FUSE filesystem that silently fails
//! when clients try to access it.
use std::process::Command;
use std::time::Duration;
use anyhow::{Context, Result};
use crate::config::{Config, ShareConfig};
use crate::rclone::config::RCLONE_CONF_PATH;
/// Probe timeout per share.
const PROBE_TIMEOUT: Duration = Duration::from_secs(10);
/// Probe whether a remote path exists using `rclone lsf`.
///
/// Runs: `rclone lsf <connection>:<remote_path> --max-depth 1 --config <rclone.conf>`
///
/// Returns `Ok(())` if the directory exists, `Err` with a descriptive message if not.
///
/// stderr is drained on a dedicated thread while we poll for exit: reading
/// it only after the child terminates risks a pipe deadlock — if rclone
/// emits more stderr than the OS pipe buffer holds, it blocks on write
/// while we block waiting for exit, and the probe would always hit the
/// 10s timeout with its diagnostics lost.
pub fn probe_remote_path(_config: &Config, share: &ShareConfig) -> Result<()> {
    let remote = format!("{}:{}", share.connection, share.remote_path);
    let mut child = Command::new("rclone")
        .args([
            "lsf",
            &remote,
            "--max-depth",
            "1",
            "--config",
            RCLONE_CONF_PATH,
        ])
        .stdout(std::process::Stdio::null())
        .stderr(std::process::Stdio::piped())
        .spawn()
        .with_context(|| format!("Failed to spawn rclone lsf for '{}'", share.name))?;
    // Drain stderr concurrently so the child can never block on a full pipe.
    let stderr_reader = child.stderr.take().map(|mut err| {
        std::thread::spawn(move || {
            let mut buf = String::new();
            let _ = std::io::Read::read_to_string(&mut err, &mut buf);
            buf
        })
    });
    // Wait with timeout using a polling loop
    let deadline = std::time::Instant::now() + PROBE_TIMEOUT;
    loop {
        match child.try_wait() {
            Ok(Some(status)) => {
                if status.success() {
                    return Ok(());
                }
                // Non-zero exit: collect the drained stderr for the message.
                // The reader thread sees EOF once the child has exited, so
                // this join cannot hang.
                let stderr = stderr_reader
                    .and_then(|h| h.join().ok())
                    .unwrap_or_default();
                let msg = stderr.trim();
                if msg.is_empty() {
                    anyhow::bail!(
                        "remote path not found: {} (exit code {})",
                        share.remote_path,
                        status.code().unwrap_or(-1)
                    );
                } else {
                    anyhow::bail!("{}", extract_rclone_error(msg));
                }
            }
            Ok(None) => {
                // Still running — check timeout
                if std::time::Instant::now() > deadline {
                    let _ = child.kill();
                    let _ = child.wait();
                    anyhow::bail!(
                        "probe timed out after {}s for path: {}",
                        PROBE_TIMEOUT.as_secs(),
                        share.remote_path
                    );
                }
                std::thread::sleep(Duration::from_millis(100));
            }
            Err(e) => {
                anyhow::bail!("failed to check rclone lsf status: {e}");
            }
        }
    }
}
/// Extract the most useful part of rclone's error output.
///
/// rclone stderr often contains timestamps and log levels; we strip those
/// to get a cleaner message for the user.
fn extract_rclone_error(stderr: &str) -> String {
    // rclone errors look like:
    //   2024/01/01 12:00:00 ERROR : : error listing: directory not found
    // Scan lines newest-first and keep the text after the "ERROR : : "
    // (or "ERROR : name : ") prefix of the most recent ERROR line.
    let from_error_line = stderr.lines().rev().find_map(|line| {
        let (_, rest) = line.split_once("ERROR")?;
        let message = rest.trim_start_matches(|c: char| c == ' ' || c == ':');
        if message.is_empty() {
            None
        } else {
            Some(message.to_string())
        }
    });
    if let Some(message) = from_error_line {
        return message;
    }
    // No usable ERROR line: fall back to the last non-empty line.
    stderr
        .lines()
        .rev()
        .find(|l| !l.trim().is_empty())
        .unwrap_or(stderr)
        .trim()
        .to_string()
}
#[cfg(test)]
mod tests {
use super::*;
// Typical single-line rclone failure: timestamp and "ERROR : : " prefix
// are stripped, leaving only the human-readable reason.
#[test]
fn test_extract_rclone_error_with_error_prefix() {
let stderr = "2024/01/01 12:00:00 ERROR : : error listing: directory not found";
assert_eq!(
extract_rclone_error(stderr),
"error listing: directory not found"
);
}
// With mixed log levels, the most recent ERROR line wins (lines are
// scanned in reverse); the NOTICE line is ignored.
#[test]
fn test_extract_rclone_error_multiline() {
let stderr = "2024/01/01 12:00:00 NOTICE: some notice\n\
2024/01/01 12:00:01 ERROR : : Failed to ls: directory not found";
assert_eq!(
extract_rclone_error(stderr),
"Failed to ls: directory not found"
);
}
// No "ERROR" marker at all: fall back to the last non-empty line.
#[test]
fn test_extract_rclone_error_no_error_prefix() {
let stderr = "some unexpected error output\n";
assert_eq!(
extract_rclone_error(stderr),
"some unexpected error output"
);
}
// Degenerate input: empty stderr yields an empty message.
#[test]
fn test_extract_rclone_error_empty() {
assert_eq!(extract_rclone_error(""), "");
}
// Guard against accidental changes to the probe timeout constant.
#[test]
fn test_probe_timeout_is_10s() {
assert_eq!(PROBE_TIMEOUT, Duration::from_secs(10));
}
}

View File

@ -17,7 +17,7 @@ use anyhow::{Context, Result};
use crate::config::Config;
use crate::config_diff::{self, ChangeTier};
use crate::daemon::{DaemonStatus, SupervisorCmd};
use crate::daemon::{DaemonStatus, ShareHealth, SupervisorCmd};
use crate::rclone::mount::{build_mount_args, is_mounted};
use crate::rclone::rc;
use crate::services::{nfs, samba, webdav};
@ -132,22 +132,39 @@ pub fn run(config: &Config, config_path: PathBuf) -> Result<()> {
}
});
// Phase 1: Preflight — generate configs, create dirs
// Phase 1: Preflight — create dirs, write rclone.conf
println!("Preflight checks...");
preflight(config)?;
// Phase 2: Start rclone mounts (one per share) and wait for all to become ready
// Phase 1.5: Probe remote paths in parallel
println!("Probing remote paths...");
let healthy_names = probe_all_shares(config, &shared_status, &shutdown)?;
if healthy_names.is_empty() {
anyhow::bail!("All shares failed probe — no healthy mounts to start");
}
// Build a config containing only healthy shares for protocol configs
let mut healthy_config = config.clone();
healthy_config
.shares
.retain(|s| healthy_names.contains(&s.name));
// Phase 1.75: Generate protocol configs with only healthy shares
write_protocol_configs(&healthy_config)?;
// Phase 2: Start rclone mounts only for healthy shares
println!("Starting rclone mounts...");
let mut mount_children = start_and_wait_mounts(config, &shutdown)?;
for share in &config.shares {
let mut mount_children = start_and_wait_mounts(&healthy_config, &shutdown)?;
for share in &healthy_config.shares {
println!(" Mount ready at {}", share.mount_point.display());
}
// Update status: mounts are ready
// Update status: mounts are ready (match by name, not index)
{
let mut status = shared_status.write().unwrap();
for (i, mc) in mount_children.iter().enumerate() {
if let Some(ss) = status.shares.get_mut(i) {
for mc in &mount_children {
if let Some(ss) = status.shares.iter_mut().find(|s| s.name == mc.name) {
ss.mounted = true;
ss.rc_port = mc.rc_port;
}
@ -164,14 +181,14 @@ pub fn run(config: &Config, config_path: PathBuf) -> Result<()> {
return Ok(());
}
println!("Starting protocol services...");
let mut protocols = start_protocols(config)?;
let mut protocols = start_protocols(&healthy_config)?;
// Update status: protocols running
{
let mut status = shared_status.write().unwrap();
status.smbd_running = protocols.smbd.is_some();
status.webdav_running = protocols.webdav.is_some();
status.nfs_exported = config.protocols.enable_nfs;
status.nfs_exported = healthy_config.protocols.enable_nfs;
}
// Phase 3.5: Auto-warmup in background thread (non-blocking)
@ -217,7 +234,7 @@ pub fn run(config: &Config, config_path: PathBuf) -> Result<()> {
result
}
/// Write configs and create directories.
/// Write rclone config and create directories (protocol configs generated after probe).
fn preflight(config: &Config) -> Result<()> {
// Ensure mount points exist for each share
for share in &config.shares {
@ -240,7 +257,13 @@ fn preflight(config: &Config) -> Result<()> {
// Generate rclone config
crate::rclone::config::write_config(config)?;
// Generate protocol configs
Ok(())
}
/// Generate protocol configs (SMB/NFS) for the given config.
///
/// Called after probing so only healthy shares are included.
fn write_protocol_configs(config: &Config) -> Result<()> {
if config.protocols.enable_smb {
samba::write_config(config)?;
if config.smb_auth.enabled {
@ -250,10 +273,75 @@ fn preflight(config: &Config) -> Result<()> {
if config.protocols.enable_nfs {
nfs::write_config(config)?;
}
Ok(())
}
/// Probe all shares in parallel and return the set of healthy share names.
///
/// Updates `shared_status` with probe results as they complete. Bails out
/// early (without waiting for remaining probes) when `shutdown` is set.
fn probe_all_shares(
    config: &Config,
    shared_status: &Arc<RwLock<DaemonStatus>>,
    shutdown: &AtomicBool,
) -> Result<Vec<String>> {
    use std::collections::HashSet;
    // Mark all shares as Probing so the UI reflects the in-flight state.
    {
        let mut status = shared_status.write().unwrap();
        for ss in &mut status.shares {
            ss.health = ShareHealth::Probing;
        }
    }
    // Share one immutable copy of the config across probe threads instead
    // of cloning the entire Config once per share.
    let cfg = Arc::new(config.clone());
    // Spawn one thread per share; each yields (name, probe result).
    let handles: Vec<_> = config
        .shares
        .iter()
        .cloned()
        .map(|share| {
            let cfg = Arc::clone(&cfg);
            let name = share.name.clone();
            thread::spawn(move || {
                let result = crate::rclone::probe::probe_remote_path(&cfg, &share);
                (name, result)
            })
        })
        .collect();
    // Collect results, updating per-share health as each probe finishes.
    let mut healthy = HashSet::new();
    for handle in handles {
        if shutdown.load(Ordering::SeqCst) {
            anyhow::bail!("Interrupted during probe");
        }
        match handle.join() {
            Ok((name, Ok(()))) => {
                println!(" Probe OK: {name}");
                let mut status = shared_status.write().unwrap();
                if let Some(ss) = status.shares.iter_mut().find(|s| s.name == name) {
                    ss.health = ShareHealth::Healthy;
                }
                healthy.insert(name);
            }
            Ok((name, Err(e))) => {
                let msg = format!("{e}");
                // BUGFIX: name and reason were printed glued together
                // ("{name}{msg}"); separate them for readable output.
                eprintln!(" Probe FAILED: {name} — {msg}");
                let mut status = shared_status.write().unwrap();
                if let Some(ss) = status.shares.iter_mut().find(|s| s.name == name) {
                    ss.health = ShareHealth::Failed(msg);
                }
            }
            Err(_) => {
                // A panicking probe thread leaves its share in Probing
                // state; it is simply not added to the healthy set.
                eprintln!(" Probe thread panicked");
            }
        }
    }
    Ok(healthy.into_iter().collect())
}
/// Spawn rclone mount processes for all shares and poll until each FUSE mount appears.
fn start_and_wait_mounts(config: &Config, shutdown: &AtomicBool) -> Result<Vec<MountChild>> {
let mut children = Vec::new();
@ -645,6 +733,15 @@ fn handle_reload(
apply_bwlimit(mounts, &new_config.bandwidth.limit_up, &new_config.bandwidth.limit_down);
}
// Regenerate rclone.conf if connections changed
if !diff.connections_added.is_empty()
|| !diff.connections_removed.is_empty()
|| !diff.connections_modified.is_empty()
{
println!(" Regenerating rclone.conf (connections changed)...");
crate::rclone::config::write_config(&new_config)?;
}
// Handle removed shares: drain → unmount → kill
for name in &diff.shares_removed {
println!(" Removing share '{name}'...");
@ -717,13 +814,24 @@ fn handle_reload(
// Re-preflight with new config
preflight(&new_config)?;
// Re-start mounts
let shutdown = AtomicBool::new(false);
let mut new_mounts = start_and_wait_mounts(&new_config, &shutdown)?;
// Re-probe all shares
let shutdown_flag = AtomicBool::new(false);
let healthy_names =
probe_all_shares(&new_config, shared_status, &shutdown_flag)?;
// Build healthy-only config for mounts and protocols
let mut healthy_config = new_config.clone();
healthy_config
.shares
.retain(|s| healthy_names.contains(&s.name));
write_protocol_configs(&healthy_config)?;
// Re-start mounts (healthy only)
let mut new_mounts = start_and_wait_mounts(&healthy_config, &shutdown_flag)?;
mounts.append(&mut new_mounts);
// Re-start protocols
let new_protocols = start_protocols(&new_config)?;
let new_protocols = start_protocols(&healthy_config)?;
// Replace old protocol children (Drop will handle any leftover)
*protocols = new_protocols;
*smbd_tracker = RestartTracker::new();
@ -757,6 +865,9 @@ fn handle_reload(
speed: existing.map(|e| e.speed).unwrap_or(0.0),
transfers: existing.map(|e| e.transfers).unwrap_or(0),
errors: existing.map(|e| e.errors).unwrap_or(0),
health: existing
.map(|e| e.health.clone())
.unwrap_or(ShareHealth::Pending),
}
})
.collect();

View File

@ -45,6 +45,8 @@ struct ShareStatusResponse {
speed_display: String,
transfers: u64,
errors: u64,
health: String,
health_message: Option<String>,
}
async fn get_status(State(state): State<SharedState>) -> Json<StatusResponse> {
@ -66,6 +68,8 @@ async fn get_status(State(state): State<SharedState>) -> Json<StatusResponse> {
speed_display: s.speed_display(),
transfers: s.transfers,
errors: s.errors,
health: s.health_label().to_string(),
health_message: s.health_message().map(|m| m.to_string()),
})
.collect(),
smbd_running: status.smbd_running,
@ -97,6 +101,8 @@ async fn get_share_status(
speed_display: share.speed_display(),
transfers: share.transfers,
errors: share.errors,
health: share.health_label().to_string(),
health_message: share.health_message().map(|m| m.to_string()),
}))
}

View File

@ -34,18 +34,22 @@ struct DashboardTemplate {
struct ShareView {
/// Share name; also the `/shares/{name}` link target in templates.
name: String,
/// Backing rclone connection name from the share config ("" if the
/// share is no longer present in the config).
connection: String,
/// Local mount point, rendered as display text.
mount_point: String,
/// Whether the FUSE mount is currently up.
mounted: bool,
/// Human-readable cache size (e.g. "45.2 GiB").
cache_display: String,
/// Number of dirty (not yet uploaded) cache entries.
dirty_count: u64,
/// Human-readable transfer speed (e.g. "2.2 MiB/s").
speed_display: String,
/// Whether the share is exported read-only.
read_only: bool,
/// Health label: "PENDING", "PROBING", "OK", or "FAILED".
health: String,
/// Probe failure reason; empty string unless health is "FAILED".
health_message: String,
}
#[derive(Template)]
#[template(path = "web/share_detail.html")]
struct ShareDetailTemplate {
name: String,
connection: String,
mount_point: String,
remote_path: String,
mounted: bool,
@ -57,6 +61,8 @@ struct ShareDetailTemplate {
speed_display: String,
transfers: u64,
errors: u64,
health: String,
health_message: String,
}
#[derive(Template)]
@ -88,21 +94,22 @@ async fn dashboard(State(state): State<SharedState>) -> Response {
.shares
.iter()
.map(|s| {
let read_only = config
.find_share(&s.name)
.map(|sc| sc.read_only)
.unwrap_or(false);
let share_config = config.find_share(&s.name);
ShareView {
name: s.name.clone(),
mount_point: config
.find_share(&s.name)
connection: share_config
.map(|sc| sc.connection.clone())
.unwrap_or_default(),
mount_point: share_config
.map(|sc| sc.mount_point.display().to_string())
.unwrap_or_default(),
mounted: s.mounted,
cache_display: s.cache_display(),
dirty_count: s.dirty_count,
speed_display: s.speed_display(),
read_only,
read_only: share_config.map(|sc| sc.read_only).unwrap_or(false),
health: s.health_label().to_string(),
health_message: s.health_message().unwrap_or("").to_string(),
}
})
.collect();
@ -138,6 +145,9 @@ async fn share_detail(
let tmpl = ShareDetailTemplate {
name: share_status.name.clone(),
connection: share_config
.map(|sc| sc.connection.clone())
.unwrap_or_default(),
mount_point: share_config
.map(|sc| sc.mount_point.display().to_string())
.unwrap_or_default(),
@ -153,6 +163,8 @@ async fn share_detail(
speed_display: share_status.speed_display(),
transfers: share_status.transfers,
errors: share_status.errors,
health: share_status.health_label().to_string(),
health_message: share_status.health_message().unwrap_or("").to_string(),
};
match tmpl.render() {
@ -278,21 +290,22 @@ async fn status_partial(State(state): State<SharedState>) -> Response {
.shares
.iter()
.map(|s| {
let read_only = config
.find_share(&s.name)
.map(|sc| sc.read_only)
.unwrap_or(false);
let share_config = config.find_share(&s.name);
ShareView {
name: s.name.clone(),
mount_point: config
.find_share(&s.name)
connection: share_config
.map(|sc| sc.connection.clone())
.unwrap_or_default(),
mount_point: share_config
.map(|sc| sc.mount_point.display().to_string())
.unwrap_or_default(),
mounted: s.mounted,
cache_display: s.cache_display(),
dirty_count: s.dirty_count,
speed_display: s.speed_display(),
read_only,
read_only: share_config.map(|sc| sc.read_only).unwrap_or(false),
health: s.health_label().to_string(),
health_message: s.health_message().unwrap_or("").to_string(),
}
})
.collect();

View File

@ -28,6 +28,8 @@
.badge-ok { background: rgba(74,222,128,0.15); color: var(--green); }
.badge-error { background: rgba(248,113,113,0.15); color: var(--red); }
.badge-ro { background: rgba(251,191,36,0.15); color: var(--yellow); }
.badge-warn { background: rgba(251,191,36,0.15); color: var(--yellow); }
.error-msg { margin-top: 8px; padding: 8px 12px; background: rgba(248,113,113,0.08); border-radius: 4px; color: var(--red); font-size: 0.85em; }
.stats { display: flex; gap: 24px; font-size: 0.9em; color: var(--text-muted); flex-wrap: wrap; }
.stats span { white-space: nowrap; }
.stats .label { color: var(--text-muted); }
@ -55,10 +57,14 @@
<div class="card-header">
<h2><a href="/shares/{{ share.name }}">{{ share.name }}</a></h2>
<div>
{% if share.mounted %}
{% if share.health == "OK" %}
<span class="badge badge-ok">OK</span>
{% elif share.health == "FAILED" %}
<span class="badge badge-error" title="{{ share.health_message }}">FAILED</span>
{% elif share.health == "PROBING" %}
<span class="badge badge-warn">PROBING</span>
{% else %}
<span class="badge badge-error">DOWN</span>
<span class="badge badge-warn">PENDING</span>
{% endif %}
{% if share.read_only %}
<span class="badge badge-ro">RO</span>
@ -71,6 +77,9 @@
<span><span class="label">Dirty:</span> <span class="value">{{ share.dirty_count }}</span></span>
<span><span class="label">Speed:</span> <span class="value">{{ share.speed_display }}</span></span>
</div>
{% if share.health == "FAILED" %}
<div class="error-msg">{{ share.health_message }}</div>
{% endif %}
</div>
{% endfor %}

View File

@ -22,6 +22,8 @@
.badge-ok { background: rgba(74,222,128,0.15); color: var(--green); }
.badge-error { background: rgba(248,113,113,0.15); color: var(--red); }
.badge-ro { background: rgba(251,191,36,0.15); color: var(--yellow); }
.badge-warn { background: rgba(251,191,36,0.15); color: var(--yellow); }
.error-text { color: var(--red); }
.detail-grid { display: grid; grid-template-columns: 1fr 1fr; gap: 12px; margin-bottom: 24px; }
.detail-card { background: var(--surface); border: 1px solid var(--border); border-radius: 8px; padding: 16px; }
.detail-card .label { font-size: 0.8em; color: var(--text-muted); margin-bottom: 4px; }
@ -36,7 +38,7 @@
<h1>
{{ name }}
{% if mounted %}<span class="badge badge-ok">OK</span>{% else %}<span class="badge badge-error">DOWN</span>{% endif %}
{% if health == "OK" %}<span class="badge badge-ok">OK</span>{% elif health == "FAILED" %}<span class="badge badge-error">FAILED</span>{% elif health == "PROBING" %}<span class="badge badge-warn">PROBING</span>{% else %}<span class="badge badge-warn">PENDING</span>{% endif %}
{% if read_only %}<span class="badge badge-ro">Read-Only</span>{% endif %}
</h1>
@ -60,6 +62,10 @@
</div>
<table class="info-table">
<tr><td>Health</td><td>{{ health }}</td></tr>
{% if health == "FAILED" %}
<tr><td>Probe Error</td><td class="error-text">{{ health_message }}</td></tr>
{% endif %}
<tr><td>Mount Point</td><td>{{ mount_point }}</td></tr>
<tr><td>Remote Path</td><td>{{ remote_path }}</td></tr>
<tr><td>RC Port</td><td>{{ rc_port }}</td></tr>

View File

@ -3,10 +3,14 @@
<div class="card-header">
<h2><a href="/shares/{{ share.name }}">{{ share.name }}</a></h2>
<div>
{% if share.mounted %}
{% if share.health == "OK" %}
<span class="badge badge-ok">OK</span>
{% elif share.health == "FAILED" %}
<span class="badge badge-error" title="{{ share.health_message }}">FAILED</span>
{% elif share.health == "PROBING" %}
<span class="badge badge-warn">PROBING</span>
{% else %}
<span class="badge badge-error">DOWN</span>
<span class="badge badge-warn">PENDING</span>
{% endif %}
{% if share.read_only %}
<span class="badge badge-ro">RO</span>
@ -19,6 +23,9 @@
<span><span class="label">Dirty:</span> <span class="value">{{ share.dirty_count }}</span></span>
<span><span class="label">Speed:</span> <span class="value">{{ share.speed_display }}</span></span>
</div>
{% if share.health == "FAILED" %}
<div class="error-msg">{{ share.health_message }}</div>
{% endif %}
</div>
{% endfor %}