warpgate/src/daemon.rs
grabbit 6bb7ec4d27 Web UI overhaul: interactive config editor, SSE live updates, log viewer, and SMB reload fixes
- Replace raw TOML textarea with Alpine.js interactive form editor (10 collapsible
  sections with change-tier badges, dynamic array management for connections/shares/
  warmup rules, proper input controls per field type)
- Add SSE-based live dashboard updates replacing htmx polling
- Add log viewer tab with ring buffer backend and incremental polling
- Fix SMB not seeing new shares after config reload: kill entire smbd process group
  (not just parent PID) so forked workers release port 445
- Add SIGHUP-based smbd config reload for share changes instead of full restart,
  preserving existing client connections
- Generate human-readable commented TOML from config editor instead of bare
  toml::to_string_pretty() output
- Fix Alpine.js 2.x __x.$data calls in dashboard/share templates (now Alpine 3.x)
- Fix toggle switch CSS overlap with field labels
- Fix dashboard going blank on tab switch (remove hx-swap-oob from tab content)
- Add htmx:afterSettle → Alpine.initTree() bridge for robust tab switching

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-18 18:06:52 +08:00

356 lines
10 KiB
Rust

//! Shared state and command types for the daemon (supervisor + web server).
//!
//! The supervisor owns all mutable state. The web server gets read-only access
//! to status via `Arc<RwLock<DaemonStatus>>` and sends commands via an mpsc channel.
use std::collections::VecDeque;
use std::path::PathBuf;
use std::sync::mpsc;
use std::sync::{Arc, RwLock};
use std::time::{Instant, SystemTime, UNIX_EPOCH};
use crate::config::Config;
/// Default port for the built-in web UI / API server.
/// Overridable via configuration elsewhere — this is only the fallback.
pub const DEFAULT_WEB_PORT: u16 = 8090;
/// Shared application state accessible by both the supervisor and web server.
///
/// The supervisor holds the writing side of each handle; the web server reads
/// through the shared `Arc`s and talks back only via `cmd_tx`.
pub struct AppState {
    /// Current active configuration (read by UI, updated on reload).
    pub config: Arc<RwLock<Config>>,
    /// Live daemon status (updated by supervisor every poll cycle).
    pub status: Arc<RwLock<DaemonStatus>>,
    /// Command channel: web server → supervisor.
    pub cmd_tx: mpsc::Sender<SupervisorCmd>,
    /// Path to the config file on disk.
    pub config_path: PathBuf,
    /// SSE broadcast: supervisor sends `()` after each status update;
    /// web server subscribers render partials and push to connected clients.
    pub sse_tx: tokio::sync::broadcast::Sender<()>,
    /// Ring buffer of log entries for the web UI.
    pub logs: Arc<RwLock<LogBuffer>>,
}
/// Ring buffer of timestamped log entries for the web log viewer.
///
/// Capacity is bounded by `LOG_BUFFER_MAX`; the oldest entry is dropped
/// when a push would exceed it. IDs keep increasing even after eviction,
/// so clients can poll incrementally with `since`.
pub struct LogBuffer {
    // Front = oldest entry, back = newest.
    entries: VecDeque<LogEntry>,
    /// Monotonically increasing ID for the next entry.
    next_id: u64,
}
/// A single log entry with unix timestamp and message.
///
/// Serialized to JSON for the web UI's log viewer; `Debug` is derived so
/// entries can appear in diagnostics like other public types in this module.
#[derive(Clone, Debug, serde::Serialize)]
pub struct LogEntry {
    /// Monotonically increasing ID assigned by `LogBuffer::push`.
    pub id: u64,
    /// Unix timestamp in whole seconds when the entry was pushed.
    pub ts: u64,
    /// The log message text.
    pub msg: String,
}
/// Maximum number of entries retained in the ring buffer; the oldest entry
/// is evicted when a push would exceed this.
const LOG_BUFFER_MAX: usize = 500;

impl LogBuffer {
    /// Create an empty buffer; the first pushed entry will get ID 0.
    pub fn new() -> Self {
        Self {
            // The buffer never holds more than LOG_BUFFER_MAX entries, so
            // reserving up front means steady-state pushes never reallocate.
            entries: VecDeque::with_capacity(LOG_BUFFER_MAX),
            next_id: 0,
        }
    }

    /// Push a new log message. Timestamps are added automatically.
    pub fn push(&mut self, msg: impl Into<String>) {
        // A pre-epoch system clock would make duration_since fail; fall back
        // to ts = 0 rather than panicking in the logging path.
        let ts = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .unwrap_or_default()
            .as_secs();
        self.entries.push_back(LogEntry {
            id: self.next_id,
            ts,
            msg: msg.into(),
        });
        self.next_id += 1;
        if self.entries.len() > LOG_BUFFER_MAX {
            self.entries.pop_front();
        }
    }

    /// Get entries with ID >= `since_id`.
    ///
    /// Entries already evicted from the ring are silently skipped, so a
    /// caller polling with a stale `since_id` just gets everything retained.
    pub fn since(&self, since_id: u64) -> Vec<LogEntry> {
        // ID of the oldest entry still held (saturating: buffer may be empty).
        let start_id = self.next_id.saturating_sub(self.entries.len() as u64);
        let skip = if since_id > start_id {
            (since_id - start_id) as usize
        } else {
            0
        };
        self.entries.iter().skip(skip).cloned().collect()
    }

    /// The ID that the next pushed entry will have.
    pub fn next_id(&self) -> u64 {
        self.next_id
    }
}

impl Default for LogBuffer {
    /// Same as [`LogBuffer::new`]; provided so the type satisfies
    /// clippy's `new_without_default` and composes with `Default`-based APIs.
    fn default() -> Self {
        Self::new()
    }
}
/// Overall daemon status, updated by the supervisor loop.
///
/// A snapshot of this lives behind `AppState.status` for the web server to read.
pub struct DaemonStatus {
    /// When the daemon started (monotonic; used for uptime display).
    pub started_at: Instant,
    /// Per-share mount status, one entry per configured share.
    pub shares: Vec<ShareStatus>,
    /// Whether smbd is running.
    pub smbd_running: bool,
    /// Whether WebDAV is running.
    pub webdav_running: bool,
    /// Whether NFS exports are active.
    pub nfs_exported: bool,
}
impl DaemonStatus {
    /// Create initial status for a set of share names.
    ///
    /// Each share starts unmounted with zeroed counters and
    /// `ShareHealth::Pending`; all service flags start false.
    pub fn new(share_names: &[String]) -> Self {
        let shares = share_names
            .iter()
            .cloned()
            .map(|name| ShareStatus {
                name,
                mounted: false,
                rc_port: 0,
                cache_bytes: 0,
                dirty_count: 0,
                errored_files: 0,
                speed: 0.0,
                transfers: 0,
                errors: 0,
                health: ShareHealth::Pending,
            })
            .collect();
        Self {
            started_at: Instant::now(),
            shares,
            smbd_running: false,
            webdav_running: false,
            nfs_exported: false,
        }
    }

    /// Format uptime as a human-readable string.
    ///
    /// Leading zero components are dropped: "3d 2h 5m", "2h 5m", or "5m".
    pub fn uptime_string(&self) -> String {
        let total_secs = self.started_at.elapsed().as_secs();
        let days = total_secs / 86_400;
        let remainder = total_secs % 86_400;
        let hours = remainder / 3_600;
        let mins = (remainder % 3_600) / 60;
        match (days, hours) {
            (0, 0) => format!("{mins}m"),
            (0, _) => format!("{hours}h {mins}m"),
            _ => format!("{days}d {hours}h {mins}m"),
        }
    }
}
/// Per-share runtime status.
///
/// Counters are refreshed by the supervisor's poll cycle; display helpers
/// live in the `impl` below.
pub struct ShareStatus {
    /// Share name (matches ShareConfig.name).
    pub name: String,
    /// Whether the FUSE mount is active.
    pub mounted: bool,
    /// RC API port for this share's rclone instance (0 = not assigned yet).
    pub rc_port: u16,
    /// Bytes used in VFS disk cache.
    pub cache_bytes: u64,
    /// Number of dirty files (uploads_in_progress + uploads_queued).
    pub dirty_count: u64,
    /// Number of errored cache files.
    pub errored_files: u64,
    /// Current transfer speed (bytes/sec from core/stats).
    pub speed: f64,
    /// Active transfer count.
    pub transfers: u64,
    /// Cumulative error count.
    pub errors: u64,
    /// Pre-mount probe result.
    pub health: ShareHealth,
}
impl ShareStatus {
    /// Format cache size as a human-readable string (e.g. "45.2 GiB").
    pub fn cache_display(&self) -> String {
        format_bytes(self.cache_bytes)
    }

    /// Format speed as a human-readable string, or "-" below 1 B/s.
    pub fn speed_display(&self) -> String {
        match self.speed {
            s if s < 1.0 => "-".to_string(),
            s => format!("{}/s", format_bytes(s as u64)),
        }
    }

    /// Human-readable health label: "PENDING", "PROBING", "OK", or "FAILED".
    pub fn health_label(&self) -> &str {
        match &self.health {
            ShareHealth::Pending => "PENDING",
            ShareHealth::Probing => "PROBING",
            ShareHealth::Healthy => "OK",
            ShareHealth::Failed(_) => "FAILED",
        }
    }

    /// Error message when health is Failed, None otherwise.
    pub fn health_message(&self) -> Option<&str> {
        if let ShareHealth::Failed(msg) = &self.health {
            Some(msg)
        } else {
            None
        }
    }

    /// Whether the share is healthy (probe succeeded).
    pub fn is_healthy(&self) -> bool {
        matches!(self.health, ShareHealth::Healthy)
    }
}
/// Format bytes as human-readable (e.g. "45.2 GiB").
///
/// Walks a table of binary-unit thresholds from largest to smallest and
/// prints one decimal place for the first unit that fits; values below
/// 1 KiB are shown as plain bytes.
fn format_bytes(bytes: u64) -> String {
    let units: [(&str, f64); 4] = [
        ("TiB", 1024f64.powi(4)),
        ("GiB", 1024f64.powi(3)),
        ("MiB", 1024f64.powi(2)),
        ("KiB", 1024.0),
    ];
    let value = bytes as f64;
    for (label, scale) in units {
        if value >= scale {
            return format!("{:.1} {label}", value / scale);
        }
    }
    format!("{bytes} B")
}
/// Per-share health state from pre-mount probing.
///
/// Derives `Eq` as well as `PartialEq` (all payloads are `Eq`), per clippy's
/// `derive_partial_eq_without_eq`, so the type can be used where full
/// equivalence is required.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum ShareHealth {
    /// Not yet probed (initial state).
    Pending,
    /// Probe in progress.
    Probing,
    /// Remote path verified, ready to mount.
    Healthy,
    /// Probe failed — share will not be mounted; payload is the error message.
    Failed(String),
}
/// Commands sent from the web server (or CLI) to the supervisor.
///
/// Delivered over the `cmd_tx` mpsc channel in `AppState`.
pub enum SupervisorCmd {
    /// Apply a new configuration (triggers tiered reload).
    Reload(Config),
    /// Graceful shutdown.
    Shutdown,
    /// Live bandwidth adjustment (Tier A — no restart needed).
    /// Values are rclone-style rate strings; exact format is handled by the
    /// supervisor — TODO confirm accepted syntax against the handler.
    BwLimit { up: String, down: String },
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Baseline ShareStatus ("test", unmounted, all counters zero, Pending
    /// health) that individual tests tweak field-by-field.
    fn sample_share() -> ShareStatus {
        ShareStatus {
            name: "test".into(),
            mounted: false,
            rc_port: 0,
            cache_bytes: 0,
            dirty_count: 0,
            errored_files: 0,
            speed: 0.0,
            transfers: 0,
            errors: 0,
            health: ShareHealth::Pending,
        }
    }

    #[test]
    fn test_format_bytes() {
        let cases: [(u64, &str); 7] = [
            (0, "0 B"),
            (500, "500 B"),
            (1024, "1.0 KiB"),
            (1536, "1.5 KiB"),
            (1048576, "1.0 MiB"),
            (1073741824, "1.0 GiB"),
            (1099511627776, "1.0 TiB"),
        ];
        for (input, expected) in cases {
            assert_eq!(format_bytes(input), expected);
        }
    }

    #[test]
    fn test_daemon_status_new() {
        let names: Vec<String> =
            ["photos", "projects"].iter().map(|s| s.to_string()).collect();
        let status = DaemonStatus::new(&names);
        assert_eq!(status.shares.len(), 2);
        assert_eq!(status.shares[0].name, "photos");
        assert_eq!(status.shares[1].name, "projects");
        assert!(!status.smbd_running);
        assert!(!status.webdav_running);
        assert!(!status.nfs_exported);
    }

    #[test]
    fn test_uptime_string() {
        // Freshly created status should render at least the minutes component.
        assert!(DaemonStatus::new(&[]).uptime_string().contains('m'));
    }

    #[test]
    fn test_share_status_display() {
        let mut share = sample_share();
        share.mounted = true;
        share.rc_port = 5572;
        share.cache_bytes = 48_578_891_776; // ~45.2 GiB
        share.dirty_count = 3;
        share.speed = 2_200_000.0;
        share.transfers = 2;
        share.health = ShareHealth::Healthy;
        assert!(share.cache_display().contains("GiB"));
        assert!(share.speed_display().contains("/s"));
    }

    #[test]
    fn test_share_status_no_speed() {
        let mut share = sample_share();
        share.mounted = true;
        share.rc_port = 5572;
        // Zero speed renders as the placeholder dash, not "0 B/s".
        assert_eq!(share.speed_display(), "-");
    }

    #[test]
    fn test_share_health_labels() {
        let mut share = sample_share();
        assert_eq!(share.health_label(), "PENDING");
        assert!(share.health_message().is_none());
        assert!(!share.is_healthy());

        share.health = ShareHealth::Probing;
        assert_eq!(share.health_label(), "PROBING");

        share.health = ShareHealth::Healthy;
        assert_eq!(share.health_label(), "OK");
        assert!(share.is_healthy());

        share.health = ShareHealth::Failed("remote path not found".into());
        assert_eq!(share.health_label(), "FAILED");
        assert_eq!(share.health_message(), Some("remote path not found"));
        assert!(!share.is_healthy());
    }
}