From 64d6171ec976e3662fedd440c938d0ccf1d69fb1 Mon Sep 17 00:00:00 2001 From: grabbit Date: Thu, 19 Feb 2026 11:24:06 +0800 Subject: [PATCH] Unify logging to tracing: file appender + unified log viewer Replace scattered println!/eprintln! with structured tracing macros throughout supervisor, scheduler, and web modules. Add LogConfig (file + level) to Config and a new logging module that initialises a stderr + optional non-blocking file appender on `warpgate run`. Remove the in-memory LogBuffer/LogEntry from AppState; the web /api/logs endpoint now reads the log file directly with from_line/lines pagination. `warpgate log` replaces journalctl with `tail`, and the Logs tab Alpine.js is updated to match the new API response shape. Co-Authored-By: Claude Sonnet 4.6 --- Cargo.lock | 146 +++++++++++++++++++++++++++++++++++ Cargo.toml | 3 + src/cli/log.rs | 35 ++++----- src/config.rs | 37 +++++++++ src/daemon.rs | 65 +--------------- src/logging.rs | 66 ++++++++++++++++ src/main.rs | 33 +++++--- src/scheduler.rs | 4 +- src/supervisor.rs | 130 +++++++++++++++---------------- src/web/api.rs | 53 +++++++++++-- src/web/mod.rs | 12 +-- src/web/pages.rs | 5 +- templates/web/tabs/logs.html | 16 ++-- 13 files changed, 413 insertions(+), 192 deletions(-) create mode 100644 src/logging.rs diff --git a/Cargo.lock b/Cargo.lock index a75afc0..8acffa8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8,6 +8,15 @@ version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "320119579fcad9c21884f5c4861d16174d0e06250625266f50fe6898340abefa" +[[package]] +name = "aho-corasick" +version = "1.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ddd31a130427c27518df266943a5308ed92d4b226cc639f5a8f1002816174301" +dependencies = [ + "memchr", +] + [[package]] name = "anstream" version = "0.6.21" @@ -316,6 +325,21 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "crossbeam-channel" +version = "0.5.15" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "82b8f8f868b36967f9606790d1903570de9ceaf870a7bf9fbbd3016d636a2cb2" +dependencies = [ + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-utils" +version = "0.8.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" + [[package]] name = "ctrlc" version = "3.5.2" @@ -666,6 +690,12 @@ version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "92ecc6618181def0457392ccd0ee51198e065e016d1d527a7ac1b6dc7c1f09d2" +[[package]] +name = "lazy_static" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" + [[package]] name = "libc" version = "0.2.180" @@ -690,6 +720,15 @@ version = "0.4.29" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5e5032e24019045c762d3c0f28f5b6b8bbf38563a65908389bf7978758920897" +[[package]] +name = "matchers" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d1525a2a28c7f4fa0fc98bb91ae755d1e2d1505079e05539e35bc876b5d65ae9" +dependencies = [ + "regex-automata", +] + [[package]] name = "matchit" version = "0.8.4" @@ -741,6 +780,15 @@ dependencies = [ "libc", ] +[[package]] +name = "nu-ansi-term" +version = "0.50.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7957b9740744892f114936ab4a57b3f487491bbeafaf8083688b16841a4240e5" +dependencies = [ + "windows-sys 0.61.2", +] + [[package]] name = "num-conv" version = "0.1.0" @@ -825,6 +873,23 @@ dependencies = [ "proc-macro2", ] +[[package]] +name = "regex-automata" +version = "0.4.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e1dd4122fc1595e8162618945476892eefca7b88c52820e74af6262213cae8f" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax", +] 
+ +[[package]] +name = "regex-syntax" +version = "0.8.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a96887878f22d7bad8a3b6dc5b7440e0ada9a245242924394987b21cf2210a4c" + [[package]] name = "ring" version = "0.17.14" @@ -961,6 +1026,15 @@ dependencies = [ "serde", ] +[[package]] +name = "sharded-slab" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" +dependencies = [ + "lazy_static", +] + [[package]] name = "shlex" version = "1.3.0" @@ -1061,6 +1135,15 @@ dependencies = [ "syn", ] +[[package]] +name = "thread_local" +version = "1.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f60246a4944f24f6e018aa17cdeffb7818b76356965d03b07d6a9886e8962185" +dependencies = [ + "cfg-if", +] + [[package]] name = "time" version = "0.3.44" @@ -1241,9 +1324,33 @@ checksum = "63e71662fa4b2a2c3a26f570f037eb95bb1f85397f3cd8076caed2f026a6d100" dependencies = [ "log", "pin-project-lite", + "tracing-attributes", "tracing-core", ] +[[package]] +name = "tracing-appender" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "786d480bce6247ab75f005b14ae1624ad978d3029d9113f0a22fa1ac773faeaf" +dependencies = [ + "crossbeam-channel", + "thiserror", + "time", + "tracing-subscriber", +] + +[[package]] +name = "tracing-attributes" +version = "0.1.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7490cfa5ec963746568740651ac6781f701c9c5ea257c58e057f3ba8cf69e8da" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "tracing-core" version = "0.1.36" @@ -1251,6 +1358,36 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "db97caf9d906fbde555dd62fa95ddba9eecfd14cb388e4f491a66d74cd5fb79a" dependencies = [ "once_cell", + "valuable", +] + +[[package]] +name = "tracing-log" +version = "0.2.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3" +dependencies = [ + "log", + "once_cell", + "tracing-core", +] + +[[package]] +name = "tracing-subscriber" +version = "0.3.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f30143827ddab0d256fd843b7a66d164e9f271cfa0dde49142c5ca0ca291f1e" +dependencies = [ + "matchers", + "nu-ansi-term", + "once_cell", + "regex-automata", + "sharded-slab", + "smallvec", + "thread_local", + "tracing", + "tracing-core", + "tracing-log", ] [[package]] @@ -1327,6 +1464,12 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" +[[package]] +name = "valuable" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65" + [[package]] name = "version_check" version = "0.9.5" @@ -1350,6 +1493,9 @@ dependencies = [ "tokio-stream", "toml", "tower-http", + "tracing", + "tracing-appender", + "tracing-subscriber", "ureq", ] diff --git a/Cargo.toml b/Cargo.toml index 4395abf..74b628f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -18,3 +18,6 @@ tokio-stream = { version = "0.1", features = ["sync"] } axum = "0.8" askama = "0.15" tower-http = { version = "0.6", features = ["cors"] } +tracing = "0.1" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } +tracing-appender = "0.2" diff --git a/src/cli/log.rs b/src/cli/log.rs index 9b05ae5..587e144 100644 --- a/src/cli/log.rs +++ b/src/cli/log.rs @@ -1,4 +1,4 @@ -//! `warpgate log` — stream service logs in real time. +//! `warpgate log` — stream service logs from the configured log file. 
use std::process::Command; @@ -6,27 +6,24 @@ use anyhow::{Context, Result}; use crate::config::Config; -pub fn run(_config: &Config, lines: u32, follow: bool) -> Result<()> { - let mut cmd = Command::new("journalctl"); - cmd.arg("-u") - .arg("warpgate-mount") - .arg("-n") - .arg(lines.to_string()); +pub fn run(config: &Config, lines: u32, follow: bool) -> Result<()> { + let log_file = &config.log.file; + if log_file.is_empty() { + anyhow::bail!( + "No log file configured. Set [log] file = \"/var/log/warpgate/warpgate.log\" in config." + ); + } if follow { - // Stream directly to stdout with -f (like tail -f) - cmd.arg("-f"); - let status = cmd.status().context("Failed to run journalctl")?; - if !status.success() { - anyhow::bail!("journalctl exited with status {}", status); - } + Command::new("tail") + .args(["-f", "-n", &lines.to_string(), log_file]) + .status() + .context("Failed to run tail -f")?; } else { - let output = cmd.output().context("Failed to run journalctl")?; - if !output.status.success() { - let stderr = String::from_utf8_lossy(&output.stderr); - anyhow::bail!("journalctl failed: {}", stderr.trim()); - } - print!("{}", String::from_utf8_lossy(&output.stdout)); + Command::new("tail") + .args(["-n", &lines.to_string(), log_file]) + .status() + .context("Failed to run tail")?; } Ok(()) diff --git a/src/config.rs b/src/config.rs index 0468cf5..44b2104 100644 --- a/src/config.rs +++ b/src/config.rs @@ -31,9 +31,39 @@ pub struct Config { pub smb_auth: SmbAuthConfig, #[serde(default)] pub dir_refresh: DirRefreshConfig, + #[serde(default)] + pub log: LogConfig, pub shares: Vec, } +/// Logging configuration. +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +pub struct LogConfig { + /// Log file path. Empty string = no file logging. + #[serde(default = "default_log_file")] + pub file: String, + /// Minimum log level: error / warn / info / debug / trace. 
+ #[serde(default = "default_log_level")] + pub level: String, +} + +impl Default for LogConfig { + fn default() -> Self { + Self { + file: default_log_file(), + level: default_log_level(), + } + } +} + +fn default_log_file() -> String { + "/var/log/warpgate/warpgate.log".into() +} + +fn default_log_level() -> String { + "info".into() +} + /// SFTP connection to a remote NAS. #[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] pub struct ConnectionConfig { @@ -457,6 +487,13 @@ impl Config { writeln!(out).unwrap(); } + // --- Log --- + writeln!(out, "# --- Log ---").unwrap(); + writeln!(out, "[log]").unwrap(); + writeln!(out, "file = {:?}", self.log.file).unwrap(); + writeln!(out, "level = {:?}", self.log.level).unwrap(); + writeln!(out).unwrap(); + // --- Warmup --- writeln!(out, "# --- Warmup (change = no restart) ---").unwrap(); writeln!(out, "[warmup]").unwrap(); diff --git a/src/daemon.rs b/src/daemon.rs index e6898a1..ba48445 100644 --- a/src/daemon.rs +++ b/src/daemon.rs @@ -3,12 +3,12 @@ //! The supervisor owns all mutable state. The web server gets read-only access //! to status via `Arc>` and sends commands via an mpsc channel. -use std::collections::{HashMap, VecDeque}; +use std::collections::HashMap; use std::path::PathBuf; use std::sync::atomic::AtomicU64; use std::sync::mpsc; use std::sync::{Arc, RwLock}; -use std::time::{Instant, SystemTime, UNIX_EPOCH}; +use std::time::{Instant, SystemTime}; use crate::config::Config; @@ -28,67 +28,6 @@ pub struct AppState { /// SSE broadcast: supervisor sends `()` after each status update; /// web server subscribers render partials and push to connected clients. pub sse_tx: tokio::sync::broadcast::Sender<()>, - /// Ring buffer of log entries for the web UI. - pub logs: Arc>, -} - -/// Ring buffer of timestamped log entries for the web log viewer. -pub struct LogBuffer { - entries: VecDeque, - /// Monotonically increasing ID for the next entry. 
- next_id: u64, -} - -/// A single log entry with unix timestamp and message. -#[derive(Clone, serde::Serialize)] -pub struct LogEntry { - pub id: u64, - pub ts: u64, - pub msg: String, -} - -const LOG_BUFFER_MAX: usize = 500; - -impl LogBuffer { - pub fn new() -> Self { - Self { - entries: VecDeque::new(), - next_id: 0, - } - } - - /// Push a new log message. Timestamps are added automatically. - pub fn push(&mut self, msg: impl Into) { - let ts = SystemTime::now() - .duration_since(UNIX_EPOCH) - .unwrap_or_default() - .as_secs(); - self.entries.push_back(LogEntry { - id: self.next_id, - ts, - msg: msg.into(), - }); - self.next_id += 1; - if self.entries.len() > LOG_BUFFER_MAX { - self.entries.pop_front(); - } - } - - /// Get entries with ID >= `since_id`. - pub fn since(&self, since_id: u64) -> Vec { - let start_id = self.next_id.saturating_sub(self.entries.len() as u64); - let skip = if since_id > start_id { - (since_id - start_id) as usize - } else { - 0 - }; - self.entries.iter().skip(skip).cloned().collect() - } - - /// The ID that the next pushed entry will have. - pub fn next_id(&self) -> u64 { - self.next_id - } } /// Overall daemon status, updated by the supervisor loop. diff --git a/src/logging.rs b/src/logging.rs new file mode 100644 index 0000000..f52b68d --- /dev/null +++ b/src/logging.rs @@ -0,0 +1,66 @@ +//! Unified logging initializer. +//! +//! Configures `tracing` with both a stderr console layer and an optional +//! non-blocking file appender. The returned `WorkerGuard` must be kept alive +//! for the duration of the process — dropping it flushes and closes the file. + +use tracing_appender::non_blocking::WorkerGuard; + +/// Initialize tracing for the `run` command (console + optional file). +/// +/// Returns a `WorkerGuard` when file logging is active. The caller must hold +/// this value until the process exits so the background writer thread can flush. 
+pub fn init(log_config: &crate::config::LogConfig) -> Option { + use tracing_subscriber::{fmt, layer::SubscriberExt, util::SubscriberInitExt, EnvFilter}; + + let filter = EnvFilter::try_new(&log_config.level).unwrap_or_else(|_| EnvFilter::new("info")); + + let console_layer = fmt::layer().with_target(false).compact(); + + if log_config.file.is_empty() { + tracing_subscriber::registry() + .with(filter) + .with(console_layer) + .init(); + return None; + } + + // Ensure log directory exists. + let log_path = std::path::Path::new(&log_config.file); + if let Some(dir) = log_path.parent() { + let _ = std::fs::create_dir_all(dir); + } + + let file_appender = tracing_appender::rolling::never( + log_path + .parent() + .unwrap_or(std::path::Path::new(".")), + log_path + .file_name() + .unwrap_or(std::ffi::OsStr::new("warpgate.log")), + ); + let (non_blocking, guard) = tracing_appender::non_blocking(file_appender); + let file_layer = fmt::layer() + .with_writer(non_blocking) + .with_ansi(false) + .with_target(false) + .compact(); + + tracing_subscriber::registry() + .with(filter) + .with(console_layer) + .with(file_layer) + .init(); + + Some(guard) +} + +/// Initialize tracing for CLI sub-commands (console only, no file). 
+pub fn init_console() { + use tracing_subscriber::{fmt, layer::SubscriberExt, util::SubscriberInitExt, EnvFilter}; + let filter = EnvFilter::try_new("info").unwrap(); + let _ = tracing_subscriber::registry() + .with(filter) + .with(fmt::layer().with_target(false).compact()) + .try_init(); +} diff --git a/src/main.rs b/src/main.rs index e24a326..5b7761a 100644 --- a/src/main.rs +++ b/src/main.rs @@ -3,6 +3,7 @@ mod config; mod config_diff; mod daemon; mod deploy; +mod logging; mod rclone; mod scheduler; mod services; @@ -100,20 +101,28 @@ fn main() -> Result<()> { cmd => { let config = Config::load(&cli.config)?; match cmd { - Commands::Status => cli::status::run(&config), - Commands::CacheList => cli::cache::list(&config), - Commands::CacheClean { all } => cli::cache::clean(&config, all), - Commands::Warmup { share, path, newer_than } => { - cli::warmup::run(&config, &share, &path, newer_than.as_deref()) + Commands::Run => { + let _guard = logging::init(&config.log); + supervisor::run(&config, cli.config.clone()) } - Commands::Bwlimit { up, down } => { - cli::bwlimit::run(&config, up.as_deref(), down.as_deref()) + other => { + logging::init_console(); + match other { + Commands::Status => cli::status::run(&config), + Commands::CacheList => cli::cache::list(&config), + Commands::CacheClean { all } => cli::cache::clean(&config, all), + Commands::Warmup { share, path, newer_than } => { + cli::warmup::run(&config, &share, &path, newer_than.as_deref()) + } + Commands::Bwlimit { up, down } => { + cli::bwlimit::run(&config, up.as_deref(), down.as_deref()) + } + Commands::Log { lines, follow } => cli::log::run(&config, lines, follow), + Commands::SpeedTest => cli::speed_test::run(&config), + // already handled above + Commands::Run | Commands::ConfigInit { .. 
} | Commands::Deploy => unreachable!(), + } } - Commands::Log { lines, follow } => cli::log::run(&config, lines, follow), - Commands::SpeedTest => cli::speed_test::run(&config), - Commands::Run => supervisor::run(&config, cli.config.clone()), - // already handled above - Commands::ConfigInit { .. } | Commands::Deploy => unreachable!(), } } } diff --git a/src/scheduler.rs b/src/scheduler.rs index 9f07598..59c920d 100644 --- a/src/scheduler.rs +++ b/src/scheduler.rs @@ -9,6 +9,8 @@ use std::sync::Arc; use std::thread; use std::time::{Duration, Instant}; +use tracing::warn; + /// A named periodic task. pub struct ScheduledTask { pub name: &'static str, @@ -57,7 +59,7 @@ impl ScheduledTask { } if let Err(e) = work() { - eprintln!("[{}] error: {e}", self.name); + warn!("[{}] error: {e}", self.name); } }); } diff --git a/src/supervisor.rs b/src/supervisor.rs index b2f616a..e05b9ee 100644 --- a/src/supervisor.rs +++ b/src/supervisor.rs @@ -14,6 +14,7 @@ use std::thread; use std::time::{Duration, Instant, SystemTime}; use anyhow::{Context, Result}; +use tracing::{error, info, warn}; use crate::config::Config; use crate::config_diff::{self, ChangeTier}; @@ -100,7 +101,7 @@ pub fn run(config: &Config, config_path: PathBuf) -> Result<()> { // Install signal handler (SIGTERM + SIGINT) let shutdown_flag = Arc::clone(&shutdown); ctrlc::set_handler(move || { - eprintln!("Signal received, shutting down..."); + info!("Signal received, shutting down..."); shutdown_flag.store(true, Ordering::SeqCst); }) .context("Failed to set signal handler")?; @@ -138,11 +139,11 @@ pub fn run(config: &Config, config_path: PathBuf) -> Result<()> { }); // Phase 1: Preflight — create dirs, write rclone.conf - println!("Preflight checks..."); + info!("Preflight checks..."); preflight(config)?; // Phase 1.5: Probe remote paths in parallel - println!("Probing remote paths..."); + info!("Probing remote paths..."); let healthy_names = probe_all_shares(config, &shared_status, &shutdown)?; if 
healthy_names.is_empty() { @@ -159,10 +160,10 @@ pub fn run(config: &Config, config_path: PathBuf) -> Result<()> { write_protocol_configs(&healthy_config)?; // Phase 2: Start rclone mounts only for healthy shares - println!("Starting rclone mounts..."); + info!("Starting rclone mounts..."); let mut mount_children = start_and_wait_mounts(&healthy_config, &shutdown)?; for share in &healthy_config.shares { - println!(" Mount ready at {}", share.mount_point.display()); + info!(" Mount ready at {}", share.mount_point.display()); } // Update status: mounts are ready (match by name, not index) @@ -178,14 +179,14 @@ pub fn run(config: &Config, config_path: PathBuf) -> Result<()> { // Phase 3: Start protocol services if shutdown.load(Ordering::SeqCst) { - println!("Shutdown signal received during mount."); + info!("Shutdown signal received during mount."); for mc in &mut mount_children { let _ = mc.child.kill(); let _ = mc.child.wait(); } return Ok(()); } - println!("Starting protocol services..."); + info!("Starting protocol services..."); let mut protocols = start_protocols(&healthy_config)?; // Update status: protocols running @@ -202,7 +203,7 @@ pub fn run(config: &Config, config_path: PathBuf) -> Result<()> { spawn_dir_refresh(config, &shared_status, &shutdown); // Phase 4: Supervision loop with command channel - println!("Supervision active. Web UI at http://localhost:8090. Press Ctrl+C to stop."); + info!("Supervision active. Web UI at http://localhost:8090. 
Press Ctrl+C to stop."); let result = supervise( &shared_config, &shared_status, @@ -214,7 +215,7 @@ pub fn run(config: &Config, config_path: PathBuf) -> Result<()> { ); // Phase 5: Teardown (always runs) - println!("Shutting down..."); + info!("Shutting down..."); let config = shared_config.read().unwrap().clone(); shutdown_services(&config, &mut mount_children, &mut protocols); @@ -265,17 +266,17 @@ fn spawn_warmup( let warmup_shutdown = Arc::clone(shutdown); thread::spawn(move || { - println!("Auto-warmup started (background, generation {generation})..."); + info!("Auto-warmup started (background, generation {generation})..."); for (i, rule) in warmup_config.warmup.rules.iter().enumerate() { if warmup_shutdown.load(Ordering::SeqCst) { - println!("Auto-warmup interrupted by shutdown."); + info!("Auto-warmup interrupted by shutdown."); break; } // Check if our generation is still current { let status = warmup_status.read().unwrap(); if status.warmup_generation != generation { - println!("Auto-warmup superseded by newer generation."); + info!("Auto-warmup superseded by newer generation."); return; } } @@ -289,10 +290,10 @@ fn spawn_warmup( generation, &warmup_shutdown, ) { - eprintln!("Warmup warning: {e}"); + warn!("Warmup warning: {e}"); } } - println!("Auto-warmup complete."); + info!("Auto-warmup complete."); }); } @@ -340,7 +341,7 @@ fn spawn_dir_refresh( let gen_arc2 = Arc::clone(&gen_arc); let sd = Arc::clone(shutdown); - println!( + info!( " dir-refresh: scheduling '{}' every {}s", share_name, interval.as_secs() @@ -353,7 +354,7 @@ fn spawn_dir_refresh( .spawn(generation, gen_arc2, sd, move || { rc::vfs_refresh(rc_port, "/", recursive) .with_context(|| format!("dir-refresh for '{share_name}'"))?; - println!(" dir-refresh OK: {share_name}"); + info!(" dir-refresh OK: {share_name}"); let mut s = status.write().unwrap(); s.last_dir_refresh.insert(share_name.clone(), SystemTime::now()); Ok(()) @@ -445,7 +446,7 @@ fn probe_all_shares( } match handle.join() { 
Ok((name, Ok(()))) => { - println!(" Probe OK: {name}"); + info!(" Probe OK: {name}"); let mut status = shared_status.write().unwrap(); if let Some(ss) = status.shares.iter_mut().find(|s| s.name == name) { ss.health = ShareHealth::Healthy; @@ -454,14 +455,14 @@ fn probe_all_shares( } Ok((name, Err(e))) => { let msg = format!("{e}"); - eprintln!(" Probe FAILED: {name} — {msg}"); + error!(" Probe FAILED: {name} — {msg}"); let mut status = shared_status.write().unwrap(); if let Some(ss) = status.shares.iter_mut().find(|s| s.name == name) { ss.health = ShareHealth::Failed(msg); } } Err(_) => { - eprintln!(" Probe thread panicked"); + error!(" Probe thread panicked"); } } } @@ -549,7 +550,7 @@ fn start_and_wait_mounts(config: &Config, shutdown: &AtomicBool) -> Result ready[i] = true, Ok(false) => all_ready = false, Err(e) => { - eprintln!("Warning: mount check failed for '{}': {e}", share.name); + warn!("Warning: mount check failed for '{}': {e}", share.name); all_ready = false; } } @@ -579,7 +580,7 @@ fn spawn_smbd() -> Result { fn start_protocols(config: &Config) -> Result { let smbd = if config.protocols.enable_smb { let child = spawn_smbd()?; - println!(" SMB: started"); + info!(" SMB: started"); Some(child) } else { None @@ -593,12 +594,12 @@ fn start_protocols(config: &Config) -> Result { if !status.success() { anyhow::bail!("exportfs -ra failed: {status}"); } - println!(" NFS: exported"); + info!(" NFS: exported"); } let webdav = if config.protocols.enable_webdav { let child = spawn_webdav(config)?; - println!(" WebDAV: started"); + info!(" WebDAV: started"); Some(child) } else { None @@ -640,15 +641,15 @@ fn supervise( // Check for commands (non-blocking with timeout = POLL_INTERVAL) match cmd_rx.recv_timeout(POLL_INTERVAL) { Ok(SupervisorCmd::Shutdown) => { - println!("Shutdown command received."); + info!("Shutdown command received."); return Ok(()); } Ok(SupervisorCmd::BwLimit { up, down }) => { - println!("Applying bandwidth limit: up={up}, down={down}"); + 
info!("Applying bandwidth limit: up={up}, down={down}"); apply_bwlimit(mounts, &up, &down); } Ok(SupervisorCmd::Reload(new_config)) => { - println!("Config reload requested..."); + info!("Config reload requested..."); handle_reload( shared_config, shared_status, @@ -659,18 +660,18 @@ fn supervise( new_config, &shutdown, )?; - println!("Config reload complete."); + info!("Config reload complete."); } Err(RecvTimeoutError::Timeout) => {} // normal poll cycle Err(RecvTimeoutError::Disconnected) => { - println!("Command channel disconnected, shutting down."); + info!("Command channel disconnected, shutting down."); return Ok(()); } } // Check for shutdown signal if shutdown.load(Ordering::SeqCst) { - println!("Shutdown signal received."); + info!("Shutdown signal received."); return Ok(()); } @@ -695,11 +696,11 @@ fn supervise( if let Some(child) = &mut protocols.smbd { match child.try_wait() { Ok(Some(status)) => { - eprintln!("smbd exited ({status})."); + warn!("smbd exited ({status})."); if smbd_tracker.can_restart() { smbd_tracker.record_restart(); let delay = smbd_tracker.count * 2; - eprintln!( + warn!( "Restarting smbd in {delay}s ({}/{MAX_RESTARTS})...", smbd_tracker.count, ); @@ -707,19 +708,19 @@ fn supervise( match spawn_smbd() { Ok(new_child) => *child = new_child, Err(e) => { - eprintln!("Failed to restart smbd: {e}"); + error!("Failed to restart smbd: {e}"); protocols.smbd = None; } } } else { - eprintln!( + error!( "smbd exceeded max restarts ({MAX_RESTARTS}), giving up." 
); protocols.smbd = None; } } Ok(None) => {} - Err(e) => eprintln!("Warning: failed to check smbd status: {e}"), + Err(e) => warn!("Warning: failed to check smbd status: {e}"), } } @@ -728,11 +729,11 @@ fn supervise( if let Some(child) = &mut protocols.webdav { match child.try_wait() { Ok(Some(status)) => { - eprintln!("WebDAV exited ({status})."); + warn!("WebDAV exited ({status})."); if webdav_tracker.can_restart() { webdav_tracker.record_restart(); let delay = webdav_tracker.count * 2; - eprintln!( + warn!( "Restarting WebDAV in {delay}s ({}/{MAX_RESTARTS})...", webdav_tracker.count, ); @@ -740,19 +741,19 @@ fn supervise( match spawn_webdav(&config) { Ok(new_child) => *child = new_child, Err(e) => { - eprintln!("Failed to restart WebDAV: {e}"); + error!("Failed to restart WebDAV: {e}"); protocols.webdav = None; } } } else { - eprintln!( + error!( "WebDAV exceeded max restarts ({MAX_RESTARTS}), giving up." ); protocols.webdav = None; } } Ok(None) => {} - Err(e) => eprintln!("Warning: failed to check WebDAV status: {e}"), + Err(e) => warn!("Warning: failed to check WebDAV status: {e}"), } } @@ -818,8 +819,8 @@ fn update_status( fn apply_bwlimit(mounts: &[MountChild], up: &str, down: &str) { for mc in mounts { match rc::bwlimit(mc.rc_port, Some(up), Some(down)) { - Ok(_) => println!(" bwlimit applied to '{}'", mc.name), - Err(e) => eprintln!(" bwlimit failed for '{}': {e}", mc.name), + Ok(_) => info!(" bwlimit applied to '{}'", mc.name), + Err(e) => warn!(" bwlimit failed for '{}': {e}", mc.name), } } } @@ -839,18 +840,18 @@ fn handle_reload( let diff = config_diff::diff(&old_config, &new_config); if diff.is_empty() { - println!(" No changes detected."); + info!(" No changes detected."); return Ok(()); } - println!(" Changes: {}", diff.summary()); + info!(" Changes: {}", diff.summary()); match diff.highest_tier() { ChangeTier::None => {} ChangeTier::Live => { // Tier A: bandwidth only — RC API call, no restart - println!(" Tier A: applying bandwidth limits via RC 
API..."); + info!(" Tier A: applying bandwidth limits via RC API..."); apply_bwlimit(mounts, &new_config.bandwidth.limit_up, &new_config.bandwidth.limit_down); } @@ -860,7 +861,7 @@ fn handle_reload( if diff.bandwidth_changed { apply_bwlimit(mounts, &new_config.bandwidth.limit_up, &new_config.bandwidth.limit_down); } - println!(" Tier B: restarting protocol services..."); + info!(" Tier B: restarting protocol services..."); restart_protocols(protocols, smbd_tracker, webdav_tracker, &new_config)?; } @@ -875,13 +876,13 @@ fn handle_reload( || !diff.connections_removed.is_empty() || !diff.connections_modified.is_empty() { - println!(" Regenerating rclone.conf (connections changed)..."); + info!(" Regenerating rclone.conf (connections changed)..."); crate::rclone::config::write_config(&new_config)?; } // Handle removed shares: drain → unmount → kill for name in &diff.shares_removed { - println!(" Removing share '{name}'..."); + info!(" Removing share '{name}'..."); if let Some(idx) = mounts.iter().position(|mc| mc.name == *name) { let mc = &mounts[idx]; wait_writeback_drain(mc.rc_port); @@ -893,7 +894,7 @@ fn handle_reload( // Handle modified shares: treat as remove + add for name in &diff.shares_modified { - println!(" Restarting modified share '{name}'..."); + info!(" Restarting modified share '{name}'..."); // Remove old if let Some(idx) = mounts.iter().position(|mc| mc.name == *name) { let mc = &mounts[idx]; @@ -913,7 +914,7 @@ fn handle_reload( // Handle added shares: spawn new mount for name in &diff.shares_added { - println!(" Adding share '{name}'..."); + info!(" Adding share '{name}'..."); if let Some((i, share)) = new_config.shares.iter().enumerate().find(|(_, s)| s.name == *name) { let rc_port = new_config.rc_port(i); std::fs::create_dir_all(&share.mount_point).ok(); @@ -935,7 +936,7 @@ fn handle_reload( ChangeTier::Global => { // Tier D: global restart — drain all → stop everything → restart - println!(" Tier D: full restart (global settings changed)..."); 
+ info!(" Tier D: full restart (global settings changed)..."); // Drain all write-back queues for mc in mounts.iter() { @@ -1027,13 +1028,13 @@ fn handle_reload( // Re-trigger warmup if settings changed if diff.warmup_changed { - println!(" Warmup settings changed, re-triggering..."); + info!(" Warmup settings changed, re-triggering..."); spawn_warmup(&new_config, shared_status, shutdown); } // Re-trigger dir-refresh if settings changed if diff.dir_refresh_changed { - println!(" Dir-refresh settings changed, re-triggering..."); + info!(" Dir-refresh settings changed, re-triggering..."); spawn_dir_refresh(&new_config, shared_status, shutdown); } @@ -1061,7 +1062,7 @@ fn spawn_mount(config: &Config, share: &crate::config::ShareConfig, rc_port: u16 } } - println!(" Mount ready: {} at {}", share.name, share.mount_point.display()); + info!(" Mount ready: {} at {}", share.name, share.mount_point.display()); Ok(MountChild { name: share.name.clone(), child, @@ -1092,18 +1093,18 @@ fn unmount_share(config: &Config, share_name: &str) { fn stop_protocols(protocols: &mut ProtocolChildren, config: &Config) { if let Some(child) = &mut protocols.smbd { graceful_kill(child); - println!(" SMB: stopped"); + info!(" SMB: stopped"); } protocols.smbd = None; if config.protocols.enable_nfs { let _ = Command::new("exportfs").arg("-ua").status(); - println!(" NFS: unexported"); + info!(" NFS: unexported"); } if let Some(child) = &mut protocols.webdav { graceful_kill(child); - println!(" WebDAV: stopped"); + info!(" WebDAV: stopped"); } protocols.webdav = None; } @@ -1123,13 +1124,13 @@ fn reload_protocol_configs(protocols: &ProtocolChildren, config: &Config) -> Res let pid = child.id() as i32; // SAFETY: sending SIGHUP to a known child PID is safe. 
unsafe { libc::kill(pid, libc::SIGHUP) }; - println!(" SMB: config reloaded (SIGHUP)"); + info!(" SMB: config reloaded (SIGHUP)"); } } if config.protocols.enable_nfs { nfs::write_config(config)?; let _ = Command::new("exportfs").arg("-ra").status(); - println!(" NFS: re-exported"); + info!(" NFS: re-exported"); } Ok(()) } @@ -1206,17 +1207,17 @@ fn wait_writeback_drain(port: u16) { let pending = dc.uploads_in_progress + dc.uploads_queued; if pending == 0 { if !first { - println!(" Write-back queue drained."); + info!(" Write-back queue drained."); } return; } if first { - println!( + info!( " Waiting for write-back queue ({pending} files pending)..." ); first = false; } else { - eprint!("\r Write-back: {pending} files remaining... "); + info!(" Write-back: {pending} files remaining..."); } } else { return; @@ -1226,8 +1227,7 @@ fn wait_writeback_drain(port: u16) { } if Instant::now() > deadline { - eprintln!(); - eprintln!( + warn!( " Warning: write-back drain timed out after {}s, proceeding.", WRITEBACK_DRAIN_TIMEOUT.as_secs() ); @@ -1264,13 +1264,13 @@ fn shutdown_services(config: &Config, mounts: &mut Vec, protocols: & } } } - println!(" FUSE: unmounted"); + info!(" FUSE: unmounted"); // Gracefully stop all rclone mount processes for mc in mounts.iter_mut() { graceful_kill(&mut mc.child); } - println!(" rclone: stopped"); + info!(" rclone: stopped"); } #[cfg(test)] diff --git a/src/web/api.rs b/src/web/api.rs index fe07ee5..ca47026 100644 --- a/src/web/api.rs +++ b/src/web/api.rs @@ -11,7 +11,7 @@ use axum::Router; use serde::Serialize; use crate::config::Config; -use crate::daemon::{LogEntry, SupervisorCmd}; +use crate::daemon::SupervisorCmd; use crate::web::SharedState; pub fn routes() -> Router { @@ -305,27 +305,64 @@ async fn post_bwlimit( } } -/// GET /api/logs?since=0 — recent log entries. +/// GET /api/logs?lines=200&from_line=0 — recent log file entries. 
 #[derive(serde::Deserialize)]
 struct LogsQuery {
+    #[serde(default = "default_lines")]
+    lines: usize,
     #[serde(default)]
-    since: u64,
+    from_line: usize,
+}
+
+fn default_lines() -> usize {
+    200
 }
 
 #[derive(Serialize)]
 struct LogsResponse {
-    next_id: u64,
-    entries: Vec<LogEntry>,
+    total_lines: usize,
+    entries: Vec<String>,
 }
 
 async fn get_logs(
     State(state): State<SharedState>,
     Query(params): Query<LogsQuery>,
 ) -> Json<LogsResponse> {
-    let logs = state.logs.read().unwrap();
-    let entries = logs.since(params.since);
+    let log_file = {
+        let config = state.config.read().unwrap();
+        config.log.file.clone()
+    };
+
+    if log_file.is_empty() {
+        return Json(LogsResponse {
+            total_lines: 0,
+            entries: vec![],
+        });
+    }
+
+    let content = match std::fs::read_to_string(&log_file) {
+        Ok(c) => c,
+        Err(_) => {
+            return Json(LogsResponse {
+                total_lines: 0,
+                entries: vec![],
+            })
+        }
+    };
+
+    let all_lines: Vec<String> = content
+        .lines()
+        .map(|l| l.to_string())
+        .collect();
+    let total_lines = all_lines.len();
+    let entries: Vec<String> = all_lines
+        .into_iter()
+        .skip(params.from_line)
+        .take(params.lines)
+        .collect();
+
     Json(LogsResponse {
-        next_id: logs.next_id(),
+        total_lines,
         entries,
     })
 }
diff --git a/src/web/mod.rs b/src/web/mod.rs
index 080ab17..46aa587 100644
--- a/src/web/mod.rs
+++ b/src/web/mod.rs
@@ -17,7 +17,7 @@ use axum::routing::get;
 use axum::Router;
 
 use crate::config::Config;
-use crate::daemon::{AppState, DaemonStatus, LogBuffer, SupervisorCmd, DEFAULT_WEB_PORT};
+use crate::daemon::{AppState, DaemonStatus, SupervisorCmd, DEFAULT_WEB_PORT};
 
 /// Axum-compatible shared state (wraps AppState in an Arc for axum).
 pub type SharedState = Arc<AppState>;
 
@@ -50,18 +50,12 @@ pub fn spawn_web_server(
     sse_tx: tokio::sync::broadcast::Sender<()>,
 ) -> thread::JoinHandle<()> {
     thread::spawn(move || {
-        let logs = Arc::new(RwLock::new(LogBuffer::new()));
-        {
-            let mut lb = logs.write().unwrap();
-            lb.push("Web UI started");
-        }
         let state = Arc::new(AppState {
             config,
             status,
             cmd_tx,
             config_path,
             sse_tx,
-            logs,
         });
 
         let rt = tokio::runtime::Builder::new_multi_thread()
@@ -76,9 +70,9 @@ pub fn spawn_web_server(
             let listener = tokio::net::TcpListener::bind(&addr)
                 .await
                 .unwrap_or_else(|e| panic!("Failed to bind web server to {addr}: {e}"));
-            println!(" Web UI: http://localhost:{DEFAULT_WEB_PORT}");
+            tracing::info!(" Web UI: http://localhost:{DEFAULT_WEB_PORT}");
             if let Err(e) = axum::serve(listener, app).await {
-                eprintln!("Web server error: {e}");
+                tracing::error!("Web server error: {e}");
             }
         });
     })
diff --git a/src/web/pages.rs b/src/web/pages.rs
index a53de13..ec182e2 100644
--- a/src/web/pages.rs
+++ b/src/web/pages.rs
@@ -571,10 +571,7 @@ async fn config_apply(
         });
     }
 
-    {
-        let mut logs = state.logs.write().unwrap();
-        logs.push(format!("Config applied: {diff_summary}"));
-    }
+    tracing::info!("Config applied: {diff_summary}");
 
     Json(ConfigApplyResponse {
         ok: true,
diff --git a/templates/web/tabs/logs.html b/templates/web/tabs/logs.html
index cc3c07f..291e499 100644
--- a/templates/web/tabs/logs.html
+++ b/templates/web/tabs/logs.html
@@ -2,7 +2,7 @@
 function logViewerFn() {
   return {
     entries: [],
-    nextId: 0,
+    fromLine: 0,
     polling: null,
     autoScroll: true,
@@ -17,7 +17,7 @@
     async fetchLogs() {
       try {
-        const resp = await fetch('/api/logs?since=' + this.nextId);
+        const resp = await fetch('/api/logs?lines=200&from_line=' + this.fromLine);
         const data = await resp.json();
         if (data.entries.length > 0) {
           this.entries = this.entries.concat(data.entries);
@@ -25,7 +25,7 @@
           if (this.entries.length > 1000) {
             this.entries = this.entries.slice(-500);
           }
-
this.nextId = data.next_id; + this.fromLine = data.total_lines; if (this.autoScroll) { this.$nextTick(() => { const el = this.$refs.logBox; @@ -36,11 +36,6 @@ function logViewerFn() { } catch(e) { /* ignore fetch errors */ } }, - formatTime(ts) { - const d = new Date(ts * 1000); - return d.toLocaleTimeString('en-GB', { hour12: false }); - }, - clear() { this.entries = []; } @@ -74,10 +69,9 @@ if (window.Alpine) { -