Unify logging to tracing: file appender + unified log viewer

Replace scattered println!/eprintln! with structured tracing macros throughout
supervisor, scheduler, and web modules. Add LogConfig (file + level) to Config
and a new logging module that initialises a stderr + optional non-blocking file
appender on `warpgate run`. Remove the in-memory LogBuffer/LogEntry from
AppState; the web /api/logs endpoint now reads the log file directly with
from_line/lines pagination. `warpgate log` replaces journalctl with `tail`,
and the Logs tab Alpine.js is updated to match the new API response shape.

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
grabbit 2026-02-19 11:24:06 +08:00
parent 74b0e72549
commit 64d6171ec9
13 changed files with 413 additions and 192 deletions

146
Cargo.lock generated
View File

@ -8,6 +8,15 @@ version = "2.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "320119579fcad9c21884f5c4861d16174d0e06250625266f50fe6898340abefa"
[[package]]
name = "aho-corasick"
version = "1.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ddd31a130427c27518df266943a5308ed92d4b226cc639f5a8f1002816174301"
dependencies = [
"memchr",
]
[[package]]
name = "anstream"
version = "0.6.21"
@ -316,6 +325,21 @@ dependencies = [
"cfg-if",
]
[[package]]
name = "crossbeam-channel"
version = "0.5.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "82b8f8f868b36967f9606790d1903570de9ceaf870a7bf9fbbd3016d636a2cb2"
dependencies = [
"crossbeam-utils",
]
[[package]]
name = "crossbeam-utils"
version = "0.8.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28"
[[package]]
name = "ctrlc"
version = "3.5.2"
@ -666,6 +690,12 @@ version = "1.0.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "92ecc6618181def0457392ccd0ee51198e065e016d1d527a7ac1b6dc7c1f09d2"
[[package]]
name = "lazy_static"
version = "1.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe"
[[package]]
name = "libc"
version = "0.2.180"
@ -690,6 +720,15 @@ version = "0.4.29"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5e5032e24019045c762d3c0f28f5b6b8bbf38563a65908389bf7978758920897"
[[package]]
name = "matchers"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d1525a2a28c7f4fa0fc98bb91ae755d1e2d1505079e05539e35bc876b5d65ae9"
dependencies = [
"regex-automata",
]
[[package]]
name = "matchit"
version = "0.8.4"
@ -741,6 +780,15 @@ dependencies = [
"libc",
]
[[package]]
name = "nu-ansi-term"
version = "0.50.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7957b9740744892f114936ab4a57b3f487491bbeafaf8083688b16841a4240e5"
dependencies = [
"windows-sys 0.61.2",
]
[[package]]
name = "num-conv"
version = "0.1.0"
@ -825,6 +873,23 @@ dependencies = [
"proc-macro2",
]
[[package]]
name = "regex-automata"
version = "0.4.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6e1dd4122fc1595e8162618945476892eefca7b88c52820e74af6262213cae8f"
dependencies = [
"aho-corasick",
"memchr",
"regex-syntax",
]
[[package]]
name = "regex-syntax"
version = "0.8.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a96887878f22d7bad8a3b6dc5b7440e0ada9a245242924394987b21cf2210a4c"
[[package]]
name = "ring"
version = "0.17.14"
@ -961,6 +1026,15 @@ dependencies = [
"serde",
]
[[package]]
name = "sharded-slab"
version = "0.1.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6"
dependencies = [
"lazy_static",
]
[[package]]
name = "shlex"
version = "1.3.0"
@ -1061,6 +1135,15 @@ dependencies = [
"syn",
]
[[package]]
name = "thread_local"
version = "1.1.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f60246a4944f24f6e018aa17cdeffb7818b76356965d03b07d6a9886e8962185"
dependencies = [
"cfg-if",
]
[[package]]
name = "time"
version = "0.3.44"
@ -1241,9 +1324,33 @@ checksum = "63e71662fa4b2a2c3a26f570f037eb95bb1f85397f3cd8076caed2f026a6d100"
dependencies = [
"log",
"pin-project-lite",
"tracing-attributes",
"tracing-core",
]
[[package]]
name = "tracing-appender"
version = "0.2.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "786d480bce6247ab75f005b14ae1624ad978d3029d9113f0a22fa1ac773faeaf"
dependencies = [
"crossbeam-channel",
"thiserror",
"time",
"tracing-subscriber",
]
[[package]]
name = "tracing-attributes"
version = "0.1.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7490cfa5ec963746568740651ac6781f701c9c5ea257c58e057f3ba8cf69e8da"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "tracing-core"
version = "0.1.36"
@ -1251,6 +1358,36 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "db97caf9d906fbde555dd62fa95ddba9eecfd14cb388e4f491a66d74cd5fb79a"
dependencies = [
"once_cell",
"valuable",
]
[[package]]
name = "tracing-log"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3"
dependencies = [
"log",
"once_cell",
"tracing-core",
]
[[package]]
name = "tracing-subscriber"
version = "0.3.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2f30143827ddab0d256fd843b7a66d164e9f271cfa0dde49142c5ca0ca291f1e"
dependencies = [
"matchers",
"nu-ansi-term",
"once_cell",
"regex-automata",
"sharded-slab",
"smallvec",
"thread_local",
"tracing",
"tracing-core",
"tracing-log",
]
[[package]]
@ -1327,6 +1464,12 @@ version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821"
[[package]]
name = "valuable"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65"
[[package]]
name = "version_check"
version = "0.9.5"
@ -1350,6 +1493,9 @@ dependencies = [
"tokio-stream",
"toml",
"tower-http",
"tracing",
"tracing-appender",
"tracing-subscriber",
"ureq",
]

View File

@ -18,3 +18,6 @@ tokio-stream = { version = "0.1", features = ["sync"] }
axum = "0.8"
askama = "0.15"
tower-http = { version = "0.6", features = ["cors"] }
tracing = "0.1"
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
tracing-appender = "0.2"

View File

@ -1,4 +1,4 @@
//! `warpgate log` — stream service logs in real time.
//! `warpgate log` — stream service logs from the configured log file.
use std::process::Command;
@ -6,27 +6,24 @@ use anyhow::{Context, Result};
use crate::config::Config;
pub fn run(_config: &Config, lines: u32, follow: bool) -> Result<()> {
let mut cmd = Command::new("journalctl");
cmd.arg("-u")
.arg("warpgate-mount")
.arg("-n")
.arg(lines.to_string());
pub fn run(config: &Config, lines: u32, follow: bool) -> Result<()> {
let log_file = &config.log.file;
if log_file.is_empty() {
anyhow::bail!(
"No log file configured. Set [log] file = \"/var/log/warpgate/warpgate.log\" in config."
);
}
if follow {
// Stream directly to stdout with -f (like tail -f)
cmd.arg("-f");
let status = cmd.status().context("Failed to run journalctl")?;
if !status.success() {
anyhow::bail!("journalctl exited with status {}", status);
}
Command::new("tail")
.args(["-f", "-n", &lines.to_string(), log_file])
.status()
.context("Failed to run tail -f")?;
} else {
let output = cmd.output().context("Failed to run journalctl")?;
if !output.status.success() {
let stderr = String::from_utf8_lossy(&output.stderr);
anyhow::bail!("journalctl failed: {}", stderr.trim());
}
print!("{}", String::from_utf8_lossy(&output.stdout));
Command::new("tail")
.args(["-n", &lines.to_string(), log_file])
.status()
.context("Failed to run tail")?;
}
Ok(())

View File

@ -31,9 +31,39 @@ pub struct Config {
pub smb_auth: SmbAuthConfig,
#[serde(default)]
pub dir_refresh: DirRefreshConfig,
#[serde(default)]
pub log: LogConfig,
pub shares: Vec<ShareConfig>,
}
/// Logging configuration.
///
/// Deserialized from the `[log]` table of the config file; both fields have
/// serde defaults so the whole section may be omitted.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct LogConfig {
    /// Log file path. Empty string = no file logging.
    #[serde(default = "default_log_file")]
    pub file: String,
    /// Minimum log level: error / warn / info / debug / trace.
    /// Parsed as a `tracing_subscriber` EnvFilter directive by `logging::init`,
    /// so richer filter expressions are also accepted.
    #[serde(default = "default_log_level")]
    pub level: String,
}
impl Default for LogConfig {
fn default() -> Self {
Self {
file: default_log_file(),
level: default_log_level(),
}
}
}
/// Serde default for `LogConfig::file`.
fn default_log_file() -> String {
    String::from("/var/log/warpgate/warpgate.log")
}
/// Serde default for `LogConfig::level`.
fn default_log_level() -> String {
    String::from("info")
}
/// SFTP connection to a remote NAS.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct ConnectionConfig {
@ -457,6 +487,13 @@ impl Config {
writeln!(out).unwrap();
}
// --- Log ---
writeln!(out, "# --- Log ---").unwrap();
writeln!(out, "[log]").unwrap();
writeln!(out, "file = {:?}", self.log.file).unwrap();
writeln!(out, "level = {:?}", self.log.level).unwrap();
writeln!(out).unwrap();
// --- Warmup ---
writeln!(out, "# --- Warmup (change = no restart) ---").unwrap();
writeln!(out, "[warmup]").unwrap();

View File

@ -3,12 +3,12 @@
//! The supervisor owns all mutable state. The web server gets read-only access
//! to status via `Arc<RwLock<DaemonStatus>>` and sends commands via an mpsc channel.
use std::collections::{HashMap, VecDeque};
use std::collections::HashMap;
use std::path::PathBuf;
use std::sync::atomic::AtomicU64;
use std::sync::mpsc;
use std::sync::{Arc, RwLock};
use std::time::{Instant, SystemTime, UNIX_EPOCH};
use std::time::{Instant, SystemTime};
use crate::config::Config;
@ -28,67 +28,6 @@ pub struct AppState {
/// SSE broadcast: supervisor sends `()` after each status update;
/// web server subscribers render partials and push to connected clients.
pub sse_tx: tokio::sync::broadcast::Sender<()>,
/// Ring buffer of log entries for the web UI.
pub logs: Arc<RwLock<LogBuffer>>,
}
/// Ring buffer of timestamped log entries for the web log viewer.
pub struct LogBuffer {
entries: VecDeque<LogEntry>,
/// Monotonically increasing ID for the next entry.
next_id: u64,
}
/// A single log entry with unix timestamp and message.
#[derive(Clone, serde::Serialize)]
pub struct LogEntry {
pub id: u64,
pub ts: u64,
pub msg: String,
}
const LOG_BUFFER_MAX: usize = 500;
impl LogBuffer {
pub fn new() -> Self {
Self {
entries: VecDeque::new(),
next_id: 0,
}
}
/// Push a new log message. Timestamps are added automatically.
pub fn push(&mut self, msg: impl Into<String>) {
let ts = SystemTime::now()
.duration_since(UNIX_EPOCH)
.unwrap_or_default()
.as_secs();
self.entries.push_back(LogEntry {
id: self.next_id,
ts,
msg: msg.into(),
});
self.next_id += 1;
if self.entries.len() > LOG_BUFFER_MAX {
self.entries.pop_front();
}
}
/// Get entries with ID >= `since_id`.
pub fn since(&self, since_id: u64) -> Vec<LogEntry> {
let start_id = self.next_id.saturating_sub(self.entries.len() as u64);
let skip = if since_id > start_id {
(since_id - start_id) as usize
} else {
0
};
self.entries.iter().skip(skip).cloned().collect()
}
/// The ID that the next pushed entry will have.
pub fn next_id(&self) -> u64 {
self.next_id
}
}
/// Overall daemon status, updated by the supervisor loop.

66
src/logging.rs Normal file
View File

@ -0,0 +1,66 @@
//! Unified logging initializer.
//!
//! Configures `tracing` with both a stderr console layer and an optional
//! non-blocking file appender. The returned `WorkerGuard` must be kept alive
//! for the duration of the process — dropping it flushes and closes the file.
use tracing_appender::non_blocking::WorkerGuard;
/// Initialize tracing for the `run` command (console + optional file).
///
/// Returns a `WorkerGuard` when file logging is active. The caller must hold
/// this value until the process exits so the background writer thread can flush.
pub fn init(log_config: &crate::config::LogConfig) -> Option<WorkerGuard> {
    use tracing_subscriber::{fmt, layer::SubscriberExt, util::SubscriberInitExt, EnvFilter};

    // Parse the configured level as an EnvFilter directive. An invalid value
    // must not silently change verbosity: report it on stderr (tracing is not
    // initialized yet, so eprintln! is the only channel), then fall back.
    let filter = EnvFilter::try_new(&log_config.level).unwrap_or_else(|e| {
        eprintln!(
            "Warning: invalid log level {:?} ({e}); falling back to \"info\"",
            log_config.level
        );
        EnvFilter::new("info")
    });

    let console_layer = fmt::layer().with_target(false).compact();

    if log_config.file.is_empty() {
        // Console-only mode: no file appender, hence no guard to hand back.
        tracing_subscriber::registry()
            .with(filter)
            .with(console_layer)
            .init();
        return None;
    }

    // Ensure the log directory exists. Surface failures instead of discarding
    // them — the file appender below cannot write if this did not succeed.
    let log_path = std::path::Path::new(&log_config.file);
    if let Some(dir) = log_path.parent() {
        if let Err(e) = std::fs::create_dir_all(dir) {
            eprintln!(
                "Warning: could not create log directory {}: {e}",
                dir.display()
            );
        }
    }

    // `rolling::never` = one fixed file (no rotation) at dir/file_name.
    let file_appender = tracing_appender::rolling::never(
        log_path
            .parent()
            .unwrap_or(std::path::Path::new(".")),
        log_path
            .file_name()
            .unwrap_or(std::ffi::OsStr::new("warpgate.log")),
    );
    // Non-blocking writer: log calls never stall on disk I/O. The returned
    // guard owns the writer thread; dropping it flushes and closes the file.
    let (non_blocking, guard) = tracing_appender::non_blocking(file_appender);
    let file_layer = fmt::layer()
        .with_writer(non_blocking)
        .with_ansi(false) // no ANSI color codes in the log file
        .with_target(false)
        .compact();

    tracing_subscriber::registry()
        .with(filter)
        .with(console_layer)
        .with(file_layer)
        .init();
    Some(guard)
}
/// Initialize tracing for CLI sub-commands (console only, no file).
///
/// Fixed at the "info" level; uses `try_init` and ignores the error so that
/// calling this more than once (or after `init`) is harmless.
pub fn init_console() {
    use tracing_subscriber::{fmt, layer::SubscriberExt, util::SubscriberInitExt, EnvFilter};

    // "info" is a known-valid directive, so construct the filter directly
    // instead of try_new(...).unwrap().
    let filter = EnvFilter::new("info");
    let _ = tracing_subscriber::registry()
        .with(filter)
        .with(fmt::layer().with_target(false).compact())
        .try_init();
}

View File

@ -3,6 +3,7 @@ mod config;
mod config_diff;
mod daemon;
mod deploy;
mod logging;
mod rclone;
mod scheduler;
mod services;
@ -100,6 +101,13 @@ fn main() -> Result<()> {
cmd => {
let config = Config::load(&cli.config)?;
match cmd {
Commands::Run => {
let _guard = logging::init(&config.log);
supervisor::run(&config, cli.config.clone())
}
other => {
logging::init_console();
match other {
Commands::Status => cli::status::run(&config),
Commands::CacheList => cli::cache::list(&config),
Commands::CacheClean { all } => cli::cache::clean(&config, all),
@ -111,9 +119,10 @@ fn main() -> Result<()> {
}
Commands::Log { lines, follow } => cli::log::run(&config, lines, follow),
Commands::SpeedTest => cli::speed_test::run(&config),
Commands::Run => supervisor::run(&config, cli.config.clone()),
// already handled above
Commands::ConfigInit { .. } | Commands::Deploy => unreachable!(),
Commands::Run | Commands::ConfigInit { .. } | Commands::Deploy => unreachable!(),
}
}
}
}
}

View File

@ -9,6 +9,8 @@ use std::sync::Arc;
use std::thread;
use std::time::{Duration, Instant};
use tracing::warn;
/// A named periodic task.
pub struct ScheduledTask {
pub name: &'static str,
@ -57,7 +59,7 @@ impl ScheduledTask {
}
if let Err(e) = work() {
eprintln!("[{}] error: {e}", self.name);
warn!("[{}] error: {e}", self.name);
}
});
}

View File

@ -14,6 +14,7 @@ use std::thread;
use std::time::{Duration, Instant, SystemTime};
use anyhow::{Context, Result};
use tracing::{error, info, warn};
use crate::config::Config;
use crate::config_diff::{self, ChangeTier};
@ -100,7 +101,7 @@ pub fn run(config: &Config, config_path: PathBuf) -> Result<()> {
// Install signal handler (SIGTERM + SIGINT)
let shutdown_flag = Arc::clone(&shutdown);
ctrlc::set_handler(move || {
eprintln!("Signal received, shutting down...");
info!("Signal received, shutting down...");
shutdown_flag.store(true, Ordering::SeqCst);
})
.context("Failed to set signal handler")?;
@ -138,11 +139,11 @@ pub fn run(config: &Config, config_path: PathBuf) -> Result<()> {
});
// Phase 1: Preflight — create dirs, write rclone.conf
println!("Preflight checks...");
info!("Preflight checks...");
preflight(config)?;
// Phase 1.5: Probe remote paths in parallel
println!("Probing remote paths...");
info!("Probing remote paths...");
let healthy_names = probe_all_shares(config, &shared_status, &shutdown)?;
if healthy_names.is_empty() {
@ -159,10 +160,10 @@ pub fn run(config: &Config, config_path: PathBuf) -> Result<()> {
write_protocol_configs(&healthy_config)?;
// Phase 2: Start rclone mounts only for healthy shares
println!("Starting rclone mounts...");
info!("Starting rclone mounts...");
let mut mount_children = start_and_wait_mounts(&healthy_config, &shutdown)?;
for share in &healthy_config.shares {
println!(" Mount ready at {}", share.mount_point.display());
info!(" Mount ready at {}", share.mount_point.display());
}
// Update status: mounts are ready (match by name, not index)
@ -178,14 +179,14 @@ pub fn run(config: &Config, config_path: PathBuf) -> Result<()> {
// Phase 3: Start protocol services
if shutdown.load(Ordering::SeqCst) {
println!("Shutdown signal received during mount.");
info!("Shutdown signal received during mount.");
for mc in &mut mount_children {
let _ = mc.child.kill();
let _ = mc.child.wait();
}
return Ok(());
}
println!("Starting protocol services...");
info!("Starting protocol services...");
let mut protocols = start_protocols(&healthy_config)?;
// Update status: protocols running
@ -202,7 +203,7 @@ pub fn run(config: &Config, config_path: PathBuf) -> Result<()> {
spawn_dir_refresh(config, &shared_status, &shutdown);
// Phase 4: Supervision loop with command channel
println!("Supervision active. Web UI at http://localhost:8090. Press Ctrl+C to stop.");
info!("Supervision active. Web UI at http://localhost:8090. Press Ctrl+C to stop.");
let result = supervise(
&shared_config,
&shared_status,
@ -214,7 +215,7 @@ pub fn run(config: &Config, config_path: PathBuf) -> Result<()> {
);
// Phase 5: Teardown (always runs)
println!("Shutting down...");
info!("Shutting down...");
let config = shared_config.read().unwrap().clone();
shutdown_services(&config, &mut mount_children, &mut protocols);
@ -265,17 +266,17 @@ fn spawn_warmup(
let warmup_shutdown = Arc::clone(shutdown);
thread::spawn(move || {
println!("Auto-warmup started (background, generation {generation})...");
info!("Auto-warmup started (background, generation {generation})...");
for (i, rule) in warmup_config.warmup.rules.iter().enumerate() {
if warmup_shutdown.load(Ordering::SeqCst) {
println!("Auto-warmup interrupted by shutdown.");
info!("Auto-warmup interrupted by shutdown.");
break;
}
// Check if our generation is still current
{
let status = warmup_status.read().unwrap();
if status.warmup_generation != generation {
println!("Auto-warmup superseded by newer generation.");
info!("Auto-warmup superseded by newer generation.");
return;
}
}
@ -289,10 +290,10 @@ fn spawn_warmup(
generation,
&warmup_shutdown,
) {
eprintln!("Warmup warning: {e}");
warn!("Warmup warning: {e}");
}
}
println!("Auto-warmup complete.");
info!("Auto-warmup complete.");
});
}
@ -340,7 +341,7 @@ fn spawn_dir_refresh(
let gen_arc2 = Arc::clone(&gen_arc);
let sd = Arc::clone(shutdown);
println!(
info!(
" dir-refresh: scheduling '{}' every {}s",
share_name,
interval.as_secs()
@ -353,7 +354,7 @@ fn spawn_dir_refresh(
.spawn(generation, gen_arc2, sd, move || {
rc::vfs_refresh(rc_port, "/", recursive)
.with_context(|| format!("dir-refresh for '{share_name}'"))?;
println!(" dir-refresh OK: {share_name}");
info!(" dir-refresh OK: {share_name}");
let mut s = status.write().unwrap();
s.last_dir_refresh.insert(share_name.clone(), SystemTime::now());
Ok(())
@ -445,7 +446,7 @@ fn probe_all_shares(
}
match handle.join() {
Ok((name, Ok(()))) => {
println!(" Probe OK: {name}");
info!(" Probe OK: {name}");
let mut status = shared_status.write().unwrap();
if let Some(ss) = status.shares.iter_mut().find(|s| s.name == name) {
ss.health = ShareHealth::Healthy;
@ -454,14 +455,14 @@ fn probe_all_shares(
}
Ok((name, Err(e))) => {
let msg = format!("{e}");
eprintln!(" Probe FAILED: {name}{msg}");
error!(" Probe FAILED: {name} — {msg}");
let mut status = shared_status.write().unwrap();
if let Some(ss) = status.shares.iter_mut().find(|s| s.name == name) {
ss.health = ShareHealth::Failed(msg);
}
}
Err(_) => {
eprintln!(" Probe thread panicked");
error!(" Probe thread panicked");
}
}
}
@ -549,7 +550,7 @@ fn start_and_wait_mounts(config: &Config, shutdown: &AtomicBool) -> Result<Vec<M
Ok(true) => ready[i] = true,
Ok(false) => all_ready = false,
Err(e) => {
eprintln!("Warning: mount check failed for '{}': {e}", share.name);
warn!("Warning: mount check failed for '{}': {e}", share.name);
all_ready = false;
}
}
@ -579,7 +580,7 @@ fn spawn_smbd() -> Result<Child> {
fn start_protocols(config: &Config) -> Result<ProtocolChildren> {
let smbd = if config.protocols.enable_smb {
let child = spawn_smbd()?;
println!(" SMB: started");
info!(" SMB: started");
Some(child)
} else {
None
@ -593,12 +594,12 @@ fn start_protocols(config: &Config) -> Result<ProtocolChildren> {
if !status.success() {
anyhow::bail!("exportfs -ra failed: {status}");
}
println!(" NFS: exported");
info!(" NFS: exported");
}
let webdav = if config.protocols.enable_webdav {
let child = spawn_webdav(config)?;
println!(" WebDAV: started");
info!(" WebDAV: started");
Some(child)
} else {
None
@ -640,15 +641,15 @@ fn supervise(
// Check for commands (non-blocking with timeout = POLL_INTERVAL)
match cmd_rx.recv_timeout(POLL_INTERVAL) {
Ok(SupervisorCmd::Shutdown) => {
println!("Shutdown command received.");
info!("Shutdown command received.");
return Ok(());
}
Ok(SupervisorCmd::BwLimit { up, down }) => {
println!("Applying bandwidth limit: up={up}, down={down}");
info!("Applying bandwidth limit: up={up}, down={down}");
apply_bwlimit(mounts, &up, &down);
}
Ok(SupervisorCmd::Reload(new_config)) => {
println!("Config reload requested...");
info!("Config reload requested...");
handle_reload(
shared_config,
shared_status,
@ -659,18 +660,18 @@ fn supervise(
new_config,
&shutdown,
)?;
println!("Config reload complete.");
info!("Config reload complete.");
}
Err(RecvTimeoutError::Timeout) => {} // normal poll cycle
Err(RecvTimeoutError::Disconnected) => {
println!("Command channel disconnected, shutting down.");
info!("Command channel disconnected, shutting down.");
return Ok(());
}
}
// Check for shutdown signal
if shutdown.load(Ordering::SeqCst) {
println!("Shutdown signal received.");
info!("Shutdown signal received.");
return Ok(());
}
@ -695,11 +696,11 @@ fn supervise(
if let Some(child) = &mut protocols.smbd {
match child.try_wait() {
Ok(Some(status)) => {
eprintln!("smbd exited ({status}).");
warn!("smbd exited ({status}).");
if smbd_tracker.can_restart() {
smbd_tracker.record_restart();
let delay = smbd_tracker.count * 2;
eprintln!(
warn!(
"Restarting smbd in {delay}s ({}/{MAX_RESTARTS})...",
smbd_tracker.count,
);
@ -707,19 +708,19 @@ fn supervise(
match spawn_smbd() {
Ok(new_child) => *child = new_child,
Err(e) => {
eprintln!("Failed to restart smbd: {e}");
error!("Failed to restart smbd: {e}");
protocols.smbd = None;
}
}
} else {
eprintln!(
error!(
"smbd exceeded max restarts ({MAX_RESTARTS}), giving up."
);
protocols.smbd = None;
}
}
Ok(None) => {}
Err(e) => eprintln!("Warning: failed to check smbd status: {e}"),
Err(e) => warn!("Warning: failed to check smbd status: {e}"),
}
}
@ -728,11 +729,11 @@ fn supervise(
if let Some(child) = &mut protocols.webdav {
match child.try_wait() {
Ok(Some(status)) => {
eprintln!("WebDAV exited ({status}).");
warn!("WebDAV exited ({status}).");
if webdav_tracker.can_restart() {
webdav_tracker.record_restart();
let delay = webdav_tracker.count * 2;
eprintln!(
warn!(
"Restarting WebDAV in {delay}s ({}/{MAX_RESTARTS})...",
webdav_tracker.count,
);
@ -740,19 +741,19 @@ fn supervise(
match spawn_webdav(&config) {
Ok(new_child) => *child = new_child,
Err(e) => {
eprintln!("Failed to restart WebDAV: {e}");
error!("Failed to restart WebDAV: {e}");
protocols.webdav = None;
}
}
} else {
eprintln!(
error!(
"WebDAV exceeded max restarts ({MAX_RESTARTS}), giving up."
);
protocols.webdav = None;
}
}
Ok(None) => {}
Err(e) => eprintln!("Warning: failed to check WebDAV status: {e}"),
Err(e) => warn!("Warning: failed to check WebDAV status: {e}"),
}
}
@ -818,8 +819,8 @@ fn update_status(
fn apply_bwlimit(mounts: &[MountChild], up: &str, down: &str) {
for mc in mounts {
match rc::bwlimit(mc.rc_port, Some(up), Some(down)) {
Ok(_) => println!(" bwlimit applied to '{}'", mc.name),
Err(e) => eprintln!(" bwlimit failed for '{}': {e}", mc.name),
Ok(_) => info!(" bwlimit applied to '{}'", mc.name),
Err(e) => warn!(" bwlimit failed for '{}': {e}", mc.name),
}
}
}
@ -839,18 +840,18 @@ fn handle_reload(
let diff = config_diff::diff(&old_config, &new_config);
if diff.is_empty() {
println!(" No changes detected.");
info!(" No changes detected.");
return Ok(());
}
println!(" Changes: {}", diff.summary());
info!(" Changes: {}", diff.summary());
match diff.highest_tier() {
ChangeTier::None => {}
ChangeTier::Live => {
// Tier A: bandwidth only — RC API call, no restart
println!(" Tier A: applying bandwidth limits via RC API...");
info!(" Tier A: applying bandwidth limits via RC API...");
apply_bwlimit(mounts, &new_config.bandwidth.limit_up, &new_config.bandwidth.limit_down);
}
@ -860,7 +861,7 @@ fn handle_reload(
if diff.bandwidth_changed {
apply_bwlimit(mounts, &new_config.bandwidth.limit_up, &new_config.bandwidth.limit_down);
}
println!(" Tier B: restarting protocol services...");
info!(" Tier B: restarting protocol services...");
restart_protocols(protocols, smbd_tracker, webdav_tracker, &new_config)?;
}
@ -875,13 +876,13 @@ fn handle_reload(
|| !diff.connections_removed.is_empty()
|| !diff.connections_modified.is_empty()
{
println!(" Regenerating rclone.conf (connections changed)...");
info!(" Regenerating rclone.conf (connections changed)...");
crate::rclone::config::write_config(&new_config)?;
}
// Handle removed shares: drain → unmount → kill
for name in &diff.shares_removed {
println!(" Removing share '{name}'...");
info!(" Removing share '{name}'...");
if let Some(idx) = mounts.iter().position(|mc| mc.name == *name) {
let mc = &mounts[idx];
wait_writeback_drain(mc.rc_port);
@ -893,7 +894,7 @@ fn handle_reload(
// Handle modified shares: treat as remove + add
for name in &diff.shares_modified {
println!(" Restarting modified share '{name}'...");
info!(" Restarting modified share '{name}'...");
// Remove old
if let Some(idx) = mounts.iter().position(|mc| mc.name == *name) {
let mc = &mounts[idx];
@ -913,7 +914,7 @@ fn handle_reload(
// Handle added shares: spawn new mount
for name in &diff.shares_added {
println!(" Adding share '{name}'...");
info!(" Adding share '{name}'...");
if let Some((i, share)) = new_config.shares.iter().enumerate().find(|(_, s)| s.name == *name) {
let rc_port = new_config.rc_port(i);
std::fs::create_dir_all(&share.mount_point).ok();
@ -935,7 +936,7 @@ fn handle_reload(
ChangeTier::Global => {
// Tier D: global restart — drain all → stop everything → restart
println!(" Tier D: full restart (global settings changed)...");
info!(" Tier D: full restart (global settings changed)...");
// Drain all write-back queues
for mc in mounts.iter() {
@ -1027,13 +1028,13 @@ fn handle_reload(
// Re-trigger warmup if settings changed
if diff.warmup_changed {
println!(" Warmup settings changed, re-triggering...");
info!(" Warmup settings changed, re-triggering...");
spawn_warmup(&new_config, shared_status, shutdown);
}
// Re-trigger dir-refresh if settings changed
if diff.dir_refresh_changed {
println!(" Dir-refresh settings changed, re-triggering...");
info!(" Dir-refresh settings changed, re-triggering...");
spawn_dir_refresh(&new_config, shared_status, shutdown);
}
@ -1061,7 +1062,7 @@ fn spawn_mount(config: &Config, share: &crate::config::ShareConfig, rc_port: u16
}
}
println!(" Mount ready: {} at {}", share.name, share.mount_point.display());
info!(" Mount ready: {} at {}", share.name, share.mount_point.display());
Ok(MountChild {
name: share.name.clone(),
child,
@ -1092,18 +1093,18 @@ fn unmount_share(config: &Config, share_name: &str) {
fn stop_protocols(protocols: &mut ProtocolChildren, config: &Config) {
if let Some(child) = &mut protocols.smbd {
graceful_kill(child);
println!(" SMB: stopped");
info!(" SMB: stopped");
}
protocols.smbd = None;
if config.protocols.enable_nfs {
let _ = Command::new("exportfs").arg("-ua").status();
println!(" NFS: unexported");
info!(" NFS: unexported");
}
if let Some(child) = &mut protocols.webdav {
graceful_kill(child);
println!(" WebDAV: stopped");
info!(" WebDAV: stopped");
}
protocols.webdav = None;
}
@ -1123,13 +1124,13 @@ fn reload_protocol_configs(protocols: &ProtocolChildren, config: &Config) -> Res
let pid = child.id() as i32;
// SAFETY: sending SIGHUP to a known child PID is safe.
unsafe { libc::kill(pid, libc::SIGHUP) };
println!(" SMB: config reloaded (SIGHUP)");
info!(" SMB: config reloaded (SIGHUP)");
}
}
if config.protocols.enable_nfs {
nfs::write_config(config)?;
let _ = Command::new("exportfs").arg("-ra").status();
println!(" NFS: re-exported");
info!(" NFS: re-exported");
}
Ok(())
}
@ -1206,17 +1207,17 @@ fn wait_writeback_drain(port: u16) {
let pending = dc.uploads_in_progress + dc.uploads_queued;
if pending == 0 {
if !first {
println!(" Write-back queue drained.");
info!(" Write-back queue drained.");
}
return;
}
if first {
println!(
info!(
" Waiting for write-back queue ({pending} files pending)..."
);
first = false;
} else {
eprint!("\r Write-back: {pending} files remaining... ");
info!(" Write-back: {pending} files remaining...");
}
} else {
return;
@ -1226,8 +1227,7 @@ fn wait_writeback_drain(port: u16) {
}
if Instant::now() > deadline {
eprintln!();
eprintln!(
warn!(
" Warning: write-back drain timed out after {}s, proceeding.",
WRITEBACK_DRAIN_TIMEOUT.as_secs()
);
@ -1264,13 +1264,13 @@ fn shutdown_services(config: &Config, mounts: &mut Vec<MountChild>, protocols: &
}
}
}
println!(" FUSE: unmounted");
info!(" FUSE: unmounted");
// Gracefully stop all rclone mount processes
for mc in mounts.iter_mut() {
graceful_kill(&mut mc.child);
}
println!(" rclone: stopped");
info!(" rclone: stopped");
}
#[cfg(test)]

View File

@ -11,7 +11,7 @@ use axum::Router;
use serde::Serialize;
use crate::config::Config;
use crate::daemon::{LogEntry, SupervisorCmd};
use crate::daemon::SupervisorCmd;
use crate::web::SharedState;
pub fn routes() -> Router<SharedState> {
@ -305,27 +305,64 @@ async fn post_bwlimit(
}
}
/// GET /api/logs?since=0 — recent log entries.
/// GET /api/logs?lines=200&from_line=0 — recent log file entries.
#[derive(serde::Deserialize)]
struct LogsQuery {
#[serde(default = "default_lines")]
lines: usize,
#[serde(default)]
since: u64,
from_line: usize,
}
/// Serde default for `LogsQuery::lines`: page size when the client omits it.
fn default_lines() -> usize {
    200
}
#[derive(Serialize)]
struct LogsResponse {
next_id: u64,
entries: Vec<LogEntry>,
total_lines: usize,
entries: Vec<String>,
}
async fn get_logs(
State(state): State<SharedState>,
Query(params): Query<LogsQuery>,
) -> Json<LogsResponse> {
let logs = state.logs.read().unwrap();
let entries = logs.since(params.since);
let log_file = {
let config = state.config.read().unwrap();
config.log.file.clone()
};
if log_file.is_empty() {
return Json(LogsResponse {
total_lines: 0,
entries: vec![],
});
}
let content = match std::fs::read_to_string(&log_file) {
Ok(c) => c,
Err(_) => {
return Json(LogsResponse {
total_lines: 0,
entries: vec![],
})
}
};
let all_lines: Vec<String> = content
.lines()
.map(|l| l.to_string())
.collect();
let total_lines = all_lines.len();
let entries: Vec<String> = all_lines
.into_iter()
.skip(params.from_line)
.take(params.lines)
.collect();
Json(LogsResponse {
next_id: logs.next_id(),
total_lines,
entries,
})
}

View File

@ -17,7 +17,7 @@ use axum::routing::get;
use axum::Router;
use crate::config::Config;
use crate::daemon::{AppState, DaemonStatus, LogBuffer, SupervisorCmd, DEFAULT_WEB_PORT};
use crate::daemon::{AppState, DaemonStatus, SupervisorCmd, DEFAULT_WEB_PORT};
/// Axum-compatible shared state (wraps AppState in an Arc for axum).
pub type SharedState = Arc<AppState>;
@ -50,18 +50,12 @@ pub fn spawn_web_server(
sse_tx: tokio::sync::broadcast::Sender<()>,
) -> thread::JoinHandle<()> {
thread::spawn(move || {
let logs = Arc::new(RwLock::new(LogBuffer::new()));
{
let mut lb = logs.write().unwrap();
lb.push("Web UI started");
}
let state = Arc::new(AppState {
config,
status,
cmd_tx,
config_path,
sse_tx,
logs,
});
let rt = tokio::runtime::Builder::new_multi_thread()
@ -76,9 +70,9 @@ pub fn spawn_web_server(
let listener = tokio::net::TcpListener::bind(&addr)
.await
.unwrap_or_else(|e| panic!("Failed to bind web server to {addr}: {e}"));
println!(" Web UI: http://localhost:{DEFAULT_WEB_PORT}");
tracing::info!(" Web UI: http://localhost:{DEFAULT_WEB_PORT}");
if let Err(e) = axum::serve(listener, app).await {
eprintln!("Web server error: {e}");
tracing::error!("Web server error: {e}");
}
});
})

View File

@ -571,10 +571,7 @@ async fn config_apply(
});
}
{
let mut logs = state.logs.write().unwrap();
logs.push(format!("Config applied: {diff_summary}"));
}
tracing::info!("Config applied: {diff_summary}");
Json(ConfigApplyResponse {
ok: true,

View File

@ -2,7 +2,7 @@
function logViewerFn() {
return {
entries: [],
nextId: 0,
fromLine: 0,
polling: null,
autoScroll: true,
@ -17,7 +17,7 @@ function logViewerFn() {
async fetchLogs() {
try {
const resp = await fetch('/api/logs?since=' + this.nextId);
const resp = await fetch('/api/logs?lines=200&from_line=' + this.fromLine);
const data = await resp.json();
if (data.entries.length > 0) {
this.entries = this.entries.concat(data.entries);
@ -25,7 +25,7 @@ function logViewerFn() {
if (this.entries.length > 1000) {
this.entries = this.entries.slice(-500);
}
this.nextId = data.next_id;
this.fromLine = data.total_lines;
if (this.autoScroll) {
this.$nextTick(() => {
const el = this.$refs.logBox;
@ -36,11 +36,6 @@ function logViewerFn() {
} catch(e) { /* ignore fetch errors */ }
},
formatTime(ts) {
const d = new Date(ts * 1000);
return d.toLocaleTimeString('en-GB', { hour12: false });
},
clear() {
this.entries = [];
}
@ -74,10 +69,9 @@ if (window.Alpine) {
<template x-if="entries.length === 0">
<div style="color:var(--text-muted);padding:24px;text-align:center">No log entries yet. Events will appear here as they occur.</div>
</template>
<template x-for="entry in entries" :key="entry.id">
<template x-for="(line, idx) in entries" :key="idx">
<div class="log-line">
<span class="log-ts" x-text="formatTime(entry.ts)"></span>
<span class="log-msg" x-text="entry.msg"></span>
<span class="log-msg" x-text="line"></span>
</div>
</template>
</div>