diff --git a/Cargo.lock b/Cargo.lock index 8acffa8..6e5d5f5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -17,6 +17,15 @@ dependencies = [ "memchr", ] +[[package]] +name = "android_system_properties" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311" +dependencies = [ + "libc", +] + [[package]] name = "anstream" version = "0.6.21" @@ -131,6 +140,12 @@ version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" +[[package]] +name = "autocfg" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" + [[package]] name = "axum" version = "0.8.8" @@ -213,6 +228,12 @@ dependencies = [ "objc2", ] +[[package]] +name = "bumpalo" +version = "3.20.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c6f81257d10a0f602a294ae4182251151ff97dbb504ef9afcdda4a64b24d9b4" + [[package]] name = "bytes" version = "1.11.1" @@ -241,6 +262,19 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" +[[package]] +name = "chrono" +version = "0.4.43" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fac4744fb15ae8337dc853fee7fb3f4e48c0fbaa23d0afe49c447b4fab126118" +dependencies = [ + "iana-time-zone", + "js-sys", + "num-traits", + "wasm-bindgen", + "windows-link", +] + [[package]] name = "clap" version = "4.5.59" @@ -316,6 +350,12 @@ dependencies = [ "url", ] +[[package]] +name = "core-foundation-sys" +version = "0.8.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" + [[package]] name = "crc32fast" version = "1.5.0" @@ 
-325,6 +365,17 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "cron" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f8c3e73077b4b4a6ab1ea5047c37c57aee77657bc8ecd6f29b0af082d0b0c07" +dependencies = [ + "chrono", + "nom", + "once_cell", +] + [[package]] name = "crossbeam-channel" version = "0.5.15" @@ -566,6 +617,30 @@ dependencies = [ "tower-service", ] +[[package]] +name = "iana-time-zone" +version = "0.1.65" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e31bc9ad994ba00e440a8aa5c9ef0ec67d5cb5e5cb0cc7f8b744a35b389cc470" +dependencies = [ + "android_system_properties", + "core-foundation-sys", + "iana-time-zone-haiku", + "js-sys", + "log", + "wasm-bindgen", + "windows-core", +] + +[[package]] +name = "iana-time-zone-haiku" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" +dependencies = [ + "cc", +] + [[package]] name = "icu_collections" version = "2.1.1" @@ -690,6 +765,16 @@ version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "92ecc6618181def0457392ccd0ee51198e065e016d1d527a7ac1b6dc7c1f09d2" +[[package]] +name = "js-sys" +version = "0.3.85" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8c942ebf8e95485ca0d52d97da7c5a2c387d0e7f0ba4c35e93bfcaee045955b3" +dependencies = [ + "once_cell", + "wasm-bindgen", +] + [[package]] name = "lazy_static" version = "1.5.0" @@ -747,6 +832,12 @@ version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" +[[package]] +name = "minimal-lexical" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" + [[package]] name = "miniz_oxide" version = "0.8.9" @@ 
-780,6 +871,16 @@ dependencies = [ "libc", ] +[[package]] +name = "nom" +version = "7.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" +dependencies = [ + "memchr", + "minimal-lexical", +] + [[package]] name = "nu-ansi-term" version = "0.50.3" @@ -795,6 +896,15 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" +[[package]] +name = "num-traits" +version = "0.2.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" +dependencies = [ + "autocfg", +] + [[package]] name = "objc2" version = "0.6.3" @@ -945,6 +1055,12 @@ dependencies = [ "untrusted", ] +[[package]] +name = "rustversion" +version = "1.0.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d" + [[package]] name = "ryu" version = "1.0.23" @@ -1483,7 +1599,9 @@ dependencies = [ "anyhow", "askama", "axum", + "chrono", "clap", + "cron", "ctrlc", "libc", "serde", @@ -1505,6 +1623,51 @@ version = "0.11.1+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" +[[package]] +name = "wasm-bindgen" +version = "0.2.108" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64024a30ec1e37399cf85a7ffefebdb72205ca1c972291c51512360d90bd8566" +dependencies = [ + "cfg-if", + "once_cell", + "rustversion", + "wasm-bindgen-macro", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.108" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "008b239d9c740232e71bd39e8ef6429d27097518b6b30bdf9086833bd5b6d608" +dependencies = [ + "quote", + 
"wasm-bindgen-macro-support", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.108" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5256bae2d58f54820e6490f9839c49780dff84c65aeab9e772f15d5f0e913a55" +dependencies = [ + "bumpalo", + "proc-macro2", + "quote", + "syn", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-shared" +version = "0.2.108" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f01b580c9ac74c8d8f0c0e4afb04eeef2acf145458e52c03845ee9cd23e3d12" +dependencies = [ + "unicode-ident", +] + [[package]] name = "webpki-roots" version = "1.0.6" @@ -1514,12 +1677,65 @@ dependencies = [ "rustls-pki-types", ] +[[package]] +name = "windows-core" +version = "0.62.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8e83a14d34d0623b51dce9581199302a221863196a1dde71a7663a4c2be9deb" +dependencies = [ + "windows-implement", + "windows-interface", + "windows-link", + "windows-result", + "windows-strings", +] + +[[package]] +name = "windows-implement" +version = "0.60.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "053e2e040ab57b9dc951b72c264860db7eb3b0200ba345b4e4c3b14f67855ddf" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "windows-interface" +version = "0.59.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f316c4a2570ba26bbec722032c4099d8c8bc095efccdc15688708623367e358" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "windows-link" version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" +[[package]] +name = "windows-result" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7781fa89eaf60850ac3d2da7af8e5242a5ea78d1a11c49bf2910bb5a73853eb5" +dependencies = [ + 
"windows-link", +] + +[[package]] +name = "windows-strings" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7837d08f69c77cf6b07689544538e017c1bfcf57e34b4c0ff58e6c2cd3b37091" +dependencies = [ + "windows-link", +] + [[package]] name = "windows-sys" version = "0.52.0" diff --git a/Cargo.toml b/Cargo.toml index 74b628f..2db7584 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,3 +21,5 @@ tower-http = { version = "0.6", features = ["cors"] } tracing = "0.1" tracing-subscriber = { version = "0.3", features = ["env-filter"] } tracing-appender = "0.2" +cron = "0.12" +chrono = { version = "0.4", features = ["clock"] } diff --git a/src/cli/mod.rs b/src/cli/mod.rs index 22e0e58..65a1b19 100644 --- a/src/cli/mod.rs +++ b/src/cli/mod.rs @@ -7,4 +7,6 @@ pub mod reconnect; pub mod setup; pub mod speed_test; pub mod status; +pub mod update; pub mod warmup; +pub mod wifi; // TODO: WiFi AP setup diff --git a/src/cli/preset.rs b/src/cli/preset.rs index 5664275..4f163fa 100644 --- a/src/cli/preset.rs +++ b/src/cli/preset.rs @@ -17,23 +17,33 @@ pub enum Preset { Office, } -impl Preset { - pub fn from_str(s: &str) -> Option { +impl std::str::FromStr for Preset { + type Err = anyhow::Error; + + fn from_str(s: &str) -> std::result::Result { match s { - "photographer" => Some(Self::Photographer), - "video" => Some(Self::Video), - "office" => Some(Self::Office), - _ => None, + "photographer" => Ok(Self::Photographer), + "video" => Ok(Self::Video), + "office" => Ok(Self::Office), + _ => Err(anyhow::anyhow!( + "Unknown preset '{}'. 
Use: photographer, video, office", + s + )), } } +} +impl Preset { pub fn apply(&self, config: &mut Config) { match self { Self::Photographer => { config.cache.max_size = "500G".into(); config.read.chunk_size = "256M".into(); + config.read.chunk_limit = "1G".into(); config.read.read_ahead = "512M".into(); config.read.buffer_size = "256M".into(); + config.read.multi_thread_streams = 4; + config.read.multi_thread_cutoff = "50M".into(); config.directory_cache.cache_time = "2h".into(); config.writeback.write_back = "5s".into(); config.writeback.transfers = 4; @@ -44,8 +54,11 @@ impl Preset { Self::Video => { config.cache.max_size = "1T".into(); config.read.chunk_size = "512M".into(); + config.read.chunk_limit = "2G".into(); config.read.read_ahead = "1G".into(); config.read.buffer_size = "512M".into(); + config.read.multi_thread_streams = 2; + config.read.multi_thread_cutoff = "100M".into(); config.directory_cache.cache_time = "1h".into(); config.writeback.write_back = "5s".into(); config.writeback.transfers = 2; @@ -56,8 +69,11 @@ impl Preset { Self::Office => { config.cache.max_size = "50G".into(); config.read.chunk_size = "64M".into(); + config.read.chunk_limit = "256M".into(); config.read.read_ahead = "128M".into(); - config.read.buffer_size = "64M".into(); + config.read.buffer_size = "128M".into(); + config.read.multi_thread_streams = 4; + config.read.multi_thread_cutoff = "10M".into(); config.directory_cache.cache_time = "30m".into(); config.writeback.write_back = "5s".into(); config.writeback.transfers = 4; @@ -77,13 +93,168 @@ impl Preset { } } -pub fn run(config: &mut Config, config_path: &Path, preset_name: &str) -> Result<()> { - let preset = Preset::from_str(preset_name).ok_or_else(|| { - anyhow::anyhow!( - "Unknown preset '{}'. 
Use: photographer, video, office", - preset_name +#[cfg(test)] +mod tests { + use super::*; + + fn test_config() -> Config { + toml::from_str( + r#" +[[connections]] +name = "nas" +nas_host = "10.0.0.1" +nas_user = "admin" + +[cache] +dir = "/tmp/cache" + +[read] +[bandwidth] +[writeback] +[directory_cache] +[protocols] + +[[shares]] +name = "photos" +connection = "nas" +remote_path = "/photos" +mount_point = "/mnt/photos" +"#, ) - })?; + .unwrap() + } + + // --- FromStr --- + + #[test] + fn test_preset_parse_valid() { + assert!(matches!("photographer".parse::(), Ok(Preset::Photographer))); + assert!(matches!("video".parse::(), Ok(Preset::Video))); + assert!(matches!("office".parse::(), Ok(Preset::Office))); + } + + #[test] + fn test_preset_parse_invalid() { + assert!("unknown".parse::().is_err()); + assert!("".parse::().is_err()); + assert!("Photographer".parse::().is_err()); // case-sensitive + assert!("OFFICE".parse::().is_err()); + } + + #[test] + fn test_preset_parse_error_message() { + let err = "bad".parse::().unwrap_err(); + assert!(err.to_string().contains("bad"), "error should mention the bad value"); + } + + // --- Preset::apply — field values --- + + #[test] + fn test_photographer_apply_all_fields() { + let mut cfg = test_config(); + Preset::Photographer.apply(&mut cfg); + + assert_eq!(cfg.cache.max_size, "500G"); + assert_eq!(cfg.read.chunk_size, "256M"); + assert_eq!(cfg.read.chunk_limit, "1G"); + assert_eq!(cfg.read.read_ahead, "512M"); + assert_eq!(cfg.read.buffer_size, "256M"); + assert_eq!(cfg.read.multi_thread_streams, 4); + assert_eq!(cfg.read.multi_thread_cutoff, "50M"); + assert_eq!(cfg.directory_cache.cache_time, "2h"); + assert_eq!(cfg.writeback.write_back, "5s"); + assert_eq!(cfg.writeback.transfers, 4); + assert!(cfg.protocols.enable_smb); + assert!(!cfg.protocols.enable_nfs); + assert!(!cfg.protocols.enable_webdav); + } + + #[test] + fn test_video_apply_all_fields() { + let mut cfg = test_config(); + Preset::Video.apply(&mut cfg); + + 
assert_eq!(cfg.cache.max_size, "1T"); + assert_eq!(cfg.read.chunk_size, "512M"); + assert_eq!(cfg.read.chunk_limit, "2G"); + assert_eq!(cfg.read.read_ahead, "1G"); + assert_eq!(cfg.read.buffer_size, "512M"); + assert_eq!(cfg.read.multi_thread_streams, 2); + assert_eq!(cfg.read.multi_thread_cutoff, "100M"); + assert_eq!(cfg.directory_cache.cache_time, "1h"); + assert_eq!(cfg.writeback.write_back, "5s"); + assert_eq!(cfg.writeback.transfers, 2); + assert!(cfg.protocols.enable_smb); + assert!(!cfg.protocols.enable_nfs); + assert!(!cfg.protocols.enable_webdav); + } + + #[test] + fn test_office_apply_all_fields() { + let mut cfg = test_config(); + Preset::Office.apply(&mut cfg); + + assert_eq!(cfg.cache.max_size, "50G"); + assert_eq!(cfg.read.chunk_size, "64M"); + assert_eq!(cfg.read.chunk_limit, "256M"); + assert_eq!(cfg.read.read_ahead, "128M"); + assert_eq!(cfg.read.buffer_size, "128M"); + assert_eq!(cfg.read.multi_thread_streams, 4); + assert_eq!(cfg.read.multi_thread_cutoff, "10M"); + assert_eq!(cfg.directory_cache.cache_time, "30m"); + assert_eq!(cfg.writeback.write_back, "5s"); + assert_eq!(cfg.writeback.transfers, 4); + assert!(cfg.protocols.enable_smb); + assert!(!cfg.protocols.enable_nfs); + assert!(cfg.protocols.enable_webdav); + } + + #[test] + fn test_preset_does_not_change_connections_or_shares() { + let mut cfg = test_config(); + Preset::Photographer.apply(&mut cfg); + // Preset must never touch connection or share settings + assert_eq!(cfg.connections[0].nas_host, "10.0.0.1"); + assert_eq!(cfg.connections[0].nas_user, "admin"); + assert_eq!(cfg.shares[0].name, "photos"); + assert_eq!(cfg.shares[0].remote_path, "/photos"); + } + + #[test] + fn test_preset_apply_is_idempotent() { + let mut cfg = test_config(); + Preset::Video.apply(&mut cfg); + let snapshot_chunk = cfg.read.chunk_size.clone(); + Preset::Video.apply(&mut cfg); + assert_eq!(cfg.read.chunk_size, snapshot_chunk); + } + + #[test] + fn test_presets_have_consistent_write_back() { + // All three 
presets should use the same write_back value (plan §1 unified) + let mut cfg = test_config(); + Preset::Photographer.apply(&mut cfg); + let wb_p = cfg.writeback.write_back.clone(); + Preset::Video.apply(&mut cfg); + let wb_v = cfg.writeback.write_back.clone(); + Preset::Office.apply(&mut cfg); + let wb_o = cfg.writeback.write_back.clone(); + assert_eq!(wb_p, wb_v, "Photographer and Video write_back must match"); + assert_eq!(wb_v, wb_o, "Video and Office write_back must match"); + } + + // --- description --- + + #[test] + fn test_description_mentions_cache_size() { + assert!(Preset::Photographer.description().contains("500G")); + assert!(Preset::Video.description().contains("1T")); + assert!(Preset::Office.description().contains("50G")); + } +} + +pub fn run(config: &mut Config, config_path: &Path, preset_name: &str) -> Result<()> { + let preset: Preset = preset_name.parse()?; preset.apply(config); diff --git a/src/cli/setup.rs b/src/cli/setup.rs index 902936a..d00b875 100644 --- a/src/cli/setup.rs +++ b/src/cli/setup.rs @@ -204,7 +204,10 @@ pub fn run(output: Option) -> Result<()> { match addr_str.parse::() { Ok(addr) => match TcpStream::connect_timeout(&addr, Duration::from_secs(5)) { Ok(_) => println!(" Connection OK"), - Err(e) => println!(" Warning: Could not connect to {}: {}", addr_str, e), + Err(e) => anyhow::bail!( + "Cannot connect to {}:{} — check NAS host/port and ensure Tailscale is active.\nDetails: {}", + nas_host, sftp_port, e + ), }, Err(_) => { // Might be a hostname — try resolving @@ -214,15 +217,22 @@ pub fn run(output: Option) -> Result<()> { if let Some(addr) = addrs.next() { match TcpStream::connect_timeout(&addr, Duration::from_secs(5)) { Ok(_) => println!(" Connection OK"), - Err(e) => { - println!(" Warning: Could not connect to {}: {}", addr_str, e) - } + Err(e) => anyhow::bail!( + "Cannot connect to {}:{} — check NAS host/port and ensure Tailscale is active.\nDetails: {}", + nas_host, sftp_port, e + ), } } else { - println!(" Warning: 
Could not resolve {}", addr_str); + anyhow::bail!( + "Cannot resolve hostname '{}' — check NAS host and ensure DNS is working.", + nas_host + ); } } - Err(e) => println!(" Warning: Could not resolve {}: {}", addr_str, e), + Err(e) => anyhow::bail!( + "Cannot resolve hostname '{}' — check NAS host and ensure DNS is working.\nDetails: {}", + nas_host, e + ), } } } diff --git a/src/cli/update.rs b/src/cli/update.rs new file mode 100644 index 0000000..336aaaa --- /dev/null +++ b/src/cli/update.rs @@ -0,0 +1,64 @@ +//! `warpgate update` — check for newer versions of Warpgate. +//! +//! Queries the GitHub Releases API to compare the running version with the +//! latest published release and optionally prints installation instructions. + +use anyhow::Result; + +/// GitHub repository path (owner/repo). +const GITHUB_REPO: &str = "warpgate-project/warpgate"; + +/// Current version from Cargo.toml. +const CURRENT_VERSION: &str = env!("CARGO_PKG_VERSION"); + +pub fn run(apply: bool) -> Result<()> { + let api_url = format!( + "https://api.github.com/repos/{GITHUB_REPO}/releases/latest" + ); + + println!("Checking for updates..."); + println!(" Current version: v{CURRENT_VERSION}"); + + let resp = ureq::get(&api_url) + .header("User-Agent", "warpgate-updater") + .call() + .map_err(|e| anyhow::anyhow!("Failed to reach GitHub API: {e}"))?; + + let body: serde_json::Value = resp + .into_body() + .read_json() + .map_err(|e| anyhow::anyhow!("Failed to parse GitHub API response: {e}"))?; + + let tag = body["tag_name"] + .as_str() + .unwrap_or("") + .trim_start_matches('v'); + + if tag.is_empty() { + anyhow::bail!("Could not determine latest version from GitHub API response"); + } + + if tag == CURRENT_VERSION { + println!(" Latest version: v{tag}"); + println!("Already up to date (v{CURRENT_VERSION})."); + return Ok(()); + } + + println!(" Latest version: v{tag} ← new release available"); + println!(); + println!("Changelog: https://github.com/{GITHUB_REPO}/releases/tag/v{tag}"); + + 
if apply { + println!(); + println!("To install the latest version, run:"); + println!( + " curl -fsSL https://github.com/{GITHUB_REPO}/releases/download/v{tag}/warpgate-linux-x86_64 \\\n | sudo install -m 0755 /dev/stdin /usr/local/bin/warpgate" + ); + println!(" sudo systemctl restart warpgate"); + } else { + println!(); + println!("Run `warpgate update --apply` to print the installation command."); + } + + Ok(()) +} diff --git a/src/cli/wifi.rs b/src/cli/wifi.rs new file mode 100644 index 0000000..4af4c76 --- /dev/null +++ b/src/cli/wifi.rs @@ -0,0 +1,6 @@ +//! `warpgate setup-wifi` — WiFi AP + captive portal setup. +//! +//! TODO: WiFi AP setup (hostapd + dnsmasq + iptables). +//! Planned implementation: generate hostapd.conf, dnsmasq.conf, and iptables +//! rules to create a local WiFi AP that proxies client traffic through +//! the Warpgate cache layer. diff --git a/src/config.rs b/src/config.rs index 2355b43..38e5614 100644 --- a/src/config.rs +++ b/src/config.rs @@ -1900,4 +1900,127 @@ mount_point = "/mnt/photos" assert!(!is_valid_remote_name("nas:1")); assert!(!is_valid_remote_name("nas/1")); } + + // ----------------------------------------------------------------------- + // WebConfig + // ----------------------------------------------------------------------- + + #[test] + fn test_web_config_default_password_empty() { + let config: Config = toml::from_str(minimal_toml()).unwrap(); + assert_eq!(config.web.password, "", "default web password should be empty"); + } + + #[test] + fn test_web_config_password_set() { + let toml_str = format!("{}\n[web]\npassword = \"s3cr3t\"", minimal_toml()); + let config: Config = toml::from_str(&toml_str).unwrap(); + assert_eq!(config.web.password, "s3cr3t"); + } + + #[test] + fn test_web_config_serialization_roundtrip() { + let toml_str = format!("{}\n[web]\npassword = \"mypass\"", minimal_toml()); + let config: Config = toml::from_str(&toml_str).unwrap(); + let serialized = config.to_commented_toml(); + let config2: 
Config = toml::from_str(&serialized).unwrap(); + assert_eq!(config.web.password, config2.web.password); + } + + // ----------------------------------------------------------------------- + // NotificationsConfig + // ----------------------------------------------------------------------- + + #[test] + fn test_notifications_config_defaults() { + let config: Config = toml::from_str(minimal_toml()).unwrap(); + assert_eq!(config.notifications.webhook_url, ""); + assert_eq!(config.notifications.cache_threshold_pct, 80); + assert_eq!(config.notifications.nas_offline_minutes, 5); + assert_eq!(config.notifications.writeback_depth, 50); + } + + #[test] + fn test_notifications_config_all_fields() { + let toml_str = format!( + "{}\n[notifications]\nwebhook_url = \"https://hook.example.com\"\ncache_threshold_pct = 90\nnas_offline_minutes = 10\nwriteback_depth = 100", + minimal_toml() + ); + let config: Config = toml::from_str(&toml_str).unwrap(); + assert_eq!(config.notifications.webhook_url, "https://hook.example.com"); + assert_eq!(config.notifications.cache_threshold_pct, 90); + assert_eq!(config.notifications.nas_offline_minutes, 10); + assert_eq!(config.notifications.writeback_depth, 100); + } + + #[test] + fn test_notifications_config_partial_override_keeps_defaults() { + // Partial [notifications] section: only webhook_url set + let toml_str = format!( + "{}\n[notifications]\nwebhook_url = \"https://example.com\"", + minimal_toml() + ); + let config: Config = toml::from_str(&toml_str).unwrap(); + assert_eq!(config.notifications.webhook_url, "https://example.com"); + assert_eq!(config.notifications.cache_threshold_pct, 80); // still default + assert_eq!(config.notifications.nas_offline_minutes, 5); + assert_eq!(config.notifications.writeback_depth, 50); + } + + #[test] + fn test_notifications_config_serialization_roundtrip() { + let toml_str = format!( + "{}\n[notifications]\nwebhook_url = \"https://rt.test\"\ncache_threshold_pct = 70\nnas_offline_minutes = 
3\nwriteback_depth = 25", + minimal_toml() + ); + let config: Config = toml::from_str(&toml_str).unwrap(); + let serialized = config.to_commented_toml(); + let config2: Config = toml::from_str(&serialized).unwrap(); + assert_eq!(config.notifications.webhook_url, config2.notifications.webhook_url); + assert_eq!(config.notifications.cache_threshold_pct, config2.notifications.cache_threshold_pct); + assert_eq!(config.notifications.nas_offline_minutes, config2.notifications.nas_offline_minutes); + assert_eq!(config.notifications.writeback_depth, config2.notifications.writeback_depth); + } + + // ----------------------------------------------------------------------- + // LogConfig + // ----------------------------------------------------------------------- + + #[test] + fn test_log_config_defaults() { + let config: Config = toml::from_str(minimal_toml()).unwrap(); + assert_eq!(config.log.file, "/var/log/warpgate/warpgate.log"); + assert_eq!(config.log.level, "info"); + } + + #[test] + fn test_log_config_custom_values() { + let toml_str = format!( + "{}\n[log]\nfile = \"/tmp/warpgate-test.log\"\nlevel = \"debug\"", + minimal_toml() + ); + let config: Config = toml::from_str(&toml_str).unwrap(); + assert_eq!(config.log.file, "/tmp/warpgate-test.log"); + assert_eq!(config.log.level, "debug"); + } + + #[test] + fn test_log_config_empty_file_disables_file_logging() { + let toml_str = format!("{}\n[log]\nfile = \"\"", minimal_toml()); + let config: Config = toml::from_str(&toml_str).unwrap(); + assert_eq!(config.log.file, "", "empty file = no file logging"); + } + + #[test] + fn test_log_config_serialization_roundtrip() { + let toml_str = format!( + "{}\n[log]\nfile = \"/var/log/wg.log\"\nlevel = \"warn\"", + minimal_toml() + ); + let config: Config = toml::from_str(&toml_str).unwrap(); + let serialized = config.to_commented_toml(); + let config2: Config = toml::from_str(&serialized).unwrap(); + assert_eq!(config.log.file, config2.log.file); + assert_eq!(config.log.level, 
config2.log.level); } } diff --git a/src/main.rs b/src/main.rs index 691aed4..cffe40b 100644 --- a/src/main.rs +++ b/src/main.rs @@ -100,6 +100,19 @@ enum Commands { /// Share name to reconnect. share: String, }, + /// Check for a newer version of Warpgate. + Update { + /// Print installation instructions for the latest binary (no download is performed). + #[arg(long)] + apply: bool, + }, + /// Set up a local WiFi AP + captive portal (requires hostapd + dnsmasq). + SetupWifi, + /// Clone a network interface MAC address for WiFi AP passthrough. + CloneMac { + /// Network interface to clone the MAC address from. + interface: String, + }, } fn main() -> Result<()> { @@ -141,6 +154,13 @@ fn main() -> Result<()> { cli::preset::run(&mut config, &cli.config, &name) } Commands::Reconnect { share } => cli::reconnect::run(&config, &share), + Commands::Update { apply } => cli::update::run(apply), + Commands::SetupWifi => { + todo!("WiFi AP setup not yet implemented — see src/cli/wifi.rs") + } + Commands::CloneMac { .. } => { + todo!("MAC clone not yet implemented — see src/cli/wifi.rs") + } // already handled above Commands::Run | Commands::ConfigInit { .. } | Commands::Deploy | Commands::Setup { .. } => unreachable!(), diff --git a/src/supervisor.rs b/src/supervisor.rs index 0ebb7ec..e62e520 100644 --- a/src/supervisor.rs +++ b/src/supervisor.rs @@ -4,7 +4,7 @@ //! process tree with coordinated startup and shutdown. Spawns a built-in web //! server for status monitoring and config hot-reload.
-use std::collections::HashMap; +use std::collections::{HashMap, VecDeque}; use std::os::unix::process::CommandExt; use std::path::PathBuf; use std::process::{Child, Command}; @@ -15,6 +15,9 @@ use std::thread; use std::time::{Duration, Instant, SystemTime}; use anyhow::{Context, Result}; +use chrono::Utc; +use cron::Schedule; +use std::str::FromStr; use tracing::{error, info, warn}; use crate::config::Config; @@ -47,6 +50,8 @@ const STATS_SNAPSHOT_INTERVAL: Duration = Duration::from_secs(60); const CACHE_WARN_THRESHOLD: f64 = 0.80; /// Cache usage CRIT threshold. const CACHE_CRITICAL_THRESHOLD: f64 = 0.95; +/// Number of speed samples in the adaptive bandwidth sliding window. +const ADAPTIVE_WINDOW_SIZE: usize = 6; /// Per-share state from the previous poll cycle, used for change detection. struct SharePrevState { @@ -725,6 +730,8 @@ fn supervise( let mut prev_states: HashMap = HashMap::new(); let mut last_stats_snapshot = Instant::now(); let mut last_scheduled_warmup: Option = None; + let mut adaptive_window: VecDeque = VecDeque::with_capacity(ADAPTIVE_WINDOW_SIZE); + let mut adaptive_current_limit: u64 = 0; loop { // Check for commands (non-blocking with timeout = POLL_INTERVAL) @@ -946,22 +953,34 @@ fn supervise( let cfg = shared_config.read().unwrap(); let schedule = cfg.warmup.warmup_schedule.clone(); if !schedule.is_empty() && !cfg.warmup.rules.is_empty() { - let should_run = match last_scheduled_warmup { - None => { - // First check: see if current hour matches schedule hour - let now = SystemTime::now() - .duration_since(std::time::UNIX_EPOCH) - .unwrap_or_default() - .as_secs(); - let hour_of_day = (now % 86400) / 3600; - // Parse "0 H * * *" -> extract H - let scheduled_hour = schedule.split_whitespace() - .nth(1) - .and_then(|h| h.parse::().ok()) - .unwrap_or(2); - hour_of_day == scheduled_hour + let should_run = { + let normalized = normalize_cron_schedule(&schedule); + match Schedule::from_str(&normalized) { + Ok(sched) => match last_scheduled_warmup 
{ + None => { + // First check: fire if the next scheduled time is within 60 seconds + sched.upcoming(Utc).next() + .map(|t| { + let diff = t.timestamp() - Utc::now().timestamp(); + diff >= 0 && diff <= 60 + }) + .unwrap_or(false) + } + Some(last) => { + // Has run before: check if there's a scheduled time between + // last run and now + let elapsed_secs = last.elapsed().as_secs() as i64; + let last_dt = Utc::now() - chrono::Duration::seconds(elapsed_secs); + sched.after(&last_dt).next() + .map(|t| t <= Utc::now()) + .unwrap_or(false) + } + }, + Err(e) => { + warn!("Invalid warmup_schedule '{}': {}", schedule, e); + false + } } - Some(last) => last.elapsed() >= Duration::from_secs(86400), }; if should_run { @@ -974,6 +993,46 @@ fn supervise( } } + // Adaptive bandwidth throttling + { + let cfg = shared_config.read().unwrap(); + if cfg.bandwidth.adaptive { + let max_limit = parse_size_bytes(&cfg.bandwidth.limit_up).unwrap_or(0); + if max_limit > 0 { + let total_speed: u64 = { + let status = shared_status.read().unwrap(); + status.shares.iter().map(|s| s.speed as u64).sum() + }; + adaptive_window.push_back(total_speed); + if adaptive_window.len() > ADAPTIVE_WINDOW_SIZE { + adaptive_window.pop_front(); + } + if adaptive_window.len() >= ADAPTIVE_WINDOW_SIZE { + let window_slice: Vec = adaptive_window.iter().copied().collect(); + let effective_current = if adaptive_current_limit == 0 { max_limit } else { adaptive_current_limit }; + let new_limit = compute_adaptive_limit( + &window_slice, + adaptive_current_limit, + max_limit, + ); + if new_limit != effective_current { + let limit_str = format!("{}k", new_limit / 1024); + info!( + adaptive_limit = %limit_str, + "Adaptive bwlimit adjusted" + ); + adaptive_current_limit = new_limit; + apply_bwlimit(mounts, &limit_str, &cfg.bandwidth.limit_down); + } + } + } + } else if adaptive_current_limit != 0 { + // Adaptive was turned off: restore the configured limit + adaptive_current_limit = 0; + apply_bwlimit(mounts, 
&cfg.bandwidth.limit_up, &cfg.bandwidth.limit_down); + } + } + // Log cache state changes and periodic snapshots log_cache_events(shared_status, &config, &mut prev_states, &mut last_stats_snapshot); @@ -1638,6 +1697,52 @@ fn log_cache_events( } } +/// Convert a standard 5-field cron expression to the 7-field format expected +/// by the `cron` crate ("sec min hour dom month dow year"). +/// +/// - 5 fields ("min hour dom month dow") → prepend "0 " (sec=0), append " *" (year=any) +/// - 6 fields (already has sec) → append " *" (year=any) +/// - 7 fields → unchanged +fn normalize_cron_schedule(expr: &str) -> String { + let fields: Vec<&str> = expr.split_whitespace().collect(); + match fields.len() { + 5 => format!("0 {} *", expr), + 6 => format!("{} *", expr), + _ => expr.to_string(), + } +} + +/// Compute the new adaptive bandwidth limit from a window of speed samples. +/// +/// - `window`: recent aggregate upload speed samples in bytes/sec (must be non-empty) +/// - `current_limit`: last applied limit (0 = "use `max_limit` as baseline") +/// - `max_limit`: configured upper bound in bytes/sec (0 = unlimited → passthrough) +/// +/// Returns the new limit to apply (bytes/sec). 
+fn compute_adaptive_limit(window: &[u64], current_limit: u64, max_limit: u64) -> u64 { + if max_limit == 0 || window.is_empty() { + return current_limit; + } + let current = if current_limit == 0 { max_limit } else { current_limit }; + let n = window.len() as f64; + let mean = window.iter().sum::() as f64 / n; + let variance = window.iter() + .map(|&x| { let d = x as f64 - mean; d * d }) + .sum::() / n; + let std_dev = variance.sqrt(); + + if mean > 0.0 && std_dev / mean > 0.3 { + // Congested (high coefficient of variation): reduce 25%, floor at 1 MiB/s + ((current as f64 * 0.75) as u64).max(1024 * 1024) + } else if mean >= current as f64 * 0.9 { + // Stable and near limit: maintain + current + } else { + // Stable but under-utilizing: increase 10%, cap at max + ((current as f64 * 1.1) as u64).min(max_limit) + } +} + /// Parse a human-readable size string (e.g. "200G", "1.5T", "512M") into bytes. fn parse_size_bytes(s: &str) -> Option { let s = s.trim(); @@ -1742,4 +1847,129 @@ mod tests { assert_eq!(parse_size_bytes("200GB"), Some(200 * 1024 * 1024 * 1024)); assert_eq!(parse_size_bytes("bogus"), None); } + + // ----------------------------------------------------------------------- + // normalize_cron_schedule + // ----------------------------------------------------------------------- + + #[test] + fn test_normalize_cron_5field() { + // Standard cron "min hour dom month dow" → prepend "0 " (sec=0), append " *" (year=any) + assert_eq!(normalize_cron_schedule("0 2 * * *"), "0 0 2 * * * *"); + } + + #[test] + fn test_normalize_cron_5field_wildcard_min() { + // Every 5 minutes + assert_eq!(normalize_cron_schedule("*/5 * * * *"), "0 */5 * * * * *"); + } + + #[test] + fn test_normalize_cron_6field() { + // 6-field (already has seconds) → append " *" for year + assert_eq!(normalize_cron_schedule("0 0 2 * * *"), "0 0 2 * * * *"); + } + + #[test] + fn test_normalize_cron_7field() { + // Already 7 fields → unchanged + assert_eq!(normalize_cron_schedule("0 0 2 * * * *"), 
"0 0 2 * * * *"); + } + + #[test] + fn test_normalize_cron_7field_unchanged_complex() { + let expr = "0 30 9,12 1,15 May-Aug Mon,Wed *"; + assert_eq!(normalize_cron_schedule(expr), expr); + } + + // ----------------------------------------------------------------------- + // compute_adaptive_limit + // ----------------------------------------------------------------------- + + const MIB: u64 = 1024 * 1024; + + #[test] + fn test_adaptive_window_size_constant() { + assert_eq!(ADAPTIVE_WINDOW_SIZE, 6); + } + + #[test] + fn test_compute_adaptive_limit_congested_reduces_25pct() { + // Alternating 1M/5M → mean=3M, std_dev=2M, cv=0.67 > 0.3 → congested + let window = vec![MIB, 5 * MIB, MIB, 5 * MIB, MIB, 5 * MIB]; + let max = 10 * MIB; + let current = 10 * MIB; + let new = compute_adaptive_limit(&window, current, max); + assert_eq!(new, ((10 * MIB) as f64 * 0.75) as u64); + assert!(new < current); + } + + #[test] + fn test_compute_adaptive_limit_congested_floor_at_1mib() { + // Very noisy but current is near floor — must not go below 1 MiB/s + let window = vec![100, MIB, 100, MIB, 100, MIB]; + let max = 10 * MIB; + let current = (MIB as f64 * 1.1) as u64; // slightly above floor + let new = compute_adaptive_limit(&window, current, max); + assert!(new >= MIB, "floor violated: {new} < {MIB}"); + } + + #[test] + fn test_compute_adaptive_limit_stable_near_max_maintains() { + // All samples ≥ 90% of limit → maintain + let limit = 10 * MIB; + let window = vec![9_500_000, 9_600_000, 9_700_000, 9_800_000, 9_900_000, 10_000_000]; + let new = compute_adaptive_limit(&window, limit, limit); + assert_eq!(new, limit); + } + + #[test] + fn test_compute_adaptive_limit_under_utilizing_increases_10pct() { + // mean=3M, current=5M → 3M < 5M*0.9=4.5M → under-utilizing → +10% + let max = 10 * MIB; + let current = 5 * MIB; + let window = vec![ + 2_800_000, 3_000_000, 3_200_000, + 2_900_000, 3_100_000, 3_000_000, + ]; + let new = compute_adaptive_limit(&window, current, max); + assert_eq!(new, 
(current as f64 * 1.1) as u64); + assert!(new > current); + } + + #[test] + fn test_compute_adaptive_limit_increase_capped_at_max() { + // current near max — 10% increase would exceed max, should be capped + let max = 10 * MIB; + let current = 9_500_000u64; // 9.5 MiB; +10% = 10.45 MiB > max + let window = vec![3_000_000; 6]; // under-utilizing + let new = compute_adaptive_limit(&window, current, max); + assert!(new <= max, "cap violated: {new} > {max}"); + } + + #[test] + fn test_compute_adaptive_limit_zero_current_uses_max_as_baseline() { + // current=0 means "baseline = max_limit" + let max = 10 * MIB; + // Under-utilizing from max baseline → +10%, capped at max + let window = vec![3_000_000; 6]; + let new = compute_adaptive_limit(&window, 0, max); + assert!(new <= max); + // (10M * 1.1).min(10M) = 10M + assert_eq!(new, max); + } + + #[test] + fn test_compute_adaptive_limit_zero_max_passthrough() { + // max=0 means unlimited — function returns current unchanged + let window = vec![MIB; 6]; + let new = compute_adaptive_limit(&window, 5 * MIB, 0); + assert_eq!(new, 5 * MIB); + } + + #[test] + fn test_compute_adaptive_limit_empty_window_passthrough() { + let new = compute_adaptive_limit(&[], 5 * MIB, 10 * MIB); + assert_eq!(new, 5 * MIB); + } } diff --git a/src/web/api.rs b/src/web/api.rs index c949147..afa6ab3 100644 --- a/src/web/api.rs +++ b/src/web/api.rs @@ -324,61 +324,17 @@ async fn post_preset( ) -> axum::response::Response { use axum::response::IntoResponse; - let allowed = ["photographer", "video", "office"]; - if !allowed.contains(&profile.as_str()) { - return (StatusCode::BAD_REQUEST, "Unknown preset").into_response(); - } + let preset = match profile.parse::() { + Ok(p) => p, + Err(e) => return (StatusCode::BAD_REQUEST, e.to_string()).into_response(), + }; let mut config = { let cfg = state.config.read().unwrap(); cfg.clone() }; - match profile.as_str() { - "photographer" => { - config.read.chunk_size = "256M".into(); - config.read.chunk_limit = 
"1G".into(); - config.read.read_ahead = "512M".into(); - config.read.buffer_size = "256M".into(); - config.read.multi_thread_streams = 4; - config.read.multi_thread_cutoff = "50M".into(); - config.directory_cache.cache_time = "2h".into(); - config.writeback.write_back = "5s".into(); - config.writeback.transfers = 4; - config.protocols.enable_smb = true; - config.protocols.enable_nfs = false; - config.protocols.enable_webdav = false; - } - "video" => { - config.read.chunk_size = "512M".into(); - config.read.chunk_limit = "2G".into(); - config.read.read_ahead = "1G".into(); - config.read.buffer_size = "512M".into(); - config.read.multi_thread_streams = 2; - config.read.multi_thread_cutoff = "100M".into(); - config.directory_cache.cache_time = "1h".into(); - config.writeback.write_back = "5s".into(); - config.writeback.transfers = 2; - config.protocols.enable_smb = true; - config.protocols.enable_nfs = false; - config.protocols.enable_webdav = false; - } - "office" => { - config.read.chunk_size = "64M".into(); - config.read.chunk_limit = "256M".into(); - config.read.read_ahead = "128M".into(); - config.read.buffer_size = "128M".into(); - config.read.multi_thread_streams = 4; - config.read.multi_thread_cutoff = "10M".into(); - config.directory_cache.cache_time = "30m".into(); - config.writeback.write_back = "3s".into(); - config.writeback.transfers = 4; - config.protocols.enable_smb = true; - config.protocols.enable_nfs = false; - config.protocols.enable_webdav = true; - } - _ => unreachable!(), - } + preset.apply(&mut config); let toml_content = config.to_commented_toml(); if let Err(e) = std::fs::write(&state.config_path, &toml_content) { diff --git a/templates/web/tabs/config.html b/templates/web/tabs/config.html index 1a8f995..18d262c 100644 --- a/templates/web/tabs/config.html +++ b/templates/web/tabs/config.html @@ -19,6 +19,9 @@ function configEditorFn() { smb_auth: false, warmup: false, dir_refresh: false, + web: false, + notifications: false, + log: false, }, 
init() { @@ -558,6 +561,85 @@ if (window.Alpine) { + +
+
+

Web UI No restart

+ +
+
+
+ + +
+

+ Protects the Web UI with HTTP Basic Auth. Leave empty to allow unauthenticated access. +

+
+
+ + +
+
+

Notifications No restart

+ +
+
+
+
+ + +
+
+ + +
+
+ + +
+
+ + +
+
+

+ Send push notifications when cache is near full, NAS goes offline, or write-back queue grows large. + Leave Webhook URL empty to disable all notifications. +

+
+
+ + +
+
+

Log Full restart

+ +
+
+
+
+ + +
+
+ + +
+
+

+ Changes to log settings require a full service restart to take effect. + Leave Log File empty to disable file logging (stdout only). +

+
+
+