feat: fill implementation gaps — preset unification, cron, adaptive bw, update cmd, tests

Step 1 — Unify preset logic (eliminate dual implementation)
- src/cli/preset.rs: add missing fields (chunk_limit, multi_thread_streams,
  multi_thread_cutoff), fix Office buffer_size 64M→128M, implement FromStr
- src/web/api.rs: post_preset() now calls Preset::apply() — no more inlined
  params; Office write_back unified to 5s (was 3s in API)

Step 2 — Fix setup.rs connection test: warn→bail
- All 4 "Warning: Could not connect/resolve" prints replaced with anyhow::bail!
  matching deploy/setup.rs behavior

Step 3 — Web UI: add [web] and [notifications] edit sections
- templates/web/tabs/config.html: new collapsible Web UI (password) and
  Notifications (webhook_url, cache_threshold_pct, nas_offline_minutes,
  writeback_depth) sections, both tagged "No restart"
- Also adds [log] section (file path + level select, "Full restart")

Step 4 — Full cron expression support in warmup scheduler
- Cargo.toml: add cron = "0.12", chrono = "0.4"
- supervisor.rs: normalize_cron_schedule() converts 5-field standard cron to
  7-field cron crate format; replaces naive hour-only matching

Step 5 — Adaptive bandwidth algorithm
- supervisor.rs: extract compute_adaptive_limit() pure function; sliding
  window of 6 samples, cv>0.3→congested (−25%, floor 1MiB/s), stable
  near-limit→maintain, under-utilizing→+10% (capped at limit_up)

Step 6 — warpgate update command
- src/cli/update.rs: query GitHub Releases API, compare with CARGO_PKG_VERSION
- src/main.rs: add Update{apply}, SetupWifi, CloneMac{interface} commands
- src/cli/wifi.rs: TODO stub for WiFi AP setup

Unit tests (+35, total 188→223)
- cli/preset.rs: 10 tests — FromStr, all fields for each preset, idempotency,
  connection/share isolation, write_back consistency regression
- supervisor.rs: 14 tests — normalize_cron_schedule (5 cases),
  compute_adaptive_limit (9 cases: congestion, floor, stable, under-utilizing,
  cap, zero-current, zero-max, empty window)
- config.rs: 11 tests — WebConfig (3), NotificationsConfig (4), LogConfig (4)

Shell tests (+4 scripts)
- tests/09-cli/test-preset-cli.sh: preset CLI without daemon; checks all
  three presets write correct values including unified buffer_size/write_back
- tests/09-cli/test-update-command.sh: update command; skips on no-network
- tests/10-scheduled/test-cron-warmup-schedule.sh: "* * * * *" fires in <90s
- tests/10-scheduled/test-adaptive-bandwidth.sh: adaptive loop stability
- tests/harness/config-gen.sh: add warmup.warmup_schedule override support
- tests/run-all.sh: add 10-scheduled category

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
grabbit 2026-02-19 16:55:00 +08:00
parent a11c899d71
commit faf9d80824
18 changed files with 1330 additions and 84 deletions

216
Cargo.lock generated
View File

@ -17,6 +17,15 @@ dependencies = [
"memchr",
]
[[package]]
name = "android_system_properties"
version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311"
dependencies = [
"libc",
]
[[package]]
name = "anstream"
version = "0.6.21"
@ -131,6 +140,12 @@ version = "1.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0"
[[package]]
name = "autocfg"
version = "1.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8"
[[package]]
name = "axum"
version = "0.8.8"
@ -213,6 +228,12 @@ dependencies = [
"objc2",
]
[[package]]
name = "bumpalo"
version = "3.20.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5c6f81257d10a0f602a294ae4182251151ff97dbb504ef9afcdda4a64b24d9b4"
[[package]]
name = "bytes"
version = "1.11.1"
@ -241,6 +262,19 @@ version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724"
[[package]]
name = "chrono"
version = "0.4.43"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fac4744fb15ae8337dc853fee7fb3f4e48c0fbaa23d0afe49c447b4fab126118"
dependencies = [
"iana-time-zone",
"js-sys",
"num-traits",
"wasm-bindgen",
"windows-link",
]
[[package]]
name = "clap"
version = "4.5.59"
@ -316,6 +350,12 @@ dependencies = [
"url",
]
[[package]]
name = "core-foundation-sys"
version = "0.8.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b"
[[package]]
name = "crc32fast"
version = "1.5.0"
@ -325,6 +365,17 @@ dependencies = [
"cfg-if",
]
[[package]]
name = "cron"
version = "0.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6f8c3e73077b4b4a6ab1ea5047c37c57aee77657bc8ecd6f29b0af082d0b0c07"
dependencies = [
"chrono",
"nom",
"once_cell",
]
[[package]]
name = "crossbeam-channel"
version = "0.5.15"
@ -566,6 +617,30 @@ dependencies = [
"tower-service",
]
[[package]]
name = "iana-time-zone"
version = "0.1.65"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e31bc9ad994ba00e440a8aa5c9ef0ec67d5cb5e5cb0cc7f8b744a35b389cc470"
dependencies = [
"android_system_properties",
"core-foundation-sys",
"iana-time-zone-haiku",
"js-sys",
"log",
"wasm-bindgen",
"windows-core",
]
[[package]]
name = "iana-time-zone-haiku"
version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f"
dependencies = [
"cc",
]
[[package]]
name = "icu_collections"
version = "2.1.1"
@ -690,6 +765,16 @@ version = "1.0.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "92ecc6618181def0457392ccd0ee51198e065e016d1d527a7ac1b6dc7c1f09d2"
[[package]]
name = "js-sys"
version = "0.3.85"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8c942ebf8e95485ca0d52d97da7c5a2c387d0e7f0ba4c35e93bfcaee045955b3"
dependencies = [
"once_cell",
"wasm-bindgen",
]
[[package]]
name = "lazy_static"
version = "1.5.0"
@ -747,6 +832,12 @@ version = "0.3.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a"
[[package]]
name = "minimal-lexical"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a"
[[package]]
name = "miniz_oxide"
version = "0.8.9"
@ -780,6 +871,16 @@ dependencies = [
"libc",
]
[[package]]
name = "nom"
version = "7.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a"
dependencies = [
"memchr",
"minimal-lexical",
]
[[package]]
name = "nu-ansi-term"
version = "0.50.3"
@ -795,6 +896,15 @@ version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9"
[[package]]
name = "num-traits"
version = "0.2.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841"
dependencies = [
"autocfg",
]
[[package]]
name = "objc2"
version = "0.6.3"
@ -945,6 +1055,12 @@ dependencies = [
"untrusted",
]
[[package]]
name = "rustversion"
version = "1.0.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d"
[[package]]
name = "ryu"
version = "1.0.23"
@ -1483,7 +1599,9 @@ dependencies = [
"anyhow",
"askama",
"axum",
"chrono",
"clap",
"cron",
"ctrlc",
"libc",
"serde",
@ -1505,6 +1623,51 @@ version = "0.11.1+wasi-snapshot-preview1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b"
[[package]]
name = "wasm-bindgen"
version = "0.2.108"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "64024a30ec1e37399cf85a7ffefebdb72205ca1c972291c51512360d90bd8566"
dependencies = [
"cfg-if",
"once_cell",
"rustversion",
"wasm-bindgen-macro",
"wasm-bindgen-shared",
]
[[package]]
name = "wasm-bindgen-macro"
version = "0.2.108"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "008b239d9c740232e71bd39e8ef6429d27097518b6b30bdf9086833bd5b6d608"
dependencies = [
"quote",
"wasm-bindgen-macro-support",
]
[[package]]
name = "wasm-bindgen-macro-support"
version = "0.2.108"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5256bae2d58f54820e6490f9839c49780dff84c65aeab9e772f15d5f0e913a55"
dependencies = [
"bumpalo",
"proc-macro2",
"quote",
"syn",
"wasm-bindgen-shared",
]
[[package]]
name = "wasm-bindgen-shared"
version = "0.2.108"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1f01b580c9ac74c8d8f0c0e4afb04eeef2acf145458e52c03845ee9cd23e3d12"
dependencies = [
"unicode-ident",
]
[[package]]
name = "webpki-roots"
version = "1.0.6"
@ -1514,12 +1677,65 @@ dependencies = [
"rustls-pki-types",
]
[[package]]
name = "windows-core"
version = "0.62.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b8e83a14d34d0623b51dce9581199302a221863196a1dde71a7663a4c2be9deb"
dependencies = [
"windows-implement",
"windows-interface",
"windows-link",
"windows-result",
"windows-strings",
]
[[package]]
name = "windows-implement"
version = "0.60.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "053e2e040ab57b9dc951b72c264860db7eb3b0200ba345b4e4c3b14f67855ddf"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "windows-interface"
version = "0.59.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3f316c4a2570ba26bbec722032c4099d8c8bc095efccdc15688708623367e358"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "windows-link"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5"
[[package]]
name = "windows-result"
version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7781fa89eaf60850ac3d2da7af8e5242a5ea78d1a11c49bf2910bb5a73853eb5"
dependencies = [
"windows-link",
]
[[package]]
name = "windows-strings"
version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7837d08f69c77cf6b07689544538e017c1bfcf57e34b4c0ff58e6c2cd3b37091"
dependencies = [
"windows-link",
]
[[package]]
name = "windows-sys"
version = "0.52.0"

View File

@ -21,3 +21,5 @@ tower-http = { version = "0.6", features = ["cors"] }
tracing = "0.1"
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
tracing-appender = "0.2"
cron = "0.12"
chrono = { version = "0.4", features = ["clock"] }

View File

@ -7,4 +7,6 @@ pub mod reconnect;
pub mod setup;
pub mod speed_test;
pub mod status;
pub mod update;
pub mod warmup;
pub mod wifi; // TODO: WiFi AP setup

View File

@ -17,23 +17,33 @@ pub enum Preset {
Office,
}
impl Preset {
pub fn from_str(s: &str) -> Option<Self> {
impl std::str::FromStr for Preset {
type Err = anyhow::Error;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
match s {
"photographer" => Some(Self::Photographer),
"video" => Some(Self::Video),
"office" => Some(Self::Office),
_ => None,
"photographer" => Ok(Self::Photographer),
"video" => Ok(Self::Video),
"office" => Ok(Self::Office),
_ => Err(anyhow::anyhow!(
"Unknown preset '{}'. Use: photographer, video, office",
s
)),
}
}
}
impl Preset {
pub fn apply(&self, config: &mut Config) {
match self {
Self::Photographer => {
config.cache.max_size = "500G".into();
config.read.chunk_size = "256M".into();
config.read.chunk_limit = "1G".into();
config.read.read_ahead = "512M".into();
config.read.buffer_size = "256M".into();
config.read.multi_thread_streams = 4;
config.read.multi_thread_cutoff = "50M".into();
config.directory_cache.cache_time = "2h".into();
config.writeback.write_back = "5s".into();
config.writeback.transfers = 4;
@ -44,8 +54,11 @@ impl Preset {
Self::Video => {
config.cache.max_size = "1T".into();
config.read.chunk_size = "512M".into();
config.read.chunk_limit = "2G".into();
config.read.read_ahead = "1G".into();
config.read.buffer_size = "512M".into();
config.read.multi_thread_streams = 2;
config.read.multi_thread_cutoff = "100M".into();
config.directory_cache.cache_time = "1h".into();
config.writeback.write_back = "5s".into();
config.writeback.transfers = 2;
@ -56,8 +69,11 @@ impl Preset {
Self::Office => {
config.cache.max_size = "50G".into();
config.read.chunk_size = "64M".into();
config.read.chunk_limit = "256M".into();
config.read.read_ahead = "128M".into();
config.read.buffer_size = "64M".into();
config.read.buffer_size = "128M".into();
config.read.multi_thread_streams = 4;
config.read.multi_thread_cutoff = "10M".into();
config.directory_cache.cache_time = "30m".into();
config.writeback.write_back = "5s".into();
config.writeback.transfers = 4;
@ -77,13 +93,168 @@ impl Preset {
}
}
pub fn run(config: &mut Config, config_path: &Path, preset_name: &str) -> Result<()> {
let preset = Preset::from_str(preset_name).ok_or_else(|| {
anyhow::anyhow!(
"Unknown preset '{}'. Use: photographer, video, office",
preset_name
#[cfg(test)]
mod tests {
use super::*;
fn test_config() -> Config {
toml::from_str(
r#"
[[connections]]
name = "nas"
nas_host = "10.0.0.1"
nas_user = "admin"
[cache]
dir = "/tmp/cache"
[read]
[bandwidth]
[writeback]
[directory_cache]
[protocols]
[[shares]]
name = "photos"
connection = "nas"
remote_path = "/photos"
mount_point = "/mnt/photos"
"#,
)
})?;
.unwrap()
}
// --- FromStr ---
#[test]
fn test_preset_parse_valid() {
assert!(matches!("photographer".parse::<Preset>(), Ok(Preset::Photographer)));
assert!(matches!("video".parse::<Preset>(), Ok(Preset::Video)));
assert!(matches!("office".parse::<Preset>(), Ok(Preset::Office)));
}
#[test]
fn test_preset_parse_invalid() {
assert!("unknown".parse::<Preset>().is_err());
assert!("".parse::<Preset>().is_err());
assert!("Photographer".parse::<Preset>().is_err()); // case-sensitive
assert!("OFFICE".parse::<Preset>().is_err());
}
#[test]
fn test_preset_parse_error_message() {
let err = "bad".parse::<Preset>().unwrap_err();
assert!(err.to_string().contains("bad"), "error should mention the bad value");
}
// --- Preset::apply — field values ---
#[test]
fn test_photographer_apply_all_fields() {
let mut cfg = test_config();
Preset::Photographer.apply(&mut cfg);
assert_eq!(cfg.cache.max_size, "500G");
assert_eq!(cfg.read.chunk_size, "256M");
assert_eq!(cfg.read.chunk_limit, "1G");
assert_eq!(cfg.read.read_ahead, "512M");
assert_eq!(cfg.read.buffer_size, "256M");
assert_eq!(cfg.read.multi_thread_streams, 4);
assert_eq!(cfg.read.multi_thread_cutoff, "50M");
assert_eq!(cfg.directory_cache.cache_time, "2h");
assert_eq!(cfg.writeback.write_back, "5s");
assert_eq!(cfg.writeback.transfers, 4);
assert!(cfg.protocols.enable_smb);
assert!(!cfg.protocols.enable_nfs);
assert!(!cfg.protocols.enable_webdav);
}
#[test]
fn test_video_apply_all_fields() {
let mut cfg = test_config();
Preset::Video.apply(&mut cfg);
assert_eq!(cfg.cache.max_size, "1T");
assert_eq!(cfg.read.chunk_size, "512M");
assert_eq!(cfg.read.chunk_limit, "2G");
assert_eq!(cfg.read.read_ahead, "1G");
assert_eq!(cfg.read.buffer_size, "512M");
assert_eq!(cfg.read.multi_thread_streams, 2);
assert_eq!(cfg.read.multi_thread_cutoff, "100M");
assert_eq!(cfg.directory_cache.cache_time, "1h");
assert_eq!(cfg.writeback.write_back, "5s");
assert_eq!(cfg.writeback.transfers, 2);
assert!(cfg.protocols.enable_smb);
assert!(!cfg.protocols.enable_nfs);
assert!(!cfg.protocols.enable_webdav);
}
#[test]
fn test_office_apply_all_fields() {
let mut cfg = test_config();
Preset::Office.apply(&mut cfg);
assert_eq!(cfg.cache.max_size, "50G");
assert_eq!(cfg.read.chunk_size, "64M");
assert_eq!(cfg.read.chunk_limit, "256M");
assert_eq!(cfg.read.read_ahead, "128M");
assert_eq!(cfg.read.buffer_size, "128M");
assert_eq!(cfg.read.multi_thread_streams, 4);
assert_eq!(cfg.read.multi_thread_cutoff, "10M");
assert_eq!(cfg.directory_cache.cache_time, "30m");
assert_eq!(cfg.writeback.write_back, "5s");
assert_eq!(cfg.writeback.transfers, 4);
assert!(cfg.protocols.enable_smb);
assert!(!cfg.protocols.enable_nfs);
assert!(cfg.protocols.enable_webdav);
}
#[test]
fn test_preset_does_not_change_connections_or_shares() {
let mut cfg = test_config();
Preset::Photographer.apply(&mut cfg);
// Preset must never touch connection or share settings
assert_eq!(cfg.connections[0].nas_host, "10.0.0.1");
assert_eq!(cfg.connections[0].nas_user, "admin");
assert_eq!(cfg.shares[0].name, "photos");
assert_eq!(cfg.shares[0].remote_path, "/photos");
}
#[test]
fn test_preset_apply_is_idempotent() {
let mut cfg = test_config();
Preset::Video.apply(&mut cfg);
let snapshot_chunk = cfg.read.chunk_size.clone();
Preset::Video.apply(&mut cfg);
assert_eq!(cfg.read.chunk_size, snapshot_chunk);
}
#[test]
fn test_presets_have_consistent_write_back() {
// All three presets should use the same write_back value (plan §1 unified)
let mut cfg = test_config();
Preset::Photographer.apply(&mut cfg);
let wb_p = cfg.writeback.write_back.clone();
Preset::Video.apply(&mut cfg);
let wb_v = cfg.writeback.write_back.clone();
Preset::Office.apply(&mut cfg);
let wb_o = cfg.writeback.write_back.clone();
assert_eq!(wb_p, wb_v, "Photographer and Video write_back must match");
assert_eq!(wb_v, wb_o, "Video and Office write_back must match");
}
// --- description ---
#[test]
fn test_description_mentions_cache_size() {
assert!(Preset::Photographer.description().contains("500G"));
assert!(Preset::Video.description().contains("1T"));
assert!(Preset::Office.description().contains("50G"));
}
}
pub fn run(config: &mut Config, config_path: &Path, preset_name: &str) -> Result<()> {
let preset: Preset = preset_name.parse()?;
preset.apply(config);

View File

@ -204,7 +204,10 @@ pub fn run(output: Option<PathBuf>) -> Result<()> {
match addr_str.parse::<SocketAddr>() {
Ok(addr) => match TcpStream::connect_timeout(&addr, Duration::from_secs(5)) {
Ok(_) => println!(" Connection OK"),
Err(e) => println!(" Warning: Could not connect to {}: {}", addr_str, e),
Err(e) => anyhow::bail!(
"Cannot connect to {}:{} — check NAS host/port and ensure Tailscale is active.\nDetails: {}",
nas_host, sftp_port, e
),
},
Err(_) => {
// Might be a hostname — try resolving
@ -214,15 +217,22 @@ pub fn run(output: Option<PathBuf>) -> Result<()> {
if let Some(addr) = addrs.next() {
match TcpStream::connect_timeout(&addr, Duration::from_secs(5)) {
Ok(_) => println!(" Connection OK"),
Err(e) => {
println!(" Warning: Could not connect to {}: {}", addr_str, e)
}
Err(e) => anyhow::bail!(
"Cannot connect to {}:{} — check NAS host/port and ensure Tailscale is active.\nDetails: {}",
nas_host, sftp_port, e
),
}
} else {
println!(" Warning: Could not resolve {}", addr_str);
anyhow::bail!(
"Cannot resolve hostname '{}' — check NAS host and ensure DNS is working.",
nas_host
);
}
}
Err(e) => println!(" Warning: Could not resolve {}: {}", addr_str, e),
Err(e) => anyhow::bail!(
"Cannot resolve hostname '{}' — check NAS host and ensure DNS is working.\nDetails: {}",
nas_host, e
),
}
}
}

64
src/cli/update.rs Normal file
View File

@ -0,0 +1,64 @@
//! `warpgate update` — check for newer versions of Warpgate.
//!
//! Queries the GitHub Releases API to compare the running version with the
//! latest published release and optionally prints installation instructions.
use anyhow::Result;
/// GitHub repository path (owner/repo).
const GITHUB_REPO: &str = "warpgate-project/warpgate";
/// Current version from Cargo.toml.
const CURRENT_VERSION: &str = env!("CARGO_PKG_VERSION");
/// Check GitHub for a newer Warpgate release and report the result.
///
/// Compares the running binary's version (`CARGO_PKG_VERSION`) against the
/// latest published release tag. With `apply = true`, also prints the shell
/// commands needed to install the newer binary; otherwise it only hints at
/// the `--apply` flag.
///
/// # Errors
/// Fails when the GitHub API is unreachable, returns unparseable JSON, or
/// the response carries no usable `tag_name`.
pub fn run(apply: bool) -> Result<()> {
    let api_url = format!("https://api.github.com/repos/{GITHUB_REPO}/releases/latest");

    println!("Checking for updates...");
    println!(" Current version: v{CURRENT_VERSION}");

    // GitHub rejects anonymous requests without a User-Agent, so always send one.
    let response = ureq::get(&api_url)
        .header("User-Agent", "warpgate-updater")
        .call()
        .map_err(|e| anyhow::anyhow!("Failed to reach GitHub API: {e}"))?;
    let payload: serde_json::Value = response
        .into_body()
        .read_json()
        .map_err(|e| anyhow::anyhow!("Failed to parse GitHub API response: {e}"))?;

    // Release tags are published as "vX.Y.Z"; strip the prefix before comparing.
    let latest = payload["tag_name"]
        .as_str()
        .unwrap_or_default()
        .trim_start_matches('v');
    if latest.is_empty() {
        anyhow::bail!("Could not determine latest version from GitHub API response");
    }

    if latest == CURRENT_VERSION {
        println!(" Latest version: v{latest}");
        println!("Already up to date (v{CURRENT_VERSION}).");
        return Ok(());
    }

    println!(" Latest version: v{latest} ← new release available");
    println!();
    println!("Changelog: https://github.com/{GITHUB_REPO}/releases/tag/v{latest}");

    if apply {
        println!();
        println!("To install the latest version, run:");
        println!(
            " curl -fsSL https://github.com/{GITHUB_REPO}/releases/download/v{latest}/warpgate-linux-x86_64 \\\n | sudo install -m 0755 /dev/stdin /usr/local/bin/warpgate"
        );
        println!(" sudo systemctl restart warpgate");
    } else {
        println!();
        println!("Run `warpgate update --apply` to print the installation command.");
    }

    Ok(())
}

6
src/cli/wifi.rs Normal file
View File

@ -0,0 +1,6 @@
//! `warpgate setup-wifi` — WiFi AP + captive portal setup.
//!
//! TODO: WiFi AP setup (hostapd + dnsmasq + iptables).
//! Planned implementation: generate hostapd.conf, dnsmasq.conf, and iptables
//! rules to create a local WiFi AP that proxies client traffic through
//! the Warpgate cache layer.

View File

@ -1900,4 +1900,127 @@ mount_point = "/mnt/photos"
assert!(!is_valid_remote_name("nas:1"));
assert!(!is_valid_remote_name("nas/1"));
}
// -----------------------------------------------------------------------
// WebConfig
// -----------------------------------------------------------------------
#[test]
fn test_web_config_default_password_empty() {
let config: Config = toml::from_str(minimal_toml()).unwrap();
assert_eq!(config.web.password, "", "default web password should be empty");
}
#[test]
fn test_web_config_password_set() {
let toml_str = format!("{}\n[web]\npassword = \"s3cr3t\"", minimal_toml());
let config: Config = toml::from_str(&toml_str).unwrap();
assert_eq!(config.web.password, "s3cr3t");
}
#[test]
fn test_web_config_serialization_roundtrip() {
let toml_str = format!("{}\n[web]\npassword = \"mypass\"", minimal_toml());
let config: Config = toml::from_str(&toml_str).unwrap();
let serialized = config.to_commented_toml();
let config2: Config = toml::from_str(&serialized).unwrap();
assert_eq!(config.web.password, config2.web.password);
}
// -----------------------------------------------------------------------
// NotificationsConfig
// -----------------------------------------------------------------------
#[test]
fn test_notifications_config_defaults() {
let config: Config = toml::from_str(minimal_toml()).unwrap();
assert_eq!(config.notifications.webhook_url, "");
assert_eq!(config.notifications.cache_threshold_pct, 80);
assert_eq!(config.notifications.nas_offline_minutes, 5);
assert_eq!(config.notifications.writeback_depth, 50);
}
#[test]
fn test_notifications_config_all_fields() {
let toml_str = format!(
"{}\n[notifications]\nwebhook_url = \"https://hook.example.com\"\ncache_threshold_pct = 90\nnas_offline_minutes = 10\nwriteback_depth = 100",
minimal_toml()
);
let config: Config = toml::from_str(&toml_str).unwrap();
assert_eq!(config.notifications.webhook_url, "https://hook.example.com");
assert_eq!(config.notifications.cache_threshold_pct, 90);
assert_eq!(config.notifications.nas_offline_minutes, 10);
assert_eq!(config.notifications.writeback_depth, 100);
}
#[test]
fn test_notifications_config_partial_override_keeps_defaults() {
// Partial [notifications] section: only webhook_url set
let toml_str = format!(
"{}\n[notifications]\nwebhook_url = \"https://example.com\"",
minimal_toml()
);
let config: Config = toml::from_str(&toml_str).unwrap();
assert_eq!(config.notifications.webhook_url, "https://example.com");
assert_eq!(config.notifications.cache_threshold_pct, 80); // still default
assert_eq!(config.notifications.nas_offline_minutes, 5);
assert_eq!(config.notifications.writeback_depth, 50);
}
#[test]
fn test_notifications_config_serialization_roundtrip() {
let toml_str = format!(
"{}\n[notifications]\nwebhook_url = \"https://rt.test\"\ncache_threshold_pct = 70\nnas_offline_minutes = 3\nwriteback_depth = 25",
minimal_toml()
);
let config: Config = toml::from_str(&toml_str).unwrap();
let serialized = config.to_commented_toml();
let config2: Config = toml::from_str(&serialized).unwrap();
assert_eq!(config.notifications.webhook_url, config2.notifications.webhook_url);
assert_eq!(config.notifications.cache_threshold_pct, config2.notifications.cache_threshold_pct);
assert_eq!(config.notifications.nas_offline_minutes, config2.notifications.nas_offline_minutes);
assert_eq!(config.notifications.writeback_depth, config2.notifications.writeback_depth);
}
// -----------------------------------------------------------------------
// LogConfig
// -----------------------------------------------------------------------
#[test]
fn test_log_config_defaults() {
let config: Config = toml::from_str(minimal_toml()).unwrap();
assert_eq!(config.log.file, "/var/log/warpgate/warpgate.log");
assert_eq!(config.log.level, "info");
}
#[test]
fn test_log_config_custom_values() {
let toml_str = format!(
"{}\n[log]\nfile = \"/tmp/warpgate-test.log\"\nlevel = \"debug\"",
minimal_toml()
);
let config: Config = toml::from_str(&toml_str).unwrap();
assert_eq!(config.log.file, "/tmp/warpgate-test.log");
assert_eq!(config.log.level, "debug");
}
#[test]
fn test_log_config_empty_file_disables_file_logging() {
let toml_str = format!("{}\n[log]\nfile = \"\"", minimal_toml());
let config: Config = toml::from_str(&toml_str).unwrap();
assert_eq!(config.log.file, "", "empty file = no file logging");
}
#[test]
fn test_log_config_serialization_roundtrip() {
let toml_str = format!(
"{}\n[log]\nfile = \"/var/log/wg.log\"\nlevel = \"warn\"",
minimal_toml()
);
let config: Config = toml::from_str(&toml_str).unwrap();
let serialized = config.to_commented_toml();
let config2: Config = toml::from_str(&serialized).unwrap();
assert_eq!(config.log.file, config2.log.file);
assert_eq!(config.log.level, config2.log.level);
}
}

View File

@ -100,6 +100,19 @@ enum Commands {
/// Share name to reconnect.
share: String,
},
/// Check for a newer version of Warpgate.
Update {
/// Download and print install instructions for the latest binary.
#[arg(long)]
apply: bool,
},
/// Set up a local WiFi AP + captive portal (requires hostapd + dnsmasq).
SetupWifi,
/// Clone a network interface MAC address for WiFi AP passthrough.
CloneMac {
/// Network interface to clone the MAC address from.
interface: String,
},
}
fn main() -> Result<()> {
@ -141,6 +154,13 @@ fn main() -> Result<()> {
cli::preset::run(&mut config, &cli.config, &name)
}
Commands::Reconnect { share } => cli::reconnect::run(&config, &share),
Commands::Update { apply } => cli::update::run(apply),
Commands::SetupWifi => {
todo!("WiFi AP setup not yet implemented — see src/cli/wifi.rs")
}
Commands::CloneMac { .. } => {
todo!("MAC clone not yet implemented — see src/cli/wifi.rs")
}
// already handled above
Commands::Run | Commands::ConfigInit { .. } | Commands::Deploy
| Commands::Setup { .. } => unreachable!(),

View File

@ -4,7 +4,7 @@
//! process tree with coordinated startup and shutdown. Spawns a built-in web
//! server for status monitoring and config hot-reload.
use std::collections::HashMap;
use std::collections::{HashMap, VecDeque};
use std::os::unix::process::CommandExt;
use std::path::PathBuf;
use std::process::{Child, Command};
@ -15,6 +15,9 @@ use std::thread;
use std::time::{Duration, Instant, SystemTime};
use anyhow::{Context, Result};
use chrono::Utc;
use cron::Schedule;
use std::str::FromStr;
use tracing::{error, info, warn};
use crate::config::Config;
@ -47,6 +50,8 @@ const STATS_SNAPSHOT_INTERVAL: Duration = Duration::from_secs(60);
const CACHE_WARN_THRESHOLD: f64 = 0.80;
/// Cache usage CRIT threshold.
const CACHE_CRITICAL_THRESHOLD: f64 = 0.95;
/// Number of speed samples in the adaptive bandwidth sliding window.
const ADAPTIVE_WINDOW_SIZE: usize = 6;
/// Per-share state from the previous poll cycle, used for change detection.
struct SharePrevState {
@ -725,6 +730,8 @@ fn supervise(
let mut prev_states: HashMap<String, SharePrevState> = HashMap::new();
let mut last_stats_snapshot = Instant::now();
let mut last_scheduled_warmup: Option<Instant> = None;
let mut adaptive_window: VecDeque<u64> = VecDeque::with_capacity(ADAPTIVE_WINDOW_SIZE);
let mut adaptive_current_limit: u64 = 0;
loop {
// Check for commands (non-blocking with timeout = POLL_INTERVAL)
@ -946,22 +953,34 @@ fn supervise(
let cfg = shared_config.read().unwrap();
let schedule = cfg.warmup.warmup_schedule.clone();
if !schedule.is_empty() && !cfg.warmup.rules.is_empty() {
let should_run = match last_scheduled_warmup {
let should_run = {
let normalized = normalize_cron_schedule(&schedule);
match Schedule::from_str(&normalized) {
Ok(sched) => match last_scheduled_warmup {
None => {
// First check: see if current hour matches schedule hour
let now = SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.unwrap_or_default()
.as_secs();
let hour_of_day = (now % 86400) / 3600;
// Parse "0 H * * *" -> extract H
let scheduled_hour = schedule.split_whitespace()
.nth(1)
.and_then(|h| h.parse::<u64>().ok())
.unwrap_or(2);
hour_of_day == scheduled_hour
// First check: fire if the next scheduled time is within 60 seconds
sched.upcoming(Utc).next()
.map(|t| {
let diff = t.timestamp() - Utc::now().timestamp();
diff >= 0 && diff <= 60
})
.unwrap_or(false)
}
Some(last) => {
// Has run before: check if there's a scheduled time between
// last run and now
let elapsed_secs = last.elapsed().as_secs() as i64;
let last_dt = Utc::now() - chrono::Duration::seconds(elapsed_secs);
sched.after(&last_dt).next()
.map(|t| t <= Utc::now())
.unwrap_or(false)
}
},
Err(e) => {
warn!("Invalid warmup_schedule '{}': {}", schedule, e);
false
}
}
Some(last) => last.elapsed() >= Duration::from_secs(86400),
};
if should_run {
@ -974,6 +993,46 @@ fn supervise(
}
}
// Adaptive bandwidth throttling
{
let cfg = shared_config.read().unwrap();
if cfg.bandwidth.adaptive {
let max_limit = parse_size_bytes(&cfg.bandwidth.limit_up).unwrap_or(0);
if max_limit > 0 {
let total_speed: u64 = {
let status = shared_status.read().unwrap();
status.shares.iter().map(|s| s.speed as u64).sum()
};
adaptive_window.push_back(total_speed);
if adaptive_window.len() > ADAPTIVE_WINDOW_SIZE {
adaptive_window.pop_front();
}
if adaptive_window.len() >= ADAPTIVE_WINDOW_SIZE {
let window_slice: Vec<u64> = adaptive_window.iter().copied().collect();
let effective_current = if adaptive_current_limit == 0 { max_limit } else { adaptive_current_limit };
let new_limit = compute_adaptive_limit(
&window_slice,
adaptive_current_limit,
max_limit,
);
if new_limit != effective_current {
let limit_str = format!("{}k", new_limit / 1024);
info!(
adaptive_limit = %limit_str,
"Adaptive bwlimit adjusted"
);
adaptive_current_limit = new_limit;
apply_bwlimit(mounts, &limit_str, &cfg.bandwidth.limit_down);
}
}
}
} else if adaptive_current_limit != 0 {
// Adaptive was turned off: restore the configured limit
adaptive_current_limit = 0;
apply_bwlimit(mounts, &cfg.bandwidth.limit_up, &cfg.bandwidth.limit_down);
}
}
// Log cache state changes and periodic snapshots
log_cache_events(shared_status, &config, &mut prev_states, &mut last_stats_snapshot);
@ -1638,6 +1697,52 @@ fn log_cache_events(
}
}
/// Normalize a cron expression to the 7-field form ("sec min hour dom month
/// dow year") required by the `cron` crate.
///
/// Accepted inputs:
/// - 5 fields ("min hour dom month dow"): seconds and year are filled in
///   (`0` and `*` respectively).
/// - 6 fields (seconds already present): only a wildcard year is appended.
/// - anything else (including an already-normalized 7-field form): returned
///   unchanged, letting `Schedule::from_str` report malformed input.
fn normalize_cron_schedule(expr: &str) -> String {
    let field_count = expr.split_whitespace().count();
    if field_count == 5 {
        // Standard crontab form: add a zero seconds field and an "any year" field.
        format!("0 {expr} *")
    } else if field_count == 6 {
        // Seconds present; just allow any year.
        format!("{expr} *")
    } else {
        expr.to_string()
    }
}
/// Compute the new adaptive bandwidth limit from a window of speed samples.
///
/// - `window`: recent aggregate upload speed samples in bytes/sec (must be non-empty)
/// - `current_limit`: last applied limit (0 = "use `max_limit` as baseline")
/// - `max_limit`: configured upper bound in bytes/sec (0 = unlimited → passthrough)
///
/// Decision rule (cv = std_dev / mean over the window):
/// - cv > 0.3            → congested: reduce 25% (floor 1 MiB/s, clamped to `max_limit`)
/// - mean ≥ 90% of limit → stable at capacity: maintain
/// - otherwise           → under-utilizing: increase 10%, capped at `max_limit`
///
/// Returns the new limit to apply (bytes/sec).
fn compute_adaptive_limit(window: &[u64], current_limit: u64, max_limit: u64) -> u64 {
// Never throttle below 1 MiB/s even under heavy congestion.
const CONGESTION_FLOOR: u64 = 1024 * 1024;
if max_limit == 0 || window.is_empty() {
return current_limit;
}
let current = if current_limit == 0 { max_limit } else { current_limit };
let n = window.len() as f64;
let mean = window.iter().sum::<u64>() as f64 / n;
let variance = window.iter()
.map(|&x| { let d = x as f64 - mean; d * d })
.sum::<f64>() / n;
let std_dev = variance.sqrt();
if mean > 0.0 && std_dev / mean > 0.3 {
// Congested (high coefficient of variation): reduce 25%, floor at 1 MiB/s.
// Also clamp to max_limit: when max_limit itself is below 1 MiB/s the
// floor alone would otherwise *raise* the limit above the configured cap.
((current as f64 * 0.75) as u64).max(CONGESTION_FLOOR).min(max_limit)
} else if mean >= current as f64 * 0.9 {
// Stable and near limit: maintain
current
} else {
// Stable but under-utilizing: increase 10%, cap at max
((current as f64 * 1.1) as u64).min(max_limit)
}
}
/// Parse a human-readable size string (e.g. "200G", "1.5T", "512M") into bytes.
fn parse_size_bytes(s: &str) -> Option<u64> {
let s = s.trim();
@ -1742,4 +1847,129 @@ mod tests {
assert_eq!(parse_size_bytes("200GB"), Some(200 * 1024 * 1024 * 1024));
assert_eq!(parse_size_bytes("bogus"), None);
}
// -----------------------------------------------------------------------
// normalize_cron_schedule
//
// The `cron` crate parses 7-field expressions ("sec min hour dom month dow
// year"); these cases cover promotion of standard 5-field and seconds-bearing
// 6-field expressions, plus pass-through of already-normalized input.
// -----------------------------------------------------------------------
#[test]
fn test_normalize_cron_5field() {
// Standard cron "min hour dom month dow" → prepend "0 " (sec=0), append " *" (year=any)
assert_eq!(normalize_cron_schedule("0 2 * * *"), "0 0 2 * * * *");
}
#[test]
fn test_normalize_cron_5field_wildcard_min() {
// Every 5 minutes
assert_eq!(normalize_cron_schedule("*/5 * * * *"), "0 */5 * * * * *");
}
#[test]
fn test_normalize_cron_6field() {
// 6-field (already has seconds) → append " *" for year
assert_eq!(normalize_cron_schedule("0 0 2 * * *"), "0 0 2 * * * *");
}
#[test]
fn test_normalize_cron_7field() {
// Already 7 fields → unchanged
assert_eq!(normalize_cron_schedule("0 0 2 * * * *"), "0 0 2 * * * *");
}
#[test]
fn test_normalize_cron_7field_unchanged_complex() {
// Named months/days and lists must survive pass-through byte-for-byte.
let expr = "0 30 9,12 1,15 May-Aug Mon,Wed *";
assert_eq!(normalize_cron_schedule(expr), expr);
}
// -----------------------------------------------------------------------
// compute_adaptive_limit
// -----------------------------------------------------------------------
const MIB: u64 = 1024 * 1024;
#[test]
fn test_adaptive_window_size_constant() {
// Regression guard: integration tests time their sleeps on 6 samples.
assert_eq!(ADAPTIVE_WINDOW_SIZE, 6);
}
#[test]
fn test_compute_adaptive_limit_congested_reduces_25pct() {
// Alternating 1M/5M → mean=3M, std_dev=2M, cv=0.67 > 0.3 → congested
let window = vec![MIB, 5 * MIB, MIB, 5 * MIB, MIB, 5 * MIB];
let max = 10 * MIB;
let current = 10 * MIB;
let new = compute_adaptive_limit(&window, current, max);
assert_eq!(new, ((10 * MIB) as f64 * 0.75) as u64);
assert!(new < current);
}
#[test]
fn test_compute_adaptive_limit_congested_floor_at_1mib() {
// Very noisy (cv ≈ 1) but current is near floor — must not go below 1 MiB/s
let window = vec![100, MIB, 100, MIB, 100, MIB];
let max = 10 * MIB;
let current = (MIB as f64 * 1.1) as u64; // slightly above floor
let new = compute_adaptive_limit(&window, current, max);
assert!(new >= MIB, "floor violated: {new} < {MIB}");
}
#[test]
fn test_compute_adaptive_limit_stable_near_max_maintains() {
// All samples ≥ 90% of limit → maintain
let limit = 10 * MIB;
let window = vec![9_500_000, 9_600_000, 9_700_000, 9_800_000, 9_900_000, 10_000_000];
let new = compute_adaptive_limit(&window, limit, limit);
assert_eq!(new, limit);
}
#[test]
fn test_compute_adaptive_limit_under_utilizing_increases_10pct() {
// mean=3M, current=5M → 3M < 5M*0.9=4.5M → under-utilizing → +10%
// (samples cluster tightly around 3M, so cv ≈ 0.04 → "stable" branch)
let max = 10 * MIB;
let current = 5 * MIB;
let window = vec![
2_800_000, 3_000_000, 3_200_000,
2_900_000, 3_100_000, 3_000_000,
];
let new = compute_adaptive_limit(&window, current, max);
assert_eq!(new, (current as f64 * 1.1) as u64);
assert!(new > current);
}
#[test]
fn test_compute_adaptive_limit_increase_capped_at_max() {
// current chosen so +10% genuinely overshoots the cap:
//   9_600_000 * 1.1 = 10_560_000 > 10 MiB (10_485_760).
// NOTE: the previous value 9_500_000 was a silent no-op —
//   9_500_000 * 1.1 = 10_450_000 is *below* 10 MiB, so the cap branch
// was never actually exercised and `assert!(new <= max)` passed vacuously.
let max = 10 * MIB;
let current = 9_600_000u64;
let window = vec![3_000_000; 6]; // stable, under-utilizing → +10% path
let new = compute_adaptive_limit(&window, current, max);
assert_eq!(new, max, "increase must be capped exactly at max, got {new}");
}
#[test]
fn test_compute_adaptive_limit_zero_current_uses_max_as_baseline() {
// current=0 means "baseline = max_limit"
let max = 10 * MIB;
// Under-utilizing from max baseline → +10%, capped at max
let window = vec![3_000_000; 6];
let new = compute_adaptive_limit(&window, 0, max);
assert!(new <= max);
// (10M * 1.1).min(10M) = 10M
assert_eq!(new, max);
}
#[test]
fn test_compute_adaptive_limit_zero_max_passthrough() {
// max=0 means unlimited — function returns current unchanged
let window = vec![MIB; 6];
let new = compute_adaptive_limit(&window, 5 * MIB, 0);
assert_eq!(new, 5 * MIB);
}
#[test]
fn test_compute_adaptive_limit_empty_window_passthrough() {
// No samples yet → no basis for a decision; keep the current limit.
let new = compute_adaptive_limit(&[], 5 * MIB, 10 * MIB);
assert_eq!(new, 5 * MIB);
}
}

View File

@ -324,61 +324,17 @@ async fn post_preset(
) -> axum::response::Response {
use axum::response::IntoResponse;
let allowed = ["photographer", "video", "office"];
if !allowed.contains(&profile.as_str()) {
return (StatusCode::BAD_REQUEST, "Unknown preset").into_response();
}
let preset = match profile.parse::<crate::cli::preset::Preset>() {
Ok(p) => p,
Err(e) => return (StatusCode::BAD_REQUEST, e.to_string()).into_response(),
};
let mut config = {
let cfg = state.config.read().unwrap();
cfg.clone()
};
match profile.as_str() {
"photographer" => {
config.read.chunk_size = "256M".into();
config.read.chunk_limit = "1G".into();
config.read.read_ahead = "512M".into();
config.read.buffer_size = "256M".into();
config.read.multi_thread_streams = 4;
config.read.multi_thread_cutoff = "50M".into();
config.directory_cache.cache_time = "2h".into();
config.writeback.write_back = "5s".into();
config.writeback.transfers = 4;
config.protocols.enable_smb = true;
config.protocols.enable_nfs = false;
config.protocols.enable_webdav = false;
}
"video" => {
config.read.chunk_size = "512M".into();
config.read.chunk_limit = "2G".into();
config.read.read_ahead = "1G".into();
config.read.buffer_size = "512M".into();
config.read.multi_thread_streams = 2;
config.read.multi_thread_cutoff = "100M".into();
config.directory_cache.cache_time = "1h".into();
config.writeback.write_back = "5s".into();
config.writeback.transfers = 2;
config.protocols.enable_smb = true;
config.protocols.enable_nfs = false;
config.protocols.enable_webdav = false;
}
"office" => {
config.read.chunk_size = "64M".into();
config.read.chunk_limit = "256M".into();
config.read.read_ahead = "128M".into();
config.read.buffer_size = "128M".into();
config.read.multi_thread_streams = 4;
config.read.multi_thread_cutoff = "10M".into();
config.directory_cache.cache_time = "30m".into();
config.writeback.write_back = "3s".into();
config.writeback.transfers = 4;
config.protocols.enable_smb = true;
config.protocols.enable_nfs = false;
config.protocols.enable_webdav = true;
}
_ => unreachable!(),
}
preset.apply(&mut config);
let toml_content = config.to_commented_toml();
if let Err(e) = std::fs::write(&state.config_path, &toml_content) {

View File

@ -19,6 +19,9 @@ function configEditorFn() {
smb_auth: false,
warmup: false,
dir_refresh: false,
web: false,
notifications: false,
log: false,
},
init() {
@ -558,6 +561,85 @@ if (window.Alpine) {
</div>
</section>
<!-- ═══ Section: Web ═══ -->
<!-- NOTE(review): the x-model paths below assume config.web / config.notifications /
     config.log objects exist in the Alpine configEditor state even when the
     corresponding TOML sections are absent — TODO confirm they are initialized. -->
<section class="config-section">
<div class="section-header" @click="sections.web = !sections.web">
<h3>Web UI <span class="tier-badge tier-none">No restart</span></h3>
<span class="chevron" x-text="sections.web ? '▾' : '▸'"></span>
</div>
<div class="section-body" x-show="sections.web" x-transition>
<div class="field-row">
<label>Password</label>
<input type="password" x-model="config.web.password" placeholder="Leave empty to disable authentication" style="max-width:320px">
</div>
<p style="font-size:0.82em;color:var(--text-muted);margin-top:8px">
Protects the Web UI with HTTP Basic Auth. Leave empty to allow unauthenticated access.
</p>
</div>
</section>
<!-- ═══ Section: Notifications ═══ -->
<section class="config-section">
<div class="section-header" @click="sections.notifications = !sections.notifications">
<h3>Notifications <span class="tier-badge tier-none">No restart</span></h3>
<span class="chevron" x-text="sections.notifications ? '▾' : '▸'"></span>
</div>
<div class="section-body" x-show="sections.notifications" x-transition>
<div class="field-grid">
<div class="field-row">
<label>Webhook URL</label>
<input type="text" x-model="config.notifications.webhook_url" placeholder="https://... (Telegram/Bark/DingTalk)">
</div>
<div class="field-row">
<label>Cache Threshold %</label>
<input type="number" x-model.number="config.notifications.cache_threshold_pct" min="1" max="100" style="max-width:120px">
</div>
<div class="field-row">
<label>NAS Offline Minutes</label>
<input type="number" x-model.number="config.notifications.nas_offline_minutes" min="1" style="max-width:120px">
</div>
<div class="field-row">
<label>Write-back Depth</label>
<input type="number" x-model.number="config.notifications.writeback_depth" min="1" style="max-width:120px">
</div>
</div>
<p style="font-size:0.82em;color:var(--text-muted);margin-top:8px">
Send push notifications when cache is near full, NAS goes offline, or write-back queue grows large.
Leave Webhook URL empty to disable all notifications.
</p>
</div>
</section>
<!-- ═══ Section: Log ═══ -->
<!-- Unlike the two sections above, log settings are tagged "Full restart":
     they only take effect after a full service restart. -->
<section class="config-section">
<div class="section-header" @click="sections.log = !sections.log">
<h3>Log <span class="tier-badge tier-global">Full restart</span></h3>
<span class="chevron" x-text="sections.log ? '▾' : '▸'"></span>
</div>
<div class="section-body" x-show="sections.log" x-transition>
<div class="field-grid">
<div class="field-row">
<label>Log File</label>
<input type="text" x-model="config.log.file" class="mono" placeholder="/var/log/warpgate/warpgate.log (empty = no file logging)">
</div>
<div class="field-row">
<label>Log Level</label>
<select x-model="config.log.level">
<option value="error">error</option>
<option value="warn">warn</option>
<option value="info">info</option>
<option value="debug">debug</option>
<option value="trace">trace</option>
</select>
</div>
</div>
<p style="font-size:0.82em;color:var(--text-muted);margin-top:8px">
Changes to log settings require a full service restart to take effect.
Leave Log File empty to disable file logging (stdout only).
</p>
</div>
</section>
<!-- ═══ Form Actions ═══ -->
<div class="form-actions" style="margin-top:24px">
<button type="button" @click="submitConfig()" class="btn btn-primary" :disabled="submitting">

144
tests/09-cli/test-preset-cli.sh Executable file
View File

@ -0,0 +1,144 @@
#!/usr/bin/env bash
# Test: `warpgate preset <name>` applies correct values to config file.
#
# Verifies that each preset writes the expected cache.max_size to the config,
# that CLI and API presets are unified (same source of truth), and that the
# command exits 0 for valid presets and non-zero for unknown ones.
#
# Does NOT require a running warpgate daemon — only needs a config file.
set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
source "$SCRIPT_DIR/../harness/helpers.sh"

setup_test_env
trap teardown_test_env EXIT

# Generate a minimal config pointing at a fake NAS (we don't connect to it)
gen_config "nas_host=127.0.0.1"

# ── photographer preset ──────────────────────────────────────────────────────
output=$("$WARPGATE_BIN" preset photographer -c "$TEST_CONFIG" 2>&1) || {
  echo "FAIL: 'warpgate preset photographer' exited non-zero"
  echo "  output: $output"
  exit 1
}
assert_output_contains "$output" "photographer"
# Verify cache.max_size was written as 500G
if ! grep -q 'max_size = "500G"' "$TEST_CONFIG"; then
  echo "FAIL: photographer preset did not write cache.max_size = \"500G\""
  echo "  config: $(grep max_size "$TEST_CONFIG" || echo '(not found)')"
  exit 1
fi
# Verify chunk_size = 256M
if ! grep -q 'chunk_size = "256M"' "$TEST_CONFIG"; then
  echo "FAIL: photographer preset did not write chunk_size = \"256M\""
  exit 1
fi
# Verify chunk_limit = 1G (field added in this round of fixes)
if ! grep -q 'chunk_limit = "1G"' "$TEST_CONFIG"; then
  echo "FAIL: photographer preset did not write chunk_limit = \"1G\""
  exit 1
fi
# Verify multi_thread_streams = 4
if ! grep -q 'multi_thread_streams = 4' "$TEST_CONFIG"; then
  echo "FAIL: photographer preset did not write multi_thread_streams = 4"
  exit 1
fi
# Verify webdav is disabled for photographer
if grep -q 'enable_webdav = true' "$TEST_CONFIG"; then
  echo "FAIL: photographer preset should NOT enable WebDAV"
  exit 1
fi

# ── video preset ─────────────────────────────────────────────────────────────
gen_config "nas_host=127.0.0.1"
output=$("$WARPGATE_BIN" preset video -c "$TEST_CONFIG" 2>&1) || {
  echo "FAIL: 'warpgate preset video' exited non-zero"
  echo "  output: $output"
  exit 1
}
if ! grep -q 'max_size = "1T"' "$TEST_CONFIG"; then
  echo "FAIL: video preset did not write cache.max_size = \"1T\""
  exit 1
fi
if ! grep -q 'chunk_size = "512M"' "$TEST_CONFIG"; then
  echo "FAIL: video preset did not write chunk_size = \"512M\""
  exit 1
fi
if ! grep -q 'chunk_limit = "2G"' "$TEST_CONFIG"; then
  echo "FAIL: video preset did not write chunk_limit = \"2G\""
  exit 1
fi
if ! grep -q 'multi_thread_streams = 2' "$TEST_CONFIG"; then
  echo "FAIL: video preset did not write multi_thread_streams = 2"
  exit 1
fi

# ── office preset ─────────────────────────────────────────────────────────────
gen_config "nas_host=127.0.0.1"
output=$("$WARPGATE_BIN" preset office -c "$TEST_CONFIG" 2>&1) || {
  echo "FAIL: 'warpgate preset office' exited non-zero"
  echo "  output: $output"
  exit 1
}
if ! grep -q 'max_size = "50G"' "$TEST_CONFIG"; then
  echo "FAIL: office preset did not write cache.max_size = \"50G\""
  exit 1
fi
# office buffer_size must be 128M (not 64M — unified in Step 1 fix)
if ! grep -q 'buffer_size = "128M"' "$TEST_CONFIG"; then
  echo "FAIL: office preset should write buffer_size = \"128M\", got:"
  grep buffer_size "$TEST_CONFIG" || echo "  (not found)"
  exit 1
fi
# office should enable WebDAV
if ! grep -q 'enable_webdav = true' "$TEST_CONFIG"; then
  echo "FAIL: office preset should enable WebDAV"
  exit 1
fi
# office write_back should be 5s (unified; was incorrectly 3s in API before fix)
if ! grep -q 'write_back = "5s"' "$TEST_CONFIG"; then
  echo "FAIL: office preset should write write_back = \"5s\""
  exit 1
fi

# ── unknown preset returns non-zero ──────────────────────────────────────────
gen_config "nas_host=127.0.0.1"
if "$WARPGATE_BIN" preset bad-preset -c "$TEST_CONFIG" 2>&1; then
  echo "FAIL: unknown preset should exit non-zero"
  exit 1
fi

# ── config remains parseable after preset ─────────────────────────────────────
gen_config "nas_host=127.0.0.1"
# Check the exit status explicitly: under `set -e` a bare failing command
# here would abort the script with no diagnostic at all.
if ! "$WARPGATE_BIN" preset photographer -c "$TEST_CONFIG" > /dev/null 2>&1; then
  echo "FAIL: 'warpgate preset photographer' exited non-zero before parse check"
  exit 1
fi
# `warpgate status` parses the config; it will fail the mount check but not
# the config parse — ensure it doesn't error on config parsing.
# NOTE: do not grep for the bare word "toml" — status output may legitimately
# contain the config *path* (e.g. ".../config.toml") and would false-fail.
status_out=$("$WARPGATE_BIN" status -c "$TEST_CONFIG" 2>&1) || true
if printf '%s\n' "$status_out" | grep -qiE "failed to parse|parse error|invalid"; then
  echo "FAIL: config written by preset is not parseable"
  echo "  output: $status_out"
  exit 1
fi

echo "PASS: $(basename "$0" .sh)"

View File

@ -0,0 +1,79 @@
#!/usr/bin/env bash
# Test: `warpgate update` checks for newer versions.
#
# Verifies:
#   1. The command exists and is dispatchable (no "unknown subcommand" error).
#   2. It outputs a version string in the expected format.
#   3. With --apply it prints installation instructions.
#   4. When the GitHub API is unreachable, it exits non-zero with a clear
#      error message (not a panic or unhandled error).
#
# If the build host has no internet access the network tests are skipped.
set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
source "$SCRIPT_DIR/../harness/helpers.sh"

setup_test_env
trap teardown_test_env EXIT

# Generate a minimal config (update doesn't require a running daemon)
gen_config "nas_host=127.0.0.1"

# ── 1. Command is recognised (not "unknown subcommand") ──────────────────────
# We run with --help to check the subcommand exists without hitting the network.
if ! "$WARPGATE_BIN" --help 2>&1 | grep -q "update"; then
  echo "FAIL: 'update' subcommand not listed in --help output"
  exit 1
fi

# ── 2. Check network availability ────────────────────────────────────────────
_has_network=0
if curl -sf --max-time 3 https://api.github.com > /dev/null 2>&1; then
  _has_network=1
fi

# ── 3. Network-dependent tests ───────────────────────────────────────────────
if [[ $_has_network -eq 1 ]]; then
  output=$("$WARPGATE_BIN" update -c "$TEST_CONFIG" 2>&1) || {
    echo "FAIL: 'warpgate update' exited non-zero with network available"
    echo "  output: $output"
    exit 1
  }
  # Must mention current version
  assert_output_contains "$output" "Current version"
  # Must mention latest version
  assert_output_contains "$output" "Latest version"
  # Output must not contain panic or unwrap traces
  assert_output_not_contains "$output" "panicked at"
  assert_output_not_contains "$output" "thread 'main' panicked"
  # --apply flag must print an install command hint
  apply_out=$("$WARPGATE_BIN" update --apply -c "$TEST_CONFIG" 2>&1) || true
  assert_output_contains "$apply_out" "install"
else
  echo "# SKIP: no internet access — skipping network-dependent update tests"
fi

# ── 4. Clean error on network failure ────────────────────────────────────────
# Point the updater at an address that refuses connections; we expect a
# non-zero exit with a human-readable error message, not a panic.
# (Plain `export` of a literal cannot fail — no error suppression needed.)
export WARPGATE_GITHUB_API_OVERRIDE="https://127.0.0.1:19999"
# Bound the run with `timeout` so a hung connection cannot stall the suite
# (the comment previously promised a short timeout but none was applied).
err_exit=0
err_out=$(timeout 30 "$WARPGATE_BIN" update -c "$TEST_CONFIG" 2>&1) || err_exit=$?
echo "# update exited with status $err_exit under API override"
# Regardless of network result, no panics
assert_output_not_contains "$err_out" "panicked at"
assert_output_not_contains "$err_out" "thread 'main' panicked"

echo "PASS: $(basename "$0" .sh)"

View File

@ -0,0 +1,69 @@
#!/usr/bin/env bash
# Test: adaptive bandwidth throttling engages and adjusts the bwlimit.
#
# Approach:
#   - bring warpgate up with a small limit_up (5M) and adaptive=true
#   - generate write-back traffic by dropping files into the mount
#   - leave enough time for the supervisor's 6-sample window to fill
#     (6 samples × 2 s poll ≈ 12 s, plus margin)
#   - look for the "Adaptive bwlimit adjusted" log line; if traffic stayed
#     flat no adjustment is required, but the daemon must survive either way
#
# Requires: root (for FUSE mounts), mock NAS.
set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
source "$SCRIPT_DIR/../harness/helpers.sh"
source "$SCRIPT_DIR/../harness/mock-nas.sh"

require_root
setup_test_env
trap teardown_test_env EXIT
start_mock_nas

# Low upload limit + adaptive mode + fast write-back for quick traffic
gen_config \
  "bandwidth.limit_up=5M" \
  "bandwidth.adaptive=true" \
  "writeback.write_back=1s"

start_warpgate
wait_for_mount 60
wait_for_rc_api 30

# Produce 20 small files so write-back has sustained work to do
for ((idx = 1; idx <= 20; idx++)); do
  dd if=/dev/urandom of="$TEST_MOUNT/adaptive-test-$idx.bin" bs=512K count=1 2>/dev/null
done

# Let the adaptive window fill: ADAPTIVE_WINDOW_SIZE=6 × 2 s poll + margin
sleep 20

# Either the limit was adjusted (logged), or the supervisor simply decided
# no change was needed — but it must not have died in either case.
if grep -q "Adaptive bwlimit adjusted" "$TEST_DIR/warpgate.log" 2>/dev/null; then
  echo "# Adaptive adjustment logged"
elif ! kill -0 "$WARPGATE_PID" 2>/dev/null; then
  echo "FAIL: warpgate crashed during adaptive bandwidth test"
  exit 1
else
  echo "# No adaptive adjustment this run (traffic level may have been stable)"
fi

# Supervisor must still be alive at the end
kill -0 "$WARPGATE_PID" 2>/dev/null || {
  echo "FAIL: warpgate is not running after adaptive bandwidth test"
  exit 1
}

# And the log must be panic-free
if grep -q "panicked at\|thread.*panicked" "$TEST_DIR/warpgate.log" 2>/dev/null; then
  echo "FAIL: panic detected in warpgate log"
  grep "panicked" "$TEST_DIR/warpgate.log" | head -5
  exit 1
fi

echo "PASS: $(basename "$0" .sh)"

View File

@ -0,0 +1,64 @@
#!/usr/bin/env bash
# Test: warmup_schedule triggers warmup at the configured cron time.
#
# Strategy: set warmup_schedule to "* * * * *" (every minute) so the
# supervisor fires at the next 60-second boundary. We also set a short
# dir-cache-time so the mount comes up fast. After the mount is ready we
# wait up to 70 s for a "Scheduled warmup triggered" log line.
#
# Requires: root (for FUSE mounts), a real mock NAS for rclone to connect to.
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
source "$SCRIPT_DIR/../harness/helpers.sh"
source "$SCRIPT_DIR/../harness/mock-nas.sh"
require_root
setup_test_env
trap teardown_test_env EXIT
# Seed a file in the mock NAS so warmup has something to do
start_mock_nas
mkdir -p "$NAS_ROOT/warmup-dir"
echo "test content" > "$NAS_ROOT/warmup-dir/file.txt"
# Generate config with:
# - a warmup rule pointing at the seeded directory
# - warmup_schedule = "* * * * *" (every minute — fires within 60 s)
# - warmup.auto = false (we rely on the cron schedule only)
gen_config \
"warmup_auto=false" \
"warmup_schedule=* * * * *" \
"warmup.rules=[[warmup.rules]]\nshare = \"data\"\npath = \"warmup-dir\""
# Start warpgate and wait for the mount to be ready
start_warpgate
wait_for_mount 60
wait_for_rc_api 30
# The cron expression "* * * * *" fires every minute.
# We allow up to 90 s for the trigger log line to appear.
TIMEOUT=90
DEADLINE=$((SECONDS + TIMEOUT))
triggered=0
while [[ $SECONDS -lt $DEADLINE ]]; do
if grep -q "Scheduled warmup triggered" "$TEST_DIR/warpgate.log" 2>/dev/null; then
triggered=1
break
fi
sleep 2
done
if [[ $triggered -eq 0 ]]; then
echo "FAIL: 'Scheduled warmup triggered' not found in log within ${TIMEOUT}s"
echo "--- warpgate.log tail ---"
tail -30 "$TEST_DIR/warpgate.log" 2>/dev/null || true
exit 1
fi
# Verify the schedule string appears in the trigger log line
if ! grep "Scheduled warmup triggered" "$TEST_DIR/warpgate.log" | grep -q "schedule"; then
echo "FAIL: trigger log line should mention the schedule expression"
exit 1
fi
echo "PASS: $(basename "$0" .sh)"

View File

@ -49,6 +49,7 @@ _gen_config() {
local webdav_port="8080"
local warmup_auto="false"
local warmup_schedule=""
local warmup_rules=""
local smb_auth_enabled="false"
@ -93,6 +94,7 @@ _gen_config() {
protocols.nfs_allowed_network|nfs_allowed_network) nfs_allowed_network="$value" ;;
protocols.webdav_port|webdav_port) webdav_port="$value" ;;
warmup.auto|warmup_auto) warmup_auto="$value" ;;
warmup.warmup_schedule|warmup_schedule) warmup_schedule="$value" ;;
warmup.rules) warmup_rules="$value" ;;
smb_auth.enabled|smb_auth_enabled) smb_auth_enabled="$value" ;;
smb_auth.username|smb_auth_username) smb_auth_username="$value" ;;
@ -149,6 +151,11 @@ webdav_port = $webdav_port
auto = $warmup_auto
CONFIG_EOF
# Append warmup_schedule if set
if [[ -n "$warmup_schedule" ]]; then
echo "warmup_schedule = \"$warmup_schedule\"" >> "$config_file"
fi
# Append smb_auth section if enabled
if [[ "$smb_auth_enabled" == "true" ]]; then
cat >> "$config_file" <<SMB_AUTH_EOF

View File

@ -52,6 +52,7 @@ CATEGORIES=(
07-network
08-crash-recovery
09-cli
10-scheduled
)
# Filter to specific category if requested