warpgate/src/web/api.rs
grabbit faf9d80824 feat: fill implementation gaps — preset unification, cron, adaptive bw, update cmd, tests
Step 1 — Unify preset logic (eliminate dual implementation)
- src/cli/preset.rs: add missing fields (chunk_limit, multi_thread_streams,
  multi_thread_cutoff), fix Office buffer_size 64M→128M, implement FromStr
- src/web/api.rs: post_preset() now calls Preset::apply() — no more inlined
  params; Office write_back unified to 5s (was 3s in API)

Step 2 — Fix setup.rs connection test: warn→bail
- All 4 "Warning: Could not connect/resolve" prints replaced with anyhow::bail!
  matching deploy/setup.rs behavior

Step 3 — Web UI: add [web] and [notifications] edit sections
- templates/web/tabs/config.html: new collapsible Web UI (password) and
  Notifications (webhook_url, cache_threshold_pct, nas_offline_minutes,
  writeback_depth) sections, both tagged "No restart"
- Also adds [log] section (file path + level select, "Full restart")

Step 4 — Full cron expression support in warmup scheduler
- Cargo.toml: add cron = "0.12", chrono = "0.4"
- supervisor.rs: normalize_cron_schedule() converts 5-field standard cron to
  7-field cron crate format; replaces naive hour-only matching

Step 5 — Adaptive bandwidth algorithm
- supervisor.rs: extract compute_adaptive_limit() pure function; sliding
  window of 6 samples, cv>0.3→congested (−25%, floor 1MiB/s), stable
  near-limit→maintain, under-utilizing→+10% (capped at limit_up)

Step 6 — warpgate update command
- src/cli/update.rs: query GitHub Releases API, compare with CARGO_PKG_VERSION
- src/main.rs: add Update{apply}, SetupWifi, CloneMac{interface} commands
- src/cli/wifi.rs: TODO stub for WiFi AP setup

Unit tests (+35, total 188→223)
- cli/preset.rs: 10 tests — FromStr, all fields for each preset, idempotency,
  connection/share isolation, write_back consistency regression
- supervisor.rs: 14 tests — normalize_cron_schedule (5 cases),
  compute_adaptive_limit (9 cases: congestion, floor, stable, under-utilizing,
  cap, zero-current, zero-max, empty window)
- config.rs: 11 tests — WebConfig (3), NotificationsConfig (4), LogConfig (4)

Shell tests (+4 scripts)
- tests/09-cli/test-preset-cli.sh: preset CLI without daemon; checks all
  three presets write correct values including unified buffer_size/write_back
- tests/09-cli/test-update-command.sh: update command; skips on no-network
- tests/10-scheduled/test-cron-warmup-schedule.sh: "* * * * *" fires in <90s
- tests/10-scheduled/test-adaptive-bandwidth.sh: adaptive loop stability
- tests/harness/config-gen.sh: add warmup.warmup_schedule override support
- tests/run-all.sh: add 10-scheduled category

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-02-19 16:55:00 +08:00

442 lines
13 KiB
Rust

//! JSON API handlers for programmatic access and future SPA.
//!
//! All endpoints return JSON. The htmx frontend uses the page handlers instead,
//! but these are available for CLI tools and external integrations.
use axum::extract::{Path, Query, State};
use axum::http::StatusCode;
use axum::response::Json;
use axum::routing::{get, post};
use axum::Router;
use serde::Serialize;
use crate::config::Config;
use crate::daemon::SupervisorCmd;
use crate::web::SharedState;
/// Build the JSON API router. All handlers share [`SharedState`]; the htmx
/// page handlers live elsewhere — these routes serve CLI tools and external
/// integrations.
pub fn routes() -> Router<SharedState> {
    Router::new()
        .route("/api/status", get(get_status))
        .route("/api/status/{share}", get(get_share_status))
        // GET returns the config as JSON, POST submits a replacement.
        .route("/api/config", get(get_config).post(post_config))
        .route("/api/bwlimit", post(post_bwlimit))
        .route("/api/logs", get(get_logs))
        .route("/api/reconnect/{share}", post(reconnect_share))
        .route("/api/preset/{profile}", post(post_preset))
}
/// GET /api/status — overall daemon status.
#[derive(Serialize)]
struct StatusResponse {
    /// Human-readable uptime string (from `DaemonStatus::uptime_string`).
    uptime: String,
    /// One entry per share, built by `build_share_status_responses`.
    shares: Vec<ShareStatusResponse>,
    /// Mirrors `DaemonStatus::smbd_running`.
    smbd_running: bool,
    /// Mirrors `DaemonStatus::webdav_running`.
    webdav_running: bool,
    /// Mirrors `DaemonStatus::nfs_exported`.
    nfs_exported: bool,
    /// Status of each configured warmup rule.
    warmup: Vec<WarmupRuleStatusResponse>,
    /// Mirrors `DaemonStatus::nas_offline`.
    nas_offline: bool,
    /// Mirrors `DaemonStatus::all_synced`.
    all_synced: bool,
}
#[derive(Serialize)]
/// JSON view of one warmup rule's progress (see `get_status`).
struct WarmupRuleStatusResponse {
    /// Share the rule applies to.
    share: String,
    /// Path the rule warms within the share.
    path: String,
    /// Optional age filter carried over from the rule config.
    newer_than: Option<String>,
    /// Lowercase state label: "pending", "listing", "caching", "complete",
    /// or "failed" (mapped from `WarmupRuleState` in `get_status`).
    state: String,
    /// Total files discovered by the listing phase.
    total_files: usize,
    /// Files skipped (e.g. already cached or filtered — inferred from name,
    /// confirm against the supervisor's warmup logic).
    skipped: usize,
    /// Files successfully cached so far.
    cached: usize,
    /// Files that failed to cache.
    errors: usize,
}
#[derive(Serialize)]
/// JSON view of a single share's live status, including warmup and
/// directory-refresh telemetry (assembled by `share_to_response`).
struct ShareStatusResponse {
    /// Share name as configured.
    name: String,
    /// Whether the share's mount is currently up.
    mounted: bool,
    /// RC port associated with this share's mount process.
    rc_port: u16,
    /// Cache size in bytes (raw value; see `cache_display`).
    cache_bytes: u64,
    /// Pre-formatted, human-readable cache size.
    cache_display: String,
    /// Number of dirty (not-yet-written-back) entries.
    dirty_count: u64,
    /// Number of files currently in an errored state.
    errored_files: u64,
    /// Transfer speed as a raw number (units defined by `ShareStatus::speed`).
    speed: f64,
    /// Pre-formatted, human-readable transfer speed.
    speed_display: String,
    /// Count of active/completed transfers (from `ShareStatus::transfers`).
    transfers: u64,
    /// Transfer error count.
    errors: u64,
    /// Health label string (from `ShareStatus::health_label`).
    health: String,
    /// Optional detail accompanying the health label.
    health_message: Option<String>,
    /// Warmup state label for this share (from `warmup_summary_for`).
    warmup_state: String,
    /// Warmup files completed.
    warmup_done: usize,
    /// Warmup files total.
    warmup_total: usize,
    /// True when the share's config resolves to a dir-refresh interval.
    dir_refresh_active: bool,
    /// Human-readable "time since last dir refresh", if one has run.
    last_dir_refresh_ago: Option<String>,
    /// Directories refreshed successfully in the last run.
    last_dir_refresh_dirs_ok: usize,
    /// Directories that failed in the last run.
    last_dir_refresh_dirs_failed: usize,
}
/// Build a `ShareStatusResponse` for one share, including dir-refresh fields.
///
/// Pulls per-share counters out of `DaemonStatus` side tables (warmup summary,
/// dir-refresh maps) and combines them with the share's own status snapshot.
fn share_to_response(
    s: &crate::daemon::ShareStatus,
    status: &crate::daemon::DaemonStatus,
    config: &Config,
) -> ShareStatusResponse {
    // Active iff the share exists in config AND resolves to a refresh interval.
    let dir_refresh_active = match config.find_share(&s.name) {
        Some(share_cfg) => config.effective_dir_refresh_interval(share_cfg).is_some(),
        None => false,
    };
    let (warmup_state, warmup_done, warmup_total) = status.warmup_summary_for(&s.name);
    ShareStatusResponse {
        name: s.name.clone(),
        mounted: s.mounted,
        rc_port: s.rc_port,
        cache_bytes: s.cache_bytes,
        cache_display: s.cache_display(),
        dirty_count: s.dirty_count,
        errored_files: s.errored_files,
        speed: s.speed,
        speed_display: s.speed_display(),
        transfers: s.transfers,
        errors: s.errors,
        health: s.health_label().to_string(),
        health_message: s.health_message().map(|m| m.to_string()),
        warmup_state: warmup_state.to_string(),
        warmup_done,
        warmup_total,
        dir_refresh_active,
        last_dir_refresh_ago: status.dir_refresh_ago_for(&s.name),
        // Missing map entries mean "no refresh recorded yet" → zero counts.
        last_dir_refresh_dirs_ok: status
            .dir_refresh_dirs_ok
            .get(&s.name)
            .copied()
            .unwrap_or(0),
        last_dir_refresh_dirs_failed: status
            .dir_refresh_dirs_failed
            .get(&s.name)
            .copied()
            .unwrap_or(0),
    }
}
/// Build all share status responses.
///
/// Preserves the order of `status.shares`.
fn build_share_status_responses(
    status: &crate::daemon::DaemonStatus,
    config: &Config,
) -> Vec<ShareStatusResponse> {
    let mut responses = Vec::with_capacity(status.shares.len());
    for share in &status.shares {
        responses.push(share_to_response(share, status, config));
    }
    responses
}
async fn get_status(State(state): State<SharedState>) -> Json<StatusResponse> {
let status = state.status.read().unwrap();
let config = state.config.read().unwrap();
Json(StatusResponse {
uptime: status.uptime_string(),
shares: build_share_status_responses(&status, &config),
smbd_running: status.smbd_running,
webdav_running: status.webdav_running,
nfs_exported: status.nfs_exported,
warmup: status
.warmup
.iter()
.map(|r| {
let state_str = match &r.state {
crate::daemon::WarmupRuleState::Pending => "pending",
crate::daemon::WarmupRuleState::Listing => "listing",
crate::daemon::WarmupRuleState::Caching => "caching",
crate::daemon::WarmupRuleState::Complete => "complete",
crate::daemon::WarmupRuleState::Failed(_) => "failed",
};
WarmupRuleStatusResponse {
share: r.share.clone(),
path: r.path.clone(),
newer_than: r.newer_than.clone(),
state: state_str.to_string(),
total_files: r.total_files,
skipped: r.skipped,
cached: r.cached,
errors: r.errors,
}
})
.collect(),
nas_offline: status.nas_offline,
all_synced: status.all_synced,
})
}
/// GET /api/status/{share} — per-share status.
///
/// Returns 404 when no share with the given name exists in the current status.
async fn get_share_status(
    State(state): State<SharedState>,
    Path(share_name): Path<String>,
) -> Result<Json<ShareStatusResponse>, StatusCode> {
    let status = state.status.read().unwrap();
    let config = state.config.read().unwrap();
    match status.shares.iter().find(|s| s.name == share_name) {
        Some(share) => Ok(Json(share_to_response(share, &status, &config))),
        None => Err(StatusCode::NOT_FOUND),
    }
}
/// GET /api/config — current config serialized to JSON.
///
/// Serialization failure degrades to `Value::Null` rather than erroring.
async fn get_config(State(state): State<SharedState>) -> Json<serde_json::Value> {
    let snapshot = state.config.read().unwrap();
    let value = serde_json::to_value(&*snapshot).unwrap_or_default();
    Json(value)
}
/// POST /api/config — submit new config as TOML string.
#[derive(serde::Deserialize)]
struct ConfigSubmit {
    /// The complete replacement config file contents, as raw TOML text.
    toml: String,
}
#[derive(Serialize)]
/// Outcome of a config submission (`post_config`).
struct ConfigResponse {
    /// True when the submission was accepted (including the no-op case).
    ok: bool,
    /// Human-readable result or error description.
    message: String,
    /// Summary of the config diff, when one was computed; `None` for
    /// parse/validation failures (which happen before diffing).
    diff_summary: Option<String>,
}
/// POST /api/config — accept a full replacement config as a TOML string.
///
/// Pipeline: parse → validate → diff against the running config → persist to
/// disk → ask the supervisor to hot-reload. Processing stops at the first
/// failing stage and reports it; an empty diff short-circuits with 200 before
/// touching the disk.
async fn post_config(
    State(state): State<SharedState>,
    Json(body): Json<ConfigSubmit>,
) -> (StatusCode, Json<ConfigResponse>) {
    // Single constructor for every exit path, so the response shape cannot
    // drift between branches.
    fn reply(
        code: StatusCode,
        ok: bool,
        message: String,
        diff_summary: Option<String>,
    ) -> (StatusCode, Json<ConfigResponse>) {
        (code, Json(ConfigResponse { ok, message, diff_summary }))
    }
    // 1. Parse the submitted TOML into a Config.
    let new_config: crate::config::Config = match toml::from_str(&body.toml) {
        Ok(c) => c,
        Err(e) => {
            return reply(
                StatusCode::BAD_REQUEST,
                false,
                format!("TOML parse error: {e}"),
                None,
            );
        }
    };
    // 2. Semantic validation beyond what serde enforces.
    if let Err(e) = new_config.validate() {
        return reply(
            StatusCode::BAD_REQUEST,
            false,
            format!("Validation error: {e}"),
            None,
        );
    }
    // 3. Diff against the running config. The read lock is scoped to this
    //    block so it is released before the file write below.
    let diff_summary = {
        let old_config = state.config.read().unwrap();
        let d = crate::config_diff::diff(&old_config, &new_config);
        if d.is_empty() {
            // Nothing changed — succeed without writing or reloading.
            return reply(
                StatusCode::OK,
                true,
                "No changes detected".to_string(),
                Some(d.summary()),
            );
        }
        d.summary()
    };
    // 4. Persist the raw TOML exactly as submitted (preserves user comments
    //    and formatting).
    if let Err(e) = std::fs::write(&state.config_path, &body.toml) {
        return reply(
            StatusCode::INTERNAL_SERVER_ERROR,
            false,
            format!("Failed to write config file: {e}"),
            Some(diff_summary),
        );
    }
    // 5. Hot-reload. NOTE(review): the file is already on disk at this point,
    //    so a failed send leaves disk and running state out of sync until the
    //    next restart — confirm this trade-off is intentional.
    if let Err(e) = state.cmd_tx.send(SupervisorCmd::Reload(new_config)) {
        return reply(
            StatusCode::INTERNAL_SERVER_ERROR,
            false,
            format!("Failed to send reload command: {e}"),
            Some(diff_summary),
        );
    }
    reply(
        StatusCode::OK,
        true,
        "Config applied successfully".to_string(),
        Some(diff_summary),
    )
}
/// POST /api/bwlimit — live bandwidth adjustment.
#[derive(serde::Deserialize)]
struct BwLimitRequest {
    /// Upload limit; defaults to "0" (see `default_bw`) when omitted.
    #[serde(default = "default_bw")]
    up: String,
    /// Download limit; defaults to "0" (see `default_bw`) when omitted.
    #[serde(default = "default_bw")]
    down: String,
}
/// Serde default for the bandwidth fields: "0", passed through unchanged to
/// the supervisor (presumably meaning "no limit" — confirm against the
/// supervisor's BwLimit handling).
fn default_bw() -> String {
    String::from("0")
}
#[derive(Serialize)]
/// Outcome of a bandwidth-limit command (`post_bwlimit`).
struct BwLimitResponse {
    /// True when the command was delivered to the supervisor.
    ok: bool,
    /// Human-readable result or error description.
    message: String,
}
/// POST /api/bwlimit — forward a live bandwidth adjustment to the supervisor.
///
/// Always answers 200; success/failure is conveyed in the JSON body.
async fn post_bwlimit(
    State(state): State<SharedState>,
    Json(body): Json<BwLimitRequest>,
) -> Json<BwLimitResponse> {
    let cmd = SupervisorCmd::BwLimit {
        up: body.up,
        down: body.down,
    };
    // Send can only fail if the supervisor side of the channel is gone.
    let (ok, message) = match state.cmd_tx.send(cmd) {
        Ok(()) => (true, "Bandwidth limit updated".to_string()),
        Err(e) => (false, format!("Failed to send command: {e}")),
    };
    Json(BwLimitResponse { ok, message })
}
/// POST /api/preset/{profile} — apply a configuration preset.
///
/// Returns an HTML fragment rather than JSON (the caller is an htmx target).
/// Only an unknown profile name produces a non-200 status; save/reload
/// failures come back as 200 with an error span so htmx swaps them in.
async fn post_preset(
    State(state): State<SharedState>,
    Path(profile): Path<String>,
) -> axum::response::Response {
    use axum::response::IntoResponse;
    // Unknown profile → 400 with the FromStr error as plain text.
    let preset = match profile.parse::<crate::cli::preset::Preset>() {
        Ok(p) => p,
        Err(e) => return (StatusCode::BAD_REQUEST, e.to_string()).into_response(),
    };
    // Apply the preset to a private copy of the live config; the read lock is
    // released as soon as the clone completes.
    let mut config = state.config.read().unwrap().clone();
    preset.apply(&mut config);
    let toml_content = config.to_commented_toml();
    // Persist first, then hot-reload.
    if let Err(e) = std::fs::write(&state.config_path, &toml_content) {
        return format!("<span class='error'>保存失败: {e}</span>").into_response();
    }
    if let Err(e) = state.cmd_tx.send(SupervisorCmd::Reload(config)) {
        return format!("<span class='error'>重载失败: {e}</span>").into_response();
    }
    format!("<span class='ok'>✓ 已应用「{profile}」预设,配置重新加载中...</span>").into_response()
}
/// GET /api/logs?lines=200&from_line=0 — recent log file entries.
#[derive(serde::Deserialize)]
struct LogsQuery {
    /// Maximum number of lines to return; defaults to 200 (`default_lines`).
    #[serde(default = "default_lines")]
    lines: usize,
    /// Line offset to resume from; 0 (the default) means "tail the file"
    /// (see the skip computation in `get_logs`).
    #[serde(default)]
    from_line: usize,
}
/// Serde default for `LogsQuery::lines`.
fn default_lines() -> usize {
    200
}
#[derive(Serialize)]
/// Response for `get_logs`.
struct LogsResponse {
    /// Total line count of the log file; clients pass this back as
    /// `from_line` to poll for only newly-appended lines.
    total_lines: usize,
    /// The requested window of log lines, in file order.
    entries: Vec<String>,
}
/// GET /api/logs?lines=200&from_line=0 — return a window of the log file.
///
/// `from_line == 0` means initial load: tail the most recent `lines` entries.
/// Subsequent polls pass the exact `total_lines` from the previous response,
/// so they only pick up newly-appended lines.
///
/// A missing/unconfigured/unreadable log file yields an empty response rather
/// than an error, so the UI degrades gracefully.
async fn get_logs(
    State(state): State<SharedState>,
    Query(params): Query<LogsQuery>,
) -> Json<LogsResponse> {
    // Resolve the log path under a short-lived read lock.
    let log_file = {
        let config = state.config.read().unwrap();
        config.log.file.clone()
    };
    if log_file.is_empty() {
        return Json(LogsResponse {
            total_lines: 0,
            entries: vec![],
        });
    }
    let content = match std::fs::read_to_string(&log_file) {
        Ok(c) => c,
        Err(_) => {
            return Json(LogsResponse {
                total_lines: 0,
                entries: vec![],
            })
        }
    };
    // Count lines once, then materialize ONLY the requested window. The
    // previous version collected every line of the file into an owned
    // Vec<String> first, allocating a String per line even for lines that
    // were skipped and never returned.
    let total_lines = content.lines().count();
    let skip = if params.from_line == 0 {
        // Initial load: tail behaviour.
        total_lines.saturating_sub(params.lines)
    } else {
        // Poll: resume exactly where the previous response left off.
        params.from_line
    };
    let entries: Vec<String> = content
        .lines()
        .skip(skip)
        .take(params.lines)
        .map(str::to_string)
        .collect();
    Json(LogsResponse {
        total_lines,
        entries,
    })
}
/// POST /api/reconnect/{share} — trigger reconnect for a single share.
///
/// Always answers 200; success/failure is conveyed in the JSON body. Unknown
/// share names are rejected before anything is sent to the supervisor.
async fn reconnect_share(
    State(state): State<SharedState>,
    Path(share_name): Path<String>,
) -> Json<serde_json::Value> {
    // Lock scope is limited to the lookup so the send below runs lock-free.
    let known = state
        .config
        .read()
        .unwrap()
        .find_share(&share_name)
        .is_some();
    if !known {
        return Json(serde_json::json!({
            "ok": false,
            "message": format!("Share '{}' not found", share_name)
        }));
    }
    let sent = state
        .cmd_tx
        .send(crate::daemon::SupervisorCmd::Reconnect(share_name.clone()));
    match sent {
        Ok(()) => Json(serde_json::json!({
            "ok": true,
            "message": format!("Reconnecting share '{}'", share_name)
        })),
        Err(e) => Json(serde_json::json!({
            "ok": false,
            "message": format!("Failed to send reconnect: {}", e)
        })),
    }
}