//! JSON API handlers for programmatic access and future SPA. //! //! All endpoints return JSON. The htmx frontend uses the page handlers instead, //! but these are available for CLI tools and external integrations. use axum::extract::{Path, Query, State}; use axum::http::StatusCode; use axum::response::Json; use axum::routing::{get, post}; use axum::Router; use serde::Serialize; use crate::config::Config; use crate::daemon::SupervisorCmd; use crate::web::SharedState; pub fn routes() -> Router { Router::new() .route("/api/status", get(get_status)) .route("/api/status/{share}", get(get_share_status)) .route("/api/config", get(get_config)) .route("/api/config", post(post_config)) .route("/api/bwlimit", post(post_bwlimit)) .route("/api/logs", get(get_logs)) .route("/api/reconnect/{share}", post(reconnect_share)) .route("/api/preset/{profile}", post(post_preset)) } /// GET /api/status — overall daemon status. #[derive(Serialize)] struct StatusResponse { uptime: String, shares: Vec, smbd_running: bool, webdav_running: bool, nfs_exported: bool, warmup: Vec, nas_offline: bool, all_synced: bool, } #[derive(Serialize)] struct WarmupRuleStatusResponse { share: String, path: String, newer_than: Option, state: String, total_files: usize, skipped: usize, cached: usize, errors: usize, } #[derive(Serialize)] struct ShareStatusResponse { name: String, mounted: bool, rc_port: u16, cache_bytes: u64, cache_display: String, dirty_count: u64, errored_files: u64, speed: f64, speed_display: String, transfers: u64, errors: u64, health: String, health_message: Option, warmup_state: String, warmup_done: usize, warmup_total: usize, dir_refresh_active: bool, last_dir_refresh_ago: Option, last_dir_refresh_dirs_ok: usize, last_dir_refresh_dirs_failed: usize, } /// Build a `ShareStatusResponse` for one share, including dir-refresh fields. 
/// Assemble the JSON view of a single share.
///
/// Combines three sources: the live `ShareStatus` snapshot `s`, per-share
/// aggregates looked up on the whole `DaemonStatus`, and the `Config` (used
/// only to decide whether periodic dir-refresh is enabled for this share).
fn share_to_response(
    s: &crate::daemon::ShareStatus,
    status: &crate::daemon::DaemonStatus,
    config: &Config,
) -> ShareStatusResponse {
    // Warmup progress is summarised by the daemon per share name.
    let (warmup_state, warmup_done, warmup_total) = status.warmup_summary_for(&s.name);

    // Dir-refresh counts as "active" when the share exists in config and its
    // effective refresh interval resolves to Some.
    let dir_refresh_active = config
        .find_share(&s.name)
        .is_some_and(|sc| config.effective_dir_refresh_interval(sc).is_some());

    ShareStatusResponse {
        name: s.name.clone(),
        mounted: s.mounted,
        rc_port: s.rc_port,
        cache_bytes: s.cache_bytes,
        cache_display: s.cache_display(),
        dirty_count: s.dirty_count,
        errored_files: s.errored_files,
        speed: s.speed,
        speed_display: s.speed_display(),
        transfers: s.transfers,
        errors: s.errors,
        health: s.health_label().to_string(),
        health_message: s.health_message().map(|m| m.to_string()),
        warmup_state: warmup_state.to_string(),
        warmup_done,
        warmup_total,
        dir_refresh_active,
        last_dir_refresh_ago: status.dir_refresh_ago_for(&s.name),
        // Missing map entries mean "no refresh recorded yet" — default to 0.
        last_dir_refresh_dirs_ok: status
            .dir_refresh_dirs_ok
            .get(&s.name)
            .copied()
            .unwrap_or(0),
        last_dir_refresh_dirs_failed: status
            .dir_refresh_dirs_failed
            .get(&s.name)
            .copied()
            .unwrap_or(0),
    }
}

/// Build all share status responses.
/// Map every share in `status` through [`share_to_response`].
fn build_share_status_responses(
    status: &crate::daemon::DaemonStatus,
    config: &Config,
) -> Vec<ShareStatusResponse> {
    status
        .shares
        .iter()
        .map(|s| share_to_response(s, status, config))
        .collect()
}

/// GET /api/status — overall daemon status as JSON.
///
/// Snapshots both the daemon status and the config under their read locks,
/// then serializes shares plus warmup-rule progress.
async fn get_status(State(state): State<SharedState>) -> Json<StatusResponse> {
    let status = state.status.read().unwrap();
    let config = state.config.read().unwrap();
    Json(StatusResponse {
        uptime: status.uptime_string(),
        shares: build_share_status_responses(&status, &config),
        smbd_running: status.smbd_running,
        webdav_running: status.webdav_running,
        nfs_exported: status.nfs_exported,
        warmup: status
            .warmup
            .iter()
            .map(|r| {
                // Flatten the enum into a stable string for API consumers.
                let state_str = match &r.state {
                    crate::daemon::WarmupRuleState::Pending => "pending",
                    crate::daemon::WarmupRuleState::Listing => "listing",
                    crate::daemon::WarmupRuleState::Caching => "caching",
                    crate::daemon::WarmupRuleState::Complete => "complete",
                    crate::daemon::WarmupRuleState::Failed(_) => "failed",
                };
                WarmupRuleStatusResponse {
                    share: r.share.clone(),
                    path: r.path.clone(),
                    newer_than: r.newer_than.clone(),
                    state: state_str.to_string(),
                    total_files: r.total_files,
                    skipped: r.skipped,
                    cached: r.cached,
                    errors: r.errors,
                }
            })
            .collect(),
        nas_offline: status.nas_offline,
        all_synced: status.all_synced,
    })
}

/// GET /api/status/{share} — per-share status.
///
/// Returns 404 when no share with the given name is currently known.
async fn get_share_status(
    State(state): State<SharedState>,
    Path(share_name): Path<String>,
) -> Result<Json<ShareStatusResponse>, StatusCode> {
    let status = state.status.read().unwrap();
    let config = state.config.read().unwrap();
    let share = status
        .shares
        .iter()
        .find(|s| s.name == share_name)
        .ok_or(StatusCode::NOT_FOUND)?;
    Ok(Json(share_to_response(share, &status, &config)))
}

/// GET /api/config — current config as JSON.
///
/// Serialization failure degrades to `null` (`Value::default()`) rather than
/// an error status.
async fn get_config(State(state): State<SharedState>) -> Json<serde_json::Value> {
    let config = state.config.read().unwrap();
    Json(serde_json::to_value(&*config).unwrap_or_default())
}

/// POST /api/config — submit new config as TOML string.
#[derive(serde::Deserialize)] struct ConfigSubmit { toml: String, } #[derive(Serialize)] struct ConfigResponse { ok: bool, message: String, diff_summary: Option, } async fn post_config( State(state): State, Json(body): Json, ) -> (StatusCode, Json) { // Parse and validate the new config let new_config: crate::config::Config = match toml::from_str(&body.toml) { Ok(c) => c, Err(e) => { return ( StatusCode::BAD_REQUEST, Json(ConfigResponse { ok: false, message: format!("TOML parse error: {e}"), diff_summary: None, }), ); } }; if let Err(e) = new_config.validate() { return ( StatusCode::BAD_REQUEST, Json(ConfigResponse { ok: false, message: format!("Validation error: {e}"), diff_summary: None, }), ); } // Compute diff for summary let diff_summary = { let old_config = state.config.read().unwrap(); let d = crate::config_diff::diff(&old_config, &new_config); if d.is_empty() { return ( StatusCode::OK, Json(ConfigResponse { ok: true, message: "No changes detected".to_string(), diff_summary: Some(d.summary()), }), ); } d.summary() }; // Save to disk if let Err(e) = std::fs::write(&state.config_path, &body.toml) { return ( StatusCode::INTERNAL_SERVER_ERROR, Json(ConfigResponse { ok: false, message: format!("Failed to write config file: {e}"), diff_summary: Some(diff_summary), }), ); } // Send reload command to supervisor if let Err(e) = state.cmd_tx.send(SupervisorCmd::Reload(new_config)) { return ( StatusCode::INTERNAL_SERVER_ERROR, Json(ConfigResponse { ok: false, message: format!("Failed to send reload command: {e}"), diff_summary: Some(diff_summary), }), ); } ( StatusCode::OK, Json(ConfigResponse { ok: true, message: "Config applied successfully".to_string(), diff_summary: Some(diff_summary), }), ) } /// POST /api/bwlimit — live bandwidth adjustment. 
/// Request body for POST /api/bwlimit. Values are rclone-style bandwidth
/// strings; "0" (the default) means unlimited.
#[derive(serde::Deserialize)]
struct BwLimitRequest {
    #[serde(default = "default_bw")]
    up: String,
    #[serde(default = "default_bw")]
    down: String,
}

/// Serde default for both bandwidth fields.
fn default_bw() -> String {
    "0".to_string()
}

#[derive(Serialize)]
struct BwLimitResponse {
    ok: bool,
    message: String,
}

/// POST /api/bwlimit — forward a live bandwidth-limit change to the
/// supervisor. Always returns 200; success/failure is carried in `ok`.
async fn post_bwlimit(
    State(state): State<SharedState>,
    Json(body): Json<BwLimitRequest>,
) -> Json<BwLimitResponse> {
    match state.cmd_tx.send(SupervisorCmd::BwLimit {
        up: body.up,
        down: body.down,
    }) {
        Ok(()) => Json(BwLimitResponse {
            ok: true,
            message: "Bandwidth limit updated".to_string(),
        }),
        Err(e) => Json(BwLimitResponse {
            ok: false,
            message: format!("Failed to send command: {e}"),
        }),
    }
}

/// POST /api/preset/{profile} — apply a configuration preset.
///
/// Parses the profile name, applies the preset to a clone of the current
/// config, writes the regenerated commented TOML to disk, and triggers a
/// supervisor reload. Responses are plain text (consumed by htmx); note the
/// error branches currently return 200 with an error message rather than an
/// error status.
async fn post_preset(
    State(state): State<SharedState>,
    Path(profile): Path<String>,
) -> axum::response::Response {
    use axum::response::IntoResponse;

    // NOTE(review): the turbofish target was lost from this source; it is the
    // `FromStr` preset type whose `apply(&mut Config)` is called below.
    // Restored as `crate::config::Preset` — confirm the actual path.
    let preset = match profile.parse::<crate::config::Preset>() {
        Ok(p) => p,
        Err(e) => return (StatusCode::BAD_REQUEST, e.to_string()).into_response(),
    };

    // Clone under the read lock, then release it before mutating/writing.
    let mut config = {
        let cfg = state.config.read().unwrap();
        cfg.clone()
    };
    preset.apply(&mut config);

    let toml_content = config.to_commented_toml();
    if let Err(e) = std::fs::write(&state.config_path, &toml_content) {
        // "保存失败" = "save failed"
        return format!("保存失败: {e}").into_response();
    }
    if let Err(e) = state.cmd_tx.send(SupervisorCmd::Reload(config)) {
        // "重载失败" = "reload failed"
        return format!("重载失败: {e}").into_response();
    }
    // "Applied preset '<profile>'; config reloading..."
    format!("✓ 已应用「{profile}」预设,配置重新加载中...").into_response()
}

/// GET /api/logs?lines=200&from_line=0 — recent log file entries.
#[derive(serde::Deserialize)] struct LogsQuery { #[serde(default = "default_lines")] lines: usize, #[serde(default)] from_line: usize, } fn default_lines() -> usize { 200 } #[derive(Serialize)] struct LogsResponse { total_lines: usize, entries: Vec, } async fn get_logs( State(state): State, Query(params): Query, ) -> Json { let log_file = { let config = state.config.read().unwrap(); config.log.file.clone() }; if log_file.is_empty() { return Json(LogsResponse { total_lines: 0, entries: vec![], }); } let content = match std::fs::read_to_string(&log_file) { Ok(c) => c, Err(_) => { return Json(LogsResponse { total_lines: 0, entries: vec![], }) } }; let all_lines: Vec = content .lines() .map(|l| l.to_string()) .collect(); let total_lines = all_lines.len(); // from_line == 0 means initial load: show the most recent `lines` entries // (tail behaviour). Subsequent polls pass the exact line offset returned by // the previous response, so they only pick up newly-appended lines. let skip = if params.from_line == 0 { total_lines.saturating_sub(params.lines) } else { params.from_line }; let entries: Vec = all_lines .into_iter() .skip(skip) .take(params.lines) .collect(); Json(LogsResponse { total_lines, entries, }) } /// POST /api/reconnect/{share} — trigger reconnect for a single share. async fn reconnect_share( State(state): State, Path(share_name): Path, ) -> Json { // Validate share exists { let cfg = state.config.read().unwrap(); if cfg.find_share(&share_name).is_none() { return Json(serde_json::json!({ "ok": false, "message": format!("Share '{}' not found", share_name) })); } } match state.cmd_tx.send(crate::daemon::SupervisorCmd::Reconnect(share_name.clone())) { Ok(()) => Json(serde_json::json!({ "ok": true, "message": format!("Reconnecting share '{}'", share_name) })), Err(e) => Json(serde_json::json!({ "ok": false, "message": format!("Failed to send reconnect: {}", e) })), } }