- Task A: Offline mode banner in layout (nas_offline field in LayoutTemplate)
- Task B: Safe-to-disconnect sync indicator on dashboard (all_synced field)
- Task C: Preset apply buttons (photographer/video/office) in config tab with POST /api/preset/{profile} endpoint
- Task D: Reconnect button and error banner in share detail panel
- Added nas_offline/all_synced fields to DaemonStatus for integration contract
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
633 lines
21 KiB
Rust
633 lines
21 KiB
Rust
//! HTML page handlers using askama templates for the htmx + Alpine.js frontend.
|
|
|
|
use askama::Template;
|
|
use axum::extract::{Path, Query, State};
|
|
use axum::http::StatusCode;
|
|
use axum::response::{Html, IntoResponse, Redirect, Response};
|
|
use axum::routing::{get, post};
|
|
use axum::{Form, Json};
|
|
use axum::Router;
|
|
|
|
use crate::config::Config;
|
|
use crate::daemon::{DaemonStatus, ShareStatus};
|
|
use crate::web::SharedState;
|
|
|
|
pub fn routes() -> Router<SharedState> {
|
|
Router::new()
|
|
// Full-page routes (serve layout shell with embedded tab content)
|
|
.route("/", get(page_dashboard))
|
|
.route("/shares", get(page_shares))
|
|
.route("/shares/{name}", get(share_redirect))
|
|
.route("/config", get(page_config))
|
|
.route("/config", post(config_submit))
|
|
.route("/config/apply", post(config_apply))
|
|
.route("/logs", get(page_logs))
|
|
// Tab partial routes (htmx async load)
|
|
.route("/tabs/dashboard", get(tab_dashboard))
|
|
.route("/tabs/shares", get(tab_shares))
|
|
.route("/tabs/config", get(tab_config))
|
|
.route("/tabs/logs", get(tab_logs))
|
|
// Legacy compatibility
|
|
.route("/partials/status", get(status_partial))
|
|
}
|
|
|
|
// ─── View models ──────────────────────────────────────────────────────────
|
|
|
|
/// Compact share view for dashboard cards and status partial.
///
/// Built by [`build_share_views`] from a `DaemonStatus` entry plus the
/// matching config share (config-derived fields default when absent).
#[allow(dead_code)] // fields used by askama templates
struct ShareView {
    /// Share name (key used to match status entries to config entries).
    name: String,
    /// Connection string from the share's config, or "" if unconfigured.
    connection: String,
    /// Local mount point path rendered as a string, or "" if unconfigured.
    mount_point: String,
    /// Whether the share is currently mounted.
    mounted: bool,
    /// Human-readable cache usage (from `ShareStatus::cache_display`).
    cache_display: String,
    /// Number of dirty (not-yet-uploaded) cache entries.
    dirty_count: u64,
    /// Human-readable transfer speed (from `ShareStatus::speed_display`).
    speed_display: String,
    /// True when the config marks this share read-only.
    read_only: bool,
    /// Health label, e.g. "OK"/"FAILED" (from `health_label()`).
    health: String,
    /// Optional health detail text; "" when there is no message.
    health_message: String,
    /// Aggregate warmup state string for this share.
    warmup_state: String,
    /// Warmup items completed so far.
    warmup_done: usize,
    /// Total warmup items.
    warmup_total: usize,
    /// True when a periodic directory refresh interval is configured.
    dir_refresh_active: bool,
    /// "Time ago" string of the last directory refresh; "" if never.
    last_dir_refresh_ago: String,
}
|
|
|
|
/// Extended share view for the shares table with all detail fields.
///
/// Superset of [`ShareView`] built by [`build_share_detail_views`]; adds the
/// remote path, rclone RC port, error counters, and per-rule warmup progress.
#[allow(dead_code)] // fields used by askama templates
struct ShareDetailView {
    /// Share name (key used to match status entries to config entries).
    name: String,
    /// Connection string from the share's config, or "" if unconfigured.
    connection: String,
    /// Local mount point path rendered as a string, or "" if unconfigured.
    mount_point: String,
    /// Remote path on the NAS/backend, from config ("" if unconfigured).
    remote_path: String,
    /// Whether the share is currently mounted.
    mounted: bool,
    /// True when the config marks this share read-only.
    read_only: bool,
    /// rclone remote-control port for this share's mount process.
    rc_port: u16,
    /// Human-readable cache usage.
    cache_display: String,
    /// Number of dirty (not-yet-uploaded) cache entries.
    dirty_count: u64,
    /// Number of files in an error state.
    errored_files: u64,
    /// Human-readable transfer speed.
    speed_display: String,
    /// Number of in-flight transfers.
    transfers: u64,
    /// Transfer error count.
    errors: u64,
    /// Health label, e.g. "OK"/"FAILED".
    health: String,
    /// Optional health detail text; "" when there is no message.
    health_message: String,
    /// Aggregate warmup state string for this share.
    warmup_state: String,
    /// Warmup items completed so far.
    warmup_done: usize,
    /// Total warmup items.
    warmup_total: usize,
    /// Per-rule warmup progress rows for the detail panel.
    warmup_rules: Vec<WarmupRuleView>,
    /// True when a periodic directory refresh interval is configured.
    dir_refresh_active: bool,
    /// "Time ago" string of the last directory refresh; "" if never.
    last_dir_refresh_ago: String,
}
|
|
|
|
/// View model for a single warmup rule in the shares detail panel.
#[allow(dead_code)]
struct WarmupRuleView {
    /// Rule path pattern, as configured.
    path: String,
    /// Optional "newer than" filter, rendered as a string ("" when unset).
    newer_than: String,
    /// Rule state label: "pending" | "listing" | "caching" | "complete" | "failed".
    state: String,
    /// CSS badge class matching the state: "warn" | "warmup" | "ok" | "error".
    badge_class: String,
    /// Total files matched by the rule.
    total_files: usize,
    /// Files skipped (already cached or filtered out upstream).
    skipped: usize,
    /// Files successfully cached.
    cached: usize,
    /// Files that failed to cache.
    errors: usize,
}
|
|
|
|
/// Build compact share views from status + config.
|
|
fn build_share_views(status: &DaemonStatus, config: &Config) -> Vec<ShareView> {
|
|
status
|
|
.shares
|
|
.iter()
|
|
.map(|s| {
|
|
let sc = config.find_share(&s.name);
|
|
let (warmup_state, warmup_done, warmup_total) =
|
|
status.warmup_summary_for(&s.name);
|
|
let dir_refresh_active = sc
|
|
.map(|sc| config.effective_dir_refresh_interval(sc).is_some())
|
|
.unwrap_or(false);
|
|
let last_dir_refresh_ago = status.dir_refresh_ago_for(&s.name).unwrap_or_default();
|
|
ShareView {
|
|
name: s.name.clone(),
|
|
connection: sc.map(|c| c.connection.clone()).unwrap_or_default(),
|
|
mount_point: sc
|
|
.map(|c| c.mount_point.display().to_string())
|
|
.unwrap_or_default(),
|
|
mounted: s.mounted,
|
|
cache_display: s.cache_display(),
|
|
dirty_count: s.dirty_count,
|
|
speed_display: s.speed_display(),
|
|
read_only: sc.map(|c| c.read_only).unwrap_or(false),
|
|
health: s.health_label().to_string(),
|
|
health_message: s.health_message().unwrap_or("").to_string(),
|
|
warmup_state: warmup_state.to_string(),
|
|
warmup_done,
|
|
warmup_total,
|
|
dir_refresh_active,
|
|
last_dir_refresh_ago,
|
|
}
|
|
})
|
|
.collect()
|
|
}
|
|
|
|
/// Build extended share detail views from status + config.
|
|
fn build_share_detail_views(status: &DaemonStatus, config: &Config) -> Vec<ShareDetailView> {
|
|
status
|
|
.shares
|
|
.iter()
|
|
.map(|s| {
|
|
let sc = config.find_share(&s.name);
|
|
let (warmup_state, warmup_done, warmup_total) =
|
|
status.warmup_summary_for(&s.name);
|
|
let dir_refresh_active = sc
|
|
.map(|sc| config.effective_dir_refresh_interval(sc).is_some())
|
|
.unwrap_or(false);
|
|
let last_dir_refresh_ago = status.dir_refresh_ago_for(&s.name).unwrap_or_default();
|
|
|
|
// Build per-rule views for this share
|
|
let warmup_rules: Vec<WarmupRuleView> = status
|
|
.warmup
|
|
.iter()
|
|
.filter(|r| r.share == s.name)
|
|
.map(|r| {
|
|
let (state_str, badge_class) = match &r.state {
|
|
crate::daemon::WarmupRuleState::Pending => ("pending", "warn"),
|
|
crate::daemon::WarmupRuleState::Listing => ("listing", "warmup"),
|
|
crate::daemon::WarmupRuleState::Caching => ("caching", "warmup"),
|
|
crate::daemon::WarmupRuleState::Complete => ("complete", "ok"),
|
|
crate::daemon::WarmupRuleState::Failed(_) => ("failed", "error"),
|
|
};
|
|
WarmupRuleView {
|
|
path: r.path.clone(),
|
|
newer_than: r.newer_than.clone().unwrap_or_default(),
|
|
state: state_str.to_string(),
|
|
badge_class: badge_class.to_string(),
|
|
total_files: r.total_files,
|
|
skipped: r.skipped,
|
|
cached: r.cached,
|
|
errors: r.errors,
|
|
}
|
|
})
|
|
.collect();
|
|
|
|
ShareDetailView {
|
|
name: s.name.clone(),
|
|
connection: sc.map(|c| c.connection.clone()).unwrap_or_default(),
|
|
mount_point: sc
|
|
.map(|c| c.mount_point.display().to_string())
|
|
.unwrap_or_default(),
|
|
remote_path: sc.map(|c| c.remote_path.clone()).unwrap_or_default(),
|
|
mounted: s.mounted,
|
|
read_only: sc.map(|c| c.read_only).unwrap_or(false),
|
|
rc_port: s.rc_port,
|
|
cache_display: s.cache_display(),
|
|
dirty_count: s.dirty_count,
|
|
errored_files: s.errored_files,
|
|
speed_display: s.speed_display(),
|
|
transfers: s.transfers,
|
|
errors: s.errors,
|
|
health: s.health_label().to_string(),
|
|
health_message: s.health_message().unwrap_or("").to_string(),
|
|
warmup_state: warmup_state.to_string(),
|
|
warmup_done,
|
|
warmup_total,
|
|
warmup_rules,
|
|
dir_refresh_active,
|
|
last_dir_refresh_ago,
|
|
}
|
|
})
|
|
.collect()
|
|
}
|
|
|
|
/// Aggregate stats from share statuses.
|
|
fn aggregate_stats(shares: &[ShareStatus]) -> (u64, f64, u64) {
|
|
let total_cache: u64 = shares.iter().map(|s| s.cache_bytes).sum();
|
|
let total_speed: f64 = shares.iter().map(|s| s.speed).sum();
|
|
let active_transfers: u64 = shares.iter().map(|s| s.transfers).sum();
|
|
(total_cache, total_speed, active_transfers)
|
|
}
|
|
|
|
/// Format a byte count as a human-readable binary size.
///
/// Produces one decimal place with a binary-prefix unit ("1.5 KiB",
/// "2.0 GiB", …) up to TiB; anything below 1 KiB is printed as whole
/// bytes ("512 B").
fn format_bytes(bytes: u64) -> String {
    // Thresholds in descending order; the first one that fits wins.
    const UNITS: [(f64, &str); 4] = [
        (1024.0 * 1024.0 * 1024.0 * 1024.0, "TiB"),
        (1024.0 * 1024.0 * 1024.0, "GiB"),
        (1024.0 * 1024.0, "MiB"),
        (1024.0, "KiB"),
    ];
    let b = bytes as f64;
    for (scale, unit) in UNITS {
        if b >= scale {
            return format!("{:.1} {unit}", b / scale);
        }
    }
    format!("{bytes} B")
}
|
|
|
|
fn format_speed(speed: f64) -> String {
|
|
if speed < 1.0 {
|
|
"-".to_string()
|
|
} else {
|
|
format!("{}/s", format_bytes(speed as u64))
|
|
}
|
|
}
|
|
|
|
// ─── Templates ────────────────────────────────────────────────────────────
|
|
|
|
/// Outer page shell wrapping a pre-rendered tab's HTML.
///
/// `escape = "none"` because `tab_content` is already-rendered HTML and must
/// be embedded verbatim.
#[derive(Template)]
#[template(path = "web/layout.html", escape = "none")]
struct LayoutTemplate {
    /// Which nav tab is highlighted ("dashboard", "shares", "config", "logs").
    active_tab: String,
    /// Pre-rendered tab HTML, embedded unescaped.
    tab_content: String,
    /// Daemon uptime string (from `DaemonStatus::uptime_string`).
    uptime: String,
    /// Path of the loaded config file, rendered somewhere in the shell.
    config_path: String,
    /// When true, the layout shows the NAS-offline banner.
    nas_offline: bool,
}
|
|
|
|
/// Dashboard tab: aggregate stats, per-share cards, service indicators,
/// and the safe-to-disconnect sync indicator.
#[derive(Template)]
#[template(path = "web/tabs/dashboard.html")]
struct DashboardTabTemplate {
    /// Number of shares known to the daemon.
    total_shares: usize,
    /// Shares whose health label is "OK".
    healthy_count: usize,
    /// Shares whose health label is "FAILED".
    #[allow(dead_code)]
    failed_count: usize,
    /// Total cache usage across shares, human-readable.
    total_cache_display: String,
    /// Summed transfer speed across shares, human-readable.
    aggregate_speed_display: String,
    /// In-flight transfer count summed across shares.
    active_transfers: u64,
    /// Per-share card view models.
    shares: Vec<ShareView>,
    /// True when the Samba daemon is running.
    smbd_running: bool,
    /// True when the WebDAV server is running.
    webdav_running: bool,
    /// True when NFS exports are active.
    nfs_exported: bool,
    /// True when every share is fully synced (safe to disconnect).
    all_synced: bool,
}
|
|
|
|
/// Shares tab: detail table of all shares.
#[derive(Template)]
#[template(path = "web/tabs/shares.html")]
struct SharesTabTemplate {
    /// Per-share detail view models.
    shares: Vec<ShareDetailView>,
    /// Name of the share whose detail panel starts expanded ("" for none).
    expand: String,
}
|
|
|
|
/// Config tab: interactive Alpine.js editor seeded with JSON state.
///
/// `escape = "none"` because `init_json` is embedded inside a `<script>`
/// block and is pre-escaped by `render_config_tab_html`.
#[derive(Template)]
#[template(path = "web/tabs/config.html", escape = "none")]
struct ConfigTabTemplate {
    /// Serialized `ConfigTabInit`, with "</" escaped to "<\/".
    init_json: String,
}
|
|
|
|
/// Data embedded as JSON for the Alpine.js config editor.
#[derive(serde::Serialize)]
struct ConfigTabInit {
    /// Full config snapshot for the editor form.
    config: Config,
    /// Optional status banner text (success or error).
    message: Option<String>,
    /// When true, `message` is rendered as an error banner.
    is_error: bool,
}
|
|
|
|
/// JSON response for the `POST /config/apply` endpoint.
#[derive(serde::Serialize)]
struct ConfigApplyResponse {
    /// True on success (including the "no changes" case).
    ok: bool,
    /// Human-readable outcome message for the UI.
    message: String,
}
|
|
|
|
/// Logs tab template; carries no server-side data (unit struct) — the
/// template presumably fetches log content client-side.
#[derive(Template)]
#[template(path = "web/tabs/logs.html")]
struct LogsTabTemplate;
|
|
|
|
/// Legacy htmx polling partial (backward compat for `/partials/status`).
|
|
#[derive(Template)]
|
|
#[template(path = "web/tabs/dashboard.html")]
|
|
struct StatusPartialTemplate {
|
|
total_shares: usize,
|
|
healthy_count: usize,
|
|
#[allow(dead_code)]
|
|
failed_count: usize,
|
|
total_cache_display: String,
|
|
aggregate_speed_display: String,
|
|
active_transfers: u64,
|
|
shares: Vec<ShareView>,
|
|
smbd_running: bool,
|
|
webdav_running: bool,
|
|
nfs_exported: bool,
|
|
all_synced: bool,
|
|
}
|
|
|
|
// ─── Full-page handlers (layout shell + tab content) ──────────────────────
|
|
|
|
async fn page_dashboard(State(state): State<SharedState>) -> Response {
|
|
render_layout("dashboard", &state, |status, config| {
|
|
render_dashboard_tab(status, config)
|
|
})
|
|
}
|
|
|
|
async fn page_shares(
|
|
State(state): State<SharedState>,
|
|
Query(params): Query<ExpandQuery>,
|
|
) -> Response {
|
|
let expand = params.expand.unwrap_or_default();
|
|
render_layout("shares", &state, |status, config| {
|
|
render_shares_tab(status, config, &expand)
|
|
})
|
|
}
|
|
|
|
async fn page_config(State(state): State<SharedState>) -> Response {
|
|
render_layout("config", &state, |_status, config| {
|
|
render_config_tab_html(config, None, false)
|
|
})
|
|
}
|
|
|
|
async fn page_logs(State(state): State<SharedState>) -> Response {
|
|
render_layout("logs", &state, |_status, _config| {
|
|
LogsTabTemplate.render().unwrap_or_default()
|
|
})
|
|
}
|
|
|
|
/// Helper: render the layout shell wrapping a tab content generator.
|
|
fn render_layout(
|
|
tab: &str,
|
|
state: &SharedState,
|
|
tab_fn: impl FnOnce(&DaemonStatus, &Config) -> String,
|
|
) -> Response {
|
|
let status = state.status.read().unwrap();
|
|
let config = state.config.read().unwrap();
|
|
|
|
let tab_content = tab_fn(&status, &config);
|
|
|
|
let tmpl = LayoutTemplate {
|
|
active_tab: tab.to_string(),
|
|
tab_content,
|
|
uptime: status.uptime_string(),
|
|
config_path: state.config_path.display().to_string(),
|
|
nas_offline: status.nas_offline,
|
|
};
|
|
|
|
match tmpl.render() {
|
|
Ok(html) => Html(html).into_response(),
|
|
Err(e) => (StatusCode::INTERNAL_SERVER_ERROR, format!("Template error: {e}")).into_response(),
|
|
}
|
|
}
|
|
|
|
// ─── Tab partial handlers (htmx async) ───────────────────────────────────
|
|
|
|
/// Query parameters accepted by `/shares` and `/tabs/shares`.
#[derive(serde::Deserialize)]
struct ExpandQuery {
    /// Name of the share whose detail panel should start expanded, if any.
    expand: Option<String>,
}
|
|
|
|
async fn tab_dashboard(State(state): State<SharedState>) -> Response {
|
|
let status = state.status.read().unwrap();
|
|
let config = state.config.read().unwrap();
|
|
let html = render_dashboard_tab(&status, &config);
|
|
Html(html).into_response()
|
|
}
|
|
|
|
async fn tab_shares(
|
|
State(state): State<SharedState>,
|
|
Query(params): Query<ExpandQuery>,
|
|
) -> Response {
|
|
let status = state.status.read().unwrap();
|
|
let config = state.config.read().unwrap();
|
|
let expand = params.expand.unwrap_or_default();
|
|
let html = render_shares_tab(&status, &config, &expand);
|
|
Html(html).into_response()
|
|
}
|
|
|
|
async fn tab_config(State(state): State<SharedState>) -> Response {
|
|
let config = state.config.read().unwrap();
|
|
let html = render_config_tab_html(&config, None, false);
|
|
Html(html).into_response()
|
|
}
|
|
|
|
async fn tab_logs() -> Response {
|
|
match LogsTabTemplate.render() {
|
|
Ok(html) => Html(html).into_response(),
|
|
Err(e) => (StatusCode::INTERNAL_SERVER_ERROR, format!("Template error: {e}")).into_response(),
|
|
}
|
|
}
|
|
|
|
// ─── Tab render helpers ───────────────────────────────────────────────────
|
|
|
|
fn render_dashboard_tab(status: &DaemonStatus, config: &Config) -> String {
|
|
let shares = build_share_views(status, config);
|
|
let healthy_count = shares.iter().filter(|s| s.health == "OK").count();
|
|
let failed_count = shares.iter().filter(|s| s.health == "FAILED").count();
|
|
let (total_cache, total_speed, active_transfers) = aggregate_stats(&status.shares);
|
|
let all_synced = status.all_synced;
|
|
|
|
let tmpl = DashboardTabTemplate {
|
|
total_shares: shares.len(),
|
|
healthy_count,
|
|
failed_count,
|
|
total_cache_display: format_bytes(total_cache),
|
|
aggregate_speed_display: format_speed(total_speed),
|
|
active_transfers,
|
|
shares,
|
|
smbd_running: status.smbd_running,
|
|
webdav_running: status.webdav_running,
|
|
nfs_exported: status.nfs_exported,
|
|
all_synced,
|
|
};
|
|
|
|
tmpl.render().unwrap_or_default()
|
|
}
|
|
|
|
fn render_shares_tab(status: &DaemonStatus, config: &Config, expand: &str) -> String {
|
|
let shares = build_share_detail_views(status, config);
|
|
|
|
let tmpl = SharesTabTemplate {
|
|
shares,
|
|
expand: expand.to_string(),
|
|
};
|
|
|
|
tmpl.render().unwrap_or_default()
|
|
}
|
|
|
|
// ─── Share detail redirect ────────────────────────────────────────────────
|
|
|
|
async fn share_redirect(Path(name): Path<String>) -> Response {
|
|
Redirect::to(&format!("/shares?expand={name}")).into_response()
|
|
}
|
|
|
|
// ─── Config submit ────────────────────────────────────────────────────────
|
|
|
|
/// Form body for `POST /config` (raw TOML text from the editor textarea).
#[derive(serde::Deserialize)]
struct ConfigForm {
    /// The full config file contents as TOML text.
    toml: String,
}
|
|
|
|
/// `POST /config`: apply a raw-TOML config submitted from the config tab.
///
/// Flow: parse TOML → validate → diff against the live config → write the
/// file to disk → send a reload command to the supervisor. Every failure
/// short-circuits and re-renders the config tab HTML with an error banner;
/// success re-renders it with a diff summary. Always responds with the
/// config tab HTML (htmx swaps it in place).
async fn config_submit(
    State(state): State<SharedState>,
    Form(form): Form<ConfigForm>,
) -> Response {
    // Parse and validate. On parse failure, re-render with the *current*
    // (still-valid) config so the editor state is not lost to a bad draft.
    let new_config: Config = match toml::from_str(&form.toml) {
        Ok(c) => c,
        Err(e) => {
            let config = state.config.read().unwrap();
            let html = render_config_tab_html(
                &config,
                Some(format!("TOML parse error: {e}")),
                true,
            );
            return Html(html).into_response();
        }
    };

    if let Err(e) = new_config.validate() {
        let html = render_config_tab_html(
            &new_config,
            Some(format!("Validation error: {e}")),
            true,
        );
        return Html(html).into_response();
    }

    // Compute diff summary. The block scope drops the config read lock
    // before the disk write and reload below — keep it scoped this way.
    let diff_summary = {
        let old_config = state.config.read().unwrap();
        let d = crate::config_diff::diff(&old_config, &new_config);
        if d.is_empty() {
            // No-op submit: report it without touching disk or reloading.
            let html = render_config_tab_html(
                &new_config,
                Some("No changes detected.".to_string()),
                false,
            );
            return Html(html).into_response();
        }
        d.summary()
    };

    // Save to disk exactly as submitted (preserves the user's formatting).
    if let Err(e) = std::fs::write(&state.config_path, &form.toml) {
        let html = render_config_tab_html(
            &new_config,
            Some(format!("Failed to write config: {e}")),
            true,
        );
        return Html(html).into_response();
    }

    // Send reload command to the supervisor (consumes new_config).
    // NOTE: if this fails, the file on disk has already been updated.
    if let Err(e) = state
        .cmd_tx
        .send(crate::daemon::SupervisorCmd::Reload(new_config))
    {
        let config = state.config.read().unwrap();
        let html = render_config_tab_html(
            &config,
            Some(format!("Failed to send reload: {e}")),
            true,
        );
        return Html(html).into_response();
    }

    // Success — re-read config and show success message
    let config = state.config.read().unwrap();
    let html = render_config_tab_html(
        &config,
        Some(format!("Config applied: {diff_summary}")),
        false,
    );
    Html(html).into_response()
}
|
|
|
|
/// JSON endpoint: apply config from the interactive form editor.
///
/// `POST /config/apply` with a JSON-encoded `Config`. Same pipeline as
/// `config_submit` (validate → diff → write → reload) but speaks JSON in
/// both directions and serializes the config to commented TOML itself.
/// Always returns 200; success/failure is carried in `ConfigApplyResponse`.
async fn config_apply(
    State(state): State<SharedState>,
    Json(new_config): Json<Config>,
) -> Json<ConfigApplyResponse> {
    // Validate
    if let Err(e) = new_config.validate() {
        return Json(ConfigApplyResponse {
            ok: false,
            message: format!("Validation error: {e}"),
        });
    }

    // Compute diff. The block scope drops the config read lock before the
    // disk write and reload below.
    let diff_summary = {
        let old_config = state.config.read().unwrap();
        let d = crate::config_diff::diff(&old_config, &new_config);
        if d.is_empty() {
            // No-op apply counts as success; nothing written or reloaded.
            return Json(ConfigApplyResponse {
                ok: true,
                message: "No changes detected.".to_string(),
            });
        }
        d.summary()
    };

    // Serialize to human-readable TOML and write to disk
    let toml_content = new_config.to_commented_toml();

    if let Err(e) = std::fs::write(&state.config_path, &toml_content) {
        return Json(ConfigApplyResponse {
            ok: false,
            message: format!("Failed to write config: {e}"),
        });
    }

    // Send reload command to the supervisor (consumes new_config).
    // NOTE: if this fails, the file on disk has already been updated.
    if let Err(e) = state
        .cmd_tx
        .send(crate::daemon::SupervisorCmd::Reload(new_config))
    {
        return Json(ConfigApplyResponse {
            ok: false,
            message: format!("Failed to send reload: {e}"),
        });
    }

    tracing::info!("Config applied: {diff_summary}");

    Json(ConfigApplyResponse {
        ok: true,
        message: format!("Config applied: {diff_summary}"),
    })
}
|
|
|
|
/// Render the config tab HTML using the interactive form editor.
|
|
fn render_config_tab_html(config: &Config, message: Option<String>, is_error: bool) -> String {
|
|
let init = ConfigTabInit {
|
|
config: config.clone(),
|
|
message,
|
|
is_error,
|
|
};
|
|
// Escape </ to prevent breaking out of <script> tags
|
|
let init_json = serde_json::to_string(&init)
|
|
.unwrap_or_default()
|
|
.replace("</", "<\\/");
|
|
let tmpl = ConfigTabTemplate { init_json };
|
|
tmpl.render().unwrap_or_default()
|
|
}
|
|
|
|
// ─── Legacy partial (backward compat) ─────────────────────────────────────
|
|
|
|
async fn status_partial(State(state): State<SharedState>) -> Response {
|
|
let status = state.status.read().unwrap();
|
|
let config = state.config.read().unwrap();
|
|
|
|
let shares = build_share_views(&status, &config);
|
|
let healthy_count = shares.iter().filter(|s| s.health == "OK").count();
|
|
let failed_count = shares.iter().filter(|s| s.health == "FAILED").count();
|
|
let (total_cache, total_speed, active_transfers) = aggregate_stats(&status.shares);
|
|
|
|
let tmpl = StatusPartialTemplate {
|
|
total_shares: shares.len(),
|
|
healthy_count,
|
|
failed_count,
|
|
total_cache_display: format_bytes(total_cache),
|
|
aggregate_speed_display: format_speed(total_speed),
|
|
active_transfers,
|
|
shares,
|
|
smbd_running: status.smbd_running,
|
|
webdav_running: status.webdav_running,
|
|
nfs_exported: status.nfs_exported,
|
|
all_synced: status.all_synced,
|
|
};
|
|
|
|
match tmpl.render() {
|
|
Ok(html) => Html(html).into_response(),
|
|
Err(e) => (StatusCode::INTERNAL_SERVER_ERROR, format!("Template error: {e}")).into_response(),
|
|
}
|
|
}
|