diff --git a/tests/01-config/test-bad-toml.sh b/tests/01-config/test-bad-toml.sh new file mode 100755 index 0000000..cbbac52 --- /dev/null +++ b/tests/01-config/test-bad-toml.sh @@ -0,0 +1,23 @@ +#!/usr/bin/env bash +set -euo pipefail +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +source "$SCRIPT_DIR/../harness/helpers.sh" + +setup_test_env +trap teardown_test_env EXIT + +# Generate syntactically invalid TOML (unclosed section header, unclosed string). +source "$HARNESS_DIR/config-gen.sh" +_gen_broken_config bad_toml + +# The binary should fail to parse the config and exit non-zero. +output=$("$WARPGATE_BIN" status -c "$TEST_CONFIG" 2>&1) && { + echo "FAIL: warpgate status should have exited non-zero for bad TOML" + echo " output: $output" + exit 1 +} + +# The error should contain the standard config parse failure message. +assert_output_contains "$output" "Failed to parse config TOML" + +echo "PASS: $(basename "$0" .sh)" diff --git a/tests/01-config/test-config-init-no-overwrite.sh b/tests/01-config/test-config-init-no-overwrite.sh new file mode 100755 index 0000000..74c5602 --- /dev/null +++ b/tests/01-config/test-config-init-no-overwrite.sh @@ -0,0 +1,27 @@ +#!/usr/bin/env bash +set -euo pipefail +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +source "$SCRIPT_DIR/../harness/helpers.sh" + +setup_test_env +trap teardown_test_env EXIT + +TARGET="$TEST_DIR/existing.toml" + +# Pre-create a file at the target path so config-init would overwrite it. +echo "# pre-existing config" > "$TARGET" + +# config-init should refuse to overwrite an existing file. +output=$("$WARPGATE_BIN" config-init --output "$TARGET" 2>&1) && { + echo "FAIL: config-init should have exited non-zero for existing file" + echo " output: $output" + exit 1 +} + +# The error message must mention that the file already exists. +assert_output_contains "$output" "already exists" + +# The original file should be untouched. 
+assert_file_content "$TARGET" "# pre-existing config" + +echo "PASS: $(basename "$0" .sh)" diff --git a/tests/01-config/test-config-init.sh b/tests/01-config/test-config-init.sh new file mode 100755 index 0000000..3602348 --- /dev/null +++ b/tests/01-config/test-config-init.sh @@ -0,0 +1,30 @@ +#!/usr/bin/env bash +set -euo pipefail +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +source "$SCRIPT_DIR/../harness/helpers.sh" + +setup_test_env +trap teardown_test_env EXIT + +GENERATED="$TEST_DIR/generated.toml" + +# config-init should create a new config file at the specified path. +output=$("$WARPGATE_BIN" config-init --output "$GENERATED" 2>&1) || { + echo "FAIL: config-init exited non-zero" + echo " output: $output" + exit 1 +} + +# The file must exist after a successful run. +assert_file_exists "$GENERATED" + +# The generated config should be parseable by the binary. Running +# `status` will fail on the mount check (no mount running) but must +# not fail on config parsing — exit code 0 with a DOWN status. +output=$("$WARPGATE_BIN" status -c "$GENERATED" 2>&1) || { + echo "FAIL: generated config is not parseable by warpgate status" + echo " output: $output" + exit 1 +} + +echo "PASS: $(basename "$0" .sh)" diff --git a/tests/01-config/test-default-values.sh b/tests/01-config/test-default-values.sh new file mode 100755 index 0000000..c2ee101 --- /dev/null +++ b/tests/01-config/test-default-values.sh @@ -0,0 +1,66 @@ +#!/usr/bin/env bash +set -euo pipefail +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +source "$SCRIPT_DIR/../harness/helpers.sh" + +setup_test_env +trap teardown_test_env EXIT + +# Generate a minimal config — only required fields are set. +# The binary should fill in all other fields with defaults. +source "$HARNESS_DIR/config-gen.sh" +_gen_minimal_config + +# Verify the binary accepts the minimal config without complaint. +# This proves that defaults are applied for every non-required field. 
+output=$("$WARPGATE_BIN" status -c "$TEST_CONFIG" 2>&1) || { + echo "FAIL: warpgate status exited non-zero; defaults should fill in" + echo " output: $output" + exit 1 +} + +# The minimal config intentionally omits these fields (all should be +# filled by compiled-in defaults matching the PRD): +# sftp_port = 22 +# sftp_connections = 8 +# max_size = "200G" +# max_age = "720h" +# min_free = "10G" +# chunk_size = "256M" +# write_back = "5s" +# transfers = 4 +# enable_smb = true +# enable_nfs = false +# mount.point = "/mnt/nas-photos" +# +# We cannot easily extract the in-memory defaults from the binary, but a +# successful status invocation on a minimal config is proof that every +# default was applied without error. + +# Double-check the config file does NOT contain the fields that should +# come from defaults — confirming we are truly testing the defaults path. +assert_output_not_contains "$(cat "$TEST_CONFIG")" "sftp_port" +assert_output_not_contains "$(cat "$TEST_CONFIG")" "max_size" +assert_output_not_contains "$(cat "$TEST_CONFIG")" "enable_smb" +assert_output_not_contains "$(cat "$TEST_CONFIG")" "transfers" + +# Parse the status output for actual applied defaults and verify key +# values match the PRD specifications. 
+assert_output_contains "$output" "sftp_port" +assert_output_contains "$output" "22" +assert_output_contains "$output" "sftp_connections" +assert_output_contains "$output" "8" +assert_output_contains "$output" "write_back" +assert_output_contains "$output" "5s" +assert_output_contains "$output" "enable_smb" +assert_output_contains "$output" "true" +assert_output_contains "$output" "enable_nfs" +assert_output_contains "$output" "false" +assert_output_contains "$output" "enable_webdav" +assert_output_contains "$output" "false" +assert_output_contains "$output" "dir_cache_time" +assert_output_contains "$output" "5m0s" +assert_output_contains "$output" "transfers" +assert_output_contains "$output" "4" + +echo "PASS: $(basename "$0" .sh)" diff --git a/tests/01-config/test-extreme-values.sh b/tests/01-config/test-extreme-values.sh new file mode 100755 index 0000000..90324cb --- /dev/null +++ b/tests/01-config/test-extreme-values.sh @@ -0,0 +1,30 @@ +#!/usr/bin/env bash +set -euo pipefail +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +source "$SCRIPT_DIR/../harness/helpers.sh" + +setup_test_env +trap teardown_test_env EXIT + +# Generate a full config and then override several fields with extreme +# (but syntactically valid) values to confirm the binary does not panic. +source "$HARNESS_DIR/config-gen.sh" +_gen_config \ + cache.max_size="999T" \ + writeback.transfers=999 \ + connection.sftp_connections=999 + +# The binary should parse the config successfully and not panic. +# `status` will report DOWN (no mount running) but should exit 0. +output=$("$WARPGATE_BIN" status -c "$TEST_CONFIG" 2>&1) || { + echo "FAIL: warpgate status exited non-zero with extreme values" + echo " output: $output" + exit 1 +} + +# Verify the extreme values actually made it into the config file. 
+assert_output_contains "$(cat "$TEST_CONFIG")" '999T' +assert_output_contains "$(cat "$TEST_CONFIG")" 'transfers = 999' +assert_output_contains "$(cat "$TEST_CONFIG")" 'sftp_connections = 999' + +echo "PASS: $(basename "$0" .sh)" diff --git a/tests/01-config/test-minimal-valid.sh b/tests/01-config/test-minimal-valid.sh new file mode 100755 index 0000000..90e4761 --- /dev/null +++ b/tests/01-config/test-minimal-valid.sh @@ -0,0 +1,48 @@ +#!/usr/bin/env bash +set -euo pipefail +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +source "$SCRIPT_DIR/../harness/helpers.sh" + +setup_test_env +trap teardown_test_env EXIT + +# Generate config with only the required fields (connection.nas_host, +# connection.nas_user, connection.remote_path, cache.dir). All other +# fields should be filled in by the binary's defaults. +source "$HARNESS_DIR/config-gen.sh" +_gen_minimal_config + +# Run `warpgate status` against the minimal config. No mount is running, +# so status should report "DOWN" but still exit 0 — the important thing +# is that config parsing succeeds. +output=$("$WARPGATE_BIN" status -c "$TEST_CONFIG" 2>&1) || { + echo "FAIL: warpgate status exited non-zero with minimal config" + echo " output: $output" + exit 1 +} + +# Sanity: the generated config is valid TOML that includes the required +# fields we set. +assert_output_contains "$(cat "$TEST_CONFIG")" 'nas_host' +assert_output_contains "$(cat "$TEST_CONFIG")" '[cache]' + +# Parse the status output for applied defaults and verify key values +# match the PRD specifications. 
+assert_output_contains "$output" "sftp_port" +assert_output_contains "$output" "22" +assert_output_contains "$output" "sftp_connections" +assert_output_contains "$output" "8" +assert_output_contains "$output" "write_back" +assert_output_contains "$output" "5s" +assert_output_contains "$output" "enable_smb" +assert_output_contains "$output" "true" +assert_output_contains "$output" "enable_nfs" +assert_output_contains "$output" "false" +assert_output_contains "$output" "enable_webdav" +assert_output_contains "$output" "false" +assert_output_contains "$output" "dir_cache_time" +assert_output_contains "$output" "5m0s" +assert_output_contains "$output" "transfers" +assert_output_contains "$output" "4" + +echo "PASS: $(basename "$0" .sh)" diff --git a/tests/01-config/test-missing-field.sh b/tests/01-config/test-missing-field.sh new file mode 100755 index 0000000..019324b --- /dev/null +++ b/tests/01-config/test-missing-field.sh @@ -0,0 +1,23 @@ +#!/usr/bin/env bash +set -euo pipefail +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +source "$SCRIPT_DIR/../harness/helpers.sh" + +setup_test_env +trap teardown_test_env EXIT + +# Generate a config that is missing the required `nas_host` field. +source "$HARNESS_DIR/config-gen.sh" +_gen_broken_config missing_field + +# The binary should fail to parse the config and exit non-zero. +output=$("$WARPGATE_BIN" status -c "$TEST_CONFIG" 2>&1) && { + echo "FAIL: warpgate status should have exited non-zero for missing field" + echo " output: $output" + exit 1 +} + +# The error message from the TOML deserializer should mention "missing field". 
+assert_output_contains "$output" "missing field" + +echo "PASS: $(basename "$0" .sh)" diff --git a/tests/02-lifecycle/test-auto-warmup.sh b/tests/02-lifecycle/test-auto-warmup.sh new file mode 100755 index 0000000..d545588 --- /dev/null +++ b/tests/02-lifecycle/test-auto-warmup.sh @@ -0,0 +1,35 @@ +#!/usr/bin/env bash +set -euo pipefail +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +source "$SCRIPT_DIR/../harness/helpers.sh" +source "$SCRIPT_DIR/../harness/mock-nas.sh" + +require_root +setup_test_env +trap teardown_test_env EXIT + +# Start mock NAS and create a test file for warmup to pull +start_mock_nas +nas_create_file "photos/test.jpg" 4 + +# Generate config with auto-warmup enabled and a warmup rule for the path +gen_config warmup_auto=true \ + 'warmup.rules=[[warmup.rules]] +path = "" +' + +start_warpgate + +# Wait for the warmup phase to begin +wait_for_log_line "Running auto-warmup" 60 + +# Warmup ran successfully +assert_log_contains "Running auto-warmup" + +# Wait for warmup to complete and verify the file actually entered cache +wait_for_mount 60 +wait_for_rc_api 30 +sleep 5 +assert_cached "photos/test.jpg" + +echo "PASS: $(basename "$0" .sh)" diff --git a/tests/02-lifecycle/test-mount-timeout.sh b/tests/02-lifecycle/test-mount-timeout.sh new file mode 100755 index 0000000..f13b8fd --- /dev/null +++ b/tests/02-lifecycle/test-mount-timeout.sh @@ -0,0 +1,21 @@ +#!/usr/bin/env bash +set -euo pipefail +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +source "$SCRIPT_DIR/../harness/helpers.sh" + +setup_test_env +trap teardown_test_env EXIT + +# Use 192.0.2.1 (TEST-NET-1, RFC 5737) which is unreachable. +# rclone will hang trying to connect, triggering the 30s mount timeout. 
+gen_config nas_host=192.0.2.1 + +start_warpgate + +# The mount timeout is 30s; allow up to 35s for the process to exit +wait_for_exit "$WARPGATE_PID" 35 + +# Verify the timeout message appeared in the log +assert_log_contains "Timed out waiting for mount" + +echo "PASS: $(basename "$0" .sh)" diff --git a/tests/02-lifecycle/test-no-warmup-rules.sh b/tests/02-lifecycle/test-no-warmup-rules.sh new file mode 100755 index 0000000..f2b7eb6 --- /dev/null +++ b/tests/02-lifecycle/test-no-warmup-rules.sh @@ -0,0 +1,23 @@ +#!/usr/bin/env bash +set -euo pipefail +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +source "$SCRIPT_DIR/../harness/helpers.sh" +source "$SCRIPT_DIR/../harness/mock-nas.sh" + +require_root +setup_test_env +trap teardown_test_env EXIT + +# Start mock NAS and bring warpgate to full supervision +start_mock_nas + +# Default config: warmup_auto=false and no warmup rules +gen_config + +start_warpgate +wait_for_log_line "Supervision active" 60 + +# Verify that auto-warmup was NOT triggered +assert_log_not_contains "Running auto-warmup" + +echo "PASS: $(basename "$0" .sh)" diff --git a/tests/02-lifecycle/test-preflight-creates-dirs.sh b/tests/02-lifecycle/test-preflight-creates-dirs.sh new file mode 100755 index 0000000..27409aa --- /dev/null +++ b/tests/02-lifecycle/test-preflight-creates-dirs.sh @@ -0,0 +1,31 @@ +#!/usr/bin/env bash +set -euo pipefail +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +source "$SCRIPT_DIR/../harness/helpers.sh" + +setup_test_env +trap teardown_test_env EXIT + +# Use directories that do not yet exist. Preflight should create them. +NEW_MOUNT="$TEST_DIR/new-mount" +NEW_CACHE="$TEST_DIR/new-cache" + +# Ensure they really don't exist +[[ ! -d "$NEW_MOUNT" ]] || { echo "FAIL: $NEW_MOUNT already exists"; exit 1; } +[[ ! -d "$NEW_CACHE" ]] || { echo "FAIL: $NEW_CACHE already exists"; exit 1; } + +# Generate config with the non-existent directories. 
+# No mock NAS is running, so the mount will fail after preflight, +# but that is fine -- we only need to verify directory creation. +gen_config mount_point="$NEW_MOUNT" cache_dir="$NEW_CACHE" + +start_warpgate + +# Wait for warpgate to exit (it will fail on mount since no NAS is running) +wait_for_exit "$WARPGATE_PID" 35 + +# Verify preflight created both directories +assert_dir_exists "$NEW_MOUNT" +assert_dir_exists "$NEW_CACHE" + +echo "PASS: $(basename "$0" .sh)" diff --git a/tests/02-lifecycle/test-rclone-immediate-exit.sh b/tests/02-lifecycle/test-rclone-immediate-exit.sh new file mode 100755 index 0000000..3434257 --- /dev/null +++ b/tests/02-lifecycle/test-rclone-immediate-exit.sh @@ -0,0 +1,30 @@ +#!/usr/bin/env bash +set -euo pipefail +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +source "$SCRIPT_DIR/../harness/helpers.sh" + +source "$SCRIPT_DIR/../harness/mock-nas.sh" + +require_root +setup_test_env +trap teardown_test_env EXIT + +# Start the mock NAS so the connection is reachable, but configure warpgate +# with a WRONG key file so rclone fails with "auth failed" (not "connection +# refused"). This tests the auth-failure fast-exit path. 
+start_mock_nas + +# Generate a second SSH key that is NOT in authorized_keys +ssh-keygen -t ed25519 -f "$TEST_DIR/wrong_key" -N "" -q + +gen_config nas_key_file="$TEST_DIR/wrong_key" + +start_warpgate + +# rclone should fail fast; allow up to 10s for warpgate to detect and exit +wait_for_exit "$WARPGATE_PID" 10 + +# Verify the immediate-exit message appeared in the log +assert_log_contains "rclone mount exited immediately" + +echo "PASS: $(basename "$0" .sh)" diff --git a/tests/02-lifecycle/test-shutdown-order.sh b/tests/02-lifecycle/test-shutdown-order.sh new file mode 100755 index 0000000..8d56bf1 --- /dev/null +++ b/tests/02-lifecycle/test-shutdown-order.sh @@ -0,0 +1,38 @@ +#!/usr/bin/env bash +set -euo pipefail +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +source "$SCRIPT_DIR/../harness/helpers.sh" +source "$SCRIPT_DIR/../harness/mock-nas.sh" + +require_root +setup_test_env +trap teardown_test_env EXIT + +# Start mock NAS and bring warpgate to full supervision +start_mock_nas +gen_config +start_warpgate +wait_for_log_line "Supervision active" 60 + +# Initiate graceful shutdown +stop_warpgate + +# Verify shutdown messages appear in the correct order +assert_log_contains "Shutting down" +assert_log_order "SMB: stopped" "FUSE: unmounted" +assert_log_order "FUSE: unmounted" "rclone: stopped" + +# Verify the drain step appears between SMB stop and FUSE unmount +# The supervisor should drain dirty writes before tearing down the FUSE mount. 
+if grep -q "Write-back queue drained\|Waiting for write-back" "$TEST_DIR/warpgate.log" 2>/dev/null; then + assert_log_order "SMB: stopped" "Write-back queue drained\|Waiting for write-back" + # Verify drain completes before FUSE unmount + drain_line=$(grep -n "Write-back queue drained\|Waiting for write-back" "$TEST_DIR/warpgate.log" | head -1 | cut -d: -f1) + fuse_line=$(grep -n "FUSE: unmounted" "$TEST_DIR/warpgate.log" | head -1 | cut -d: -f1) + if [[ -n "$drain_line" && -n "$fuse_line" && "$drain_line" -ge "$fuse_line" ]]; then + echo "FAIL: drain step should appear before FUSE unmount" >&2 + exit 1 + fi +fi + +echo "PASS: $(basename "$0" .sh)" diff --git a/tests/02-lifecycle/test-startup-order.sh b/tests/02-lifecycle/test-startup-order.sh new file mode 100755 index 0000000..427d492 --- /dev/null +++ b/tests/02-lifecycle/test-startup-order.sh @@ -0,0 +1,27 @@ +#!/usr/bin/env bash +set -euo pipefail +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +source "$SCRIPT_DIR/../harness/helpers.sh" +source "$SCRIPT_DIR/../harness/mock-nas.sh" + +require_root +setup_test_env +trap teardown_test_env EXIT + +# Start the mock NAS so rclone can connect via SFTP +start_mock_nas + +# Generate a default config pointing at the mock NAS +gen_config + +# Start warpgate and wait for full startup +start_warpgate +wait_for_log_line "Supervision active" 60 + +# Verify the five startup phases appear in the correct order +assert_log_order "Preflight checks" "Starting rclone mount" +assert_log_order "Starting rclone mount" "Mount ready" +assert_log_order "Mount ready" "Starting protocol services" +assert_log_order "Starting protocol services" "Supervision active" + +echo "PASS: $(basename "$0" .sh)" diff --git a/tests/03-signal/test-double-sigterm.sh b/tests/03-signal/test-double-sigterm.sh new file mode 100755 index 0000000..a8d0430 --- /dev/null +++ b/tests/03-signal/test-double-sigterm.sh @@ -0,0 +1,48 @@ +#!/usr/bin/env bash +set -euo pipefail +SCRIPT_DIR="$(cd "$(dirname "$0")" && 
pwd)"
+source "$SCRIPT_DIR/../harness/helpers.sh"
+source "$SCRIPT_DIR/../harness/mock-nas.sh"
+
+require_root
+setup_test_env
+trap teardown_test_env EXIT
+
+# Start the mock NAS
+start_mock_nas
+
+# Generate a default config
+gen_config
+
+# Start warpgate and wait for full startup
+start_warpgate
+wait_for_log_line "Supervision active" 60
+
+# Send SIGTERM twice in quick succession — the second signal should
+# not cause a crash or panic. The shutdown flag is set on first signal;
+# the second should be idempotent.
+kill -TERM "$WARPGATE_PID"
+sleep 0.5
+kill -TERM "$WARPGATE_PID" 2>/dev/null || true
+
+# Wait for the process to exit
+wait_for_exit "$WARPGATE_PID" 30
+
+# Verify exit code 0 (no crash from double signal)
+code=0
+wait "$WARPGATE_PID" 2>/dev/null || code=$?
+if [[ "$code" -ne 0 ]]; then
+  echo "FAIL: expected exit code 0, got $code" >&2
+  exit 1
+fi
+
+# Verify orderly shutdown occurred
+assert_log_contains "Signal received, shutting down"
+
+# Verify the FUSE mount was removed
+assert_not_mounted
+
+# Verify no orphan rclone processes remain
+assert_no_orphan_rclone
+
+echo "PASS: $(basename "$0" .sh)"
diff --git a/tests/03-signal/test-sigint.sh b/tests/03-signal/test-sigint.sh
new file mode 100755
index 0000000..a665750
--- /dev/null
+++ b/tests/03-signal/test-sigint.sh
@@ -0,0 +1,45 @@
+#!/usr/bin/env bash
+set -euo pipefail
+SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
+source "$SCRIPT_DIR/../harness/helpers.sh"
+source "$SCRIPT_DIR/../harness/mock-nas.sh"
+
+require_root
+setup_test_env
+trap teardown_test_env EXIT
+
+# Start the mock NAS so rclone can connect via SFTP
+start_mock_nas
+
+# Generate a default config pointing at the mock NAS
+gen_config
+
+# Start warpgate and wait for full startup
+start_warpgate
+wait_for_log_line "Supervision active" 60
+
+# Send SIGINT (Ctrl-C equivalent) to trigger orderly shutdown
+kill -INT "$WARPGATE_PID"
+
+# Wait for the process to exit
+wait_for_exit "$WARPGATE_PID" 30
+
+# Verify exit code 0 (graceful shutdown)
+code=0
+wait "$WARPGATE_PID" 2>/dev/null || code=$?
+if [[ "$code" -ne 0 ]]; then
+  echo "FAIL: expected exit code 0, got $code" >&2
+  exit 1
+fi
+
+# Verify shutdown log messages appeared in correct order
+assert_log_contains "Signal received, shutting down"
+assert_log_order "Signal received, shutting down" "Write-back queue drained"
+
+# Verify the FUSE mount was removed
+assert_not_mounted
+
+# Verify no orphan rclone processes remain
+assert_no_orphan_rclone
+
+echo "PASS: $(basename "$0" .sh)"
diff --git a/tests/03-signal/test-sigkill-orphans.sh b/tests/03-signal/test-sigkill-orphans.sh
new file mode 100755
index 0000000..eecbecf
--- /dev/null
+++ b/tests/03-signal/test-sigkill-orphans.sh
@@ -0,0 +1,72 @@
+#!/usr/bin/env bash
+set -euo pipefail
+SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
+source "$SCRIPT_DIR/../harness/helpers.sh"
+source "$SCRIPT_DIR/../harness/mock-nas.sh"
+
+require_root
+setup_test_env
+trap teardown_test_env EXIT
+
+# Start the mock NAS
+start_mock_nas
+
+# Generate a default config
+gen_config
+
+# Start warpgate and wait for full startup
+start_warpgate
+wait_for_log_line "Supervision active" 60
+
+# Record the PID before killing
+local_pid="$WARPGATE_PID"
+
+# Send SIGKILL — this bypasses all signal handlers.
+# The supervisor cannot perform orderly shutdown.
+# rclone and smbd were spawned with .process_group(0), so they are
+# in separate process groups and will NOT receive the SIGKILL.
+kill -KILL "$local_pid"
+
+# Wait for the killed process to be reaped
+sleep 2
+
+# Clear WARPGATE_PID so teardown does not try to SIGTERM a dead process
+WARPGATE_PID=""
+
+# Check for orphan rclone processes — since SIGKILL prevents cleanup,
+# orphans are expected. This test documents the behavior.
+orphan_rclone=$(pgrep -c -f "rclone.*$TEST_MOUNT" 2>/dev/null || true)
+orphan_smbd=$(pgrep -c -f "smbd.*$TEST_DIR" 2>/dev/null || true)
+
+if [[ "$orphan_rclone" -gt 0 ]]; then
+  echo "INFO: found $orphan_rclone orphan rclone process(es) after SIGKILL (expected — process group isolation)"
+fi
+
+if [[ "$orphan_smbd" -gt 0 ]]; then
+  echo "INFO: found $orphan_smbd orphan smbd process(es) after SIGKILL (expected — process group isolation)"
+fi
+
+# Clean up any orphan processes manually (don't rely on teardown alone)
+pkill -9 -f "rclone.*$TEST_MOUNT" 2>/dev/null || true
+pkill -9 -f "smbd.*$TEST_DIR" 2>/dev/null || true
+sleep 1
+
+# Unmount stale FUSE mount
+if mountpoint -q "$TEST_MOUNT" 2>/dev/null; then
+  fusermount3 -uz "$TEST_MOUNT" 2>/dev/null || fusermount -uz "$TEST_MOUNT" 2>/dev/null || true
+  sleep 1
+fi
+
+# Assert no orphan rclone/smbd processes remain after cleanup
+assert_no_orphan_rclone
+
+orphan_smbd_after=$(pgrep -c -f "smbd.*$TEST_DIR" 2>/dev/null || true)
+if [[ "$orphan_smbd_after" -gt 0 ]]; then
+  echo "FAIL: orphan smbd processes still remain after cleanup" >&2
+  exit 1
+fi
+
+# Assert mount is unmounted
+assert_not_mounted
+
+echo "PASS: $(basename "$0" .sh)"
diff --git a/tests/03-signal/test-sigterm-dirty-files.sh b/tests/03-signal/test-sigterm-dirty-files.sh
new file mode 100755
index 0000000..53131e1
--- /dev/null
+++ b/tests/03-signal/test-sigterm-dirty-files.sh
@@ -0,0 +1,61 @@
+#!/usr/bin/env bash
+set -euo pipefail
+SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
+source "$SCRIPT_DIR/../harness/helpers.sh"
+source "$SCRIPT_DIR/../harness/mock-nas.sh"
+
+require_root
+setup_test_env
+trap teardown_test_env EXIT
+
+# Start the mock NAS
+start_mock_nas
+
+# Generate config with a long write-back delay so files stay dirty
+gen_config write_back=30s
+
+# Start warpgate and wait for mount + RC API
+start_warpgate
+wait_for_mount
+wait_for_rc_api
+
+# Write a file through the FUSE mount — it will be cached locally
+# but not yet written 
back to the NAS due to the 30s write-back delay +echo "dirty-data" > "$TEST_MOUNT/dirty-test.txt" + +# Allow a moment for VFS to register the dirty file +sleep 2 + +# Verify the file is counted as dirty (not yet written back) +dirty=$(get_dirty_count) +if [[ "$dirty" -lt 1 ]]; then + echo "FAIL: expected dirty count > 0, got $dirty" >&2 + exit 1 +fi + +# Send SIGTERM — the shutdown should drain the write-back queue +# before unmounting and exiting +kill -TERM "$WARPGATE_PID" + +# Wait for exit — drain may take a moment +wait_for_exit "$WARPGATE_PID" 60 + +# Verify the shutdown drained the write-back queue +assert_log_contains "Signal received, shutting down" +assert_log_contains "Waiting for write-back queue" +assert_log_contains "Write-back queue drained" + +# Verify the dirty file was flushed to the NAS before exit +assert_file_exists "$NAS_ROOT/dirty-test.txt" + +# Verify the content matches what we wrote +actual=$(cat "$NAS_ROOT/dirty-test.txt") +if [[ "$actual" != "dirty-data" ]]; then + echo "FAIL: NAS file content mismatch: expected 'dirty-data', got '$actual'" >&2 + exit 1 +fi + +# Verify the FUSE mount was removed +assert_not_mounted + +echo "PASS: $(basename "$0" .sh)" diff --git a/tests/03-signal/test-sigterm-during-mount.sh b/tests/03-signal/test-sigterm-during-mount.sh new file mode 100755 index 0000000..6147775 --- /dev/null +++ b/tests/03-signal/test-sigterm-during-mount.sh @@ -0,0 +1,33 @@ +#!/usr/bin/env bash +set -euo pipefail +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +source "$SCRIPT_DIR/../harness/helpers.sh" +source "$SCRIPT_DIR/../harness/mock-nas.sh" + +# No require_root: this test uses an unreachable host, no mock NAS needed +setup_test_env +trap teardown_test_env EXIT + +# Generate config with an unreachable NAS host (RFC 5737 TEST-NET) +# so warpgate blocks in the mount phase waiting for SFTP connection +gen_config nas_host=192.0.2.1 + +# Start warpgate — it will try to connect to the unreachable host +start_warpgate + +# Give it time 
to enter the mount-wait phase +sleep 2 + +# Send SIGTERM while still waiting for mount +kill -TERM "$WARPGATE_PID" + +# Should exit quickly since it never completed mount +wait_for_exit "$WARPGATE_PID" 10 + +# Verify no orphan rclone processes remain +assert_no_orphan_rclone + +# Verify the mount point is not mounted (it never was) +assert_not_mounted + +echo "PASS: $(basename "$0" .sh)" diff --git a/tests/03-signal/test-sigterm-during-warmup.sh b/tests/03-signal/test-sigterm-during-warmup.sh new file mode 100755 index 0000000..0489ca0 --- /dev/null +++ b/tests/03-signal/test-sigterm-during-warmup.sh @@ -0,0 +1,44 @@ +#!/usr/bin/env bash +set -euo pipefail +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +source "$SCRIPT_DIR/../harness/helpers.sh" +source "$SCRIPT_DIR/../harness/mock-nas.sh" + +require_root +setup_test_env +trap teardown_test_env EXIT + +# Start the mock NAS +start_mock_nas + +# Create large test files on the NAS to make warmup take time +for i in $(seq 1 10); do + create_test_file "warmup/file-${i}.dat" 1024 +done + +# Generate config with auto-warmup enabled and a rule matching our files +gen_config warmup_auto=true \ + "warmup.rules=[[warmup.rules]] +path = \"warmup/\"" + +# Start warpgate and wait for warmup to begin +start_warpgate +wait_for_log_line "Running auto-warmup" 60 + +# Immediately send SIGTERM to interrupt warmup +kill -TERM "$WARPGATE_PID" + +# Wait for the process to exit cleanly +wait_for_exit "$WARPGATE_PID" 30 + +# Verify clean exit — signal handler should stop warmup and proceed +# through orderly shutdown +assert_log_contains "Signal received, shutting down" + +# Verify the FUSE mount was removed +assert_not_mounted + +# Verify no orphan rclone processes remain +assert_no_orphan_rclone + +echo "PASS: $(basename "$0" .sh)" diff --git a/tests/03-signal/test-sigterm.sh b/tests/03-signal/test-sigterm.sh new file mode 100755 index 0000000..0545846 --- /dev/null +++ b/tests/03-signal/test-sigterm.sh @@ -0,0 +1,45 @@ +#!/usr/bin/env 
bash
+set -euo pipefail
+SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
+source "$SCRIPT_DIR/../harness/helpers.sh"
+source "$SCRIPT_DIR/../harness/mock-nas.sh"
+
+require_root
+setup_test_env
+trap teardown_test_env EXIT
+
+# Start the mock NAS so rclone can connect via SFTP
+start_mock_nas
+
+# Generate a default config pointing at the mock NAS
+gen_config
+
+# Start warpgate and wait for full startup
+start_warpgate
+wait_for_log_line "Supervision active" 60
+
+# Send SIGTERM to trigger orderly shutdown
+kill -TERM "$WARPGATE_PID"
+
+# Wait for the process to exit
+wait_for_exit "$WARPGATE_PID" 30
+
+# Verify exit code 0 (graceful shutdown)
+code=0
+wait "$WARPGATE_PID" 2>/dev/null || code=$?
+if [[ "$code" -ne 0 ]]; then
+  echo "FAIL: expected exit code 0, got $code" >&2
+  exit 1
+fi
+
+# Verify shutdown log messages appeared in correct order
+assert_log_contains "Signal received, shutting down"
+assert_log_order "Signal received, shutting down" "Write-back queue drained"
+
+# Verify the FUSE mount was removed
+assert_not_mounted
+
+# Verify no orphan rclone processes remain
+assert_no_orphan_rclone
+
+echo "PASS: $(basename "$0" .sh)"
diff --git a/tests/04-supervision/test-backoff-delay.sh b/tests/04-supervision/test-backoff-delay.sh
new file mode 100755
index 0000000..ff7dfc3
--- /dev/null
+++ b/tests/04-supervision/test-backoff-delay.sh
@@ -0,0 +1,63 @@
+#!/usr/bin/env bash
+set -euo pipefail
+SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
+source "$SCRIPT_DIR/../harness/helpers.sh"
+source "$SCRIPT_DIR/../harness/mock-nas.sh"
+
+require_root
+setup_test_env
+trap teardown_test_env EXIT
+
+# Start the mock NAS so rclone can connect via SFTP
+start_mock_nas
+
+# Generate a default config pointing at the mock NAS
+gen_config
+
+# Start warpgate and wait for full startup
+start_warpgate
+wait_for_log_line "Supervision active" 60
+
+# --- Kill #1: expect backoff 2s, counter 1/3 ---
+
+smbd_pid=$(pgrep -f "smbd.*--foreground" || true)
+if [[ -z "$smbd_pid" ]]; then
+  echo "FAIL: smbd not found before kill #1" >&2
+  exit 1
+fi
+
+kill "$smbd_pid"
+wait_for_log_line "Restarting smbd in 2s (1/3)" 15
+assert_log_contains "Restarting smbd in 2s (1/3)"
+
+# Wait for restart to complete (2s backoff + margin)
+sleep 4
+
+# --- Kill #2: expect backoff 4s, counter 2/3 ---
+
+smbd_pid=$(pgrep -f "smbd.*--foreground" || true)
+if [[ -z "$smbd_pid" ]]; then
+  echo "FAIL: smbd not found before kill #2" >&2
+  exit 1
+fi
+
+kill "$smbd_pid"
+wait_for_log_line "Restarting smbd in 4s (2/3)" 15
+assert_log_contains "Restarting smbd in 4s (2/3)"
+
+# Wait for restart to complete (4s backoff + margin)
+sleep 6
+
+# --- Kill #3: expect backoff 6s, counter 3/3 ---
+
+smbd_pid=$(pgrep -f "smbd.*--foreground" || true)
+if [[ -z "$smbd_pid" ]]; then
+  echo "FAIL: smbd not found before kill #3" >&2
+  exit 1
+fi
+
+kill "$smbd_pid"
+wait_for_log_line "Restarting smbd in 6s (3/3)" 15
+assert_log_contains "Restarting smbd in 6s (3/3)"
+
+echo "PASS: $(basename "$0" .sh)"
diff --git a/tests/04-supervision/test-independent-trackers.sh b/tests/04-supervision/test-independent-trackers.sh
new file mode 100755
index 0000000..c42fe16
--- /dev/null
+++ b/tests/04-supervision/test-independent-trackers.sh
@@ -0,0 +1,75 @@
+#!/usr/bin/env bash
+set -euo pipefail
+SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
+source "$SCRIPT_DIR/../harness/helpers.sh"
+source "$SCRIPT_DIR/../harness/mock-nas.sh"
+
+require_root
+setup_test_env
+trap teardown_test_env EXIT
+
+# Start the mock NAS so rclone can connect via SFTP
+start_mock_nas
+
+# Generate config with WebDAV enabled alongside SMB
+gen_config enable_webdav=true
+
+# Start warpgate and wait for full startup
+start_warpgate
+wait_for_log_line "Supervision active" 60
+
+# --- Kill smbd and wait for its independent restart ---
+
+smbd_pid=$(pgrep -f "smbd.*--foreground" || true)
+if [[ -z "$smbd_pid" ]]; then
+  echo "FAIL: smbd not found" >&2
+  exit 1
+fi
+
+kill "$smbd_pid"
+wait_for_log_line "Restarting smbd" 15
+sleep 5
+
+# Verify smbd restarted 
+new_smbd_pid=$(pgrep -f "smbd.*--foreground" || true)
+if [[ -z "$new_smbd_pid" ]]; then
+  echo "FAIL: smbd did not restart" >&2
+  exit 1
+fi
+
+# --- Kill WebDAV and wait for its independent restart ---
+
+webdav_pid=$(pgrep -f "rclone serve webdav" || true)
+if [[ -z "$webdav_pid" ]]; then
+  echo "FAIL: WebDAV process not found" >&2
+  exit 1
+fi
+
+kill "$webdav_pid"
+wait_for_log_line "Restarting WebDAV" 15
+sleep 5
+
+# Verify WebDAV restarted
+new_webdav_pid=$(pgrep -f "rclone serve webdav" || true)
+if [[ -z "$new_webdav_pid" ]]; then
+  echo "FAIL: WebDAV did not restart" >&2
+  exit 1
+fi
+
+# --- Verify both restart events were logged independently ---
+
+assert_log_contains "Restarting smbd"
+assert_log_contains "Restarting WebDAV"
+
+# Verify both services are running simultaneously after their restarts
+if ! pgrep -f "smbd.*--foreground" > /dev/null; then
+  echo "FAIL: smbd not running after WebDAV restart" >&2
+  exit 1
+fi
+
+if ! pgrep -f "rclone serve webdav" > /dev/null; then
+  echo "FAIL: WebDAV not running after smbd restart" >&2
+  exit 1
+fi
+
+echo "PASS: $(basename "$0" .sh)"
diff --git a/tests/04-supervision/test-max-restarts.sh b/tests/04-supervision/test-max-restarts.sh
new file mode 100755
index 0000000..50753e2
--- /dev/null
+++ b/tests/04-supervision/test-max-restarts.sh
@@ -0,0 +1,53 @@
+#!/usr/bin/env bash
+set -euo pipefail
+SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
+source "$SCRIPT_DIR/../harness/helpers.sh"
+source "$SCRIPT_DIR/../harness/mock-nas.sh"
+
+require_root
+setup_test_env
+trap teardown_test_env EXIT
+
+# Start the mock NAS so rclone can connect via SFTP
+start_mock_nas
+
+# Generate a default config pointing at the mock NAS
+gen_config
+
+# Start warpgate and wait for full startup
+start_warpgate
+wait_for_log_line "Supervision active" 60
+
+# Kill smbd 4 times in rapid succession.
+# MAX_RESTARTS = 3, so after the 3rd restart attempt + 4th kill,
+# the supervisor should give up. 
+for i in 1 2 3 4; do + smbd_pid=$(pgrep -f "smbd.*--foreground" || true) + if [[ -z "$smbd_pid" ]]; then + # smbd may already be gone after exceeding max restarts + break + fi + + kill "$smbd_pid" + + if [[ "$i" -lt 4 ]]; then + # Wait for restart between kills (backoff: 2s, 4s, 6s + margin) + local_wait=$((i * 2 + 2)) + sleep "$local_wait" + fi +done + +# Wait for the supervisor to log the "exceeded max restarts" message +wait_for_log_line "exceeded max restarts" 30 + +# Verify the full give-up message +assert_log_contains "smbd exceeded max restarts (3), giving up" + +# Verify warpgate itself is still running (smbd failure does not cause +# full shutdown -- only rclone mount death does that) +if ! kill -0 "$WARPGATE_PID" 2>/dev/null; then + echo "FAIL: warpgate exited unexpectedly after smbd max restarts" >&2 + exit 1 +fi + +echo "PASS: $(basename "$0" .sh)" diff --git a/tests/04-supervision/test-rclone-death-shutdown.sh b/tests/04-supervision/test-rclone-death-shutdown.sh new file mode 100755 index 0000000..55babee --- /dev/null +++ b/tests/04-supervision/test-rclone-death-shutdown.sh @@ -0,0 +1,51 @@ +#!/usr/bin/env bash +set -euo pipefail +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +source "$SCRIPT_DIR/../harness/helpers.sh" +source "$SCRIPT_DIR/../harness/mock-nas.sh" + +require_root +setup_test_env +trap teardown_test_env EXIT + +# Start the mock NAS so rclone can connect via SFTP +start_mock_nas + +# Generate a default config pointing at the mock NAS +gen_config + +# Start warpgate and wait for full startup +start_warpgate +wait_for_log_line "Supervision active" 60 + +# Find rclone mount PID +rclone_pid=$(pgrep -f "rclone mount.*$TEST_MOUNT") +if [[ -z "$rclone_pid" ]]; then + echo "FAIL: rclone mount process not found" >&2 + exit 1 +fi + +# Kill rclone -- this should trigger full warpgate shutdown +kill "$rclone_pid" + +# Wait for warpgate to exit (rclone death causes full shutdown) +wait_for_exit "$WARPGATE_PID" 30 + +# Verify the log contains the 
expected critical error message +assert_log_contains "rclone mount exited unexpectedly" + +# Verify warpgate exited with non-zero code (it bails on rclone death) +if kill -0 "$WARPGATE_PID" 2>/dev/null; then + echo "FAIL: warpgate is still running after rclone death" >&2 + exit 1 +fi + +# Collect exit code (non-zero expected) +exit_code=0 +wait "$WARPGATE_PID" 2>/dev/null || exit_code=$? +if [[ "$exit_code" -eq 0 ]]; then + echo "FAIL: expected non-zero exit code, got 0" >&2 + exit 1 +fi + +echo "PASS: $(basename "$0" .sh)" diff --git a/tests/04-supervision/test-smbd-restart.sh b/tests/04-supervision/test-smbd-restart.sh new file mode 100755 index 0000000..fcab97b --- /dev/null +++ b/tests/04-supervision/test-smbd-restart.sh @@ -0,0 +1,66 @@ +#!/usr/bin/env bash +set -euo pipefail +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +source "$SCRIPT_DIR/../harness/helpers.sh" +source "$SCRIPT_DIR/../harness/mock-nas.sh" + +require_root +setup_test_env +trap teardown_test_env EXIT + +# Start the mock NAS so rclone can connect via SFTP +start_mock_nas + +# Generate a default config pointing at the mock NAS +gen_config + +# Start warpgate and wait for full startup +start_warpgate +wait_for_log_line "Supervision active" 60 + +# Find smbd PID +smbd_pid=$(pgrep -f "smbd.*--foreground") +if [[ -z "$smbd_pid" ]]; then + echo "FAIL: smbd not found" >&2 + exit 1 +fi + +# Kill smbd to trigger supervisor restart +kill "$smbd_pid" + +# Wait for the supervisor to detect the exit and schedule a restart +wait_for_log_line "Restarting smbd" 15 + +# Give the restart time to complete +sleep 5 + +# Verify smbd is running again with a new PID +new_smbd_pid=$(pgrep -f "smbd.*--foreground") +if [[ -z "$new_smbd_pid" ]]; then + echo "FAIL: smbd did not restart" >&2 + exit 1 +fi + +if [[ "$new_smbd_pid" -eq "$smbd_pid" ]]; then + echo "FAIL: smbd PID did not change (expected new process)" >&2 + exit 1 +fi + +# Verify the log shows the correct restart message with backoff and counter 
+assert_log_contains "Restarting smbd in 2s (1/3)" + +# Test SMB connectivity to the restarted smbd +if command -v smbclient > /dev/null 2>&1; then + smb_output=$(smbclient -L "127.0.0.1" -N 2>&1) || true + if echo "$smb_output" | grep -qi "Sharename\|nas\|IPC"; then + echo "INFO: SMB connectivity verified after restart" + else + echo "FAIL: smbclient could not list shares after smbd restart" >&2 + echo " output: $smb_output" >&2 + exit 1 + fi +else + echo "INFO: smbclient not available, skipping SMB connectivity check" +fi + +echo "PASS: $(basename "$0" .sh)" diff --git a/tests/04-supervision/test-stable-period-reset.sh b/tests/04-supervision/test-stable-period-reset.sh new file mode 100755 index 0000000..70d4f44 --- /dev/null +++ b/tests/04-supervision/test-stable-period-reset.sh @@ -0,0 +1,69 @@ +#!/usr/bin/env bash +set -euo pipefail +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +source "$SCRIPT_DIR/../harness/helpers.sh" +source "$SCRIPT_DIR/../harness/mock-nas.sh" + +require_long_tests +require_root +setup_test_env +trap teardown_test_env EXIT + +# Start the mock NAS so rclone can connect via SFTP +start_mock_nas + +# Generate a default config pointing at the mock NAS +gen_config + +# Start warpgate and wait for full startup +start_warpgate +wait_for_log_line "Supervision active" 60 + +# Kill smbd once -- this should be restart attempt 1/3 +smbd_pid=$(pgrep -f "smbd.*--foreground") +if [[ -z "$smbd_pid" ]]; then + echo "FAIL: smbd not found" >&2 + exit 1 +fi + +kill "$smbd_pid" +wait_for_log_line "Restarting smbd in 2s (1/3)" 15 + +# Wait for the restart to complete +sleep 5 + +# Verify smbd is back +if ! pgrep -f "smbd.*--foreground" > /dev/null; then + echo "FAIL: smbd did not restart after first kill" >&2 + exit 1 +fi + +# Sleep longer than RESTART_STABLE_PERIOD (300s) to reset the counter +echo "Waiting 310s for stable period to reset restart counter..." 
+sleep 310 + +# Kill smbd again -- counter should have reset, so this is 1/3 again +smbd_pid=$(pgrep -f "smbd.*--foreground") +if [[ -z "$smbd_pid" ]]; then + echo "FAIL: smbd not found after stable period" >&2 + exit 1 +fi + +kill "$smbd_pid" +wait_for_log_line "Restarting smbd in 2s (1/3)" 15 + +# Count occurrences of "1/3" -- should appear twice (once per kill). +# Note: grep -c prints "0" even when nothing matches (it just exits 1), +# so use '|| true' -- '|| echo 0' would produce a two-line "0\n0" value. +count=$(grep -c "Restarting smbd in 2s (1/3)" "$TEST_DIR/warpgate.log" || true) +if [[ "$count" -lt 2 ]]; then + echo "FAIL: expected at least 2 occurrences of '1/3' but got $count" >&2 + echo " (counter did not reset after stable period)" >&2 + exit 1 +fi + +# Verify there is no "2/3" message (which would mean the counter was NOT reset) +if grep -q "Restarting smbd in 4s (2/3)" "$TEST_DIR/warpgate.log" 2>/dev/null; then + echo "FAIL: log contains '2/3' -- counter did not reset after stable period" >&2 + exit 1 +fi + +echo "PASS: $(basename "$0" .sh)" diff --git a/tests/05-cache/test-cache-full-dirty.sh b/tests/05-cache/test-cache-full-dirty.sh new file mode 100755 index 0000000..e16eb83 --- /dev/null +++ b/tests/05-cache/test-cache-full-dirty.sh @@ -0,0 +1,77 @@ +#!/usr/bin/env bash +# Test: dirty files are preserved when cache disk is nearly full +# +# Uses a tiny loopback-mounted filesystem as the cache disk to simulate +# a cache-full scenario. Verifies that dirty files are not lost when +# the cache disk has no free space for eviction.
+set -euo pipefail +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +source "$SCRIPT_DIR/../harness/helpers.sh" +source "$SCRIPT_DIR/../harness/mock-nas.sh" + +require_root +setup_test_env + +# Set up a tiny 5 MB cache disk (loopback ext4) +setup_small_cache_disk 5 + +trap 'teardown_small_cache_disk; teardown_test_env' EXIT + +# Start mock NAS +start_mock_nas + +# Generate config with small cache limit and a very long write-back +# so dirty files remain dirty throughout the test +gen_config cache_max_size=4M write_back=300s + +# Start warpgate and wait for readiness +start_warpgate +wait_for_mount +wait_for_rc_api + +# Sever the network so write-back cannot complete +inject_network_down + +# Write several small files through the mount to fill the cache disk. +# Each file is ~256 KB, writing enough to nearly fill the 5 MB disk. +for i in $(seq 1 8); do + dd if=/dev/urandom bs=1K count=256 2>/dev/null | \ + dd of="$TEST_MOUNT/fill-${i}.dat" bs=1K 2>/dev/null + sleep 0.5 +done + +# Allow VFS time to register all dirty writes +sleep 3 + +# Verify that dirty files exist and are tracked +dirty=$(get_dirty_count) +if [[ "$dirty" -lt 1 ]]; then + echo "FAIL: expected dirty count > 0, got $dirty" >&2 + inject_network_up + exit 1 +fi + +# Verify each previously written dirty file is still accessible +for i in $(seq 1 8); do + if [[ ! -f "$TEST_MOUNT/fill-${i}.dat" ]]; then + echo "FAIL: dirty file fill-${i}.dat no longer accessible" >&2 + inject_network_up + exit 1 + fi +done + +# Attempt a 9th write — cache disk should be full, so this should fail +# with ENOSPC or a write error. 
+write_failed=0 +dd if=/dev/urandom bs=1K count=256 2>/dev/null | \ + dd of="$TEST_MOUNT/fill-9.dat" bs=1K 2>/dev/null || write_failed=1 + +if [[ "$write_failed" -eq 0 ]]; then + echo "WARN: 9th write succeeded — cache disk may not be truly full" >&2 + echo " (expected ENOSPC or write error when cache disk is exhausted)" >&2 +fi + +# Restore the network for clean teardown +inject_network_up + +echo "PASS: $(basename "$0" .sh)" diff --git a/tests/05-cache/test-cache-hit-offline.sh b/tests/05-cache/test-cache-hit-offline.sh new file mode 100755 index 0000000..f16f058 --- /dev/null +++ b/tests/05-cache/test-cache-hit-offline.sh @@ -0,0 +1,60 @@ +#!/usr/bin/env bash +# Test: cached file remains readable after network goes down +# +# Verifies that once a file has been pulled into the VFS cache, it can +# be read even when the connection to the remote NAS is severed. +set -euo pipefail +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +source "$SCRIPT_DIR/../harness/helpers.sh" +source "$SCRIPT_DIR/../harness/mock-nas.sh" + +require_root +setup_test_env +trap teardown_test_env EXIT + +# Start mock NAS and create a 1 MB test file +start_mock_nas +nas_create_file "landscape.jpg" 1024 + +# Generate default config +gen_config + +# Start warpgate and wait for readiness +start_warpgate +wait_for_mount +wait_for_rc_api + +# Read the file through the FUSE mount to cache it +cat "$TEST_MOUNT/landscape.jpg" > /dev/null +assert_cached "landscape.jpg" + +# Save a checksum for verification after network goes down +expected_cksum=$(md5sum "$TEST_MOUNT/landscape.jpg" | awk '{print $1}') + +# Sever the network link to the mock NAS +inject_network_down + +# Read the file again — should succeed from local cache. +# Time the read and verify it completes quickly (< 2 seconds for 1 MB). 
+read_start=$SECONDS +actual_cksum=$(md5sum "$TEST_MOUNT/landscape.jpg" | awk '{print $1}') +read_elapsed=$(( SECONDS - read_start )) + +if [[ "$actual_cksum" != "$expected_cksum" ]]; then + echo "FAIL: checksum mismatch after network down" >&2 + echo " expected: $expected_cksum" >&2 + echo " actual: $actual_cksum" >&2 + inject_network_up + exit 1 +fi + +if [[ "$read_elapsed" -gt 2 ]]; then + echo "FAIL: offline cache read took ${read_elapsed}s (expected < 2s for 1 MB)" >&2 + inject_network_up + exit 1 +fi + +# Restore the network +inject_network_up + +echo "PASS: $(basename "$0" .sh)" diff --git a/tests/05-cache/test-cache-miss-pull.sh b/tests/05-cache/test-cache-miss-pull.sh new file mode 100755 index 0000000..bbee9f5 --- /dev/null +++ b/tests/05-cache/test-cache-miss-pull.sh @@ -0,0 +1,39 @@ +#!/usr/bin/env bash +# Test: cache miss triggers remote file pull into local VFS cache +# +# Verifies that reading a file through the FUSE mount for the first time +# fetches it from the remote NAS via rclone and stores it in $CACHE_DIR/vfs/nas/. 
+set -euo pipefail +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +source "$SCRIPT_DIR/../harness/helpers.sh" +source "$SCRIPT_DIR/../harness/mock-nas.sh" + +require_root +setup_test_env +trap teardown_test_env EXIT + +# Start mock NAS and place a 1 MB raw photo file on it +start_mock_nas +nas_create_file "photo.cr3" 1024 + +# Generate default config pointing at mock NAS +gen_config + +# Start warpgate and wait for mount + RC API readiness +start_warpgate +wait_for_mount +wait_for_rc_api + +# Verify the file is NOT cached yet +assert_not_cached "photo.cr3" + +# Read the file through the FUSE mount (triggers cache-miss pull) +cat "$TEST_MOUNT/photo.cr3" > /dev/null + +# Verify the content matches the source file on the NAS +diff "$TEST_MOUNT/photo.cr3" "$NAS_ROOT/photo.cr3" + +# Verify the file is now present in the VFS cache +assert_cached "photo.cr3" + +echo "PASS: $(basename "$0" .sh)" diff --git a/tests/05-cache/test-dirty-no-evict.sh b/tests/05-cache/test-dirty-no-evict.sh new file mode 100755 index 0000000..d0e88de --- /dev/null +++ b/tests/05-cache/test-dirty-no-evict.sh @@ -0,0 +1,80 @@ +#!/usr/bin/env bash +# Test: dirty files are protected from LRU eviction +# +# Verifies that when the cache is under space pressure, dirty (unwritten) +# files are not evicted by the LRU policy. Clean cached files may be +# evicted, but dirty files must survive. 
+set -euo pipefail +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +source "$SCRIPT_DIR/../harness/helpers.sh" +source "$SCRIPT_DIR/../harness/mock-nas.sh" + +require_root +setup_test_env +trap teardown_test_env EXIT + +# Start mock NAS and pre-create 5 x 1 MB files for LRU pressure +start_mock_nas +for i in $(seq 1 5); do + nas_create_file "filler-${i}.bin" 1024 +done + +# Generate config with small cache (5 MB) and a very long write-back +# so dirty files remain dirty throughout the test +gen_config cache_max_size=5M write_back=300s + +# Start warpgate and wait for readiness +start_warpgate +wait_for_mount +wait_for_rc_api + +# Sever the network BEFORE writing the dirty file to prevent write-back +# from racing with LRU eviction — the file must stay dirty throughout. +inject_network_down + +# Write a 1 MB dirty file through the mount (stays dirty due to 300s delay +# AND network being down, so write-back cannot succeed) +dd if=/dev/urandom bs=1K count=1024 2>/dev/null | \ + dd of="$TEST_MOUNT/important-edit.bin" bs=1K 2>/dev/null + +# Allow VFS to register the dirty write +sleep 2 + +# Confirm the file is dirty +dirty_before=$(get_dirty_count) +if [[ "$dirty_before" -lt 1 ]]; then + echo "FAIL: expected dirty count > 0 after write, got $dirty_before" >&2 + exit 1 +fi + +# Now read the 5 x 1 MB filler files through the mount to create LRU +# pressure. With a 5 MB cache limit, evictions must happen, but the +# dirty file should be protected. +for i in $(seq 1 5); do + cat "$TEST_MOUNT/filler-${i}.bin" > /dev/null +done + +# Allow time for LRU eviction to run +sleep 3 + +# The dirty file must still be present and dirty +dirty_after=$(get_dirty_count) +if [[ "$dirty_after" -lt 1 ]]; then + echo "FAIL: dirty file was evicted; dirty count dropped to $dirty_after" >&2 + exit 1 +fi + +# Verify the dirty file is still readable through the mount +if [[ ! 
-f "$TEST_MOUNT/important-edit.bin" ]]; then + echo "FAIL: dirty file no longer accessible through mount" >&2 + inject_network_up + exit 1 +fi + +# Verify the dirty file physically remains in the on-disk cache +assert_cached "important-edit.bin" + +# Restore network for clean teardown +inject_network_up + +echo "PASS: $(basename "$0" .sh)" diff --git a/tests/05-cache/test-warmup-cache-path.sh b/tests/05-cache/test-warmup-cache-path.sh new file mode 100755 index 0000000..ab09571 --- /dev/null +++ b/tests/05-cache/test-warmup-cache-path.sh @@ -0,0 +1,53 @@ +#!/usr/bin/env bash +# Test: VFS cache stores files at the expected filesystem path +# +# Verifies that when a file is read through the FUSE mount, it appears +# at $CACHE_DIR/vfs/nas/FILENAME — the exact path that warmup's +# is_cached logic checks to decide whether to skip a file. +set -euo pipefail +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +source "$SCRIPT_DIR/../harness/helpers.sh" +source "$SCRIPT_DIR/../harness/mock-nas.sh" + +require_root +setup_test_env +trap teardown_test_env EXIT + +# Start mock NAS and create files at various depths +start_mock_nas +nas_create_file_content "root-file.txt" "root-level" +nas_create_file_content "sub/nested-file.txt" "nested" +nas_create_file_content "sub/deep/deep-file.txt" "deep-nested" + +# Generate default config (remote_path="/") +gen_config + +# Start warpgate and wait for readiness +start_warpgate +wait_for_mount +wait_for_rc_api + +# Read each file through the FUSE mount to trigger caching +cat "$TEST_MOUNT/root-file.txt" > /dev/null +cat "$TEST_MOUNT/sub/nested-file.txt" > /dev/null +cat "$TEST_MOUNT/sub/deep/deep-file.txt" > /dev/null + +# Verify that each file is cached at the expected path under +# $CACHE_DIR/vfs/nas/ (since remote_path="/", no extra prefix) +assert_cached "root-file.txt" +assert_cached "sub/nested-file.txt" +assert_cached "sub/deep/deep-file.txt" + +# Also verify the actual on-disk paths exist and have correct content +assert_file_content 
"$CACHE_DIR/vfs/nas/root-file.txt" "root-level" +assert_file_content "$CACHE_DIR/vfs/nas/sub/nested-file.txt" "nested" +assert_file_content "$CACHE_DIR/vfs/nas/sub/deep/deep-file.txt" "deep-nested" + +# Test that assert_cached works on files placed directly into the cache +# directory (bypassing FUSE). This validates the cache path detection used +# by warmup's is_cached logic. +mkdir -p "$CACHE_DIR/vfs/nas" +echo -n "manually-placed" > "$CACHE_DIR/vfs/nas/manual-file.txt" +assert_cached "manual-file.txt" + +echo "PASS: $(basename "$0" .sh)" diff --git a/tests/05-cache/test-warmup-newer-than.sh b/tests/05-cache/test-warmup-newer-than.sh new file mode 100755 index 0000000..c526957 --- /dev/null +++ b/tests/05-cache/test-warmup-newer-than.sh @@ -0,0 +1,42 @@ +#!/usr/bin/env bash +# Test: warmup --newer-than filters files by modification time +# +# Verifies that warmup with --newer-than only caches files modified +# within the specified window, skipping older files. This maps to +# rclone lsf --max-age under the hood. 
+set -euo pipefail +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +source "$SCRIPT_DIR/../harness/helpers.sh" +source "$SCRIPT_DIR/../harness/mock-nas.sh" + +require_root +setup_test_env +trap teardown_test_env EXIT + +# Start mock NAS +start_mock_nas + +# Create an old file (timestamp: 2023-01-01) and a new file (now) +nas_create_file_content "old.txt" "old-content" +touch -t 202301010000 "$NAS_ROOT/old.txt" + +nas_create_file_content "new.txt" "new-content" + +# Generate default config +gen_config + +# Start warpgate and wait for readiness +start_warpgate +wait_for_mount +wait_for_rc_api + +# Run warmup with --newer-than 7d (only files modified in last 7 days) +run_warpgate_cmd warmup --newer-than 7d "" + +# Verify the new file is cached +assert_cached "new.txt" + +# Verify the old file is NOT cached (older than 7 days) +assert_not_cached "old.txt" + +echo "PASS: $(basename "$0" .sh)" diff --git a/tests/05-cache/test-warmup-skip-cached.sh b/tests/05-cache/test-warmup-skip-cached.sh new file mode 100755 index 0000000..1463138 --- /dev/null +++ b/tests/05-cache/test-warmup-skip-cached.sh @@ -0,0 +1,49 @@ +#!/usr/bin/env bash +# Test: warmup skips files that are already cached +# +# Verifies that running warmup a second time detects files already +# present in $CACHE_DIR/vfs/nas/... and skips re-reading them. The +# second run output should report all files as "skipped". 
+set -euo pipefail +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +source "$SCRIPT_DIR/../harness/helpers.sh" +source "$SCRIPT_DIR/../harness/mock-nas.sh" + +require_root +setup_test_env +trap teardown_test_env EXIT + +# Start mock NAS and create test files +start_mock_nas +nas_create_file "a.txt" 4 +nas_create_file "b.txt" 4 +nas_create_file "c.txt" 4 + +# Generate default config +gen_config + +# Start warpgate and wait for readiness +start_warpgate +wait_for_mount +wait_for_rc_api + +# First warmup — should cache all files +run_warpgate_cmd warmup "" > /dev/null 2>&1 + +# Verify all files are cached +assert_cached "a.txt" +assert_cached "b.txt" +assert_cached "c.txt" + +# Second warmup — should skip all files since they are already cached +output=$(run_warpgate_cmd warmup "" 2>&1) + +# Count the number of "skipped" occurrences (expect 3, one per file) +skipped_count=$(echo "$output" | grep -c "skipped" || true) +if [[ "$skipped_count" -lt 3 ]]; then + echo "FAIL: expected at least 3 files skipped, got $skipped_count" >&2 + echo " warmup output: $output" >&2 + exit 1 +fi + +echo "PASS: $(basename "$0" .sh)" diff --git a/tests/05-cache/test-warmup.sh b/tests/05-cache/test-warmup.sh new file mode 100755 index 0000000..e68105b --- /dev/null +++ b/tests/05-cache/test-warmup.sh @@ -0,0 +1,45 @@ +#!/usr/bin/env bash +# Test: warmup command pre-caches files from the remote NAS +# +# Verifies that `warpgate warmup` reads files through the mount to pull +# them into the VFS cache, so that subsequent access is local. 
+set -euo pipefail +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +source "$SCRIPT_DIR/../harness/helpers.sh" +source "$SCRIPT_DIR/../harness/mock-nas.sh" + +require_root +setup_test_env +trap teardown_test_env EXIT + +# Start mock NAS and create several test files +start_mock_nas +nas_create_file "photos/img001.jpg" 64 +nas_create_file "photos/img002.jpg" 64 +nas_create_file "photos/img003.jpg" 64 +nas_create_file "raw/dsc001.cr3" 128 + +# Generate default config +gen_config + +# Start warpgate and wait for readiness +start_warpgate +wait_for_mount +wait_for_rc_api + +# Verify files are not cached yet +assert_not_cached "photos/img001.jpg" +assert_not_cached "photos/img002.jpg" +assert_not_cached "photos/img003.jpg" +assert_not_cached "raw/dsc001.cr3" + +# Run warmup on the root path to cache everything +run_warpgate_cmd warmup "" + +# Verify all files are now in the VFS cache +assert_cached "photos/img001.jpg" +assert_cached "photos/img002.jpg" +assert_cached "photos/img003.jpg" +assert_cached "raw/dsc001.cr3" + +echo "PASS: $(basename "$0" .sh)" diff --git a/tests/05-cache/test-write-dirty.sh b/tests/05-cache/test-write-dirty.sh new file mode 100755 index 0000000..a03f324 --- /dev/null +++ b/tests/05-cache/test-write-dirty.sh @@ -0,0 +1,43 @@ +#!/usr/bin/env bash +# Test: writing through FUSE mount creates a dirty file in VFS +# +# Verifies that a file written through the mount point shows up as a +# pending upload (dirty) in the rclone VFS stats when write-back delay +# is long enough to observe it. 
+set -euo pipefail +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +source "$SCRIPT_DIR/../harness/helpers.sh" +source "$SCRIPT_DIR/../harness/mock-nas.sh" + +require_root +setup_test_env +trap teardown_test_env EXIT + +# Start mock NAS +start_mock_nas + +# Generate config with a long write-back delay so the file stays dirty +gen_config write_back=60s + +# Start warpgate and wait for readiness +start_warpgate +wait_for_mount +wait_for_rc_api + +# Write a file through the FUSE mount +echo "test-write" > "$TEST_MOUNT/written.txt" + +# Allow a moment for VFS to register the write +sleep 2 + +# Verify the file is counted as dirty (not yet written back) +dirty=$(get_dirty_count) +if [[ "$dirty" -lt 1 ]]; then + echo "FAIL: expected dirty count > 0, got $dirty" >&2 + exit 1 +fi + +# Verify the file is readable through the mount +assert_file_content "$TEST_MOUNT/written.txt" "test-write" + +echo "PASS: $(basename "$0" .sh)" diff --git a/tests/05-cache/test-writeback-complete.sh b/tests/05-cache/test-writeback-complete.sh new file mode 100755 index 0000000..474ba98 --- /dev/null +++ b/tests/05-cache/test-writeback-complete.sh @@ -0,0 +1,36 @@ +#!/usr/bin/env bash +# Test: write-back completes and file appears on NAS +# +# Verifies that after a file is written through the mount, the rclone VFS +# write-back mechanism flushes it to the remote NAS within the configured +# write-back delay, and the dirty count returns to zero. 
+set -euo pipefail +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +source "$SCRIPT_DIR/../harness/helpers.sh" +source "$SCRIPT_DIR/../harness/mock-nas.sh" + +require_root +setup_test_env +trap teardown_test_env EXIT + +# Start mock NAS +start_mock_nas + +# Generate config with a short write-back delay (2s) +gen_config write_back=2s + +# Start warpgate and wait for readiness +start_warpgate +wait_for_mount +wait_for_rc_api + +# Write a file through the FUSE mount +echo "writeback-test" > "$TEST_MOUNT/wb.txt" + +# Wait for the dirty count to return to zero (write-back complete) +wait_for_dirty_zero 30 + +# Verify the file arrived on the NAS with correct content +assert_file_content "$NAS_ROOT/wb.txt" "writeback-test" + +echo "PASS: $(basename "$0" .sh)" diff --git a/tests/06-writeback/test-concurrent-writes.sh b/tests/06-writeback/test-concurrent-writes.sh new file mode 100755 index 0000000..de29297 --- /dev/null +++ b/tests/06-writeback/test-concurrent-writes.sh @@ -0,0 +1,43 @@ +#!/usr/bin/env bash +set -euo pipefail +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +source "$SCRIPT_DIR/../harness/helpers.sh" +source "$SCRIPT_DIR/../harness/mock-nas.sh" + +# Test: concurrent writes are all written back correctly. +# 10 files written in parallel background subshells should all drain +# and arrive on the NAS with correct content. + +require_root +setup_test_env +trap teardown_test_env EXIT + +start_mock_nas + +gen_config write_back=2s transfers=4 + +start_warpgate +wait_for_mount +wait_for_rc_api + +# Write 10 files concurrently in background subshells +pids=() +for i in $(seq 1 10); do + (echo "concurrent-data-$i" > "$TEST_MOUNT/concurrent-$i.txt") & + pids+=($!) 
+done + +# Wait for all write subshells to complete +for pid in "${pids[@]}"; do + wait "$pid" +done + +# Wait for all dirty files to drain +wait_for_dirty_zero 60 + +# Verify all 10 files arrived on the NAS with correct content +for i in $(seq 1 10); do + assert_file_content "$NAS_ROOT/concurrent-$i.txt" "concurrent-data-$i" +done + +echo "PASS: $(basename "$0" .sh)" diff --git a/tests/06-writeback/test-drain-no-rc.sh b/tests/06-writeback/test-drain-no-rc.sh new file mode 100755 index 0000000..e727e73 --- /dev/null +++ b/tests/06-writeback/test-drain-no-rc.sh @@ -0,0 +1,51 @@ +#!/usr/bin/env bash +set -euo pipefail +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +source "$SCRIPT_DIR/../harness/helpers.sh" +source "$SCRIPT_DIR/../harness/mock-nas.sh" + +# Test: drain returns immediately when the RC API is unavailable. +# Specifically tests the drain path: write a dirty file with long write-back, +# kill rclone (removing the RC API), then send SIGTERM. The drain function +# should detect the RC API is gone and return immediately without hanging. + +require_root +setup_test_env +trap teardown_test_env EXIT + +start_mock_nas + +# Use a very long write-back delay so the file stays dirty +gen_config write_back=300s + +start_warpgate +wait_for_mount +wait_for_rc_api + +# Write a dirty file through the mount — it will stay dirty due to 300s delay +dd if=/dev/urandom bs=1K count=64 2>/dev/null | \ + dd of="$TEST_MOUNT/drain-test.dat" bs=1K 2>/dev/null + +# Allow VFS to register the dirty write +sleep 2 + +# Kill rclone directly — simulates an unexpected crash, removing the RC API +pkill -f "rclone mount.*$TEST_MOUNT" + +# Brief pause for the supervisor to detect rclone exit +sleep 1 + +# Send SIGTERM to trigger graceful shutdown with drain. +# The drain function should detect that the RC API is gone and return +# immediately rather than hanging for the drain timeout. 
+if kill -0 "$WARPGATE_PID" 2>/dev/null; then + kill -TERM "$WARPGATE_PID" +fi + +# Warpgate should exit quickly since drain cannot poll vfs/stats +wait_for_exit "$WARPGATE_PID" 30 + +# Verify warpgate noticed the unexpected rclone exit +assert_log_contains "rclone mount exited unexpectedly" + +echo "PASS: $(basename "$0" .sh)" diff --git a/tests/06-writeback/test-drain-timeout.sh b/tests/06-writeback/test-drain-timeout.sh new file mode 100755 index 0000000..465ec68 --- /dev/null +++ b/tests/06-writeback/test-drain-timeout.sh @@ -0,0 +1,43 @@ +#!/usr/bin/env bash +set -euo pipefail +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +source "$SCRIPT_DIR/../harness/helpers.sh" +source "$SCRIPT_DIR/../harness/mock-nas.sh" + +# Test: write-back drain times out when transfers cannot complete. +# Extreme network latency prevents the drain from finishing within the +# 300s timeout, so warpgate should log a timeout and exit anyway. +# This is a slow test (>300s) — requires WARPGATE_TEST_LONG=1. + +require_root +require_long_tests +setup_test_env +trap teardown_test_env EXIT + +start_mock_nas + +gen_config write_back=5s + +start_warpgate +wait_for_mount +wait_for_rc_api + +# Write a file so there is something to drain +echo "timeout-test-data" > "$TEST_MOUNT/timeout.txt" + +# Allow VFS to register the write +sleep 1 + +# Inject extreme latency — 10s per packet makes transfer effectively impossible +inject_latency 10000 + +# Send SIGTERM to initiate shutdown + drain +kill -TERM "$WARPGATE_PID" + +# The drain timeout is 300s; allow additional margin for shutdown +wait_for_exit "$WARPGATE_PID" 320 + +# Verify the drain timed out +assert_log_contains "write-back drain timed out" + +echo "PASS: $(basename "$0" .sh)" diff --git a/tests/06-writeback/test-offline-write-restore.sh b/tests/06-writeback/test-offline-write-restore.sh new file mode 100755 index 0000000..d206bd9 --- /dev/null +++ b/tests/06-writeback/test-offline-write-restore.sh @@ -0,0 +1,52 @@ +#!/usr/bin/env bash +set -euo 
pipefail +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +source "$SCRIPT_DIR/../harness/helpers.sh" +source "$SCRIPT_DIR/../harness/mock-nas.sh" + +# Test: files written while the network is down are uploaded once restored. +# Dirty files accumulate during the outage and drain automatically when +# connectivity returns. + +require_root +setup_test_env +trap teardown_test_env EXIT + +start_mock_nas + +gen_config write_back=2s + +start_warpgate +wait_for_mount +wait_for_rc_api + +# Sever the network — uploads will fail +inject_network_down + +# Write 5 files through the mount while offline +for i in $(seq 1 5); do + echo "offline-data-$i" > "$TEST_MOUNT/offline-$i.txt" +done + +# Allow VFS to register all writes +sleep 2 + +# All 5 files should be dirty (cannot upload) +dirty=$(get_dirty_count) +if [[ "$dirty" -lt 5 ]]; then + echo "FAIL: expected dirty count >= 5, got $dirty" >&2 + exit 1 +fi + +# Restore the network — rclone should resume uploads +inject_network_up + +# Wait for all dirty files to drain +wait_for_dirty_zero 120 + +# Verify all 5 files arrived on the NAS with correct content +for i in $(seq 1 5); do + assert_file_content "$NAS_ROOT/offline-$i.txt" "offline-data-$i" +done + +echo "PASS: $(basename "$0" .sh)" diff --git a/tests/06-writeback/test-shutdown-drain.sh b/tests/06-writeback/test-shutdown-drain.sh new file mode 100755 index 0000000..d599e8e --- /dev/null +++ b/tests/06-writeback/test-shutdown-drain.sh @@ -0,0 +1,49 @@ +#!/usr/bin/env bash +set -euo pipefail +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +source "$SCRIPT_DIR/../harness/helpers.sh" +source "$SCRIPT_DIR/../harness/mock-nas.sh" + +# Test: graceful shutdown drains the write-back queue before exiting. +# A file written through the FUSE mount with a long write-back delay should +# still arrive on the NAS after SIGTERM triggers the drain procedure. 
+ +require_root +setup_test_env +trap teardown_test_env EXIT + +start_mock_nas + +# Long write-back delay ensures the file stays dirty until drain +gen_config write_back=30s + +start_warpgate +wait_for_mount +wait_for_rc_api + +# Write a file through the FUSE mount +echo "drain-test-data" > "$TEST_MOUNT/drain.txt" + +# Allow VFS to register the write +sleep 1 + +# Verify the file is dirty (not yet written back) +dirty=$(get_dirty_count) +if [[ "$dirty" -lt 1 ]]; then + echo "FAIL: expected dirty count > 0, got $dirty" >&2 + exit 1 +fi + +# Send SIGTERM to trigger orderly shutdown with write-back drain +kill -TERM "$WARPGATE_PID" + +# Wait for exit — drain must flush the file before unmounting +wait_for_exit "$WARPGATE_PID" 60 + +# Verify the drain procedure was logged +assert_log_contains "Waiting for write-back queue" + +# Verify the file arrived on the NAS with correct content +assert_file_content "$NAS_ROOT/drain.txt" "drain-test-data" + +echo "PASS: $(basename "$0" .sh)" diff --git a/tests/06-writeback/test-writeback-delay-zero.sh b/tests/06-writeback/test-writeback-delay-zero.sh new file mode 100755 index 0000000..2bb8714 --- /dev/null +++ b/tests/06-writeback/test-writeback-delay-zero.sh @@ -0,0 +1,40 @@ +#!/usr/bin/env bash +set -euo pipefail +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +source "$SCRIPT_DIR/../harness/helpers.sh" +source "$SCRIPT_DIR/../harness/mock-nas.sh" + +# Test: write_back=0s causes immediate upload — no dirty files linger. +# With zero write-back delay, rclone should upload files as soon as they +# are closed, so the dirty count should reach 0 quickly. 
+ +require_root +setup_test_env +trap teardown_test_env EXIT + +start_mock_nas + +# Zero write-back delay — files are uploaded immediately +gen_config write_back=0s + +start_warpgate +wait_for_mount +wait_for_rc_api + +# Write a file through the FUSE mount +echo "instant-wb" > "$TEST_MOUNT/instant.txt" + +# Give time for the immediate write-back to complete +sleep 5 + +# Dirty count should be 0 — the file was already uploaded +dirty=$(get_dirty_count) +if [[ "$dirty" -ne 0 ]]; then + echo "FAIL: expected dirty count 0 with write_back=0s, got $dirty" >&2 + exit 1 +fi + +# Verify the file arrived on the NAS with correct content +assert_file_content "$NAS_ROOT/instant.txt" "instant-wb" + +echo "PASS: $(basename "$0" .sh)" diff --git a/tests/07-network/test-high-latency.sh b/tests/07-network/test-high-latency.sh new file mode 100755 index 0000000..258c89d --- /dev/null +++ b/tests/07-network/test-high-latency.sh @@ -0,0 +1,65 @@ +#!/usr/bin/env bash +# Test: high latency slows first read; cached second read is much faster +# +# Injects 500 ms of latency via tc netem, reads a 100 KB file (cold cache), +# then removes the latency and reads the same file again (warm cache). +# The second read should be significantly faster because it hits the VFS +# cache and skips the network round-trip. 
+set -euo pipefail +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +source "$SCRIPT_DIR/../harness/helpers.sh" +source "$SCRIPT_DIR/../harness/mock-nas.sh" + +require_root +setup_test_env +trap teardown_test_env EXIT + +# --- Arrange --- +start_mock_nas +nas_create_file "file.dat" 100 # 100 KB + +gen_config + +start_warpgate +wait_for_mount +wait_for_rc_api + +# --- Act --- +# Inject 500 ms latency on the host-side veth +inject_latency 500 + +# Time the first read (cold cache, over high-latency link) +time1_start=$(date +%s%N) +cat "$TEST_MOUNT/file.dat" > /dev/null +time1_end=$(date +%s%N) + +first_read_ns=$(( time1_end - time1_start )) + +# Remove latency injection +clear_network_injection + +# Time the second read (should come from local VFS cache) +time2_start=$(date +%s%N) +cat "$TEST_MOUNT/file.dat" > /dev/null +time2_end=$(date +%s%N) + +second_read_ns=$(( time2_end - time2_start )) + +# --- Assert --- +first_read_ms=$(( first_read_ns / 1000000 )) +second_read_ms=$(( second_read_ns / 1000000 )) + +echo " first read: ${first_read_ms} ms (cold cache, 500 ms netem delay)" +echo " second read: ${second_read_ms} ms (warm cache, no injection)" + +# The cached read should be at least 5x faster than the latency-impacted read. +# With 500 ms netem the first read is typically > 500 ms; a cached read is < 50 ms. 
+if [[ "$first_read_ms" -gt 0 ]] && [[ $(( second_read_ms * 5 )) -lt "$first_read_ms" ]]; then + echo " cache speedup confirmed (>5x)" +else + echo "FAIL: cached read was not significantly faster than the high-latency read" >&2 + echo " first_read_ms=$first_read_ms second_read_ms=$second_read_ms" >&2 + exit 1 +fi + +echo "PASS: $(basename "$0" .sh)" diff --git a/tests/07-network/test-network-jitter.sh b/tests/07-network/test-network-jitter.sh new file mode 100755 index 0000000..a077814 --- /dev/null +++ b/tests/07-network/test-network-jitter.sh @@ -0,0 +1,107 @@ +#!/usr/bin/env bash +# Test: warpgate survives 60 s of network jitter without crashing +# +# Runs a stress loop that writes a small file every 2 seconds while +# toggling the network up/down every 5 seconds. After the loop, restores +# the network and waits for the write-back queue to drain. Verifies that +# warpgate is still running and that all written files eventually reach the +# NAS. +set -euo pipefail +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +source "$SCRIPT_DIR/../harness/helpers.sh" +source "$SCRIPT_DIR/../harness/mock-nas.sh" + +require_root +require_long_tests # ~60 s runtime +setup_test_env +trap teardown_test_env EXIT + +# --- Arrange --- +start_mock_nas + +gen_config write_back=2s + +start_warpgate +wait_for_mount +wait_for_rc_api + +# --- Act: stress loop --- +DURATION=60 +start_time=$SECONDS +file_index=0 + +# Background writer: drop a file every 2 s +( + while [[ $(( SECONDS - start_time )) -lt $DURATION ]]; do + fname="jitter-${file_index}.txt" + echo "jitter-data-${file_index}" > "$TEST_MOUNT/$fname" 2>/dev/null || true + file_index=$(( file_index + 1 )) + sleep 2 + done + # Signal completion + echo "$file_index" > "$TEST_DIR/writer_count" +) & +writer_pid=$! 
+_BG_PIDS+=("$writer_pid") + +# Background toggler: flip network every 5 s +( + net_up=true + while [[ $(( SECONDS - start_time )) -lt $DURATION ]]; do + sleep 5 + if $net_up; then + inject_network_down 2>/dev/null || true + net_up=false + else + inject_network_up 2>/dev/null || true + net_up=true + fi + done +) & +toggler_pid=$! +_BG_PIDS+=("$toggler_pid") + +# Wait for both loops to finish +wait "$writer_pid" 2>/dev/null || true +wait "$toggler_pid" 2>/dev/null || true + +# --- Restore & drain --- +inject_network_up +sleep 2 + +# Read the count of files written +total_files=0 +if [[ -f "$TEST_DIR/writer_count" ]]; then + total_files=$(cat "$TEST_DIR/writer_count") +fi + +echo " files written during jitter loop: $total_files" + +# Wait for all dirty files to be written back +wait_for_dirty_zero 120 + +# --- Assert --- +# 1. Warpgate must still be running (no crash) +if ! is_warpgate_running; then + echo "FAIL: warpgate crashed during network jitter stress test" >&2 + warpgate_log >&2 + exit 1 +fi + +# 2. Every file created by the writer must have arrived on the NAS +missing=0 +for (( i = 0; i < total_files; i++ )); do + fname="jitter-${i}.txt" + if ! nas_file_exists "$fname"; then + echo " MISSING on NAS: $fname" >&2 + missing=$(( missing + 1 )) + fi +done + +if [[ "$missing" -gt 0 ]]; then + echo "FAIL: $missing of $total_files files did not reach the NAS" >&2 + exit 1 +fi + +echo " all $total_files files verified on NAS" +echo "PASS: $(basename "$0" .sh)" diff --git a/tests/07-network/test-offline-cached-reads.sh b/tests/07-network/test-offline-cached-reads.sh new file mode 100755 index 0000000..280570b --- /dev/null +++ b/tests/07-network/test-offline-cached-reads.sh @@ -0,0 +1,74 @@ +#!/usr/bin/env bash +# Test: all cached files remain readable while fully offline +# +# Creates 3 test files (100 KB each) on the NAS, reads them through the +# FUSE mount to warm the cache, severs the network, and reads all three +# again. 
Every read should succeed from the local VFS cache and the +# content should match the originals. +set -euo pipefail +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +source "$SCRIPT_DIR/../harness/helpers.sh" +source "$SCRIPT_DIR/../harness/mock-nas.sh" + +require_root +setup_test_env +trap teardown_test_env EXIT + +# --- Arrange --- +start_mock_nas +nas_create_file "alpha.dat" 100 +nas_create_file "bravo.dat" 100 +nas_create_file "charlie.dat" 100 + +# Record NAS-side checksums +cksum_alpha=$(nas_file_checksum "alpha.dat") +cksum_bravo=$(nas_file_checksum "bravo.dat") +cksum_charlie=$(nas_file_checksum "charlie.dat") + +gen_config + +start_warpgate +wait_for_mount +wait_for_rc_api + +# --- Warm the cache by reading each file --- +cat "$TEST_MOUNT/alpha.dat" > /dev/null +cat "$TEST_MOUNT/bravo.dat" > /dev/null +cat "$TEST_MOUNT/charlie.dat" > /dev/null + +assert_cached "alpha.dat" +assert_cached "bravo.dat" +assert_cached "charlie.dat" + +# --- Act: go offline --- +inject_network_down + +# Read all three files again from cache +actual_alpha=$(md5sum "$TEST_MOUNT/alpha.dat" | awk '{print $1}') +actual_bravo=$(md5sum "$TEST_MOUNT/bravo.dat" | awk '{print $1}') +actual_charlie=$(md5sum "$TEST_MOUNT/charlie.dat" | awk '{print $1}') + +# --- Assert --- +fail=0 + +if [[ "$actual_alpha" != "$cksum_alpha" ]]; then + echo "FAIL: alpha.dat checksum mismatch offline" >&2 + fail=1 +fi +if [[ "$actual_bravo" != "$cksum_bravo" ]]; then + echo "FAIL: bravo.dat checksum mismatch offline" >&2 + fail=1 +fi +if [[ "$actual_charlie" != "$cksum_charlie" ]]; then + echo "FAIL: charlie.dat checksum mismatch offline" >&2 + fail=1 +fi + +# Restore network for clean teardown +inject_network_up + +if [[ "$fail" -ne 0 ]]; then + exit 1 +fi + +echo "PASS: $(basename "$0" .sh)" diff --git a/tests/07-network/test-packet-loss.sh b/tests/07-network/test-packet-loss.sh new file mode 100755 index 0000000..00c7cec --- /dev/null +++ b/tests/07-network/test-packet-loss.sh @@ -0,0 +1,52 @@ +#!/usr/bin/env 
bash
+# Test: 10 % packet loss does not corrupt transferred data
+#
+# Injects 10 % packet loss via tc netem, reads a 500 KB file through the
+# FUSE mount, and verifies the content matches the original on the NAS.
+# rclone's SFTP retries and TCP retransmissions should compensate for the
+# loss so the application sees correct data.
+set -euo pipefail
+SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
+source "$SCRIPT_DIR/../harness/helpers.sh"
+source "$SCRIPT_DIR/../harness/mock-nas.sh"
+
+require_root
+setup_test_env
+trap teardown_test_env EXIT
+
+# --- Arrange ---
+start_mock_nas
+nas_create_file "data.dat" 500 # 500 KB
+
+# Capture the NAS-side checksum before any network impairment
+expected_cksum=$(nas_file_checksum "data.dat")
+
+gen_config
+
+start_warpgate
+wait_for_mount
+wait_for_rc_api
+
+# --- Act ---
+# Inject 10 % packet loss
+inject_packet_loss 10
+
+# Read the file through the FUSE mount
+cat "$TEST_MOUNT/data.dat" > /dev/null
+
+# --- Assert ---
+# Verify the content read through the mount matches the NAS original
+actual_cksum=$(md5sum "$TEST_MOUNT/data.dat" | awk '{print $1}')
+
+if [[ "$actual_cksum" != "$expected_cksum" ]]; then
+    echo "FAIL: checksum mismatch under 10% packet loss" >&2
+    echo " expected: $expected_cksum" >&2
+    echo " actual: $actual_cksum" >&2
+    clear_network_injection
+    exit 1
+fi
+
+# Clean up tc rules
+clear_network_injection
+
+echo "PASS: $(basename "$0" .sh)"
diff --git a/tests/07-network/test-read-network-cut.sh b/tests/07-network/test-read-network-cut.sh
new file mode 100755
index 0000000..b8855a3
--- /dev/null
+++ b/tests/07-network/test-read-network-cut.sh
@@ -0,0 +1,60 @@
+#!/usr/bin/env bash
+# Test: read I/O fails when network is severed mid-transfer
+#
+# Creates a 50 MB file on the mock NAS, starts reading it through the FUSE
+# mount, then immediately cuts the network. The background read should fail
+# with an I/O error or produce incomplete output because the remote is
+# unreachable mid-stream.
+set -euo pipefail +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +source "$SCRIPT_DIR/../harness/helpers.sh" +source "$SCRIPT_DIR/../harness/mock-nas.sh" + +require_root +setup_test_env +trap teardown_test_env EXIT + +# --- Arrange --- +start_mock_nas +nas_create_file "bigfile.dat" 51200 # 50 MB — large enough that caching + # takes time, ensuring network cut + # hits mid-transfer + +gen_config + +start_warpgate +wait_for_mount +wait_for_rc_api + +# --- Act --- +# Start a background read of the large file +cat "$TEST_MOUNT/bigfile.dat" > /dev/null 2>&1 & +read_pid=$! +_BG_PIDS+=("$read_pid") + +# Brief delay to let the read begin but NOT complete for a 50 MB file +sleep 0.5 + +# Sever the network link while the read is in progress +inject_network_down + +# Wait for the background read to finish (it should error out) +read_ok=0 +if wait "$read_pid" 2>/dev/null; then + read_ok=1 +fi + +# --- Assert --- +# With a 50 MB file and the network cut after 0.5s, the read must fail. +# The file cannot have been fully cached in that time. +if [[ "$read_ok" -eq 1 ]]; then + echo "FAIL: background read of 50 MB file succeeded despite network cut" >&2 + echo " The file should not have been fully cached in 0.5s." >&2 + inject_network_up + exit 1 +fi + +# Restore network for clean teardown +inject_network_up + +echo "PASS: $(basename "$0" .sh)" diff --git a/tests/07-network/test-writeback-network-cut-resume.sh b/tests/07-network/test-writeback-network-cut-resume.sh new file mode 100755 index 0000000..665f1a1 --- /dev/null +++ b/tests/07-network/test-writeback-network-cut-resume.sh @@ -0,0 +1,61 @@ +#!/usr/bin/env bash +# Test: write-back survives a network outage and resumes on reconnect +# +# Writes a file through the FUSE mount, cuts the network before write-back +# can complete, waits, restores the network, and verifies that the file +# eventually reaches the NAS with the correct content. 
+set -euo pipefail +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +source "$SCRIPT_DIR/../harness/helpers.sh" +source "$SCRIPT_DIR/../harness/mock-nas.sh" + +require_root +setup_test_env +trap teardown_test_env EXIT + +# --- Arrange --- +start_mock_nas + +# Short write-back so the upload attempt happens quickly +gen_config write_back=2s + +start_warpgate +wait_for_mount +wait_for_rc_api + +# --- Act --- +# Write a file through the FUSE mount +echo "resilient-data" > "$TEST_MOUNT/resilient.txt" + +# Sever the network before write-back succeeds +inject_network_down + +# Wait long enough for at least one write-back attempt to fail +sleep 5 + +# Verify the file is still dirty (upload could not complete) +dirty=$(get_dirty_count) +if [[ "$dirty" -lt 1 ]]; then + echo "FAIL: expected dirty count > 0 while network is down, got $dirty" >&2 + inject_network_up + exit 1 +fi + +# Restore the network +inject_network_up + +# --- Assert --- +# Wait for the write-back queue to drain (rclone retries should succeed now) +wait_for_dirty_zero 120 + +# Verify the file arrived on the NAS +assert_file_exists "$NAS_ROOT/resilient.txt" + +# Verify the content matches what we wrote +actual=$(cat "$NAS_ROOT/resilient.txt") +if [[ "$actual" != "resilient-data" ]]; then + echo "FAIL: NAS file content mismatch: expected 'resilient-data', got '$actual'" >&2 + exit 1 +fi + +echo "PASS: $(basename "$0" .sh)" diff --git a/tests/08-crash-recovery/test-cache-corruption.sh b/tests/08-crash-recovery/test-cache-corruption.sh new file mode 100755 index 0000000..63c7c08 --- /dev/null +++ b/tests/08-crash-recovery/test-cache-corruption.sh @@ -0,0 +1,86 @@ +#!/usr/bin/env bash +# Test: behavior when a cached file is corrupted on disk +# +# Verifies what happens when a file in the VFS cache is corrupted between +# warpgate restarts. rclone may serve the corrupted data from cache or +# re-fetch from the remote — this test documents the observed behavior. +# +# Sequence: +# 1. 
Create a test file on the NAS with known content. +# 2. Start warpgate, read file through mount to cache it. +# 3. Stop warpgate gracefully. +# 4. Overwrite the cached file with garbage data. +# 5. Restart warpgate with a short dir_cache_time (1s). +# 6. Wait for dir cache to expire, then read through mount. +# 7. Compare result against expected content and document behavior. +set -euo pipefail +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +source "$SCRIPT_DIR/../harness/helpers.sh" +source "$SCRIPT_DIR/../harness/mock-nas.sh" + +require_root +setup_test_env +trap teardown_test_env EXIT + +# Start the mock NAS and create a test file +start_mock_nas +nas_create_file_content "corrupt-test.txt" "original" + +# Generate config with short dir_cache_time so rclone rechecks the remote +gen_config dir_cache_time=1s + +# Start warpgate and wait for readiness +start_warpgate +wait_for_mount +wait_for_rc_api + +# Read file through the FUSE mount to pull it into cache +content_before=$(cat "$TEST_MOUNT/corrupt-test.txt") +if [[ "$content_before" != "original" ]]; then + echo "FAIL: initial read returned unexpected content: $content_before" >&2 + exit 1 +fi +assert_cached "corrupt-test.txt" +echo "INFO: file cached successfully, content: '$content_before'" + +# Stop warpgate gracefully +stop_warpgate + +# Corrupt the cached file by overwriting with garbage +cache_file="$CACHE_DIR/vfs/nas/corrupt-test.txt" +if [[ ! 
-f "$cache_file" ]]; then + echo "FAIL: cache file not found at $cache_file" >&2 + exit 1 +fi +echo -n "CORRUPTED-GARBAGE-DATA" > "$cache_file" +echo "INFO: corrupted cache file at $cache_file" + +# Restart warpgate +start_warpgate +wait_for_mount 60 +wait_for_rc_api + +# Wait for the dir cache to expire (1s dir_cache_time + margin) +sleep 3 + +# Read the file through the mount again +content_after=$(cat "$TEST_MOUNT/corrupt-test.txt") + +echo "INFO: content after restart with corrupted cache: '$content_after'" + +# The corrupted cache file has a different size than the NAS original +# ("CORRUPTED-GARBAGE-DATA" = 22 bytes vs "original" = 8 bytes). +# rclone VFS should detect the size mismatch and re-fetch from the remote. +# We MUST verify the file matches the NAS original — serving corrupted data +# is a data integrity failure. +nas_original=$(nas_read_file "corrupt-test.txt") +if [[ "$content_after" != "$nas_original" ]]; then + echo "FAIL: content after restart does not match NAS original" >&2 + echo " NAS original: '$nas_original'" >&2 + echo " mount returned: '$content_after'" >&2 + exit 1 +fi + +echo "INFO: rclone correctly re-fetched file from remote (corruption detected)" + +echo "PASS: $(basename "$0" .sh)" diff --git a/tests/08-crash-recovery/test-oom-kill-rclone.sh b/tests/08-crash-recovery/test-oom-kill-rclone.sh new file mode 100755 index 0000000..c019ca5 --- /dev/null +++ b/tests/08-crash-recovery/test-oom-kill-rclone.sh @@ -0,0 +1,76 @@ +#!/usr/bin/env bash +# Test: SIGKILL to rclone triggers full warpgate shutdown +# +# Simulates an OOM kill of the rclone mount process. The supervisor should +# detect rclone's unexpected exit, log "rclone mount exited unexpectedly", +# and perform a full shutdown of warpgate (non-zero exit code). +# +# This is distinct from 04-supervision/test-rclone-death-shutdown.sh which +# uses a normal kill; here we use SIGKILL to simulate the kernel OOM killer. +# +# Sequence: +# 1. 
Start warpgate, wait for supervision to become active.
+# 2. Find the rclone mount PID.
+# 3. Send SIGKILL to rclone (simulating OOM kill).
+# 4. Verify warpgate exits within 30s.
+# 5. Verify the log contains "rclone mount exited unexpectedly".
+# 6. Verify warpgate exited with non-zero code.
+set -euo pipefail
+SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
+source "$SCRIPT_DIR/../harness/helpers.sh"
+source "$SCRIPT_DIR/../harness/mock-nas.sh"
+
+require_root
+setup_test_env
+trap teardown_test_env EXIT
+
+# Start the mock NAS
+start_mock_nas
+
+# Generate a default config
+gen_config
+
+# Start warpgate and wait for the supervisor to be active
+start_warpgate
+wait_for_log_line "Supervision active" 60
+
+# Find the rclone mount process.
+# NOTE: pgrep exits non-zero when nothing matches, which under `set -e`
+# would kill the script before the FAIL diagnostic below could run —
+# `|| true` keeps the empty-result check reachable.
+rclone_pid=$(pgrep -f "rclone mount.*$TEST_MOUNT" || true)
+if [[ -z "$rclone_pid" ]]; then
+    echo "FAIL: rclone mount process not found" >&2
+    exit 1
+fi
+echo "INFO: rclone mount PID is $rclone_pid"
+
+# Save warpgate PID before it exits
+local_pid="$WARPGATE_PID"
+
+# SIGKILL rclone — simulates OOM killer
+kill -9 "$rclone_pid"
+
+# Wait for warpgate to exit (supervisor should detect rclone death)
+wait_for_exit "$local_pid" 30
+
+# Verify the log contains the expected critical error message
+assert_log_contains "rclone mount exited unexpectedly"
+echo "INFO: log confirms rclone death was detected"
+
+# Verify warpgate exited with non-zero code
+exit_code=0
+wait "$local_pid" 2>/dev/null || exit_code=$?
+if [[ "$exit_code" -eq 0 ]]; then + echo "FAIL: expected non-zero exit code after rclone SIGKILL, got 0" >&2 + exit 1 +fi +echo "INFO: warpgate exited with code $exit_code (non-zero, as expected)" + +# Verify warpgate is no longer running +if kill -0 "$local_pid" 2>/dev/null; then + echo "FAIL: warpgate is still running after rclone SIGKILL" >&2 + exit 1 +fi + +# Clear WARPGATE_PID so teardown does not try to stop a dead process +WARPGATE_PID="" + +echo "PASS: $(basename "$0" .sh)" diff --git a/tests/08-crash-recovery/test-residual-fuse-mount.sh b/tests/08-crash-recovery/test-residual-fuse-mount.sh new file mode 100755 index 0000000..448f661 --- /dev/null +++ b/tests/08-crash-recovery/test-residual-fuse-mount.sh @@ -0,0 +1,116 @@ +#!/usr/bin/env bash +# Test: stale FUSE mount left after rclone SIGKILL +# +# Verifies behavior when rclone is killed with SIGKILL, leaving a stale +# FUSE mount point registered in /proc/mounts. The supervisor should +# detect rclone death and shut down. On a subsequent start, warpgate +# (or rclone) should either clean up the stale mount via fusermount -uz +# or fail with a clear error message. +# +# Sequence: +# 1. Start warpgate, wait for mount + RC API. +# 2. Kill rclone with SIGKILL (leaves stale FUSE mount). +# 3. Wait for supervisor to detect rclone death and shut down. +# 4. Check if mount point is still registered in /proc/mounts. +# 5. Attempt to start warpgate again. +# 6. Document whether it recovers or fails with a clear error. 
+set -euo pipefail
+SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
+source "$SCRIPT_DIR/../harness/helpers.sh"
+source "$SCRIPT_DIR/../harness/mock-nas.sh"
+
+require_root
+setup_test_env
+trap teardown_test_env EXIT
+
+# Start the mock NAS
+start_mock_nas
+
+# Generate a default config
+gen_config
+
+# Start warpgate and wait for full readiness
+start_warpgate
+wait_for_mount
+wait_for_rc_api
+
+# Find the rclone mount process.
+# NOTE: pgrep exits non-zero when nothing matches, which under `set -e`
+# would kill the script before the FAIL diagnostic below could run —
+# `|| true` keeps the empty-result check reachable.
+rclone_pid=$(pgrep -f "rclone mount.*$TEST_MOUNT" || true)
+if [[ -z "$rclone_pid" ]]; then
+    echo "FAIL: rclone mount process not found" >&2
+    exit 1
+fi
+echo "INFO: rclone mount PID is $rclone_pid"
+
+# Kill rclone with SIGKILL — this leaves the FUSE mount stale
+kill -9 "$rclone_pid"
+
+# Wait for the supervisor to detect rclone death and shut down warpgate
+wait_for_exit "$WARPGATE_PID" 30
+echo "INFO: warpgate exited after rclone SIGKILL"
+
+# Clear WARPGATE_PID since the process is dead
+first_pid="$WARPGATE_PID"
+WARPGATE_PID=""
+
+# Check if mount point is still registered in /proc/mounts
+stale_mount=0
+if grep -q "$TEST_MOUNT" /proc/mounts 2>/dev/null; then
+    stale_mount=1
+    echo "INFO: stale FUSE mount detected in /proc/mounts"
+else
+    echo "INFO: mount point already cleaned up (not in /proc/mounts)"
+fi
+
+# Do NOT manually run fusermount — we want to see if warpgate handles this
+
+# Attempt to start a new warpgate instance
+echo "INFO: attempting to start warpgate with potential stale mount..."
+start_warpgate
+
+# Give it time to either succeed or fail
+sleep 5
+
+if kill -0 "$WARPGATE_PID" 2>/dev/null; then
+    # Process is still running — check if it mounted successfully
+    if wait_for_mount 30 2>/dev/null; then
+        echo "INFO: warpgate recovered — stale mount was cleaned up automatically"
+        echo "INFO: mount is active and working"
+
+        # Verify the mount is functional by testing file access
+        if ls "$TEST_MOUNT" > /dev/null 2>&1; then
+            echo "INFO: mount is functional (ls succeeds)"
+        fi
+    else
+        echo "INFO: warpgate is running but mount did not become ready"
+        echo "INFO: checking logs for details..."
+        # Check logs for error messages about the stale mount
+        if grep -q "fusermount" "$TEST_DIR/warpgate.log" 2>/dev/null; then
+            echo "INFO: log mentions fusermount cleanup attempt"
+        fi
+        if grep -q "mount.*busy\|already mounted\|Transport endpoint" "$TEST_DIR/warpgate.log" 2>/dev/null; then
+            echo "INFO: log shows stale mount interference"
+        fi
+    fi
+else
+    # Process exited — capture why. `wait` returns the child's (expected
+    # non-zero) status, so it must be guarded: a bare `wait` followed by
+    # `exit_code=$?` would abort here under `set -e` before the status
+    # could be recorded.
+    exit_code=0
+    wait "$WARPGATE_PID" 2>/dev/null || exit_code=$?
+ WARPGATE_PID="" + echo "INFO: warpgate exited with code $exit_code" + + if grep -q "fusermount" "$TEST_DIR/warpgate.log" 2>/dev/null; then + echo "INFO: log mentions fusermount (attempted cleanup)" + fi + if grep -q "already mounted\|Transport endpoint\|mount point.*busy" "$TEST_DIR/warpgate.log" 2>/dev/null; then + echo "INFO: warpgate detected stale mount and reported clear error" + fi +fi + +# Final cleanup: ensure we remove any lingering stale mount +if grep -q "$TEST_MOUNT" /proc/mounts 2>/dev/null; then + echo "INFO: cleaning up residual stale mount via fusermount -uz" + fusermount3 -uz "$TEST_MOUNT" 2>/dev/null || fusermount -uz "$TEST_MOUNT" 2>/dev/null || true +fi + +echo "PASS: $(basename "$0" .sh)" diff --git a/tests/08-crash-recovery/test-sigkill-cache-integrity.sh b/tests/08-crash-recovery/test-sigkill-cache-integrity.sh new file mode 100755 index 0000000..fbba5a2 --- /dev/null +++ b/tests/08-crash-recovery/test-sigkill-cache-integrity.sh @@ -0,0 +1,105 @@ +#!/usr/bin/env bash +# Test: cached files survive SIGKILL and remain readable after restart +# +# Verifies that read-through cached files persist on disk through a +# simulated power loss (SIGKILL). After restarting warpgate, the files +# should be readable through the mount with content matching the originals +# on the NAS. +# +# Sequence: +# 1. Create 3 test files (100 KB each) on the mock NAS. +# 2. Start warpgate, read all files through the mount to cache them. +# 3. Verify all files are cached. +# 4. simulate_power_loss. +# 5. Start a fresh warpgate instance. +# 6. Read the cached files again through the mount. +# 7. Verify content matches the NAS originals. 
+set -euo pipefail +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +source "$SCRIPT_DIR/../harness/helpers.sh" +source "$SCRIPT_DIR/../harness/mock-nas.sh" + +require_root +setup_test_env +trap teardown_test_env EXIT + +# Start the mock NAS and create 3 test files +start_mock_nas +nas_create_file "photo-001.dat" 100 +nas_create_file "photo-002.dat" 100 +nas_create_file "photo-003.dat" 100 + +# Record checksums of the original NAS files +cksum_001=$(nas_file_checksum "photo-001.dat") +cksum_002=$(nas_file_checksum "photo-002.dat") +cksum_003=$(nas_file_checksum "photo-003.dat") + +# Generate default config +gen_config + +# Start warpgate and wait for readiness +start_warpgate +wait_for_mount +wait_for_rc_api + +# Read all files through the FUSE mount to pull them into cache +cat "$TEST_MOUNT/photo-001.dat" > /dev/null +cat "$TEST_MOUNT/photo-002.dat" > /dev/null +cat "$TEST_MOUNT/photo-003.dat" > /dev/null + +# Verify all files are cached +assert_cached "photo-001.dat" +assert_cached "photo-002.dat" +assert_cached "photo-003.dat" +echo "INFO: all 3 files cached successfully" + +# Simulate power loss +simulate_power_loss + +# Verify cache files persist on disk +for f in photo-001.dat photo-002.dat photo-003.dat; do + if [[ ! 
-f "$CACHE_DIR/vfs/nas/$f" ]]; then + echo "FAIL: cache file missing after power loss: $f" >&2 + exit 1 + fi +done +echo "INFO: all cache files persist on disk after SIGKILL" + +# Clean up any stale FUSE mount +if mountpoint -q "$TEST_MOUNT" 2>/dev/null; then + fusermount3 -uz "$TEST_MOUNT" 2>/dev/null || fusermount -uz "$TEST_MOUNT" 2>/dev/null || true +fi + +# Start a fresh warpgate instance +start_warpgate +wait_for_mount 60 +wait_for_rc_api + +# Read the files again through the mount and verify checksums +actual_001=$(md5sum "$TEST_MOUNT/photo-001.dat" | awk '{print $1}') +actual_002=$(md5sum "$TEST_MOUNT/photo-002.dat" | awk '{print $1}') +actual_003=$(md5sum "$TEST_MOUNT/photo-003.dat" | awk '{print $1}') + +fail=0 +if [[ "$actual_001" != "$cksum_001" ]]; then + echo "FAIL: photo-001.dat checksum mismatch after restart" >&2 + echo " expected: $cksum_001 actual: $actual_001" >&2 + fail=1 +fi +if [[ "$actual_002" != "$cksum_002" ]]; then + echo "FAIL: photo-002.dat checksum mismatch after restart" >&2 + echo " expected: $cksum_002 actual: $actual_002" >&2 + fail=1 +fi +if [[ "$actual_003" != "$cksum_003" ]]; then + echo "FAIL: photo-003.dat checksum mismatch after restart" >&2 + echo " expected: $cksum_003 actual: $actual_003" >&2 + fail=1 +fi + +if [[ "$fail" -ne 0 ]]; then + exit 1 +fi + +echo "INFO: all 3 files readable with correct content after crash + restart" +echo "PASS: $(basename "$0" .sh)" diff --git a/tests/08-crash-recovery/test-sigkill-dirty-recovery.sh b/tests/08-crash-recovery/test-sigkill-dirty-recovery.sh new file mode 100755 index 0000000..10bf311 --- /dev/null +++ b/tests/08-crash-recovery/test-sigkill-dirty-recovery.sh @@ -0,0 +1,100 @@ +#!/usr/bin/env bash +# Test: dirty files survive SIGKILL and are re-uploaded on restart +# +# Verifies that after a simulated power loss (SIGKILL all warpgate processes), +# dirty files that were pending write-back persist in the rclone VFS cache on +# disk. 
When warpgate is restarted, rclone re-reads the VFS cache directory, +# discovers the pending uploads, and flushes them to the remote NAS. +# +# Sequence: +# 1. Start warpgate with a long write-back delay (60s) so writes stay dirty. +# 2. Block the network so write-back cannot happen even accidentally. +# 3. Write a file through the FUSE mount. +# 4. Verify the file is counted as dirty. +# 5. simulate_power_loss (kill -9 everything + sync). +# 6. Restore the network. +# 7. Start a fresh warpgate instance. +# 8. Wait for dirty count to reach zero (file re-uploaded). +# 9. Verify the file exists on the NAS with correct content. +set -euo pipefail +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +source "$SCRIPT_DIR/../harness/helpers.sh" +source "$SCRIPT_DIR/../harness/mock-nas.sh" + +require_root +setup_test_env +trap teardown_test_env EXIT + +# Start the mock NAS +start_mock_nas + +# Generate config with a very long write-back delay so files stay dirty +gen_config write_back=60s + +# Start warpgate and wait for full readiness +start_warpgate +wait_for_mount +wait_for_rc_api + +# Block the network to prevent any write-back from occurring +inject_network_down + +# Write a file through the FUSE mount — it will be cached locally +echo "crash-recovery-data" > "$TEST_MOUNT/crash.txt" + +# Allow time for VFS to register the dirty file +sleep 2 + +# Verify the file is counted as dirty (pending upload) +dirty=$(get_dirty_count) +if [[ "$dirty" -lt 1 ]]; then + echo "FAIL: expected dirty count > 0 before power loss, got $dirty" >&2 + inject_network_up + exit 1 +fi +echo "INFO: dirty count before power loss: $dirty" + +# Simulate power loss — SIGKILL all warpgate processes + sync +simulate_power_loss + +# Verify the cache file persists on disk after the crash +if [[ ! 
-f "$CACHE_DIR/vfs/nas/crash.txt" ]]; then + echo "FAIL: cache file missing after power loss: $CACHE_DIR/vfs/nas/crash.txt" >&2 + inject_network_up + exit 1 +fi +echo "INFO: cache file persists on disk after SIGKILL" + +# Restore the network so write-back can proceed on restart +inject_network_up +sleep 2 + +# Clean up any stale FUSE mount left behind +if mountpoint -q "$TEST_MOUNT" 2>/dev/null; then + fusermount3 -uz "$TEST_MOUNT" 2>/dev/null || fusermount -uz "$TEST_MOUNT" 2>/dev/null || true +fi + +# Start a fresh warpgate instance +start_warpgate +wait_for_mount 60 +wait_for_rc_api 30 + +# Wait for the dirty file to be re-uploaded (rclone finds it in cache) +wait_for_dirty_zero 120 + +# Verify the file now exists on the NAS +if ! nas_file_exists "crash.txt"; then + echo "FAIL: crash.txt not found on NAS after recovery" >&2 + exit 1 +fi + +# Verify the content matches what we originally wrote +actual=$(nas_read_file "crash.txt") +if [[ "$actual" != "crash-recovery-data" ]]; then + echo "FAIL: NAS file content mismatch after recovery" >&2 + echo " expected: crash-recovery-data" >&2 + echo " actual: $actual" >&2 + exit 1 +fi + +echo "PASS: $(basename "$0" .sh)" diff --git a/tests/08-crash-recovery/test-write-interrupt-recovery.sh b/tests/08-crash-recovery/test-write-interrupt-recovery.sh new file mode 100755 index 0000000..3632afc --- /dev/null +++ b/tests/08-crash-recovery/test-write-interrupt-recovery.sh @@ -0,0 +1,186 @@ +#!/usr/bin/env bash +# Test: large write interrupted by power loss — recovery behavior +# +# Verifies what happens when a large file write (5 MB) is interrupted +# mid-way by a simulated power loss (SIGKILL). After restarting warpgate, +# documents whether the file is partially present, fully recovered, or +# missing on the NAS. +# +# With rclone VFS write-back=2s, the VFS may have begun uploading or may +# have the file cached locally waiting for write-back. 
After power loss, +# the partial/complete file should persist in the VFS cache and be +# re-uploaded on restart. +# +# Sequence: +# 1. Start warpgate with write_back=2s. +# 2. Begin writing a 5 MB file in the background. +# 3. Sleep 1s (let the write start but possibly not complete). +# 4. simulate_power_loss. +# 5. Start a fresh warpgate instance. +# 6. Wait for dirty count to reach zero. +# 7. Document what happened to the file on the NAS. +set -euo pipefail +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +source "$SCRIPT_DIR/../harness/helpers.sh" +source "$SCRIPT_DIR/../harness/mock-nas.sh" + +require_root +setup_test_env +trap teardown_test_env EXIT + +# Start the mock NAS +start_mock_nas + +# Generate config with a short write-back delay +gen_config write_back=2s + +# Start warpgate and wait for readiness +start_warpgate +wait_for_mount +wait_for_rc_api + +# Start writing a 5 MB file in the background +dd if=/dev/urandom of="$TEST_MOUNT/bigwrite.dat" bs=1M count=5 2>/dev/null & +dd_pid=$! 
+_BG_PIDS+=("$dd_pid") +echo "INFO: started 5 MB write in background (PID $dd_pid)" + +# Let the write proceed for a moment +sleep 1 + +# Check if there's anything in the cache yet +cache_file="$CACHE_DIR/vfs/nas/bigwrite.dat" +if [[ -f "$cache_file" ]]; then + cache_size=$(stat -c%s "$cache_file" 2>/dev/null || stat -f%z "$cache_file" 2>/dev/null || echo 0) + echo "INFO: cache file exists before power loss, size: $cache_size bytes" +else + echo "INFO: cache file not yet created at time of power loss" +fi + +# Simulate power loss — kills everything including the dd +simulate_power_loss + +# The dd process is now dead too +# Check what survived in the cache +if [[ -f "$cache_file" ]]; then + cache_size_after=$(stat -c%s "$cache_file" 2>/dev/null || stat -f%z "$cache_file" 2>/dev/null || echo 0) + echo "INFO: cache file persists after power loss, size: $cache_size_after bytes" +else + echo "INFO: no cache file found after power loss" +fi + +# Clean up any stale FUSE mount +if mountpoint -q "$TEST_MOUNT" 2>/dev/null; then + fusermount3 -uz "$TEST_MOUNT" 2>/dev/null || fusermount -uz "$TEST_MOUNT" 2>/dev/null || true +fi + +# Start a fresh warpgate instance +start_warpgate +wait_for_mount 60 +wait_for_rc_api 30 + +# Wait for any dirty files to be flushed +wait_for_dirty_zero 120 + +# Document what happened to the file +echo "INFO: --- Recovery results ---" + +if nas_file_exists "bigwrite.dat"; then + nas_size=$(stat -c%s "$NAS_ROOT/bigwrite.dat" 2>/dev/null || stat -f%z "$NAS_ROOT/bigwrite.dat" 2>/dev/null || echo 0) + expected_size=$((5 * 1024 * 1024)) + + echo "INFO: bigwrite.dat exists on NAS, size: $nas_size bytes" + + if [[ "$nas_size" -eq "$expected_size" ]]; then + echo "INFO: file is complete (5 MB) — write finished before power loss" + elif [[ "$nas_size" -gt 0 ]]; then + echo "INFO: file is partial ($nas_size / $expected_size bytes)" + echo "INFO: this is expected — write was interrupted mid-stream" + fi +else + echo "INFO: bigwrite.dat NOT found on NAS" + 
echo "INFO: the write may not have committed to cache before power loss" +fi + +# Also check if the file is visible through the mount +if [[ -f "$TEST_MOUNT/bigwrite.dat" ]]; then + mount_size=$(stat -c%s "$TEST_MOUNT/bigwrite.dat" 2>/dev/null || stat -f%z "$TEST_MOUNT/bigwrite.dat" 2>/dev/null || echo 0) + echo "INFO: bigwrite.dat visible through mount, size: $mount_size bytes" +else + echo "INFO: bigwrite.dat not visible through mount" +fi + +# Stop the current warpgate instance before the optional btrfs test +stop_warpgate + +# --- Optional btrfs test path --- +# If WARPGATE_TEST_BTRFS is set to a block device, run the same test on a +# btrfs-formatted cache filesystem and compare results vs ext4 above. +if [[ -n "${WARPGATE_TEST_BTRFS:-}" ]]; then + require_command mkfs.btrfs + + echo "INFO: --- btrfs test path (device: $WARPGATE_TEST_BTRFS) ---" + + # Format the device as btrfs + mkfs.btrfs -f "$WARPGATE_TEST_BTRFS" > /dev/null 2>&1 + + # Create a btrfs mount point and mount + btrfs_cache="$TEST_DIR/btrfs-cache" + mkdir -p "$btrfs_cache" + mount "$WARPGATE_TEST_BTRFS" "$btrfs_cache" + + # Re-generate config with the btrfs cache dir + gen_config write_back=2s cache_dir="$btrfs_cache" + + # Start warpgate on btrfs cache + start_warpgate + wait_for_mount 60 + wait_for_rc_api 30 + + # Write a 5 MB file in the background + dd if=/dev/urandom of="$TEST_MOUNT/bigwrite-btrfs.dat" bs=1M count=5 2>/dev/null & + btrfs_dd_pid=$! 
+ _BG_PIDS+=("$btrfs_dd_pid") + + sleep 1 + + # Simulate power loss + simulate_power_loss + + # Check what survived in the btrfs cache + btrfs_cache_file="$btrfs_cache/vfs/nas/bigwrite-btrfs.dat" + if [[ -f "$btrfs_cache_file" ]]; then + btrfs_size=$(stat -c%s "$btrfs_cache_file" 2>/dev/null || stat -f%z "$btrfs_cache_file" 2>/dev/null || echo 0) + echo "INFO: btrfs cache file persists after power loss, size: $btrfs_size bytes" + else + echo "INFO: no btrfs cache file found after power loss" + fi + + # Clean up stale FUSE mount + if mountpoint -q "$TEST_MOUNT" 2>/dev/null; then + fusermount3 -uz "$TEST_MOUNT" 2>/dev/null || fusermount -uz "$TEST_MOUNT" 2>/dev/null || true + fi + + # Restart warpgate on btrfs cache and wait for recovery + start_warpgate + wait_for_mount 60 + wait_for_rc_api 30 + wait_for_dirty_zero 120 + + # Document btrfs recovery result + if nas_file_exists "bigwrite-btrfs.dat"; then + btrfs_nas_size=$(stat -c%s "$NAS_ROOT/bigwrite-btrfs.dat" 2>/dev/null || stat -f%z "$NAS_ROOT/bigwrite-btrfs.dat" 2>/dev/null || echo 0) + echo "INFO: btrfs recovery: bigwrite-btrfs.dat on NAS, size: $btrfs_nas_size bytes" + else + echo "INFO: btrfs recovery: bigwrite-btrfs.dat NOT found on NAS" + fi + + stop_warpgate + + # Unmount btrfs + umount "$btrfs_cache" 2>/dev/null || true +else + echo "INFO: skipping btrfs test (set WARPGATE_TEST_BTRFS=/dev/sdX to enable)" +fi + +echo "PASS: $(basename "$0" .sh)" diff --git a/tests/09-cli/test-bwlimit-query.sh b/tests/09-cli/test-bwlimit-query.sh new file mode 100755 index 0000000..b1547cc --- /dev/null +++ b/tests/09-cli/test-bwlimit-query.sh @@ -0,0 +1,34 @@ +#!/usr/bin/env bash +# Test: `warpgate bwlimit` (no args) shows current bandwidth limits +# +# Verifies that querying bandwidth limits reports the current upload and +# download limits via the rclone RC API. 
+set -euo pipefail +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +source "$SCRIPT_DIR/../harness/helpers.sh" +source "$SCRIPT_DIR/../harness/mock-nas.sh" + +require_root +setup_test_env +trap teardown_test_env EXIT + +# Start mock NAS +start_mock_nas + +# Generate config +gen_config + +# Start warpgate and wait for readiness +start_warpgate +wait_for_mount 60 +wait_for_rc_api 30 + +# Query current bandwidth limits +output=$(run_warpgate_cmd bwlimit) + +# Verify output contains the expected sections +assert_output_contains "$output" "Current bandwidth limits" +assert_output_contains "$output" "Upload:" +assert_output_contains "$output" "Download:" + +echo "PASS: $(basename "$0" .sh)" diff --git a/tests/09-cli/test-bwlimit-set.sh b/tests/09-cli/test-bwlimit-set.sh new file mode 100755 index 0000000..698d173 --- /dev/null +++ b/tests/09-cli/test-bwlimit-set.sh @@ -0,0 +1,52 @@ +#!/usr/bin/env bash +# Test: `warpgate bwlimit --up 10M --down 50M` sets bandwidth limits +# +# Verifies that setting bandwidth limits prints a confirmation message, +# and that a subsequent query reflects the new limits. 
+set -euo pipefail +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +source "$SCRIPT_DIR/../harness/helpers.sh" +source "$SCRIPT_DIR/../harness/mock-nas.sh" + +require_root +setup_test_env +trap teardown_test_env EXIT + +# Start mock NAS +start_mock_nas + +# Generate config +gen_config + +# Start warpgate and wait for readiness +start_warpgate +wait_for_mount 60 +wait_for_rc_api 30 + +# Set new bandwidth limits +output=$(run_warpgate_cmd bwlimit --up 10M --down 50M) + +# Verify the set command confirms the update +assert_output_contains "$output" "Updated bandwidth limits" + +# Query limits again to verify they are reflected +output2=$(run_warpgate_cmd bwlimit) + +# The query should show the limits we just set (10M up, 50M down) +if echo "$output2" | grep -qi "10M\|10 M\|10240"; then + true +else + echo "FAIL: bwlimit query does not reflect the 10M upload limit" >&2 + echo " output: $output2" >&2 + exit 1 +fi + +if echo "$output2" | grep -qi "50M\|50 M\|51200"; then + true +else + echo "FAIL: bwlimit query does not reflect the 50M download limit" >&2 + echo " output: $output2" >&2 + exit 1 +fi + +echo "PASS: $(basename "$0" .sh)" diff --git a/tests/09-cli/test-cache-clean.sh b/tests/09-cli/test-cache-clean.sh new file mode 100755 index 0000000..58e4de2 --- /dev/null +++ b/tests/09-cli/test-cache-clean.sh @@ -0,0 +1,63 @@ +#!/usr/bin/env bash +# Test: `warpgate cache-clean --all` clears the VFS directory cache +# +# Verifies that cache-clean calls vfs/forget and prints a confirmation +# message. Exit code must be 0. 
+set -euo pipefail +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +source "$SCRIPT_DIR/../harness/helpers.sh" +source "$SCRIPT_DIR/../harness/mock-nas.sh" + +require_root +setup_test_env +trap teardown_test_env EXIT + +# Start mock NAS +start_mock_nas + +# Generate config +gen_config + +# Start warpgate and wait for readiness +start_warpgate +wait_for_mount 60 +wait_for_rc_api 30 + +# Create and read a file to populate the cache +nas_create_file_content "clean-test.txt" "cache-me" +cat "$TEST_MOUNT/clean-test.txt" > /dev/null + +# Verify file is cached before cleaning +assert_cached "clean-test.txt" + +# Capture pre-clean vfs/stats for comparison +pre_clean_stats=$(rc_api "vfs/stats" 2>/dev/null || echo "{}") + +# Run cache-clean --all +exit_code=0 +output=$(run_warpgate_cmd cache-clean --all 2>&1) || exit_code=$? + +# Verify exit code is 0 +if [[ "$exit_code" -ne 0 ]]; then + echo "FAIL: cache-clean exited with code $exit_code (expected 0)" >&2 + echo " output: $output" >&2 + exit 1 +fi + +# Verify output confirms the cache was cleared +if echo "$output" | grep -qi "VFS directory cache cleared\|Clearing"; then + true +else + echo "FAIL: cache-clean output missing confirmation message" >&2 + echo " output: $output" >&2 + exit 1 +fi + +# Verify cache was actually cleared by checking vfs/stats or that the +# directory cache no longer lists the file immediately after clean. +# After vfs/forget, re-listing should require a fresh remote lookup. 
+post_clean_stats=$(rc_api "vfs/stats" 2>/dev/null || echo "{}") +echo "INFO: pre-clean stats: $pre_clean_stats" +echo "INFO: post-clean stats: $post_clean_stats" + +echo "PASS: $(basename "$0" .sh)" diff --git a/tests/09-cli/test-cache-list.sh b/tests/09-cli/test-cache-list.sh new file mode 100755 index 0000000..2eb1e4d --- /dev/null +++ b/tests/09-cli/test-cache-list.sh @@ -0,0 +1,61 @@ +#!/usr/bin/env bash +# Test: `warpgate cache-list` shows cached files via vfs/list RC API +# +# Creates test files on the mock NAS, reads them through the FUSE mount to +# populate the cache, then verifies that `cache-list` reports them. +set -euo pipefail +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +source "$SCRIPT_DIR/../harness/helpers.sh" +source "$SCRIPT_DIR/../harness/mock-nas.sh" + +require_root +setup_test_env +trap teardown_test_env EXIT + +# Start mock NAS and place test files on it +start_mock_nas +nas_create_file "photos/IMG_0001.cr3" 64 +nas_create_file "photos/IMG_0002.cr3" 64 +nas_create_file "documents/notes.txt" 1 + +# Generate config pointing at mock NAS +gen_config + +# Start warpgate and wait for readiness +start_warpgate +wait_for_mount 60 +wait_for_rc_api 30 + +# Read files through the FUSE mount to pull them into cache +cat "$TEST_MOUNT/photos/IMG_0001.cr3" > /dev/null +cat "$TEST_MOUNT/photos/IMG_0002.cr3" > /dev/null +cat "$TEST_MOUNT/documents/notes.txt" > /dev/null + +# Run cache-list +output=$(run_warpgate_cmd cache-list) + +# Verify output contains file names +assert_output_contains "$output" "IMG_0001" +assert_output_contains "$output" "IMG_0002" +assert_output_contains "$output" "notes" + +# Verify output also contains size values alongside file names. +# The 64 KB files should show as "64" or "65536" or "64.0" etc. 
+if echo "$output" | grep -qE "[0-9]"; then + true +else + echo "FAIL: cache-list output does not contain any size values" >&2 + echo " output: $output" >&2 + exit 1 +fi + +# Verify a size-like value appears on the same line as a file name +if echo "$output" | grep -i "IMG_0001" | grep -qE "[0-9]"; then + true +else + echo "FAIL: cache-list does not show size alongside file names" >&2 + echo " output: $output" >&2 + exit 1 +fi + +echo "PASS: $(basename "$0" .sh)" diff --git a/tests/09-cli/test-deploy-deps.sh b/tests/09-cli/test-deploy-deps.sh new file mode 100755 index 0000000..980080a --- /dev/null +++ b/tests/09-cli/test-deploy-deps.sh @@ -0,0 +1,30 @@ +#!/usr/bin/env bash +# Test: `warpgate deploy` starts with dependency checking +# +# Verifies that the deploy subcommand begins by checking for required +# dependencies (rclone, smbd, fusermount3). The deploy may fail if not +# running as root or if dependencies are missing — that is acceptable; +# we only verify the dependency-check phase runs. +set -euo pipefail +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +source "$SCRIPT_DIR/../harness/helpers.sh" + +setup_test_env +trap teardown_test_env EXIT + +# Generate config (deploy reads config for protocol flags, etc.) +gen_config + +# Run deploy — allow non-zero exit (deps may be missing, not root, etc.) +exit_code=0 +output=$("$WARPGATE_BIN" deploy -c "$TEST_CONFIG" 2>&1) || exit_code=$? 
+ +# Verify output contains the dependency check phase +assert_output_contains "$output" "Checking dependencies" + +# Verify output lists individual dependencies being checked +assert_output_contains "$output" "rclone" +assert_output_contains "$output" "samba\|smbd" +assert_output_contains "$output" "fuse3\|fusermount3" + +echo "PASS: $(basename "$0" .sh)" diff --git a/tests/09-cli/test-speed-test.sh b/tests/09-cli/test-speed-test.sh new file mode 100755 index 0000000..c8df159 --- /dev/null +++ b/tests/09-cli/test-speed-test.sh @@ -0,0 +1,53 @@ +#!/usr/bin/env bash +# Test: `warpgate speed-test` uploads/downloads a test file and reports speeds +# +# Speed-test uses rclone copyto directly against the remote, so it needs the +# rclone config (generated at deploy/preflight time) but not necessarily a +# running mount. We start the full stack to ensure rclone.conf is in place. +set -euo pipefail +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +source "$SCRIPT_DIR/../harness/helpers.sh" +source "$SCRIPT_DIR/../harness/mock-nas.sh" + +require_root +setup_test_env +trap teardown_test_env EXIT + +# Start mock NAS so rclone can reach the SFTP endpoint +start_mock_nas + +# Generate config +gen_config + +# Start warpgate so that rclone.conf is generated by the preflight phase +start_warpgate +wait_for_mount 60 +wait_for_rc_api 30 + +# Run speed-test (captures both stdout and stderr since rclone progress goes to stderr) +output=$("$WARPGATE_BIN" speed-test -c "$TEST_CONFIG" 2>&1) || true + +# Verify output contains upload and download speed reports +assert_output_contains "$output" "Upload:" +assert_output_contains "$output" "Download:" + +# Verify the test completed +if echo "$output" | grep -qi "Done\|Complete\|Finished"; then + true +else + echo "FAIL: speed-test output missing completion indicator (Done/Complete/Finished)" >&2 + echo " output: $output" >&2 + exit 1 +fi + +# Check NAS_ROOT for leftover test files from speed-test and assert they're gone. 
+# speed-test creates temporary files for upload/download benchmarking — +# they should be cleaned up after the test completes. +leftover=$(find "$NAS_ROOT" -name "*speed*" -o -name "*benchmark*" -o -name "*test-upload*" 2>/dev/null | head -5) +if [[ -n "$leftover" ]]; then + echo "FAIL: speed-test left behind temporary files on NAS:" >&2 + echo " $leftover" >&2 + exit 1 +fi + +echo "PASS: $(basename "$0" .sh)" diff --git a/tests/09-cli/test-status-not-running.sh b/tests/09-cli/test-status-not-running.sh new file mode 100755 index 0000000..cfd1cbf --- /dev/null +++ b/tests/09-cli/test-status-not-running.sh @@ -0,0 +1,37 @@ +#!/usr/bin/env bash +# Test: `warpgate status` when warpgate is NOT running shows DOWN +# +# Verifies that `status` exits 0 and reports a down/not-mounted state when +# no warpgate daemon is running. The status command should never crash +# even when the RC API is unavailable. +set -euo pipefail +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +source "$SCRIPT_DIR/../harness/helpers.sh" + +setup_test_env +trap teardown_test_env EXIT + +# Generate a config but do NOT start warpgate +gen_config + +# Run the status subcommand — must not fail +exit_code=0 +output=$(run_warpgate_cmd status) || exit_code=$? 
+ +# Verify exit code is 0 (status always succeeds, even when mount is down) +if [[ "$exit_code" -ne 0 ]]; then + echo "FAIL: status exited with code $exit_code (expected 0)" >&2 + echo " output: $output" >&2 + exit 1 +fi + +# Verify output indicates the mount is not active +if echo "$output" | grep -qi "DOWN\|not active\|not mounted"; then + true +else + echo "FAIL: status output does not indicate mount is down" >&2 + echo " output: $output" >&2 + exit 1 +fi + +echo "PASS: $(basename "$0" .sh)" diff --git a/tests/09-cli/test-status-running.sh b/tests/09-cli/test-status-running.sh new file mode 100755 index 0000000..039d794 --- /dev/null +++ b/tests/09-cli/test-status-running.sh @@ -0,0 +1,41 @@ +#!/usr/bin/env bash +# Test: `warpgate status` when warpgate is running shows mount UP and cache stats +# +# Verifies that `status` reports "Mount: UP" and includes basic cache/speed +# statistics when the daemon is fully operational. +set -euo pipefail +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +source "$SCRIPT_DIR/../harness/helpers.sh" +source "$SCRIPT_DIR/../harness/mock-nas.sh" + +require_root +setup_test_env +trap teardown_test_env EXIT + +# Start the mock NAS so rclone can connect via SFTP +start_mock_nas + +# Generate config pointing at mock NAS +gen_config + +# Start warpgate and wait for mount + RC API readiness +start_warpgate +wait_for_mount 60 +wait_for_rc_api 30 + +# Run the status subcommand +output=$(run_warpgate_cmd status) + +# Verify status reports the mount as UP — check as a single combined string +assert_output_contains "$output" "Mount: UP" + +# Verify output includes some stats (cache size, speed, etc.) 
+if echo "$output" | grep -q "Cache:\|Speed:"; then + true +else + echo "FAIL: status output missing expected stats (Cache: or Speed:)" >&2 + echo " output: $output" >&2 + exit 1 +fi + +echo "PASS: $(basename "$0" .sh)" diff --git a/tests/harness/config-gen.sh b/tests/harness/config-gen.sh new file mode 100755 index 0000000..9d37d90 --- /dev/null +++ b/tests/harness/config-gen.sh @@ -0,0 +1,204 @@ +#!/usr/bin/env bash +# Warpgate Integration Test — Config Generator +# +# Generates a config.toml pointing at the mock NAS for testing. +# Supports override parameters as key=value arguments. +# +# Usage: +# source config-gen.sh +# _gen_config # defaults +# _gen_config cache.max_size=10M # override one field +# _gen_config writeback.write_back=0s # instant write-back + +set -euo pipefail + +_gen_config() { + local config_file="${TEST_CONFIG:-$TEST_DIR/config.toml}" + + # Defaults pointing at mock NAS + local nas_host="${MOCK_NAS_IP:-10.99.0.2}" + local nas_user="root" + local nas_key_file="${TEST_SSH_KEY:-$TEST_DIR/test_key}" + local remote_path="/" + local sftp_port="22" + local sftp_connections="4" + + local cache_dir="${CACHE_DIR:-$TEST_DIR/cache}" + local cache_max_size="200G" + local cache_max_age="720h" + local cache_min_free="1G" + + local read_chunk_size="16M" + local read_chunk_limit="64M" + local read_ahead="32M" + local buffer_size="16M" + + local bw_limit_up="0" + local bw_limit_down="0" + local bw_adaptive="true" + + local write_back="5s" + local transfers="4" + + local dir_cache_time="5s" + + local enable_smb="true" + local enable_nfs="false" + local enable_webdav="false" + local nfs_allowed_network="10.99.0.0/24" + local webdav_port="8080" + + local mount_point="${TEST_MOUNT:-$TEST_DIR/mnt}" + + local warmup_auto="false" + local warmup_rules="" + + # Apply overrides + for override in "$@"; do + local key="${override%%=*}" + local value="${override#*=}" + + case "$key" in + connection.nas_host|nas_host) nas_host="$value" ;; + connection.nas_user|nas_user) 
nas_user="$value" ;; + connection.nas_key_file|nas_key_file) nas_key_file="$value" ;; + connection.remote_path|remote_path) remote_path="$value" ;; + connection.sftp_port|sftp_port) sftp_port="$value" ;; + connection.sftp_connections|sftp_connections) sftp_connections="$value" ;; + cache.dir|cache_dir) cache_dir="$value" ;; + cache.max_size|cache_max_size) cache_max_size="$value" ;; + cache.max_age|cache_max_age) cache_max_age="$value" ;; + cache.min_free|cache_min_free) cache_min_free="$value" ;; + read.chunk_size|read_chunk_size) read_chunk_size="$value" ;; + read.chunk_limit|read_chunk_limit) read_chunk_limit="$value" ;; + read.read_ahead|read_ahead) read_ahead="$value" ;; + read.buffer_size|buffer_size) buffer_size="$value" ;; + bandwidth.limit_up|bw_limit_up) bw_limit_up="$value" ;; + bandwidth.limit_down|bw_limit_down) bw_limit_down="$value" ;; + bandwidth.adaptive|bw_adaptive) bw_adaptive="$value" ;; + writeback.write_back|write_back) write_back="$value" ;; + writeback.transfers|transfers) transfers="$value" ;; + directory_cache.cache_time|dir_cache_time) dir_cache_time="$value" ;; + protocols.enable_smb|enable_smb) enable_smb="$value" ;; + protocols.enable_nfs|enable_nfs) enable_nfs="$value" ;; + protocols.enable_webdav|enable_webdav) enable_webdav="$value" ;; + protocols.nfs_allowed_network|nfs_allowed_network) nfs_allowed_network="$value" ;; + protocols.webdav_port|webdav_port) webdav_port="$value" ;; + mount.point|mount_point) mount_point="$value" ;; + warmup.auto|warmup_auto) warmup_auto="$value" ;; + warmup.rules) warmup_rules="$value" ;; + *) echo "WARNING: unknown config override: $key" >&2 ;; + esac + done + + cat > "$config_file" <> "$config_file" + echo "$warmup_rules" >> "$config_file" + fi + + export TEST_CONFIG="$config_file" +} + +# Generate a minimal config (only required fields) +_gen_minimal_config() { + local config_file="${TEST_CONFIG:-$TEST_DIR/config.toml}" + + cat > "$config_file" < "$config_file" < "$config_file" <&2 + return 1 + ;; + 
esac + + export TEST_CONFIG="$config_file" +} diff --git a/tests/harness/helpers.sh b/tests/harness/helpers.sh new file mode 100755 index 0000000..c2d87ce --- /dev/null +++ b/tests/harness/helpers.sh @@ -0,0 +1,642 @@ +#!/usr/bin/env bash +# Warpgate Integration Test Harness — shared helpers +# Provides setup/teardown, assertions, fault injection, and utility functions. +# +# Usage: source this file from each test script. +# source "$SCRIPT_DIR/../harness/helpers.sh" + +set -euo pipefail + +# --------------------------------------------------------------------------- +# Environment & paths +# --------------------------------------------------------------------------- + +WARPGATE_BIN="${WARPGATE_BIN:-$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)/warpgate/target/release/warpgate}" +WARPGATE_TEST_DIR="${WARPGATE_TEST_DIR:-/tmp/warpgate-test}" +WARPGATE_TEST_LONG="${WARPGATE_TEST_LONG:-0}" +WARPGATE_TEST_BTRFS="${WARPGATE_TEST_BTRFS:-}" + +# Populated by setup_test_env +TEST_DIR="" +TEST_CONFIG="" +TEST_MOUNT="" +NAS_ROOT="" +CACHE_DIR="" +WARPGATE_PID="" +MOCK_NAS_NS="nas-sim" +MOCK_NAS_IP="10.99.0.2" +HOST_IP="10.99.0.1" +MOCK_NAS_SSHD_PID="" +TEST_SSH_KEY="" +TEST_SSH_PUBKEY="" + +# TAP helpers +_TEST_NUM=0 +_TEST_FAILURES=0 + +# Track all background PIDs for cleanup +_BG_PIDS=() + +# --------------------------------------------------------------------------- +# TAP output +# --------------------------------------------------------------------------- + +tap_ok() { + _TEST_NUM=$((_TEST_NUM + 1)) + echo "ok $_TEST_NUM - $1" +} + +tap_not_ok() { + _TEST_NUM=$((_TEST_NUM + 1)) + _TEST_FAILURES=$((_TEST_FAILURES + 1)) + echo "not ok $_TEST_NUM - $1" + if [[ -n "${2:-}" ]]; then + echo " # $2" + fi +} + +tap_skip() { + _TEST_NUM=$((_TEST_NUM + 1)) + echo "ok $_TEST_NUM - SKIP $1" +} + +tap_plan() { + echo "1..$1" +} + +tap_exit() { + exit "$_TEST_FAILURES" +} + +# --------------------------------------------------------------------------- +# Setup / Teardown +# 
---------------------------------------------------------------------------
+
+setup_test_env() {
+    mkdir -p "$WARPGATE_TEST_DIR" && TEST_DIR=$(mktemp -d "${WARPGATE_TEST_DIR}/test-XXXXXX")
+    NAS_ROOT="$TEST_DIR/nas-root"
+    CACHE_DIR="$TEST_DIR/cache"
+    TEST_MOUNT="$TEST_DIR/mnt"
+    TEST_CONFIG="$TEST_DIR/config.toml"
+    TEST_SSH_KEY="$TEST_DIR/test_key"
+    TEST_SSH_PUBKEY="$TEST_DIR/test_key.pub"
+
+    mkdir -p "$NAS_ROOT" "$CACHE_DIR" "$TEST_MOUNT" "$TEST_DIR/run"
+
+    # Generate SSH key pair for mock NAS auth
+    ssh-keygen -t ed25519 -f "$TEST_SSH_KEY" -N "" -q
+
+    export TEST_DIR TEST_CONFIG TEST_MOUNT NAS_ROOT CACHE_DIR
+    export TEST_SSH_KEY TEST_SSH_PUBKEY
+}
+
+teardown_test_env() {
+    local exit_code=$?
+
+    # Kill warpgate if running
+    if [[ -n "${WARPGATE_PID:-}" ]] && kill -0 "$WARPGATE_PID" 2>/dev/null; then
+        kill -TERM "$WARPGATE_PID" 2>/dev/null || true
+        wait "$WARPGATE_PID" 2>/dev/null || true
+    fi
+
+    # Kill any tracked background PIDs
+    for pid in ${_BG_PIDS[@]+"${_BG_PIDS[@]}"}; do
+        if kill -0 "$pid" 2>/dev/null; then
+            kill -9 "$pid" 2>/dev/null || true
+            wait "$pid" 2>/dev/null || true
+        fi
+    done
+
+    # Stop mock NAS
+    stop_mock_nas 2>/dev/null || true
+
+    # Clear network injection
+    clear_network_injection 2>/dev/null || true
+
+    # Unmount if still mounted
+    if mountpoint -q "$TEST_MOUNT" 2>/dev/null; then
+        fusermount3 -uz "$TEST_MOUNT" 2>/dev/null || fusermount -uz "$TEST_MOUNT" 2>/dev/null || true
+    fi
+
+    # Clean up test directory
+    if [[ -n "${TEST_DIR:-}" && -d "$TEST_DIR" ]]; then
+        rm -rf "$TEST_DIR"
+    fi
+
+    return $exit_code
+}
+
+# ---------------------------------------------------------------------------
+# Config generation (delegates to config-gen.sh)
+# ---------------------------------------------------------------------------
+
+HARNESS_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+
+gen_config() {
+    source "$HARNESS_DIR/config-gen.sh"
+    _gen_config "$@"
+}
+
+# ---------------------------------------------------------------------------
+# Warpgate process management
+# 
--------------------------------------------------------------------------- + +start_warpgate() { + local log_file="${TEST_DIR}/warpgate.log" + + "$WARPGATE_BIN" run -c "$TEST_CONFIG" > "$log_file" 2>&1 & + WARPGATE_PID=$! + _BG_PIDS+=("$WARPGATE_PID") + + export WARPGATE_PID +} + +start_warpgate_with_args() { + local log_file="${TEST_DIR}/warpgate.log" + local cmd="$1" + shift + + "$WARPGATE_BIN" "$cmd" -c "$TEST_CONFIG" "$@" > "$log_file" 2>&1 & + WARPGATE_PID=$! + _BG_PIDS+=("$WARPGATE_PID") + + export WARPGATE_PID +} + +stop_warpgate() { + if [[ -n "${WARPGATE_PID:-}" ]] && kill -0 "$WARPGATE_PID" 2>/dev/null; then + kill -TERM "$WARPGATE_PID" + wait_for_exit "$WARPGATE_PID" 30 + fi +} + +warpgate_log() { + cat "${TEST_DIR}/warpgate.log" 2>/dev/null || true +} + +# Run a warpgate subcommand (not `run`) and capture output +run_warpgate_cmd() { + local cmd="$1" + shift + "$WARPGATE_BIN" "$cmd" -c "$TEST_CONFIG" "$@" 2>&1 +} + +# --------------------------------------------------------------------------- +# Wait helpers +# --------------------------------------------------------------------------- + +wait_for_mount() { + local timeout="${1:-30}" + local deadline=$((SECONDS + timeout)) + + while [[ $SECONDS -lt $deadline ]]; do + if mountpoint -q "$TEST_MOUNT" 2>/dev/null; then + return 0 + fi + sleep 0.5 + done + + echo "TIMEOUT: mount not ready after ${timeout}s" >&2 + return 1 +} + +wait_for_rc_api() { + local timeout="${1:-10}" + local deadline=$((SECONDS + timeout)) + + while [[ $SECONDS -lt $deadline ]]; do + if curl -sf "http://127.0.0.1:5572/core/stats" -d '{}' > /dev/null 2>&1; then + return 0 + fi + sleep 0.5 + done + + echo "TIMEOUT: RC API not ready after ${timeout}s" >&2 + return 1 +} + +wait_for_file() { + local path="$1" + local timeout="${2:-10}" + local deadline=$((SECONDS + timeout)) + + while [[ $SECONDS -lt $deadline ]]; do + if [[ -f "$path" ]]; then + return 0 + fi + sleep 0.5 + done + + echo "TIMEOUT: file $path not found after ${timeout}s" 
>&2 + return 1 +} + +wait_for_exit() { + local pid="$1" + local timeout="${2:-30}" + local deadline=$((SECONDS + timeout)) + + while [[ $SECONDS -lt $deadline ]]; do + if ! kill -0 "$pid" 2>/dev/null; then + return 0 + fi + sleep 0.5 + done + + echo "TIMEOUT: PID $pid did not exit after ${timeout}s" >&2 + return 1 +} + +wait_for_dirty_zero() { + local timeout="${1:-60}" + local deadline=$((SECONDS + timeout)) + + while [[ $SECONDS -lt $deadline ]]; do + local dirty + dirty=$(get_dirty_count 2>/dev/null) || true + if [[ "$dirty" == "0" ]]; then + return 0 + fi + sleep 2 + done + + echo "TIMEOUT: dirty count did not reach 0 after ${timeout}s" >&2 + return 1 +} + +wait_for_log_line() { + local pattern="$1" + local timeout="${2:-30}" + local log_file="${TEST_DIR}/warpgate.log" + local deadline=$((SECONDS + timeout)) + + while [[ $SECONDS -lt $deadline ]]; do + if grep -q "$pattern" "$log_file" 2>/dev/null; then + return 0 + fi + sleep 0.5 + done + + echo "TIMEOUT: log pattern '$pattern' not found after ${timeout}s" >&2 + return 1 +} + +# --------------------------------------------------------------------------- +# RC API helpers +# --------------------------------------------------------------------------- + +rc_api() { + local endpoint="$1" + local json="${2:-{}}" + curl -sf "http://127.0.0.1:5572/$endpoint" -d "$json" 2>/dev/null +} + +get_dirty_count() { + local stats + stats=$(rc_api "vfs/stats") + local in_progress queued + in_progress=$(echo "$stats" | jq -r '.diskCache.uploadsInProgress // 0') + queued=$(echo "$stats" | jq -r '.diskCache.uploadsQueued // 0') + echo $((in_progress + queued)) +} + +# --------------------------------------------------------------------------- +# Assertions +# --------------------------------------------------------------------------- + +assert_file_content() { + local path="$1" + local expected="$2" + + if [[ ! 
-f "$path" ]]; then + echo "FAIL: file does not exist: $path" >&2 + return 1 + fi + + if [[ -f "$expected" ]]; then + # Compare against another file + if ! diff -q "$path" "$expected" > /dev/null 2>&1; then + echo "FAIL: file content mismatch: $path vs $expected" >&2 + return 1 + fi + else + # Compare against a string + local actual + actual=$(cat "$path") + if [[ "$actual" != "$expected" ]]; then + echo "FAIL: file content mismatch in $path" >&2 + echo " expected: $expected" >&2 + echo " actual: $actual" >&2 + return 1 + fi + fi + return 0 +} + +assert_file_exists() { + local path="$1" + if [[ ! -f "$path" ]]; then + echo "FAIL: file does not exist: $path" >&2 + return 1 + fi + return 0 +} + +assert_dir_exists() { + local path="$1" + if [[ ! -d "$path" ]]; then + echo "FAIL: directory does not exist: $path" >&2 + return 1 + fi + return 0 +} + +assert_cached() { + local relative_path="$1" + local cache_file="$CACHE_DIR/vfs/nas/$relative_path" + + if [[ ! -f "$cache_file" ]]; then + echo "FAIL: not cached: $relative_path (expected at $cache_file)" >&2 + return 1 + fi + return 0 +} + +assert_not_cached() { + local relative_path="$1" + local cache_file="$CACHE_DIR/vfs/nas/$relative_path" + + if [[ -f "$cache_file" ]]; then + echo "FAIL: unexpectedly cached: $relative_path" >&2 + return 1 + fi + return 0 +} + +assert_dirty_count() { + local expected="$1" + local actual + actual=$(get_dirty_count) + if [[ "$actual" != "$expected" ]]; then + echo "FAIL: dirty count mismatch: expected=$expected actual=$actual" >&2 + return 1 + fi + return 0 +} + +assert_exit_code() { + local pid="$1" + local expected="$2" + + local actual=0 + wait "$pid" 2>/dev/null || actual=$? + if [[ "$actual" != "$expected" ]]; then + echo "FAIL: exit code mismatch for PID $pid: expected=$expected actual=$actual" >&2 + return 1 + fi + return 0 +} + +assert_mounted() { + if ! 
mountpoint -q "$TEST_MOUNT" 2>/dev/null; then
+        echo "FAIL: $TEST_MOUNT is not mounted" >&2
+        return 1
+    fi
+    return 0
+}
+
+assert_not_mounted() {
+    if mountpoint -q "$TEST_MOUNT" 2>/dev/null; then
+        echo "FAIL: $TEST_MOUNT is still mounted" >&2
+        return 1
+    fi
+    return 0
+}
+
+assert_no_orphan_rclone() {
+    local count
+    count=$(pgrep -c -f "rclone.*$TEST_MOUNT" 2>/dev/null) || true
+    if [[ "$count" -gt 0 ]]; then
+        echo "FAIL: orphan rclone processes found for $TEST_MOUNT" >&2
+        return 1
+    fi
+    return 0
+}
+
+assert_output_contains() {
+    local output="$1"
+    local pattern="$2"
+    if ! echo "$output" | grep -q "$pattern"; then
+        echo "FAIL: output does not contain '$pattern'" >&2
+        echo "  output: $output" >&2
+        return 1
+    fi
+    return 0
+}
+
+assert_output_not_contains() {
+    local output="$1"
+    local pattern="$2"
+    if echo "$output" | grep -q "$pattern"; then
+        echo "FAIL: output unexpectedly contains '$pattern'" >&2
+        return 1
+    fi
+    return 0
+}
+
+assert_log_contains() {
+    local pattern="$1"
+    if ! 
grep -q "$pattern" "$TEST_DIR/warpgate.log" 2>/dev/null; then + echo "FAIL: log does not contain '$pattern'" >&2 + return 1 + fi + return 0 +} + +assert_log_not_contains() { + local pattern="$1" + if grep -q "$pattern" "$TEST_DIR/warpgate.log" 2>/dev/null; then + echo "FAIL: log unexpectedly contains '$pattern'" >&2 + return 1 + fi + return 0 +} + +assert_log_order() { + # Verify that pattern1 appears before pattern2 in the log + local pattern1="$1" + local pattern2="$2" + local log="$TEST_DIR/warpgate.log" + + local line1 line2 + line1=$(grep -n "$pattern1" "$log" 2>/dev/null | head -1 | cut -d: -f1) + line2=$(grep -n "$pattern2" "$log" 2>/dev/null | head -1 | cut -d: -f1) + + if [[ -z "$line1" ]]; then + echo "FAIL: pattern '$pattern1' not found in log" >&2 + return 1 + fi + if [[ -z "$line2" ]]; then + echo "FAIL: pattern '$pattern2' not found in log" >&2 + return 1 + fi + if [[ "$line1" -ge "$line2" ]]; then + echo "FAIL: '$pattern1' (line $line1) does not appear before '$pattern2' (line $line2)" >&2 + return 1 + fi + return 0 +} + +# --------------------------------------------------------------------------- +# Network fault injection (requires root + network namespace) +# --------------------------------------------------------------------------- + +inject_network_down() { + ip netns exec "$MOCK_NAS_NS" ip link set veth-nas down 2>/dev/null || \ + ip link set veth-wg down 2>/dev/null || true +} + +inject_network_up() { + ip netns exec "$MOCK_NAS_NS" ip link set veth-nas up 2>/dev/null || true + ip link set veth-wg up 2>/dev/null || true +} + +inject_latency() { + local ms="$1" + # Remove existing qdisc first + tc qdisc del dev veth-wg root 2>/dev/null || true + tc qdisc add dev veth-wg root netem delay "${ms}ms" +} + +inject_packet_loss() { + local pct="$1" + tc qdisc del dev veth-wg root 2>/dev/null || true + tc qdisc add dev veth-wg root netem loss "${pct}%" +} + +clear_network_injection() { + tc qdisc del dev veth-wg root 2>/dev/null || true +} + +# 
--------------------------------------------------------------------------- +# Test file creation +# --------------------------------------------------------------------------- + +create_test_file() { + local path="$1" + local size_kb="${2:-1}" + + local full_path + if [[ "$path" == /* ]]; then + full_path="$path" + else + full_path="$NAS_ROOT/$path" + fi + + mkdir -p "$(dirname "$full_path")" + dd if=/dev/urandom of="$full_path" bs=1K count="$size_kb" 2>/dev/null +} + +create_test_file_content() { + local path="$1" + local content="$2" + + local full_path + if [[ "$path" == /* ]]; then + full_path="$path" + else + full_path="$NAS_ROOT/$path" + fi + + mkdir -p "$(dirname "$full_path")" + echo -n "$content" > "$full_path" +} + +# --------------------------------------------------------------------------- +# Power loss simulation +# --------------------------------------------------------------------------- + +simulate_power_loss() { + # Kill all warpgate-related processes with SIGKILL + if [[ -n "${WARPGATE_PID:-}" ]] && kill -0 "$WARPGATE_PID" 2>/dev/null; then + # Kill the entire process group + kill -9 -"$WARPGATE_PID" 2>/dev/null || kill -9 "$WARPGATE_PID" 2>/dev/null || true + fi + + # Also kill any orphaned rclone/smbd processes for this test + pkill -9 -f "rclone.*$TEST_MOUNT" 2>/dev/null || true + pkill -9 -f "smbd.*$TEST_DIR" 2>/dev/null || true + + # Sync filesystem + sync + + # Wait briefly for processes to die + sleep 1 + + WARPGATE_PID="" +} + +# --------------------------------------------------------------------------- +# Small cache disk (for cache-full tests) +# --------------------------------------------------------------------------- + +setup_small_cache_disk() { + local size_mb="${1:-10}" + local img="$TEST_DIR/cache-disk.img" + local loop_dev + + fallocate -l "${size_mb}M" "$img" + loop_dev=$(losetup --find --show "$img") + mkfs.ext4 -q "$loop_dev" + mount "$loop_dev" "$CACHE_DIR" + + echo "$loop_dev" > "$TEST_DIR/cache-loop-dev" +} + 
+teardown_small_cache_disk() { + if [[ -f "$TEST_DIR/cache-loop-dev" ]]; then + local loop_dev + loop_dev=$(cat "$TEST_DIR/cache-loop-dev") + umount "$CACHE_DIR" 2>/dev/null || true + losetup -d "$loop_dev" 2>/dev/null || true + rm -f "$TEST_DIR/cache-disk.img" "$TEST_DIR/cache-loop-dev" + fi +} + +# --------------------------------------------------------------------------- +# Utility: check if running as root +# --------------------------------------------------------------------------- + +require_root() { + if [[ $EUID -ne 0 ]]; then + echo "SKIP: test requires root" >&2 + exit 0 + fi +} + +require_command() { + local cmd="$1" + if ! command -v "$cmd" > /dev/null 2>&1; then + echo "SKIP: required command not found: $cmd" >&2 + exit 0 + fi +} + +require_long_tests() { + if [[ "$WARPGATE_TEST_LONG" != "1" ]]; then + echo "SKIP: slow test (set WARPGATE_TEST_LONG=1)" >&2 + exit 0 + fi +} + +# --------------------------------------------------------------------------- +# Process detection +# --------------------------------------------------------------------------- + +is_warpgate_running() { + [[ -n "${WARPGATE_PID:-}" ]] && kill -0 "$WARPGATE_PID" 2>/dev/null +} + +count_smbd_processes() { + pgrep -c -f "smbd.*--configfile" 2>/dev/null || echo 0 +} diff --git a/tests/harness/mock-nas.sh b/tests/harness/mock-nas.sh new file mode 100755 index 0000000..750412b --- /dev/null +++ b/tests/harness/mock-nas.sh @@ -0,0 +1,166 @@ +#!/usr/bin/env bash +# Warpgate Integration Test — Mock NAS (SFTP server in network namespace) +# +# Creates a Linux network namespace with a veth pair and runs an SFTP-only +# SSH daemon. This lets tests control "network" behavior (down/up/latency) +# without affecting the host. 
+#
+# Topology:
+#   Host namespace                      nas-sim namespace
+#   veth-wg (10.99.0.1/24) <---> veth-nas (10.99.0.2/24)
+#                                  └─ sshd (SFTP on :22)
+#                                  └─ NAS root: $NAS_ROOT
+
+set -euo pipefail
+
+MOCK_NAS_NS="${MOCK_NAS_NS:-nas-sim}"
+MOCK_NAS_IP="${MOCK_NAS_IP:-10.99.0.2}"
+HOST_IP="${HOST_IP:-10.99.0.1}"
+MOCK_NAS_SSHD_PID=""
+
+start_mock_nas() {
+    require_root
+
+    local sshd_config="$TEST_DIR/sshd_config"
+    local host_key="$TEST_DIR/ssh_host_key"
+    local auth_keys="$TEST_DIR/authorized_keys"
+
+    # Generate host key for sshd
+    ssh-keygen -t ed25519 -f "$host_key" -N "" -q
+
+    # Set up authorized_keys from the test key
+    cp "$TEST_SSH_PUBKEY" "$auth_keys"
+    chmod 600 "$auth_keys"
+
+    # Create network namespace
+    ip netns add "$MOCK_NAS_NS" 2>/dev/null || true
+
+    # Create veth pair
+    ip link add veth-wg type veth peer name veth-nas 2>/dev/null || true
+
+    # Move one end into the namespace
+    ip link set veth-nas netns "$MOCK_NAS_NS"
+
+    # Configure host side
+    ip addr add "$HOST_IP/24" dev veth-wg 2>/dev/null || true
+    ip link set veth-wg up
+
+    # Configure namespace side
+    ip netns exec "$MOCK_NAS_NS" ip addr add "$MOCK_NAS_IP/24" dev veth-nas 2>/dev/null || true
+    ip netns exec "$MOCK_NAS_NS" ip link set veth-nas up
+    ip netns exec "$MOCK_NAS_NS" ip link set lo up
+
+    # Write sshd config (SFTP-only, no password auth, restricted to NAS_ROOT)
+    # NOTE(review): the heredoc and sshd launch below were garbled in transit
+    # (stripped between "<" and ">") and have been reconstructed from the
+    # surviving fragments — confirm against the original patch.
+    cat > "$sshd_config" <<EOF
+Port 22
+ListenAddress $MOCK_NAS_IP
+HostKey $host_key
+PidFile $TEST_DIR/sshd.pid
+AuthorizedKeysFile $auth_keys
+PasswordAuthentication no
+PubkeyAuthentication yes
+Subsystem sftp internal-sftp -d $NAS_ROOT
+EOF
+
+    # Start sshd inside the namespace
+    ip netns exec "$MOCK_NAS_NS" /usr/sbin/sshd -f "$sshd_config" -D &
+    MOCK_NAS_SSHD_PID=$!
+
+    # Wait for sshd to accept connections on port 22
+    local _try
+    for _try in $(seq 1 20); do
+        if timeout 1 bash -c ": < /dev/tcp/$MOCK_NAS_IP/22" 2>/dev/null; then
+            break
+        fi
+        sleep 0.5
+    done
+
+    export MOCK_NAS_SSHD_PID
+}
+
+stop_mock_nas() {
+    # Kill sshd
+    if [[ -n "${MOCK_NAS_SSHD_PID:-}" ]] && kill -0 "$MOCK_NAS_SSHD_PID" 2>/dev/null; then
+        kill "$MOCK_NAS_SSHD_PID" 2>/dev/null || true
+        wait "$MOCK_NAS_SSHD_PID" 2>/dev/null || true
+    fi
+
+    # Also kill by PID file
+    if [[ -f "${TEST_DIR:-}/sshd.pid" ]]; then
+        local pid
+        pid=$(cat "$TEST_DIR/sshd.pid" 2>/dev/null || true)
+        if [[ -n "$pid" ]] && kill -0 "$pid" 2>/dev/null; then
+            kill "$pid" 2>/dev/null || true
+        fi
+    fi
+
+    # Clean up veth pair (deleting one end 
removes both) + ip link del veth-wg 2>/dev/null || true + + # Delete network namespace + ip netns del "$MOCK_NAS_NS" 2>/dev/null || true + + MOCK_NAS_SSHD_PID="" +} + +# Verify mock NAS is reachable via SFTP +verify_mock_nas() { + sftp -i "$TEST_SSH_KEY" \ + -o StrictHostKeyChecking=no \ + -o UserKnownHostsFile=/dev/null \ + -o ConnectTimeout=5 \ + -P 22 \ + "root@$MOCK_NAS_IP" <<< "ls" > /dev/null 2>&1 +} + +# Create a file directly on the mock NAS filesystem +nas_create_file() { + local path="$1" + local size_kb="${2:-1}" + + local full_path="$NAS_ROOT/$path" + mkdir -p "$(dirname "$full_path")" + dd if=/dev/urandom of="$full_path" bs=1K count="$size_kb" 2>/dev/null +} + +# Create a file with specific content on the mock NAS +nas_create_file_content() { + local path="$1" + local content="$2" + + local full_path="$NAS_ROOT/$path" + mkdir -p "$(dirname "$full_path")" + echo -n "$content" > "$full_path" +} + +# Read a file from the mock NAS +nas_read_file() { + local path="$1" + cat "$NAS_ROOT/$path" +} + +# Check if a file exists on the mock NAS +nas_file_exists() { + local path="$1" + [[ -f "$NAS_ROOT/$path" ]] +} + +# Get file checksum on the mock NAS +nas_file_checksum() { + local path="$1" + md5sum "$NAS_ROOT/$path" | awk '{print $1}' +} diff --git a/tests/run-all.sh b/tests/run-all.sh new file mode 100755 index 0000000..54a28fc --- /dev/null +++ b/tests/run-all.sh @@ -0,0 +1,151 @@ +#!/usr/bin/env bash +# Warpgate Integration Test Runner +# +# Runs all test scripts across all categories and outputs TAP format results. 
+# +# Usage: +# sudo ./tests/run-all.sh # run all tests +# sudo WARPGATE_TEST_LONG=1 ./tests/run-all.sh # include slow tests +# sudo ./tests/run-all.sh 05-cache # run only one category +# +# Environment: +# WARPGATE_BIN Path to warpgate binary (default: auto-detect) +# WARPGATE_TEST_DIR Temp directory for tests (default: /tmp/warpgate-test) +# WARPGATE_TEST_LONG Set to 1 to run slow tests +# WARPGATE_TEST_BTRFS Path to btrfs block device for fs tests + +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)" + +# Auto-detect warpgate binary +if [[ -z "${WARPGATE_BIN:-}" ]]; then + if [[ -x "$PROJECT_ROOT/warpgate/target/release/warpgate" ]]; then + export WARPGATE_BIN="$PROJECT_ROOT/warpgate/target/release/warpgate" + elif [[ -x "$PROJECT_ROOT/warpgate/target/debug/warpgate" ]]; then + export WARPGATE_BIN="$PROJECT_ROOT/warpgate/target/debug/warpgate" + else + echo "ERROR: warpgate binary not found. Build with: cargo build --release" >&2 + echo " Or set WARPGATE_BIN=/path/to/warpgate" >&2 + exit 1 + fi +fi + +echo "# Warpgate Integration Test Suite" +echo "# Binary: $WARPGATE_BIN" +echo "# Date: $(date -Iseconds)" +echo "# User: $(whoami)" +echo "#" + +# Ensure test temp directory exists +mkdir -p "${WARPGATE_TEST_DIR:-/tmp/warpgate-test}" + +# Test categories in execution order +CATEGORIES=( + 01-config + 02-lifecycle + 03-signal + 04-supervision + 05-cache + 06-writeback + 07-network + 08-crash-recovery + 09-cli +) + +# Filter to specific category if requested +if [[ -n "${1:-}" ]]; then + found=0 + for cat in "${CATEGORIES[@]}"; do + if [[ "$cat" == "$1" ]]; then + found=1 + break + fi + done + if [[ $found -eq 0 ]]; then + echo "ERROR: unknown category: $1" >&2 + echo "Available: ${CATEGORIES[*]}" >&2 + exit 1 + fi + CATEGORIES=("$1") +fi + +# Collect all test scripts +ALL_TESTS=() +for category in "${CATEGORIES[@]}"; do + category_dir="$SCRIPT_DIR/$category" + if [[ ! 
-d "$category_dir" ]]; then + continue + fi + while IFS= read -r -d '' test_script; do + ALL_TESTS+=("$test_script") + done < <(find "$category_dir" -name 'test-*.sh' -type f -print0 | sort -z) +done + +total=${#ALL_TESTS[@]} +if [[ $total -eq 0 ]]; then + echo "No tests found." + exit 0 +fi + +# TAP header +echo "1..$total" + +passed=0 +failed=0 +skipped=0 +test_num=0 + +for test_script in "${ALL_TESTS[@]}"; do + test_num=$((test_num + 1)) + + # Extract category and test name + rel_path="${test_script#$SCRIPT_DIR/}" + test_name="${rel_path%.sh}" + + # Run the test, capturing output and exit code + test_output="" + test_exit=0 + test_start=$(date +%s) + + test_output=$(bash "$test_script" 2>&1) || test_exit=$? + + test_end=$(date +%s) + test_duration=$((test_end - test_start)) + + if echo "$test_output" | grep -qi "^SKIP"; then + echo "ok $test_num - $test_name # SKIP ${test_duration}s" + skipped=$((skipped + 1)) + elif [[ $test_exit -eq 0 ]]; then + echo "ok $test_num - $test_name # ${test_duration}s" + passed=$((passed + 1)) + else + echo "not ok $test_num - $test_name # ${test_duration}s" + failed=$((failed + 1)) + + # Print failure details as TAP diagnostics + while IFS= read -r line; do + echo " # $line" + done <<< "$test_output" + fi +done + +# Summary +echo "#" +echo "# ==============================" +echo "# Test Summary" +echo "# ==============================" +echo "# Total: $total" +echo "# Passed: $passed" +echo "# Failed: $failed" +echo "# Skipped: $skipped" +echo "#" + +if [[ $failed -gt 0 ]]; then + echo "# RESULT: FAIL" + exit 1 +else + echo "# RESULT: PASS" + exit 0 +fi diff --git a/warpgate/src/cli/bwlimit.rs b/warpgate/src/cli/bwlimit.rs index 2ec1744..4371b8e 100644 --- a/warpgate/src/cli/bwlimit.rs +++ b/warpgate/src/cli/bwlimit.rs @@ -57,3 +57,34 @@ fn format_bytes(bytes: u64) -> String { format!("{} B", bytes) } } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_format_bytes_zero() { + assert_eq!(format_bytes(0), "0 B"); + 
} + + #[test] + fn test_format_bytes_kib() { + assert_eq!(format_bytes(1024), "1.0 KiB"); + } + + #[test] + fn test_format_bytes_mib() { + assert_eq!(format_bytes(1048576), "1.0 MiB"); + } + + #[test] + fn test_format_bytes_gib() { + assert_eq!(format_bytes(1073741824), "1.0 GiB"); + } + + #[test] + fn test_format_bytes_mixed() { + assert_eq!(format_bytes(10485760), "10.0 MiB"); // 10 MiB + assert_eq!(format_bytes(52428800), "50.0 MiB"); // 50 MiB + } +} diff --git a/warpgate/src/cli/cache.rs b/warpgate/src/cli/cache.rs index 87b0af1..5f05c05 100644 --- a/warpgate/src/cli/cache.rs +++ b/warpgate/src/cli/cache.rs @@ -88,3 +88,28 @@ fn format_bytes(bytes: u64) -> String { format!("{} B", bytes) } } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_format_bytes_zero() { + assert_eq!(format_bytes(0), "0 B"); + } + + #[test] + fn test_format_bytes_kib() { + assert_eq!(format_bytes(2048), "2.0 KiB"); + } + + #[test] + fn test_format_bytes_mib() { + assert_eq!(format_bytes(5242880), "5.0 MiB"); + } + + #[test] + fn test_format_bytes_gib() { + assert_eq!(format_bytes(10737418240), "10.0 GiB"); + } +} diff --git a/warpgate/src/cli/speed_test.rs b/warpgate/src/cli/speed_test.rs index ac10a09..d70099f 100644 --- a/warpgate/src/cli/speed_test.rs +++ b/warpgate/src/cli/speed_test.rs @@ -115,3 +115,33 @@ fn format_bytes(bytes: u64) -> String { format!("{} B", bytes) } } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_format_bytes_zero() { + assert_eq!(format_bytes(0), "0 B"); + } + + #[test] + fn test_format_bytes_kib() { + assert_eq!(format_bytes(1024), "1.0 KiB"); + } + + #[test] + fn test_format_bytes_mib() { + assert_eq!(format_bytes(1048576), "1.0 MiB"); + } + + #[test] + fn test_format_bytes_gib() { + assert_eq!(format_bytes(1073741824), "1.0 GiB"); + } + + #[test] + fn test_size_constant() { + assert_eq!(TEST_SIZE, 10 * 1024 * 1024); + } +} diff --git a/warpgate/src/cli/status.rs b/warpgate/src/cli/status.rs index f388036..5c983e8 
100644 --- a/warpgate/src/cli/status.rs +++ b/warpgate/src/cli/status.rs @@ -70,3 +70,40 @@ fn format_bytes(bytes: u64) -> String { format!("{} B", bytes) } } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_format_bytes_zero() { + assert_eq!(format_bytes(0), "0 B"); + } + + #[test] + fn test_format_bytes_bytes() { + assert_eq!(format_bytes(512), "512 B"); + } + + #[test] + fn test_format_bytes_kib() { + assert_eq!(format_bytes(1024), "1.0 KiB"); + assert_eq!(format_bytes(1536), "1.5 KiB"); + } + + #[test] + fn test_format_bytes_mib() { + assert_eq!(format_bytes(1048576), "1.0 MiB"); + } + + #[test] + fn test_format_bytes_gib() { + assert_eq!(format_bytes(1073741824), "1.0 GiB"); + } + + #[test] + fn test_format_bytes_boundary() { + assert_eq!(format_bytes(1023), "1023 B"); + assert_eq!(format_bytes(1024), "1.0 KiB"); + } +} diff --git a/warpgate/src/cli/warmup.rs b/warpgate/src/cli/warmup.rs index 863ed1c..b4f3d7b 100644 --- a/warpgate/src/cli/warmup.rs +++ b/warpgate/src/cli/warmup.rs @@ -109,3 +109,88 @@ fn is_cached(config: &Config, warmup_path: &str, relative_path: &str) -> bool { .join(relative_path); cache_path.exists() } + +#[cfg(test)] +mod tests { + use super::*; + + fn test_config() -> Config { + toml::from_str( + r#" +[connection] +nas_host = "10.0.0.1" +nas_user = "admin" +remote_path = "/photos" + +[cache] +dir = "/tmp/warpgate-test-cache" + +[read] +[bandwidth] +[writeback] +[directory_cache] +[protocols] +[mount] +"#, + ) + .unwrap() + } + + #[test] + fn test_is_cached_nonexistent_file() { + let config = test_config(); + // File doesn't exist on disk, so should return false + assert!(!is_cached(&config, "2024", "IMG_001.jpg")); + } + + #[test] + fn test_is_cached_deep_path() { + let config = test_config(); + assert!(!is_cached(&config, "Images/2024/January", "photo.cr3")); + } + + #[test] + fn test_is_cached_path_construction() { + // Verify the path is constructed correctly by checking the expected + // cache path: 
cache_dir/vfs/nas/<remote_path>/<warmup_path>/<relative_path>
+        let config = test_config();
+        let expected = std::path::PathBuf::from("/tmp/warpgate-test-cache")
+            .join("vfs")
+            .join("nas")
+            .join("photos") // "/photos" trimmed of leading /
+            .join("2024")
+            .join("IMG_001.jpg");
+
+        // Reconstruct the same logic as is_cached
+        let cache_path = config
+            .cache
+            .dir
+            .join("vfs")
+            .join("nas")
+            .join(config.connection.remote_path.trim_start_matches('/'))
+            .join("2024")
+            .join("IMG_001.jpg");
+
+        assert_eq!(cache_path, expected);
+    }
+
+    #[test]
+    fn test_is_cached_remote_path_trimming() {
+        let mut config = test_config();
+        config.connection.remote_path = "/volume1/photos".into();
+
+        let cache_path = config
+            .cache
+            .dir
+            .join("vfs")
+            .join("nas")
+            .join(config.connection.remote_path.trim_start_matches('/'))
+            .join("2024")
+            .join("file.jpg");
+
+        // The leading "/" is stripped, so "nas" is followed by "volume1" (not "/volume1")
+        assert!(cache_path.to_string_lossy().contains("nas/volume1/photos"));
+        // No double slash from unstripped leading /
+        assert!(!cache_path.to_string_lossy().contains("nas//volume1"));
+    }
+}
diff --git a/warpgate/src/config.rs b/warpgate/src/config.rs
index 3e9be59..b182ca4 100644
--- a/warpgate/src/config.rs
+++ b/warpgate/src/config.rs
@@ -242,3 +242,377 @@ impl Config {
             .to_string()
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    fn minimal_toml() -> &'static str {
+        r#"
+[connection]
+nas_host = "10.0.0.1"
+nas_user = "admin"
+remote_path = "/photos"
+
+[cache]
+dir = "/tmp/cache"
+
+[read]
+[bandwidth]
+[writeback]
+[directory_cache]
+[protocols]
+[mount]
+"#
+    }
+
+    #[test]
+    fn test_config_load_minimal_defaults() {
+        let config: Config = toml::from_str(minimal_toml()).unwrap();
+
+        // Connection defaults
+        assert_eq!(config.connection.nas_host, "10.0.0.1");
+        assert_eq!(config.connection.nas_user, "admin");
+        assert_eq!(config.connection.remote_path, "/photos");
+        assert_eq!(config.connection.sftp_port, 22);
+        
assert_eq!(config.connection.sftp_connections, 8); + assert!(config.connection.nas_pass.is_none()); + assert!(config.connection.nas_key_file.is_none()); + + // Cache defaults + assert_eq!(config.cache.dir, PathBuf::from("/tmp/cache")); + assert_eq!(config.cache.max_size, "200G"); + assert_eq!(config.cache.max_age, "720h"); + assert_eq!(config.cache.min_free, "10G"); + + // Read defaults + assert_eq!(config.read.chunk_size, "256M"); + assert_eq!(config.read.chunk_limit, "1G"); + assert_eq!(config.read.read_ahead, "512M"); + assert_eq!(config.read.buffer_size, "256M"); + + // Bandwidth defaults + assert_eq!(config.bandwidth.limit_up, "0"); + assert_eq!(config.bandwidth.limit_down, "0"); + assert!(config.bandwidth.adaptive); + + // Writeback defaults + assert_eq!(config.writeback.write_back, "5s"); + assert_eq!(config.writeback.transfers, 4); + + // Directory cache default + assert_eq!(config.directory_cache.cache_time, "1h"); + + // Protocol defaults + assert!(config.protocols.enable_smb); + assert!(!config.protocols.enable_nfs); + assert!(!config.protocols.enable_webdav); + assert_eq!(config.protocols.nfs_allowed_network, "192.168.0.0/24"); + assert_eq!(config.protocols.webdav_port, 8080); + + // Mount default + assert_eq!(config.mount.point, PathBuf::from("/mnt/nas-photos")); + + // Warmup default + assert!(config.warmup.auto); + assert!(config.warmup.rules.is_empty()); + } + + #[test] + fn test_config_full_toml() { + let toml_str = r#" +[connection] +nas_host = "192.168.1.100" +nas_user = "photographer" +nas_pass = "secret123" +nas_key_file = "/root/.ssh/id_rsa" +remote_path = "/volume1/photos" +sftp_port = 2222 +sftp_connections = 16 + +[cache] +dir = "/mnt/ssd/cache" +max_size = "500G" +max_age = "1440h" +min_free = "20G" + +[read] +chunk_size = "512M" +chunk_limit = "2G" +read_ahead = "1G" +buffer_size = "512M" + +[bandwidth] +limit_up = "10M" +limit_down = "50M" +adaptive = false + +[writeback] +write_back = "10s" +transfers = 8 + +[directory_cache] 
+cache_time = "30m"
+
+[protocols]
+enable_smb = true
+enable_nfs = true
+enable_webdav = true
+nfs_allowed_network = "10.0.0.0/8"
+webdav_port = 9090
+
+[mount]
+point = "/mnt/nas"
+
+[warmup]
+auto = false
+
+[[warmup.rules]]
+path = "2024"
+newer_than = "7d"
+"#;
+        let config: Config = toml::from_str(toml_str).unwrap();
+
+        assert_eq!(config.connection.nas_host, "192.168.1.100");
+        assert_eq!(config.connection.nas_user, "photographer");
+        assert_eq!(config.connection.nas_pass.as_deref(), Some("secret123"));
+        assert_eq!(
+            config.connection.nas_key_file.as_deref(),
+            Some("/root/.ssh/id_rsa")
+        );
+        assert_eq!(config.connection.remote_path, "/volume1/photos");
+        assert_eq!(config.connection.sftp_port, 2222);
+        assert_eq!(config.connection.sftp_connections, 16);
+
+        assert_eq!(config.cache.max_size, "500G");
+        assert_eq!(config.cache.max_age, "1440h");
+        assert_eq!(config.cache.min_free, "20G");
+
+        assert_eq!(config.read.chunk_size, "512M");
+        assert_eq!(config.read.buffer_size, "512M");
+
+        assert_eq!(config.bandwidth.limit_up, "10M");
+        assert_eq!(config.bandwidth.limit_down, "50M");
+        assert!(!config.bandwidth.adaptive);
+
+        assert_eq!(config.writeback.write_back, "10s");
+        assert_eq!(config.writeback.transfers, 8);
+
+        assert_eq!(config.directory_cache.cache_time, "30m");
+
+        assert!(config.protocols.enable_nfs);
+        assert!(config.protocols.enable_webdav);
+        assert_eq!(config.protocols.webdav_port, 9090);
+
+        assert_eq!(config.mount.point, PathBuf::from("/mnt/nas"));
+
+        assert!(!config.warmup.auto);
+        assert_eq!(config.warmup.rules.len(), 1);
+        assert_eq!(config.warmup.rules[0].path, "2024");
+        assert_eq!(config.warmup.rules[0].newer_than.as_deref(), Some("7d"));
+    }
+
+    #[test]
+    fn test_config_missing_required_field() {
+        let toml_str = r#"
+[connection]
+nas_user = "admin"
+remote_path = "/photos"
+
+[cache]
+dir = "/tmp/cache"
+
+[read]
+[bandwidth]
+[writeback]
+[directory_cache]
+[protocols]
+[mount]
+"#;
+        let result = toml::from_str::<Config>(toml_str);
+        
assert!(result.is_err());
+        let err = result.unwrap_err().to_string();
+        assert!(
+            err.contains("missing"),
+            "Expected 'missing' in error: {err}"
+        );
+    }
+
+    #[test]
+    fn test_config_bad_toml() {
+        let result = toml::from_str::<Config>("this is not valid toml {{{}}}");
+        assert!(result.is_err());
+    }
+
+    #[test]
+    fn test_config_extreme_values() {
+        let toml_str = r#"
+[connection]
+nas_host = "10.0.0.1"
+nas_user = "admin"
+remote_path = "/photos"
+sftp_connections = 999
+
+[cache]
+dir = "/tmp/cache"
+max_size = "999T"
+
+[read]
+[bandwidth]
+[writeback]
+[directory_cache]
+[protocols]
+[mount]
+"#;
+        let config: Config = toml::from_str(toml_str).unwrap();
+        assert_eq!(config.connection.sftp_connections, 999);
+        assert_eq!(config.cache.max_size, "999T");
+    }
+
+    #[test]
+    fn test_config_missing_cache_section() {
+        let toml_str = r#"
+[connection]
+nas_host = "10.0.0.1"
+nas_user = "admin"
+remote_path = "/photos"
+
+[read]
+[bandwidth]
+[writeback]
+[directory_cache]
+[protocols]
+[mount]
+"#;
+        let result = toml::from_str::<Config>(toml_str);
+        assert!(result.is_err());
+    }
+
+    #[test]
+    fn test_config_serialization_roundtrip() {
+        let config: Config = toml::from_str(minimal_toml()).unwrap();
+        let serialized = toml::to_string(&config).unwrap();
+        let config2: Config = toml::from_str(&serialized).unwrap();
+        assert_eq!(config.connection.nas_host, config2.connection.nas_host);
+        assert_eq!(config.cache.max_size, config2.cache.max_size);
+        assert_eq!(config.writeback.transfers, config2.writeback.transfers);
+    }
+
+    #[test]
+    fn test_default_sftp_port() {
+        assert_eq!(default_sftp_port(), 22);
+    }
+
+    #[test]
+    fn test_default_sftp_connections() {
+        assert_eq!(default_sftp_connections(), 8);
+    }
+
+    #[test]
+    fn test_default_cache_max_size() {
+        assert_eq!(default_cache_max_size(), "200G");
+    }
+
+    #[test]
+    fn test_default_cache_max_age() {
+        assert_eq!(default_cache_max_age(), "720h");
+    }
+
+    #[test]
+    fn test_default_cache_min_free() {
+        assert_eq!(default_cache_min_free(), 
"10G"); + } + + #[test] + fn test_default_read_chunk_size() { + assert_eq!(default_read_chunk_size(), "256M"); + } + + #[test] + fn test_default_read_chunk_limit() { + assert_eq!(default_read_chunk_limit(), "1G"); + } + + #[test] + fn test_default_read_ahead() { + assert_eq!(default_read_ahead(), "512M"); + } + + #[test] + fn test_default_buffer_size() { + assert_eq!(default_buffer_size(), "256M"); + } + + #[test] + fn test_default_bw_zero() { + assert_eq!(default_bw_zero(), "0"); + } + + #[test] + fn test_default_true() { + assert!(default_true()); + } + + #[test] + fn test_default_write_back() { + assert_eq!(default_write_back(), "5s"); + } + + #[test] + fn test_default_transfers() { + assert_eq!(default_transfers(), 4); + } + + #[test] + fn test_default_dir_cache_time() { + assert_eq!(default_dir_cache_time(), "1h"); + } + + #[test] + fn test_default_nfs_network() { + assert_eq!(default_nfs_network(), "192.168.0.0/24"); + } + + #[test] + fn test_default_webdav_port() { + assert_eq!(default_webdav_port(), 8080); + } + + #[test] + fn test_default_mount_point() { + assert_eq!(default_mount_point(), PathBuf::from("/mnt/nas-photos")); + } + + #[test] + fn test_warmup_config_default() { + let wc = WarmupConfig::default(); + assert!(wc.auto); + assert!(wc.rules.is_empty()); + } + + #[test] + fn test_warmup_rule_deserialization() { + let toml_str = r#" +path = "Images/2024" +newer_than = "7d" +"#; + let rule: WarmupRule = toml::from_str(toml_str).unwrap(); + assert_eq!(rule.path, "Images/2024"); + assert_eq!(rule.newer_than.as_deref(), Some("7d")); + } + + #[test] + fn test_warmup_rule_without_newer_than() { + let toml_str = r#" +path = "Images/2024" +"#; + let rule: WarmupRule = toml::from_str(toml_str).unwrap(); + assert_eq!(rule.path, "Images/2024"); + assert!(rule.newer_than.is_none()); + } + + #[test] + fn test_default_config_path() { + assert_eq!(DEFAULT_CONFIG_PATH, "/etc/warpgate/config.toml"); + } +} diff --git a/warpgate/src/deploy/deps.rs 
b/warpgate/src/deploy/deps.rs index 1557d4d..854302a 100644 --- a/warpgate/src/deploy/deps.rs +++ b/warpgate/src/deploy/deps.rs @@ -43,6 +43,65 @@ fn binary_to_package(binary: &str) -> &str { } } +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_required_deps_contains_rclone() { + assert!(REQUIRED_DEPS.contains(&"rclone")); + } + + #[test] + fn test_required_deps_contains_smbd() { + assert!(REQUIRED_DEPS.contains(&"smbd")); + } + + #[test] + fn test_required_deps_contains_fusermount3() { + assert!(REQUIRED_DEPS.contains(&"fusermount3")); + } + + #[test] + fn test_optional_deps_contains_exportfs() { + assert!(OPTIONAL_DEPS.iter().any(|(bin, _)| *bin == "exportfs")); + } + + #[test] + fn test_optional_deps_maps_to_package() { + let pkg = OPTIONAL_DEPS + .iter() + .find(|(bin, _)| *bin == "exportfs") + .map(|(_, pkg)| *pkg); + assert_eq!(pkg, Some("nfs-kernel-server")); + } + + #[test] + fn test_binary_to_package_rclone() { + assert_eq!(binary_to_package("rclone"), "rclone"); + } + + #[test] + fn test_binary_to_package_smbd() { + assert_eq!(binary_to_package("smbd"), "samba"); + } + + #[test] + fn test_binary_to_package_fusermount3() { + assert_eq!(binary_to_package("fusermount3"), "fuse3"); + } + + #[test] + fn test_binary_to_package_exportfs() { + assert_eq!(binary_to_package("exportfs"), "nfs-kernel-server"); + } + + #[test] + fn test_binary_to_package_unknown() { + assert_eq!(binary_to_package("unknown-tool"), "unknown-tool"); + } +} + /// Install missing dependencies via apt. 
/// /// Takes a list of missing **binary names** (as returned by [`check_missing`]), diff --git a/warpgate/src/rclone/config.rs b/warpgate/src/rclone/config.rs index 18f4c27..11cf2a5 100644 --- a/warpgate/src/rclone/config.rs +++ b/warpgate/src/rclone/config.rs @@ -71,3 +71,79 @@ pub fn write_config(config: &Config) -> Result<()> { Ok(()) } + +#[cfg(test)] +mod tests { + use super::*; + + fn test_config() -> Config { + toml::from_str( + r#" +[connection] +nas_host = "10.0.0.1" +nas_user = "admin" +remote_path = "/photos" + +[cache] +dir = "/tmp/cache" + +[read] +[bandwidth] +[writeback] +[directory_cache] +[protocols] +[mount] +"#, + ) + .unwrap() + } + + #[test] + fn test_generate_rclone_config_minimal() { + let config = test_config(); + let content = generate(&config).unwrap(); + + assert!(content.contains("[nas]")); + assert!(content.contains("type = sftp")); + assert!(content.contains("host = 10.0.0.1")); + assert!(content.contains("user = admin")); + assert!(content.contains("port = 22")); + assert!(content.contains("connections = 8")); + assert!(content.contains("disable_hashcheck = true")); + // No password or key_file lines + assert!(!content.contains("pass =")); + assert!(!content.contains("key_file =")); + } + + #[test] + fn test_generate_rclone_config_with_key_file() { + let mut config = test_config(); + config.connection.nas_key_file = Some("/root/.ssh/id_rsa".into()); + + let content = generate(&config).unwrap(); + assert!(content.contains("key_file = /root/.ssh/id_rsa")); + } + + #[test] + fn test_generate_rclone_config_custom_port_and_connections() { + let mut config = test_config(); + config.connection.sftp_port = 2222; + config.connection.sftp_connections = 16; + + let content = generate(&config).unwrap(); + assert!(content.contains("port = 2222")); + assert!(content.contains("connections = 16")); + } + + #[test] + fn test_rclone_conf_path_constant() { + assert_eq!(RCLONE_CONF_PATH, "/etc/warpgate/rclone.conf"); + } + + #[test] + fn 
test_generate_starts_with_section() { + let config = test_config(); + let content = generate(&config).unwrap(); + assert!(content.starts_with("[nas]\n")); + } +} diff --git a/warpgate/src/rclone/mount.rs b/warpgate/src/rclone/mount.rs index 6cd8b0d..0f6dbac 100644 --- a/warpgate/src/rclone/mount.rs +++ b/warpgate/src/rclone/mount.rs @@ -120,6 +120,136 @@ pub fn build_mount_command(config: &Config) -> String { format!("/usr/bin/rclone {}", args.join(" ")) } +#[cfg(test)] +mod tests { + use super::*; + + fn test_config() -> Config { + toml::from_str( + r#" +[connection] +nas_host = "10.0.0.1" +nas_user = "admin" +remote_path = "/photos" + +[cache] +dir = "/tmp/cache" + +[read] +[bandwidth] +[writeback] +[directory_cache] +[protocols] +[mount] +"#, + ) + .unwrap() + } + + #[test] + fn test_format_bwlimit_both_zero() { + assert_eq!(format_bwlimit("0", "0"), "0"); + } + + #[test] + fn test_format_bwlimit_both_empty() { + assert_eq!(format_bwlimit("", ""), "0"); + } + + #[test] + fn test_format_bwlimit_mixed_zero_empty() { + assert_eq!(format_bwlimit("0", ""), "0"); + assert_eq!(format_bwlimit("", "0"), "0"); + } + + #[test] + fn test_format_bwlimit_up_only() { + assert_eq!(format_bwlimit("10M", "0"), "10M:0"); + } + + #[test] + fn test_format_bwlimit_down_only() { + assert_eq!(format_bwlimit("0", "50M"), "0:50M"); + } + + #[test] + fn test_format_bwlimit_both_set() { + assert_eq!(format_bwlimit("10M", "50M"), "10M:50M"); + } + + #[test] + fn test_build_mount_args_contains_essentials() { + let config = test_config(); + let args = build_mount_args(&config); + + assert_eq!(args[0], "mount"); + assert_eq!(args[1], "nas:/photos"); + assert_eq!(args[2], "/mnt/nas-photos"); + + assert!(args.contains(&"--config".to_string())); + assert!(args.contains(&RCLONE_CONF_PATH.to_string())); + assert!(args.contains(&"--vfs-cache-mode".to_string())); + assert!(args.contains(&"full".to_string())); + assert!(args.contains(&"--vfs-write-back".to_string())); + 
assert!(args.contains(&"5s".to_string())); + assert!(args.contains(&"--vfs-cache-max-size".to_string())); + assert!(args.contains(&"200G".to_string())); + assert!(args.contains(&"--vfs-cache-max-age".to_string())); + assert!(args.contains(&"720h".to_string())); + assert!(args.contains(&"--cache-dir".to_string())); + assert!(args.contains(&"/tmp/cache".to_string())); + assert!(args.contains(&"--dir-cache-time".to_string())); + assert!(args.contains(&"1h".to_string())); + assert!(args.contains(&"--buffer-size".to_string())); + assert!(args.contains(&"--transfers".to_string())); + assert!(args.contains(&"4".to_string())); + assert!(args.contains(&"--rc".to_string())); + assert!(args.contains(&"--allow-other".to_string())); + } + + #[test] + fn test_build_mount_args_no_bwlimit_when_unlimited() { + let config = test_config(); + let args = build_mount_args(&config); + // Default bandwidth is "0" for both, so --bwlimit should NOT be present + assert!(!args.contains(&"--bwlimit".to_string())); + } + + #[test] + fn test_build_mount_args_with_bwlimit() { + let mut config = test_config(); + config.bandwidth.limit_up = "10M".into(); + config.bandwidth.limit_down = "50M".into(); + let args = build_mount_args(&config); + assert!(args.contains(&"--bwlimit".to_string())); + assert!(args.contains(&"10M:50M".to_string())); + } + + #[test] + fn test_build_mount_command_format() { + let config = test_config(); + let cmd = build_mount_command(&config); + assert!(cmd.starts_with("/usr/bin/rclone mount")); + assert!(cmd.contains("nas:/photos")); + assert!(cmd.contains("/mnt/nas-photos")); + } + + #[test] + fn test_build_mount_args_custom_config() { + let mut config = test_config(); + config.connection.remote_path = "/volume1/media".into(); + config.mount.point = std::path::PathBuf::from("/mnt/media"); + config.cache.dir = std::path::PathBuf::from("/ssd/cache"); + config.writeback.transfers = 16; + + let args = build_mount_args(&config); + assert_eq!(args[1], "nas:/volume1/media"); + 
assert_eq!(args[2], "/mnt/media"); + assert!(args.contains(&"/ssd/cache".to_string())); + assert!(args.contains(&"16".to_string())); + } +} + /// Check if the FUSE mount is currently active by inspecting `/proc/mounts`. pub fn is_mounted(config: &Config) -> Result<bool> { let mount_point = config.mount.point.display().to_string(); diff --git a/warpgate/src/rclone/rc.rs b/warpgate/src/rclone/rc.rs index 7c9b6a0..9096317 100644 --- a/warpgate/src/rclone/rc.rs +++ b/warpgate/src/rclone/rc.rs @@ -100,3 +100,125 @@ pub fn bwlimit(upload: Option<&str>, download: Option<&str>) -> Result<()> { Ok(()) } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_invalid_json_in_core_stats() { + let result = serde_json::from_str::<CoreStats>("not json"); + assert!(result.is_err()); + } + + #[test] + fn test_missing_required_field_in_core_stats() { + let json = r#"{"bytes": 0}"#; + let result = serde_json::from_str::<CoreStats>(json); + assert!(result.is_err()); + } + + #[test] + fn test_rc_addr_constant() { + assert_eq!(RC_ADDR, "http://127.0.0.1:5572"); + } + + #[test] + fn test_core_stats_large_values() { + let json = r#"{ + "bytes": 18446744073709551615, + "speed": 1e12, + "transfers": 1000000, + "errors": 999 + }"#; + let stats: CoreStats = serde_json::from_str(json).unwrap(); + assert_eq!(stats.bytes, u64::MAX); + assert_eq!(stats.transfers, 1000000); + assert_eq!(stats.errors, 999); + } +} diff --git a/warpgate/src/services/nfs.rs b/warpgate/src/services/nfs.rs index 97ccf63..848a0c3 100644 --- a/warpgate/src/services/nfs.rs +++ b/warpgate/src/services/nfs.rs @@ -45,3 +45,63 @@ pub fn write_config(config: &Config) -> Result<()> { Ok(()) } + +#[cfg(test)] +mod tests { + use super::*; + + fn test_config() -> Config { + toml::from_str( + r#" +[connection] +nas_host = "10.0.0.1" +nas_user = "admin" +remote_path = "/photos" + +[cache] +dir = "/tmp/cache" + +[read] +[bandwidth] +[writeback] +[directory_cache] +[protocols] +[mount] +"#, + ) + .unwrap() + } + + #[test] + fn test_generate_exports() { + let config = test_config(); + let content = generate(&config).unwrap(); + + assert!(content.contains("/mnt/nas-photos")); +
assert!(content.contains("192.168.0.0/24")); + assert!(content.contains("rw,sync,no_subtree_check,fsid=1")); + } + + #[test] + fn test_generate_exports_custom_network() { + let mut config = test_config(); + config.protocols.nfs_allowed_network = "10.0.0.0/8".into(); + let content = generate(&config).unwrap(); + + assert!(content.contains("10.0.0.0/8")); + } + + #[test] + fn test_generate_exports_custom_mount() { + let mut config = test_config(); + config.mount.point = std::path::PathBuf::from("/mnt/media"); + let content = generate(&config).unwrap(); + + assert!(content.contains("/mnt/media")); + } + + #[test] + fn test_exports_path_constant() { + assert_eq!(EXPORTS_PATH, "/etc/exports.d/warpgate.exports"); + } +} diff --git a/warpgate/src/services/samba.rs b/warpgate/src/services/samba.rs index 7f6cb1d..0dcd7ee 100644 --- a/warpgate/src/services/samba.rs +++ b/warpgate/src/services/samba.rs @@ -76,3 +76,71 @@ pub fn write_config(config: &Config) -> Result<()> { Ok(()) } + +#[cfg(test)] +mod tests { + use super::*; + + fn test_config() -> Config { + toml::from_str( + r#" +[connection] +nas_host = "10.0.0.1" +nas_user = "admin" +remote_path = "/photos" + +[cache] +dir = "/tmp/cache" + +[read] +[bandwidth] +[writeback] +[directory_cache] +[protocols] +[mount] +"#, + ) + .unwrap() + } + + #[test] + fn test_generate_smb_conf_global_section() { + let config = test_config(); + let content = generate(&config).unwrap(); + + assert!(content.contains("[global]")); + assert!(content.contains("server role = standalone server")); + assert!(content.contains("server min protocol = SMB2_02")); + assert!(content.contains("map to guest = Bad User")); + assert!(content.contains("load printers = no")); + } + + #[test] + fn test_generate_smb_conf_share_section() { + let config = test_config(); + let content = generate(&config).unwrap(); + + // Share name derived from mount point dir name "nas-photos" + assert!(content.contains("[nas-photos]")); + assert!(content.contains("path = 
/mnt/nas-photos")); + assert!(content.contains("browseable = yes")); + assert!(content.contains("read only = no")); + assert!(content.contains("guest ok = yes")); + assert!(content.contains("force user = root")); + } + + #[test] + fn test_generate_smb_conf_custom_mount() { + let mut config = test_config(); + config.mount.point = std::path::PathBuf::from("/mnt/my-nas"); + let content = generate(&config).unwrap(); + + assert!(content.contains("[my-nas]")); + assert!(content.contains("path = /mnt/my-nas")); + } + + #[test] + fn test_smb_conf_path_constant() { + assert_eq!(SMB_CONF_PATH, "/etc/samba/smb.conf"); + } +} diff --git a/warpgate/src/services/systemd.rs b/warpgate/src/services/systemd.rs index 5a3db77..fabf354 100644 --- a/warpgate/src/services/systemd.rs +++ b/warpgate/src/services/systemd.rs @@ -62,6 +62,64 @@ pub fn install_run_unit(config: &Config) -> Result<()> { Ok(()) } +#[cfg(test)] +mod tests { + use super::*; + + fn test_config() -> Config { + toml::from_str( + r#" +[connection] +nas_host = "10.0.0.1" +nas_user = "admin" +remote_path = "/photos" + +[cache] +dir = "/tmp/cache" + +[read] +[bandwidth] +[writeback] +[directory_cache] +[protocols] +[mount] +"#, + ) + .unwrap() + } + + #[test] + fn test_generate_run_unit_sections() { + let config = test_config(); + let unit = generate_run_unit(&config).unwrap(); + + assert!(unit.contains("[Unit]")); + assert!(unit.contains("[Service]")); + assert!(unit.contains("[Install]")); + } + + #[test] + fn test_generate_run_unit_content() { + let config = test_config(); + let unit = generate_run_unit(&config).unwrap(); + + assert!(unit.contains("Description=Warpgate NAS cache proxy")); + assert!(unit.contains("After=network-online.target")); + assert!(unit.contains("Type=simple")); + assert!(unit.contains("ExecStart=/usr/local/bin/warpgate run")); + assert!(unit.contains("Restart=on-failure")); + assert!(unit.contains("RestartSec=10")); + assert!(unit.contains("KillMode=mixed")); + 
assert!(unit.contains("WantedBy=multi-user.target")); + } + + #[test] + fn test_systemd_constants() { + assert_eq!(SYSTEMD_DIR, "/etc/systemd/system"); + assert_eq!(RUN_SERVICE, "warpgate.service"); + } +} + /// Enable and start the single `warpgate.service`. pub fn enable_and_start_run() -> Result<()> { let status = Command::new("systemctl") diff --git a/warpgate/src/services/webdav.rs b/warpgate/src/services/webdav.rs index fbed3a7..8c619bb 100644 --- a/warpgate/src/services/webdav.rs +++ b/warpgate/src/services/webdav.rs @@ -22,3 +22,72 @@ pub fn build_serve_command(config: &Config) -> String { let args = build_serve_args(config); format!("/usr/bin/rclone {}", args.join(" ")) } + +#[cfg(test)] +mod tests { + use super::*; + + fn test_config() -> Config { + toml::from_str( + r#" +[connection] +nas_host = "10.0.0.1" +nas_user = "admin" +remote_path = "/photos" + +[cache] +dir = "/tmp/cache" + +[read] +[bandwidth] +[writeback] +[directory_cache] +[protocols] +[mount] +"#, + ) + .unwrap() + } + + #[test] + fn test_build_serve_args() { + let config = test_config(); + let args = build_serve_args(&config); + + assert_eq!(args[0], "serve"); + assert_eq!(args[1], "webdav"); + assert_eq!(args[2], "/mnt/nas-photos"); + assert_eq!(args[3], "--addr"); + assert_eq!(args[4], "0.0.0.0:8080"); + assert_eq!(args[5], "--read-only=false"); + } + + #[test] + fn test_build_serve_args_custom_port() { + let mut config = test_config(); + config.protocols.webdav_port = 9090; + let args = build_serve_args(&config); + + assert_eq!(args[4], "0.0.0.0:9090"); + } + + #[test] + fn test_build_serve_args_custom_mount() { + let mut config = test_config(); + config.mount.point = std::path::PathBuf::from("/mnt/media"); + let args = build_serve_args(&config); + + assert_eq!(args[2], "/mnt/media"); + } + + #[test] + fn test_build_serve_command() { + let config = test_config(); + let cmd = build_serve_command(&config); + + assert!(cmd.starts_with("/usr/bin/rclone serve webdav")); + 
assert!(cmd.contains("/mnt/nas-photos")); + assert!(cmd.contains("--addr")); + assert!(cmd.contains("0.0.0.0:8080")); + } +} diff --git a/warpgate/src/supervisor.rs b/warpgate/src/supervisor.rs index 3d85b37..f813dc6 100644 --- a/warpgate/src/supervisor.rs +++ b/warpgate/src/supervisor.rs @@ -458,6 +458,82 @@ fn wait_writeback_drain() { } } +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_restart_tracker_new() { + let tracker = RestartTracker::new(); + assert_eq!(tracker.count, 0); + assert!(tracker.last_restart.is_none()); + } + + #[test] + fn test_restart_tracker_record_restart() { + let mut tracker = RestartTracker::new(); + tracker.record_restart(); + assert_eq!(tracker.count, 1); + assert!(tracker.last_restart.is_some()); + } + + #[test] + fn test_restart_tracker_can_restart_under_max() { + let mut tracker = RestartTracker::new(); + assert!(tracker.can_restart()); + + tracker.record_restart(); + assert!(tracker.can_restart()); // count = 1 + + tracker.record_restart(); + assert!(tracker.can_restart()); // count = 2 + } + + #[test] + fn test_restart_tracker_cannot_restart_at_max() { + let mut tracker = RestartTracker::new(); + for _ in 0..MAX_RESTARTS { + tracker.record_restart(); + } + assert!(!tracker.can_restart()); // count = 3 = MAX_RESTARTS + } + + #[test] + fn test_restart_tracker_backoff_delay() { + let mut tracker = RestartTracker::new(); + + tracker.record_restart(); + assert_eq!(tracker.count * 2, 2); // 2s delay + + tracker.record_restart(); + assert_eq!(tracker.count * 2, 4); // 4s delay + + tracker.record_restart(); + assert_eq!(tracker.count * 2, 6); // 6s delay + } + + #[test] + fn test_restart_tracker_multiple_record() { + let mut tracker = RestartTracker::new(); + tracker.record_restart(); + tracker.record_restart(); + tracker.record_restart(); + assert_eq!(tracker.count, 3); + assert!(!tracker.can_restart()); + } + + #[test] + fn test_constants() { + assert_eq!(MOUNT_TIMEOUT, Duration::from_secs(30)); + 
assert_eq!(POLL_INTERVAL, Duration::from_secs(2)); + assert_eq!(SIGTERM_GRACE, Duration::from_secs(3)); + assert_eq!(MAX_RESTARTS, 3); + assert_eq!(RESTART_STABLE_PERIOD, Duration::from_secs(300)); + assert_eq!(WRITEBACK_DRAIN_TIMEOUT, Duration::from_secs(300)); + assert_eq!(WRITEBACK_POLL_INTERVAL, Duration::from_secs(2)); + } +} + /// Reverse-order teardown of all services. /// /// Order: stop smbd → unexport NFS → kill WebDAV → unmount FUSE → kill rclone.