warpgate/tests/05-cache/test-cache-full-dirty.sh
grabbit a2d49137f9 Add comprehensive test suite: 63 integration tests + 110 Rust unit tests
Integration tests (tests/):
- 9 categories covering config, lifecycle, signals, supervision,
  cache, writeback, network faults, crash recovery, and CLI
- Shell-based harness with mock NAS (network namespace + SFTP),
  fault injection (tc netem), and power loss simulation
- TAP format runner (run-all.sh) with proper SKIP detection

Rust unit tests (warpgate/src/):
- 110 tests across 14 modules, all passing in 0.01s
- Config parsing, defaults validation, RestartTracker logic,
  RC API response parsing, rclone arg generation, service
  config generation, CLI output formatting, warmup path logic

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-18 11:21:35 +08:00

#!/usr/bin/env bash
# Test: dirty files are preserved when cache disk is nearly full
#
# Uses a tiny loopback-mounted filesystem as the cache disk to simulate
# a cache-full scenario. Verifies that dirty files are not lost when
# the cache disk has no free space for eviction.
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
source "$SCRIPT_DIR/../harness/helpers.sh"
source "$SCRIPT_DIR/../harness/mock-nas.sh"
require_root
setup_test_env
# Set up a tiny 5 MB cache disk (loopback ext4)
setup_small_cache_disk 5
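# (Harness assumption, not shown in this file: setup_small_cache_disk
# presumably builds the loopback cache disk along these lines:
#   truncate -s "${1}M" cache.img && mkfs.ext4 -q cache.img
#   mount -o loop cache.img "$CACHE_DIR"   # $CACHE_DIR is hypothetical)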
# Restore networking first so teardown can unmount cleanly even if the
# test aborts while the network is severed
trap 'inject_network_up || true; teardown_small_cache_disk; teardown_test_env' EXIT
# Start mock NAS
start_mock_nas
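# (Per the commit message, the mock NAS is an SFTP server running inside
# a dedicated network namespace, which is what makes the network fault
# injection below possible.)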
# Generate a config with a small cache limit and a long write-back delay
# so dirty files remain dirty for the duration of the test
gen_config cache_max_size=4M write_back=300s
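# (Assumption: gen_config presumably maps these keys onto rclone's VFS
# flags, i.e. cache_max_size -> --vfs-cache-max-size and
# write_back -> --vfs-write-back.)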
# Start warpgate and wait for readiness
start_warpgate
wait_for_mount
wait_for_rc_api
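# (Assumption: wait_for_mount likely polls `mountpoint -q "$TEST_MOUNT"`,
# and wait_for_rc_api likely polls an rclone RC endpoint such as
# `rclone rc core/pid` until it responds.)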
# Sever the network so write-back cannot complete
inject_network_down
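# (Assumption: per the commit message's "fault injection (tc netem)",
# inject_network_down likely applies 100% loss on the NAS-side link,
# e.g. `tc qdisc add dev "$NAS_VETH" root netem loss 100%`, where
# $NAS_VETH is a hypothetical interface name.)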
# Write files through the mount to fill the cache disk. Each file is
# ~512 KB, so eight of them (~4 MB) nearly fill the usable space on the
# 5 MB disk once ext4 overhead is accounted for.
for i in $(seq 1 8); do
    # Tolerate ENOSPC here: the later writes may legitimately hit the
    # limit early, which is exactly the condition under test.
    dd if=/dev/urandom of="$TEST_MOUNT/fill-${i}.dat" bs=1K count=512 2>/dev/null || true
    sleep 0.5
done
# Give the VFS a moment to register all the dirty writes
sleep 3
# Verify that dirty files exist and are tracked
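# (Assumption: get_dirty_count presumably queries the rclone RC API,
# e.g. the upload-queue counters reported by `rclone rc vfs/stats`.)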
dirty=$(get_dirty_count)
if [[ "$dirty" -lt 1 ]]; then
echo "FAIL: expected dirty count > 0, got $dirty" >&2
inject_network_up
exit 1
fi
# Verify each previously written dirty file is still accessible
for i in $(seq 1 8); do
    if [[ ! -f "$TEST_MOUNT/fill-${i}.dat" ]]; then
        echo "FAIL: dirty file fill-${i}.dat no longer accessible" >&2
        inject_network_up
        exit 1
    fi
done
# Attempt a 9th write: the cache disk should now be full, so this write
# should fail with ENOSPC or a write error.
write_failed=0
dd if=/dev/urandom of="$TEST_MOUNT/fill-9.dat" bs=1K count=512 2>/dev/null || write_failed=1
if [[ "$write_failed" -eq 0 ]]; then
    echo "WARN: 9th write succeeded; cache disk may not be truly full" >&2
    echo "      (expected ENOSPC or a write error once the cache disk is exhausted)" >&2
fi
# Restore the network for clean teardown
inject_network_up
echo "PASS: $(basename "$0" .sh)"