use std::sync::Arc;
use std::time::Instant;
use tokio::time::{sleep, Duration};

use crate::frame_pool::{FramePool, HierarchicalFramePool};
use crate::memory_monitor::GLOBAL_MEMORY_MONITOR;
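
// NOTE: These tests rely on the frame-pool API as used below: `FramePool::acquire()`
// hands out a pooled buffer that is returned to the pool when it is dropped,
// `warm_up()` pre-allocates the pool's buffers, and `stats()` exposes counters such as
// `total_allocations`, `total_returns`, `available_buffers`, and `cache_hit_rate`.
// See crate::frame_pool and crate::memory_monitor for the authoritative definitions.
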
/// Integration test for frame pool performance and zero-allocation behavior
pub async fn test_frame_pool_integration() -> anyhow::Result<()> {
    println!("🧪 Testing Frame Pool Integration");
    println!("================================");

    // Test 1: Basic frame pool functionality
    println!("\n📋 Test 1: Basic Frame Pool Functionality");
    test_basic_frame_pool().await?;

    // Test 2: Hierarchical frame pool
    println!("\n📋 Test 2: Hierarchical Frame Pool");
    test_hierarchical_frame_pool().await?;

    // Test 3: Memory optimization measurement
    println!("\n📋 Test 3: Memory Optimization Measurement");
    test_memory_optimization().await?;

    // Test 4: Performance comparison
    println!("\n📋 Test 4: Performance Comparison");
    test_performance_comparison().await?;

    println!("\n✅ All frame pool tests passed!");
    Ok(())
}

/// Test basic frame pool operations
async fn test_basic_frame_pool() -> anyhow::Result<()> {
    let pool = FramePool::new(10, 1024 * 900); // 900KB frames
    pool.warm_up();

    // Test buffer acquisition and return
    let buffers: Vec<_> = (0..5)
        .map(|_| pool.acquire())
        .collect();

    println!(" ✓ Acquired 5 buffers from pool");

    // Simulate filling buffers
    for (i, mut buffer) in buffers.into_iter().enumerate() {
        let test_data = format!("Frame data {}", i).into_bytes();
        buffer.as_mut().extend_from_slice(&test_data);

        // Buffer will be returned to pool when dropped
    }

    println!(" ✓ Filled buffers with test data");

    // Allow some time for buffers to return to pool
    sleep(Duration::from_millis(10)).await;

    let stats = pool.stats();
    println!(
        " ✓ Pool stats: {} available, {} total allocations",
        stats.available_buffers, stats.total_allocations
    );

    assert!(stats.total_allocations >= 5, "Should have recorded allocations");

    Ok(())
}

/// Test hierarchical frame pool with different sizes
async fn test_hierarchical_frame_pool() -> anyhow::Result<()> {
    let hierarchical = HierarchicalFramePool::new(8);

    // Test different size acquisitions
    let small_buffer = hierarchical.acquire(32 * 1024); // 32KB
    let medium_buffer = hierarchical.acquire(200 * 1024); // 200KB
    let large_buffer = hierarchical.acquire(800 * 1024); // 800KB
    let xl_buffer = hierarchical.acquire(1500 * 1024); // 1.5MB

    println!(" ✓ Acquired buffers of sizes: 32KB, 200KB, 800KB, 1.5MB");

    // Verify capacities
    assert!(small_buffer.capacity() >= 32 * 1024);
    assert!(medium_buffer.capacity() >= 200 * 1024);
    assert!(large_buffer.capacity() >= 800 * 1024);
    assert!(xl_buffer.capacity() >= 1500 * 1024);

    println!(" ✓ Buffer capacities verified");

    let total_memory = hierarchical.total_memory_usage();
    println!(" ✓ Total pool memory usage: {} KB", total_memory / 1024);

    Ok(())
}

/// Test memory optimization tracking
async fn test_memory_optimization() -> anyhow::Result<()> {
    let frame_size = 900 * 1024; // 900KB frames
    let subscriber_count = 4; // 4 components subscribing

    // Record multiple frame processing events
    for _ in 0..50 {
        GLOBAL_MEMORY_MONITOR.record_frame_processed(frame_size, subscriber_count);
    }

    let stats = GLOBAL_MEMORY_MONITOR.stats();
    println!(" ✓ Processed {} frames", stats.frames_processed);
    println!(" ✓ Memory saved: {:.2} MB", stats.bytes_saved_total as f64 / 1_000_000.0);
    println!(" ✓ Arc references created: {}", stats.arc_references_created);

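    // Expected savings: with a single Arc-shared frame, each of the other
    // (subscriber_count - 1) subscribers avoids one full frame copy per frame,
    // i.e. 3 * 900 KiB * 50 frames ≈ 138 MB here. This mirrors the accounting
    // the assertion below expects from the memory monitor.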
    // Verify memory savings calculation
    let expected_savings = (subscriber_count - 1) * frame_size * 50;
    assert_eq!(
        stats.bytes_saved_total as usize, expected_savings,
        "Memory savings calculation should be correct"
    );

    println!(" ✓ Memory optimization tracking working correctly");

    Ok(())
}

/// Test performance comparison between pooled and non-pooled allocation
async fn test_performance_comparison() -> anyhow::Result<()> {
    let iterations = 1000;
    let frame_size = 640 * 480 * 3; // RGB frame

    // Test 1: Traditional Vec allocation (baseline)
    let start = Instant::now();
    for _ in 0..iterations {
        let _vec = vec![0u8; frame_size];
        // Vec is dropped here
    }
    let vec_duration = start.elapsed();

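    // Caveat: in optimized builds the compiler may elide the unused `vec![0u8; frame_size]`
    // allocation above, which would understate the baseline cost. Wrapping the allocation
    // in `std::hint::black_box(...)` (stable since Rust 1.66) is one way to keep the
    // baseline honest if this comparison is ever used as a real benchmark.
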
    // Test 2: Frame pool allocation (optimized)
    let pool = FramePool::new(50, frame_size);
    pool.warm_up();

    let start = Instant::now();
    for _ in 0..iterations {
        let _buffer = pool.acquire();
        // Buffer is returned to pool on drop
    }
    let pool_duration = start.elapsed();

    println!(" 📊 Performance Comparison ({} allocations):", iterations);
    println!(" Traditional Vec: {:?}", vec_duration);
    println!(" Frame Pool: {:?}", pool_duration);

    let speedup = vec_duration.as_nanos() as f64 / pool_duration.as_nanos() as f64;
    println!(" Speedup: {:.2}x", speedup);

    // Pool should be faster (in most cases, depending on system load)
    if speedup > 1.0 {
        println!(" ✓ Frame pool is faster than traditional allocation");
    } else {
        println!(" ⚠ Frame pool performance similar to traditional allocation");
        println!(" (This can happen due to system load or compiler optimizations)");
    }

    let pool_stats = pool.stats();
    println!(" 📈 Pool Statistics:");
    println!(" Cache hit rate: {:.1}%", pool_stats.cache_hit_rate * 100.0);
    println!(" Avg alloc time: {} ns", pool_stats.average_allocation_time_nanos);
    println!(" Total allocations: {}", pool_stats.total_allocations);
    println!(" Total returns: {}", pool_stats.total_returns);

    Ok(())
}

/// Stress test for concurrent access
pub async fn stress_test_concurrent_access() -> anyhow::Result<()> {
    println!("\n🚀 Stress Test: Concurrent Frame Pool Access");

    let pool = Arc::new(FramePool::new(100, 1024 * 1024)); // 1MB frames
    pool.warm_up();

    let pool_clone = pool.clone();

    // Spawn multiple concurrent tasks
    let tasks: Vec<_> = (0..10)
        .map(|task_id| {
            let pool = pool_clone.clone();
            tokio::spawn(async move {
                for i in 0..100 {
                    let mut buffer = pool.acquire();

                    // Simulate some work
                    let data = format!("Task {} iteration {}", task_id, i);
                    buffer.as_mut().extend_from_slice(data.as_bytes());

                    if i % 20 == 0 {
                        sleep(Duration::from_micros(100)).await;
                    }
                }

                println!(" ✓ Task {} completed 100 allocations", task_id);
            })
        })
        .collect();

    // Wait for all tasks to complete
    for task in tasks {
        task.await?;
    }

    let final_stats = pool.stats();
    println!(" 📊 Concurrent Test Results:");
    println!(" Total allocations: {}", final_stats.total_allocations);
    println!(" Total returns: {}", final_stats.total_returns);
    println!(" Available buffers: {}", final_stats.available_buffers);
    println!(" Cache hit rate: {:.1}%", final_stats.cache_hit_rate * 100.0);

    assert!(final_stats.total_allocations >= 1000, "Should have processed all allocations");

    println!(" ✅ Concurrent stress test passed");

    Ok(())
}

#[cfg(test)]
mod tests {
    use super::*;

    #[tokio::test]
    async fn test_integration_suite() {
        test_frame_pool_integration().await.unwrap();
    }

    #[tokio::test]
    async fn test_stress_test() {
        stress_test_concurrent_access().await.unwrap();
    }
}