- Add OpenCV as default camera backend, nokhwa as optional alternative - Make camera backends mutually exclusive via feature flags (opencv_camera, nokhwa_camera) - Remove deprecated hardware_camera feature, use nokhwa_camera instead - Add main thread camera initialization for macOS TCC authorization - Add pre-opened capture storage via static Mutex for async compatibility - Add pixel format conversion utilities (pixel_convert.rs) - Update all cfg guards from hardware_camera to nokhwa_camera macOS requires camera authorization requests on main thread. OpenCV's VideoCapture::new() is now called before tokio runtime starts, with the handle stored for later use by async code. Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
245 lines
7.7 KiB
Rust
245 lines
7.7 KiB
Rust
//! Pixel format conversion utilities for camera capture
//!
//! This module provides efficient conversion from various camera pixel formats
//! to grayscale, which is the primary format used by the Vida detection algorithm.

/// Convert RGB888 (24-bit) to grayscale using ITU-R BT.601 coefficients
/// Y = 0.299*R + 0.587*G + 0.114*B
///
/// Returns a black frame of `width * height` bytes when the input buffer is
/// too short. When the buffer is longer than `width * height * 3` (e.g.
/// trailing padding from a capture driver), only the leading pixels are
/// converted so the output length is always exactly `width * height`.
pub fn rgb888_to_grayscale(rgb_data: &[u8], width: usize, height: usize) -> Vec<u8> {
    let expected_size = width * height * 3;
    if rgb_data.len() < expected_size {
        // Short frame: callers expect a full-size buffer, so hand back a
        // black frame rather than a truncated one.
        return vec![0u8; width * height];
    }

    let mut grayscale = Vec::with_capacity(width * height);

    // Slice to the expected size so an oversized buffer cannot produce more
    // than width * height output pixels.
    for pixel in rgb_data[..expected_size].chunks_exact(3) {
        let r = pixel[0] as u32;
        let g = pixel[1] as u32;
        let b = pixel[2] as u32;
        // BT.601 coefficients: 0.299, 0.587, 0.114
        // Using fixed-point: (77*R + 150*G + 29*B) >> 8  (77+150+29 = 256)
        let y = ((77 * r + 150 * g + 29 * b) >> 8) as u8;
        grayscale.push(y);
    }

    grayscale
}
/// Convert YUYV (YUV 4:2:2 packed) to grayscale
/// YUYV format: Y0 U0 Y1 V0 (4 bytes for 2 pixels)
/// We only need the Y channel for grayscale
///
/// Returns a black frame when the buffer is too short. Oversized buffers are
/// truncated to `width * height * 2` bytes so the output is always exactly
/// `width * height` pixels.
///
/// NOTE(review): assumes `width * height` is even, as YUYV packs pixels in
/// pairs — confirm capture configuration never produces odd pixel counts.
pub fn yuyv_to_grayscale(yuyv_data: &[u8], width: usize, height: usize) -> Vec<u8> {
    let expected_size = width * height * 2;
    if yuyv_data.len() < expected_size {
        // Short frame: return a full-size black frame instead of truncating.
        return vec![0u8; width * height];
    }

    let mut grayscale = Vec::with_capacity(width * height);

    // YUYV: [Y0, U, Y1, V, Y2, U, Y3, V, ...]
    // Extract Y values at positions 0, 2, 4, 6, ...
    // Slice first so an oversized buffer cannot emit extra pixels.
    for chunk in yuyv_data[..expected_size].chunks_exact(4) {
        grayscale.push(chunk[0]); // Y0
        grayscale.push(chunk[2]); // Y1
    }

    grayscale
}
/// Convert NV12 (YUV 4:2:0 semi-planar) to grayscale
/// NV12 format: Y plane followed by interleaved UV plane
/// Y plane size: width * height
/// UV plane size: width * height / 2
pub fn nv12_to_grayscale(nv12_data: &[u8], width: usize, height: usize) -> Vec<u8> {
    let y_size = width * height;
    // The Y plane at the front of the buffer already is the grayscale image;
    // the interleaved UV plane that follows is irrelevant for this purpose.
    match nv12_data.get(..y_size) {
        Some(y_plane) => y_plane.to_vec(),
        // Buffer too short: return a black frame of the expected size.
        None => vec![0u8; y_size],
    }
}
/// Convert NV21 (YUV 4:2:0 semi-planar, VU order) to grayscale
/// Same as NV12 for grayscale conversion since we only need Y plane
pub fn nv21_to_grayscale(nv21_data: &[u8], width: usize, height: usize) -> Vec<u8> {
    // NV21 only swaps the U/V ordering relative to NV12; the Y plane layout
    // is identical, so grayscale extraction is the same Y-plane copy.
    let y_size = width * height;
    if nv21_data.len() < y_size {
        // Buffer too short: return a black frame of the expected size.
        vec![0u8; y_size]
    } else {
        nv21_data[..y_size].to_vec()
    }
}
/// Convert I420 (YUV 4:2:0 planar) to grayscale
/// I420 format: Y plane, U plane, V plane (all separate)
pub fn i420_to_grayscale(i420_data: &[u8], width: usize, height: usize) -> Vec<u8> {
    // I420 also starts with a full-resolution Y plane, so grayscale
    // extraction is the same Y-plane copy used for NV12.
    let y_size = width * height;
    if i420_data.len() < y_size {
        // Buffer too short: return a black frame of the expected size.
        vec![0u8; y_size]
    } else {
        i420_data[..y_size].to_vec()
    }
}
/// Convert BGR888 (24-bit, OpenCV format) to grayscale
///
/// Same BT.601 fixed-point conversion as [`rgb888_to_grayscale`], with the
/// byte order reversed (OpenCV stores channels as B, G, R). Returns a black
/// frame when the buffer is too short; oversized buffers are truncated so
/// the output is always exactly `width * height` bytes.
pub fn bgr888_to_grayscale(bgr_data: &[u8], width: usize, height: usize) -> Vec<u8> {
    let expected_size = width * height * 3;
    if bgr_data.len() < expected_size {
        // Short frame: return a full-size black frame instead of truncating.
        return vec![0u8; width * height];
    }

    let mut grayscale = Vec::with_capacity(width * height);

    // Slice first so an oversized buffer cannot emit extra pixels.
    for pixel in bgr_data[..expected_size].chunks_exact(3) {
        let b = pixel[0] as u32;
        let g = pixel[1] as u32;
        let r = pixel[2] as u32;
        // BT.601 fixed-point: (77*R + 150*G + 29*B) >> 8
        let y = ((77 * r + 150 * g + 29 * b) >> 8) as u8;
        grayscale.push(y);
    }

    grayscale
}
/// Convert RGBA8888 (32-bit with alpha) to grayscale
///
/// The alpha channel is discarded; luma is computed from R, G, B using the
/// same BT.601 fixed-point weights as the other converters. Returns a black
/// frame when the buffer is too short; oversized buffers are truncated so
/// the output is always exactly `width * height` bytes.
pub fn rgba8888_to_grayscale(rgba_data: &[u8], width: usize, height: usize) -> Vec<u8> {
    let expected_size = width * height * 4;
    if rgba_data.len() < expected_size {
        // Short frame: return a full-size black frame instead of truncating.
        return vec![0u8; width * height];
    }

    let mut grayscale = Vec::with_capacity(width * height);

    // Slice first so an oversized buffer cannot emit extra pixels.
    for pixel in rgba_data[..expected_size].chunks_exact(4) {
        let r = pixel[0] as u32;
        let g = pixel[1] as u32;
        let b = pixel[2] as u32;
        // Alpha channel (pixel[3]) is ignored
        let y = ((77 * r + 150 * g + 29 * b) >> 8) as u8;
        grayscale.push(y);
    }

    grayscale
}
/// Decode MJPEG frame to grayscale
/// Note: This is a simplified implementation that requires the `image` crate
///
/// `width`/`height` describe the frame the caller expects; on a decode error
/// they size the black fallback frame.
///
/// NOTE(review): on a successful decode the output is sized from the JPEG's
/// own header dimensions (`w * h`), which may differ from the requested
/// `width * height` — confirm callers tolerate a mismatched buffer length.
#[cfg(feature = "nokhwa_camera")]
pub fn mjpeg_to_grayscale(jpeg_data: &[u8], width: usize, height: usize) -> Vec<u8> {
    use image::codecs::jpeg::JpegDecoder;
    use image::{DynamicImage, ImageDecoder};
    use std::io::Cursor;

    let cursor = Cursor::new(jpeg_data);
    match JpegDecoder::new(cursor) {
        Ok(decoder) => {
            // Dimensions come from the JPEG header, available before decoding.
            let (w, h) = decoder.dimensions();
            if let Ok(img) = DynamicImage::from_decoder(decoder) {
                // to_luma8 performs the color -> luma conversion internally.
                let gray = img.to_luma8();
                return gray.into_raw();
            }
            // Header parsed but pixel decode failed: black frame sized from
            // the header-reported dimensions.
            vec![0u8; (w * h) as usize]
        }
        Err(_) => {
            // Return black frame on decode error
            vec![0u8; width * height]
        }
    }
}
/// Decode MJPEG frame to grayscale (stub when `nokhwa_camera` is disabled).
///
/// Without the `image` crate there is no JPEG decoder available, so this
/// returns a mid-gray placeholder frame of the requested size.
#[cfg(not(feature = "nokhwa_camera"))]
pub fn mjpeg_to_grayscale(_jpeg_data: &[u8], width: usize, height: usize) -> Vec<u8> {
    let pixel_count = width * height;
    let mut frame = Vec::with_capacity(pixel_count);
    frame.resize(pixel_count, 128u8);
    frame
}
/// Pixel format enum for conversion dispatch
///
/// Fieldless enum, so full equality holds: derive `Eq` alongside `PartialEq`,
/// plus `Hash` so the format can be used as a map/set key.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum PixelFormat {
    RGB888,
    BGR888,
    RGBA8888,
    YUYV,
    NV12,
    NV21,
    I420,
    MJPEG,
    Grayscale,
}
/// Convert any supported pixel format to grayscale
|
|
pub fn to_grayscale(
|
|
data: &[u8],
|
|
width: usize,
|
|
height: usize,
|
|
format: PixelFormat,
|
|
) -> Vec<u8> {
|
|
match format {
|
|
PixelFormat::RGB888 => rgb888_to_grayscale(data, width, height),
|
|
PixelFormat::BGR888 => bgr888_to_grayscale(data, width, height),
|
|
PixelFormat::RGBA8888 => rgba8888_to_grayscale(data, width, height),
|
|
PixelFormat::YUYV => yuyv_to_grayscale(data, width, height),
|
|
PixelFormat::NV12 => nv12_to_grayscale(data, width, height),
|
|
PixelFormat::NV21 => nv21_to_grayscale(data, width, height),
|
|
PixelFormat::I420 => i420_to_grayscale(data, width, height),
|
|
PixelFormat::MJPEG => mjpeg_to_grayscale(data, width, height),
|
|
PixelFormat::Grayscale => data.to_vec(),
|
|
}
|
|
}
|
|
|
|
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_rgb_to_grayscale() {
        // White must map to (nearly) full luma; allow for fixed-point rounding.
        let white = vec![255u8, 255, 255];
        let luma = rgb888_to_grayscale(&white, 1, 1);
        assert_eq!(luma.len(), 1);
        assert!(luma[0] >= 254);

        // Black maps to zero exactly.
        let black = vec![0u8, 0, 0];
        assert_eq!(rgb888_to_grayscale(&black, 1, 1)[0], 0);

        // Pure red: BT.601 weight for R is 0.299, i.e. roughly 77/256.
        let red = vec![255u8, 0, 0];
        let luma = rgb888_to_grayscale(&red, 1, 1);
        assert!((75..=78).contains(&luma[0]));
    }

    #[test]
    fn test_yuyv_to_grayscale() {
        // Two pixels packed as Y0 U Y1 V; only the Y samples survive.
        let packed = vec![100u8, 128, 200, 128];
        let luma = yuyv_to_grayscale(&packed, 2, 1);
        assert_eq!(luma, vec![100, 200]);
    }

    #[test]
    fn test_nv12_to_grayscale() {
        // Y plane for a 2x2 frame followed by its interleaved UV bytes.
        let y_plane = vec![50u8, 100, 150, 200];
        let mut frame = y_plane.clone();
        frame.extend([128u8, 128]); // UV for the 2x2 image

        assert_eq!(nv12_to_grayscale(&frame, 2, 2), y_plane);
    }

    #[test]
    fn test_to_grayscale_dispatch() {
        // Mid-gray input should stay mid-gray through RGB888 dispatch.
        let mid = vec![128u8, 128, 128];
        let out = to_grayscale(&mid, 1, 1, PixelFormat::RGB888);
        assert_eq!(out.len(), 1);
        assert!((126..=130).contains(&out[0]));
    }

    #[test]
    fn test_empty_input_handling() {
        // An empty buffer must yield a correctly sized all-black frame.
        let empty: Vec<u8> = Vec::new();
        let frame = rgb888_to_grayscale(&empty, 10, 10);
        assert_eq!(frame.len(), 100);
        assert!(frame.iter().all(|&v| v == 0));
    }
}