diff --git a/.gitignore b/.gitignore
index 015c9c0..c42ff0b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -20,3 +20,5 @@ www/assets/
 mediamtx/
 onnxruntime/
 capture/
+
+python/
diff --git a/Cargo.toml b/Cargo.toml
index 0edf7d9..4ecfdf6 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -32,7 +32,7 @@ default = [
 ]
 
 person_matting = ["bevy_ort", "ort", "ndarray"]
-pipeline = ["image", "rayon"]
+pipeline = ["image", "imageproc", "rayon"]
 yolo = ["bevy_ort", "ort", "ndarray"]
@@ -40,13 +40,15 @@ yolo = ["bevy_ort", "ort", "ndarray"]
 anyhow = "1.0"
 async-compat = "0.2"
 bevy_args = "1.3"
-bevy_ort = { version = "0.7.2", optional = true, features = ["yolo_v8"] }
+bevy_ort = { version = "0.8", optional = true, features = ["yolo_v8"] }
 bytes = "1.5"
 clap = { version = "4.4", features = ["derive"] }
 futures = "0.3"
-image = { version = "0.24", optional = true }
+image = { version = "0.24", optional = true } # update /w `bevy` crate
+imageproc = { version = "0.23.0", optional = true } # update /w `image` crate
 ndarray = { version = "0.15", optional = true }
 openh264 = "0.5"
+png = "0.17.13"
 rayon = { version = "1.8", optional = true }
 serde = "1.0"
 serde_json = "1.0"
@@ -54,7 +56,6 @@ serde_qs = "0.12"
 retina = "0.4"
 tokio = { version = "1.36", features = ["full"] }
 url = "2.5"
-png = "0.17.13"
 
 [dependencies.bevy]
diff --git a/README.md b/README.md
index aeb34c9..7516391 100644
--- a/README.md
+++ b/README.md
@@ -98,17 +98,6 @@ view the [onshape model](https://cad.onshape.com/documents/20d4b522e97cda88fb785
 ![Alt text](docs/light_field_camera_onshape_transparent.webp)
 
-## setup rtsp streaming server
-
-it is useful to test the light field viewer with emulated camera streams
-
-### obs studio
-
-- install https://obsproject.com/
-- install rtsp plugin https://github.com/iamscottxu/obs-rtspserver/releases
-- tools > rtsp server > start server
-
-
 ## compatible bevy versions
 
 | `bevy_light_field` | `bevy` |
@@ -122,6 +111,7 @@ it is useful to test the light field viewer with emulated camera streams
 - [modnet](https://github.com/ZHKKKe/MODNet)
 - [nersemble](https://github.com/tobias-kirschstein/nersemble)
 - [paddle_seg_matting](https://github.com/PaddlePaddle/PaddleSeg/blob/release/2.9/Matting/docs/quick_start_en.md)
+- [pose diffusion](https://github.com/facebookresearch/PoseDiffusion)
 - [ray diffusion](https://github.com/jasonyzhang/RayDiffusion)
diff --git a/assets/modnet_photographic_portrait_matting.onnx b/assets/models/modnet_photographic_portrait_matting.onnx
similarity index 100%
rename from assets/modnet_photographic_portrait_matting.onnx
rename to assets/models/modnet_photographic_portrait_matting.onnx
diff --git a/assets/yolov8n.onnx b/assets/models/yolov8n.onnx
similarity index 100%
rename from assets/yolov8n.onnx
rename to assets/models/yolov8n.onnx
diff --git a/assets/streams.json b/assets/streams.json
index 1df1a04..58599cf 100644
--- a/assets/streams.json
+++ b/assets/streams.json
@@ -2,19 +2,19 @@
     { "uri": "rtsp://192.168.1.21/stream/main", "transport": "Udp", "visible": true },
     { "uri": "rtsp://192.168.1.22/stream/main", "transport": "Udp", "visible": true, "person_detection": true },
 
-    { "uri": "rtsp://192.168.1.23/user=admin&password=admin123&channel=1&stream=0.sdp?", "visible": true },
-    { "uri": "rtsp://192.168.1.24/user=admin&password=admin123&channel=1&stream=0.sdp?" },
-    { "uri": "rtsp://192.168.1.25/user=admin&password=admin123&channel=1&stream=0.sdp?" },
-    { "uri": "rtsp://192.168.1.26/user=admin&password=admin123&channel=1&stream=0.sdp?" },
-    { "uri": "rtsp://192.168.1.27/user=admin&password=admin123&channel=1&stream=0.sdp?" },
-    { "uri": "rtsp://192.168.1.28/user=admin&password=admin123&channel=1&stream=0.sdp?" },
-    { "uri": "rtsp://192.168.1.29/user=admin&password=admin123&channel=1&stream=0.sdp?" },
-    { "uri": "rtsp://192.168.1.30/user=admin&password=admin123&channel=1&stream=0.sdp?" },
+    { "uri": "rtsp://192.168.1.23/user=admin&password=admin123&channel=1&stream=0.sdp?", "rotation": 45.0, "visible": true },
+    { "uri": "rtsp://192.168.1.24/user=admin&password=admin123&channel=1&stream=0.sdp?", "rotation": 55.0 },
+    { "uri": "rtsp://192.168.1.25/user=admin&password=admin123&channel=1&stream=0.sdp?", "rotation": 90.0 },
+    { "uri": "rtsp://192.168.1.26/user=admin&password=admin123&channel=1&stream=0.sdp?", "rotation": -130.0 },
+    { "uri": "rtsp://192.168.1.27/user=admin&password=admin123&channel=1&stream=0.sdp?", "rotation": -90.0 },
+    { "uri": "rtsp://192.168.1.28/user=admin&password=admin123&channel=1&stream=0.sdp?", "rotation": -135.0 },
+    { "uri": "rtsp://192.168.1.29/user=admin&password=admin123&channel=1&stream=0.sdp?", "rotation": 125.0 },
+    { "uri": "rtsp://192.168.1.30/user=admin&password=admin123&channel=1&stream=0.sdp?", "rotation": -50.0 },
 
-    { "uri": "rtsp://192.168.1.31/user=admin&password=admin123&channel=1&stream=0.sdp?" },
-    { "uri": "rtsp://192.168.1.32/user=admin&password=admin123&channel=1&stream=0.sdp?" },
-    { "uri": "rtsp://192.168.1.33/user=admin&password=admin123&channel=1&stream=0.sdp?" },
-    { "uri": "rtsp://192.168.1.34/user=admin&password=admin123&channel=1&stream=0.sdp?" },
-    { "uri": "rtsp://192.168.1.35/user=admin&password=admin123&channel=1&stream=0.sdp?" },
+    { "uri": "rtsp://192.168.1.31/user=admin&password=admin123&channel=1&stream=0.sdp?", "rotation": 0.0 },
+    { "uri": "rtsp://192.168.1.32/user=admin&password=admin123&channel=1&stream=0.sdp?", "rotation": 0.0 },
+    { "uri": "rtsp://192.168.1.33/user=admin&password=admin123&channel=1&stream=0.sdp?", "rotation": 0.0 },
+    { "uri": "rtsp://192.168.1.34/user=admin&password=admin123&channel=1&stream=0.sdp?", "rotation": 135.0 },
+    { "uri": "rtsp://192.168.1.35/user=admin&password=admin123&channel=1&stream=0.sdp?", "rotation": -45.0 },
 
     { "uri": "rtsp://192.168.1.36/user=admin&password=admin123&channel=1&stream=0.sdp?" }
 ]
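Note: the new per-stream `rotation` key above is consumed by `StreamDescriptor` in src/stream.rs (further down in this diff). A minimal sketch of how one of these entries deserializes, assuming the field is an `Option<f32>` holding degrees (the unit implied by `rotate_image` calling `to_radians()`); the `transport` enum and other details are simplified so the sketch stays self-contained, and unknown JSON keys are ignored by serde's defaults:

    use serde::Deserialize;

    // Simplified stand-in for src/stream.rs::StreamDescriptor; the Option<f32>
    // type for `rotation` is an assumption based on the values in streams.json.
    #[derive(Debug, Deserialize)]
    struct StreamDescriptor {
        uri: String,
        visible: Option<bool>,
        person_detection: Option<bool>,
        rotation: Option<f32>,
    }

    fn main() -> serde_json::Result<()> {
        let raw = r#"{ "uri": "rtsp://192.168.1.23/user=admin&password=admin123&channel=1&stream=0.sdp?", "rotation": 45.0, "visible": true }"#;
        let descriptor: StreamDescriptor = serde_json::from_str(raw)?;

        // missing keys become None; present keys are parsed as shown
        assert_eq!(descriptor.rotation, Some(45.0));
        println!("{descriptor:?}");
        Ok(())
    }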
diff --git a/src/lib.rs b/src/lib.rs
index 5472eaa..8dbc475 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -1,10 +1,9 @@
 use bevy::prelude::*;
-
-#[cfg(feature = "person_matting")]
-pub mod matting;
+use bevy_ort::BevyOrtPlugin;
 
 pub mod ffmpeg;
 pub mod materials;
+pub mod matting;
 pub mod mp4;
 pub mod person_detect;
 pub mod pipeline;
@@ -18,6 +17,8 @@ pub struct LightFieldPlugin {
 impl Plugin for LightFieldPlugin {
     fn build(&self, app: &mut App) {
+        app.add_plugins(BevyOrtPlugin);
+
         app.add_plugins(materials::StreamMaterialsPlugin);
         app.add_plugins(person_detect::PersonDetectPlugin);
         app.add_plugins(pipeline::PipelinePlugin);
diff --git a/src/matting.rs b/src/matting.rs
index 613a5d5..c4aad9c 100644
--- a/src/matting.rs
+++ b/src/matting.rs
@@ -4,11 +4,9 @@ use bevy::{
     tasks::{block_on, futures_lite::future, AsyncComputeTaskPool, Task},
 };
 use bevy_ort::{
-    BevyOrtPlugin,
-    inputs,
     models::modnet::{
-        images_to_modnet_input,
-        modnet_output_to_luma_images,
+        Modnet,
+        modnet_inference,
     },
     Onnx,
 };
@@ -45,9 +43,7 @@ impl MattingPlugin {
 impl Plugin for MattingPlugin {
     fn build(&self, app: &mut App) {
-        app.add_plugins(BevyOrtPlugin);
         app.register_type::();
-        app.init_resource::();
         app.insert_resource(self.max_inference_size.clone());
         app.add_systems(Startup, load_modnet);
         app.add_systems(Update, matting_inference);
     }
 }
 
-#[derive(Resource, Default)]
-pub struct Modnet {
-    pub onnx: Handle,
-}
-
 fn load_modnet(
     asset_server: Res,
     mut modnet: ResMut,
 ) {
-    let modnet_handle: Handle = asset_server.load("modnet_photographic_portrait_matting.onnx");
+    let modnet_handle: Handle = asset_server.load("models/modnet_photographic_portrait_matting.onnx");
     modnet.onnx = modnet_handle;
 }
@@ -107,7 +98,8 @@ fn matting_inference(
         .map(|(_, matted_stream)| {
             let input = images.get(matted_stream.input.clone()).unwrap();
             let output = (matted_stream.output.clone(), matted_stream.material.clone());
-            (input, output)
+
+            (input.clone(), output)
         })
         .unzip();
@@ -116,11 +108,6 @@
         return;
     }
 
-    let input = images_to_modnet_input(
-        inputs.as_slice(),
-        inference_size.0.into(),
-    );
-
     if onnx_assets.get(&modnet.onnx).is_none() {
         return;
     }
@@ -128,18 +115,20 @@
     let onnx = onnx_assets.get(&modnet.onnx).unwrap();
     let session_arc = onnx.session.clone();
 
+    let inference_size = inference_size.0.into();
+
     let task = thread_pool.spawn(async move {
+        let inputs = inputs.iter().collect::>();
+
         let mask_images: Result, String> = (|| {
             let session_lock = session_arc.lock().map_err(|e| e.to_string())?;
             let session = session_lock.as_ref().ok_or("failed to get session from ONNX asset")?;
 
-            let input_values = inputs!["input" => input.view()].map_err(|e| e.to_string())?;
-            let outputs = session.run(input_values).map_err(|e| e.to_string());
-
-            let binding = outputs.ok().unwrap();
-            let output_value: &ort::Value = binding.get("output").unwrap();
-
-            Ok(modnet_output_to_luma_images(output_value))
+            Ok(modnet_inference(
+                session,
+                inputs.as_slice(),
+                inference_size,
+            ))
         })();
 
         match mask_images {
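Note: the matting system above hands the shared ort session to an AsyncComputeTaskPool task and polls it from another system, using the `block_on` / `futures_lite::future` imports kept in this hunk. A minimal standalone sketch of that spawn-and-poll pattern (the `MaskTask` component, its payload type, and the placeholder work inside the task are hypothetical stand-ins, not the types in matting.rs; the two systems would be registered on an App as usual):

    use bevy::prelude::*;
    use bevy::tasks::{block_on, futures_lite::future, AsyncComputeTaskPool, Task};

    // Hypothetical task component; matting.rs stores its inference future the same way.
    #[derive(Component)]
    struct MaskTask(Task<Result<Vec<String>, String>>);

    fn spawn_mask_task(mut commands: Commands) {
        let thread_pool = AsyncComputeTaskPool::get();

        // Stand-in for locking the ONNX session and running modnet_inference.
        let task = thread_pool.spawn(async move {
            Ok(vec!["mask_0.png".to_string()])
        });

        commands.spawn(MaskTask(task));
    }

    fn poll_mask_tasks(
        mut commands: Commands,
        mut tasks: Query<(Entity, &mut MaskTask)>,
    ) {
        for (entity, mut task) in &mut tasks {
            // poll_once returns Some(result) only once the task has finished.
            if let Some(result) = block_on(future::poll_once(&mut task.0)) {
                match result {
                    Ok(masks) => info!("inference produced {} masks", masks.len()),
                    Err(e) => error!("inference failed: {}", e),
                }
                commands.entity(entity).remove::<MaskTask>();
            }
        }
    }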
diff --git a/src/pipeline.rs b/src/pipeline.rs
index 81e1627..d5538db 100644
--- a/src/pipeline.rs
+++ b/src/pipeline.rs
@@ -8,42 +8,56 @@ use bevy::{
     },
 };
 use bevy_ort::{
-    inputs,
     models::{
         modnet::{
-            images_to_modnet_input,
-            modnet_output_to_luma_images,
+            modnet_inference,
+            Modnet,
+            ModnetPlugin
         },
         yolo_v8::{
+            yolo_inference,
             BoundingBox,
-            prepare_input,
-            process_output,
+            Yolo,
+            YoloPlugin,
         },
     },
     Onnx,
 };
 use image::{
+    DynamicImage,
+    GenericImageView,
     ImageBuffer,
     Luma,
+    Rgb,
+};
+use imageproc::geometric_transformations::{
+    rotate_about_center,
+    Interpolation,
 };
 use png::Transformations;
 use rayon::prelude::*;
 
 use crate::{
     ffmpeg::FfmpegArgs,
-    matting::Modnet,
-    stream::StreamId,
-    yolo::YoloV8,
+    stream::{
+        StreamId,
+        StreamDescriptors,
+    },
 };
 
 
 pub struct PipelinePlugin;
 impl Plugin for PipelinePlugin {
     fn build(&self, app: &mut App) {
+        app.add_plugins((
+            ModnetPlugin,
+            YoloPlugin,
+        ));
         app.add_systems(
             Update,
             (
                 generate_raw_frames,
+                generate_rotated_frames,
                 generate_mask_frames,
                 generate_yolo_frames,
             )
@@ -55,6 +69,7 @@
 #[derive(Component, Reflect)]
 pub struct PipelineConfig {
     pub raw_frames: bool,
+    pub rotate_raw_frames: bool,
     pub yolo: bool,  // https://github.com/ultralytics/ultralytics
     pub repair_frames: bool,  // https://huggingface.co/docs/diffusers/en/optimization/onnx & https://github.com/bnm6900030/swintormer
     pub upsample_frames: bool,  // https://huggingface.co/ssube/stable-diffusion-x4-upscaler-onnx
@@ -68,6 +83,7 @@ impl Default for PipelineConfig {
     fn default() -> Self {
         Self {
             raw_frames: true,
+            rotate_raw_frames: true,
             yolo: true,
             repair_frames: false,
             upsample_frames: false,
@@ -191,8 +207,9 @@
 }
 
 
-fn generate_mask_frames(
+fn generate_rotated_frames(
     mut commands: Commands,
+    descriptors: Res,
     raw_frames: Query<
         (
             Entity,
             &PipelineConfig,
             &RawFrames,
             &Session,
         ),
@@ -200,6 +217,71 @@
         Without,
     >,
 ) {
+    // TODO: create a caching/loading system wrapper over run_node interior
+    for (
+        entity,
+        config,
+        raw_frames,
+        session,
+    ) in raw_frames.iter() {
+        // TODO: get stream descriptor rotation
+
+        if config.rotate_raw_frames {
+            let run_node = !RotatedFrames::exists(session);
+            let mut rotated_frames = RotatedFrames::load_from_session(session);
+
+            if run_node {
+                let rotations: HashMap = descriptors.0.iter()
+                    .enumerate()
+                    .map(|(id, descriptor)| (StreamId(id), descriptor.rotation.unwrap_or_default()))
+                    .collect();
+
+                info!("generating rotated frames for session {}", session.id);
+
+                raw_frames.frames.iter()
+                    .for_each(|(stream_id, frames)| {
+                        let output_directory = format!("{}/{}", rotated_frames.directory, stream_id.0);
+                        std::fs::create_dir_all(&output_directory).unwrap();
+
+                        let frames = frames.par_iter()
+                            .map(|frame| {
+                                let frame_idx = std::path::Path::new(frame).file_stem().unwrap().to_str().unwrap();
+                                let output_path = format!("{}/{}.png", output_directory, frame_idx);
+
+                                rotate_image(
+                                    std::path::Path::new(frame),
+                                    std::path::Path::new(&output_path),
+                                    rotations[stream_id],
+                                ).unwrap();
+
+                                output_path
+                            })
+                            .collect::>();
+
+                        rotated_frames.frames.insert(*stream_id, frames);
+                    });
+            } else {
+                info!("rotated frames already exist for session {}", session.id);
+            }
+
+            commands.entity(entity).insert(rotated_frames);
+        }
+    }
+}
+
+
+fn generate_mask_frames(
+    mut commands: Commands,
+    frames: Query<
+        (
+            Entity,
+            &PipelineConfig,
+            &RotatedFrames,
+            &Session,
+        ),
         Without,
     >,
     modnet: Res,
     onnx_assets: Res>,
 ) {
     for (
         entity,
         config,
-        raw_frames,
+        frames,
         session,
-    ) in raw_frames.iter() {
+    ) in frames.iter() {
         if config.mask_frames {
             if onnx_assets.get(&modnet.onnx).is_none() {
                 return;
             }
@@ -227,13 +309,13 @@
             if run_node {
                 info!("generating mask frames for session {}", session.id);
 
-                raw_frames.frames.keys()
+                frames.frames.keys()
                     .for_each(|stream_id| {
                         let output_directory = format!("{}/{}", mask_frames.directory, stream_id.0);
-                        std::fs::create_dir_all(&output_directory).unwrap();
+                        std::fs::create_dir_all(output_directory).unwrap();
                     });
 
-                let mask_images = raw_frames.frames.iter()
+                let mask_images = frames.frames.iter()
                     .map(|(stream_id, frames)| {
                         let frames = frames.iter()
                             .map(|frame| {
@@ -251,8 +333,8 @@
                                 // TODO: separate image loading and onnx inference (so the image loading result can be viewed in the pipeline grid view)
                                 let image = Image::new(
                                     Extent3d {
-                                        width: width as u32,
-                                        height: height as u32,
+                                        width,
+                                        height,
                                         depth_or_array_layers: 1,
                                     },
                                     bevy::render::render_resource::TextureDimension::D2,
@@ -261,16 +343,16 @@
                                     RenderAssetUsages::all(),
                                 );
 
-                                let tensor_input = images_to_modnet_input(&[&image], None);
-
-                                let input_values = inputs!["input" => tensor_input.view()].map_err(|e| e.to_string()).unwrap();
-                                let outputs = onnx_session.run(input_values).map_err(|e| e.to_string());
-                                let binding = outputs.ok().unwrap();
-                                let output_value: &ort::Value = binding.get("output").unwrap();
-
                                 let frame_idx = std::path::Path::new(frame).file_stem().unwrap().to_str().unwrap();
-                                (frame_idx, modnet_output_to_luma_images(output_value).pop().unwrap())
+
+                                (
+                                    frame_idx,
+                                    modnet_inference(
+                                        onnx_session,
+                                        &[&image],
+                                        Some((512, 512)),
+                                    ).pop().unwrap(),
+                                )
                             })
                             .collect::>();
@@ -320,7 +402,7 @@
         ),
         Without,
     >,
-    yolo_v8: Res,
+    yolo: Res,
     onnx_assets: Res>,
 ) {
     for (
@@ -330,11 +412,11 @@
         entity,
         config,
         raw_frames,
         session,
     ) in raw_frames.iter() {
         if config.yolo {
-            if onnx_assets.get(&yolo_v8.onnx).is_none() {
+            if onnx_assets.get(&yolo.onnx).is_none() {
                 return;
             }
 
-            let onnx = onnx_assets.get(&yolo_v8.onnx).unwrap();
+            let onnx = onnx_assets.get(&yolo.onnx).unwrap();
             let onnx_session_arc = onnx.session.clone();
             let onnx_session_lock = onnx_session_arc.lock().map_err(|e| e.to_string()).unwrap();
             let onnx_session = onnx_session_lock.as_ref().ok_or("failed to get session from ONNX asset").unwrap();
@@ -348,7 +430,7 @@
             raw_frames.frames.keys()
                 .for_each(|stream_id| {
                     let output_directory = format!("{}/{}", yolo_frames.directory, stream_id.0);
-                    std::fs::create_dir_all(&output_directory).unwrap();
+                    std::fs::create_dir_all(output_directory).unwrap();
                 });
 
             // TODO: support async ort inference (re. progress bars)
@@ -370,8 +452,8 @@
                             // TODO: separate image loading and onnx inference (so the image loading result can be viewed in the pipeline grid view)
                             let image = Image::new(
                                 Extent3d {
-                                    width: width as u32,
-                                    height: height as u32,
+                                    width,
+                                    height,
                                     depth_or_array_layers: 1,
                                 },
                                 bevy::render::render_resource::TextureDimension::D2,
@@ -380,36 +462,19 @@
                                 RenderAssetUsages::all(),
                             );
 
-                            let model_width = onnx_session.inputs[0].input_type.tensor_dimensions().unwrap()[2] as u32;
-                            let model_height = onnx_session.inputs[0].input_type.tensor_dimensions().unwrap()[3] as u32;
-
-                            let tensor_input = prepare_input(
-                                &image,
-                                model_width,
-                                model_height,
-                            );
-
-                            let input_values = inputs!["images" => tensor_input.view()].map_err(|e| e.to_string()).unwrap();
-                            let outputs = onnx_session.run(input_values).map_err(|e| e.to_string());
-                            let binding = outputs.ok().unwrap();
-                            let output_value: &ort::Value = binding.get("output0").unwrap();
-
                             let frame_idx = std::path::Path::new(frame).file_stem().unwrap().to_str().unwrap();
 
                             (
                                 frame_idx,
-                                process_output(
-                                    output_value,
-                                    width,
-                                    height,
-                                    model_width,
-                                    model_height,
+                                yolo_inference(
+                                    onnx_session,
+                                    &image,
+                                    0.5,
                                 ),
                             )
                         })
                         .collect::>();
-
                     (stream_id, frames)
                 })
                 .collect::>();
@@ -439,6 +504,62 @@
 }
 
 
+// TODO: alphablend frames
+#[derive(Component, Default)]
+pub struct AlphablendFrames {
+    pub frames: HashMap>,
+    pub directory: String,
+}
+impl AlphablendFrames {
+    pub fn load_from_session(
+        session: &Session,
+    ) -> Self {
+        let directory = format!("{}/alphablend", session.directory);
+        std::fs::create_dir_all(&directory).unwrap();
+
+        let mut alphablend_frames = Self {
+            frames: HashMap::new(),
+            directory,
+        };
+        alphablend_frames.reload();
+
+        alphablend_frames
+    }
+
+    pub fn reload(&mut self) {
+        std::fs::read_dir(&self.directory)
+            .unwrap()
+            .filter_map(|entry| entry.ok())
+            .filter(|entry| entry.path().is_dir())
+            .map(|stream_dir| {
+                let stream_id = StreamId(stream_dir.path().file_name().unwrap().to_str().unwrap().parse::().unwrap());
+
+                let frames = std::fs::read_dir(stream_dir.path()).unwrap()
+                    .filter_map(|entry| entry.ok())
+                    .filter(|entry| entry.path().is_file() && entry.path().extension().and_then(|s| s.to_str()) == Some("png"))
+                    .map(|entry| entry.path().to_str().unwrap().to_string())
+                    .collect::>();
+
+                (stream_id, frames)
+            })
+            .for_each(|(stream_id, frames)| {
+                self.frames.insert(stream_id, frames);
+            });
+    }
+
+    pub fn exists(
+        session: &Session,
+    ) -> bool {
+        let output_directory = format!("{}/alphablend", session.directory);
+        std::fs::metadata(output_directory).is_ok()
+    }
+
+    pub fn image(&self, _camera: usize, _frame: usize) -> Option {
+        todo!()
+    }
+}
+
+
 // TODO: support loading maskframes -> images into a pipeline mask viewer
@@ -498,7 +619,6 @@ impl RawFrames {
 }
 
-// TODO: add YOLO for frame filtering and camera calibration
 #[derive(Component, Default)]
 pub struct YoloFrames {
     pub frames: HashMap>>,
     pub directory: String,
@@ -577,28 +697,52 @@ impl YoloFrames {
 
 
-#[derive(Component, Default, Reflect)]
-pub struct RotateFrames {
-    pub frames: Vec,
+#[derive(Component, Default)]
+pub struct RotatedFrames {
+    pub frames: HashMap>,
+    pub directory: String,
 }
-impl RotateFrames {
+impl RotatedFrames {
     pub fn load_from_session(
         session: &Session,
     ) -> Self {
-        let output_directory = format!("{}/rotated_frames", session.directory);
-        std::fs::create_dir_all(output_directory).unwrap();
+        let directory = format!("{}/rotated_frames", session.directory);
+        std::fs::create_dir_all(&directory).unwrap();
 
-        // TODO: load all files that are already in the directory
+        let mut raw_frames = Self {
+            frames: HashMap::new(),
+            directory,
+        };
+        raw_frames.reload();
 
-        Self {
-            frames: vec![],
-        }
+        raw_frames
+    }
+
+    pub fn reload(&mut self) {
+        std::fs::read_dir(&self.directory)
+            .unwrap()
+            .filter_map(|entry| entry.ok())
+            .filter(|entry| entry.path().is_dir())
+            .map(|stream_dir| {
+                let stream_id = StreamId(stream_dir.path().file_name().unwrap().to_str().unwrap().parse::().unwrap());
+
+                let frames = std::fs::read_dir(stream_dir.path()).unwrap()
+                    .filter_map(|entry| entry.ok())
+                    .filter(|entry| entry.path().is_file() && entry.path().extension().and_then(|s| s.to_str()) == Some("png"))
+                    .map(|entry| entry.path().to_str().unwrap().to_string())
+                    .collect::>();
+
+                (stream_id, frames)
+            })
+            .for_each(|(stream_id, frames)| {
+                self.frames.insert(stream_id, frames);
+            });
     }
 
     pub fn exists(
         session: &Session,
     ) -> bool {
-        let output_directory = format!("{}/frames", session.directory);
+        let output_directory = format!("{}/rotated_frames", session.directory);
         std::fs::metadata(output_directory).is_ok()
     }
@@ -691,3 +835,37 @@ fn get_next_session_id(output_directory: &str) -> usize {
         Err(_) => 0,
     }
 }
+
+
+fn rotate_image(
+    image_path: &std::path::Path,
+    output_path: &std::path::Path,
+    angle: f32,
+) -> image::ImageResult<()> {
+    if angle == 0.0 {
+        std::fs::copy(image_path, output_path)?;
+        return Ok(());
+    }
+
+    let dyn_img = image::open(image_path).unwrap();
+    let (w, h) = dyn_img.dimensions();
+
+    let image_bytes = DynamicImage::into_bytes(dyn_img);
+    let image_buffer = ImageBuffer::, Vec>::from_vec(
+        w,
+        h,
+        image_bytes[..].to_vec(),
+    ).unwrap();
+
+    let radians = angle.to_radians();
+
+    let rotated_image: ImageBuffer::, Vec> = rotate_about_center(
+        &image_buffer,
+        radians,
+        Interpolation::Bilinear,
+        Rgb([0, 0, 0]),
+    );
+    rotated_image.save(output_path)?;
+
+    Ok(())
+}
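Note: `rotate_image` above keeps the source dimensions; `rotate_about_center` fills any corner that rotates out of frame with the supplied default pixel (black here). A standalone sketch of the same imageproc call outside the pipeline, using the `image` 0.24 / `imageproc` 0.23 pair pinned in Cargo.toml (the file paths and the 45-degree angle are placeholder values):

    use image::Rgb;
    use imageproc::geometric_transformations::{rotate_about_center, Interpolation};

    fn main() -> image::ImageResult<()> {
        // placeholder paths; the pipeline derives these from the session directory
        let input = image::open("frame.png")?.to_rgb8();

        // descriptor rotation is given in degrees, converted to radians here
        let rotated = rotate_about_center(
            &input,
            45.0_f32.to_radians(),
            Interpolation::Bilinear,
            Rgb([0u8, 0, 0]), // out-of-frame corners become black
        );

        rotated.save("frame_rotated.png")?;
        Ok(())
    }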
diff --git a/src/stream.rs b/src/stream.rs
index 12fee56..9eaaffd 100644
--- a/src/stream.rs
+++ b/src/stream.rs
@@ -55,7 +55,7 @@ pub struct RtspStreamPlugin {
 impl Plugin for RtspStreamPlugin {
     fn build(&self, app: &mut App) {
         let config = std::fs::File::open(&self.stream_config).unwrap();
-        let stream_uris = serde_json::from_reader::<_, StreamUris>(config).unwrap();
+        let stream_uris = serde_json::from_reader::<_, StreamDescriptors>(config).unwrap();
 
         app
             .insert_resource(stream_uris)
@@ -69,7 +69,7 @@
 fn create_streams(
     mut commands: Commands,
     mut images: ResMut>,
-    stream_uris: Res,
+    stream_uris: Res,
 ) {
     stream_uris.0.iter()
         .enumerate()
@@ -154,7 +154,7 @@
 pub enum StreamTransport {
     Udp,
 }
 
-#[derive(Clone, Default, Debug, Serialize, Deserialize)]
+#[derive(Component, Clone, Default, Debug, Serialize, Deserialize)]
 pub struct StreamDescriptor {
     pub uri: String,
@@ -163,10 +163,12 @@ pub struct StreamDescriptor {
     pub visible: Option,
     pub person_detection: Option,
+
+    pub rotation: Option,
 }
 
 #[derive(Resource, Clone, Debug, Default, Serialize, Deserialize)]
-pub struct StreamUris(pub Vec);
+pub struct StreamDescriptors(pub Vec);
 
 
 #[derive(Component, Clone)]
diff --git a/src/yolo.rs b/src/yolo.rs
index 3ec2653..acb4c8b 100644
--- a/src/yolo.rs
+++ b/src/yolo.rs
@@ -1,25 +1,22 @@
 use bevy::prelude::*;
-use bevy_ort::Onnx;
+use bevy_ort::{
+    Onnx,
+    models::yolo_v8::Yolo,
+};
 
 
 pub struct YoloPlugin;
 impl Plugin for YoloPlugin {
     fn build(&self, app: &mut App) {
-        app.init_resource::();
-        app.add_systems(Startup, load_yolo_v8);
+        app.init_resource::();
+        app.add_systems(Startup, load_yolo);
     }
 }
 
-
-#[derive(Resource, Default)]
-pub struct YoloV8 {
-    pub onnx: Handle,
-}
-
-fn load_yolo_v8(
+fn load_yolo(
     asset_server: Res,
-    mut modnet: ResMut,
+    mut modnet: ResMut,
 ) {
-    let modnet_handle: Handle = asset_server.load("yolov8n.onnx");
+    let modnet_handle: Handle = asset_server.load("models/yolov8n.onnx");
     modnet.onnx = modnet_handle;
 }
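Note: with `BevyOrtPlugin` now registered inside `LightFieldPlugin` (src/lib.rs above) and `MattingPlugin` no longer gated behind the `person_matting` feature, the viewer wiring in the tools/viewer.rs hunks below reduces to a plain plugin list. A minimal sketch of that setup; the config path and matting dimensions are placeholder values, `stream_config` is assumed to be a `String` (the viewer passes `args.config.clone()`), and `MattingPlugin::new` takes the (max_matting_width, max_matting_height) tuple shown in this diff:

    use bevy::prelude::*;
    use bevy_light_field::{LightFieldPlugin, matting::MattingPlugin};

    fn main() {
        App::new()
            .add_plugins(DefaultPlugins)
            .add_plugins((
                // BevyOrtPlugin is added inside LightFieldPlugin, so it is not added here.
                LightFieldPlugin {
                    stream_config: "assets/streams.json".to_string(), // placeholder path
                },
                // placeholder (max_matting_width, max_matting_height) values
                MattingPlugin::new((512, 512)),
            ))
            .run();
    }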
diff --git a/tools/viewer.rs b/tools/viewer.rs
index f4b5258..c71ed31 100644
--- a/tools/viewer.rs
+++ b/tools/viewer.rs
@@ -29,6 +29,7 @@ use bevy_args::{
 use bevy_light_field::{
     LightFieldPlugin,
     materials::foreground::ForegroundMaterial,
+    matting::MattingPlugin,
     person_detect::{
         DetectPersons,
         PersonDetectedEvent,
@@ -45,11 +46,7 @@ use bevy_light_field::{
     },
 };
 
-#[cfg(feature = "person_matting")]
-use bevy_light_field::matting::{
-    MattedStream,
-    MattingPlugin,
-};
+use bevy_light_field::matting::MattedStream;
 
 
 #[derive(
@@ -120,8 +117,6 @@ fn main() {
             LightFieldPlugin {
                 stream_config: args.config.clone(),
             },
-
-            #[cfg(feature = "person_matting")]
             MattingPlugin::new((
                 args.max_matting_width,
                 args.max_matting_height,
@@ -131,7 +126,6 @@
         .add_systems(
             Startup,
             (
-                #[cfg(feature = "person_matting")]
                 create_mask_streams,
                 setup_camera,
                 select_session_from_args,
@@ -164,7 +158,6 @@
 
 // TODO: move to MattingPlugin
-#[cfg(feature = "person_matting")]
 fn create_mask_streams(
     mut commands: Commands,
     mut images: ResMut>,