diff --git a/crates/viewer/re_renderer/src/importer/cpu_model.rs b/crates/viewer/re_renderer/src/importer/cpu_model.rs new file mode 100644 index 000000000000..00cbca9c8985 --- /dev/null +++ b/crates/viewer/re_renderer/src/importer/cpu_model.rs @@ -0,0 +1,95 @@ +use std::sync::Arc; + +use slotmap::{SecondaryMap, SlotMap}; + +use crate::{ + mesh::{CpuMesh, GpuMesh, MeshError}, + renderer::GpuMeshInstance, + RenderContext, +}; + +slotmap::new_key_type! { + /// Key for identifying a cpu mesh in a model. + pub struct CpuModelMeshKey; +} + +/// Like [`GpuMeshInstance`], but for CPU-side usage in a [`CpuModel`] only. +pub struct CpuMeshInstance { + pub mesh: CpuModelMeshKey, + pub world_from_mesh: glam::Affine3A, + // TODO(andreas): Expose other properties we have on [`GpuMeshInstance`]. +} + +/// A collection of meshes & mesh instances on the CPU. +/// +/// Note that there is currently no `GpuModel` equivalent, since +/// [`GpuMeshInstance`]es use shared ownership of [`GpuMesh`]es. +/// +/// This is the output of a model loader and is ready to be converted into +/// a series of [`GpuMeshInstance`]s that can be rendered. +/// +/// This is meant as a useful intermediate structure for doing post-processing steps on the model prior to gpu upload. +#[derive(Default)] +pub struct CpuModel { + pub meshes: SlotMap<CpuModelMeshKey, CpuMesh>, + pub instances: Vec<CpuMeshInstance>, +} + +impl CpuModel { + /// Creates a new [`CpuModel`] from a single [`CpuMesh`], creating a single instance with identity transform. + pub fn from_single_mesh(mesh: CpuMesh) -> Self { + let mut model = Self::default(); + model.add_single_instance_mesh(mesh); + model + } + + /// Adds a new [`CpuMesh`] to the model, creating a single instance with identity transform. + pub fn add_single_instance_mesh(&mut self, mesh: CpuMesh) { + let mesh_key = self.meshes.insert(mesh); + self.instances.push(CpuMeshInstance { + mesh: mesh_key, + world_from_mesh: glam::Affine3A::IDENTITY, + }); + } + + pub fn calculate_bounding_box(&self) -> re_math::BoundingBox { + re_math::BoundingBox::from_points( + self.instances + .iter() + .filter_map(|mesh_instance| { + self.meshes.get(mesh_instance.mesh).map(|mesh| { + mesh.vertex_positions + .iter() + .map(|p| mesh_instance.world_from_mesh.transform_point3(*p)) + }) + }) + .flatten(), + ) + } + + /// Converts the entire model into a series of mesh instances that can be rendered.
+ /// + /// Silently ignores: + /// * instances with invalid mesh keys + /// * unreferenced meshes + pub fn into_gpu_meshes(self, ctx: &RenderContext) -> Result, MeshError> { + let mut gpu_meshes = SecondaryMap::with_capacity(self.meshes.len()); + for (mesh_key, mesh) in &self.meshes { + gpu_meshes.insert(mesh_key, Arc::new(GpuMesh::new(ctx, mesh)?)); + } + + Ok(self + .instances + .into_iter() + .filter_map(|instance| { + Some(GpuMeshInstance { + gpu_mesh: gpu_meshes.get(instance.mesh)?.clone(), + world_from_mesh: instance.world_from_mesh, + additive_tint: Default::default(), + outline_mask_ids: Default::default(), + picking_layer_id: Default::default(), + }) + }) + .collect()) + } +} diff --git a/crates/viewer/re_renderer/src/importer/gltf.rs b/crates/viewer/re_renderer/src/importer/gltf.rs index c4bcd41b4bc2..741efd200456 100644 --- a/crates/viewer/re_renderer/src/importer/gltf.rs +++ b/crates/viewer/re_renderer/src/importer/gltf.rs @@ -1,15 +1,12 @@ -use std::sync::Arc; - use ahash::{HashMap, HashMapExt}; use gltf::texture::WrappingMode; use itertools::Itertools; use smallvec::SmallVec; use crate::{ - mesh::{GpuMesh, Material, Mesh, MeshError}, - renderer::MeshInstance, + mesh::{CpuMesh, Material, MeshError}, resource_managers::{GpuTexture2D, ImageDataDesc, TextureManager2D}, - RenderContext, Rgba32Unmul, + CpuMeshInstance, CpuModel, CpuModelMeshKey, RenderContext, Rgba32Unmul, }; #[derive(thiserror::Error, Debug)] @@ -41,7 +38,7 @@ pub fn load_gltf_from_buffer( mesh_name: &str, buffer: &[u8], ctx: &RenderContext, -) -> Result, GltfImportError> { +) -> Result { re_tracing::profile_function!(); let (doc, buffers, images) = { @@ -105,25 +102,28 @@ pub fn load_gltf_from_buffer( }); } - let mut meshes = HashMap::with_capacity(doc.meshes().len()); + let mut re_model = CpuModel::default(); + let mut mesh_keys = HashMap::with_capacity(doc.meshes().len()); for ref mesh in doc.meshes() { re_tracing::profile_scope!("mesh"); let re_mesh = import_mesh(mesh, &buffers, &images_as_textures, &ctx.texture_manager_2d)?; - meshes.insert( - mesh.index(), - (Arc::new(GpuMesh::new(ctx, &re_mesh)?), Arc::new(re_mesh)), - ); + let re_mesh_key = re_model.meshes.insert(re_mesh); + mesh_keys.insert(mesh.index(), re_mesh_key); } - let mut instances = Vec::new(); for scene in doc.scenes() { for node in scene.nodes() { - gather_instances_recursive(&mut instances, &node, &glam::Affine3A::IDENTITY, &meshes); + gather_instances_recursive( + &mut re_model.instances, + &node, + &glam::Affine3A::IDENTITY, + &mesh_keys, + ); } } - Ok(instances) + Ok(re_model) } fn map_format(format: gltf::image::Format) -> Option { @@ -154,7 +154,7 @@ fn import_mesh( buffers: &[gltf::buffer::Data], gpu_image_handles: &[GpuTexture2D], texture_manager: &TextureManager2D, //imported_materials: HashMap, -) -> Result { +) -> Result { re_tracing::profile_function!(); let mesh_name = mesh.name().map_or(", + instances: &mut Vec, node: &gltf::Node<'_>, transform: &glam::Affine3A, - meshes: &HashMap, Arc)>, + meshes: &HashMap, ) { let (scale, rotation, translation) = match node.transform() { gltf::scene::Transform::Matrix { matrix } => { @@ -324,11 +324,11 @@ fn gather_instances_recursive( } if let Some(mesh) = node.mesh() { - if let Some((gpu_mesh, mesh)) = meshes.get(&mesh.index()) { - let mut gpu_mesh = - MeshInstance::new_with_cpu_mesh(gpu_mesh.clone(), Some(mesh.clone())); - gpu_mesh.world_from_mesh = transform; - instances.push(gpu_mesh); + if let Some(mesh_key) = meshes.get(&mesh.index()) { + instances.push(CpuMeshInstance { + mesh: 
*mesh_key, + world_from_mesh: transform, + }); } } } diff --git a/crates/viewer/re_renderer/src/importer/mod.rs b/crates/viewer/re_renderer/src/importer/mod.rs index 7b5c42c313f6..2e4c3132b3df 100644 --- a/crates/viewer/re_renderer/src/importer/mod.rs +++ b/crates/viewer/re_renderer/src/importer/mod.rs @@ -1,3 +1,5 @@ +mod cpu_model; + #[cfg(feature = "import-obj")] pub mod obj; @@ -7,31 +9,4 @@ pub mod gltf; #[cfg(feature = "import-stl")] pub mod stl; -use re_math::Vec3Ext as _; - -use crate::renderer::MeshInstance; - -pub fn to_uniform_scale(scale: glam::Vec3) -> f32 { - if scale.has_equal_components(0.001) { - scale.x - } else { - let uniform_scale = (scale.x * scale.y * scale.z).cbrt(); - re_log::warn!("mesh has non-uniform scale ({:?}). This is currently not supported. Using geometric mean {}", scale,uniform_scale); - uniform_scale - } -} - -pub fn calculate_bounding_box(instances: &[MeshInstance]) -> re_math::BoundingBox { - re_math::BoundingBox::from_points( - instances - .iter() - .filter_map(|mesh_instance| { - mesh_instance.mesh.as_ref().map(|mesh| { - mesh.vertex_positions - .iter() - .map(|p| mesh_instance.world_from_mesh.transform_point3(*p)) - }) - }) - .flatten(), - ) -} +pub use cpu_model::{CpuMeshInstance, CpuModel, CpuModelMeshKey}; diff --git a/crates/viewer/re_renderer/src/importer/obj.rs b/crates/viewer/re_renderer/src/importer/obj.rs index 88414dd16782..c4b3cba39543 100644 --- a/crates/viewer/re_renderer/src/importer/obj.rs +++ b/crates/viewer/re_renderer/src/importer/obj.rs @@ -1,11 +1,8 @@ -use std::sync::Arc; - use smallvec::smallvec; use crate::{ - mesh::{GpuMesh, Material, Mesh, MeshError}, - renderer::MeshInstance, - RenderContext, Rgba32Unmul, + mesh::{CpuMesh, Material, MeshError}, + CpuModel, RenderContext, Rgba32Unmul, }; #[derive(thiserror::Error, Debug)] @@ -22,7 +19,7 @@ pub enum ObjImportError { pub fn load_obj_from_buffer( buffer: &[u8], ctx: &RenderContext, -) -> Result, ObjImportError> { +) -> Result { re_tracing::profile_function!(); let (models, _materials) = tobj::load_obj_buf( @@ -36,77 +33,73 @@ pub fn load_obj_from_buffer( )?; // TODO(andreas) Merge all obj meshes into a single re_renderer mesh with multiple materials. - models - .into_iter() - .map(|model| { - // This could be optimized by using bytemuck. - - let mesh = model.mesh; - let vertex_positions: Vec = mesh - .positions - .chunks_exact(3) - .map(|p| glam::vec3(p[0], p[1], p[2])) - .collect(); - - let triangle_indices = mesh - .indices - .chunks_exact(3) - .map(|p| glam::uvec3(p[0], p[1], p[2])) - .collect(); - - let mut vertex_colors: Vec = mesh - .vertex_color - .chunks_exact(3) - .map(|c| { - Rgba32Unmul::from_rgb( - // It is not specified if the color is in linear or gamma space, but gamma seems a safe bet. 
- (c[0] * 255.0).round() as u8, - (c[1] * 255.0).round() as u8, - (c[2] * 255.0).round() as u8, - ) - }) - .collect(); - vertex_colors.resize(vertex_positions.len(), Rgba32Unmul::WHITE); - - let mut vertex_normals: Vec = mesh - .normals - .chunks_exact(3) - .map(|n| glam::vec3(n[0], n[1], n[2])) - .collect(); - vertex_normals.resize(vertex_positions.len(), glam::Vec3::ZERO); - - let mut vertex_texcoords: Vec = mesh - .texcoords - .chunks_exact(2) - .map(|t| glam::vec2(t[0], t[1])) - .collect(); - vertex_texcoords.resize(vertex_positions.len(), glam::Vec2::ZERO); - - let texture = ctx.texture_manager_2d.white_texture_unorm_handle(); - - let mesh = Mesh { - label: model.name.into(), - triangle_indices, - vertex_positions, - vertex_colors, - vertex_normals, - vertex_texcoords, - - // TODO(andreas): proper material loading - materials: smallvec![Material { - label: "default material".into(), - index_range: 0..mesh.indices.len() as u32, - albedo: texture.clone(), - albedo_factor: crate::Rgba::WHITE, - }], - }; - - mesh.sanity_check()?; - - Ok(MeshInstance::new_with_cpu_mesh( - Arc::new(GpuMesh::new(ctx, &mesh)?), - Some(Arc::new(mesh)), - )) - }) - .collect() + let mut model = CpuModel::default(); + for obj_model in models { + // This could be optimized by using bytemuck. + + let mesh = obj_model.mesh; + let vertex_positions: Vec = mesh + .positions + .chunks_exact(3) + .map(|p| glam::vec3(p[0], p[1], p[2])) + .collect(); + + let triangle_indices = mesh + .indices + .chunks_exact(3) + .map(|p| glam::uvec3(p[0], p[1], p[2])) + .collect(); + + let mut vertex_colors: Vec = mesh + .vertex_color + .chunks_exact(3) + .map(|c| { + Rgba32Unmul::from_rgb( + // It is not specified if the color is in linear or gamma space, but gamma seems a safe bet. + (c[0] * 255.0).round() as u8, + (c[1] * 255.0).round() as u8, + (c[2] * 255.0).round() as u8, + ) + }) + .collect(); + vertex_colors.resize(vertex_positions.len(), Rgba32Unmul::WHITE); + + let mut vertex_normals: Vec = mesh + .normals + .chunks_exact(3) + .map(|n| glam::vec3(n[0], n[1], n[2])) + .collect(); + vertex_normals.resize(vertex_positions.len(), glam::Vec3::ZERO); + + let mut vertex_texcoords: Vec = mesh + .texcoords + .chunks_exact(2) + .map(|t| glam::vec2(t[0], t[1])) + .collect(); + vertex_texcoords.resize(vertex_positions.len(), glam::Vec2::ZERO); + + let texture = ctx.texture_manager_2d.white_texture_unorm_handle(); + + let mesh = CpuMesh { + label: obj_model.name.into(), + triangle_indices, + vertex_positions, + vertex_colors, + vertex_normals, + vertex_texcoords, + + // TODO(andreas): proper material loading + materials: smallvec![Material { + label: "default material".into(), + index_range: 0..mesh.indices.len() as u32, + albedo: texture.clone(), + albedo_factor: crate::Rgba::WHITE, + }], + }; + + mesh.sanity_check()?; + model.add_single_instance_mesh(mesh); + } + + Ok(model) } diff --git a/crates/viewer/re_renderer/src/importer/stl.rs b/crates/viewer/re_renderer/src/importer/stl.rs index a78994aef386..51cd42c4f669 100644 --- a/crates/viewer/re_renderer/src/importer/stl.rs +++ b/crates/viewer/re_renderer/src/importer/stl.rs @@ -1,14 +1,8 @@ -use std::sync::Arc; - use itertools::Itertools; use smallvec::smallvec; use tinystl::StlData; -use crate::{ - mesh::{self, GpuMesh}, - renderer::MeshInstance, - RenderContext, -}; +use crate::{mesh, CpuModel, RenderContext}; #[derive(thiserror::Error, Debug)] pub enum StlImportError { @@ -23,7 +17,7 @@ pub enum StlImportError { pub fn load_stl_from_buffer( buffer: &[u8], ctx: &RenderContext, -) -> 
Result, StlImportError> { +) -> Result { re_tracing::profile_function!(); let cursor = std::io::Cursor::new(buffer); @@ -43,7 +37,7 @@ pub fn load_stl_from_buffer( albedo_factor: crate::Rgba::WHITE, }; - let mesh = mesh::Mesh { + let mesh = mesh::CpuMesh { label: name.into(), triangle_indices: (0..num_vertices as u32) .tuples::<(_, _, _)>() @@ -70,8 +64,5 @@ pub fn load_stl_from_buffer( mesh.sanity_check()?; - Ok(vec![MeshInstance::new_with_cpu_mesh( - Arc::new(GpuMesh::new(ctx, &mesh)?), - Some(Arc::new(mesh)), - )]) + Ok(CpuModel::from_single_mesh(mesh)) } diff --git a/crates/viewer/re_renderer/src/lib.rs b/crates/viewer/re_renderer/src/lib.rs index 371ffcfecd13..72fdade72dd1 100644 --- a/crates/viewer/re_renderer/src/lib.rs +++ b/crates/viewer/re_renderer/src/lib.rs @@ -58,6 +58,7 @@ pub use colormap::{ pub use context::{adapter_info_summary, RenderContext, RenderContextError}; pub use debug_label::DebugLabel; pub use depth_offset::DepthOffset; +pub use importer::{CpuMeshInstance, CpuModel, CpuModelMeshKey}; pub use line_drawable_builder::{LineDrawableBuilder, LineStripBuilder}; pub use point_cloud_builder::{PointCloudBatchBuilder, PointCloudBuilder}; pub use queueable_draw_data::QueueableDrawData; diff --git a/crates/viewer/re_renderer/src/mesh.rs b/crates/viewer/re_renderer/src/mesh.rs index e6e2589a4940..fb61b7d8ca3c 100644 --- a/crates/viewer/re_renderer/src/mesh.rs +++ b/crates/viewer/re_renderer/src/mesh.rs @@ -45,7 +45,7 @@ pub mod mesh_vertices { } #[derive(Clone)] -pub struct Mesh { +pub struct CpuMesh { pub label: DebugLabel, /// Non-empty array of vertex triangle indices. @@ -70,7 +70,7 @@ pub struct Mesh { pub materials: SmallVec<[Material; 1]>, } -impl Mesh { +impl CpuMesh { pub fn sanity_check(&self) -> Result<(), MeshError> { re_tracing::profile_function!(); @@ -157,7 +157,7 @@ pub enum MeshError { pub struct Material { pub label: DebugLabel, - /// Index range within the owning [`Mesh`] that should be rendered with this material. + /// Index range within the owning [`CpuMesh`] that should be rendered with this material. pub index_range: Range, /// Base color texture, also known as albedo. @@ -190,7 +190,7 @@ pub struct GpuMesh { #[derive(Clone)] pub struct GpuMaterial { - /// Index range within the owning [`Mesh`] that should be rendered with this material. + /// Index range within the owning [`CpuMesh`] that should be rendered with this material. pub index_range: Range, pub bind_group: GpuBindGroup, @@ -210,7 +210,7 @@ pub(crate) mod gpu_data { impl GpuMesh { // TODO(andreas): Take read-only context here and make uploads happen on staging belt. 
- pub fn new(ctx: &RenderContext, data: &Mesh) -> Result { + pub fn new(ctx: &RenderContext, data: &CpuMesh) -> Result { re_tracing::profile_function!(); data.sanity_check()?; diff --git a/crates/viewer/re_renderer/src/renderer/mesh_renderer.rs b/crates/viewer/re_renderer/src/renderer/mesh_renderer.rs index 9b8be32f863a..1162c3890361 100644 --- a/crates/viewer/re_renderer/src/renderer/mesh_renderer.rs +++ b/crates/viewer/re_renderer/src/renderer/mesh_renderer.rs @@ -11,7 +11,7 @@ use smallvec::smallvec; use crate::{ draw_phases::{DrawPhase, OutlineMaskProcessor}, include_shader_module, - mesh::{gpu_data::MaterialUniformBuffer, mesh_vertices, GpuMesh, Mesh}, + mesh::{gpu_data::MaterialUniformBuffer, mesh_vertices, GpuMesh}, view_builder::ViewBuilder, wgpu_resources::{ BindGroupLayoutDesc, BufferDesc, GpuBindGroupLayoutHandle, GpuBuffer, @@ -110,13 +110,10 @@ impl DrawData for MeshDrawData { type Renderer = MeshRenderer; } -pub struct MeshInstance { +pub struct GpuMeshInstance { /// Gpu mesh used by this instance pub gpu_mesh: Arc, - /// Optional cpu representation of the mesh, not needed for rendering. - pub mesh: Option>, - /// Where this instance is placed in world space and how its oriented & scaled. pub world_from_mesh: glam::Affine3A, @@ -131,17 +128,11 @@ pub struct MeshInstance { pub picking_layer_id: PickingLayerId, } -impl MeshInstance { +impl GpuMeshInstance { /// Creates a new instance of a mesh with all fields set to default except for required ones. pub fn new(gpu_mesh: Arc) -> Self { - Self::new_with_cpu_mesh(gpu_mesh, None) - } - - /// Creates a new instance of a mesh with gpu and optional cpu mesh. - pub fn new_with_cpu_mesh(gpu_mesh: Arc, cpu_mesh: Option>) -> Self { Self { gpu_mesh, - mesh: cpu_mesh, world_from_mesh: glam::Affine3A::IDENTITY, additive_tint: Color32::TRANSPARENT, outline_mask_ids: OutlineMaskPreference::NONE, @@ -158,7 +149,7 @@ impl MeshDrawData { /// Mesh data itself is gpu uploaded if not already present. 
pub fn new( ctx: &RenderContext, - instances: &[MeshInstance], + instances: &[GpuMeshInstance], ) -> Result { re_tracing::profile_function!(); diff --git a/crates/viewer/re_renderer/src/renderer/mod.rs b/crates/viewer/re_renderer/src/renderer/mod.rs index f29c19da86e3..8bb4421a6708 100644 --- a/crates/viewer/re_renderer/src/renderer/mod.rs +++ b/crates/viewer/re_renderer/src/renderer/mod.rs @@ -23,7 +23,7 @@ pub use rectangles::{ mod mesh_renderer; pub(crate) use mesh_renderer::MeshRenderer; -pub use mesh_renderer::{MeshDrawData, MeshInstance}; +pub use mesh_renderer::{GpuMeshInstance, MeshDrawData}; mod compositor; pub(crate) use compositor::CompositorDrawData; diff --git a/crates/viewer/re_renderer_examples/framework.rs b/crates/viewer/re_renderer_examples/framework.rs index a034242c9143..751565ad619c 100644 --- a/crates/viewer/re_renderer_examples/framework.rs +++ b/crates/viewer/re_renderer_examples/framework.rs @@ -326,13 +326,18 @@ impl Application { } #[allow(dead_code)] -pub fn load_rerun_mesh(re_ctx: &RenderContext) -> Vec { +pub fn load_rerun_mesh( + re_ctx: &RenderContext, +) -> anyhow::Result> { let reader = std::io::Cursor::new(include_bytes!("../../../tests/assets/rerun.obj.zip")); - let mut zip = zip::ZipArchive::new(reader).unwrap(); - let mut zipped_obj = zip.by_name("rerun.obj").unwrap(); + let mut zip = zip::ZipArchive::new(reader)?; + let mut zipped_obj = zip.by_name("rerun.obj")?; let mut obj_data = Vec::new(); - std::io::Read::read_to_end(&mut zipped_obj, &mut obj_data).unwrap(); - re_renderer::importer::obj::load_obj_from_buffer(&obj_data, re_ctx).unwrap() + std::io::Read::read_to_end(&mut zipped_obj, &mut obj_data)?; + Ok( + re_renderer::importer::obj::load_obj_from_buffer(&obj_data, re_ctx)? + .into_gpu_meshes(re_ctx)?, + ) } struct WrapApp { diff --git a/crates/viewer/re_renderer_examples/multiview.rs b/crates/viewer/re_renderer_examples/multiview.rs index bbc3a06380c9..af74feb01fc0 100644 --- a/crates/viewer/re_renderer_examples/multiview.rs +++ b/crates/viewer/re_renderer_examples/multiview.rs @@ -13,7 +13,7 @@ use re_math::IsoTransform; use re_renderer::{ renderer::{ - GenericSkyboxDrawData, LineDrawData, LineStripFlags, MeshDrawData, MeshInstance, + GenericSkyboxDrawData, GpuMeshInstance, LineDrawData, LineStripFlags, MeshDrawData, TestTriangleDrawData, }, view_builder::{OrthographicCameraMode, Projection, TargetConfiguration, ViewBuilder}, @@ -26,7 +26,7 @@ mod framework; fn build_mesh_instances( re_ctx: &RenderContext, - model_mesh_instances: &[MeshInstance], + model_mesh_instances: &[GpuMeshInstance], mesh_instance_positions_and_colors: &[(glam::Vec3, Color32)], seconds_since_startup: f32, ) -> MeshDrawData { @@ -35,9 +35,8 @@ fn build_mesh_instances( .enumerate() .flat_map(|(i, positions_and_colors)| { model_mesh_instances.iter().zip(positions_and_colors).map( - move |(model_mesh_instances, (p, c))| MeshInstance { + move |(model_mesh_instances, (p, c))| GpuMeshInstance { gpu_mesh: model_mesh_instances.gpu_mesh.clone(), - mesh: None, world_from_mesh: glam::Affine3A::from_scale_rotation_translation( glam::vec3( 2.5 + (i % 3) as f32, @@ -157,7 +156,7 @@ struct Multiview { camera_control: CameraControl, camera_position: glam::Vec3, - model_mesh_instances: Vec, + model_mesh_instances: Vec, mesh_instance_positions_and_colors: Vec<(glam::Vec3, Color32)>, // Want to have a large cloud of random points, but doing rng for all of them every frame is too slow @@ -277,7 +276,8 @@ impl Example for Multiview { .map(|_| random_color(&mut rnd)) .collect_vec(); - let 
model_mesh_instances = crate::framework::load_rerun_mesh(re_ctx); + let model_mesh_instances = + crate::framework::load_rerun_mesh(re_ctx).expect("Failed to load rerun mesh"); let mesh_instance_positions_and_colors = lorenz_points(10.0) .iter() diff --git a/crates/viewer/re_renderer_examples/outlines.rs b/crates/viewer/re_renderer_examples/outlines.rs index 9b3272f9e091..9df0fab6fbbf 100644 --- a/crates/viewer/re_renderer_examples/outlines.rs +++ b/crates/viewer/re_renderer_examples/outlines.rs @@ -5,7 +5,7 @@ use itertools::Itertools; use re_renderer::{ - renderer::MeshInstance, + renderer::GpuMeshInstance, view_builder::{Projection, TargetConfiguration, ViewBuilder}, Color32, OutlineConfig, OutlineMaskPreference, }; @@ -16,7 +16,7 @@ mod framework; struct Outlines { is_paused: bool, seconds_since_startup: f32, - model_mesh_instances: Vec, + model_mesh_instances: Vec, } struct MeshProperties { @@ -34,7 +34,8 @@ impl framework::Example for Outlines { Self { is_paused: false, seconds_since_startup: 0.0, - model_mesh_instances: crate::framework::load_rerun_mesh(re_ctx), + model_mesh_instances: crate::framework::load_rerun_mesh(re_ctx) + .expect("Failed to load rerun mesh"), } } @@ -110,9 +111,8 @@ impl framework::Example for Outlines { .flat_map(|props| { self.model_mesh_instances .iter() - .map(move |instance| MeshInstance { + .map(move |instance| GpuMeshInstance { gpu_mesh: instance.gpu_mesh.clone(), - mesh: None, world_from_mesh: glam::Affine3A::from_rotation_translation( props.rotation, props.position, diff --git a/crates/viewer/re_renderer_examples/picking.rs b/crates/viewer/re_renderer_examples/picking.rs index 45b654b38de2..af5ab17de139 100644 --- a/crates/viewer/re_renderer_examples/picking.rs +++ b/crates/viewer/re_renderer_examples/picking.rs @@ -6,7 +6,7 @@ use itertools::Itertools as _; use rand::Rng; use re_renderer::{ - renderer::MeshInstance, + renderer::GpuMeshInstance, view_builder::{Projection, TargetConfiguration, ViewBuilder}, Color32, GpuReadbackIdentifier, PickingLayerId, PickingLayerInstanceId, PickingLayerProcessor, PointCloudBuilder, RectInt, Size, @@ -24,7 +24,7 @@ struct PointSet { struct Picking { point_sets: Vec, picking_position: glam::UVec2, - model_mesh_instances: Vec, + model_mesh_instances: Vec, mesh_is_hovered: bool, } @@ -86,7 +86,8 @@ impl framework::Example for Picking { }) .collect_vec(); - let model_mesh_instances = crate::framework::load_rerun_mesh(re_ctx); + let model_mesh_instances = + crate::framework::load_rerun_mesh(re_ctx).expect("Failed to load rerun mesh"); Self { point_sets, @@ -176,9 +177,8 @@ impl framework::Example for Picking { let instances = self .model_mesh_instances .iter() - .map(|instance| MeshInstance { + .map(|instance| GpuMeshInstance { gpu_mesh: instance.gpu_mesh.clone(), - mesh: None, world_from_mesh: glam::Affine3A::from_translation(glam::vec3(0.0, 0.0, 0.0)), picking_layer_id: MESH_ID, additive_tint: if self.mesh_is_hovered { diff --git a/crates/viewer/re_space_view_spatial/src/mesh_loader.rs b/crates/viewer/re_space_view_spatial/src/mesh_loader.rs index 1482486d258b..39ac2757ca87 100644 --- a/crates/viewer/re_space_view_spatial/src/mesh_loader.rs +++ b/crates/viewer/re_space_view_spatial/src/mesh_loader.rs @@ -14,7 +14,7 @@ pub struct LoadedMesh { // TODO(andreas): We should only have MeshHandles here (which are generated by the MeshManager!) // Can't do that right now because it's too hard to pass the render context through. 
- pub mesh_instances: Vec, + pub mesh_instances: Vec, bbox: re_math::BoundingBox, } @@ -42,7 +42,7 @@ impl LoadedMesh { ) -> anyhow::Result { re_tracing::profile_function!(); - let mesh_instances = match media_type.as_str() { + let cpu_model = match media_type.as_str() { MediaType::GLTF | MediaType::GLB => { re_renderer::importer::gltf::load_gltf_from_buffer(&name, bytes, render_ctx)? } @@ -51,7 +51,8 @@ impl LoadedMesh { _ => anyhow::bail!("{media_type} files are not supported"), }; - let bbox = re_renderer::importer::calculate_bounding_box(&mesh_instances); + let bbox = cpu_model.calculate_bounding_box(); + let mesh_instances = cpu_model.into_gpu_meshes(render_ctx)?; Ok(Self { name, @@ -157,7 +158,7 @@ impl LoadedMesh { .clone() }); - let mesh = re_renderer::mesh::Mesh { + let mesh = re_renderer::mesh::CpuMesh { label: name.clone().into(), triangle_indices, vertex_positions: vertex_positions.into(), @@ -172,7 +173,7 @@ impl LoadedMesh { }], }; - let mesh_instances = vec![re_renderer::renderer::MeshInstance::new( + let mesh_instances = vec![re_renderer::renderer::GpuMeshInstance::new( std::sync::Arc::new(GpuMesh::new(render_ctx, &mesh)?), )]; diff --git a/crates/viewer/re_space_view_spatial/src/proc_mesh.rs b/crates/viewer/re_space_view_spatial/src/proc_mesh.rs index f90ada8f6c1f..ba4ca7baa5ae 100644 --- a/crates/viewer/re_space_view_spatial/src/proc_mesh.rs +++ b/crates/viewer/re_space_view_spatial/src/proc_mesh.rs @@ -265,7 +265,7 @@ impl Cache for SolidCache { fn generate_solid(key: &ProcMeshKey, render_ctx: &RenderContext) -> Result { re_tracing::profile_function!(); - let mesh: mesh::Mesh = match *key { + let mesh: mesh::CpuMesh = match *key { ProcMeshKey::Cube => { let mut mg = re_math::MeshGen::new(); mg.push_cube(Vec3::splat(0.5), re_math::IsoTransform::IDENTITY); @@ -280,7 +280,7 @@ fn generate_solid(key: &ProcMeshKey, render_ctx: &RenderContext) -> Result Result, render_ctx: &RenderContext, - instances: &mut Vec, + instances: &mut Vec, ent_context: &SpatialSceneEntityContext<'_>, data: impl Iterator, ) { @@ -88,9 +88,8 @@ impl Asset3DVisualizer { let pose_from_mesh = mesh_instance.world_from_mesh; let world_from_mesh = world_from_pose * pose_from_mesh; - MeshInstance { + GpuMeshInstance { gpu_mesh: mesh_instance.gpu_mesh.clone(), - mesh: None, world_from_mesh, outline_mask_ids, picking_layer_id: picking_layer_id_from_instance_path_hash( diff --git a/crates/viewer/re_space_view_spatial/src/visualizers/meshes.rs b/crates/viewer/re_space_view_spatial/src/visualizers/meshes.rs index 4b1e4fc0c4ad..2246521c4849 100644 --- a/crates/viewer/re_space_view_spatial/src/visualizers/meshes.rs +++ b/crates/viewer/re_space_view_spatial/src/visualizers/meshes.rs @@ -1,6 +1,6 @@ use re_chunk_store::RowId; use re_log_types::{hash::Hash64, Instance, TimeInt}; -use re_renderer::{renderer::MeshInstance, RenderContext}; +use re_renderer::{renderer::GpuMeshInstance, RenderContext}; use re_types::{ archetypes::Mesh3D, components::{ @@ -63,7 +63,7 @@ impl Mesh3DVisualizer { &mut self, ctx: &QueryContext<'_>, render_ctx: &RenderContext, - instances: &mut Vec, + instances: &mut Vec, ent_context: &SpatialSceneEntityContext<'_>, data: impl Iterator>, ) { @@ -126,9 +126,8 @@ impl Mesh3DVisualizer { let entity_from_mesh = mesh_instance.world_from_mesh; let world_from_mesh = world_from_instance * entity_from_mesh; - MeshInstance { + GpuMeshInstance { gpu_mesh: mesh_instance.gpu_mesh.clone(), - mesh: None, world_from_mesh, outline_mask_ids, picking_layer_id: picking_layer_id_from_instance_path_hash( diff --git 
a/crates/viewer/re_space_view_spatial/src/visualizers/utilities/proc_mesh_vis.rs b/crates/viewer/re_space_view_spatial/src/visualizers/utilities/proc_mesh_vis.rs index b264358e7f3e..5ca2a280cd4a 100644 --- a/crates/viewer/re_space_view_spatial/src/visualizers/utilities/proc_mesh_vis.rs +++ b/crates/viewer/re_space_view_spatial/src/visualizers/utilities/proc_mesh_vis.rs @@ -1,6 +1,6 @@ use re_entity_db::InstancePathHash; use re_log_types::Instance; -use re_renderer::renderer::MeshInstance; +use re_renderer::renderer::GpuMeshInstance; use re_renderer::{LineDrawableBuilder, PickingLayerInstanceId, RenderContext}; use re_types::components::{self, FillMode}; use re_viewer_context::{ @@ -34,7 +34,7 @@ pub struct ProcMeshDrawableBuilder<'ctx, Fb> { pub line_batch_debug_label: re_renderer::DebugLabel, /// Accumulates triangle mesh instances to render. - pub solid_instances: Vec, + pub solid_instances: Vec, pub query: &'ctx ViewQuery<'ctx>, pub render_ctx: &'ctx RenderContext, @@ -221,9 +221,8 @@ where )); }; - self.solid_instances.push(MeshInstance { + self.solid_instances.push(GpuMeshInstance { gpu_mesh: solid_mesh.gpu_mesh, - mesh: None, world_from_mesh: world_from_instance, outline_mask_ids: ent_context.highlight.index_outline_mask(instance), picking_layer_id: picking_layer_id_from_instance_path_hash( diff --git a/docs/snippets/all/archetypes/video_manual_frames.py b/docs/snippets/all/archetypes/video_manual_frames.py index 9aadb1fa4fad..fb232223e7f2 100644 --- a/docs/snippets/all/archetypes/video_manual_frames.py +++ b/docs/snippets/all/archetypes/video_manual_frames.py @@ -19,17 +19,11 @@ # Create two entities, showing the same video frozen at different times. rr.log( "frame_1s", - rr.VideoFrameReference( - timestamp=rr.components.VideoTimestamp(seconds=1.0), - video_reference="video_asset", - ), + rr.VideoFrameReference(seconds=1.0, video_reference="video_asset"), ) rr.log( "frame_2s", - rr.VideoFrameReference( - timestamp=rr.components.VideoTimestamp(seconds=2.0), - video_reference="video_asset", - ), + rr.VideoFrameReference(seconds=2.0, video_reference="video_asset"), ) # Send blueprint that shows two 2D views next to each other. diff --git a/rerun_py/rerun_sdk/rerun/archetypes/asset_video.py b/rerun_py/rerun_sdk/rerun/archetypes/asset_video.py index f3ab25045295..f78337f6e421 100644 --- a/rerun_py/rerun_sdk/rerun/archetypes/asset_video.py +++ b/rerun_py/rerun_sdk/rerun/archetypes/asset_video.py @@ -90,17 +90,11 @@ class AssetVideo(AssetVideoExt, Archetype): # Create two entities, showing the same video frozen at different times. rr.log( "frame_1s", - rr.VideoFrameReference( - timestamp=rr.components.VideoTimestamp(seconds=1.0), - video_reference="video_asset", - ), + rr.VideoFrameReference(seconds=1.0, video_reference="video_asset"), ) rr.log( "frame_2s", - rr.VideoFrameReference( - timestamp=rr.components.VideoTimestamp(seconds=2.0), - video_reference="video_asset", - ), + rr.VideoFrameReference(seconds=2.0, video_reference="video_asset"), ) # Send blueprint that shows two 2D views next to each other. diff --git a/rerun_py/rerun_sdk/rerun/archetypes/video_frame_reference.py b/rerun_py/rerun_sdk/rerun/archetypes/video_frame_reference.py index 47b5c46975d0..6700d994b4dd 100644 --- a/rerun_py/rerun_sdk/rerun/archetypes/video_frame_reference.py +++ b/rerun_py/rerun_sdk/rerun/archetypes/video_frame_reference.py @@ -5,21 +5,19 @@ from __future__ import annotations -from typing import Any - from attrs import define, field -from .. import components, datatypes +from .. 
import components from .._baseclasses import ( Archetype, ) -from ..error_utils import catch_and_log_exceptions +from .video_frame_reference_ext import VideoFrameReferenceExt __all__ = ["VideoFrameReference"] @define(str=False, repr=False, init=False) -class VideoFrameReference(Archetype): +class VideoFrameReference(VideoFrameReferenceExt, Archetype): """ **Archetype**: References a single video frame. @@ -90,17 +88,11 @@ class VideoFrameReference(Archetype): # Create two entities, showing the same video frozen at different times. rr.log( "frame_1s", - rr.VideoFrameReference( - timestamp=rr.components.VideoTimestamp(seconds=1.0), - video_reference="video_asset", - ), + rr.VideoFrameReference(seconds=1.0, video_reference="video_asset"), ) rr.log( "frame_2s", - rr.VideoFrameReference( - timestamp=rr.components.VideoTimestamp(seconds=2.0), - video_reference="video_asset", - ), + rr.VideoFrameReference(seconds=2.0, video_reference="video_asset"), ) # Send blueprint that shows two 2D views next to each other. @@ -118,37 +110,7 @@ class VideoFrameReference(Archetype): """ - def __init__( - self: Any, timestamp: datatypes.VideoTimestampLike, *, video_reference: datatypes.EntityPathLike | None = None - ): - """ - Create a new instance of the VideoFrameReference archetype. - - Parameters - ---------- - timestamp: - References the closest video frame to this timestamp. - - Note that this uses the closest video frame instead of the latest at this timestamp - in order to be more forgiving of rounding errors for inprecise timestamp types. - video_reference: - Optional reference to an entity with a [`archetypes.AssetVideo`][rerun.archetypes.AssetVideo]. - - If none is specified, the video is assumed to be at the same entity. - Note that blueprint overrides on the referenced video will be ignored regardless, - as this is always interpreted as a reference to the data store. - - For a series of video frame references, it is recommended to specify this path only once - at the beginning of the series and then rely on latest-at query semantics to - keep the video reference active. - - """ - - # You can define your own __init__ function as a member of VideoFrameReferenceExt in video_frame_reference_ext.py - with catch_and_log_exceptions(context=self.__class__.__name__): - self.__attrs_init__(timestamp=timestamp, video_reference=video_reference) - return - self.__attrs_clear__() + # __init__ can be found in video_frame_reference_ext.py def __attrs_clear__(self) -> None: """Convenience method for calling `__attrs_init__` with all `None`s.""" diff --git a/rerun_py/rerun_sdk/rerun/archetypes/video_frame_reference_ext.py b/rerun_py/rerun_sdk/rerun/archetypes/video_frame_reference_ext.py new file mode 100644 index 000000000000..95d0f56fd485 --- /dev/null +++ b/rerun_py/rerun_sdk/rerun/archetypes/video_frame_reference_ext.py @@ -0,0 +1,69 @@ +from __future__ import annotations + +from typing import Any + +from .. import components, datatypes +from ..error_utils import _send_warning_or_raise, catch_and_log_exceptions + + +class VideoFrameReferenceExt: + """Extension for [VideoFrameReference][rerun.archetypes.VideoFrameReference].""" + + def __init__( + self: Any, + timestamp: datatypes.VideoTimestampLike | None = None, + *, + seconds: float | None = None, + nanoseconds: int | None = None, + video_reference: datatypes.EntityPathLike | None = None, + ) -> None: + """ + Create a new instance of the VideoFrameReference archetype. 
+ + Parameters + ---------- + timestamp: + References the closest video frame to this timestamp. + + Note that this uses the closest video frame instead of the latest at this timestamp + in order to be more forgiving of rounding errors for imprecise timestamp types. + + Mutually exclusive with `seconds` and `nanoseconds`. + seconds: + Sets the timestamp to the given number of seconds. + + Mutually exclusive with `timestamp` and `nanoseconds`. + nanoseconds: + Sets the timestamp to the given number of nanoseconds. + + Mutually exclusive with `timestamp` and `seconds`. + video_reference: + Optional reference to an entity with a [`archetypes.AssetVideo`][rerun.archetypes.AssetVideo]. + + If none is specified, the video is assumed to be at the same entity. + Note that blueprint overrides on the referenced video will be ignored regardless, + as this is always interpreted as a reference to the data store. + + For a series of video frame references, it is recommended to specify this path only once + at the beginning of the series and then rely on latest-at query semantics to + keep the video reference active. + + """ + + with catch_and_log_exceptions(context=self.__class__.__name__): + if timestamp is None: + if seconds is None and nanoseconds is None: + raise ValueError("Either timestamp or seconds/nanoseconds must be specified.") + timestamp = components.VideoTimestamp(seconds=seconds, nanoseconds=nanoseconds) + elif seconds is not None or nanoseconds is not None: + raise ValueError("Cannot specify both `timestamp` and `seconds`/`nanoseconds`.") + elif isinstance(timestamp, float): + _send_warning_or_raise("Timestamp can't be specified as a float, use `seconds` instead.") + + self.__attrs_init__( + timestamp=timestamp, + video_reference=video_reference, + ) + return + + self.__attrs_clear__() diff --git a/rerun_py/rerun_sdk/rerun/components/video_timestamp_ext.py b/rerun_py/rerun_sdk/rerun/components/video_timestamp_ext.py index f7c11958ef6f..f64ac5338a99 100644 --- a/rerun_py/rerun_sdk/rerun/components/video_timestamp_ext.py +++ b/rerun_py/rerun_sdk/rerun/components/video_timestamp_ext.py @@ -38,6 +38,8 @@ def __init__( if nanoseconds is not None: raise ValueError("Cannot specify both `seconds` and `nanoseconds`.") nanoseconds = int(seconds * 1e9 + 0.5) + elif nanoseconds is None: + raise ValueError("Either `seconds` or `nanoseconds` must be specified.") self.__attrs_init__(timestamp_ns=nanoseconds) return diff --git a/rerun_py/tests/unit/test_video_frame_reference.py b/rerun_py/tests/unit/test_video_frame_reference.py new file mode 100644 index 000000000000..d0b284231aa1 --- /dev/null +++ b/rerun_py/tests/unit/test_video_frame_reference.py @@ -0,0 +1,33 @@ +from __future__ import annotations + +import pytest +import rerun as rr + + +def test_video_frame_reference() -> None: + rr.set_strict_mode(True) + + # Too many args: + with pytest.raises(ValueError): + rr.VideoFrameReference(timestamp=rr.components.VideoTimestamp(seconds=12.3), seconds=12.3, nanoseconds=123) + with pytest.raises(ValueError): + rr.VideoFrameReference(seconds=12.3, nanoseconds=123) + with pytest.raises(ValueError): + rr.VideoFrameReference(timestamp=rr.components.VideoTimestamp(seconds=12.3), nanoseconds=123) + with pytest.raises(ValueError): + rr.VideoFrameReference(seconds=12.3, nanoseconds=123) + + # No args: + with pytest.raises(ValueError): + rr.VideoFrameReference() + + # Correct usages: + assert rr.VideoFrameReference(seconds=12.3).timestamp == rr.components.VideoTimestampBatch(
rr.components.VideoTimestamp(seconds=12.3) + ) + assert rr.VideoFrameReference(nanoseconds=123).timestamp == rr.components.VideoTimestampBatch( + rr.components.VideoTimestamp(nanoseconds=123) + ) + assert rr.VideoFrameReference( + timestamp=rr.components.VideoTimestamp(nanoseconds=123) + ).timestamp == rr.components.VideoTimestampBatch(rr.components.VideoTimestamp(nanoseconds=123))
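
Reviewer note: a minimal sketch of how the new loader pipeline introduced by this diff is intended to be used end to end (loader output -> CPU-side post-processing -> GPU upload). `normalize_size` and `load_normalized_asset` are hypothetical helpers for illustration only, as is the use of `anyhow`; `load_gltf_from_buffer`, `CpuModel::calculate_bounding_box`, and `CpuModel::into_gpu_meshes` are the APIs added or changed here (the gltf loader is assumed to be enabled via its import feature).

use re_renderer::{renderer::GpuMeshInstance, CpuModel, RenderContext};

// Hypothetical post-processing step: rescale all instances so the model roughly fits a unit box.
// This kind of CPU-side tweak is what CpuModel enables before any GPU upload happens.
fn normalize_size(model: &mut CpuModel) {
    let bbox = model.calculate_bounding_box();
    let scale = 1.0 / bbox.size().max_element().max(f32::EPSILON);
    for instance in &mut model.instances {
        instance.world_from_mesh =
            glam::Affine3A::from_scale(glam::Vec3::splat(scale)) * instance.world_from_mesh;
    }
}

// Hypothetical end-to-end usage of the new loader output.
fn load_normalized_asset(
    ctx: &RenderContext,
    bytes: &[u8],
) -> anyhow::Result<Vec<GpuMeshInstance>> {
    // Loaders now return a CPU-side model instead of ready-to-render GPU instances…
    let mut model = re_renderer::importer::gltf::load_gltf_from_buffer("asset", bytes, ctx)?;

    // …so post-processing can happen before anything touches the GPU.
    normalize_size(&mut model);

    // GPU upload happens exactly once, at the end.
    Ok(model.into_gpu_meshes(ctx)?)
}

This mirrors what mesh_loader.rs does in this diff (compute the bounding box on the CpuModel, then call into_gpu_meshes), with the scaling step standing in for whatever post-processing a caller might want.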