diff --git a/naga/src/back/spv/block.rs b/naga/src/back/spv/block.rs index f0c3bfa848..355689d852 100644 --- a/naga/src/back/spv/block.rs +++ b/naga/src/back/spv/block.rs @@ -1758,14 +1758,11 @@ impl<'w> BlockContext<'w> { }; let result_type_id = self.get_type_id(result_lookup_ty); - // The id of the boolean `and` of all dynamic bounds checks up to this point. If - // `None`, then we haven't done any dynamic bounds checks yet. + // The id of the boolean `and` of all dynamic bounds checks up to this point. // - // When we have a chain of bounds checks, we combine them with `OpLogicalAnd`, not - // a short-circuit branch. This means we might do comparisons we don't need to, - // but we expect these checks to almost always succeed, and keeping branches to a - // minimum is essential. + // See `extend_bounds_check_condition_chain` for a full explanation. let mut accumulated_checks = None; + // Is true if we are accessing into a binding array with a non-uniform index. let mut is_non_uniform_binding_array = false; @@ -1773,57 +1770,41 @@ impl<'w> BlockContext<'w> { let root_id = loop { expr_handle = match self.ir_function.expressions[expr_handle] { crate::Expression::Access { base, index } => { - if let crate::Expression::GlobalVariable(var_handle) = - self.ir_function.expressions[base] - { - // The access chain needs to be decorated as NonUniform - // see VUID-RuntimeSpirv-NonUniform-06274 - let gvar = &self.ir_module.global_variables[var_handle]; - if let crate::TypeInner::BindingArray { .. } = - self.ir_module.types[gvar.ty].inner - { - is_non_uniform_binding_array = - self.fun_info[index].uniformity.non_uniform_result.is_some(); - } - } - - let index_id = match self.write_bounds_check(base, index, block)? { - BoundsCheckResult::KnownInBounds(known_index) => { - // Even if the index is known, `OpAccessIndex` - // requires expression operands, not literals. - let scalar = crate::Literal::U32(known_index); - self.writer.get_constant_scalar(scalar) - } - BoundsCheckResult::Computed(computed_index_id) => computed_index_id, - BoundsCheckResult::Conditional(comparison_id) => { - match accumulated_checks { - Some(prior_checks) => { - let combined = self.gen_id(); - block.body.push(Instruction::binary( - spirv::Op::LogicalAnd, - self.writer.get_bool_type_id(), - combined, - prior_checks, - comparison_id, - )); - accumulated_checks = Some(combined); - } - None => { - // Start a fresh chain of checks. - accumulated_checks = Some(comparison_id); - } - } + is_non_uniform_binding_array |= + self.is_nonuniform_binding_array_access(base, index); - // Either way, the index to use is unchanged. - self.cached[index] - } - }; + let index = crate::proc::index::GuardedIndex::Expression(index); + let index_id = + self.write_access_chain_index(base, index, &mut accumulated_checks, block)?; self.temp_list.push(index_id); + base } crate::Expression::AccessIndex { base, index } => { - let const_id = self.get_index_constant(index); - self.temp_list.push(const_id); + // Decide whether we're indexing a struct (bounds checks + // forbidden) or anything else (bounds checks required). + let mut base_ty = self.fun_info[base].ty.inner_with(&self.ir_module.types); + if let crate::TypeInner::Pointer { base, .. } = *base_ty { + base_ty = &self.ir_module.types[base].inner; + } + let index_id = if let crate::TypeInner::Struct { .. } = *base_ty { + self.get_index_constant(index) + } else { + // `index` is constant, so this can't possibly require + // setting `is_nonuniform_binding_array_access`. 
+
+                        // Even though the index value is statically known, `base`
+                        // may be a runtime-sized array, so we still need to go
+                        // through the bounds check process.
+                        self.write_access_chain_index(
+                            base,
+                            crate::proc::index::GuardedIndex::Known(index),
+                            &mut accumulated_checks,
+                            block,
+                        )?
+                    };
+
+                    self.temp_list.push(index_id);
                     base
                 }
                 crate::Expression::GlobalVariable(handle) => {
@@ -1878,6 +1859,105 @@ impl<'w> BlockContext<'w> {
         Ok(expr_pointer)
     }
 
+    fn is_nonuniform_binding_array_access(
+        &mut self,
+        base: Handle<crate::Expression>,
+        index: Handle<crate::Expression>,
+    ) -> bool {
+        let crate::Expression::GlobalVariable(var_handle) = self.ir_function.expressions[base]
+        else {
+            return false;
+        };
+
+        // The access chain needs to be decorated as NonUniform
+        // see VUID-RuntimeSpirv-NonUniform-06274
+        let gvar = &self.ir_module.global_variables[var_handle];
+        let crate::TypeInner::BindingArray { .. } = self.ir_module.types[gvar.ty].inner else {
+            return false;
+        };
+
+        self.fun_info[index].uniformity.non_uniform_result.is_some()
+    }
+
+    /// Compute a single index operand to an `OpAccessChain` instruction.
+    ///
+    /// Given that we are indexing `base` with `index`, apply the appropriate
+    /// bounds check policies, emitting code to `block` to clamp `index` or
+    /// determine whether it's in bounds. Return the SPIR-V instruction id of
+    /// the index value we should actually use.
+    ///
+    /// Extend `accumulated_checks` to include the results of any needed bounds
+    /// checks. See [`BlockContext::extend_bounds_check_condition_chain`].
+    fn write_access_chain_index(
+        &mut self,
+        base: Handle<crate::Expression>,
+        index: crate::proc::index::GuardedIndex,
+        accumulated_checks: &mut Option<Word>,
+        block: &mut Block,
+    ) -> Result<Word, Error> {
+        match self.write_bounds_check(base, index, block)? {
+            BoundsCheckResult::KnownInBounds(known_index) => {
+                // Even if the index is known, `OpAccessChain`
+                // requires expression operands, not literals.
+                let scalar = crate::Literal::U32(known_index);
+                Ok(self.writer.get_constant_scalar(scalar))
+            }
+            BoundsCheckResult::Computed(computed_index_id) => Ok(computed_index_id),
+            BoundsCheckResult::Conditional {
+                condition_id: condition,
+                index_id: index,
+            } => {
+                self.extend_bounds_check_condition_chain(accumulated_checks, condition, block);
+
+                // Use the index from the `Access` expression unchanged.
+                Ok(index)
+            }
+        }
+    }
+
+    /// Add a condition to a chain of bounds checks.
+    ///
+    /// As we build an `OpAccessChain` instruction governed by
+    /// [`BoundsCheckPolicy::ReadZeroSkipWrite`], we accumulate a chain of
+    /// dynamic bounds checks, one for each index in the chain, which must all
+    /// be true for that `OpAccessChain`'s execution to be well-defined. This
+    /// function adds the boolean instruction id `comparison_id` to `chain`.
+    ///
+    /// If `chain` is `None`, that means there are no bounds checks in the chain
+    /// yet. If `chain` is `Some(id)`, then `id` is the conjunction of all the
+    /// bounds checks in the chain.
+    ///
+    /// When we have multiple bounds checks, we combine them with
+    /// `OpLogicalAnd`, not a short-circuit branch. This means we might do
+    /// comparisons we don't need to, but we expect these checks to almost
+    /// always succeed, and keeping branches to a minimum is essential.
+    ///
+    /// [`BoundsCheckPolicy::ReadZeroSkipWrite`]: crate::proc::BoundsCheckPolicy
+    fn extend_bounds_check_condition_chain(
+        &mut self,
+        chain: &mut Option<Word>,
+        comparison_id: Word,
+        block: &mut Block,
+    ) {
+        match *chain {
+            Some(ref mut prior_checks) => {
+                let combined = self.gen_id();
+                block.body.push(Instruction::binary(
+                    spirv::Op::LogicalAnd,
+                    self.writer.get_bool_type_id(),
+                    combined,
+                    *prior_checks,
+                    comparison_id,
+                ));
+                *prior_checks = combined;
+            }
+            None => {
+                // Start a fresh chain of checks.
+                *chain = Some(comparison_id);
+            }
+        }
+    }
+
     /// Build the instructions for matrix - matrix column operations
     #[allow(clippy::too_many_arguments)]
     fn write_matrix_matrix_column_op(
diff --git a/naga/src/back/spv/index.rs b/naga/src/back/spv/index.rs
index 0295d895b2..9f32b244a1 100644
--- a/naga/src/back/spv/index.rs
+++ b/naga/src/back/spv/index.rs
@@ -7,11 +7,15 @@ use super::{
     selection::Selection,
     Block, BlockContext, Error, IdGenerator, Instruction, Word,
 };
-use crate::{arena::Handle, proc::BoundsCheckPolicy};
+use crate::{
+    arena::Handle,
+    proc::{index::GuardedIndex, BoundsCheckPolicy},
+};
 
 /// The results of performing a bounds check.
 ///
 /// On success, `write_bounds_check` returns a value of this type.
+#[derive(Debug)]
 pub(super) enum BoundsCheckResult {
     /// The index is statically known and in bounds, with the given value.
     KnownInBounds(u32),
@@ -19,12 +23,18 @@ pub(super) enum BoundsCheckResult {
     /// The given instruction computes the index to be used.
     Computed(Word),
 
-    /// The given instruction computes a boolean condition which is true
-    /// if the index is in bounds.
-    Conditional(Word),
+    /// Code was emitted to perform a bounds check at runtime.
+    Conditional {
+        /// The access should only be permitted if this value is true.
+        condition_id: Word,
+
+        /// The access should use this index value.
+        index_id: Word,
+    },
 }
 
 /// A value that we either know at translation time, or need to compute at runtime.
+#[derive(Copy, Clone)]
 pub(super) enum MaybeKnown<T> {
     /// The value is known at shader translation time.
     Known(T),
@@ -313,33 +323,26 @@ impl<'w> BlockContext<'w> {
     pub(super) fn write_restricted_index(
         &mut self,
         sequence: Handle<crate::Expression>,
-        index: Handle<crate::Expression>,
+        index: GuardedIndex,
         block: &mut Block,
     ) -> Result<BoundsCheckResult, Error> {
-        let index_id = self.cached[index];
-
-        // Get the sequence's maximum valid index. Return early if we've already
-        // done the bounds check.
-        let max_index_id = match self.write_sequence_max_index(sequence, block)? {
-            MaybeKnown::Known(known_max_index) => {
-                if let Ok(known_index) = self
-                    .ir_module
-                    .to_ctx()
-                    .eval_expr_to_u32_from(index, &self.ir_function.expressions)
-                {
-                    // Both the index and length are known at compile time.
-                    //
-                    // In strict WGSL compliance mode, out-of-bounds indices cannot be
-                    // reported at shader translation time, and must be replaced with
-                    // in-bounds indices at run time. So we cannot assume that
-                    // validation ensured the index was in bounds. Restrict now.
-                    let restricted = std::cmp::min(known_index, known_max_index);
-                    return Ok(BoundsCheckResult::KnownInBounds(restricted));
-                }
+        let max_index = self.write_sequence_max_index(sequence, block)?;
 
-                self.get_index_constant(known_max_index)
-            }
-            MaybeKnown::Computed(max_index_id) => max_index_id,
+        // If both are known, we can compute the index to be used
+        // right now.
+        if let (GuardedIndex::Known(index), MaybeKnown::Known(max_index)) = (index, max_index) {
+            let restricted = std::cmp::min(index, max_index);
+            return Ok(BoundsCheckResult::KnownInBounds(restricted));
+        }
+
+        let index_id = match index {
+            GuardedIndex::Known(value) => self.get_index_constant(value),
+            GuardedIndex::Expression(expr) => self.cached[expr],
+        };
+
+        let max_index_id = match max_index {
+            MaybeKnown::Known(value) => self.get_index_constant(value),
+            MaybeKnown::Computed(id) => id,
         };
 
         // One or the other of the index or length is dynamic, so emit code for
@@ -357,6 +360,8 @@
 
     /// Write an index bounds comparison to `block`, if needed.
     ///
+    /// This is used to implement [`BoundsCheckPolicy::ReadZeroSkipWrite`].
+    ///
     /// If we're able to determine statically that `index` is in bounds for
     /// `sequence`, return `KnownInBounds(value)`, where `value` is the actual
     /// value of the index. (In principle, one could know that the index is in
@@ -375,48 +380,33 @@
     fn write_index_comparison(
        &mut self,
         sequence: Handle<crate::Expression>,
-        index: Handle<crate::Expression>,
+        index: GuardedIndex,
         block: &mut Block,
     ) -> Result<BoundsCheckResult, Error> {
-        let index_id = self.cached[index];
+        let length = self.write_sequence_length(sequence, block)?;
 
-        // Get the sequence's length. Return early if we've already done the
-        // bounds check.
-        let length_id = match self.write_sequence_length(sequence, block)? {
-            MaybeKnown::Known(known_length) => {
-                if let Ok(known_index) = self
-                    .ir_module
-                    .to_ctx()
-                    .eval_expr_to_u32_from(index, &self.ir_function.expressions)
-                {
-                    // Both the index and length are known at compile time.
-                    //
-                    // It would be nice to assume that, since we are using the
-                    // `ReadZeroSkipWrite` policy, we are not in strict WGSL
-                    // compliance mode, and thus we can count on the validator to have
-                    // rejected any programs with known out-of-bounds indices, and
-                    // thus just return `KnownInBounds` here without actually
-                    // checking.
-                    //
-                    // But it's also reasonable to expect that bounds check policies
-                    // and error reporting policies should be able to vary
-                    // independently without introducing security holes. So, we should
-                    // support the case where bad indices do not cause validation
-                    // errors, and are handled via `ReadZeroSkipWrite`.
-                    //
-                    // In theory, when `known_index` is bad, we could return a new
-                    // `KnownOutOfBounds` variant here. But it's simpler just to fall
-                    // through and let the bounds check take place. The shader is
-                    // broken anyway, so it doesn't make sense to invest in emitting
-                    // the ideal code for it.
-                    if known_index < known_length {
-                        return Ok(BoundsCheckResult::KnownInBounds(known_index));
-                    }
-                }
-
-                self.get_index_constant(known_length)
+        // If both are known, we can decide whether the index is in
+        // bounds right now.
+        if let (GuardedIndex::Known(index), MaybeKnown::Known(length)) = (index, length) {
+            if index < length {
+                return Ok(BoundsCheckResult::KnownInBounds(index));
             }
-            MaybeKnown::Computed(length_id) => length_id,
+
+            // In theory, when `index` is bad, we could return a new
+            // `KnownOutOfBounds` variant here. But it's simpler just to fall
+            // through and let the bounds check take place. The shader is broken
+            // anyway, so it doesn't make sense to invest in emitting the ideal
+            // code for it.
+        }
+
+        let index_id = match index {
+            GuardedIndex::Known(value) => self.get_index_constant(value),
+            GuardedIndex::Expression(expr) => self.cached[expr],
+        };
+
+        let length_id = match length {
+            MaybeKnown::Known(value) => self.get_index_constant(value),
+            MaybeKnown::Computed(id) => id,
         };
 
         // Compare the index against the length.
@@ -430,7 +420,10 @@
         ));
 
         // Indicate that we did generate the check.
-        Ok(BoundsCheckResult::Conditional(condition_id))
+        Ok(BoundsCheckResult::Conditional {
+            condition_id,
+            index_id,
+        })
     }
 
     /// Emit a conditional load for `BoundsCheckPolicy::ReadZeroSkipWrite`.
@@ -486,9 +479,12 @@
     pub(super) fn write_bounds_check(
         &mut self,
         base: Handle<crate::Expression>,
-        index: Handle<crate::Expression>,
+        mut index: GuardedIndex,
         block: &mut Block,
     ) -> Result<BoundsCheckResult, Error> {
+        // If the value of `index` is known at compile time, find it now.
+        index.try_resolve_to_constant(self.ir_function, self.ir_module);
+
         let policy = self.writer.bounds_check_policies.choose_policy(
             base,
             &self.ir_module.types,
@@ -500,7 +496,10 @@
             BoundsCheckPolicy::ReadZeroSkipWrite => {
                 self.write_index_comparison(base, index, block)?
             }
-            BoundsCheckPolicy::Unchecked => BoundsCheckResult::Computed(self.cached[index]),
+            BoundsCheckPolicy::Unchecked => match index {
+                GuardedIndex::Known(value) => BoundsCheckResult::KnownInBounds(value),
+                GuardedIndex::Expression(expr) => BoundsCheckResult::Computed(self.cached[expr]),
+            },
         })
     }
 
@@ -517,7 +516,7 @@
         let result_type_id = self.get_expression_type_id(&self.fun_info[expr_handle].ty);
 
         let base_id = self.cached[base];
-        let index_id = self.cached[index];
+        let index = GuardedIndex::Expression(index);
 
         let result_id = match self.write_bounds_check(base, index, block)? {
             BoundsCheckResult::KnownInBounds(known_index) => {
@@ -540,12 +539,15 @@
                 ));
                 result_id
             }
-            BoundsCheckResult::Conditional(comparison_id) => {
+            BoundsCheckResult::Conditional {
+                condition_id,
+                index_id: index,
+            } => {
                 // Run-time bounds checks were required. Emit
                 // conditional load.
                 self.write_conditional_indexed_load(
                     result_type_id,
-                    comparison_id,
+                    condition_id,
                     block,
                     |id_gen, block| {
                         // The in-bounds path. Generate the access.
@@ -554,7 +556,7 @@
                             result_type_id,
                             element_id,
                             base_id,
-                            index_id,
+                            index,
                         ));
                         element_id
                     },
diff --git a/naga/src/proc/index.rs b/naga/src/proc/index.rs
index 555b08d2c3..1066eb2388 100644
--- a/naga/src/proc/index.rs
+++ b/naga/src/proc/index.rs
@@ -334,7 +334,11 @@ impl GuardedIndex {
     /// Make a `GuardedIndex::Known` from a `GuardedIndex::Expression` if possible.
    ///
    /// Return values that are already `Known` unchanged.
- fn try_resolve_to_constant(&mut self, function: &crate::Function, module: &crate::Module) { + pub(crate) fn try_resolve_to_constant( + &mut self, + function: &crate::Function, + module: &crate::Module, + ) { if let GuardedIndex::Expression(expr) = *self { if let Ok(value) = module .to_ctx() diff --git a/naga/tests/in/bounds-check-restrict.wgsl b/naga/tests/in/bounds-check-restrict.wgsl index 2b7208355c..6690d2feab 100644 --- a/naga/tests/in/bounds-check-restrict.wgsl +++ b/naga/tests/in/bounds-check-restrict.wgsl @@ -70,3 +70,11 @@ fn set_in_bounds(v: f32) { globals.v[3] = v; globals.m[2][3] = v; } + +fn index_dynamic_array_constant_index() -> f32 { + return globals.d[1000]; +} + +fn set_dynamic_array_constant_index(v: f32) { + globals.d[1000] = v; +} diff --git a/naga/tests/in/bounds-check-zero-atomic.wgsl b/naga/tests/in/bounds-check-zero-atomic.wgsl index 004f08a0a5..ed927f518b 100644 --- a/naga/tests/in/bounds-check-zero-atomic.wgsl +++ b/naga/tests/in/bounds-check-zero-atomic.wgsl @@ -36,3 +36,11 @@ fn exchange_atomic_dynamic_sized_array(i: i32) -> u32 { return atomicExchange(&globals.c[i], 1u); } +fn fetch_add_atomic_dynamic_sized_array_static_index() -> u32 { + return atomicAdd(&globals.c[1000], 1u); +} + +fn exchange_atomic_dynamic_sized_array_static_index() -> u32 { + return atomicExchange(&globals.c[1000], 1u); +} + diff --git a/naga/tests/in/bounds-check-zero.wgsl b/naga/tests/in/bounds-check-zero.wgsl index 010f46ec3b..514efc4893 100644 --- a/naga/tests/in/bounds-check-zero.wgsl +++ b/naga/tests/in/bounds-check-zero.wgsl @@ -70,3 +70,11 @@ fn set_in_bounds(v: f32) { globals.v[3] = v; globals.m[2][3] = v; } + +fn index_dynamic_array_constant_index() -> f32 { + return globals.d[1000]; +} + +fn set_dynamic_array_constant_index(v: f32) { + globals.d[1000] = v; +} diff --git a/naga/tests/out/msl/bounds-check-restrict.msl b/naga/tests/out/msl/bounds-check-restrict.msl index 0d41436534..a8bf371555 100644 --- a/naga/tests/out/msl/bounds-check-restrict.msl +++ b/naga/tests/out/msl/bounds-check-restrict.msl @@ -163,3 +163,20 @@ void set_in_bounds( globals.m[2].w = v_7; return; } + +float index_dynamic_array_constant_index( + device Globals const& globals, + constant _mslBufferSizes& _buffer_sizes +) { + float _e3 = globals.d[metal::min(unsigned(1000), (_buffer_sizes.size0 - 112 - 4) / 4)]; + return _e3; +} + +void set_dynamic_array_constant_index( + float v_8, + device Globals& globals, + constant _mslBufferSizes& _buffer_sizes +) { + globals.d[metal::min(unsigned(1000), (_buffer_sizes.size0 - 112 - 4) / 4)] = v_8; + return; +} diff --git a/naga/tests/out/msl/bounds-check-zero-atomic.msl b/naga/tests/out/msl/bounds-check-zero-atomic.msl index 4a2f0b07dc..232ae34c61 100644 --- a/naga/tests/out/msl/bounds-check-zero-atomic.msl +++ b/naga/tests/out/msl/bounds-check-zero-atomic.msl @@ -75,3 +75,19 @@ uint exchange_atomic_dynamic_sized_array( uint _e5 = uint(i_3) < 1 + (_buffer_sizes.size0 - 44 - 4) / 4 ? metal::atomic_exchange_explicit(&globals.c[i_3], 1u, metal::memory_order_relaxed) : DefaultConstructible(); return _e5; } + +uint fetch_add_atomic_dynamic_sized_array_static_index( + device Globals& globals, + constant _mslBufferSizes& _buffer_sizes +) { + uint _e4 = uint(1000) < 1 + (_buffer_sizes.size0 - 44 - 4) / 4 ? 
metal::atomic_fetch_add_explicit(&globals.c[1000], 1u, metal::memory_order_relaxed) : DefaultConstructible(); + return _e4; +} + +uint exchange_atomic_dynamic_sized_array_static_index( + device Globals& globals, + constant _mslBufferSizes& _buffer_sizes +) { + uint _e4 = uint(1000) < 1 + (_buffer_sizes.size0 - 44 - 4) / 4 ? metal::atomic_exchange_explicit(&globals.c[1000], 1u, metal::memory_order_relaxed) : DefaultConstructible(); + return _e4; +} diff --git a/naga/tests/out/msl/bounds-check-zero.msl b/naga/tests/out/msl/bounds-check-zero.msl index 7bbdd50d1b..8269d4bf70 100644 --- a/naga/tests/out/msl/bounds-check-zero.msl +++ b/naga/tests/out/msl/bounds-check-zero.msl @@ -183,3 +183,22 @@ void set_in_bounds( globals.m[2].w = v_7; return; } + +float index_dynamic_array_constant_index( + device Globals const& globals, + constant _mslBufferSizes& _buffer_sizes +) { + float _e3 = uint(1000) < 1 + (_buffer_sizes.size0 - 112 - 4) / 4 ? globals.d[1000] : DefaultConstructible(); + return _e3; +} + +void set_dynamic_array_constant_index( + float v_8, + device Globals& globals, + constant _mslBufferSizes& _buffer_sizes +) { + if (uint(1000) < 1 + (_buffer_sizes.size0 - 112 - 4) / 4) { + globals.d[1000] = v_8; + } + return; +} diff --git a/naga/tests/out/spv/bounds-check-restrict.spvasm b/naga/tests/out/spv/bounds-check-restrict.spvasm index c7cf675a17..c4ba6dfee8 100644 --- a/naga/tests/out/spv/bounds-check-restrict.spvasm +++ b/naga/tests/out/spv/bounds-check-restrict.spvasm @@ -1,7 +1,7 @@ ; SPIR-V ; Version: 1.1 ; Generator: rspirv -; Bound: 163 +; Bound: 180 OpCapability Shader OpCapability Linkage OpExtension "SPV_KHR_storage_buffer_storage_class" @@ -52,6 +52,7 @@ OpDecorate %12 Binding 0 %129 = OpTypeFunction %2 %11 %7 %138 = OpTypeFunction %2 %11 %11 %3 %158 = OpTypeFunction %2 %3 +%168 = OpConstant %6 1000 %16 = OpFunction %3 None %17 %15 = OpFunctionParameter %11 %14 = OpLabel @@ -232,4 +233,27 @@ OpStore %161 %156 %162 = OpAccessChain %43 %12 %62 %62 %35 OpStore %162 %156 OpReturn +OpFunctionEnd +%164 = OpFunction %3 None %91 +%163 = OpLabel +OpBranch %165 +%165 = OpLabel +%166 = OpArrayLength %6 %12 3 +%167 = OpISub %6 %166 %32 +%169 = OpExtInst %6 %1 UMin %168 %167 +%170 = OpAccessChain %20 %12 %35 %169 +%171 = OpLoad %3 %170 +OpReturnValue %171 +OpFunctionEnd +%174 = OpFunction %2 None %158 +%173 = OpFunctionParameter %3 +%172 = OpLabel +OpBranch %175 +%175 = OpLabel +%176 = OpArrayLength %6 %12 3 +%177 = OpISub %6 %176 %32 +%178 = OpExtInst %6 %1 UMin %168 %177 +%179 = OpAccessChain %20 %12 %35 %178 +OpStore %179 %173 +OpReturn OpFunctionEnd \ No newline at end of file diff --git a/naga/tests/out/spv/bounds-check-zero.spvasm b/naga/tests/out/spv/bounds-check-zero.spvasm index 2bb81261e1..f1f1bedf3f 100644 --- a/naga/tests/out/spv/bounds-check-zero.spvasm +++ b/naga/tests/out/spv/bounds-check-zero.spvasm @@ -1,7 +1,7 @@ ; SPIR-V ; Version: 1.1 ; Generator: rspirv -; Bound: 200 +; Bound: 220 OpCapability Shader OpCapability Linkage OpExtension "SPV_KHR_storage_buffer_storage_class" @@ -56,6 +56,7 @@ OpDecorate %12 Binding 0 %159 = OpTypeFunction %2 %11 %7 %170 = OpTypeFunction %2 %11 %11 %3 %195 = OpTypeFunction %2 %3 +%204 = OpConstant %6 1000 %16 = OpFunction %3 None %17 %15 = OpFunctionParameter %11 %14 = OpLabel @@ -308,4 +309,36 @@ OpStore %198 %193 %199 = OpAccessChain %48 %12 %76 %76 %37 OpStore %199 %193 OpReturn +OpFunctionEnd +%201 = OpFunction %3 None %115 +%200 = OpLabel +OpBranch %202 +%202 = OpLabel +%203 = OpArrayLength %6 %12 3 +%205 = OpULessThan %22 %204 %203 
+OpSelectionMerge %207 None +OpBranchConditional %205 %208 %207 +%208 = OpLabel +%206 = OpAccessChain %20 %12 %37 %204 +%209 = OpLoad %3 %206 +OpBranch %207 +%207 = OpLabel +%210 = OpPhi %3 %25 %202 %209 %208 +OpReturnValue %210 +OpFunctionEnd +%213 = OpFunction %2 None %195 +%212 = OpFunctionParameter %3 +%211 = OpLabel +OpBranch %214 +%214 = OpLabel +%215 = OpArrayLength %6 %12 3 +%216 = OpULessThan %22 %204 %215 +OpSelectionMerge %218 None +OpBranchConditional %216 %219 %218 +%219 = OpLabel +%217 = OpAccessChain %20 %12 %37 %204 +OpStore %217 %212 +OpBranch %218 +%218 = OpLabel +OpReturn OpFunctionEnd \ No newline at end of file diff --git a/naga/tests/out/spv/operators.spvasm b/naga/tests/out/spv/operators.spvasm index a59c2e5558..766f45528b 100644 --- a/naga/tests/out/spv/operators.spvasm +++ b/naga/tests/out/spv/operators.spvasm @@ -387,15 +387,15 @@ OpStore %302 %331 %332 = OpLoad %5 %302 %333 = OpISub %5 %332 %23 OpStore %302 %333 -%335 = OpAccessChain %334 %305 %23 +%335 = OpAccessChain %334 %305 %122 %336 = OpLoad %5 %335 %337 = OpIAdd %5 %336 %23 -%338 = OpAccessChain %334 %305 %23 +%338 = OpAccessChain %334 %305 %122 OpStore %338 %337 -%339 = OpAccessChain %334 %305 %23 +%339 = OpAccessChain %334 %305 %122 %340 = OpLoad %5 %339 %341 = OpISub %5 %340 %23 -%342 = OpAccessChain %334 %305 %23 +%342 = OpAccessChain %334 %305 %122 OpStore %342 %341 OpReturn OpFunctionEnd
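
A note for readers of this patch: the following minimal, self-contained Rust program sketches the `OpLogicalAnd` folding that `extend_bounds_check_condition_chain` performs. `Word` matches naga's `u32` result-id alias, but `Emitter`, `gen_id`, and `emit_logical_and` are stand-ins invented for this sketch, not naga's real API; only the folding strategy mirrors the diff above.

// Fold per-index bounds comparisons into one conjunction, as the SPIR-V
// backend does, instead of emitting one branch per check.
type Word = u32;

struct Emitter {
    next_id: Word,
}

impl Emitter {
    /// Allocate a fresh SPIR-V-style result id.
    fn gen_id(&mut self) -> Word {
        self.next_id += 1;
        self.next_id
    }

    /// Stand-in for emitting `OpLogicalAnd %bool %a %b`.
    fn emit_logical_and(&mut self, a: Word, b: Word) -> Word {
        let combined = self.gen_id();
        println!("%{combined} = OpLogicalAnd %bool %{a} %{b}");
        combined
    }

    /// Fold a new comparison into the running conjunction. With N dynamic
    /// indices this emits N-1 `OpLogicalAnd`s and leaves exactly one boolean
    /// id to guard the whole `OpAccessChain`, instead of N nested branches.
    fn extend_chain(&mut self, chain: &mut Option<Word>, comparison_id: Word) {
        match *chain {
            Some(ref mut prior) => *prior = self.emit_logical_and(*prior, comparison_id),
            None => *chain = Some(comparison_id),
        }
    }
}

fn main() {
    let mut e = Emitter { next_id: 100 };
    let mut chain = None;
    // Pretend ids 1, 2, 3 are `OpULessThan` results for three dynamic indices.
    for comparison_id in [1, 2, 3] {
        e.extend_chain(&mut chain, comparison_id);
    }
    // A single `OpBranchConditional` on this id guards the access.
    println!("guard: {chain:?}");
}

With three dynamic indices this prints two `OpLogicalAnd` instructions and a single guard id (`Some(102)`), showing why a chain of N checks costs N-1 ANDs but only one branch.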