Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[naga spv-out] bounds-check runtime-sized array access correctly. #6351

Open
wants to merge 8 commits into
base: trunk
Choose a base branch
from
184 changes: 132 additions & 52 deletions naga/src/back/spv/block.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1758,72 +1758,53 @@ impl<'w> BlockContext<'w> {
};
let result_type_id = self.get_type_id(result_lookup_ty);

// The id of the boolean `and` of all dynamic bounds checks up to this point. If
// `None`, then we haven't done any dynamic bounds checks yet.
// The id of the boolean `and` of all dynamic bounds checks up to this point.
//
// When we have a chain of bounds checks, we combine them with `OpLogicalAnd`, not
// a short-circuit branch. This means we might do comparisons we don't need to,
// but we expect these checks to almost always succeed, and keeping branches to a
// minimum is essential.
// See `extend_bounds_check_condition_chain` for a full explanation.
let mut accumulated_checks = None;

// Is true if we are accessing into a binding array with a non-uniform index.
let mut is_non_uniform_binding_array = false;

self.temp_list.clear();
let root_id = loop {
expr_handle = match self.ir_function.expressions[expr_handle] {
crate::Expression::Access { base, index } => {
if let crate::Expression::GlobalVariable(var_handle) =
self.ir_function.expressions[base]
{
// The access chain needs to be decorated as NonUniform
// see VUID-RuntimeSpirv-NonUniform-06274
let gvar = &self.ir_module.global_variables[var_handle];
if let crate::TypeInner::BindingArray { .. } =
self.ir_module.types[gvar.ty].inner
{
is_non_uniform_binding_array =
self.fun_info[index].uniformity.non_uniform_result.is_some();
}
}

let index_id = match self.write_bounds_check(base, index, block)? {
BoundsCheckResult::KnownInBounds(known_index) => {
// Even if the index is known, `OpAccessIndex`
// requires expression operands, not literals.
let scalar = crate::Literal::U32(known_index);
self.writer.get_constant_scalar(scalar)
}
BoundsCheckResult::Computed(computed_index_id) => computed_index_id,
BoundsCheckResult::Conditional(comparison_id) => {
match accumulated_checks {
Some(prior_checks) => {
let combined = self.gen_id();
block.body.push(Instruction::binary(
spirv::Op::LogicalAnd,
self.writer.get_bool_type_id(),
combined,
prior_checks,
comparison_id,
));
accumulated_checks = Some(combined);
}
None => {
// Start a fresh chain of checks.
accumulated_checks = Some(comparison_id);
}
}
is_non_uniform_binding_array |=
self.is_nonuniform_binding_array_access(base, index);

// Either way, the index to use is unchanged.
self.cached[index]
}
};
let index = crate::proc::index::GuardedIndex::Expression(index);
let index_id =
self.write_access_chain_index(base, index, &mut accumulated_checks, block)?;
self.temp_list.push(index_id);

base
}
crate::Expression::AccessIndex { base, index } => {
let const_id = self.get_index_constant(index);
self.temp_list.push(const_id);
// Decide whether we're indexing a struct (bounds checks
// forbidden) or anything else (bounds checks required).
let mut base_ty = self.fun_info[base].ty.inner_with(&self.ir_module.types);
if let crate::TypeInner::Pointer { base, .. } = *base_ty {
base_ty = &self.ir_module.types[base].inner;
}
let index_id = if let crate::TypeInner::Struct { .. } = *base_ty {
self.get_index_constant(index)
} else {
// `index` is constant, so this can't possibly require
// setting `is_nonuniform_binding_array_access`.

// Even though the index value is statically known, `base`
// may be a runtime-sized array, so we still need to go
// through the bounds check process.
self.write_access_chain_index(
base,
crate::proc::index::GuardedIndex::Known(index),
&mut accumulated_checks,
block,
)?
};

self.temp_list.push(index_id);
base
}
crate::Expression::GlobalVariable(handle) => {
Expand Down Expand Up @@ -1878,6 +1859,105 @@ impl<'w> BlockContext<'w> {
Ok(expr_pointer)
}

/// Whether indexing `base` by `index` is a binding-array access with a
/// non-uniform index.
///
/// Such an access chain must be decorated `NonUniform`;
/// see VUID-RuntimeSpirv-NonUniform-06274.
fn is_nonuniform_binding_array_access(
    &mut self,
    base: Handle<crate::Expression>,
    index: Handle<crate::Expression>,
) -> bool {
    // Only direct indexing of a global variable can reach a binding array.
    if let crate::Expression::GlobalVariable(var_handle) = self.ir_function.expressions[base] {
        let global = &self.ir_module.global_variables[var_handle];
        if matches!(
            self.ir_module.types[global.ty].inner,
            crate::TypeInner::BindingArray { .. }
        ) {
            // The access chain needs to be decorated as NonUniform;
            // see VUID-RuntimeSpirv-NonUniform-06274.
            return self.fun_info[index].uniformity.non_uniform_result.is_some();
        }
    }

    false
}

/// Emit a single index operand for an `OpAccessChain` instruction.
///
/// Apply the applicable bounds check policies to the access of `base` by
/// `index`, appending any instructions this requires (clamping the index,
/// or comparing it against the length) to `block`. The returned id is the
/// index value that should actually appear in the access chain.
///
/// Any dynamic check produced here is folded into `accumulated_checks`;
/// see [`BlockContext::extend_bounds_check_condition_chain`].
fn write_access_chain_index(
    &mut self,
    base: Handle<crate::Expression>,
    index: crate::proc::index::GuardedIndex,
    accumulated_checks: &mut Option<Word>,
    block: &mut Block,
) -> Result<Word, Error> {
    let index_id = match self.write_bounds_check(base, index, block)? {
        BoundsCheckResult::KnownInBounds(known_index) => {
            // `OpAccessChain` takes id operands, not literals, so even a
            // statically known index must be materialized as a constant.
            self.writer
                .get_constant_scalar(crate::Literal::U32(known_index))
        }
        BoundsCheckResult::Computed(computed_index_id) => computed_index_id,
        BoundsCheckResult::Conditional {
            condition_id,
            index_id,
        } => {
            // Record the check; the index from the `Access` expression is
            // used unchanged.
            self.extend_bounds_check_condition_chain(accumulated_checks, condition_id, block);
            index_id
        }
    };

    Ok(index_id)
}

/// Fold one more condition into a chain of bounds checks.
///
/// While assembling an `OpAccessChain` instruction governed by
/// [`BoundsCheckPolicy::ReadZeroSkipWrite`], each index in the chain may
/// contribute a dynamic bounds check, and the `OpAccessChain`'s execution
/// is well-defined only if all of them hold. This function conjoins the
/// boolean instruction id `comparison_id` into `chain`.
///
/// A `chain` of `None` means no checks have been recorded yet. A chain of
/// `Some(id)` means `id` is the conjunction of every bounds check recorded
/// so far.
///
/// Multiple checks are combined with `OpLogicalAnd` rather than a
/// short-circuit branch: we may evaluate comparisons we didn't strictly
/// need, but we expect these checks to almost always succeed, and keeping
/// branches to a minimum is essential.
///
/// [`BoundsCheckPolicy::ReadZeroSkipWrite`]: crate::proc::BoundsCheckPolicy
fn extend_bounds_check_condition_chain(
    &mut self,
    chain: &mut Option<Word>,
    comparison_id: Word,
    block: &mut Block,
) {
    if let Some(ref mut prior_checks) = *chain {
        // AND the new comparison into the running conjunction.
        let combined = self.gen_id();
        block.body.push(Instruction::binary(
            spirv::Op::LogicalAnd,
            self.writer.get_bool_type_id(),
            combined,
            *prior_checks,
            comparison_id,
        ));
        *prior_checks = combined;
    } else {
        // This is the first check; it becomes the chain.
        *chain = Some(comparison_id);
    }
}

/// Build the instructions for matrix - matrix column operations
#[allow(clippy::too_many_arguments)]
fn write_matrix_matrix_column_op(
Expand Down
Loading