renaming LirMatMulUnary #1514

Merged: 5 commits, Sep 2, 2024
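In short, the PR moves the low-level ops away from the "lir"/"mir" naming: lir_unary::LirMatMulUnary becomes optimized::OptMatMul, mir_quant becomes quant, LirMaxPool/LirSumPool become OptMaxPool/OptSumPool, and scan's LirScan becomes OptScan (with scan/mir.rs renamed to decluttered.rs). As a quick before/after sketch of the import paths, written with the in-crate paths used in the diff below (downstream crates would reach the same modules through the tract_core root):

// Before this PR:
// use crate::ops::matmul::lir_unary::{LirMatMulUnary, ProtoFusedSpec};
// use crate::ops::matmul::mir_quant::wire_ensure_q8_flavour;
// use crate::ops::scan::{LirScan, Scan};

// After this PR:
use crate::ops::matmul::optimized::{OptMatMul, ProtoFusedSpec};
use crate::ops::matmul::quant::wire_ensure_q8_flavour;
use crate::ops::scan::{OptScan, Scan};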
14 changes: 7 additions & 7 deletions core/src/ops/cnn/conv/conv.rs
@@ -15,17 +15,17 @@ use crate::ops::cnn::PaddingSpec::*;
use crate::ops::einsum::EinSum;
use crate::ops::math::{add, div, mul, sub};
use crate::ops::math::{Add, Div, Mul, Sub};
use crate::ops::matmul::lir_unary::AddMatMulGeometry;
use crate::ops::matmul::lir_unary::MapOutputAxisToInput;
use crate::ops::matmul::mir_quant::wire_ensure_q8_flavour;
use crate::ops::matmul::optimized::AddMatMulGeometry;
use crate::ops::matmul::optimized::MapOutputAxisToInput;
use crate::ops::matmul::quant::wire_ensure_q8_flavour;
use crate::ops::matmul::pack::MatMatMulPack;
use crate::ops::nn::Reduce;

use super::depth_wise::DepthWise;
use super::im2col::Im2Col;
use crate::ops::cnn::conv::KernelFormat;
use crate::ops::cnn::pools::{ConcretePoolGeometry, PoolGeometry, PoolSpec};
use crate::ops::matmul::lir_unary::{LirMatMulUnary, ProtoFusedSpec};
use crate::ops::matmul::optimized::{OptMatMul, ProtoFusedSpec};
use crate::ops::nn::{BaseDataShape, DataFormat, DataShape};

use tract_linalg::frame::PackedFormat;
@@ -117,7 +117,7 @@ impl Conv {
wires: &[OutletId],
) -> TractResult<TVec<OutletId>> {
ensure!(self.q_params.is_some());
use crate::ops::matmul::mir_quant as qmm;
use crate::ops::matmul::quant as qmm;

let c_dt = self.q_params.unwrap();
let &[mut x, mut kernel, bias, mut x0, x_scale, mut k0, mut k_scale, y0, y_scale] = wires
@@ -287,7 +287,7 @@ impl Conv {
c_axis,
h_axis,
)
.context("in wire_lir_matmatmul")?;
.context("in wire_opt_matmul")?;

let wire = self.wire_remove_group(model, name, &wire, &mmm_output_shape, c_axis)?;
let wire = self.wire_rm_n_if_needed(model, name, &wire)?;
@@ -511,7 +511,7 @@ impl Conv {
ops.push(ProtoFusedSpec::Store(unsafe { mmm.c_view(c_m_axis, c_n_axis) }));
model.wire_node(
format!("{name}.matmatmul"),
LirMatMulUnary::new(mmm, c_datum_type.fact(mmm_output_shape), c_m_axis, c_n_axis, ops)?,
OptMatMul::new(mmm, c_datum_type.fact(mmm_output_shape), c_m_axis, c_n_axis, ops)?,
&wires,
)
}
18 changes: 9 additions & 9 deletions core/src/ops/cnn/maxpool.rs
@@ -28,7 +28,7 @@ impl EvalOp for MaxPool {

fn eval(&self, inputs: TVec<TValue>) -> TractResult<TVec<TValue>> {
let shape: TVec<TDim> = inputs[0].shape().iter().map(|d| d.to_dim()).collect();
self.to_lir(&shape)?.eval(inputs)
self.to_optimized(&shape)?.eval(inputs)
}
}

@@ -74,8 +74,8 @@ impl TypedOp for MaxPool {
}

impl MaxPool {
fn to_lir(&self, input_shape: &[TDim]) -> TractResult<LirMaxPool> {
Ok(LirMaxPool {
fn to_optimized(&self, input_shape: &[TDim]) -> TractResult<OptMaxPool> {
Ok(OptMaxPool {
pool_spec: self.pool_spec.clone(),
with_index_outputs: self.with_index_outputs,
geometry: self.pool_spec.compute_geo(input_shape)?,
@@ -84,15 +84,15 @@ impl MaxPool {
}

#[derive(Debug, Clone, new, Hash)]
pub struct LirMaxPool {
pub struct OptMaxPool {
pub pool_spec: PoolSpec,
pub with_index_outputs: Option<DatumType>,
pub geometry: PoolGeometry,
}

impl Op for LirMaxPool {
impl Op for OptMaxPool {
fn name(&self) -> Cow<str> {
"LirMaxPool".into()
"OptMaxPool".into()
}

fn info(&self) -> TractResult<Vec<String>> {
@@ -102,7 +102,7 @@ impl Op for LirMaxPool {
op_as_typed_op!();
}

impl EvalOp for LirMaxPool {
impl EvalOp for OptMaxPool {
fn is_stateless(&self) -> bool {
true
}
@@ -114,7 +114,7 @@ impl EvalOp for LirMaxPool {
}
}

impl TypedOp for LirMaxPool {
impl TypedOp for OptMaxPool {
fn output_facts(&self, inputs: &[&TypedFact]) -> TractResult<TVec<TypedFact>> {
let mut facts = self.pool_spec.output_facts(inputs)?;
if let Some(idt) = self.with_index_outputs {
@@ -127,7 +127,7 @@ impl TypedOp for LirMaxPool {
as_op!();
}

impl LirMaxPool {
impl OptMaxPool {
fn eval_t<T: Datum + Copy + num_traits::Bounded + PartialOrd>(
&self,
input: &Tensor,
18 changes: 9 additions & 9 deletions core/src/ops/cnn/sumpool.rs
@@ -34,7 +34,7 @@ impl EvalOp for SumPool {

fn eval(&self, inputs: TVec<TValue>) -> TractResult<TVec<TValue>> {
let shape: TVec<TDim> = inputs[0].shape().iter().map(|d| d.to_dim()).collect();
self.to_lir(&shape)?.eval(inputs)
self.to_optimized(&shape)?.eval(inputs)
}
}

@@ -64,8 +64,8 @@ impl TypedOp for SumPool {
}

impl SumPool {
fn to_lir(&self, input_shape: &[TDim]) -> TractResult<LirSumPool> {
Ok(LirSumPool {
fn to_optimized(&self, input_shape: &[TDim]) -> TractResult<OptSumPool> {
Ok(OptSumPool {
pool_spec: self.pool_spec.clone(),
count_include_pad: self.count_include_pad,
normalize: self.normalize,
@@ -75,16 +75,16 @@ impl SumPool {
}

#[derive(Debug, Clone, new, Hash)]
pub struct LirSumPool {
pub struct OptSumPool {
pub pool_spec: PoolSpec,
pub count_include_pad: bool,
pub normalize: bool,
pub geometry: PoolGeometry,
}

impl Op for LirSumPool {
impl Op for OptSumPool {
fn name(&self) -> Cow<str> {
"LirSumPool".into()
"OptSumPool".into()
}

fn info(&self) -> TractResult<Vec<String>> {
@@ -98,7 +98,7 @@ impl Op for LirSumPool {
op_as_typed_op!();
}

impl EvalOp for LirSumPool {
impl EvalOp for OptSumPool {
fn is_stateless(&self) -> bool {
true
}
@@ -128,7 +128,7 @@ impl EvalOp for LirSumPool {
}
}

impl TypedOp for LirSumPool {
impl TypedOp for OptSumPool {
fn output_facts(&self, inputs: &[&TypedFact]) -> TractResult<TVec<TypedFact>> {
self.pool_spec.output_facts(inputs)
}
@@ -153,7 +153,7 @@ impl TypedOp for LirSumPool {
as_op!();
}

impl LirSumPool {
impl OptSumPool {
fn eval_t<T: Copy + Datum + Sum + num_traits::Float>(
&self,
input: &Tensor,
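MaxPool and SumPool get the same treatment: the general op resolves its pool geometry against the concrete input shape at eval time and hands evaluation to the freshly built Opt* op. A rough sketch of that flow with the MaxPool names from the diff, folded into one hypothetical helper (it relies on the imports already present in maxpool.rs; the method name is illustrative, not part of the PR):

impl MaxPool {
    fn eval_via_optimized(&self, inputs: TVec<TValue>) -> TractResult<TVec<TValue>> {
        // Concretize the input shape as TDims...
        let shape: TVec<TDim> = inputs[0].shape().iter().map(|d| d.to_dim()).collect();
        // ...build the optimized op with the geometry resolved for that shape...
        let opt = OptMaxPool {
            pool_spec: self.pool_spec.clone(),
            with_index_outputs: self.with_index_outputs,
            geometry: self.pool_spec.compute_geo(&shape)?,
        };
        // ...and let OptMaxPool do the actual pooling.
        opt.eval(inputs)
    }
}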
20 changes: 10 additions & 10 deletions core/src/ops/einsum/codegen.rs
@@ -6,10 +6,10 @@ use super::*;
use crate::ops::cast::cast;
use crate::ops::math::add;
use crate::ops::matmul::de_block_quant::BlockQuantValue;
use crate::ops::matmul::lir_unary::{
AddMatMulGeometry, LirMatMulUnary, MapOutputAxisToInput, ProtoFusedSpec,
use crate::ops::matmul::optimized::{
AddMatMulGeometry, OptMatMul, MapOutputAxisToInput, ProtoFusedSpec,
};
use crate::ops::matmul::mir_quant::{
use crate::ops::matmul::quant::{
combine_scales, compensate_zero_points, requant, wire_ensure_q8_flavour,
};
use crate::ops::matmul::pack::MatMatMulPack;
@@ -37,8 +37,8 @@ pub(crate) fn codegen(
AxesOrPatch::NotAMatMul(_) => return Ok(None),
};
if op.q_params.is_none() {
lir_mat_mul_unary(op, model, node, (m_axis, k_axis, n_axis))
.context("Translating to LirMatMul")
optimized_mat_mul(op, model, node, (m_axis, k_axis, n_axis))
.context("Translating to OptMatMul")
} else {
dequant(op, model, node, (m_axis, k_axis, n_axis)).context("Dequantize")
}
@@ -379,7 +379,7 @@ fn wire_packing(
}
}

fn lir_mat_mul_unary(
fn optimized_mat_mul(
op: &EinSum,
model: &TypedModel,
node: &TypedNode,
@@ -439,7 +439,7 @@ fn lir_mat_mul_unary(
(mmm, packing)
};

let mut patch = TypedModelPatch::new("Einsum to LirMatMulUnary");
let mut patch = TypedModelPatch::new("Einsum to OptMatMul");
let packers = mmm.packings()[packing];

let pa = wire_packing(model, node, 0, &mut patch, packers.0, a_k, a_m)
@@ -473,15 +473,15 @@ fn lir_mat_mul_unary(
c_to_b_axis_mapping: MapOutputAxisToInput(c_to_b_axis_mapping),
};
let output = unsafe { mmm.c_view(c_m, c_n) };
let lir = LirMatMulUnary::new(
let opt = OptMatMul::new(
mmm,
c_fact,
c_m,
c_n,
vec![ProtoFusedSpec::AddMatMul { geo, a: 0, b: 1, packing }, ProtoFusedSpec::Store(output)],
)
.context("Creating LirMatMulUnary")?;
let output = patch.wire_node(name, lir, &[pa, pb])?[0];
.context("Creating OptMatMul")?;
let output = patch.wire_node(name, opt, &[pa, pb])?[0];
patch.shunt_outside(model, node.id.into(), output)?;
Ok(Some(patch))
}
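For tooling that inspects optimized graphs (dumps, profiles, tests), only the reported op name and the import path change. A small hypothetical check, assuming tract-core's public TypedModel API (into_optimized, op_is); the helper name is illustrative, not part of this PR:

use tract_core::internal::*;
use tract_core::ops::matmul::optimized::OptMatMul;

// Hypothetical helper: count the matmul kernels left in an optimized model.
// These nodes now report the name "OptMatMul" (formerly "LirMatMulUnary").
fn count_opt_matmuls(model: TypedModel) -> TractResult<usize> {
    let optimized = model.into_optimized()?;
    Ok(optimized.nodes.iter().filter(|n| n.op_is::<OptMatMul>()).count())
}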
4 changes: 2 additions & 2 deletions core/src/ops/matmul.rs → core/src/ops/matmul/mod.rs
@@ -1,7 +1,7 @@
pub mod de_block_quant;
pub mod lir_unary;
pub mod mir_quant;
pub mod optimized;
pub mod pack;
pub mod quant;

use crate::internal::*;

core/src/ops/matmul/lir_unary.rs → core/src/ops/matmul/optimized.rs
@@ -250,7 +250,7 @@ impl ResolveTo<ConcreteMatrixGeometry> for SymbolicMatrixGeometry {
pub type MatrixGeometry = GeometryBound<SymbolicMatrixGeometry, ConcreteMatrixGeometry>;

#[derive(Clone, Debug)]
pub struct LirMatMulUnary {
pub struct OptMatMul {
pub c_fact: TypedFact,
pub micro_ops: Vec<ProtoFusedSpec>,
pub geometry: MatrixGeometry,
@@ -260,9 +260,9 @@ pub struct LirMatMulUnary {
pub trivial_path: bool,
}

impl Op for LirMatMulUnary {
impl Op for OptMatMul {
fn name(&self) -> Cow<str> {
"LirMatMulUnary".into()
"OptMatMul".into()
}

fn info(&self) -> TractResult<Vec<String>> {
@@ -283,7 +283,7 @@ impl Op for LirMatMulUnary {
op_as_typed_op!();
}

impl EvalOp for LirMatMulUnary {
impl EvalOp for OptMatMul {
fn is_stateless(&self) -> bool {
true
}
@@ -338,7 +338,7 @@ impl EvalOp for LirMatMulUnary {
}
}

impl TypedOp for LirMatMulUnary {
impl TypedOp for OptMatMul {
fn output_facts(&self, inputs: &[&TypedFact]) -> TractResult<TVec<TypedFact>> {
ensure!(self.c_m_axis < self.c_fact.rank());
ensure!(self.c_n_axis < self.c_fact.rank());
@@ -512,7 +512,7 @@ impl TypedOp for LirMatMulUnary {
as_op!();
}

impl LirMatMulUnary {
impl OptMatMul {
pub fn new(
mmm: Box<dyn MatMatMul>,
c_fact: TypedFact,
@@ -528,7 +528,7 @@ impl LirMatMulUnary {
n: c_fact.shape[c_n_axis].clone(),
});
let geometry = geometry.clone().optimize_if(Some(&Default::default())).unwrap_or(geometry);
let mut it = LirMatMulUnary {
let mut it = OptMatMul {
mmm,
c_fact,
geometry,
File renamed without changes.
6 changes: 3 additions & 3 deletions core/src/ops/scan/mir.rs → core/src/ops/scan/decluttered.rs
@@ -2,7 +2,7 @@ use crate::ops::einsum::EinSum;
use crate::ops::konst::Const;
use crate::optim::OptimizerSession;

use super::lir::{LirScan, LirScanOpParams};
use super::optimized::{OptScan, ScanOpParams};
use tract_data::internal::*;

use super::*;
@@ -18,14 +18,14 @@ pub struct Scan {
}

impl Scan {
pub fn to_codegen_op(&self, optimize_inner: bool) -> TractResult<LirScan> {
pub fn to_codegen_op(&self, optimize_inner: bool) -> TractResult<OptScan> {
let mut model = self.body.clone();
if optimize_inner {
model = model.into_optimized()?;
}
let plan = SimplePlan::new(model)?;

Ok(LirScan::new(Arc::new(LirScanOpParams::new(
Ok(OptScan::new(Arc::new(ScanOpParams::new(
self.skip,
self.reset_every_turn,
Arc::new(plan),
8 changes: 4 additions & 4 deletions core/src/ops/scan/mod.rs
@@ -1,11 +1,11 @@
use crate::internal::*;
use std::fmt;

mod lir;
mod mir;
mod decluttered;
mod optimized;

pub use lir::{LirScan, State};
pub use mir::Scan;
pub use optimized::{OptScan, State};
pub use decluttered::Scan;

#[derive(Clone, new, Hash, Eq, PartialEq, Copy, Debug)]
pub struct ScanInfo {
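The scan ops follow the same scheme: the decluttered Scan (formerly in mir.rs) lowers to OptScan (formerly LirScan, now in scan::optimized). A hypothetical call site, assuming the re-exports from ops::scan shown above:

use tract_core::internal::*;
use tract_core::ops::scan::{OptScan, Scan};

// Hypothetical: lower a decluttered Scan into its optimized counterpart,
// also optimizing the inner body model (to_codegen_op is public, per the diff).
fn lower_scan(scan: &Scan) -> TractResult<OptScan> {
    scan.to_codegen_op(true)
}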