[mlir][linalg] Add quantized conv2d operator with FCHW,NCHW order (#107740)
This patch adds a quantized version of the `linalg.conv2d_nchw_fchw` Op. This is the "channel-first" ordering typically used by PyTorch and others.
This commit is contained in:
@@ -876,6 +876,35 @@ def conv_2d_nhwc_fhwc_q(
|
||||
) * (TypeFn.cast_signed(U, K[D.f, D.kh, D.kw, D.c]) - TypeFn.cast_signed(U, KZp))
|
||||
|
||||
|
||||
@linalg_structured_op
def conv_2d_nchw_fchw_q(
    I=TensorDef(T1, S.N, S.C, S.OH * S.SH + S.KH * S.DH, S.OW * S.SW + S.KW * S.DW),
    K=TensorDef(T2, S.F, S.C, S.KH, S.KW),
    IZp=ScalarDef(I32),
    KZp=ScalarDef(I32),
    O=TensorDef(U, S.N, S.F, S.OH, S.OW, output=True),
    strides=IndexAttrDef(S.SH, S.SW, default=[1, 1]),
    dilations=IndexAttrDef(S.DH, S.DW, default=[1, 1]),
):
    """Performs 2-D convolution with zero point offsets.

    Layout:
      * Input: NCHW.
      * Kernel: FCHW.

    Numeric casting is performed on the operands to the inner multiply, promoting
    them to the same data type as the accumulator/output. This includes the zero
    point offsets common to quantized operations.
    """
    implements(ConvolutionOpInterface)
    # Iteration order: parallel dims (n, f, oh, ow) first, then the reduction
    # dims (c, kh, kw) that are contracted away in the accumulation below.
    domain(D.n, D.f, D.oh, D.ow, D.c, D.kh, D.kw)
    # Accumulate (input - input_zero_point) * (kernel - kernel_zero_point),
    # with both operands cast to the accumulator type U before the multiply.
    O[D.n, D.f, D.oh, D.ow] += (
        TypeFn.cast_signed(
            U, I[D.n, D.c, D.oh * S.SH + D.kh * S.DH, D.ow * S.SW + D.kw * S.DW]
        )
        - TypeFn.cast_signed(U, IZp)
    ) * (TypeFn.cast_signed(U, K[D.f, D.c, D.kh, D.kw]) - TypeFn.cast_signed(U, KZp))
|
||||
|
||||
@linalg_structured_op
|
||||
def conv_2d_nchw_fchw(
|
||||
I=TensorDef(T1, S.N, S.C, S.OH * S.SH + S.KH * S.DH, S.OW * S.SW + S.KW * S.DW),
|
||||
|
||||
Reference in New Issue
Block a user