-
Notifications
You must be signed in to change notification settings - Fork 606
Open
Description
How to reproduce?
test.mlir
func.func @forwards(%arg0: !torch.vtensor<[1,4,2048],f32>, %int2: !torch.int) -> !torch.vtensor<[1,4,2048],f32> {
%0 = torch.aten.pow.Tensor_Scalar %arg0, %int2 : !torch.vtensor<[1,4,2048],f32>, !torch.int -> !torch.vtensor<[1,4,2048],f32>
return %0 : !torch.vtensor<[1,4,2048],f32>
}
torch-mlir-opt -pass-pipeline='builtin.module(torch-backend-to-linalg-on-tensors-backend-pipeline)' test.mlir -o linalg.mlir
mlir-opt linalg.mlir \
-one-shot-bufferize="bufferize-function-boundaries" \
-buffer-deallocation-pipeline \
-convert-bufferization-to-memref \
-convert-linalg-to-loops \
-expand-strided-metadata \
-lower-affine \
-convert-scf-to-cf \
-test-cf-assert \
-convert-index-to-llvm \
-finalize-memref-to-llvm \
-convert-func-to-llvm \
-convert-arith-to-llvm \
-convert-cf-to-llvm \
-convert-math-to-llvm \
-reconcile-unrealized-casts -o llvm.mlir
mlir-translate llvm.mlir --mlir-to-llvmir -o llvm.ll
llvm-project/build/bin/llc --relocation-model=pic llvm.ll -o llvm.s
It looks like llvm.powi only supports i32 as the exponent type, but the linalg IR generated from the above test.mlir has math.fpowi with an i64 exponent, as shown below.
#map = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
module {
func.func @forwards(%arg0: tensor<1x4x2048xf32>, %arg1: i64) -> tensor<1x4x2048xf32> {
%0 = tensor.empty() : tensor<1x4x2048xf32>
%1 = linalg.generic {indexing_maps = [#map, #map], iterator_types = ["parallel", "parallel", "parallel"]} ins(%arg0 : tensor<1x4x2048xf32>) outs(%0 : tensor<1x4x2048xf32>) {
^bb0(%in: f32, %out: f32):
%2 = math.fpowi %in, %arg1 : f32, i64
linalg.yield %2 : f32
} -> tensor<1x4x2048xf32>
return %1 : tensor<1x4x2048xf32>
}
}
Possible solution
Explicitly truncate the integer exponent to i32 when lowering from Torch IR to Linalg IR.
Metadata
Metadata
Assignees
Labels
No labels