// RUN: torch-mlir-opt <%s -convert-torch-to-linalg -split-input-file -verify-diagnostics | FileCheck %s

// -----

// CSR layout: outer dimension dense, inner dimension compressed.
#CSR = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : dense, d1 : compressed) }>

// Checks that a full reduction over a CSR-annotated vtensor lowers to a
// linalg.generic whose input tensor carries the sparse encoding.
// CHECK: #[[$CSR:.*]] = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : dense, d1 : compressed) }>
// CHECK-LABEL: func.func @sum(
// CHECK-SAME:    %[[A:.*]]: !torch.vtensor<[64,64],f32,#[[$CSR]]>) -> !torch.vtensor<[],f32>
// CHECK:         %[[S:.*]] = torch_c.to_builtin_tensor %[[A]] : !torch.vtensor<[64,64],f32,#[[$CSR]]> -> tensor<64x64xf32, #[[$CSR]]>
// CHECK:         linalg.generic {{{.*}}} ins(%[[S]] : tensor<64x64xf32, #[[$CSR]]>)
func.func @sum(%arg0: !torch.vtensor<[64,64],f32,#CSR>) -> !torch.vtensor<[],f32> {
  %none = torch.constant.none
  %0 = torch.aten.sum %arg0, %none
      : !torch.vtensor<[64,64],f32,#CSR>, !torch.none -> !torch.vtensor<[],f32>
  return %0 : !torch.vtensor<[],f32>
}

// -----

// CSR layout: outer dimension dense, inner dimension compressed.
#CSR = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : dense, d1 : compressed) }>

// Checks that matmul with a CSR-annotated left operand lowers to
// linalg.matmul with the sparse encoding preserved on that operand.
// CHECK: #[[$CSR:.*]] = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : dense, d1 : compressed) }>
// CHECK-LABEL: func.func @SpMM(
// CHECK-SAME:    %[[A:.*]]: !torch.vtensor<[8,16],f32,#[[$CSR]]>,
// CHECK-SAME:    %[[B:.*]]: !torch.vtensor<[16,8],f32>) -> !torch.vtensor<[8,8],f32>
// CHECK:         %[[S:.*]] = torch_c.to_builtin_tensor %[[A]] : !torch.vtensor<[8,16],f32,#[[$CSR]]> -> tensor<8x16xf32, #[[$CSR]]>
// CHECK:         %[[T:.*]] = torch_c.to_builtin_tensor %[[B]] : !torch.vtensor<[16,8],f32> -> tensor<16x8xf32>
// CHECK:         linalg.matmul ins(%[[S]], %[[T]] : tensor<8x16xf32, #[[$CSR]]>, tensor<16x8xf32>)
func.func @SpMM(%arg0: !torch.vtensor<[8,16],f32,#CSR>,
                %arg1: !torch.vtensor<[16,8],f32>) -> !torch.vtensor<[8,8],f32> {
  %0 = torch.aten.matmul %arg0, %arg1
      : !torch.vtensor<[8,16],f32,#CSR>,
        !torch.vtensor<[16,8],f32> -> !torch.vtensor<[8,8],f32>
  return %0 : !torch.vtensor<[8,8],f32>
}
0 commit comments