// DEFINE: %{option} = enable-runtime-library=true
// DEFINE: %{command} = mlir-opt %s --sparse-compiler=%{option} | \
// DEFINE: mlir-cpu-runner \
// DEFINE:  -e entry -entry-point-result=void \
// DEFINE:  -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \
// DEFINE: FileCheck %s
//
// RUN: %{command}
//
// Do the same run, but now with direct IR generation.
// REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true"
// RUN: %{command}

#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}>
#CSR = #sparse_tensor.encoding<{dimLevelType = ["dense", "compressed"]}>
#CSC = #sparse_tensor.encoding<{
  dimLevelType = [ "dense", "compressed" ],
  dimOrdering = affine_map<(i,j) -> (j,i)>
}>

//
// Traits for tensor operations.
//
#trait_vec_select = {
  indexing_maps = [
    affine_map<(i) -> (i)>,  // A (in)
    affine_map<(i) -> (i)>   // C (out)
  ],
  iterator_types = ["parallel"]
}

#trait_mat_select = {
  indexing_maps = [
    affine_map<(i,j) -> (i,j)>,  // A (in)
    affine_map<(i,j) -> (i,j)>   // X (out)
  ],
  iterator_types = ["parallel", "parallel"]
}

module {
  // Vector selection: keeps only the elements >= 1.
  func.func @vecSelect(%arga: tensor<?xf64, #SparseVector>) -> tensor<?xf64, #SparseVector> {
    %c0 = arith.constant 0 : index
    %cf1 = arith.constant 1.0 : f64
    %d0 = tensor.dim %arga, %c0 : tensor<?xf64, #SparseVector>
    %xv = bufferization.alloc_tensor(%d0): tensor<?xf64, #SparseVector>
    %0 = linalg.generic #trait_vec_select
      ins(%arga: tensor<?xf64, #SparseVector>)
      outs(%xv: tensor<?xf64, #SparseVector>) {
        ^bb(%a: f64, %b: f64):
          %1 = sparse_tensor.select %a : f64 {
              ^bb0(%x: f64):
                %keep = arith.cmpf "oge", %x, %cf1 : f64
                sparse_tensor.yield %keep : i1
            }
          linalg.yield %1 : f64
    } -> tensor<?xf64, #SparseVector>
    return %0 : tensor<?xf64, #SparseVector>
  }

  // Matrix selection: keeps only the strictly upper triangular entries.
  func.func @matUpperTriangle(%arga: tensor<?x?xf64, #CSR>) -> tensor<?x?xf64, #CSR> {
    %c0 = arith.constant 0 : index
    %c1 = arith.constant 1 : index
    %d0 = tensor.dim %arga, %c0 : tensor<?x?xf64, #CSR>
    %d1 = tensor.dim %arga, %c1 : tensor<?x?xf64, #CSR>
    %xv = bufferization.alloc_tensor(%d0, %d1): tensor<?x?xf64, #CSR>
    %0 = linalg.generic #trait_mat_select
      ins(%arga: tensor<?x?xf64, #CSR>)
      outs(%xv: tensor<?x?xf64, #CSR>) {
        ^bb(%a: f64, %b: f64):
          %row = linalg.index 0 : index
          %col = linalg.index 1 : index
          %1 = sparse_tensor.select %a : f64 {
              ^bb0(%x: f64):
                %keep = arith.cmpi "ugt", %col, %row : index
                sparse_tensor.yield %keep : i1
            }
          linalg.yield %1 : f64
    } -> tensor<?x?xf64, #CSR>
    return %0 : tensor<?x?xf64, #CSR>
  }

  // Dumps a sparse vector of type f64.
  func.func @dump_vec(%arg0: tensor<?xf64, #SparseVector>) {
    // Dump the values array to verify only sparse contents are stored.
    %c0 = arith.constant 0 : index
    %d0 = arith.constant 0.0 : f64
    %0 = sparse_tensor.values %arg0 : tensor<?xf64, #SparseVector> to memref<?xf64>
    %1 = vector.transfer_read %0[%c0], %d0: memref<?xf64>, vector<8xf64>
    vector.print %1 : vector<8xf64>
    // Dump the dense vector to verify structure is correct.
    %dv = sparse_tensor.convert %arg0 : tensor<?xf64, #SparseVector> to tensor<?xf64>
    %2 = vector.transfer_read %dv[%c0], %d0: tensor<?xf64>, vector<16xf64>
    vector.print %2 : vector<16xf64>
    return
  }

  // Dumps a sparse matrix of type f64.
  func.func @dump_mat(%arg0: tensor<?x?xf64, #CSR>) {
    // Dump the values array to verify only sparse contents are stored.
    %c0 = arith.constant 0 : index
    %d0 = arith.constant 0.0 : f64
    %0 = sparse_tensor.values %arg0 : tensor<?x?xf64, #CSR> to memref<?xf64>
    %1 = vector.transfer_read %0[%c0], %d0: memref<?xf64>, vector<16xf64>
    vector.print %1 : vector<16xf64>
    // Dump the dense matrix to verify structure is correct.
    %dm = sparse_tensor.convert %arg0 : tensor<?x?xf64, #CSR> to tensor<?x?xf64>
    %2 = vector.transfer_read %dm[%c0, %c0], %d0: tensor<?x?xf64>, vector<5x5xf64>
    vector.print %2 : vector<5x5xf64>
    return
  }

  // Driver method to call and verify the selection kernels.
  func.func @entry() {
    %c0 = arith.constant 0 : index

    // Setup sparse vector and matrix.
    %v1 = arith.constant sparse<
       [ [1], [3], [5], [7], [9] ],
         [ 1.0, 2.0, -4.0, 0.0, 5.0 ]
    > : tensor<10xf64>
    %m1 = arith.constant sparse<
       [ [0, 3], [1, 4], [2, 1], [2, 3], [3, 3], [3, 4], [4, 2] ],
         [ 1., 2., 3., 4., 5., 6., 7. ]
    > : tensor<5x5xf64>
    %sv1 = sparse_tensor.convert %v1 : tensor<10xf64> to tensor<?xf64, #SparseVector>
    %sm1 = sparse_tensor.convert %m1 : tensor<5x5xf64> to tensor<?x?xf64, #CSR>

    // Call the sparse kernels.
    %1 = call @vecSelect(%sv1) : (tensor<?xf64, #SparseVector>) -> tensor<?xf64, #SparseVector>
    %2 = call @matUpperTriangle(%sm1) : (tensor<?x?xf64, #CSR>) -> tensor<?x?xf64, #CSR>

    //
    // Verify the results.
    //
    // CHECK:      ( 1, 2, -4, 0, 5, 0, 0, 0 )
    // CHECK-NEXT: ( 0, 1, 0, 2, 0, -4, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0 )
    // CHECK-NEXT: ( 1, 2, 3, 4, 5, 6, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
    // CHECK-NEXT: ( ( 0, 0, 0, 1, 0 ), ( 0, 0, 0, 0, 2 ), ( 0, 3, 0, 4, 0 ), ( 0, 0, 0, 5, 6 ), ( 0, 0, 7, 0, 0 ) )
    // CHECK-NEXT: ( 1, 2, 5, 0, 0, 0, 0, 0 )
    // CHECK-NEXT: ( 0, 1, 0, 2, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0 )
    // CHECK-NEXT: ( 1, 2, 4, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
    // CHECK-NEXT: ( ( 0, 0, 0, 1, 0 ), ( 0, 0, 0, 0, 2 ), ( 0, 0, 0, 4, 0 ), ( 0, 0, 0, 0, 6 ), ( 0, 0, 0, 0, 0 ) )
    //
    call @dump_vec(%sv1) : (tensor<?xf64, #SparseVector>) -> ()
    call @dump_mat(%sm1) : (tensor<?x?xf64, #CSR>) -> ()
    call @dump_vec(%1) : (tensor<?xf64, #SparseVector>) -> ()
    call @dump_mat(%2) : (tensor<?x?xf64, #CSR>) -> ()

    // Release the resources.
    bufferization.dealloc_tensor %sv1 : tensor<?xf64, #SparseVector>
    bufferization.dealloc_tensor %sm1 : tensor<?x?xf64, #CSR>
    bufferization.dealloc_tensor %1 : tensor<?xf64, #SparseVector>
    bufferization.dealloc_tensor %2 : tensor<?x?xf64, #CSR>
    return
  }
}