// RUN: mlir-opt %s -convert-linalg-to-affine-loops | FileCheck %s

// Test that we can lower all the way to LLVM without crashing; don't check results here.
// RUN: mlir-opt %s -convert-linalg-to-affine-loops -convert-linalg-to-llvm -o=/dev/null 2>&1

// CHECK-DAG: #[[$strided3D:.*]] = affine_map<(d0, d1, d2)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2 + d2)>

// CHECK-DAG: #[[$stride2Dilation1:.*]] = affine_map<(d0, d1) -> (d0 * 2 + d1)>

// CHECK-DAG: #[[$clampMinMap:.*]] = affine_map<(d0) -> (d0, 0)>

func @matmul(%arg0: memref<?xi8>, %M: index, %N: index, %K: index) {
  %c0 = constant 0 : index
  %c1 = constant 1 : index
  %A = view %arg0[%c0][%M, %K] : memref<?xi8> to memref<?x?xf32>
  %B = view %arg0[%c0][%K, %N] : memref<?xi8> to memref<?x?xf32>
  %C = view %arg0[%c0][%M, %N] : memref<?xi8> to memref<?x?xf32>
  linalg.matmul ins(%A, %B: memref<?x?xf32>, memref<?x?xf32>)
               outs(%C: memref<?x?xf32>)
  return
}

// CHECK-LABEL: func @matmul(%{{.*}}: memref<?xi8>,
// CHECK-SAME: [[M:arg[0-9]+]]: index
// CHECK-SAME: [[N:arg[0-9]+]]: index
// CHECK-SAME: [[K:arg[0-9]+]]: index
// CHECK: %[[A:.*]] = std.view %{{.*}}[{{.*}}] : memref<?xi8> to memref<?x?xf32>
// CHECK: %[[B:.*]] = std.view %{{.*}}[{{.*}}] : memref<?xi8> to memref<?x?xf32>
// CHECK: %[[C:.*]] = std.view %{{.*}}[{{.*}}] : memref<?xi8> to memref<?x?xf32>
// CHECK: affine.for %{{.*}} = 0 to %{{.*}} {
// CHECK:   affine.for %{{.*}} = 0 to %{{.*}} {
// CHECK:     affine.for %{{.*}} = 0 to %{{.*}} {
// CHECK-DAG:   %[[a:.*]] = affine.load %[[A]][%{{.*}}, %{{.*}}] : memref<?x?xf32>
// CHECK-DAG:   %[[b:.*]] = affine.load %[[B]][%{{.*}}, %{{.*}}] : memref<?x?xf32>
// CHECK-DAG:   %[[inc:.*]] = mulf %[[a]], %[[b]] : f32
// CHECK-DAG:   %[[c:.*]] = affine.load %[[C]][%{{.*}}, %{{.*}}] : memref<?x?xf32>
// CHECK-DAG:   %[[res:.*]] = addf %[[c]], %[[inc]] : f32
// CHECK:       affine.store %[[res]], %[[C]][%{{.*}}, %{{.*}}] : memref<?x?xf32>
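// For reference, the matmul lowering checked above has this overall shape (an
// illustrative sketch assembled from the CHECK lines, not itself part of the
// test; value names are made up):
//
//   affine.for %i = 0 to %M {
//     affine.for %j = 0 to %N {
//       affine.for %k = 0 to %K {
//         %a = affine.load %A[%i, %k] : memref<?x?xf32>
//         %b = affine.load %B[%k, %j] : memref<?x?xf32>
//         %inc = mulf %a, %b : f32
//         %c = affine.load %C[%i, %j] : memref<?x?xf32>
//         %res = addf %c, %inc : f32
//         affine.store %res, %C[%i, %j] : memref<?x?xf32>
//       }
//     }
//   }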
func @conv_view3(%arg0: memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>,
                 %arg1: memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>,
                 %arg2: memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>) {
  linalg.conv(%arg0, %arg1, %arg2) {strides = [2]} :
    memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>,
    memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>,
    memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>
  return
}

// CHECK-LABEL: func @conv_view3(
// CHECK: %{{.*}}: memref<?x?x?xf32, #[[$strided3D]]>, %{{.*}}: memref<?x?x?xf32, #[[$strided3D]]>, %{{.*}}: memref<?x?x?xf32, #[[$strided3D]]>) {
// CHECK: %[[Z0:.*]] = dim %arg0, %c0 : memref<?x?x?xf32, #[[$strided3D]]>
// CHECK: %[[Q:.*]] = dim %arg0, %c1 : memref<?x?x?xf32, #[[$strided3D]]>
// CHECK: %[[K:.*]] = dim %arg0, %c2 : memref<?x?x?xf32, #[[$strided3D]]>
// CHECK: %[[B:.*]] = dim %arg1, %c0 : memref<?x?x?xf32, #[[$strided3D]]>
// CHECK: %[[X0:.*]] = dim %arg2, %c1 : memref<?x?x?xf32, #[[$strided3D]]>
// CHECK: affine.for %{{.*}} = 0 to %[[B]] {
// CHECK:   affine.for %{{.*}} = 0 to %[[X0]] {
// CHECK:     affine.for %{{.*}} = 0 to %[[K]] {
// CHECK:       affine.for %{{.*}} = 0 to %[[Q]] {
// CHECK:         affine.for %{{.*}} = 0 to %[[Z0]] {
// CHECK:           %[[SUM:.*]] = affine.apply #[[$stride2Dilation1]](%{{.*}}, %{{.*}})
// No padding needed here; only affine loads are generated.
// CHECK-NEXT: affine.load
// CHECK-NEXT: affine.load

func @conv_padding(%arg0: memref<?x?x?x?xf32>,
                   %arg1: memref<?x?x?x?xf32>,
                   %arg2: memref<?x?x?x?xf32>) {
  linalg.conv(%arg0, %arg1, %arg2) {dilations = [1, 1],
                                    padding = dense<[[0, 1], [1, 1]]> : tensor<2x2xi64>,
                                    strides = [1, 1]} :
    memref<?x?x?x?xf32>, memref<?x?x?x?xf32>, memref<?x?x?x?xf32>
  return
}

// CHECK-LABEL: func @conv_padding
// CHECK: %{{.*}}: memref<?x?x?x?xf32>, %{{.*}}: memref<?x?x?x?xf32>, %{{.*}}: memref<?x?x?x?xf32>) {
// CHECK: %[[ZERO:.*]] = constant 0.000000e+00 : f32
// CHECK: %[[Z0:.*]] = dim %arg0, %c0 : memref<?x?x?x?xf32>
// CHECK: %[[Z1:.*]] = dim %arg0, %c1 : memref<?x?x?x?xf32>
// CHECK: %[[Q:.*]] = dim %arg0, %c2 : memref<?x?x?x?xf32>
// CHECK: %[[K:.*]] = dim %arg0, %c3 : memref<?x?x?x?xf32>
// CHECK: %[[B:.*]] = dim %arg1, %c0 : memref<?x?x?x?xf32>
// CHECK: %[[X0:.*]] = dim %arg2, %c1 : memref<?x?x?x?xf32>
// CHECK: %[[X1:.*]] = dim %arg2, %c2 : memref<?x?x?x?xf32>
// CHECK: affine.for %{{.*}} = 0 to %[[B]] {
// CHECK:   affine.for %{{.*}} = 0 to %[[X0]] {
// CHECK:     affine.for %{{.*}} = 0 to %[[X1]] {
// CHECK:       affine.for %{{.*}} = 0 to %[[K]] {
// CHECK:         affine.for %{{.*}} = 0 to %[[Q]] {
// CHECK:           affine.for %{{.*}} = 0 to %[[Z0]] {
// CHECK:             affine.for %{{.*}} = 0 to %[[Z1]] {
// CHECK:               %[[SUM0:.*]] = affine.apply #{{.*}}(%{{.*}}, %{{.*}})
// CHECK:               %[[SUM1:.*]] = affine.apply #{{.*}}(%{{.*}}, %{{.*}})
// CHECK:               %[[IDX:.*]] = affine.max #[[$clampMinMap]](%[[SUM0]])
// CHECK:               %[[IDY:.*]] = affine.max #[[$clampMinMap]](%[[SUM1]])
// A padded conv involves an affine.max in the memory access, which is not
// allowed by affine.load. Use std.load in such cases.
// CHECK: %{{.*}} = load %{{.*}}[%{{.*}}, %[[IDX]], %[[IDY]], %{{.*}}] : memref<?x?x?x?xf32>
// CHECK: %{{.*}} = select %{{.*}}, %{{.*}}, %{{.*}} : f32
// CHECK: %{{.*}} = affine.load %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}] : memref<?x?x?x?xf32>
// CHECK: %{{.*}} = mulf %{{.*}}, %{{.*}} : f32
// CHECK: %{{.*}} = affine.load %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}] : memref<?x?x?x?xf32>
// CHECK: %{{.*}} = addf %{{.*}}, %{{.*}} : f32
// CHECK: affine.store %{{.*}}, %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}] : memref<?x?x?x?xf32>
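// For reference, the padded access checked above amounts to clamping the
// computed index with affine.max, loading through std.load (affine.load would
// reject the affine.max result as an index), and selecting the padding value
// when out of bounds (an illustrative sketch, not itself part of the test;
// value names are made up):
//
//   %idx = affine.max affine_map<(d0) -> (d0, 0)>(%sum0)
//   %idy = affine.max affine_map<(d0) -> (d0, 0)>(%sum1)
//   %v = load %in[%b, %idx, %idy, %q] : memref<?x?x?x?xf32>
//   %padded = select %inbounds, %v, %zero : f32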
//----------------------------------------------------------------------------//
// Named ops to loops.
//----------------------------------------------------------------------------//

func @named_batch_matmul(%A: memref<?x?x?xf32>, %B: memref<?x?x?xf32>, %C: memref<?x?x?xf32>) {
  linalg.batch_matmul ins(%A, %B: memref<?x?x?xf32>, memref<?x?x?xf32>)
                     outs(%C : memref<?x?x?xf32>)
  return
}

// CHECK-LABEL: @named_batch_matmul
// CHECK-SAME: %[[mA:[a-zA-Z0-9]+]]: memref<?x?x?xf32>
// CHECK-SAME: %[[mB:[a-zA-Z0-9]+]]: memref<?x?x?xf32>
// CHECK-SAME: %[[mC:[a-zA-Z0-9]+]]: memref<?x?x?xf32>
// CHECK: %[[B:.*]] = dim %[[mA]], %c0 : memref<?x?x?xf32>
// CHECK: %[[M:.*]] = dim %[[mA]], %c1 : memref<?x?x?xf32>
// CHECK: %[[K:.*]] = dim %[[mA]], %c2 : memref<?x?x?xf32>
// CHECK: %[[N:.*]] = dim %[[mB]], %c2 : memref<?x?x?xf32>
// CHECK: affine.for %[[b:.*]] = 0 to %[[B]] {
// CHECK:   affine.for %[[m:.*]] = 0 to %[[M]] {
// CHECK:     affine.for %[[n:.*]] = 0 to %[[N]] {
// CHECK:       affine.for %[[k:.*]] = 0 to %[[K]] {
// CHECK:         %[[va:.*]] = affine.load %[[mA]][%[[b]], %[[m]], %[[k]]] : memref<?x?x?xf32>
// CHECK:         %[[vb:.*]] = affine.load %[[mB]][%[[b]], %[[k]], %[[n]]] : memref<?x?x?xf32>
// CHECK:         %[[vc:.*]] = affine.load %[[mC]][%[[b]], %[[m]], %[[n]]] : memref<?x?x?xf32>
// CHECK:         %[[inc:.*]] = mulf %[[va]], %[[vb]] : f32
// CHECK:         %[[res:.*]] = addf %[[vc]], %[[inc]] : f32
// CHECK:         affine.store %[[res]], %[[mC]][%[[b]], %[[m]], %[[n]]] : memref<?x?x?xf32>

// CHECK-LABEL: func @pooling_max_min
func @pooling_max_min(%arg0: memref<?x?xf32>,
                      %arg1: memref<?x?xi32>,
                      %arg2: memref<?x?xf32>) {
  linalg.pooling_max(%arg0, %arg1, %arg2) { strides = [2, 1] } :
    memref<?x?xf32>, memref<?x?xi32>, memref<?x?xf32>
  linalg.pooling_min(%arg0, %arg1, %arg2) { strides = [2, 1] } :
    memref<?x?xf32>, memref<?x?xi32>, memref<?x?xf32>
  return
}
// This is a basic check to make sure the right loads and stores are used;
// loops.mlir checks the rest.
// The max pooling body.
// CHECK: affine.load
// CHECK-NEXT: affine.load
// CHECK-NEXT: cmpf
// CHECK-NEXT: select
// CHECK-NEXT: affine.store
// The min pooling body.
// CHECK: affine.load
// CHECK-NEXT: affine.load
// CHECK-NEXT: cmpf
// CHECK-NEXT: select
// CHECK-NEXT: affine.store
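// For reference, each pooling body checked above reduces with a
// compare-and-select; the max case looks roughly like this (an illustrative
// sketch, not itself part of the test; value names and the exact operand
// order are made up):
//
//   %acc = affine.load %out[%i, %j] : memref<?x?xf32>
//   %elem = affine.load %in[%ix, %jx] : memref<?x?xf32>
//   %is_greater = cmpf "ogt", %acc, %elem : f32
//   %max = select %is_greater, %acc, %elem : f32
//   affine.store %max, %out[%i, %j] : memref<?x?xf32>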