Revert "Allow Layouts to propogate to local_load" (#5237)
This change is causing a performance regression. I'll investigate and reland it.
Reverts #5219
ThomasRaoux authored Nov 22, 2024
1 parent 340cbc6 commit 84ced0e
Showing 2 changed files with 1 addition and 20 deletions.
3 changes: 1 addition & 2 deletions lib/Dialect/TritonGPU/Transforms/Utility.cpp
@@ -563,8 +563,7 @@ bool canFoldIntoConversion(Operation *op, Attribute targetEncoding) {
   }
   return isa<triton::gpu::ConvertLayoutOp, arith::ConstantOp,
              triton::MakeRangeOp, triton::SplatOp, triton::HistogramOp,
-             triton::gpu::LocalAllocOp, triton::gpu::LocalLoadOp,
-             triton::gpu::LocalStoreOp>(op);
+             triton::gpu::LocalAllocOp, triton::gpu::LocalStoreOp>(op);
 }
 
 scf::ForOp replaceForOpWithNewSignature(
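
For context, canFoldIntoConversion is the predicate that decides whether an op's result can be produced directly in a target encoding, so that a following triton_gpu.convert_layout is folded away instead of being materialized. With LocalLoadOp dropped from the list, a conversion fed by a shared-memory load stays explicit again. A minimal sketch of the affected pattern (the shapes, encodings, and value names here are illustrative, not taken from the repository):

// When LocalLoadOp is foldable, this pair ...
%a = triton_gpu.local_load %mem : !triton_gpu.memdesc<32x32xf16, #shared, #triton_gpu.shared_memory> -> tensor<32x32xf16, #blocked>
%b = triton_gpu.convert_layout %a : tensor<32x32xf16, #blocked> -> tensor<32x32xf16, #blocked1>

// ... can be rewritten as a single load that yields the target encoding directly:
%b = triton_gpu.local_load %mem : !triton_gpu.memdesc<32x32xf16, #shared, #triton_gpu.shared_memory> -> tensor<32x32xf16, #blocked1>

After this revert the conversion is kept, which is why the lift_convert_to_local_load test below, whose CHECK-NOT: convert_layout asserts the folded form, is deleted as well.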
18 changes: 0 additions & 18 deletions test/TritonGPU/combine.mlir
@@ -2685,21 +2685,3 @@ module attributes {"triton_gpu.num-warps" = 1 : i32, "triton_gpu.threads-per-warp"
     tt.return
   }
 }
-
-// -----
-
-#blocked = #triton_gpu.blocked<{sizePerThread = [1, 1, 1, 1, 4], threadsPerWarp = [2, 1, 16, 1, 1], warpsPerCTA = [1, 1, 2, 2, 1], order = [4, 0, 1, 2, 3]}>
-#blocked2 = #triton_gpu.blocked<{sizePerThread = [1, 1, 1, 1, 4], threadsPerWarp = [1, 1, 32, 1, 1], warpsPerCTA = [1, 1, 1, 1, 4], order = [4, 3, 2, 1, 0]}>
-#blocked1 = #triton_gpu.blocked<{sizePerThread = [1, 1, 1, 1, 4], threadsPerWarp = [2, 1, 16, 1, 1], warpsPerCTA = [1, 2, 2, 1, 1], order = [4, 0, 3, 2, 1]}>
-#shared = #triton_gpu.shared<{vec = 1, perPhase = 1, maxPhase = 1, order = [4, 0, 1, 2, 3], hasLeadingOffset = false}>
-module attributes {"triton_gpu.num-ctas" = 1 : i32, "triton_gpu.num-warps" = 4 : i32, triton_gpu.target = "cuda:100", "triton_gpu.threads-per-warp" = 32 : i32} {
-  // CHECK-LABEL: lift_convert_to_local_load
-  // CHECK-NOT: convert_layout
-  // CHECK: tt.return
-  tt.func public @lift_convert_to_local_load(%arg0 : !triton_gpu.memdesc<2x1x32x4x4xi8, #shared, #triton_gpu.shared_memory, mutable>) -> tensor<2x4x32x1x4xi8, #blocked2> {
-    %1 = triton_gpu.local_load %arg0 : !triton_gpu.memdesc<2x1x32x4x4xi8, #shared, #triton_gpu.shared_memory, mutable> -> tensor<2x1x32x4x4xi8, #blocked>
-    %2 = tt.trans %1 {order = array<i32: 0, 3, 2, 1, 4>} : tensor<2x1x32x4x4xi8, #blocked> -> tensor<2x4x32x1x4xi8, #blocked1>
-    %3 = triton_gpu.convert_layout %2 : tensor<2x4x32x1x4xi8, #blocked1> -> tensor<2x4x32x1x4xi8, #blocked2>
-    tt.return %3 : tensor<2x4x32x1x4xi8, #blocked2>
-  }
-}
