Skip to content

Commit

Permalink
[Codegen] Re-Enable transform dialect configuration strategy round 2 (#…
Browse files Browse the repository at this point in the history
…16427)

This time it just drops all transform dialect usage outside of transform
library file path + entry point name. This reduces code complexity in
`MaterializeUserConfigs`.

Also cleans up some of the transform dialect tests to stop lit testing
at the same time. We might want to consider dropping some of them as
they aren't being maintained (the only thing they verify at the moment
is that the transform scripts are valid for CUDA).
  • Loading branch information
qedawkins authored Feb 16, 2024
1 parent 045bca1 commit b9fdcce
Show file tree
Hide file tree
Showing 55 changed files with 393 additions and 898 deletions.
168 changes: 96 additions & 72 deletions compiler/src/iree/compiler/Codegen/Common/MaterializeUserConfigs.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -25,25 +25,47 @@

namespace mlir::iree_compiler {

// Command-line flag broadcasting a single transform dialect strategy
// (a symbol reference, e.g. @library_call) to all dispatches.
// Fix: the concatenated help-text literals were missing a separating space
// ("…load from a" "library…" rendered as "from alibrary").
llvm::cl::opt<std::string> clCodegenTransformDialectStrategyName(
    "iree-codegen-use-transform-dialect-strategy",
    llvm::cl::desc(
        "Broadcasts the given transform dialect strategy specification to all "
        "dispatches. The specification is a symbol reference to load from a "
        "library of transform specs (@library_call)"),
    llvm::cl::init(""));

// Command-line flag naming a transform dialect library module, optionally
// suffixed with an entry sequence: <file-path>@<sequence-name>.
// Fixes: (1) removed a stale duplicate literal line ("strategies"),) left over
// from the previous version of this option; (2) each concatenated help-text
// literal now ends with a space so the rendered help reads correctly
// (previously "dialectstrategies", "sequencewithin", "variant.This", …).
llvm::cl::opt<std::string> clCodegenTransformDialectLibraryFileName(
    "iree-codegen-transform-dialect-library",
    llvm::cl::desc(
        "File path to a module containing a library of transform dialect "
        "strategies. Can be suffixed with the name of a transform sequence "
        "within the library to run as preprocessing per executable variant. "
        "This is specified as <file-path>@<sequence-name>. If not specified, "
        "this will default to `__kernel_config`."),
    llvm::cl::init(""));

namespace {

static const char kTranslationInfoAttrName[] = "translation_info";

// Outcome of running a named transform configuration strategy from a
// transform library: the entry point may be absent from the library
// (NotFound) or may fail while being applied (Failed).
enum StrategyRunResult {
  Success = 0,
  NotFound = 1,
  Failed = 2,
};

// Resolves `entryPointName` inside `transformLibrary` and, when found,
// applies that named transform sequence to `payloadRoot` with expensive
// checks enabled. Returns NotFound if the symbol is absent, Failed if the
// sequence does not apply cleanly, and Success otherwise.
static StrategyRunResult
runTransformConfigurationStrategy(Operation *payloadRoot,
                                  StringRef entryPointName,
                                  ModuleOp &transformLibrary) {
  // Look up the requested named sequence within the library module.
  Operation *namedSequence = transform::detail::findTransformEntryPoint(
      payloadRoot, transformLibrary, entryPointName);
  if (!namedSequence)
    return StrategyRunResult::NotFound;

  // A missing symbol and a failed application are reported as distinct
  // results so callers can emit precise diagnostics for each case.
  transform::TransformOptions interpreterOptions;
  interpreterOptions.enableExpensiveChecks(true);
  if (failed(transform::applyTransformNamedSequence(
          payloadRoot, namedSequence, transformLibrary, interpreterOptions)))
    return StrategyRunResult::Failed;

  return StrategyRunResult::Success;
}

struct MaterializeUserConfigsPass
: public MaterializeUserConfigsBase<MaterializeUserConfigsPass> {
void getDependentDialects(DialectRegistry &registry) const override {
Expand All @@ -57,42 +79,73 @@ struct MaterializeUserConfigsPass
getAllEntryPoints(moduleOp);
MLIRContext *context = moduleOp.getContext();

// Parse the file path and kernel config strategy from flags. There are
// two possible usage flows for transform dialect libraries.
// 1. Use `__kernel_config` to match and annotate variants with the
// strategy to use. This could either be a transform dialect strategy
// or any other IREE codegen pipeline.
//
// 2. Use the configuration strategy to do codegen directly. At the end of
// the strategy, the variant needs to be annotated with
// "translation_info" = #iree_codegen.translation_info<None>
SmallVector<StringRef, 2> parts;
llvm::SplitString(llvm::StringRef(clCodegenTransformDialectLibraryFileName),
parts, "@");
if (parts.size() > 2) {
variantOp.emitError()
<< "Invalid transform library path and sequence name "
<< clCodegenTransformDialectLibraryFileName;
return signalPassFailure();
}
bool hasTransformLibrary = !parts.empty();

std::string libraryFileName;
if (hasTransformLibrary) {
if (parts[0].empty()) {
variantOp.emitError() << "Cannot specify an empty library path";
return signalPassFailure();
}
libraryFileName = parts[0];
}

std::string entrySequenceName;
// Check if the user specified a custom entry point name.
if (parts.size() == 2) {
if (parts[1].empty()) {
variantOp.emitError() << "Cannot specify an empty sequence name";
return signalPassFailure();
}
entrySequenceName = parts[1];
} else {
entrySequenceName = "__kernel_config";
}

LDBG("MaterializeUserConfigsPass on variant: " << variantOp);
std::optional<ModuleOp> transformLibrary = std::nullopt;
if (!clCodegenTransformDialectLibraryFileName.empty()) {
if (hasTransformLibrary) {
auto dialect =
context->getOrLoadDialect<IREE::Codegen::IREECodegenDialect>();
auto maybeTransformLibrary = dialect->getOrLoadTransformLibraryModule(
clCodegenTransformDialectLibraryFileName);
auto maybeTransformLibrary =
dialect->getOrLoadTransformLibraryModule(libraryFileName);
if (failed(maybeTransformLibrary)) {
variantOp.emitError() << "failed to load transform library module: "
<< clCodegenTransformDialectLibraryFileName;
variantOp.emitError()
<< "failed to load transform library module: " << libraryFileName;
return signalPassFailure();
}
transformLibrary = *maybeTransformLibrary;
LDBG("--found transform library @"
<< clCodegenTransformDialectLibraryFileName);
}
LDBG("--found transform library @" << libraryFileName);

IREE::Codegen::DispatchLoweringPassPipeline tdPipeline =
IREE::Codegen::DispatchLoweringPassPipeline::TransformDialectCodegen;
std::optional<IREE::Codegen::TranslationInfoAttr> clTranslationInfo;
// Here we always set the pipeline strategy to transform dialect if the
// flag is non-empty to ensure we pick the right lowering pipeline in the
// event a strategy symbol is defined.
if (!clCodegenTransformDialectLibraryFileName.empty() ||
!clCodegenTransformDialectStrategyName.empty()) {
StringRef strategyName =
(clCodegenTransformDialectStrategyName.empty())
? StringRef(
transform::TransformDialect::kTransformEntryPointSymbolName)
: clCodegenTransformDialectStrategyName;
clTranslationInfo = IREE::Codegen::TranslationInfoAttr::get(
context, tdPipeline,
/*codegenSpec=*/
SymbolRefAttr::get(context, llvm::StringRef(strategyName)),
/*configuration=*/DictionaryAttr());
LDBG("--clTranslationInfo: " << clTranslationInfo);
auto runResult = runTransformConfigurationStrategy(
variantOp, entrySequenceName, *transformLibrary);
if (runResult == StrategyRunResult::NotFound) {
variantOp.emitError() << "transform kernel config strategy `"
<< entrySequenceName << " not found";
return signalPassFailure();
} else if (runResult == StrategyRunResult::Failed) {
variantOp.emitError() << "transform kernel config strategy `"
<< entrySequenceName << "` failed to apply";
return signalPassFailure();
}
}

LDBG("--start iterating over: "
Expand All @@ -106,6 +159,11 @@ struct MaterializeUserConfigsPass
continue;
}

/// Nothing to do if the export already has a config.
if (getTranslationInfo(exportOp)) {
continue;
}

/// First, apply all user configs.
auto res = funcOp.walk([&](Operation *op) {
if (auto compilationInfo = getCompilationInfo(op)) {
Expand All @@ -120,48 +178,14 @@ struct MaterializeUserConfigsPass
moduleOp.emitOpError("error in setting user configuration");
return signalPassFailure();
}

/// Let user configs take priority over the global strategy flag.
if (IREE::Codegen::TranslationInfoAttr exportedTranslationInfo =
getTranslationInfo(exportOp)) {
if (translationInfo) {
/// Currently codegen is rooted on the variant, meaning every entry
/// must go through the same codegen pipeline. For multi-targeting we
/// will want to have multiple functions per variant, as well as
/// multiple exports per variant, meaning eventually the nesting of
/// the translation pipeline will need to change to the function, or
/// we'll need another level of module op nesting.
if (exportedTranslationInfo != translationInfo.value()) {
moduleOp.emitOpError(
"unhandled compilation of entry point functions with different "
"translation info");
return signalPassFailure();
}
} else {
translationInfo = exportedTranslationInfo;
}
} else {
if (translationInfo && translationInfo != clTranslationInfo) {
moduleOp.emitOpError(
"unhandled compilation of entry point functions with translation "
"info optionality");
return signalPassFailure();
}
if (clTranslationInfo) {
translationInfo = clTranslationInfo;
if (failed(setTranslationInfo(funcOp, translationInfo.value()))) {
moduleOp.emitOpError("failed to set command line translation info");
return signalPassFailure();
}
}
}
}

LDBG("--guaranteed unique translationInfo: " << translationInfo);
/// We only need to resolve symbols for transform dialect based strategies.
if (!translationInfo ||
translationInfo.value().getDispatchLoweringPassPipeline() !=
tdPipeline) {
IREE::Codegen::DispatchLoweringPassPipeline::
TransformDialectCodegen) {
return;
}

Expand Down
3 changes: 2 additions & 1 deletion compiler/src/iree/compiler/Codegen/Common/Passes.h
Original file line number Diff line number Diff line change
Expand Up @@ -267,7 +267,8 @@ createTileAndDistributeToWorkgroupsPass(

/// Create an IREE-specific Transform dialect interpreter pass with all
/// registrations necessary for IREE.
std::unique_ptr<Pass> createTransformDialectInterpreterPass();
std::unique_ptr<Pass>
createTransformDialectInterpreterPass(StringRef transformSequenceName = "");

/// Pass to propagate type to avoid generating load/stores of illegal types.
std::unique_ptr<InterfacePass<mlir::FunctionOpInterface>>
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -72,13 +72,19 @@ class TransformDialectInterpreterPass

namespace mlir::iree_compiler {

extern llvm::cl::opt<std::string> clCodegenTransformDialectStrategyName;
extern llvm::cl::opt<std::string> clCodegenTransformDialectLibraryFileName;

/// Create a Transform dialect interpreter pass.
// Creates the IREE Transform dialect interpreter pass.
//
// Fix: the span contained both the old zero-argument signature line and the
// new one-argument signature (diff residue), which is not valid C++; only the
// current signature is kept.
//
// `transformSequenceName` names the transform sequence to run (defaulted in
// the header declaration). The library flag may be of the form
// <file-path>@<sequence-name>; only the file-path portion is forwarded here —
// the sequence suffix is consumed elsewhere (MaterializeUserConfigs).
std::unique_ptr<Pass>
createTransformDialectInterpreterPass(StringRef transformSequenceName) {
  StringRef libraryPath = "";
  SmallVector<StringRef, 2> parts;
  llvm::SplitString(llvm::StringRef(clCodegenTransformDialectLibraryFileName),
                    parts, "@");
  if (!parts.empty()) {
    libraryPath = parts[0];
  }
  return std::make_unique<TransformDialectInterpreterPass>(
      libraryPath, transformSequenceName);
}
} // namespace mlir::iree_compiler
Original file line number Diff line number Diff line change
Expand Up @@ -186,9 +186,12 @@ void LLVMCPULowerExecutableTargetPass::runOnOperation() {
break;
}
// Transform-dialect pipelines.
case IREE::Codegen::DispatchLoweringPassPipeline::TransformDialectCodegen:
addTransformDialectPasses(pipeline);
case IREE::Codegen::DispatchLoweringPassPipeline::TransformDialectCodegen: {
SymbolRefAttr codegenSpec = translationInfo.value().getCodegenSpec();
addTransformDialectPasses(
pipeline, codegenSpec ? codegenSpec.getLeafReference() : StringRef(""));
break;
}
default:
moduleOp.emitOpError("Unsupported pipeline on CPU target.");
return signalPassFailure();
Expand Down
5 changes: 3 additions & 2 deletions compiler/src/iree/compiler/Codegen/LLVMCPU/Passes.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -582,10 +582,11 @@ void addCPUDefaultPassPipeline(OpPassManager &passManager) {
addCPUBufferizePasses(nestedModulePM);
}

void addTransformDialectPasses(OpPassManager &passManager) {
void addTransformDialectPasses(OpPassManager &passManager,
StringRef entryPoint) {
// Give control to the transform dialect.
passManager.addPass(
mlir::iree_compiler::createTransformDialectInterpreterPass());
mlir::iree_compiler::createTransformDialectInterpreterPass(entryPoint));
// Dropping the schedule is needed:
// 1. if we want to embed the transform in the module: we should drop the
// schedule once applied.
Expand Down
3 changes: 2 additions & 1 deletion compiler/src/iree/compiler/Codegen/LLVMCPU/Passes.h
Original file line number Diff line number Diff line change
Expand Up @@ -169,7 +169,8 @@ void addTensorToVectorsPassPipeline(OpPassManager &passManager,
bool lowerToVectors = true);

/// Transform dialect-based common.
void addTransformDialectPasses(OpPassManager &passManager);
void addTransformDialectPasses(OpPassManager &passManager,
StringRef entryPoint);

// Populates the passes needed to do tiling, decomposing, and vectorizing the
// convolution ops.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -128,9 +128,12 @@ void LLVMGPULowerExecutableTargetPass::runOnOperation() {
addGPUPackUnPackPasses(pipeline);
break;
// Transform-dialect pipelines.
case IREE::Codegen::DispatchLoweringPassPipeline::TransformDialectCodegen:
addGPUTransformDialectPasses(pipeline);
case IREE::Codegen::DispatchLoweringPassPipeline::TransformDialectCodegen: {
SymbolRefAttr codegenSpec = translationInfo.value().getCodegenSpec();
addGPUTransformDialectPasses(
pipeline, codegenSpec ? codegenSpec.getLeafReference() : StringRef(""));
break;
}
// no pipeline specified, nothing to do.
case IREE::Codegen::DispatchLoweringPassPipeline::None:
return;
Expand Down
5 changes: 3 additions & 2 deletions compiler/src/iree/compiler/Codegen/LLVMGPU/Passes.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -740,9 +740,10 @@ static void addLowerToLLVMGPUPasses(OpPassManager &pm, bool forROCDL) {
extern llvm::cl::opt<std::string> clGPUCodegenTransformDialectDebugPayloadTag;
extern llvm::cl::opt<std::string> clGPUCodegenTransformDialectDebugTransformTag;

void addGPUTransformDialectPasses(OpPassManager &passManager) {
void addGPUTransformDialectPasses(OpPassManager &passManager,
StringRef entryPoint) {
passManager.addPass(
mlir::iree_compiler::createTransformDialectInterpreterPass());
mlir::iree_compiler::createTransformDialectInterpreterPass(entryPoint));

// Dropping the schedule is needed:
// 1. if we want to embed the transform in the module: we should drop the
Expand Down
2 changes: 1 addition & 1 deletion compiler/src/iree/compiler/Codegen/LLVMGPU/Passes.h
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,7 @@ void addGPUPackUnPackPasses(OpPassManager &pm);
void addGPUSimpleDistributePassPipeline(OpPassManager &pm);

/// Transform dialect-based path.
void addGPUTransformDialectPasses(OpPassManager &pm);
void addGPUTransformDialectPasses(OpPassManager &pm, StringRef entryPoint);

/// Lowering transpose using shared memory.
void addGPUTransposePassPipeline(OpPassManager &pm);
Expand Down
5 changes: 3 additions & 2 deletions compiler/src/iree/compiler/Codegen/LLVMGPU/test/BUILD.bazel
Original file line number Diff line number Diff line change
Expand Up @@ -52,12 +52,13 @@ iree_lit_test_suite(
"pack_shared_memory_alloc.mlir",
"tensor_pad.mlir",
"tensorcore_vectorization.mlir",
"transform_dialect_hoist_allocs.mlir",
"transform_dialect_vector_distribution.mlir",
"transform_dialect_bufferize.mlir",
"transform_dialect_eliminate_gpu_barriers.mlir",
"transform_dialect_hoist_allocs.mlir",
"transform_dialect_pack_shared_memory_alloc.mlir",
"transform_dialect_promote_operands.mlir",
"transform_dialect_vector_distribution.mlir",
"transform_dialect_vector_to_nvgpu_mma.mlir",
"transform_distribute_forall.mlir",
"transform_gpu_pipelining.mlir",
"transform_vector_to_mma.mlir",
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -54,6 +54,7 @@ iree_lit_test_suite(
"transform_dialect_pack_shared_memory_alloc.mlir"
"transform_dialect_promote_operands.mlir"
"transform_dialect_vector_distribution.mlir"
"transform_dialect_vector_to_nvgpu_mma.mlir"
"transform_distribute_forall.mlir"
"transform_gpu_pipelining.mlir"
"transform_vector_to_mma.mlir"
Expand Down
Original file line number Diff line number Diff line change
@@ -1,11 +1,11 @@
// RUN: iree-opt %s --pass-pipeline="builtin.module(hal.executable(hal.executable.variant(iree-codegen-llvmgpu-configuration-pipeline, iree-llvmgpu-lower-executable-target)))" \
// RUN: --iree-codegen-llvmgpu-enable-transform-dialect-jit=false \
// RUN: --iree-codegen-transform-dialect-library=%p/transform_dialect_codegen_bufferize_spec.mlir | \
// RUN: --iree-codegen-transform-dialect-library=%p/transform_dialect_codegen_bufferize_spec.mlir@__transform_main | \
// RUN: FileCheck %s

// RUN: iree-opt %s --pass-pipeline="builtin.module(hal.executable(hal.executable.variant(iree-codegen-llvmgpu-configuration-pipeline, iree-llvmgpu-lower-executable-target)))" \
// RUN: --iree-codegen-llvmgpu-enable-transform-dialect-jit=false \
// RUN: --iree-codegen-transform-dialect-library=%p/transform_dialect_codegen_foreach_to_gpu_spec.mlir | \
// RUN: --iree-codegen-transform-dialect-library=%p/transform_dialect_codegen_foreach_to_gpu_spec.mlir@__transform_main | \
// RUN: FileCheck %s --check-prefix=FOREACH-TO-GPU

#device_target_cuda = #hal.device.target<"cuda", {executable_targets = [#hal.executable.target<"cuda", "cuda-nvptx-fb", {target_arch = "sm_60"}>]}>
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,11 @@ module attributes { transform.with_named_sequence } {
transform.iree.eliminate_empty_tensors %variant_op : (!transform.any_op) -> ()
%variant_op_3 = transform.iree.bufferize %variant_op : (!transform.any_op) -> !transform.any_op
%memref_func = transform.structured.match ops{["func.func"]} in %variant_op_3 : (!transform.any_op) -> !transform.any_op

// Annotate the exported function as already translated.
%exports = transform.structured.match ops{["hal.executable.export"]} in %variant_op_3 : (!transform.any_op) -> !transform.any_op
%none = transform.param.constant #iree_codegen.translation_info<None> -> !transform.any_param
transform.annotate %exports "translation_info" = %none : !transform.any_op, !transform.any_param
transform.yield
}
} // module
Original file line number Diff line number Diff line change
Expand Up @@ -40,6 +40,11 @@ module attributes { transform.with_named_sequence } {
} : !transform.any_op
transform.iree.apply_licm %memref_func : !transform.any_op
transform.apply_cse to %memref_func : !transform.any_op

// Annotate the exported function as already translated.
%exports = transform.structured.match ops{["hal.executable.export"]} in %variant_op_3 : (!transform.any_op) -> !transform.any_op
%none = transform.param.constant #iree_codegen.translation_info<None> -> !transform.any_param
transform.annotate %exports "translation_info" = %none : !transform.any_op, !transform.any_param
transform.yield
}
} // module
Expand Down
File renamed without changes.
Loading

0 comments on commit b9fdcce

Please sign in to comment.