[mlir][bufferization][NFC] Remove yielded tensor analysis (#67126)
Remove the yielded tensor analysis. This analysis was used to detect cases in
which One-Shot Bufferize could not deallocate buffers. Deallocation has
recently been removed from One-Shot Bufferize; buffers are now deallocated by
the buffer deallocation pass, so this analysis is no longer needed.
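
For context, below is a minimal hypothetical sketch (not code from this commit or from One-Shot Bufferize) of how the removed query could have been consulted. The helper name canDeallocLocally is invented for illustration; AnalysisState::isTensorYielded is the API deleted in the diff below. A tensor that is, or may be, yielded from its block escapes that block, so its buffer could not be freed locally.

// Hypothetical illustration only; uses the pre-commit API removed below.
#include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h"

using namespace mlir;
using namespace mlir::bufferization;

// Before this commit, a caller could ask the analysis whether a tensor (or an
// alias of it) is yielded from its containing block. A "true" answer
// (including the conservative default) meant the buffer escapes the block and
// must not be deallocated there. With deallocation handled by the buffer
// deallocation pass, no such query is needed during bufferization.
static bool canDeallocLocally(const AnalysisState &state, Value tensor) {
  return !state.isTensorYielded(tensor);
}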
matthias-springer authored Sep 22, 2023
1 parent 07151f0 commit 1a3abc2
Showing 4 changed files with 0 additions and 105 deletions.
@@ -528,13 +528,6 @@ class AnalysisState {
/// Return `true` if the given tensor has undefined contents.
virtual bool hasUndefinedContents(OpOperand *opOperand) const;

/// Return true if the given tensor (or an aliasing tensor) is yielded from
/// the containing block. Also include all aliasing tensors in the same block.
///
/// Note: In the absence of an analysis, an implementation may return true for
/// any given tensor.
virtual bool isTensorYielded(Value tensor) const;

/// Return a reference to the BufferizationOptions.
const BufferizationOptions &getOptions() const { return options; }

@@ -101,10 +101,6 @@ class OneShotAnalysisState : public AnalysisState {
/// and store them in `undefinedTensorUses`.
void gatherUndefinedTensorUses(Operation *op);

/// Find all tensors that are yielded/returned from a block and store them in
/// `yieldedTensors`. Also include all aliasing tensors in the same block.
void gatherYieldedTensors(Operation *op);

int64_t getStatNumTensorOutOfPlace() const { return statNumTensorOutOfPlace; }
int64_t getStatNumTensorInPlace() const { return statNumTensorInPlace; }

@@ -114,10 +110,6 @@ class OneShotAnalysisState : public AnalysisState {
/// Return `true` if the given OpResult has been decided to bufferize inplace.
bool isInPlace(OpOperand &opOperand) const override;

/// Return true if the given tensor (or an aliasing tensor) is yielded from
/// the containing block. Also include all aliasing tensors in the same block.
bool isTensorYielded(Value tensor) const override;

/// Return true if the buffer of the given tensor value is written to. Must
/// not be called for values inside not yet analyzed functions.
bool isValueWritten(Value value) const;
@@ -261,10 +253,6 @@ class OneShotAnalysisState : public AnalysisState {
int64_t statNumTensorOutOfPlace = 0;
int64_t statNumTensorInPlace = 0;

/// A set of all tensors (and maybe aliasing tensors) that are yielded from a
/// block.
DenseSet<Value> yieldedTensors;

/// A set of uses of tensors that have undefined contents.
DenseSet<OpOperand *> undefinedTensorUses;

47 changes: 0 additions & 47 deletions mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp
@@ -628,53 +628,6 @@ bool AnalysisState::hasUndefinedContents(OpOperand *opOperand) const {
return false;
}

bool AnalysisState::isTensorYielded(Value tensor) const {
// In the absence of analysis information, the conservative answer is "true".
if (!tensor.getDefiningOp<AllocTensorOp>())
return true;

// For AllocTensorOp results, we can do better: They do not alias with any
// preceding value, so we can follow SSA use-def chains and do a simple
// analysis.
SmallVector<OpOperand *> worklist;
DenseSet<OpOperand *> visited;
for (OpOperand &use : tensor.getUses())
worklist.push_back(&use);

while (!worklist.empty()) {
OpOperand *operand = worklist.pop_back_val();
if (visited.contains(operand))
continue;
visited.insert(operand);
Operation *op = operand->getOwner();

// If the op is not bufferizable, we can safely assume that the value is not
// yielded. (When bufferizing that op, it must handle such cases.)
if (!options.dynCastBufferizableOp(op))
continue;

// We cannot analyze through ToMemrefOps, so we have to conservatively
// assume that the value is yielded.
if (isa<ToMemrefOp>(op))
return true;

// Check if the op is returning/yielding.
if (isa<RegionBranchTerminatorOpInterface>(op))
return true;

// Add all aliasing Values to the worklist.
// Note: In the absence of detailed analysis information (e.g., there may be
// no function call analysis information), this `getAliasingValues` is
// conservative and may report additional Values as potentially aliasing.
for (AliasingValue alias : getAliasingValues(*operand))
for (OpOperand &use : alias.value.getUses())
worklist.push_back(&use);
}

// No ReturnLike op found: The value is not yielded.
return false;
}

// bufferization.to_memref is not allowed to change the rank.
static void ensureToMemrefOpIsValid(Value tensor, Type memrefType) {
#ifndef NDEBUG
39 changes: 0 additions & 39 deletions mlir/lib/Dialect/Bufferization/Transforms/OneShotAnalysis.cpp
@@ -181,40 +181,6 @@ void OneShotAnalysisState::createAliasInfoEntry(Value v) {
equivalentInfo.insert(v);
}

// Gather yielded tensors in `yieldedTensors` by querying all aliases. This is
// to ensure that such information is available during bufferization time.
// Alias information can no longer be queried once we have started modifying
// the IR.
void OneShotAnalysisState::gatherYieldedTensors(Operation *op) {
op->walk([&](Operation *returnOp) {
if (!isa<RegionBranchTerminatorOpInterface>(returnOp) ||
!getOptions().isOpAllowed(returnOp))
return WalkResult::advance();

for (OpOperand &returnValOperand : returnOp->getOpOperands()) {
Value returnVal = returnValOperand.get();
// Skip non-tensor values.
if (!isa<TensorType>(returnVal.getType()))
continue;

// Add all aliases of the returned value. But only the ones that are in
// the same block.
applyOnAliases(returnVal, [&](Value v) {
if (auto bbArg = dyn_cast<BlockArgument>(v)) {
if (bbArg.getOwner()->getParentOp() == returnOp->getParentOp())
yieldedTensors.insert(bbArg);
return;
}
Operation *definingOp = v.getDefiningOp();
if (definingOp->getParentOp() == returnOp->getParentOp())
yieldedTensors.insert(v);
});
}

return WalkResult::advance();
});
}

void OneShotAnalysisState::gatherUndefinedTensorUses(Operation *op) {
op->walk([&](Operation *op) {
// Skip unknown ops.
@@ -246,10 +212,6 @@ bool OneShotAnalysisState::isInPlace(OpOperand &opOperand) const {
return inplaceBufferized.contains(&opOperand);
}

bool OneShotAnalysisState::isTensorYielded(Value tensor) const {
return yieldedTensors.contains(tensor);
}

bool OneShotAnalysisState::isValueWritten(Value value) const {
bool isWritten = false;
applyOnAliases(value, [&](Value val) {
@@ -1328,7 +1290,6 @@ LogicalResult bufferization::analyzeOp(Operation *op,
bool failedAnalysis = false;

// Gather some extra analysis data.
state.gatherYieldedTensors(op);
state.gatherUndefinedTensorUses(op);

// Analysis verification: After setting up alias/equivalence sets, each op
