Introduce new optimizer MatMul + BatchNormalization #17915

Merged: 27 commits, Oct 25, 2023
Changes from 2 commits

Commits (27):
de5bc5e  Add new fusion Matmul + BN (sumitsays, Oct 12, 2023)
4cb3d7e  Update comments (sumitsays, Oct 12, 2023)
c797f40  Remove redundant code (sumitsays, Oct 12, 2023)
2024d64  Remove extra method scale_to_axis (sumitsays, Oct 12, 2023)
6ea436f  Refactored the code as per ORT style (sumitsays, Oct 12, 2023)
f63bd11  Added testcase (sumitsays, Oct 13, 2023)
7cc2013  Added test file (sumitsays, Oct 13, 2023)
c92ed58  Added extra assertion (sumitsays, Oct 13, 2023)
8bf29cf  Merge branch 'main' into user/sumita/matmulbn (sumitsays, Oct 16, 2023)
7ddeecf  Use inlinedVector instead of initializer_list (sumitsays, Oct 16, 2023)
d1842c9  Add override specifier (sumitsays, Oct 16, 2023)
2ef8343  Merge branch 'main' into user/sumita/matmulbn (sumitsays, Oct 17, 2023)
57ea97f  Merge branch 'main' into user/sumita/matmulbn (sumitsays, Oct 17, 2023)
f367a36  Addressed bot PR feedback (sumitsays, Oct 17, 2023)
e604ea4  Update the pattern as mentioned by Jeff (sumitsays, Oct 18, 2023)
96d0137  Apply LintRunner formatting changes (sumitsays, Oct 18, 2023)
79984f1  Addressed PR comment (sumitsays, Oct 20, 2023)
b306623  Modified pattern matching to incorporate any combination (sumitsays, Oct 20, 2023)
0d7f524  Updated comment (sumitsays, Oct 20, 2023)
23c23da  Apply lintrunner changes (sumitsays, Oct 20, 2023)
1a26722  Replaced recursion with iteration (sumitsays, Oct 20, 2023)
95e3efb  Updated test model (sumitsays, Oct 20, 2023)
009b86c  Addressed PR comment (sumitsays, Oct 21, 2023)
490dec8  Added comments (sumitsays, Oct 21, 2023)
65e067d  Updated comment (sumitsays, Oct 21, 2023)
018cdfb  Add test case without batchnormalization (sumitsays, Oct 23, 2023)
d79a607  Apply lintrunner (sumitsays, Oct 23, 2023)
2 changes: 2 additions & 0 deletions onnxruntime/core/optimizer/graph_transformer_utils.cc
@@ -50,6 +50,7 @@
#include "core/optimizer/matmul_integer_to_float.h"
#include "core/optimizer/matmul_scale_fusion.h"
#include "core/optimizer/matmul_transpose_fusion.h"
#include "core/optimizer/matmul_bn_fusion.h"
#include "core/optimizer/nchwc_transformer.h"
#include "core/optimizer/noop_elimination.h"
#include "core/optimizer/not_where_fusion.h"
@@ -127,6 +128,7 @@ InlinedVector<std::unique_ptr<RewriteRule>> GenerateRewriteRules(
rules.push_back(std::make_unique<ConvAddFusion>());
rules.push_back(std::make_unique<ConvMulFusion>());
rules.push_back(std::make_unique<ConvBNFusion>());
rules.push_back(std::make_unique<MatmulBNFusion>());
rules.push_back(std::make_unique<ClipQuantFusion>());
rules.push_back(std::make_unique<ReluQuantFusion>());
break;
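For context on what the new rule computes, here is a standalone, illustrative C++ sketch (not code from this PR; the names FoldedMatMul and fold_bn_into_matmul are assumptions). A BatchNormalization consuming a MatMul output folds into a single MatMul with rescaled weights plus a constant bias, since BN(XW) = (XW - mean) * gamma / sqrt(var + eps) + beta:

#include <cmath>
#include <cstddef>
#include <vector>

// Fold per-column BN parameters into a K x N row-major MatMul weight,
// producing the fused weight W' and the constant bias B':
//   W'[k][n] = W[k][n] * gamma[n] / sqrt(var[n] + eps)
//   B'[n]    = beta[n] - mean[n] * gamma[n] / sqrt(var[n] + eps)
struct FoldedMatMul {
  std::vector<float> weight;  // K x N, row-major
  std::vector<float> bias;    // N
};

FoldedMatMul fold_bn_into_matmul(std::vector<float> w, std::size_t k, std::size_t n,
                                 const std::vector<float>& gamma,
                                 const std::vector<float>& beta,
                                 const std::vector<float>& mean,
                                 const std::vector<float>& var, float eps) {
  FoldedMatMul out{std::move(w), std::vector<float>(n)};
  for (std::size_t col = 0; col < n; ++col) {
    const float s = gamma[col] / std::sqrt(var[col] + eps);  // BN scale factor
    for (std::size_t row = 0; row < k; ++row) out.weight[row * n + col] *= s;
    out.bias[col] = beta[col] - mean[col] * s;
  }
  return out;
}

The column-wise weight rescaling is what the new Initializer::scale_to_axis helper in this PR provides.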
75 changes: 75 additions & 0 deletions onnxruntime/core/optimizer/initializer.cc
@@ -201,6 +201,30 @@ struct ScalarAdd {
}
};

// template <typename T>
// struct Broadcast {
//   Tensor operator()(Tensor& tensor, const onnxruntime::TensorShape& destShape) const {
//     size_t newSize = Tensor::CalculateTensorStorageSize(tensor.DataType(), destShape);
//     std::shared_ptr<IAllocator> allocator = std::make_shared<CPUAllocator>();
//     void* newData = nullptr;
//     if (newSize > 0) {
//       newData = allocator->Alloc(newSize);
//     }
//     Tensor newTensor(tensor.DataType(), destShape, newData, allocator);
//
//     // Broadcasting only works for a 1-D source tensor: tile its block_size
//     // elements across every block of the destination.
//     const size_t block_size = narrow<size_t>(tensor.Shape().GetDims().front());
//     const size_t num_blocks = narrow<size_t>(destShape.Size()) / block_size;
//
//     const T* src = tensor.Data<T>();
//     T* dst = newTensor.MutableData<T>();
//     for (size_t i = 0, offset = 0; i < num_blocks; ++i) {
//       for (size_t j = 0; j < block_size; ++j, ++offset) {
//         dst[offset] = src[j];
//       }
//     }
//     return newTensor;
//   }
// };

template <typename T>
struct Sqrt {
void operator()(Tensor& tensor) const {
@@ -280,6 +304,26 @@ Initializer& Initializer::div(const Initializer& other) {
return *this;
}

/*
 * Broadcasts a 1-D tensor only when its dimension equals either the first or
 * the last dimension of the destination shape.
 */
// Initializer& Initializer::mulBy1dInitialer(const Initializer& other) {
//   ORT_ENFORCE(other.dims().size() == 1, "The multiplier tensor should be a 1-D tensor.");
//   ORT_ENFORCE(other.dims().front() == dims().front() || other.dims().front() == dims().back(),
//               "Dimension of the multiplier tensor should equal either the first or the last dimension of the multiplicand tensor.");
//
//   // Broadcast `other` to this tensor's shape, then multiply elementwise.
//   utils::MLTypeCallDispatcher<MLFloat16, BFloat16, float, double, int32_t, int64_t> t_disp(data_.GetElementType());
//   // data_ = t_disp.Invoke<Broadcast>(other.data_, data_.Shape());
//   return *this;
// }

Initializer& Initializer::sqrt() {
utils::MLTypeCallDispatcher<MLFloat16, BFloat16, float, double> t_disp(data_.GetElementType());
t_disp.Invoke<Sqrt>(data_);
@@ -310,6 +354,28 @@ struct ScaleByAxis {
}
};

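// Note: ScaleByAxis above picks one scaler per block (scalers_data indexed by
// the block number i), whereas ScaleToAxis below picks one scaler per position
// inside each block (scalers_data indexed by j). In other words, the scalers
// broadcast across the leading dimensions rather than the trailing ones.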
template <typename T>
struct ScaleToAxis {
void operator()(Tensor& data, const Tensor& scalers, const size_t block_size, const size_t num_blocks) const {
ToNumeric<T> to_numeric;
const auto scaler_size = scalers.Shape().Size();
T* dst = data.MutableData<T>();
const T* scalers_data = scalers.Data<T>();
if (scaler_size == 1) {
const auto numeric_scaler = to_numeric(scalers_data[0]);
for (size_t block_offset = 0, limit = block_size * num_blocks; block_offset < limit; ++block_offset) {
dst[block_offset] = T(to_numeric(dst[block_offset]) * numeric_scaler);
}
} else {
for (size_t block_offset = 0, i = 0; i < num_blocks; i++) {
for (size_t j = 0; j < block_size; ++j, ++block_offset) {
const auto numeric_scaler = to_numeric(scalers_data[j]);
dst[block_offset] = T(to_numeric(dst[block_offset]) * numeric_scaler);
}
}
}
}
};
} // namespace

void Initializer::scale_by_axis(const Initializer& scalers, int axis) {
@@ -320,5 +386,14 @@ void Initializer::scale_by_axis(const Initializer& scalers, int axis) {
utils::MLTypeCallDispatcher<MLFloat16, BFloat16, float, double, int32_t, int64_t> t_disp(data_.GetElementType());
t_disp.Invoke<ScaleByAxis>(data_, scalers.data_, block_size, num_blocks);
}

void Initializer::scale_to_axis(const Initializer& scalers, int axis) {
ORT_ENFORCE(axis >= 0, "Axis must be non-negative");
const size_t block_size = narrow<size_t>(data_.Shape().SizeFromDimension(gsl::narrow_cast<size_t>(axis)));
const size_t num_blocks = size() / block_size;
ORT_ENFORCE(scalers.size() == 1 || scalers.size() == block_size, "Invalid other(scalers) size");
utils::MLTypeCallDispatcher<MLFloat16, BFloat16, float, double, int32_t, int64_t> t_disp(data_.GetElementType());
t_disp.Invoke<ScaleToAxis>(data_, scalers.data_, block_size, num_blocks);
}
#endif // ORT_EXTENDED_MINIMAL_BUILD
} // namespace onnxruntime
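For intuition about the blocking in scale_to_axis, here is a minimal standalone sketch (plain C++ on std::vector rather than ORT types; scale_to_axis_sketch is an illustrative name): with a 2x3 row-major tensor and axis = 1, block_size is 3, so each row is multiplied elementwise by the 3-element scaler, i.e. column-wise scaling of a MatMul weight.

#include <cstddef>
#include <iostream>
#include <vector>

// Re-implementation of the ScaleToAxis indexing: every block of block_size
// consecutive elements is multiplied elementwise by scalers.
void scale_to_axis_sketch(std::vector<float>& data,
                          const std::vector<float>& scalers,
                          std::size_t block_size) {
  const std::size_t num_blocks = data.size() / block_size;
  for (std::size_t block_offset = 0, i = 0; i < num_blocks; ++i) {
    for (std::size_t j = 0; j < block_size; ++j, ++block_offset) {
      data[block_offset] *= scalers[j];
    }
  }
}

int main() {
  std::vector<float> weight = {1, 2, 3,   // row 0
                               4, 5, 6};  // row 1
  const std::vector<float> scale = {10, 100, 1000};
  scale_to_axis_sketch(weight, scale, /*block_size=*/3);
  for (float v : weight) std::cout << v << ' ';  // prints: 10 200 3000 40 500 6000
  std::cout << '\n';
}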
2 changes: 2 additions & 0 deletions onnxruntime/core/optimizer/initializer.h
@@ -87,6 +87,8 @@ class Initializer final {
Initializer& sqrt();

void scale_by_axis(const Initializer& other, int axis);

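// Multiplies every block of trailing dimensions (starting at `axis`)
// elementwise by `other`, broadcasting `other` across the leading dimensions.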
void scale_to_axis(const Initializer& other, int axis);
#endif // ORT_EXTENDED_MINIMAL_BUILD
private:
std::string name_;