Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[WebNN] Support SkipSimplifiedLayerNormalization op #23151

Open
wants to merge 2 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 2 additions & 1 deletion js/web/docs/webnn-operators.md
Original file line number Diff line number Diff line change
Expand Up @@ -89,9 +89,10 @@ operators and the supported opset domain/versions in **WebNN EP** by ONNX Runtim
| ScatterElements | ai.onnx(11-12, 13-15, 16-17, 18+) | scatterElements | ✗ | ✓ | Only supports 'reduction' == 'none' |
| ScatterND | ai.onnx(11-12, 13-15, 16-17, 18+) | scatterND | ✗ | ✓ | Only supports 'reduction' == 'none' |
| Shape | ai.onnx(7-12, 13-14, 15-18, 19-20, 21+) | slice | ✓ | ✓ | |
| SimplifiedLayerNormalization | ai.onnx(1+) | pow + reduceMean + add + sqrt + div + mul | ✓ | ✓ | |
| SimplifiedLayerNormalization | com.microsoft(1+) | pow, reduceMean, add, sqrt, div, mul | ✓ | ✓ | |
| Sigmoid | ai.onnx(7-12, 13+) | sigmoid | ✓ | ✓ | |
| Sign | ai.onnx(9-12, 13+) | sign | ✓ | ✓ | |
| SkipSimplifiedLayerNormalization | com.microsoft(1+) | pow, reduceMean, add, sqrt, div, mul | ✓ | ✓ | |
| Softplus | ai.onnx(7+) | softplus | ✓ | ✓ | |
| Softsign | ai.onnx(7+) | softsign | ✓ | ✓ | |
| Sin | ai.onnx(7+) | sin | ✓ | ✓ | |
Expand Down
1 change: 1 addition & 0 deletions onnxruntime/core/providers/webnn/builders/helper.h
Original file line number Diff line number Diff line change
Expand Up @@ -278,6 +278,7 @@ static const InlinedHashMap<std::string, std::string> op_map = {
{"Softplus", "softplus"},
{"Softsign", "softsign"},
{"Sin", "sin"},
{"SkipSimplifiedLayerNormalization", "layerNormalization"},
{"Slice", "slice"},
{"Softmax", "softmax"},
{"Split", "split"},
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,7 @@
const logging::Logger& logger) const {
const auto& op_type = node.OpType();
const auto& input_defs = node.InputDefs();
const auto& output_defs = node.OutputDefs();
ORT_RETURN_IF_NOT(input_defs.size() >= 2, op_type, " requires at least two inputs.");

emscripten::val input = model_builder.GetOperand(input_defs[0]->Name());
Expand All @@ -45,7 +46,8 @@
options.set("label", node.Name());

std::vector<int64_t> scale_shape;
ORT_RETURN_IF_NOT(GetShape(*input_defs[1], scale_shape, logger), "Cannot get scale shape");
const size_t scale_input_index = op_type == "SkipSimplifiedLayerNormalization" ? 2 : 1;
ORT_RETURN_IF_NOT(GetShape(*input_defs[scale_input_index], scale_shape, logger), "Cannot get scale shape");
const auto scale_size = scale_shape.size();
// Except LayerNormalization, other normalization ops' scale input should be 1-D.
if (op_type == "LayerNormalization") {
Expand All @@ -55,19 +57,17 @@
ORT_RETURN_IF_NOT(scale_size == 1, "The scale size should be one.");
}

if (input_defs.size() >= 3 && !input_defs[2]->Name().empty()) {
emscripten::val scale = model_builder.GetOperand(input_defs[scale_input_index]->Name());
options.set("scale", scale);

const size_t bias_input_index = op_type == "SkipSimplifiedLayerNormalization" ? 3 : 2;
emscripten::val bias = emscripten::val::undefined();
if (input_defs.size() > bias_input_index && input_defs[bias_input_index]->Exists()) {
Copy link
Contributor

@fdwr fdwr Dec 19, 2024

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

(minor recommendation) 🤔 I see this pattern so often that adding a small helper would improve readability and reduce the chance of typos.

Suggested change
if (input_defs.size() > bias_input_index && input_defs[bias_input_index]->Exists()) {
if (TensorExists(input_defs, bias_input_index)) {
-      if (op_type == "SkipSimplifiedLayerNormalization" && output_defs.size() > 3 && output_defs[3]->Exists()) {
+      if (op_type == "SkipSimplifiedLayerNormalization" && TensorExists(output_defs, 3)) {
bool TensorExists(gsl::span<const NodeArg*> defs, size_t tensor_index) noexcept {
  return tensor_index < defs.size() && defs[tensor_index]->Exists();
}

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Thanks, good proposal!

// Bias input exists, and bias's shape should be the same as scale's shape.
std::vector<int64_t> bias_shape;
ORT_RETURN_IF_NOT(GetShape(*input_defs[2], bias_shape, logger), "Cannot get bias shape");
ORT_RETURN_IF_NOT(GetShape(*input_defs[bias_input_index], bias_shape, logger), "Cannot get bias shape");
ORT_RETURN_IF_NOT(bias_shape == scale_shape, "The bias' shape should be equal to scale's shape.");
}

emscripten::val scale = model_builder.GetOperand(input_defs[1]->Name());
options.set("scale", scale);

if (input_defs.size() >= 3 && !input_defs[2]->Name().empty()) {
// Bias input exists, and bias's shape is the same as scale's shape.
emscripten::val bias = model_builder.GetOperand(input_defs[2]->Name());
bias = model_builder.GetOperand(input_defs[bias_input_index]->Name());
options.set("bias", bias);
}

Expand All @@ -76,6 +76,8 @@
options.set("epsilon", epsilon);

emscripten::val output = emscripten::val::undefined();
// SkipSimplifiedLayerNormalization's output: input_skip_bias_sum.
emscripten::val input_skip_bias_sum = emscripten::val::undefined();
if (op_type == "BatchNormalization") {
ORT_RETURN_IF_NOT(input_defs.size() == 5, "BatchNormalization requires five inputs.");
emscripten::val mean = model_builder.GetOperand(input_defs[3]->Name());
Expand All @@ -85,7 +87,9 @@
}

output = model_builder.GetBuilder().call<emscripten::val>("batchNormalization", input, mean, variance, options);
} else if (op_type == "LayerNormalization" || op_type == "SimplifiedLayerNormalization") {
} else if (op_type == "LayerNormalization" ||
op_type == "SimplifiedLayerNormalization" ||
op_type == "SkipSimplifiedLayerNormalization") {
int64_t axis = helper.Get("axis", -1);
axis = HandleNegativeAxis(axis, rank);
std::vector<uint32_t> axes(rank - SafeInt<uint32_t>(axis));
Expand All @@ -94,13 +98,17 @@
if (op_type == "LayerNormalization") {
options.set("axes", emscripten::val::array(axes));
output = model_builder.GetBuilder().call<emscripten::val>("layerNormalization", input, options);
} else { // SimplifiedLayerNormalization
} else { // SimplifiedLayerNormalization or SkipSimplifiedLayerNormalization
/**
WebNN doesn't support SimplifiedLayerNormalization. So decompose it into a series of ops:
X --> Pow --> ReduceMean --> Add --> Sqrt --> Div -> Mul
^ ^ ^ ^ ^
| | | | |
Y:2 axis B:epsilon A:X A:scale
WebNN doesn't support SimplifiedLayerNormalization or SkipSimplifiedLayerNormalization.
So decompose it into a series of ops:
X --> Pow --> ReduceMean --> Add --> Sqrt --> Div -> Mul -> Add (optional)
^ ^ ^ ^ ^ ^
| | | | | |
Y:2 axis B:epsilon A:X A:scale B:bias

If it is SkipSimplifiedLayerNormalization and its output input_skip_bias_sum exists,
input_skip_bias_sum = X + skip + bias (if it exists)
*/

int32_t input_type;
Expand Down Expand Up @@ -137,6 +145,25 @@
// Mul
common_options.set("label", node.Name() + "_mul");
output = model_builder.GetBuilder().call<emscripten::val>("mul", scale, div, common_options);

// Add (if bias exists)
if (!bias.isUndefined()) {
common_options.set("label", node.Name() + "_add_bias");
fdwr marked this conversation as resolved.
Show resolved Hide resolved
output = model_builder.GetBuilder().call<emscripten::val>("add", output, bias, common_options);
}

// SkipSimplifiedLayerNormalization's output input_skip_bias_sum is the sum of input, skip, and bias.
if (op_type == "SkipSimplifiedLayerNormalization" && output_defs.size() > 3 && output_defs[3]->Exists()) {
emscripten::val skip = model_builder.GetOperand(input_defs[1]->Name());
common_options.set("label", node.Name() + "_add_skip");
input_skip_bias_sum = model_builder.GetBuilder().call<emscripten::val>("add", input, skip, common_options);
if (!bias.isUndefined()) {
fdwr marked this conversation as resolved.
Show resolved Hide resolved
common_options.set("label", node.Name() + "_add_skip_bias");
input_skip_bias_sum = model_builder.GetBuilder().call<emscripten::val>(
"add", input_skip_bias_sum, bias, common_options);
}
model_builder.AddOperand(output_defs[3]->Name(), std::move(input_skip_bias_sum));
}
}
} else if (op_type == "InstanceNormalization") {
// WebNN spec only supports 4D input for instanceNormalization.
Expand Down Expand Up @@ -188,7 +215,7 @@
} else {
return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Unsupported normalization op: ", op_type);
}
model_builder.AddOperand(node.OutputDefs()[0]->Name(), std::move(output));
model_builder.AddOperand(output_defs[0]->Name(), std::move(output));

Check warning on line 218 in onnxruntime/core/providers/webnn/builders/impl/normalization_op_builder.cc

View workflow job for this annotation

GitHub Actions / Optional Lint C++

[cpplint] reported by reviewdog 🐶 Add #include <utility> for move [build/include_what_you_use] [4] Raw Output: onnxruntime/core/providers/webnn/builders/impl/normalization_op_builder.cc:218: Add #include <utility> for move [build/include_what_you_use] [4]

return Status::OK();
}
Expand All @@ -215,9 +242,19 @@
}

const auto& output_defs = node.OutputDefs();
if (output_defs.size() != 1) {
LOGS(logger, VERBOSE) << op_type << " output count must be one.";
return false;
if (op_type == "SkipSimplifiedLayerNormalization") {
for (size_t i = 1; i < output_defs.size(); i++) {
if (output_defs[i]->Exists() && i < 3) {
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Should it also be an error if there are more than 3 outputs? Currently it loops through all outputs, including ones beyond 3, but it doesn't give an error for them.

  if (op_type == "SkipSimplifiedLayerNormalization") {
    if (output_defs.size() > 3) {
        LOGS(logger, VERBOSE) << "WebNN's SkipSimplifiedLayerNormalization does not support " << output_defs.size() << " outputs, only 1.";
    }
    for (size_t i = 1, count = std::min<size_t>(output_defs.size(), 3); i < count; i++) {

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I will add the check for output count.

// Output mean and inv_std_var are used for training mode, which is not supported.
const auto output_name = i == 1 ? "mean" : "inv_std_var";
LOGS(logger, VERBOSE) << "SkipSimplifiedLayerNormalization's output: " << output_name << " is not supported.";
}
}
} else {
if (output_defs.size() != 1) {
LOGS(logger, VERBOSE) << op_type << " output count must be one.";
return false;
}
}

if (op_type == "BatchNormalization" && helper.Get("training_mode", 0)) {
Expand Down Expand Up @@ -277,6 +314,7 @@
"InstanceNormalization",
"LayerNormalization",
"SimplifiedLayerNormalization",
"SkipSimplifiedLayerNormalization",
};

op_registrations.builders.push_back(std::make_unique<NormalizationOpBuilder>());
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -159,6 +159,7 @@ static OpBuilderRegistrations CreateOpBuilderRegistrations() {
CreateNormalizationOpBuilder("InstanceNormalization", op_registrations);
CreateNormalizationOpBuilder("LayerNormalization", op_registrations);
CreateNormalizationOpBuilder("SimplifiedLayerNormalization", op_registrations);
CreateNormalizationOpBuilder("SkipSimplifiedLayerNormalization", op_registrations);
}

{ // Pad
Expand Down
Loading