Merge remote-tracking branch 'origin' into prathikrao/slice-op-webgpu-native

prathikr committed Dec 18, 2024
2 parents 20fb3e1 + e76bd2f commit 3dc9dcf
Showing 16 changed files with 375 additions and 448 deletions.
4 changes: 1 addition & 3 deletions .github/workflows/lint.yml
@@ -45,7 +45,7 @@ jobs:
- name: Setup Python
uses: actions/setup-python@v5
with:
# Version range or exact version of Python to use, using SemVer's version range syntax. Reads from .python-version if unset.
# Use the version configured in target-version of [tool.black] section in pyproject.toml.
python-version: "3.10"
- name: Setup Rust
uses: actions-rs/toolchain@v1
@@ -55,12 +55,10 @@
- name: Update PATH
run: |
echo "$HOME/.local/bin" >> "$GITHUB_PATH"
- name: Install dependencies
run: |
set -e -x
python -m pip install --user -r requirements-dev.txt
python -m pip install --user lintrunner lintrunner-adapters
lintrunner init
- name: Run lintrunner on all files
run: |
18 changes: 5 additions & 13 deletions .lintrunner.toml
@@ -2,31 +2,23 @@
# You can install the dependencies and initialize with
#
# ```sh
# pip install lintrunner lintrunner-adapters
# pip install -r requirements-lintrunner.txt
# lintrunner init
# ```
#
# This will install lintrunner on your system and download all the necessary
# dependencies to run linters locally.
# If you want to see what lintrunner init will install, run
# `lintrunner init --dry-run`.
#
# To lint local changes:
# To format local changes:
#
# ```bash
# lintrunner
# lintrunner -a
# ```
#
# To lint all files:
# To format all files:
#
# ```bash
# lintrunner --all-files
# ```
#
# To format files:
#
# ```bash
# lintrunner f --all-files
# lintrunner -a --all-files
# ```
#
# To read more about lintrunner, see [wiki](https://github.com/pytorch/pytorch/wiki/lintrunner).
4 changes: 0 additions & 4 deletions CODEOWNERS
@@ -9,10 +9,6 @@
/onnxruntime/core/graph/contrib_ops/quantization_defs.* @microsoft/onnxruntime-mlas
/onnxruntime/core/mlas/** @microsoft/onnxruntime-mlas

# build pipelines and workflows
/tools/ci_build/github/azure-pipelines @microsoft/onnxruntime-es
/.github/workflows @microsoft/onnxruntime-es

# Dependencies
requirements-dev.txt @microsoft/onnxruntime-admin
requirements-doc.txt @microsoft/onnxruntime-admin
12 changes: 3 additions & 9 deletions docs/Coding_Conventions_and_Standards.md
@@ -164,22 +164,16 @@ dependencies to run linters locally.
If you want to see what lintrunner init will install, run
`lintrunner init --dry-run`.

To lint local changes:

```bash
lintrunner
```

To format files and apply suggestions:
To format local changes:

```bash
lintrunner -a
```

To lint all files:
To format all files:

```bash
lintrunner --all-files
lintrunner -a --all-files
```

To show help text:
4 changes: 2 additions & 2 deletions js/web/docs/webnn-operators.md
@@ -27,7 +27,7 @@ operators and the supported opset domain/versions in **WebNN EP** by ONNX Runtim
| Cos | ai.onnx(7+) | cos ||| |
| CumSum | ai.onnx(11-13, 14+) | cumulativeSum ||| 'axis' input should be a constant |
| Div | ai.onnx(7-12, 13, 14+) | div ||| |
| DequantizeLinear | ai.onnx(10-12, 13-18, 19-20, 21-22, 23+) | dequantizeLinear | || |
| DequantizeLinear | ai.onnx(10-12, 13-18, 19-20, 21-22, 23+) | dequantizeLinear | || The shape of x_scale should be a subsample of the shape of input |
| Dropout | ai.onnx(7-9, 10-11, 12, 13-21, 22+) | identity ||| Only supports test mode |
| Einsum | ai.onnx(12+) | reshape, transpose, matmul, reduceSum, mul, triangular ||| |
| Elu | ai.onnx(7+) | elu ||| WebNN CPU backend only supports 'alpha' value is 1.0 |
@@ -71,7 +71,7 @@ operators and the supported opset domain/versions in **WebNN EP** by ONNX Runtim
| Pad | ai.onnx(7-10, 11-12, 13-17, 18, 19-20, 21+) | pad ||| modes == 'wrap' is not supported |
| Pow | ai.onnx(7-11, 12, 13-14, 15+) | pow ||| |
| PRelu | ai.onnx(7-8, 9-15, 16+) | prelu ||| WebNN CPU backend restricts the last dimension of input and slope to be same (Chromium issue: https://issues.chromium.org/issues/335517470) |
| QuantizeLinear | ai.onnx(10-12, 13-18, 19-20, 21-22, 23+) | quantizeLinear | || |
| QuantizeLinear | ai.onnx(10-12, 13-18, 19-20, 21-22, 23+) | quantizeLinear | || The shape of x_scale should be a subsample of the shape of input |
| Reciprocal | ai.onnx(7-12, 13+) | reciprocal ||| |
| ReduceL1 | ai.onnx(7-10, 11-12, 13-17, 18+) | reduceL1 ||| Input 'axes' if present should be a constant |
| ReduceL2 | ai.onnx(7-10, 11-12, 13-17, 18+) | reduceL2 ||| Input 'axes' if present should be a constant |
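The updated DequantizeLinear and QuantizeLinear notes above describe a shape constraint between `x_scale` and the input. As a minimal TypeScript sketch of one plausible reading of that constraint — the helper name `isSubsampleShape` and the element-wise divisibility rule are assumptions for illustration, not the WebNN EP's actual validation code:

```ts
// Hedged sketch: one way to express "the shape of x_scale should be a
// subsample of the shape of input", read here as: same rank, and every
// x_scale dimension evenly divides the corresponding input dimension
// (block-wise scales). This interpretation is an assumption.
const isSubsampleShape = (inputShape: readonly number[], scaleShape: readonly number[]): boolean =>
  scaleShape.length === inputShape.length &&
  scaleShape.every((d, i) => d > 0 && inputShape[i] % d === 0);

// Example: a [4, 8] input with a [1, 2] x_scale passes this check,
// while a [4, 8] input with a [3, 8] x_scale does not.
console.log(isSubsampleShape([4, 8], [1, 2])); // true
console.log(isSubsampleShape([4, 8], [3, 8])); // false
```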
12 changes: 7 additions & 5 deletions js/web/lib/wasm/jsep/webnn/tensor-manager.ts
@@ -141,8 +141,9 @@ class TensorWrapper {
return this.mlContext.readTensor(this.mlTensor);
}

public sameTypeAndShape(dataType: MLOperandDataType, shape: readonly number[]): boolean {
public canReuseTensor(context: MLContext, dataType: MLOperandDataType, shape: readonly number[]): boolean {
return (
this.mlContext === context &&
this.dataType === dataType &&
this.tensorShape.length === shape.length &&
this.tensorShape.every((v, i) => v === shape[i])
@@ -176,12 +177,13 @@ class TensorIdTracker {
}

public async ensureTensor(
context: MLContext,
dataType: MLOperandDataType,
shape: readonly number[],
copyOld: boolean,
): Promise<MLTensor> {
if (this.wrapper) {
if (this.wrapper.sameTypeAndShape(dataType, shape)) {
if (this.wrapper.canReuseTensor(context, dataType, shape)) {
return this.wrapper.tensor;
} else {
if (copyOld) {
@@ -288,7 +290,7 @@ class TensorManagerImpl implements TensorManager {
if (!tensor) {
throw new Error('Tensor not found.');
}
return tensor.ensureTensor(dataType, shape, copyOld);
return tensor.ensureTensor(this.backend.currentContext, dataType, shape, copyOld);
}

public upload(tensorId: TensorId, data: Uint8Array): void {
@@ -354,15 +356,15 @@ class TensorManagerImpl implements TensorManager {
readable: boolean,
): Promise<TensorWrapper> {
const sessionId = this.backend.currentSessionId;
const context = this.backend.currentContext;
for (const [index, tensor] of this.freeTensors.entries()) {
if (tensor.sameTypeAndShape(dataType, shape)) {
if (tensor.canReuseTensor(context, dataType, shape)) {
LOG_DEBUG('verbose', () => `[WebNN] Reusing tensor {dataType: ${dataType}, shape: ${shape}}`);
const wrapper = this.freeTensors.splice(index, 1)[0];
wrapper.sessionId = sessionId;
return wrapper;
}
}
const context = this.backend.currentContext;
LOG_DEBUG('verbose', () => `[WebNN] MLContext.createTensor {dataType: ${dataType}, shape: ${shape}}`);
const tensor = await context.createTensor({
dataType,
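To summarize the behavioral change in tensor-manager.ts above: a cached tensor is now reused only when its owning `MLContext` matches the caller's current context, in addition to the existing data-type and shape checks. The predicate body below mirrors the `canReuseTensor` logic shown in the diff, but the surrounding `SimpleTensorWrapper` class and the stubbed type aliases are illustrative stand-ins rather than the real module:

```ts
// Illustrative stand-ins for the WebNN types used by the real module.
type MLContext = object;
type MLOperandDataType = 'float32' | 'float16' | 'int32' | 'uint8';

// Simplified wrapper mirroring the reuse check introduced in this commit:
// the owning MLContext must now match, in addition to data type and shape.
class SimpleTensorWrapper {
  constructor(
    private readonly mlContext: MLContext,
    private readonly dataType: MLOperandDataType,
    private readonly tensorShape: readonly number[],
  ) {}

  public canReuseTensor(context: MLContext, dataType: MLOperandDataType, shape: readonly number[]): boolean {
    return (
      this.mlContext === context &&
      this.dataType === dataType &&
      this.tensorShape.length === shape.length &&
      this.tensorShape.every((v, i) => v === shape[i])
    );
  }
}

// A tensor created under one context is no longer considered reusable from another.
const contextA = {};
const contextB = {};
const wrapper = new SimpleTensorWrapper(contextA, 'float32', [1, 3, 224, 224]);
console.log(wrapper.canReuseTensor(contextA, 'float32', [1, 3, 224, 224])); // true
console.log(wrapper.canReuseTensor(contextB, 'float32', [1, 3, 224, 224])); // false
```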
160 changes: 80 additions & 80 deletions js/web/test/data/ops/conv.jsonc
@@ -391,48 +391,48 @@
}
]
},
{
"name": "conv - vectorize group - B",
"operator": "Conv",
"inputShapeDefinitions": "rankOnly",
"opset": { "domain": "", "version": 17 },
"attributes": [
{ "name": "kernel_shape", "data": [2, 2], "type": "ints" },
{ "name": "group", "data": 3, "type": "int" }
],
"cases": [
{
"name": "T[0]",
"inputs": [
{
"data": [
0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0,
19.0, 20.0, 21.0, 22.0, 23.0, 0, 0, 0
],
"dims": [1, 3, 3, 3],
"type": "float32"
},
{
"data": [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0],
"dims": [3, 1, 2, 2],
"type": "float32"
},
{
"data": [0.1, 0.2, 0.3],
"dims": [3],
"type": "float32"
}
],
"outputs": [
{
"data": [27.1, 37.1, 57.1, 67.1, 293.2, 319.2, 371.2, 397.2, 847.3, 889.3, 409.3, 428.3],
"dims": [1, 3, 2, 2],
"type": "float32"
}
]
}
]
},
// {
// "name": "conv - vectorize group - B",
// "operator": "Conv",
// "inputShapeDefinitions": "rankOnly",
// "opset": { "domain": "", "version": 17 },
// "attributes": [
// { "name": "kernel_shape", "data": [2, 2], "type": "ints" },
// { "name": "group", "data": 3, "type": "int" }
// ],
// "cases": [
// {
// "name": "T[0]",
// "inputs": [
// {
// "data": [
// 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0,
// 19.0, 20.0, 21.0, 22.0, 23.0, 0, 0, 0
// ],
// "dims": [1, 3, 3, 3],
// "type": "float32"
// },
// {
// "data": [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0],
// "dims": [3, 1, 2, 2],
// "type": "float32"
// },
// {
// "data": [0.1, 0.2, 0.3],
// "dims": [3],
// "type": "float32"
// }
// ],
// "outputs": [
// {
// "data": [27.1, 37.1, 57.1, 67.1, 293.2, 319.2, 371.2, 397.2, 847.3, 889.3, 409.3, 428.3],
// "dims": [1, 3, 2, 2],
// "type": "float32"
// }
// ]
// }
// ]
// },
{
"name": "conv - vectorize group - C",
"operator": "Conv",
@@ -470,44 +470,44 @@
}
]
},
{
"name": "conv - vectorize group - D",
"operator": "Conv",
"inputShapeDefinitions": "rankOnly",
"opset": { "domain": "", "version": 17 },
"attributes": [
{ "name": "kernel_shape", "data": [2, 2], "type": "ints" },
{ "name": "group", "data": 3, "type": "int" },
{ "name": "strides", "data": [2, 2], "type": "ints" }
],
"cases": [
{
"name": "T[0] strides = [2, 2]",
"inputs": [
{
"data": [
0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0,
19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0
],
"dims": [1, 3, 3, 4],
"type": "float32"
},
{
"data": [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0],
"dims": [3, 1, 2, 2],
"type": "float32"
}
],
"outputs": [
{
"data": [34, 54, 386, 438, 1122, 1206],
"dims": [1, 3, 1, 2],
"type": "float32"
}
]
}
]
},
// {
// "name": "conv - vectorize group - D",
// "operator": "Conv",
// "inputShapeDefinitions": "rankOnly",
// "opset": { "domain": "", "version": 17 },
// "attributes": [
// { "name": "kernel_shape", "data": [2, 2], "type": "ints" },
// { "name": "group", "data": 3, "type": "int" },
// { "name": "strides", "data": [2, 2], "type": "ints" }
// ],
// "cases": [
// {
// "name": "T[0] strides = [2, 2]",
// "inputs": [
// {
// "data": [
// 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0,
// 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0
// ],
// "dims": [1, 3, 3, 4],
// "type": "float32"
// },
// {
// "data": [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0],
// "dims": [3, 1, 2, 2],
// "type": "float32"
// }
// ],
// "outputs": [
// {
// "data": [34, 54, 386, 438, 1122, 1206],
// "dims": [1, 3, 1, 2],
// "type": "float32"
// }
// ]
// }
// ]
// },
{
"name": "conv - pointwise",
"operator": "Conv",