Replace all `numpy.bool` with the Python builtin `bool` (#14014)
`numpy.bool` was removed in NumPy 1.24.0.

It was previously an alias for Python's builtin `bool`.

Fixes huggingface/optimum#610

### Motivation and Context

NumPy 1.24.0 breaks, for example, the IO binding helpers.
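
For illustration, a minimal sketch of the failure mode and the fix (not part of the original commit message):

```python
import numpy as np

# On NumPy < 1.24, np.bool was a deprecated alias for the builtin bool.
# On NumPy >= 1.24 the attribute is gone, so np.array([1]).astype(np.bool)
# raises: AttributeError: module 'numpy' has no attribute 'bool'.
# The builtin is a drop-in replacement and yields the same dtype:
mask = np.array([1, 0, 1]).astype(bool)
assert mask.dtype == np.dtype(bool)
```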
fxmarty authored Dec 22, 2022
1 parent 1b58331 commit 4d2dc8b
Showing 6 changed files with 8 additions and 8 deletions.
6 changes: 3 additions & 3 deletions onnxruntime/python/tools/transformers/io_binding_helper.py
@@ -32,7 +32,7 @@ def ort_type_to_numpy_type(ort_type: str):
         "tensor(int32)": numpy.intc,
         "tensor(float)": numpy.float32,
         "tensor(float16)": numpy.float16,
-        "tensor(bool)": numpy.bool,
+        "tensor(bool)": bool,
     }
     if ort_type not in ort_type_to_numpy_type_map:
         raise ValueError(f"{ort_type} not found in map")
@@ -61,7 +61,7 @@ def numpy_type_to_torch_type(numpy_type: numpy.dtype):
         numpy.int32: torch.int32,
         numpy.float32: torch.float32,
         numpy.float16: torch.float16,
-        numpy.bool: torch.bool,
+        bool: torch.bool,
     }
     if numpy_type not in numpy_type_to_torch_type_map:
         raise ValueError(f"{numpy_type} not found in map")
@@ -75,7 +75,7 @@ def torch_type_to_numpy_type(torch_type: torch.dtype):
         torch.int32: numpy.intc,
         torch.float32: numpy.float32,
         torch.float16: numpy.float16,
-        torch.bool: numpy.bool,
+        torch.bool: bool,
     }
     if torch_type not in torch_type_to_numpy_type_map:
         raise ValueError(f"{torch_type} not found in map")
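
As a quick sanity check on the replacement, here is a small standalone sketch (using only plain NumPy and PyTorch, not the helper functions above) of why the builtin `bool` is a drop-in key for these maps:

```python
import numpy
import torch

# NumPy treats the builtin bool as its canonical boolean dtype,
# so it can replace the removed numpy.bool alias as a dict key.
assert numpy.dtype(bool) == numpy.dtype("bool")

# A bool ndarray converts to a torch.bool tensor, matching the
# bool -> torch.bool entry in numpy_type_to_torch_type above.
x = numpy.zeros(3, dtype=bool)
assert torch.from_numpy(x).dtype is torch.bool
```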
@@ -84,7 +84,7 @@ def test_torch_embedding_scale_grad_by_freq(self):
         x = np.random.randn(3, 4).astype(np.int64)
         w = np.random.randn(10, 3).astype(np.float32)
         padding_idx = np.random.randint(3, size=1).astype(np.int64)
-        scale = np.array([1]).astype(np.bool)
+        scale = np.array([1]).astype(bool)
         y = torch_embedding_reference_implementation(w, x, padding_idx, scale)
         expect(
             node,
@@ -21,7 +21,7 @@
 dropout_np_vals = np.asarray([0.1], dtype=np.float32).reshape(())
 dropout_initializer = numpy_helper.from_array(dropout_np_vals, "ratio")
 
-dropout_mode_np_vals = np.array([False], dtype=np.bool).reshape(())
+dropout_mode_np_vals = np.array([False], dtype=bool).reshape(())
 dropout_mode_initializer = numpy_helper.from_array(dropout_mode_np_vals, "mode")
 
 b_weight_np_vals = (0.01 * np.arange(hidden_size * weight_dim_to_split, dtype=np.float32)).reshape(
@@ -66,7 +66,7 @@
 dropout_np_vals = np.asarray([0.1], dtype=np.float32).reshape(())
 dropout_initializer = numpy_helper.from_array(dropout_np_vals, "ratio")
 
-dropout_mode_np_vals = np.array([False], dtype=np.bool).reshape(())
+dropout_mode_np_vals = np.array([False], dtype=bool).reshape(())
 dropout_mode_initializer = numpy_helper.from_array(dropout_mode_np_vals, "mode")
 
 shape_initializer3 = numpy_helper.from_array(
2 changes: 1 addition & 1 deletion orttraining/orttraining/python/ort_trainer.py
@@ -233,7 +233,7 @@ def dtype_torch_to_numpy(torch_dtype):
     elif torch_dtype == torch.int16 or torch_dtype == torch.short:
         return np.int16
     elif torch_dtype == torch.bool:
-        return np.bool
+        return bool
     else:
         raise Exception("Torch type to numpy type mapping unavailable for: " + str(torch_dtype))
2 changes: 1 addition & 1 deletion orttraining/tools/scripts/opset12_model_transform.py
@@ -87,7 +87,7 @@ def process_trainabledropout(model):
     )
     index += 1
     # add training_mode output
-    mode_scalar = np.asarray([True]).astype(np.bool).reshape(())
+    mode_scalar = np.asarray([True]).astype(bool).reshape(())
     mode_value = numpy_helper.from_array(mode_scalar, "training_mode")
     training_mode_node = add_const(
         model, "dropout_training_mode_node_%d" % index, "dropout_training_mode_%d" % index, t_value=mode_value
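
For reference, a standalone sketch checking that the patched construction still yields a boolean ONNX initializer (assumes only the public `onnx` package, outside the transform script):

```python
import numpy as np
from onnx import TensorProto, numpy_helper

# Same construction as the patched line above: a rank-0 boolean scalar.
mode_scalar = np.asarray([True]).astype(bool).reshape(())
mode_value = numpy_helper.from_array(mode_scalar, "training_mode")
assert mode_value.data_type == TensorProto.BOOL
assert numpy_helper.to_array(mode_value) == True  # round-trips as a bool scalar
```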
