[CODEMOD][caffe2] replace numpy.bool with bool (pytorch#111432)
Test Plan:
numpy.bool has been deprecated since numpy-1.20.0 [1] and is removed in current numpy releases. This replaces all references to it with the equivalent builtin `bool` type, using the following oneliner:
```
rg -l 'np\.bool' caffe2 | grep '\.py$' | xargs perl -pi -e 's,\bnp\.bool\b,bool,'
```
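Here `rg -l` lists the files that mention `np.bool`, the `grep` keeps only Python sources, and `perl -pi -e` rewrites each file in place; the `\b` word boundaries ensure names such as `np.bool_` are left untouched.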
1. https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations
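As a sanity check — a minimal sketch, not part of the commit — the builtin `bool` is a drop-in replacement for the removed `np.bool` alias when used as a numpy dtype:

```python
import numpy as np

# np.bool was only an alias for the builtin bool; as a dtype argument the
# builtin resolves to numpy's np.bool_ scalar type exactly as before.
a = np.array([True, False], dtype=bool)  # previously dtype=np.bool
assert a.dtype == np.bool_
assert np.dtype(bool) == np.dtype(np.bool_)
```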

Differential Revision: D50372711

Pull Request resolved: pytorch#111432
Approved by: https://github.com/Skylion007
Igor Sugak authored and pytorchmergebot committed Oct 18, 2023
1 parent fa99562 commit 93e5065
Showing 17 changed files with 49 additions and 49 deletions.
2 changes: 1 addition & 1 deletion caffe2/python/core.py
@@ -1645,7 +1645,7 @@ def do_set(operator):
return do_set(self.GivenTensorInt64Fill)
elif array.dtype == str:
return do_set(self.GivenTensorStringFill)
-elif array.dtype == np.bool:
+elif array.dtype == bool:
return do_set(self.GivenTensorBoolFill)
else:
return do_set(self.GivenTensorFill)
20 changes: 10 additions & 10 deletions caffe2/python/hypothesis_test.py
@@ -61,7 +61,7 @@ def _tensor_and_indices(min_dim=1, max_dim=4, dtype=np.float32,
_NUMPY_TYPE_TO_ENUM = {
np.float32: core.DataType.FLOAT,
np.int32: core.DataType.INT32,
-np.bool: core.DataType.BOOL,
+bool: core.DataType.BOOL,
np.uint8: core.DataType.UINT8,
np.int8: core.DataType.INT8,
np.uint16: core.DataType.UINT16,
@@ -514,7 +514,7 @@ def test_last_n_windows(self, X, gc, dc):
import numpy.testing as npt
npt.assert_almost_equal(output, new_output, decimal=5)

-@given(dtype=st.sampled_from([np.float32, np.float64, np.int32, np.bool]))
+@given(dtype=st.sampled_from([np.float32, np.float64, np.int32, bool]))
@settings(deadline=1000)
def test_print(self, dtype):
data = np.random.permutation(6).astype(dtype)
@@ -2134,7 +2134,7 @@ def test_cast(self, a, src, dst, use_name, gc, dc):
# Casting from a float type outside the range of the integral
# type is UB.
ftypes = [np.float32, np.float64]
-if src in ftypes and dst not in ftypes and dst is not np.bool:
+if src in ftypes and dst not in ftypes and dst is not bool:
info = np.iinfo(dst)
a = np.clip(a, info.min, info.max)

@@ -2189,7 +2189,7 @@ def ref(data):
self.assertDeviceChecks(dc, op, [a], [0])
self.assertReferenceChecks(gc, op, [a], ref)

-@given(data=_dtypes(dtypes=[np.int32, np.int64, np.float32, np.bool]).
+@given(data=_dtypes(dtypes=[np.int32, np.int64, np.float32, bool]).
flatmap(lambda dtype: hu.tensor(
min_dim=1, dtype=dtype, elements=hu.elements_of_type(dtype))),
has_input=st.booleans(),
@@ -2201,9 +2201,9 @@ def ref(data):
def test_constant_fill(self, data, has_input, has_extra_shape, extra_shape,
gc, dc):
dtype = data.dtype.type
-# in opt mode, np.bool is converted into np.bool_
-if data.dtype == np.dtype(np.bool):
-dtype = np.bool
+# in opt mode, bool is converted into np.bool_
+if data.dtype == np.dtype(bool):
+dtype = bool

value = data.item(0)
gt_shape = data.shape
@@ -2236,15 +2236,15 @@ def ref(inputs=None):
out, = self.assertReferenceChecks(gc, op, inputs, ref)
self.assertEqual(dtype, out.dtype)

-@given(data=_dtypes(dtypes=[np.int32, np.int64, np.float32, np.bool]).
+@given(data=_dtypes(dtypes=[np.int32, np.int64, np.float32, bool]).
flatmap(lambda dtype: hu.tensor(
min_dim=1, dtype=dtype, elements=hu.elements_of_type(dtype))),
**hu.gcs)
@settings(deadline=1000)
def test_constant_fill_from_tensor(self, data, gc, dc):
dtype = data.dtype.type
-if data.dtype == np.dtype(np.bool):
-dtype = np.bool
+if data.dtype == np.dtype(bool):
+dtype = bool

value = np.array([data.item(0)], dtype=dtype)
inputs = [data, value]
2 changes: 1 addition & 1 deletion caffe2/python/hypothesis_test_util.py
@@ -150,7 +150,7 @@ def elements_of_type(dtype=np.float32, filter_=None):
elems = st.integers(min_value=0, max_value=2 ** 31 - 1)
elif dtype is np.int64:
elems = st.integers(min_value=0, max_value=2 ** 63 - 1)
-elif dtype is np.bool:
+elif dtype is bool:
elems = st.booleans()
else:
raise ValueError("Unexpected dtype without elements provided")
4 changes: 2 additions & 2 deletions caffe2/python/ideep/dropout_op_test.py
@@ -29,7 +29,7 @@ def test_dropout_is_test(self, X, in_place, ratio, gc, dc):
# No sense in checking gradients for test phase

def reference_dropout_test(x):
-return x, np.ones(x.shape, dtype=np.bool)
+return x, np.ones(x.shape, dtype=bool)
self.assertReferenceChecks(
gc, op, [X], reference_dropout_test,
# The 'mask' output may be uninitialized
@@ -51,7 +51,7 @@ def test_dropout_ratio0(self, X, in_place, output_mask, gc, dc):
self.assertDeviceChecks(dc, op, [X], [0])

def reference_dropout_ratio0(x):
-return (x,) if is_test else (x, np.ones(x.shape, dtype=np.bool))
+return (x,) if is_test else (x, np.ones(x.shape, dtype=bool))
self.assertReferenceChecks(
gc, op, [X], reference_dropout_ratio0, outputs_to_check=[0])

2 changes: 1 addition & 1 deletion caffe2/python/layer_model_helper.py
@@ -150,7 +150,7 @@ def _get_global_constant_initializer_op(
op_name = 'GivenTensorInt64Fill'
elif array.dtype == str:
op_name = 'GivenTensorStringFill'
-elif array.dtype == np.bool:
+elif array.dtype == bool:
op_name = 'GivenTensorBoolFill'
else:
op_name = 'GivenTensorFill'
2 changes: 1 addition & 1 deletion caffe2/python/operator_test/apmeter_test.py
@@ -20,7 +20,7 @@ def calculate_ap(predictions, labels):
sortind = np.argsort(-scores, kind='mergesort')
truth = label[sortind]
precision = np.cumsum(truth) / num_range
-ap[k] = precision[truth.astype(np.bool)].sum() / max(1, truth.sum())
+ap[k] = precision[truth.astype(bool)].sum() / max(1, truth.sum())
return ap


6 changes: 3 additions & 3 deletions caffe2/python/operator_test/dropout_op_test.py
@@ -37,7 +37,7 @@ def test_dropout_is_test(self, X, in_place, ratio, engine, gc, dc):
# No sense in checking gradients for test phase

def reference_dropout_test(x):
-return x, np.ones(x.shape, dtype=np.bool)
+return x, np.ones(x.shape, dtype=bool)
self.assertReferenceChecks(
gc, op, [X], reference_dropout_test,
# The 'mask' output may be uninitialized
@@ -69,7 +69,7 @@ def test_dropout_ratio0(self, X, in_place, output_mask, engine, gc, dc):
self.assertGradientChecks(gc, op, [X], 0, [0])

def reference_dropout_ratio0(x):
-return (x,) if is_test else (x, np.ones(x.shape, dtype=np.bool))
+return (x,) if is_test else (x, np.ones(x.shape, dtype=bool))
self.assertReferenceChecks(
gc, op, [X], reference_dropout_ratio0,
# Don't check the mask with cuDNN because it's packed data
@@ -101,7 +101,7 @@ def test_dropout_ratio1(self, X, in_place, output_mask, engine, gc, dc):
self.assertGradientChecks(gc, op, [X], 0, [0])

def reference_dropout_ratio1(x):
-return (x,) if is_test else (np.zeros(x.shape, dtype=np.float), np.zeros(x.shape, dtype=np.bool))
+return (x,) if is_test else (np.zeros(x.shape, dtype=np.float), np.zeros(x.shape, dtype=bool))
self.assertReferenceChecks(
gc, op, [X], reference_dropout_ratio1,
# Don't check the mask with cuDNN because it's packed data
2 changes: 1 addition & 1 deletion caffe2/python/operator_test/elementwise_ops_test.py
@@ -994,7 +994,7 @@ def reciprocal_op(X):
gc, op, [X], 0, [0], stepsize=1e-3, threshold=0.05,
ensure_outputs_are_inferred=True)

-@given(X=hu.tensor(dtype=np.bool), **hu.gcs)
+@given(X=hu.tensor(dtype=bool), **hu.gcs)
@settings(deadline=10000)
def test_not(self, X, gc, dc):
def not_op(X):
30 changes: 15 additions & 15 deletions caffe2/python/operator_test/feature_maps_ops_test.py
@@ -27,7 +27,7 @@ def test_merge_dense_feature_tensors(self):
)
workspace.FeedBlob(
"in1_presence",
-np.array([[True, False, False, True], [False, True, True, False]], dtype=np.bool)
+np.array([[True, False, False, True], [False, True, True, False]], dtype=bool)
)

workspace.RunOperatorOnce(op)
@@ -66,7 +66,7 @@ def test_merge_single_scalar_feature_tensors(self):
)
workspace.FeedBlob(
"in1_presence",
-np.array([True, False], dtype=np.bool)
+np.array([True, False], dtype=bool)
)
# Input 2.
workspace.FeedBlob(
@@ -75,7 +75,7 @@ def test_merge_single_scalar_feature_tensors(self):
)
workspace.FeedBlob(
"in2_presence",
-np.array([True, True], dtype=np.bool)
+np.array([True, True], dtype=bool)
)

workspace.RunOperatorOnce(op)
@@ -110,15 +110,15 @@ def test_merge_single_scalar_feature_tensors_gradient(self):
# Inputs 1, 2 & 3.
workspace.FeedBlob(
"in1_presence",
-np.array([True, False], dtype=np.bool)
+np.array([True, False], dtype=bool)
)
workspace.FeedBlob(
"in2_presence",
-np.array([True, True], dtype=np.bool)
+np.array([True, True], dtype=bool)
)
workspace.FeedBlob(
"in3_presence",
-np.array([False, True], dtype=np.bool)
+np.array([False, True], dtype=bool)
)
# Input 4.
workspace.FeedBlob(
@@ -158,15 +158,15 @@ def test_merge_single_scalar_feature_tensors_gradient_with_strings(self):
# Inputs 1, 2 & 3.
workspace.FeedBlob(
"in1_presence",
-np.array([True, False], dtype=np.bool)
+np.array([True, False], dtype=bool)
)
workspace.FeedBlob(
"in2_presence",
-np.array([True, True], dtype=np.bool)
+np.array([True, True], dtype=bool)
)
workspace.FeedBlob(
"in3_presence",
-np.array([False, True], dtype=np.bool)
+np.array([False, True], dtype=bool)
)
# Input 4.
workspace.FeedBlob(
Expand Down Expand Up @@ -214,7 +214,7 @@ def test_merge_single_list_feature_tensors(self):
)
workspace.FeedBlob(
"in1_presence",
-np.array([True, False], dtype=np.bool)
+np.array([True, False], dtype=bool)
)
# Input 2.
workspace.FeedBlob(
@@ -227,7 +227,7 @@ def test_merge_single_list_feature_tensors(self):
)
workspace.FeedBlob(
"in2_presence",
-np.array([True, True], dtype=np.bool)
+np.array([True, True], dtype=bool)
)

workspace.RunOperatorOnce(op)
@@ -280,7 +280,7 @@ def _test_merge_single_list_or_map_feature_tensors_gradient(self, op_name):
)
workspace.FeedBlob(
"in1_presence",
-np.array([True, False], dtype=np.bool)
+np.array([True, False], dtype=bool)
)
# Input 2.
workspace.FeedBlob(
@@ -289,7 +289,7 @@ def _test_merge_single_list_or_map_feature_tensors_gradient(self, op_name):
)
workspace.FeedBlob(
"in2_presence",
-np.array([True, True], dtype=np.bool)
+np.array([True, True], dtype=bool)
)
workspace.FeedBlob(
"out_values_values_grad",
@@ -336,7 +336,7 @@ def test_merge_single_map_feature_tensors(self):
)
workspace.FeedBlob(
"in1_presence",
-np.array([True, False], dtype=np.bool)
+np.array([True, False], dtype=bool)
)
# Input 2.
workspace.FeedBlob(
@@ -353,7 +353,7 @@ def test_merge_single_map_feature_tensors(self):
)
workspace.FeedBlob(
"in2_presence",
-np.array([True, True], dtype=np.bool)
+np.array([True, True], dtype=bool)
)

workspace.RunOperatorOnce(op)
6 changes: 3 additions & 3 deletions caffe2/python/operator_test/load_save_test.py
@@ -42,7 +42,7 @@ def __init__(self, methodName, db_type='minidb'):
def load_save(self, src_device_type, src_gpu_id,
dst_device_type, dst_gpu_id):
workspace.ResetWorkspace()
-dtypes = [np.float16, np.float32, np.float64, np.bool, np.int8,
+dtypes = [np.float16, np.float32, np.float64, bool, np.int8,
np.int16, np.int32, np.int64, np.uint8, np.uint16]
arrays = [np.random.permutation(6).reshape(2, 3).astype(T)
for T in dtypes]
@@ -127,7 +127,7 @@ def _LoadTest(keep_device, device_type, gpu_id, blobs, loadAll):
def saveFile(
self, tmp_folder: Path, db_name: str, db_type: str, start_blob_id: int
) -> Tuple[str, List[np.ndarray]]:
-dtypes = [np.float16, np.float32, np.float64, np.bool, np.int8,
+dtypes = [np.float16, np.float32, np.float64, bool, np.int8,
np.int16, np.int32, np.int64, np.uint8, np.uint16]
arrays = [np.random.permutation(6).reshape(2, 3).astype(T)
for T in dtypes]
@@ -153,7 +153,7 @@ def testLoadSave(self):
self.load_save()

def testRepeatedArgs(self):
-dtypes = [np.float16, np.float32, np.float64, np.bool, np.int8,
+dtypes = [np.float16, np.float32, np.float64, bool, np.int8,
np.int16, np.int32, np.int64, np.uint8, np.uint16]
arrays = [np.random.permutation(6).reshape(2, 3).astype(T)
for T in dtypes]
2 changes: 1 addition & 1 deletion caffe2/python/operator_test/onnx_while_test.py
@@ -68,7 +68,7 @@ def test_onnx_while_fibb(
disable_scopes=disable_scopes,
)

-condition_arr = np.array(condition).astype(np.bool)
+condition_arr = np.array(condition).astype(bool)
max_trip_count_arr = np.array(max_trip_count).astype(np.int64)
first_init = np.array([1]).astype(np.float32)
second_init = np.array([1]).astype(np.float32)
4 changes: 2 additions & 2 deletions caffe2/python/operator_test/pack_ops_test.py
@@ -47,7 +47,7 @@ def pack_segments_ref(lengths, data, max_length=max_length):
pad_length = max_length - length
presence_arr.append(
np.pad(
-np.ones((length), dtype=np.bool), ((0, pad_length)),
+np.ones((length), dtype=bool), ((0, pad_length)),
mode=str("constant")
)
)
@@ -305,7 +305,7 @@ def test_presence_mask(self, gc, dc):
presence_mask = workspace.FetchBlob('p')
expected_presence_mask = np.array(
[[True, False, False], [True, True, False], [True, True, True]],
-dtype=np.bool
+dtype=bool
)
self.assertEqual(presence_mask.shape, expected_presence_mask.shape)
np.testing.assert_array_equal(presence_mask, expected_presence_mask)
2 changes: 1 addition & 1 deletion caffe2/python/operator_test/shape_inference_test.py
@@ -474,7 +474,7 @@ def testCast(self):
model = model_helper.ModelHelper(name="test_model")

types = [
-('bool', np.bool, caffe2_pb2.TensorProto.BOOL),
+('bool', bool, caffe2_pb2.TensorProto.BOOL),
#('byte', None, caffe2_pb2.TensorProto.BYTE),
('int8', np.int8, caffe2_pb2.TensorProto.INT8),
('uint8', np.uint8, caffe2_pb2.TensorProto.UINT8),
2 changes: 1 addition & 1 deletion caffe2/python/schema.py
@@ -1256,7 +1256,7 @@ def InitEmptyRecord(net, schema_or_record, enforce_types=False):
(np.float16, core.DataType.FLOAT16),
(np.float32, core.DataType.FLOAT),
(np.float64, core.DataType.DOUBLE),
-(np.bool, core.DataType.BOOL),
+(bool, core.DataType.BOOL),
(np.int8, core.DataType.INT8),
(np.int16, core.DataType.INT16),
(np.int32, core.DataType.INT32),
2 changes: 1 addition & 1 deletion caffe2/python/sparse_to_dense_mask_test.py
@@ -150,7 +150,7 @@ def test_sparse_to_dense_mask_presence_mask(self):
expected_output = np.array([[11, -1], [-1, 12]], dtype=np.float)
expected_presence_mask = np.array(
[[True, False], [False, True]],
-dtype=np.bool)
+dtype=bool)
self.assertEqual(output.shape, expected_output.shape)
np.testing.assert_array_equal(output, expected_output)
self.assertEqual(presence_mask.shape, expected_presence_mask.shape)
8 changes: 4 additions & 4 deletions caffe2/python/workspace_test.py
@@ -184,7 +184,7 @@ def testFetchFeedBlobTypes(self):
np.float16,
np.float32,
np.float64,
-np.bool,
+bool,
np.int8,
np.int16,
np.int32,
@@ -205,20 +205,20 @@

def testFetchFeedBlobBool(self):
"""Special case for bool to ensure coverage of both true and false."""
-data = np.zeros((2, 3, 4)).astype(np.bool)
+data = np.zeros((2, 3, 4)).astype(bool)
data.flat[::2] = True
self.assertEqual(workspace.FeedBlob("testblob_types", data), True)
fetched_back = workspace.FetchBlob("testblob_types")
self.assertEqual(fetched_back.shape, (2, 3, 4))
-self.assertEqual(fetched_back.dtype, np.bool)
+self.assertEqual(fetched_back.dtype, bool)
np.testing.assert_array_equal(fetched_back, data)

def testGetBlobSizeBytes(self):
for dtype in [
np.float16,
np.float32,
np.float64,
-np.bool,
+bool,
np.int8,
np.int16,
np.int32,
2 changes: 1 addition & 1 deletion torch/_torch_docs.py
@@ -4396,7 +4396,7 @@ def merge_dicts(*dicts):
It currently accepts :attr:`ndarray` with dtypes of ``numpy.float64``,
``numpy.float32``, ``numpy.float16``, ``numpy.complex64``, ``numpy.complex128``,
``numpy.int64``, ``numpy.int32``, ``numpy.int16``, ``numpy.int8``, ``numpy.uint8``,
-and ``numpy.bool``.
+and ``bool``.
.. warning::
Writing to a tensor created from a read-only NumPy array is not supported and will result in undefined behavior.
