
Commit bb1319e

ppwwyyxx authored and pruthvistony committed
fix numpy1.24 deprecations in unittests (pytorch#93997)
Fixes pytorch#91329

Pull Request resolved: pytorch#93997
Approved by: https://github.com/ngimel, https://github.com/jerryzh168
1 parent 9d6676d commit bb1319e
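
Note on the change: the deprecations in question are the NumPy scalar-type aliases (np.int, np.float, np.bool) that were deprecated in NumPy 1.20 and removed in 1.24; the fix replaces them with the builtin types they aliased. A minimal sketch of the before/after behavior, assuming NumPy >= 1.24 is installed:

    import numpy as np

    # The removed aliases now raise AttributeError on NumPy >= 1.24.
    try:
        np.zeros(3).astype(np.int)
    except AttributeError as exc:
        print(exc)

    # The aliases were just the builtins, so the replacement preserves behavior.
    a = np.zeros(3).astype(int)    # platform default integer, e.g. int64
    b = np.ones(3, dtype=float)    # float64, which is what np.float aliased
    print(a.dtype, b.dtype)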

6 files changed: +10 -10 lines


test/quantization/core/test_quantized_op.py

Lines changed: 3 additions & 3 deletions

@@ -3007,7 +3007,7 @@ def test_qlinear(self, batch_size, input_channels, output_channels,
         # W_scale = 1.0
         # W_zp = 0
         W_scales = np.ones(output_channels)
-        W_zps = np.zeros(output_channels).astype(np.int)
+        W_zps = np.zeros(output_channels).astype(int)
         W_value_min = -128
         W_value_max = 127
         W_q0 = np.round(
@@ -3571,9 +3571,9 @@ def _test_qlinear_impl(self, batch_size, input_channels, output_channels, use_bi
         # xnnpack forces W_zp to 0 when using symmetric quantization
         # ONEDNN only supports symmetric quantization of weight
         if dtype == torch.qint8 or qengine_is_onednn():
-            W_zps = np.zeros(output_channels).astype(np.int)
+            W_zps = np.zeros(output_channels).astype(int)
         else:
-            W_zps = np.round(np.random.rand(output_channels) * 100 - 50).astype(np.int)
+            W_zps = np.round(np.random.rand(output_channels) * 100 - 50).astype(int)
         # when using symmetric quantization
         # special restriction for xnnpack fully connected op weight
         # [-127, 127] instead of [-128, 127]
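
For context (not part of the commit): np.int was only an alias for the builtin int, so the rewritten lines produce exactly the same zero points as before. A quick check, with output_channels picked arbitrarily for illustration:

    import numpy as np

    output_channels = 8  # arbitrary value for illustration
    W_zps = np.zeros(output_channels).astype(int)
    print(W_zps.dtype)   # platform default integer, e.g. int64 on 64-bit Linux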

test/test_reductions.py

Lines changed: 1 addition & 1 deletion

@@ -1434,7 +1434,7 @@ def test_prod_bool(self, device):
         vals = [[True, True], [True, False], [False, False], []]
         for val in vals:
             result = torch.prod(torch.tensor(val, device=device), dtype=torch.bool).item()
-            expect = np.prod(np.array(val), dtype=np.bool)
+            expect = np.prod(np.array(val), dtype=bool)
             self.assertEqual(result, expect)

             result = torch.prod(torch.tensor(val, device=device)).item()
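
As a sanity check on the replacement (np.bool aliased the builtin bool), the NumPy side of the comparison still behaves the same: a product over booleans acts as a logical AND, and the empty product is the multiplicative identity, True. A small sketch reusing the test's values:

    import numpy as np

    for val in [[True, True], [True, False], [False, False], []]:
        # product over booleans is a logical AND; the empty product is True
        print(val, np.prod(np.array(val), dtype=bool))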

test/test_tensor_creation_ops.py

Lines changed: 2 additions & 2 deletions

@@ -1444,14 +1444,14 @@ def test_linlogspace_mem_overlap(self, device):
     def test_ctor_with_numpy_array(self, device):
         correct_dtypes = [
             np.double,
-            np.float,
+            float,
             np.float16,
             np.int64,
             np.int32,
             np.int16,
             np.int8,
             np.uint8,
-            np.bool,
+            bool,
         ]

         incorrect_byteorder = '>' if sys.byteorder == 'little' else '<'
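
The builtins slot into the dtype list unchanged because NumPy maps them to concrete dtypes, so the test still covers float64 and bool. A small sketch (array values invented) of the round trip through torch.tensor:

    import numpy as np
    import torch

    print(np.dtype(float), np.dtype(bool))   # float64 bool (what np.float / np.bool aliased)

    arr = np.ones(3, dtype=float)
    print(torch.tensor(arr).dtype)           # torch.float64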

test/test_tensorboard.py

Lines changed: 2 additions & 2 deletions

@@ -806,7 +806,7 @@ def test_caffe2_simple_model(self):
         model = ModelHelper(name="mnist")
         # how come those inputs don't break the forward pass =.=a
         workspace.FeedBlob("data", np.random.randn(1, 3, 64, 64).astype(np.float32))
-        workspace.FeedBlob("label", np.random.randn(1, 1000).astype(np.int))
+        workspace.FeedBlob("label", np.random.randn(1, 1000).astype(int))

         with core.NameScope("conv1"):
             conv1 = brew.conv(model, "data", 'conv1', dim_in=1, dim_out=20, kernel=5)
@@ -841,7 +841,7 @@ def test_caffe2_simple_model(self):
     def test_caffe2_simple_cnnmodel(self):
         model = cnn.CNNModelHelper("NCHW", name="overfeat")
         workspace.FeedBlob("data", np.random.randn(1, 3, 64, 64).astype(np.float32))
-        workspace.FeedBlob("label", np.random.randn(1, 1000).astype(np.int))
+        workspace.FeedBlob("label", np.random.randn(1, 1000).astype(int))
         with core.NameScope("conv1"):
             conv1 = model.Conv("data", "conv1", 3, 96, 11, stride=4)
             relu1 = model.Relu(conv1, conv1)

test/test_torch.py

Lines changed: 1 addition & 1 deletion

@@ -6344,7 +6344,7 @@ def test_parsing_intlist(self):
         # fail parse with float variables
         self.assertRaises(TypeError, lambda: torch.ones((torch.tensor(3.), torch.tensor(4))))
         # fail parse with numpy floats
-        self.assertRaises(TypeError, lambda: torch.ones((np.float(3.), torch.tensor(4))))
+        self.assertRaises(TypeError, lambda: torch.ones((3., torch.tensor(4))))
         self.assertRaises(TypeError, lambda: torch.ones((np.array(3.), torch.tensor(4))))

         # fail parse with > 1 element variables

torch/utils/tensorboard/summary.py

Lines changed: 1 addition & 1 deletion

@@ -383,7 +383,7 @@ def make_histogram(values, bins, max_bins=None):
         limits = new_limits

     # Find the first and the last bin defining the support of the histogram:
-    cum_counts = np.cumsum(np.greater(counts, 0, dtype=np.int32))
+    cum_counts = np.cumsum(np.greater(counts, 0))
     start, end = np.searchsorted(cum_counts, [0, cum_counts[-1] - 1], side="right")
     start = int(start)
     end = int(end) + 1
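
The NumPy 1.24 issue hit here differs from the alias removals above: requesting a non-boolean dtype= from a comparison ufunc such as np.greater is deprecated. Since np.cumsum promotes the boolean result to an integer dtype anyway, the support computation comes out the same; a small sketch with invented counts:

    import numpy as np

    counts = np.array([0, 2, 5, 0, 1, 0])        # invented example data
    cum_counts = np.cumsum(np.greater(counts, 0))
    start, end = np.searchsorted(cum_counts, [0, cum_counts[-1] - 1], side="right")
    print(cum_counts, (int(start), int(end) + 1))  # cum_counts: [0 1 2 2 3 3]; support bins: (1, 5)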
