Skip to content

Commit fedaa36

Browse files
yxy235 and Ubuntu authored
[Example] Fix numpy errors in examples. (#6554)
Co-authored-by: Ubuntu <ubuntu@ip-172-31-0-133.us-west-2.compute.internal>
1 parent eb43489 commit fedaa36

File tree

7 files changed

+15
-15
lines changed

7 files changed

+15
-15
lines changed

examples/mxnet/scenegraph/train_freq_prior.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -89,7 +89,7 @@ def box_ious(boxes):
8989
for col, row in zip(cols, rows):
9090
bg_matrix[gt_classes[col], gt_classes[row]] += 1
9191
else:
92-
all_possib = np.ones_like(iou_mat, dtype=np.bool)
92+
all_possib = np.ones_like(iou_mat, dtype=np.bool_)
9393
np.fill_diagonal(all_possib, 0)
9494
cols, rows = np.where(all_possib)
9595
for col, row in zip(cols, rows):

examples/pytorch/bgrl/eval_function.py

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,7 @@ def fit_logistic_regression(X, y, data_random_seed=1, repeat=1):
1111
# transform targets to one-hot vector
1212
one_hot_encoder = OneHotEncoder(categories="auto", sparse=False)
1313

14-
y = one_hot_encoder.fit_transform(y.reshape(-1, 1)).astype(np.bool)
14+
y = one_hot_encoder.fit_transform(y.reshape(-1, 1)).astype(np.bool_)
1515

1616
# normalize x
1717
X = normalize(X, norm="l2")
@@ -42,7 +42,7 @@ def fit_logistic_regression(X, y, data_random_seed=1, repeat=1):
4242
y_pred = clf.predict_proba(X_test)
4343
y_pred = np.argmax(y_pred, axis=1)
4444
y_pred = one_hot_encoder.transform(y_pred.reshape(-1, 1)).astype(
45-
np.bool
45+
np.bool_
4646
)
4747

4848
test_acc = metrics.accuracy_score(y_test, y_pred)
@@ -55,7 +55,7 @@ def fit_logistic_regression_preset_splits(
5555
):
5656
# transform targets to one-hot vector
5757
one_hot_encoder = OneHotEncoder(categories="auto", sparse=False)
58-
y = one_hot_encoder.fit_transform(y.reshape(-1, 1)).astype(np.bool)
58+
y = one_hot_encoder.fit_transform(y.reshape(-1, 1)).astype(np.bool_)
5959

6060
# normalize x
6161
X = normalize(X, norm="l2")
@@ -84,7 +84,7 @@ def fit_logistic_regression_preset_splits(
8484
y_pred = clf.predict_proba(X_val)
8585
y_pred = np.argmax(y_pred, axis=1)
8686
y_pred = one_hot_encoder.transform(y_pred.reshape(-1, 1)).astype(
87-
np.bool
87+
np.bool_
8888
)
8989
val_acc = metrics.accuracy_score(y_val, y_pred)
9090
if val_acc > best_acc:
@@ -93,7 +93,7 @@ def fit_logistic_regression_preset_splits(
9393
y_pred = np.argmax(y_pred, axis=1)
9494
y_pred = one_hot_encoder.transform(
9595
y_pred.reshape(-1, 1)
96-
).astype(np.bool)
96+
).astype(np.bool_)
9797
best_test_acc = metrics.accuracy_score(y_test, y_pred)
9898

9999
accuracies.append(best_test_acc)

examples/pytorch/dimenet/qm9.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -181,7 +181,7 @@ def _load_graph(self):
181181
dist = np.linalg.norm(R[:, None, :] - R[None, :, :], axis=-1)
182182
# keep all edges that don't exceed the cutoff and delete self-loops
183183
adj = sp.csr_matrix(dist <= self.cutoff) - sp.eye(
184-
n_atoms, dtype=np.bool
184+
n_atoms, dtype=np.bool_
185185
)
186186
adj = adj.tocoo()
187187
u, v = torch.tensor(adj.row), torch.tensor(adj.col)

examples/pytorch/grace/eval.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -34,7 +34,7 @@ def wrapper(*args, **kwargs):
3434

3535

3636
def prob_to_one_hot(y_pred):
37-
ret = np.zeros(y_pred.shape, np.bool)
37+
ret = np.zeros(y_pred.shape, np.bool_)
3838
indices = np.argmax(y_pred, axis=1)
3939
for i in range(y_pred.shape[0]):
4040
ret[i][indices[i]] = True
@@ -61,7 +61,7 @@ def label_classification(
6161
Y = y.detach().cpu().numpy()
6262
Y = Y.reshape(-1, 1)
6363
onehot_encoder = OneHotEncoder(categories="auto").fit(Y)
64-
Y = onehot_encoder.transform(Y).toarray().astype(np.bool)
64+
Y = onehot_encoder.transform(Y).toarray().astype(np.bool_)
6565

6666
X = normalize(X, norm="l2")
6767

examples/pytorch/graphsaint/utils.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -70,14 +70,14 @@ def load_data(args, multilabel):
7070
DataType = namedtuple("Dataset", ["num_classes", "train_nid", "g"])
7171

7272
adj_full = scipy.sparse.load_npz("./{}/adj_full.npz".format(prefix)).astype(
73-
np.bool
73+
np.bool_
7474
)
7575
g = dgl.from_scipy(adj_full)
7676
num_nodes = g.num_nodes()
7777

7878
adj_train = scipy.sparse.load_npz(
7979
"./{}/adj_train.npz".format(prefix)
80-
).astype(np.bool)
80+
).astype(np.bool_)
8181
train_nid = np.array(list(set(adj_train.nonzero()[0])))
8282

8383
role = json.load(open("./{}/role.json".format(prefix)))

examples/pytorch/model_zoo/geometric/coarsening.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -157,7 +157,7 @@ def HEM_one_level(rr, cc, vv, rid, weights):
157157
nnz = rr.shape[0]
158158
N = rr[nnz - 1] + 1
159159

160-
marked = np.zeros(N, np.bool)
160+
marked = np.zeros(N, np.bool_)
161161
rowstart = np.zeros(N, np.int32)
162162
rowlength = np.zeros(N, np.int32)
163163
cluster_id = np.zeros(N, np.int32)

examples/pytorch/pinsage/data_utils.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -11,9 +11,9 @@
1111
# takes. It essentially follows the intuition of "training on the past and predict the future".
1212
# One can also change the threshold to make validation and test set take larger proportions.
1313
def train_test_split_by_time(df, timestamp, user):
14-
df["train_mask"] = np.ones((len(df),), dtype=np.bool)
15-
df["val_mask"] = np.zeros((len(df),), dtype=np.bool)
16-
df["test_mask"] = np.zeros((len(df),), dtype=np.bool)
14+
df["train_mask"] = np.ones((len(df),), dtype=np.bool_)
15+
df["val_mask"] = np.zeros((len(df),), dtype=np.bool_)
16+
df["test_mask"] = np.zeros((len(df),), dtype=np.bool_)
1717
df = dd.from_pandas(df, npartitions=10)
1818

1919
def train_test_split(df):

0 commit comments

Comments (0)