Skip to content

Commit 850a594

Browse files
authored
Merge pull request #241 from PaddlePaddle/fix_tensor
Fix zero dimension tensor issue
2 parents 1bb0387 + 64af89f commit 850a594

File tree

17 files changed

+29
-29
lines changed

17 files changed

+29
-29
lines changed

apps/drug_drug_synergy/RGCN/train.py

+1-1
Original file line number | Diff line number | Diff line change
@@ -87,7 +87,7 @@ def train(num_subgraph, graph, label_idx, epochs, sub_neighbours=[10, 10], init=
8787
fpr, tpr, _ = roc_curve(y_true=ground_truth, y_score=pred_prob)
8888
auc_v = auc(fpr, tpr)
8989
print("sub_graph index : {} | epoch: {} | training loss: {:.4f} | AUC: {:.3f}".format(
90-
sub_g, epoch, train_loss.numpy()[0], auc_v))
90+
sub_g, epoch, float(train_loss), auc_v))
9191

9292
return model
9393

apps/drug_target_interaction/batchdta/pairwise/DeepDTA/utils.py

+1-1
Original file line number | Diff line number | Diff line change
@@ -312,7 +312,7 @@ def model_eval(model,val_dataloader):
312312

313313
for i_target_score in range(batch_smiles.shape[0]):
314314

315-
i_target_len = int(batch_len[i_target_score].numpy()[0])
315+
i_target_len = int(batch_len[i_target_score])
316316
smiles = batch_smiles[i_target_score][0:i_target_len]
317317
target = batch_protein[i_target_score][0:i_target_len]
318318
y_label = batch_y[i_target_score][0:i_target_len].numpy()

apps/drug_target_interaction/batchdta/pairwise/GraphDTA/run_pairwise_GraphDTA_CV.py

+2-2
Original file line number | Diff line number | Diff line change
@@ -195,9 +195,9 @@ def model_eval(model,val_dataloader,device):
195195
i_data = i_data.to(device)
196196
pred_scores = model.forward_single(i_data)
197197
# get the predicted labels
198-
i_target_pred_scores.append(pred_scores.cpu().numpy()[0])
198+
i_target_pred_scores.append(float(pred_scores))
199199
# get the true labels
200-
i_target_y_label.append(i_data.y.cpu().numpy()[0])
200+
i_target_y_label.append(float(i_data.y.cpu()))
201201

202202
i_target_pred_scores = np.array(i_target_pred_scores)
203203
i_target_y_label = np.array(i_target_y_label)

apps/drug_target_interaction/batchdta/pairwise/Moltrans/helper/utils/paddle_tensor.py

+1-1
Original file line number | Diff line number | Diff line change
@@ -32,7 +32,7 @@ def item(self):
3232
"""
3333
Item function
3434
"""
35-
return self.numpy()[0]
35+
return float(self)
3636

3737

3838
@add_tensor_function

apps/drug_target_interaction/batchdta/pairwise/Moltrans/run_pairwise_Moltrans_CV.py

+1-1
Original file line number | Diff line number | Diff line change
@@ -297,7 +297,7 @@ def model_eval(model,val_dataloader,len_SMILES,len_target):
297297

298298
for i_target_score in range(batch_x.shape[0]):
299299

300-
i_target_len = int(batch_len[i_target_score].numpy()[0])
300+
i_target_len = int(batch_len[i_target_score])
301301
smiles = batch_x_smiles[i_target_score][0:i_target_len]
302302
target = batch_x_protein[i_target_score][0:i_target_len]
303303
smiles_mask = batch_x_smiles_mask[i_target_score][0:i_target_len]

apps/drug_target_interaction/batchdta/pairwise/Moltrans/run_pairwise_Moltrans_bindingDB.py

+1-1
Original file line number | Diff line number | Diff line change
@@ -282,7 +282,7 @@ def model_eval(model,val_dataloader,len_SMILES,len_target):
282282

283283
for i_target_score in range(batch_x.shape[0]):
284284

285-
i_target_len = int(batch_len[i_target_score].numpy()[0])
285+
i_target_len = int(batch_len[i_target_score])
286286
smiles = batch_x_smiles[i_target_score][0:i_target_len]
287287
target = batch_x_protein[i_target_score][0:i_target_len]
288288
smiles_mask = batch_x_smiles_mask[i_target_score][0:i_target_len]

apps/drug_target_interaction/batchdta/pointwise/DeepDTA/train_bindingdb.py

+1-1
Original file line number | Diff line number | Diff line change
@@ -60,7 +60,7 @@ def training(model, training_loader, optim):
6060
optim.clear_grad()
6161
loss.backward()
6262
optim.step()
63-
res_loss = loss.numpy()[0]
63+
res_loss = float(loss)
6464
return res_loss
6565

6666

apps/drug_target_interaction/batchdta/pointwise/DeepDTA/train_davis.py

+1-1
Original file line number | Diff line number | Diff line change
@@ -60,7 +60,7 @@ def training(model, training_loader, optim):
6060
optim.clear_grad()
6161
loss.backward()
6262
optim.step()
63-
res_loss = loss.numpy()[0]
63+
res_loss = float(loss)
6464
return res_loss
6565

6666

apps/drug_target_interaction/batchdta/pointwise/DeepDTA/train_kiba.py

+1-1
Original file line number | Diff line number | Diff line change
@@ -63,7 +63,7 @@ def training(model, training_loader, optim):
6363
optim.clear_grad()
6464
loss.backward()
6565
optim.step()
66-
res_loss = loss.numpy()[0]
66+
res_loss = float(loss.numpy())
6767
return res_loss
6868

6969

apps/drug_target_interaction/batchdta/pointwise/Moltrans/helper/utils/paddle_tensor.py

+1-1
Original file line number | Diff line number | Diff line change
@@ -32,7 +32,7 @@ def item(self):
3232
"""
3333
Item function
3434
"""
35-
return self.numpy()[0]
35+
return float(self)
3636

3737

3838
@add_tensor_function

apps/drug_target_interaction/moltrans_dti/helper/utils/paddle_tensor.py

+1-1
Original file line number | Diff line number | Diff line change
@@ -32,7 +32,7 @@ def item(self):
3232
"""
3333
Item function
3434
"""
35-
return self.numpy()[0]
35+
return float(self.numpy())
3636

3737

3838
@add_tensor_function

apps/fewshot_molecular_property/chem_lib/models/trainer.py

+1-1
Original file line number | Diff line number | Diff line change
@@ -294,7 +294,7 @@ def train_step(self):
294294
losses_eval.backward()
295295
self.optimizer.step()
296296

297-
print('Train Epoch:',self.train_epoch,', train update step:', k, ', loss_eval:', losses_eval.numpy()[0])
297+
print('Train Epoch:',self.train_epoch,', train update step:', k, ', loss_eval:', float(losses_eval))
298298

299299
return self.model.layers
300300

apps/molecular_generation/SD_VAE/train_zinc.py

+3-3
Original file line number | Diff line number | Diff line change
@@ -122,9 +122,9 @@ def _train_epoch(model, data_loader, epoch, kl_weight, optimizer=None):
122122
optimizer.clear_grad()
123123

124124
# Log
125-
kl_loss_values.append(kl_loss.numpy()[0])
126-
perplexity_loss_values.append(perplexity.numpy()[0])
127-
loss_values.append(loss.numpy()[0])
125+
kl_loss_values.append(float(kl_loss))
126+
perplexity_loss_values.append(float(perplexity))
127+
loss_values.append(float(loss))
128128
lr = (optimizer.get_lr()
129129
if optimizer is not None
130130
else 0)

apps/pretrained_compound/ChemRL/GEM-2/src/paddle_utils.py

+4-4
Original file line number | Diff line number | Diff line change
@@ -37,8 +37,8 @@ def dist_mean(array, distributed=False):
3737
n = len(array)
3838
x_sum = 0 if n == 0 else np.sum(array)
3939
if distributed:
40-
n = dist_all_reduce(paddle.to_tensor(n, dtype='int64')).numpy()[0]
41-
x_sum = dist_all_reduce(paddle.to_tensor(x_sum, dtype='float32')).numpy()[0]
40+
n = int(dist_all_reduce(paddle.to_tensor(n, dtype='int64')))
41+
x_sum = float(dist_all_reduce(paddle.to_tensor(x_sum, dtype='float32')))
4242
x_mean = 0 if n == 0 else x_sum / n
4343
return x_mean
4444

@@ -47,14 +47,14 @@ def dist_sum(array, distributed=False):
4747
n = len(array)
4848
x_sum = 0 if n == 0 else np.sum(array)
4949
if distributed:
50-
x_sum = dist_all_reduce(paddle.to_tensor(x_sum, dtype='float32')).numpy()[0]
50+
x_sum = float(dist_all_reduce(paddle.to_tensor(x_sum, dtype='float32')))
5151
return x_sum
5252

5353

5454
def dist_length(array, distributed=False):
5555
n = len(array)
5656
if distributed:
57-
n = dist_all_reduce(paddle.to_tensor(n, dtype='int64')).numpy()[0]
57+
n = int(dist_all_reduce(paddle.to_tensor(n, dtype='int64')))
5858
return n
5959

6060

apps/pretrained_compound/ChemRL/GEM-2/train_gem2.py

+1-1
Original file line number | Diff line number | Diff line change
@@ -80,7 +80,7 @@ def get_train_steps_per_epoch(dataset_len, args):
8080
min_data_len = paddle.to_tensor(dataset_len)
8181
from paddle.distributed import ReduceOp
8282
dist.all_reduce(min_data_len, ReduceOp.MIN)
83-
dataset_len = min_data_len.numpy()[0]
83+
dataset_len = int(min_data_len)
8484
logging.info(f'min dataset len: {dataset_len}')
8585
return int(dataset_len / args.batch_size) - 5
8686

apps/protein_folding/helixfold-single/tape/others/utils.py

+6-6
Original file line number | Diff line number | Diff line change
@@ -48,8 +48,8 @@ def dist_all_reduce(x, return_num=False, distributed=False):
4848
n = len(x)
4949
x_sum = 0 if n == 0 else np.sum(x)
5050
if distributed:
51-
n = dist.all_reduce(paddle.to_tensor(n, dtype='int64')).numpy()[0]
52-
x_sum = dist.all_reduce(paddle.to_tensor(x_sum, dtype='float32')).numpy()[0]
51+
n = int(dist.all_reduce(paddle.to_tensor(n, dtype='int64')))
52+
x_sum = float(dist.all_reduce(paddle.to_tensor(x_sum, dtype='float32')))
5353
x_mean = 0 if n == 0 else x_sum / n
5454
if return_num:
5555
return x_mean, n
@@ -62,8 +62,8 @@ def dist_mean(x, distributed=False):
6262
n = len(x)
6363
x_sum = 0 if n == 0 else np.sum(x)
6464
if distributed:
65-
n = dist.all_reduce(paddle.to_tensor(n, dtype='int64')).numpy()[0]
66-
x_sum = dist.all_reduce(paddle.to_tensor(x_sum, dtype='float32')).numpy()[0]
65+
n = int(dist.all_reduce(paddle.to_tensor(n, dtype='int64')))
66+
x_sum = float(dist.all_reduce(paddle.to_tensor(x_sum, dtype='float32')))
6767
x_mean = 0 if n == 0 else x_sum / n
6868
return x_mean
6969

@@ -73,15 +73,15 @@ def dist_sum(x, distributed=False):
7373
n = len(x)
7474
x_sum = 0 if n == 0 else np.sum(x)
7575
if distributed:
76-
x_sum = dist.all_reduce(paddle.to_tensor(x_sum, dtype='float32')).numpy()[0]
76+
x_sum = float(dist.all_reduce(paddle.to_tensor(x_sum, dtype='float32')))
7777
return x_sum
7878

7979

8080
def dist_length(x, distributed=False):
8181
"""tbd"""
8282
n = len(x)
8383
if distributed:
84-
n = dist.all_reduce(paddle.to_tensor(n, dtype='int64')).numpy()[0]
84+
n = int(dist.all_reduce(paddle.to_tensor(n, dtype='int64')))
8585
return n
8686

8787

apps/protein_folding/helixfold/utils/metric.py

+2-2
Original file line number | Diff line number | Diff line change
@@ -30,8 +30,8 @@ def dist_all_reduce(x, return_num=False, distributed=False):
3030
x_num = len(x)
3131
x_sum = 0 if x_num == 0 else np.sum(x)
3232
if distributed:
33-
x_num = dp.all_reduce(paddle.to_tensor(x_num, dtype='int64')).numpy()[0]
34-
x_sum = dp.all_reduce(paddle.to_tensor(x_sum, dtype='float32')).numpy()[0]
33+
x_num = int(dp.all_reduce(paddle.to_tensor(x_num, dtype='int64')))
34+
x_sum = float(dp.all_reduce(paddle.to_tensor(x_sum, dtype='float32')))
3535
x_mean = 0 if x_num == 0 else x_sum / x_num
3636
if return_num:
3737
return x_mean, x_num

0 commit comments

Comments (0)