From 8e3c85bda4d7ade770799938a63a9166c7529ce5 Mon Sep 17 00:00:00 2001
From: Xiaomin Fang
Date: Thu, 17 Dec 2020 14:21:51 +0800
Subject: [PATCH] fix tape_dynamic

Make the tape_dynamic demo runnable end to end:

- demos/run_train.sh: raise the default batch_size from 16 to 64,
  uncomment the PYTHONPATH export so imports resolve from the
  repository root, default CUDA_VISIBLE_DEVICES to GPU 0, and add
  the missing trailing newline.
- protein_sequence_model_dynamic.py: use the explicit
  super(ClassName, self).__init__() form in both model classes.
- train.py: build checkpoint paths from args.model_dir, the flag
  that demos/run_train.sh actually passes, instead of args.model.
---
 apps/pretrained_protein/tape_dynamic/demos/run_train.sh | 8 ++++----
 .../tape_dynamic/protein_sequence_model_dynamic.py      | 4 ++--
 apps/pretrained_protein/tape_dynamic/train.py           | 4 ++--
 3 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/apps/pretrained_protein/tape_dynamic/demos/run_train.sh b/apps/pretrained_protein/tape_dynamic/demos/run_train.sh
index 6e3971a5..0bd532db 100644
--- a/apps/pretrained_protein/tape_dynamic/demos/run_train.sh
+++ b/apps/pretrained_protein/tape_dynamic/demos/run_train.sh
@@ -2,7 +2,7 @@
 
 source ~/.bashrc
 
-batch_size="16"
+batch_size="64"
 lr="0.001"
 regularization="0"
 thread_num="8" # thread_num is for cpu, please set CUDA_VISIBLE_DEVICES for gpu
@@ -16,7 +16,7 @@ distributed="false" # candidates: true/false
 
 train_data="./toy_data/${task}/npz"
 test_data="./toy_data/${task}/npz/valid"
-# export PYTHONPATH="../../../../"
+export PYTHONPATH="../../../../"
 
 if [ "${distributed}" == "true" ]; then
     if [ "${use_cuda}" == "true" ]; then
@@ -42,7 +42,7 @@ if [ "${distributed}" == "true" ]; then
     fi
 else
     if [ "${use_cuda}" == "true" ]; then
-        export CUDA_VISIBLE_DEVICES="2"
+        export CUDA_VISIBLE_DEVICES="0"
         python ../train.py \
                 --train_data ${train_data} \
                 --test_data ${test_data} \
@@ -64,4 +64,4 @@
                 --model_config ${model_config} \
                 --model_dir ${model_dir}
     fi
-fi
\ No newline at end of file
+fi
diff --git a/apps/pretrained_protein/tape_dynamic/protein_sequence_model_dynamic.py b/apps/pretrained_protein/tape_dynamic/protein_sequence_model_dynamic.py
index 9d7588e0..c1a584de 100644
--- a/apps/pretrained_protein/tape_dynamic/protein_sequence_model_dynamic.py
+++ b/apps/pretrained_protein/tape_dynamic/protein_sequence_model_dynamic.py
@@ -47,7 +47,7 @@ def __init__(self,
             epsilon (float, optional): epsilon. Defaults to 1e-5.
             dropout_rate (float, optional): dropout rate. Defaults to 0.1.
         """
-        super().__init__()
+        super(LstmSeqClassificationModel, self).__init__()
         self.padding_idx = padding_idx
         self.embedder = nn.Embedding(vocab_size,
                                      emb_dim,
@@ -195,7 +195,7 @@ def __init__(self,
             epsilon (float, optional): epsilon. Defaults to 1e-5.
             dropout_rate (float, optional): dropout rate. Defaults to 0.1.
         """
-        super().__init__()
+        super(TransformerSeqClassificationModel, self).__init__()
         self.padding_idx = padding_idx
         self.embedder = nn.Embedding(vocab_size,
                                      emb_dim,
diff --git a/apps/pretrained_protein/tape_dynamic/train.py b/apps/pretrained_protein/tape_dynamic/train.py
index 4aba08df..4e11a62c 100644
--- a/apps/pretrained_protein/tape_dynamic/train.py
+++ b/apps/pretrained_protein/tape_dynamic/train.py
@@ -181,9 +181,9 @@ def main(args):
         print("Average loss: %.5f" % avg_loss)
 
         print("Save model epoch%d." % epoch)
-        param_path = os.path.join(args.model, 'epoch%d' % epoch,
+        param_path = os.path.join(args.model_dir, 'epoch%d' % epoch,
                                   'saved_params.pdparams')
-        opt_path = os.path.join(args.model, 'epoch%d' % epoch,
+        opt_path = os.path.join(args.model_dir, 'epoch%d' % epoch,
                                 'saved_opt.pdopt')
         paddle.save(model.state_dict(), param_path)
         paddle.save(optimizer.state_dict(), opt_path)
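
For context on the train.py hunk: the corrected args.model_dir paths feed
a plain paddle.save / paddle.load checkpoint round trip. Below is a
minimal sketch of that round trip; the Linear stand-in model, the literal
model_dir, and the epoch value are hypothetical placeholders, not part of
the app (train.py builds its models from
protein_sequence_model_dynamic.py and takes model_dir from argparse).

    import os

    import paddle
    import paddle.nn as nn

    # Stand-in model/optimizer for illustration only; the app uses the
    # LSTM/Transformer models patched above.
    model = nn.Linear(8, 2)
    optimizer = paddle.optimizer.Adam(parameters=model.parameters())

    # Hypothetical values; train.py uses args.model_dir and the epoch
    # counter of its training loop.
    model_dir, epoch = "./models", 0

    # Same path scheme the patch now builds from args.model_dir.
    param_path = os.path.join(model_dir, 'epoch%d' % epoch,
                              'saved_params.pdparams')
    opt_path = os.path.join(model_dir, 'epoch%d' % epoch,
                            'saved_opt.pdopt')

    # Save the state dicts, as train.py does at the end of each epoch.
    os.makedirs(os.path.dirname(param_path), exist_ok=True)
    paddle.save(model.state_dict(), param_path)
    paddle.save(optimizer.state_dict(), opt_path)

    # Restore both, e.g. to resume training from a saved epoch.
    model.set_state_dict(paddle.load(param_path))
    optimizer.set_state_dict(paddle.load(opt_path))

Restoring the optimizer state alongside the parameters keeps Adam's
moment estimates consistent with the reloaded weights when training
resumes.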