Skip to content

Commit 71defaf

Browse files
authored
Fix compatibility issue in machine translation models (#1542)
1 parent c052dc6 commit 71defaf

File tree

4 files changed

+28
-8
lines changed

4 files changed

+28
-8
lines changed

modules/text/machine_translation/transformer/en-de/README.md

+4
Original file line numberDiff line numberDiff line change
@@ -119,3 +119,7 @@ paddlehub >= 2.1.0
119119
* 1.0.0
120120

121121
初始发布
122+
123+
* 1.0.1
124+
125+
修复模型初始化的兼容性问题

modules/text/machine_translation/transformer/en-de/module.py

+10-4
Original file line numberDiff line numberDiff line change
@@ -12,13 +12,15 @@
1212
# See the License for the specific language governing permissions and
1313
# limitations under the License.
1414

15-
from typing import List
1615
import os
16+
from packaging.version import Version
17+
from typing import List
1718

1819
import paddle
1920
import paddle.nn as nn
2021
from paddlehub.env import MODULE_HOME
2122
from paddlehub.module.module import moduleinfo, serving
23+
import paddlenlp
2224
from paddlenlp.data import Pad, Vocab
2325
from paddlenlp.transformers import InferTransformerModel, position_encoding_init
2426

@@ -27,7 +29,7 @@
2729

2830
@moduleinfo(
2931
name="transformer_en-de",
30-
version="1.0.0",
32+
version="1.0.1",
3133
summary="",
3234
author="PaddlePaddle",
3335
author_email="",
@@ -42,8 +44,6 @@ class MTTransformer(nn.Layer):
4244

4345
# Model config
4446
model_config = {
45-
# Number of sub-layers to be stacked in the encoder and decoder.
46-
"n_layer": 6,
4747
# Number of head used in multi-head attention.
4848
"n_head": 8,
4949
# The dimension for word embeddings, which is also the last dimension of
@@ -59,6 +59,12 @@ class MTTransformer(nn.Layer):
5959
'dropout': 0
6060
}
6161

62+
# Number of sub-layers to be stacked in the encoder and decoder.
63+
if Version(paddlenlp.__version__) <= Version('2.0.5'):
64+
model_config.update({"n_layer": 6})
65+
else:
66+
model_config.update({"num_encoder_layers": 6, "num_decoder_layers": 6})
67+
6268
# Vocab config
6369
vocab_config = {
6470
# Used to pad vocab size to be multiple of pad_factor.

modules/text/machine_translation/transformer/zh-en/README.md

+4
Original file line numberDiff line numberDiff line change
@@ -117,3 +117,7 @@ paddlehub >= 2.1.0
117117
* 1.0.0
118118

119119
初始发布
120+
121+
* 1.0.1
122+
123+
修复模型初始化的兼容性问题

modules/text/machine_translation/transformer/zh-en/module.py

+10-4
Original file line numberDiff line numberDiff line change
@@ -12,13 +12,15 @@
1212
# See the License for the specific language governing permissions and
1313
# limitations under the License.
1414

15-
from typing import List
1615
import os
16+
from packaging.version import Version
17+
from typing import List
1718

1819
import paddle
1920
import paddle.nn as nn
2021
from paddlehub.env import MODULE_HOME
2122
from paddlehub.module.module import moduleinfo, serving
23+
import paddlenlp
2224
from paddlenlp.data import Pad, Vocab
2325
from paddlenlp.transformers import InferTransformerModel, position_encoding_init
2426

@@ -27,7 +29,7 @@
2729

2830
@moduleinfo(
2931
name="transformer_zh-en",
30-
version="1.0.0",
32+
version="1.0.1",
3133
summary="",
3234
author="PaddlePaddle",
3335
author_email="",
@@ -42,8 +44,6 @@ class MTTransformer(nn.Layer):
4244

4345
# Model config
4446
model_config = {
45-
# Number of sub-layers to be stacked in the encoder and decoder.
46-
"n_layer": 6,
4747
# Number of head used in multi-head attention.
4848
"n_head": 8,
4949
# The dimension for word embeddings, which is also the last dimension of
@@ -59,6 +59,12 @@ class MTTransformer(nn.Layer):
5959
'dropout': 0
6060
}
6161

62+
# Number of sub-layers to be stacked in the encoder and decoder.
63+
if Version(paddlenlp.__version__) <= Version('2.0.5'):
64+
model_config.update({"n_layer": 6})
65+
else:
66+
model_config.update({"num_encoder_layers": 6, "num_decoder_layers": 6})
67+
6268
# Vocab config
6369
vocab_config = {
6470
# Used to pad vocab size to be multiple of pad_factor.

0 commit comments

Comments (0)