
Commit fd5135a

add unittest for qwen-vl (PaddlePaddle#455)
1 parent 59a8068 commit fd5135a

File tree

1 file changed (+144 −0 lines)


tests/models/test_qwenvl.py

Lines changed: 144 additions & 0 deletions
@@ -0,0 +1,144 @@
# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import sys

# Make the repository root importable so the paddlemix package resolves.
sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(__file__)), "../.."))
import unittest

import paddle
from paddlenlp.transformers.qwen.configuration import QWenConfig

from paddlemix import QWenLMHeadModel, QwenVLProcessor, QWenVLTokenizer
from tests.models.test_configuration_common import ConfigTester
from tests.models.test_modeling_common import ModelTesterMixin
from tests.testing_utils import slow


class QWenLMHeadModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.model_name_or_path = "qwen-vl/qwen-vl-chat-7b"
        self.tokenizer = QWenVLTokenizer.from_pretrained(self.model_name_or_path, dtype="float16")
        self.processor = QwenVLProcessor(tokenizer=self.tokenizer)

    def get_config(self):
        # A deliberately tiny configuration (single layer, single attention
        # head, short sequence length) so the model builds and runs quickly.
        config = {
            "_name_or_path": "./",
            "architectures": ["QWenLMHeadModel"],
            "llm_pretrained_model_name_or_path": "qwen/qwen-7b",
            "attn_dropout_prob": 0.0,
            "auto_map": {"AutoConfig": "QWenConfig", "AutoModelForCausalLM": "QWenLMHeadModel"},
            "emb_dropout_prob": 0.0,
            "hidden_size": 4096,
            "initializer_range": 0.02,
            "intermediate_size": 1,
            "kv_channels": 1,
            "layer_norm_epsilon": 1e-06,
            "max_position_embeddings": 2,
            "model_type": "qwen",
            "no_bias": True,
            "num_attention_heads": 1,
            "num_hidden_layers": 1,
            "onnx_safe": None,
            "rotary_emb_base": 1,
            "rotary_pct": 1.0,
            "scale_attn_weights": True,
            "seq_length": 2,
            "tie_word_embeddings": False,
            "tokenizer_type": "QWenTokenizer",
            "dtype": "float16",
            "transformers_version": "4.31.0",
            "use_cache": True,
            "recompute": True,
            "use_dynamic_ntk": True,
            "use_flash_attn": False,
            "use_logn_attn": True,
            "use_flash_attention": True,
            "use_fused_rms_norm": True,
            "use_fused_rope": True,
            "visual": {
                "heads": 1,
                "image_size": 448,
                "image_start_id": 151857,
                "layers": 1,
                "mlp_ratio": 1,
                "output_dim": 128,
                "patch_size": 14,
                "width": 1664,
            },
            "vocab_size": 2,
        }

        return QWenConfig(**config)

    def prepare_config_and_inputs(self):
        # Build a multimodal query (image URL plus grounding prompt) and let
        # the processor turn it into Paddle tensors.
        query = []
        query.append({"image": "https://bj.bcebos.com/v1/paddlenlp/models/community/GroundingDino/000000004505.jpg"})
        query.append({"text": "Generate the caption in English with grounding:"})
        inputs = self.processor(query=query, return_tensors="pd")
        config = self.get_config()

        return config, inputs

    def prepare_config_and_inputs_for_common(self):
        config, inputs = self.prepare_config_and_inputs()
        return config, inputs

    def create_and_check_model(self, kwargs):
        model = QWenLMHeadModel(config=self.get_config())
        model.eval()
        with paddle.no_grad():
            result = model(**kwargs)

        self.parent.assertIsNotNone(result)


class QWenLMHeadModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (QWenLMHeadModel,)
    fx_compatible = False
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False
    test_attention_outputs = False
    use_test_model_name_list = False
    use_test_inputs_embeds: bool = False

    def setUp(self):
        self.model_tester = QWenLMHeadModelTester(self)
        self.config_tester = ConfigTester(
            self,
            config_class=QWenConfig,
        )

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_model(inputs_dict)

    @unittest.skip(reason="Hidden_states is tested in individual model tests")
    def test_hidden_states_output(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        model = QWenLMHeadModel.from_pretrained("qwen-vl/qwen-vl-chat-7b")
        self.assertIsNotNone(model)


if __name__ == "__main__":
    unittest.main()
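
For reference, a minimal sketch of driving just this suite through the standard unittest API. The runner script below is hypothetical, not part of the commit; it assumes it is executed from the repository root so the tests package is importable, and that the non-@slow tests can reach the network to fetch the qwen-vl/qwen-vl-chat-7b tokenizer.

# run_test_qwenvl.py -- hypothetical helper, not part of this commit.
import unittest

# Importing the module also runs its sys.path setup for paddlemix.
from tests.models.test_qwenvl import QWenLMHeadModelTest

# Collect every test method from the case and run with verbose output.
suite = unittest.TestLoader().loadTestsFromTestCase(QWenLMHeadModelTest)
unittest.TextTestRunner(verbosity=2).run(suite)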
