Commit c98163b: [AutoParallel] Support intermediate_api baichuan test (#9988)

1 parent 47e3266

3 files changed: +89 −3 lines
New file (26 lines added): N4C32 launcher script for the intermediate_api baichuan2_13b benchmark

@@ -0,0 +1,26 @@
+# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+param="model_item=intermediate_api_baichuan-inc-baichuan-2-13b_pretrain_dy2st "
+param+="run_mode=DP1_MP4_PP1_Sharding8_Stage1 "
+param+="device_num=N4C32 "
+param+="global_batch_size=32 "
+param+="nnodes=4 "
+param+="model_type=baichuan2_13b "
+param+="intermediate_api=intermediate_api_ "
+
+cd ./tests
+bash ./test_tipc/static/auto_parallel/baichuan2/benchmark_common/prepare.sh
+
+bash -c "${param} bash ./test_tipc/static/auto_parallel/baichuan2/benchmark_common/run_benchmark.sh"
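The launcher relies on a standard shell mechanism: `name=value` assignments placed before a command become environment variables for that command only, which is how run_benchmark.sh later reads them with `${model_type:-...}`-style defaults. A minimal, self-contained sketch of the mechanism (the `env | grep` filter is only for illustration):

#!/bin/bash
# Build the parameter string the same way the launcher does.
param="model_type=baichuan2_13b "
param+="intermediate_api=intermediate_api_ "

# The assignments prefix the inner command, so that command's environment
# contains model_type and intermediate_api; the calling shell is unaffected.
bash -c "${param} env" | grep -E '^(model_type|intermediate_api)='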

tests/test_tipc/static/auto_parallel/baichuan2/benchmark_common/run_benchmark.sh

Lines changed: 6 additions & 3 deletions
@@ -24,6 +24,9 @@ function _set_params(){
     fp_item="bf16"
     MODEL_TYPE=${model_type:-"baichuan2_13b"}

+    # for intermediate api
+    intermediate_api=${intermediate_api:-""}
+
     ip_lists=($(echo $TRAINER_INSTANCES | tr ',' ' '))
     master_ip=${ip_lists[0]}
     nnodes=${nnodes:-1}
@@ -170,17 +173,17 @@ function _train(){
             train_cmd="python -u -m paddle.distributed.launch --gpus=0,1,2,3,4,5,6,7 \
                 --nnodes 1 --nproc_per_node 8 \
                 --log_dir mylog run_pretrain_auto.py \
-                ./pretrain_config_${MODEL_TYPE}/pretrain-${MODEL_TYPE}.json"
+                ./pretrain_config_${MODEL_TYPE}/${intermediate_api}pretrain-${MODEL_TYPE}.json"
             ;;
         N4C32) echo "Run with: device_num=${device_num} run_mode=${run_mode}"
             train_cmd="python -u -m paddle.distributed.launch --gpus=0,1,2,3,4,5,6,7 \
                 --log_dir mylog run_pretrain_auto.py \
-                ./pretrain_config_${MODEL_TYPE}/pretrain-${MODEL_TYPE}.json"
+                ./pretrain_config_${MODEL_TYPE}/${intermediate_api}pretrain-${MODEL_TYPE}.json"
             ;;
         *) echo "Run with: device_num=${device_num}, run_mode=${run_mode}"
             train_cmd="python -u -m paddle.distributed.launch --gpus=0,1,2,3,4,5,6,7 \
                 --log_dir mylog run_pretrain_auto.py \
-                ./pretrain_config_${MODEL_TYPE}/pretrain-${MODEL_TYPE}.json"
+                ./pretrain_config_${MODEL_TYPE}/${intermediate_api}pretrain-${MODEL_TYPE}.json"
             ;;
     esac
     cd ../llm/auto_parallel/llama
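The effect of the new `${intermediate_api}` prefix is easiest to see by expanding the config path by hand: with the default empty value the script keeps the existing config, and with the value set by the new launcher it selects the intermediate-API config (presumably the new JSON file below). A small sketch of the expansion, with the echoed paths shown in comments:

#!/bin/bash
MODEL_TYPE=${model_type:-"baichuan2_13b"}

# Default case: intermediate_api is unset, so the old path is unchanged.
intermediate_api=${intermediate_api:-""}
echo "./pretrain_config_${MODEL_TYPE}/${intermediate_api}pretrain-${MODEL_TYPE}.json"
# -> ./pretrain_config_baichuan2_13b/pretrain-baichuan2_13b.json

# With intermediate_api=intermediate_api_ (as set by the new N4C32 launcher):
intermediate_api="intermediate_api_"
echo "./pretrain_config_${MODEL_TYPE}/${intermediate_api}pretrain-${MODEL_TYPE}.json"
# -> ./pretrain_config_baichuan2_13b/intermediate_api_pretrain-baichuan2_13b.json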
New file (57 lines added): intermediate-API pretraining config for baichuan2_13b

@@ -0,0 +1,57 @@
+{
+    "model_name_or_path": "baichuan-inc/Baichuan2-13B-Base",
+    "tokenizer_name_or_path": "baichuan-inc/Baichuan2-13B-Base",
+    "input_dir": "./data",
+    "output_dir": "./checkpoints/baichuan2_13b_ckpts",
+    "split": "949,50,1",
+    "to_static": true,
+    "pipeline_parallel_degree": 1,
+    "tensor_parallel_degree": 4,
+    "virtual_pp_degree": 1,
+    "weight_decay": 0.01,
+    "warmup_ratio": 0.01,
+    "max_grad_norm": 1.0,
+    "learning_rate": 0.00003,
+    "min_learning_rate": 0.000003,
+    "max_steps": 200,
+    "logging_steps": 5,
+    "eval_steps": 10000,
+    "save_steps": 1000,
+    "continue_training": 0,
+    "do_train": true,
+    "do_eval": false,
+    "do_predict": false,
+    "disable_tqdm": true,
+    "save_total_limit": 2,
+    "device": "gpu",
+    "dataloader_num_workers": 1,
+    "distributed_dataloader": 0,
+    "enable_auto_parallel": 1,
+    "per_device_train_batch_size": 2,
+    "gradient_accumulation_steps": 2,
+    "per_device_eval_batch_size": 1,
+    "recompute": false,
+    "recompute_use_reentrant": true,
+    "recompute_granularity": "full",
+    "pp_recompute_interval": 0,
+    "bf16": true,
+    "fp16_opt_level": "O2",
+    "amp_master_grad": true,
+    "fuse_attention_ffn": true,
+    "fuse_attention_qkv": true,
+    "use_flash_attention": true,
+    "fused_linear": 1,
+    "fused_linear_param_grad_add": 1,
+    "use_fused_rope": true,
+    "use_fused_rms_norm": true,
+    "max_seq_length": 4096,
+    "sequence_parallel": 1,
+    "sharding": "stage1",
+    "sharding_parallel_degree": 8,
+    "sharding_parallel_config": "enable_tensor_fusion enable_overlap",
+    "tensor_parallel_config": "enable_mp_async_allreduce replace_with_parallel_cross_entropy",
+    "data_parallel_config": "enable_allreduce_avg_in_gradinent_scale gradient_sync_after_accumulate",
+    "pipeline_parallel_config": "enable_send_recv_overlap enable_split_backward",
+    "model_type": "llama_network",
+    "use_intermediate_api": true
+}
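As a sanity check, the config's parallelism degrees line up with the launcher settings, assuming the stage-1 sharding dimension doubles as the data-parallel axis here: 4-way tensor parallel × 8-way sharding × 1 pipeline stage gives 32 ranks, i.e. N4C32 (4 nodes × 8 GPUs), and 2 per-device batch × 2 accumulation steps × 8 sharding ranks gives the launcher's global_batch_size of 32. A sketch of the arithmetic:

#!/bin/bash
# Degrees taken from the JSON config above.
tensor_parallel_degree=4
pipeline_parallel_degree=1
sharding_parallel_degree=8

# Total ranks should match device_num=N4C32 (4 nodes x 8 GPUs = 32).
ranks=$((tensor_parallel_degree * pipeline_parallel_degree * sharding_parallel_degree))
echo "total ranks: ${ranks}"                # 32

# Global batch size, assuming sharding stage1 acts as the data-parallel axis.
per_device_train_batch_size=2
gradient_accumulation_steps=2
global_batch=$((per_device_train_batch_size * gradient_accumulation_steps * sharding_parallel_degree))
echo "global batch size: ${global_batch}"   # matches global_batch_size=32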
