Skip to content

Commit 162d8d3

Browse files
[AutoParallel] Add benchmark for llama-7b-dy2st. (#8559)
* [AutoParallel] Add benchmark for llama-7b-dy2st.
1 parent f89c91d commit 162d8d3

File tree

2 files changed

+79
-0
lines changed

2 files changed

+79
-0
lines changed
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,25 @@
# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Benchmark launcher for Llama-2-7B pretraining with dynamic-to-static
# (dy2st) auto-parallel, run as sharding stage-2 on 4 nodes x 8 GPUs (N4C32).
# Settings are accumulated as space-separated KEY=VALUE pairs; they are
# prefixed to the inner command below so bash exports them as environment
# variables for run_benchmark.sh.
param="model_item=meta-llama-Llama-2-7b_pretrain_dy2st "
param+="run_mode=Sharding_Stage2 "
param+="device_num=N4C32 "
param+="global_batch_size=32 "
param+="nnodes=4 "
param+="model_type=llama2_7b "

cd ./tests
# Fetch data and install dependencies needed by the benchmark.
bash ./test_tipc/static/auto_parallel/llama2/benchmark_common/prepare.sh

# The ${param} KEY=VALUE prefix turns each setting into an environment
# variable for the run_benchmark.sh invocation inside the child shell.
bash -c "${param} bash ./test_tipc/static/auto_parallel/llama2/benchmark_common/run_benchmark.sh"
Lines changed: 54 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,54 @@
{
  "model_name_or_path": "meta-llama/Llama-2-7b",
  "tokenizer_name_or_path": "meta-llama/Llama-2-7b",
  "input_dir": "./data",
  "output_dir": "./checkpoints/llama2_pretrain_ckpts",
  "per_device_train_batch_size": 1,
  "gradient_accumulation_steps": 1,
  "per_device_eval_batch_size": 2,
  "tensor_parallel_degree": 1,
  "pipeline_parallel_degree": 1,
  "sharding": "stage2",
  "sharding_parallel_config": "enable_stage2_overlap",
  "tensor_parallel_config": "enable_mp_async_allreduce",
  "pipeline_parallel_config": "",
  "virtual_pp_degree": 1,
  "sequence_parallel": 0,
  "use_flash_attention": true,
  "use_fused_rms_norm": true,
  "fuse_attention_ffn": true,
  "fuse_attention_qkv": true,
  "use_fused_rope": true,
  "fused_linear_param_grad_add": true,
  "max_seq_length": 4096,
  "learning_rate": 3e-05,
  "min_learning_rate": 3e-06,
  "warmup_steps": 30,
  "logging_steps": 1,
  "max_steps": 50,
  "save_steps": 5000,
  "eval_steps": 1000,
  "weight_decay": 0.01,
  "bf16": true,
  "fp16_opt_level": "O2",
  "amp_master_grad": true,
  "amp_custom_black_list": ["reduce_sum", "c_softmax_with_cross_entropy"],
  "amp_custom_white_list": ["lookup_table", "lookup_table_v2"],
  "warmup_ratio": 0.01,
  "max_grad_norm": 1.0,
  "dataloader_num_workers": 1,
  "continue_training": 0,
  "do_train": true,
  "do_eval": false,
  "do_predict": false,
  "disable_tqdm": true,
  "skip_profile_timer": true,
  "recompute": false,
  "recompute_use_reentrant": true,
  "distributed_dataloader": 0,
  "recompute_granularity": "full",
  "save_total_limit": 2,
  "device": "gpu",
  "to_static": true,
  "enable_auto_parallel": true
}

0 commit comments

Comments (0)