Skip to content

Commit 423cd8e

Browse files
[AutoParallel] Add benchmark for llama-7b-dy2st.
1 parent b0a8cdd commit 423cd8e

File tree

2 files changed

+77
-0
lines changed

2 files changed

+77
-0
lines changed
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,25 @@
1+
# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
2+
#
3+
# Licensed under the Apache License, Version 2.0 (the "License");
4+
# you may not use this file except in compliance with the License.
5+
# You may obtain a copy of the License at
6+
#
7+
# http://www.apache.org/licenses/LICENSE-2.0
8+
#
9+
# Unless required by applicable law or agreed to in writing, software
10+
# distributed under the License is distributed on an "AS IS" BASIS,
11+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+
# See the License for the specific language governing permissions and
13+
# limitations under the License.
14+
15+
# Benchmark launcher for llama2-7b auto-parallel dynamic-to-static (dy2st)
# pretraining. Each entry in ${param} is a KEY=VALUE pair that is prefixed
# to the run_benchmark.sh invocation below, so the settings reach the
# script as environment variables.
param="model_item=meta-llama-Llama-2-7b_pretrain_dy2st "
param+="run_mode=Sharding_Stage1 "
param+="device_num=N1C8 "
param+="global_batch_size=8 "
param+="nnodes=1 "
param+="model_type=llama2_7b "

# Fail fast if the tests directory is missing; otherwise the scripts below
# would silently run from the wrong working directory.
cd ./tests || exit 1

# Prepare data/environment; abort instead of benchmarking an unprepared setup.
bash ./test_tipc/static/auto_parallel/llama2/benchmark_common/prepare.sh || exit 1

# Launch the benchmark with the settings above exported as env vars.
bash -c "${param} bash ./test_tipc/static/auto_parallel/llama2/benchmark_common/run_benchmark.sh"
Lines changed: 52 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,52 @@
1+
{
2+
"model_name_or_path": "meta-llama/Llama-2-7b",
3+
"tokenizer_name_or_path": "meta-llama/Llama-2-7b",
4+
"input_dir": "./data",
5+
"output_dir": "./checkpoints/llama2_pretrain_ckpts",
6+
"per_device_train_batch_size": 1,
7+
"gradient_accumulation_steps": 1,
8+
"per_device_eval_batch_size": 2,
9+
"tensor_parallel_degree": 1,
10+
"pipeline_parallel_degree": 1,
11+
"sharding": "stage1",
12+
"sharding_parallel_config": "enable_stage1_overlap",
13+
"tensor_parallel_config": "enable_delay_scale_loss enable_mp_async_allreduce enable_mp_skip_c_identity enable_mp_fused_linear_param_grad_add",
14+
"pipeline_parallel_config": "enable_delay_scale_loss enable_release_grads disable_partial_send_recv",
15+
"virtual_pp_degree": 1,
16+
"sequence_parallel": 0,
17+
"use_flash_attention": true,
18+
"use_fused_rms_norm": true,
19+
"fuse_attention_ffn": true,
20+
"fuse_attention_qkv": true,
21+
"use_fused_rope": true,
22+
"fused_linear_param_grad_add": true,
23+
"max_seq_length": 4096,
24+
"learning_rate": 3e-05,
25+
"min_learning_rate": 3e-06,
26+
"warmup_steps": 30,
27+
"logging_steps": 1,
28+
"max_steps": 50,
29+
"save_steps": 5000,
30+
"eval_steps": 1000,
31+
"weight_decay": 0.01,
32+
"bf16": true,
33+
"fp16_opt_level": "O2",
34+
"amp_master_grad": true,
35+
"warmup_ratio": 0.01,
36+
"max_grad_norm": 1.0,
37+
"dataloader_num_workers": 1,
38+
"continue_training": 0,
39+
"do_train": true,
40+
"do_eval": false,
41+
"do_predict": false,
42+
"disable_tqdm": true,
43+
"skip_profile_timer": true,
44+
"recompute": false,
45+
"recompute_use_reentrant": true,
46+
"distributed_dataloader": 0,
47+
"recompute_granularity": "full",
48+
"save_total_limit": 2,
49+
"device": "gpu",
50+
"to_static": true,
51+
"enable_auto_parallel": true
52+
}

0 commit comments

Comments
 (0)