diff --git a/tests/test_tipc/static/auto_parallel/llama2/N4C32/meta-llama-Llama-2-7b_pretrain_dy2st_bs32_Sharding32_Stage2.sh b/tests/test_tipc/static/auto_parallel/llama2/N4C32/meta-llama-Llama-2-7b_pretrain_dy2st_bs32_Sharding32_Stage2.sh
new file mode 100644
index 000000000000..87fcdfa2dbb5
--- /dev/null
+++ b/tests/test_tipc/static/auto_parallel/llama2/N4C32/meta-llama-Llama-2-7b_pretrain_dy2st_bs32_Sharding32_Stage2.sh
@@ -0,0 +1,25 @@
+# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+param="model_item=meta-llama-Llama-2-7b_pretrain_dy2st "
+param+="run_mode=Sharding_Stage2 "
+param+="device_num=N4C32 "
+param+="global_batch_size=32 "
+param+="nnodes=4 "
+param+="model_type=llama2_7b "
+
+cd ./tests
+bash ./test_tipc/static/auto_parallel/llama2/benchmark_common/prepare.sh
+
+bash -c "${param} bash ./test_tipc/static/auto_parallel/llama2/benchmark_common/run_benchmark.sh"
diff --git a/tests/test_tipc/static/auto_parallel/llama2/pretrain_config_llama2_7b/pretrain-llama2_7b.json b/tests/test_tipc/static/auto_parallel/llama2/pretrain_config_llama2_7b/pretrain-llama2_7b.json
new file mode 100644
index 000000000000..fc64dae136b7
--- /dev/null
+++ b/tests/test_tipc/static/auto_parallel/llama2/pretrain_config_llama2_7b/pretrain-llama2_7b.json
@@ -0,0 +1,54 @@
+{
+    "model_name_or_path": "meta-llama/Llama-2-7b",
+    "tokenizer_name_or_path": "meta-llama/Llama-2-7b",
+    "input_dir": "./data",
+    "output_dir": "./checkpoints/llama2_pretrain_ckpts",
+    "per_device_train_batch_size": 1,
+    "gradient_accumulation_steps": 1,
+    "per_device_eval_batch_size": 2,
+    "tensor_parallel_degree": 1,
+    "pipeline_parallel_degree": 1,
+    "sharding": "stage2",
+    "sharding_parallel_config": "enable_stage2_overlap",
+    "tensor_parallel_config": "enable_mp_async_allreduce",
+    "pipeline_parallel_config": "",
+    "virtual_pp_degree": 1,
+    "sequence_parallel": 0,
+    "use_flash_attention": true,
+    "use_fused_rms_norm": true,
+    "fuse_attention_ffn": true,
+    "fuse_attention_qkv": true,
+    "use_fused_rope": true,
+    "fused_linear_param_grad_add": true,
+    "max_seq_length": 4096,
+    "learning_rate": 3e-05,
+    "min_learning_rate": 3e-06,
+    "warmup_steps": 30,
+    "logging_steps": 1,
+    "max_steps": 50,
+    "save_steps": 5000,
+    "eval_steps": 1000,
+    "weight_decay": 0.01,
+    "bf16": true,
+    "fp16_opt_level": "O2",
+    "amp_master_grad": true,
+    "amp_custom_black_list": ["reduce_sum", "c_softmax_with_cross_entropy"],
+    "amp_custom_white_list": ["lookup_table", "lookup_table_v2"],
+    "warmup_ratio": 0.01,
+    "max_grad_norm": 1.0,
+    "dataloader_num_workers": 1,
+    "continue_training": 0,
+    "do_train": true,
+    "do_eval": false,
+    "do_predict": false,
+    "disable_tqdm": true,
+    "skip_profile_timer": true,
+    "recompute": false,
+    "recompute_use_reentrant": true,
+    "distributed_dataloader": 0,
+    "recompute_granularity": "full",
+    "save_total_limit": 2,
+    "device": "gpu",
+    "to_static": true,
+    "enable_auto_parallel": true
+}
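
Note on the launch pattern: the new script forwards its settings as environment variables rather than CLI flags. Below is a minimal, self-contained sketch of that mechanism; "consumer.sh" is a hypothetical stand-in for run_benchmark.sh (which is not part of this diff), and the assumption is that the real script reads these names from its environment.

#!/usr/bin/env bash
# Hypothetical consumer standing in for run_benchmark.sh, written to /tmp
# purely for demonstration.
cat > /tmp/consumer.sh <<'EOF'
#!/usr/bin/env bash
# Prefix assignments in the parent's `bash -c` string land in this
# process's environment, so each key is readable as a plain variable.
echo "model_item=${model_item:?not set}"
echo "run_mode=${run_mode:?not set}"
echo "device_num=${device_num:?not set}"
EOF

# Same accumulation style as the launcher above (trailing spaces separate
# the key=value pairs when the string is expanded).
param="model_item=meta-llama-Llama-2-7b_pretrain_dy2st "
param+="run_mode=Sharding_Stage2 "
param+="device_num=N4C32 "

# `bash -c "k1=v1 k2=v2 bash script"` sets k1/k2 only for that command's
# environment, without exporting them into the caller's shell.
bash -c "${param} bash /tmp/consumer.sh"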