From 6355a66a017cfd6c2153fcabc6409902e04daefc Mon Sep 17 00:00:00 2001 From: ZhenxingLi Date: Mon, 9 Jun 2025 15:02:20 +0800 Subject: [PATCH 1/7] Update pretrain-llama2_13b.json --- .../llama2/pretrain_config_llama2_13b/pretrain-llama2_13b.json | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/test_tipc/static/auto_parallel/llama2/pretrain_config_llama2_13b/pretrain-llama2_13b.json b/tests/test_tipc/static/auto_parallel/llama2/pretrain_config_llama2_13b/pretrain-llama2_13b.json index aa86a1875597..43d3a4c789b3 100644 --- a/tests/test_tipc/static/auto_parallel/llama2/pretrain_config_llama2_13b/pretrain-llama2_13b.json +++ b/tests/test_tipc/static/auto_parallel/llama2/pretrain_config_llama2_13b/pretrain-llama2_13b.json @@ -18,6 +18,7 @@ "sequence_parallel": 0, "use_flash_attention": true, "use_fused_rms_norm": true, + "use_fast_layer_norm": true, "fuse_attention_ffn": true, "fuse_attention_qkv": true, "use_fused_rope": true, From d71997eaefe9152719eb70c1168c440f5c13068f Mon Sep 17 00:00:00 2001 From: ZhenxingLi Date: Mon, 9 Jun 2025 15:35:05 +0800 Subject: [PATCH 2/7] Create meta-llama-Llama-2-13b_pretrain_dynamic_auto_bs32_bf16_DP1_MP1_PP4_VPP5_Sharding8_Stage2.sh --- ..._bf16_DP1_MP1_PP4_VPP5_Sharding8_Stage2.sh | 30 +++++++++++++++++++ 1 file changed, 30 insertions(+) create mode 100644 tests/test_tipc/static/auto_parallel/llama2/N4C32/tests/test_tipc/static/auto_parallel/llama2/N4C32/meta-llama-Llama-2-13b_pretrain_dynamic_auto_bs32_bf16_DP1_MP1_PP4_VPP5_Sharding8_Stage2.sh diff --git a/tests/test_tipc/static/auto_parallel/llama2/N4C32/tests/test_tipc/static/auto_parallel/llama2/N4C32/meta-llama-Llama-2-13b_pretrain_dynamic_auto_bs32_bf16_DP1_MP1_PP4_VPP5_Sharding8_Stage2.sh b/tests/test_tipc/static/auto_parallel/llama2/N4C32/tests/test_tipc/static/auto_parallel/llama2/N4C32/meta-llama-Llama-2-13b_pretrain_dynamic_auto_bs32_bf16_DP1_MP1_PP4_VPP5_Sharding8_Stage2.sh new file mode 100644 index 000000000000..97ff573e4862 --- /dev/null +++ b/tests/test_tipc/static/auto_parallel/llama2/N4C32/tests/test_tipc/static/auto_parallel/llama2/N4C32/meta-llama-Llama-2-13b_pretrain_dynamic_auto_bs32_bf16_DP1_MP1_PP4_VPP5_Sharding8_Stage2.sh @@ -0,0 +1,30 @@ +# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +param="model_item=meta-llama-Llama-2-13b_pretrain_dynamic_auto " +param+="run_mode=DP1_MP1_PP4_VPP5_Sharding8_Stage1 " +param+="device_num=N4C32 " +param+="global_batch_size=32 " +param+="nnodes=4 " +param+="model_type=llama2_13b " +param+='dynamic_auto=_dynamic_auto ' + +export FLAGS_fuse_reducescatter_in_opt=1 +export FLAGS_enable_sharding_overlap=1 +export FLAGS_enable_tensor_fusion=1 + +cd ./tests +bash ./test_tipc/static/auto_parallel/llama2/benchmark_common/prepare.sh + +bash -c "${param} bash ./test_tipc/static/auto_parallel/llama2/benchmark_common/run_benchmark.sh" From 32b57bee6621f08dd5ca05673b26c98643733ba0 Mon Sep 17 00:00:00 2001 From: ZhenxingLi Date: Mon, 9 Jun 2025 15:36:33 +0800 Subject: [PATCH 3/7] Delete tests/test_tipc/static/auto_parallel/llama2/N4C32/tests/test_tipc/static/auto_parallel/llama2/N4C32/meta-llama-Llama-2-13b_pretrain_dynamic_auto_bs32_bf16_DP1_MP1_PP4_VPP5_Sharding8_Stage2.sh --- ..._bf16_DP1_MP1_PP4_VPP5_Sharding8_Stage2.sh | 30 ------------------- 1 file changed, 30 deletions(-) delete mode 100644 tests/test_tipc/static/auto_parallel/llama2/N4C32/tests/test_tipc/static/auto_parallel/llama2/N4C32/meta-llama-Llama-2-13b_pretrain_dynamic_auto_bs32_bf16_DP1_MP1_PP4_VPP5_Sharding8_Stage2.sh diff --git a/tests/test_tipc/static/auto_parallel/llama2/N4C32/tests/test_tipc/static/auto_parallel/llama2/N4C32/meta-llama-Llama-2-13b_pretrain_dynamic_auto_bs32_bf16_DP1_MP1_PP4_VPP5_Sharding8_Stage2.sh b/tests/test_tipc/static/auto_parallel/llama2/N4C32/tests/test_tipc/static/auto_parallel/llama2/N4C32/meta-llama-Llama-2-13b_pretrain_dynamic_auto_bs32_bf16_DP1_MP1_PP4_VPP5_Sharding8_Stage2.sh deleted file mode 100644 index 97ff573e4862..000000000000 --- a/tests/test_tipc/static/auto_parallel/llama2/N4C32/tests/test_tipc/static/auto_parallel/llama2/N4C32/meta-llama-Llama-2-13b_pretrain_dynamic_auto_bs32_bf16_DP1_MP1_PP4_VPP5_Sharding8_Stage2.sh +++ /dev/null @@ -1,30 +0,0 @@ -# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -param="model_item=meta-llama-Llama-2-13b_pretrain_dynamic_auto " -param+="run_mode=DP1_MP1_PP4_VPP5_Sharding8_Stage1 " -param+="device_num=N4C32 " -param+="global_batch_size=32 " -param+="nnodes=4 " -param+="model_type=llama2_13b " -param+='dynamic_auto=_dynamic_auto ' - -export FLAGS_fuse_reducescatter_in_opt=1 -export FLAGS_enable_sharding_overlap=1 -export FLAGS_enable_tensor_fusion=1 - -cd ./tests -bash ./test_tipc/static/auto_parallel/llama2/benchmark_common/prepare.sh - -bash -c "${param} bash ./test_tipc/static/auto_parallel/llama2/benchmark_common/run_benchmark.sh" From 124c0782bfabe7b38d4bcbc2ee15d7dd0acf4120 Mon Sep 17 00:00:00 2001 From: ZhenxingLi Date: Mon, 9 Jun 2025 15:37:26 +0800 Subject: [PATCH 4/7] Create meta-llama-Llama-2-13b_pretrain_dynamic_auto_bs32_bf16_DP1_MP1_PP4_VPP5_Sharding8_Stage2.sh --- ..._bf16_DP1_MP1_PP4_VPP5_Sharding8_Stage2.sh | 30 +++++++++++++++++++ 1 file changed, 30 insertions(+) create mode 100644 tests/test_tipc/static/auto_parallel/llama2/N4C32/meta-llama-Llama-2-13b_pretrain_dynamic_auto_bs32_bf16_DP1_MP1_PP4_VPP5_Sharding8_Stage2.sh diff --git a/tests/test_tipc/static/auto_parallel/llama2/N4C32/meta-llama-Llama-2-13b_pretrain_dynamic_auto_bs32_bf16_DP1_MP1_PP4_VPP5_Sharding8_Stage2.sh b/tests/test_tipc/static/auto_parallel/llama2/N4C32/meta-llama-Llama-2-13b_pretrain_dynamic_auto_bs32_bf16_DP1_MP1_PP4_VPP5_Sharding8_Stage2.sh new file mode 100644 index 000000000000..97ff573e4862 --- /dev/null +++ b/tests/test_tipc/static/auto_parallel/llama2/N4C32/meta-llama-Llama-2-13b_pretrain_dynamic_auto_bs32_bf16_DP1_MP1_PP4_VPP5_Sharding8_Stage2.sh @@ -0,0 +1,30 @@ +# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +param="model_item=meta-llama-Llama-2-13b_pretrain_dynamic_auto " +param+="run_mode=DP1_MP1_PP4_VPP5_Sharding8_Stage1 " +param+="device_num=N4C32 " +param+="global_batch_size=32 " +param+="nnodes=4 " +param+="model_type=llama2_13b " +param+='dynamic_auto=_dynamic_auto ' + +export FLAGS_fuse_reducescatter_in_opt=1 +export FLAGS_enable_sharding_overlap=1 +export FLAGS_enable_tensor_fusion=1 + +cd ./tests +bash ./test_tipc/static/auto_parallel/llama2/benchmark_common/prepare.sh + +bash -c "${param} bash ./test_tipc/static/auto_parallel/llama2/benchmark_common/run_benchmark.sh" From 7ca8734a6f36f728fd30f3cbdc9c55807aeda83b Mon Sep 17 00:00:00 2001 From: ZhenxingLi Date: Mon, 9 Jun 2025 15:38:45 +0800 Subject: [PATCH 5/7] Create pretrain-llama2_13b_dynamic_auto.json --- .../pretrain-llama2_13b_dynamic_auto.json | 58 +++++++++++++++++++ 1 file changed, 58 insertions(+) create mode 100644 tests/test_tipc/static/auto_parallel/llama2/pretrain_config_llama2_13b/pretrain-llama2_13b_dynamic_auto.json diff --git a/tests/test_tipc/static/auto_parallel/llama2/pretrain_config_llama2_13b/pretrain-llama2_13b_dynamic_auto.json b/tests/test_tipc/static/auto_parallel/llama2/pretrain_config_llama2_13b/pretrain-llama2_13b_dynamic_auto.json new file mode 100644 index 000000000000..f25a5c0041ba --- /dev/null +++ b/tests/test_tipc/static/auto_parallel/llama2/pretrain_config_llama2_13b/pretrain-llama2_13b_dynamic_auto.json @@ -0,0 +1,58 @@ +{ + "model_name_or_path": "meta-llama/Llama-2-13b", + "tokenizer_name_or_path": "meta-llama/Llama-2-13b", + "input_dir": "./data", + "output_dir": "./checkpoints/llama2_pretrain_ckpts", + "per_device_train_batch_size": 1, + "gradient_accumulation_steps": 4, + "per_device_eval_batch_size": 4, + "tensor_parallel_degree": 1, + "pipeline_parallel_degree": 4, + "sharding": "stage1", + "data_parallel_config": "enable_allreduce_avg_in_gradinent_scale gradient_sync_after_accumulate", + "sharding_parallel_config": "enable_overlap enable_tensor_fusion", + "tensor_parallel_config": "enable_mp_async_allreduce", + "pipeline_parallel_config": "enable_send_recv_overlap enable_split_backward", + "pipeline_schedule_mode": "FThenB", + "virtual_pp_degree": 1, + "sequence_parallel": 0, + "use_flash_attention": true, + "use_fused_rms_norm": true, + "use_fast_layer_norm": true, + "fuse_attention_ffn": true, + "fuse_attention_qkv": true, + "use_fused_rope": true, + "fused_linear_param_grad_add": 0, + "enable_linear_fused_grad_add": 0, + "max_seq_length": 4096, + "learning_rate": 3e-05, + "min_learning_rate": 3e-06, + "warmup_steps": 30, + "logging_steps": 10, + "max_steps": 500, + "save_steps": 5000, + "eval_steps": 1000, + "weight_decay": 0.01, + "bf16": true, + "fp16_opt_level": "O2", + "amp_custom_black_list": ["reduce_sum", "c_softmax_with_cross_entropy"], + "amp_custom_white_list": ["lookup_table", "lookup_table_v2"], + "amp_master_grad": true, + "warmup_ratio": 0.01, + "max_grad_norm": 1.0, + "dataloader_num_workers": 1, + "continue_training": 0, + "do_train": true, + "do_eval": false, + "do_predict": false, + "disable_tqdm": true, + "skip_profile_timer": true, + "recompute": false, + "recompute_use_reentrant": true, + "distributed_dataloader": 0, + "recompute_granularity": "full", + "save_total_limit": 2, + "device": "gpu", + "to_static": false, + "enable_auto_parallel": true +} From 0505845f298e669caa7039283911934dc29188f2 Mon Sep 17 00:00:00 2001 From: ZhenxingLi Date: Mon, 9 Jun 2025 15:39:02 +0800 Subject: [PATCH 6/7] Update ln_api.cpp --- .../gpt-3/external_ops/fast_ln/ln_api.cpp | 27 
+++++++++++++++------------
 1 file changed, 15 insertions(+), 12 deletions(-)

diff --git a/slm/model_zoo/gpt-3/external_ops/fast_ln/ln_api.cpp b/slm/model_zoo/gpt-3/external_ops/fast_ln/ln_api.cpp
index 45d77190a499..90cb36046a5f 100644
--- a/slm/model_zoo/gpt-3/external_ops/fast_ln/ln_api.cpp
+++ b/slm/model_zoo/gpt-3/external_ops/fast_ln/ln_api.cpp
@@ -253,12 +253,10 @@ std::vector<paddle::Tensor> RMSLnFwd(const paddle::Tensor &x,
   auto sizes = x.shape();
   PD_CHECK(sizes.size() >= 2);
 
-  int rows = 1;
-  for (size_t i = 0; i + 1 < sizes.size(); ++i) {
-    rows *= sizes[i];
-  }
+  std::vector<int64_t> row_sizes(sizes.begin(), sizes.begin() + sizes.size() - 1);
 
   const int cols = sizes[sizes.size() - 1];
+  const int rows = x.numel() / cols;
   auto hidden_size = scale.numel();
   PD_CHECK(hidden_size == cols);
 
@@ -267,7 +265,7 @@ std::vector<paddle::Tensor> RMSLnFwd(const paddle::Tensor &x,
   auto place = x.place();
 
   auto y = paddle::empty(sizes, output_type, place);
-  auto invvar = paddle::empty({rows}, compute_type, place);
+  auto invvar = paddle::empty({row_sizes}, compute_type, place);
 
   LaunchNormFwd(x.stream(),
                 place,
@@ -491,11 +489,8 @@ std::vector<std::vector<int64_t>> RMSLnFwdInferShape(
     std::vector<int64_t> x_shape,
     std::vector<int64_t> scale_shape,
     float epsilon) {
-  int64_t rows = 1;
-  for (size_t i = 0; i + 1 < x_shape.size(); ++i) {
-    rows *= x_shape[i];
-  }
-  return {x_shape, {rows}};
+  std::vector<int64_t> row_shape(x_shape.begin(), x_shape.begin() + x_shape.size() - 1);
+  return {x_shape, row_shape};
 }
 
 std::vector<paddle::DataType> LnFwdInferDtype(paddle::DataType x_dtype,
@@ -566,11 +561,19 @@ PD_BUILD_OP(fast_rms_norm)
     .Attrs({"epsilon: float"})
     .SetKernelFn(PD_KERNEL(RMSLnFwd))
     .SetInferShapeFn(PD_INFER_SHAPE(RMSLnFwdInferShape))
-    .SetInferDtypeFn(PD_INFER_DTYPE(RMSLnFwdInferDtype));
+    .SetInferDtypeFn(PD_INFER_DTYPE(RMSLnFwdInferDtype))
+#ifdef CUSTOM_OP_WITH_SPMD
+    .SetInferSpmdFn(PD_INFER_SPMD_RULE(phi::distributed::RmsNormInferSpmd))
+#endif
+;
 
 PD_BUILD_GRAD_OP(fast_rms_norm)
     .Inputs({"x", "scale", "invvar", paddle::Grad("y")})
     .Outputs({paddle::Grad("x"), paddle::Grad("scale")})
     .Attrs({"epsilon: float"})
     .SetKernelFn(PD_KERNEL(RMSLnBwd))
-    .SetInferShapeFn(PD_INFER_SHAPE(RMSLnBwdInferShape));
+    .SetInferShapeFn(PD_INFER_SHAPE(RMSLnBwdInferShape))
+#ifdef CUSTOM_OP_WITH_SPMD
+    .SetInferSpmdFn(PD_INFER_SPMD_RULE(phi::distributed::RmsNormGradInferSpmd))
+#endif
+;

From e6ac6dae69997bb6a6f86f78d59ad4a9ecedf46f Mon Sep 17 00:00:00 2001
From: ZhenxingLi
Date: Mon, 9 Jun 2025 15:39:35 +0800
Subject: [PATCH 7/7] Update pretrain-llama2_13b.json

---
 .../llama2/pretrain_config_llama2_13b/pretrain-llama2_13b.json | 1 -
 1 file changed, 1 deletion(-)

diff --git a/tests/test_tipc/static/auto_parallel/llama2/pretrain_config_llama2_13b/pretrain-llama2_13b.json b/tests/test_tipc/static/auto_parallel/llama2/pretrain_config_llama2_13b/pretrain-llama2_13b.json
index 43d3a4c789b3..aa86a1875597 100644
--- a/tests/test_tipc/static/auto_parallel/llama2/pretrain_config_llama2_13b/pretrain-llama2_13b.json
+++ b/tests/test_tipc/static/auto_parallel/llama2/pretrain_config_llama2_13b/pretrain-llama2_13b.json
@@ -18,7 +18,6 @@
     "sequence_parallel": 0,
     "use_flash_attention": true,
     "use_fused_rms_norm": true,
-    "use_fast_layer_norm": true,
     "fuse_attention_ffn": true,
     "fuse_attention_qkv": true,
     "use_fused_rope": true,
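
Note on [PATCH 6/7]: the functional change in ln_api.cpp is that fast_rms_norm now reports invvar with the leading dimensions of x preserved (every axis except the last, hidden axis) instead of a single flattened row count, and it registers SPMD inference rules (phi::distributed::RmsNormInferSpmd / RmsNormGradInferSpmd) when built with CUSTOM_OP_WITH_SPMD. The standalone C++ sketch below is not part of the patch; InvvarShape and the sample shapes are illustrative assumptions, included only to make the new shape rule of RMSLnFwdInferShape concrete.

#include <cassert>
#include <cstdint>
#include <vector>

// Hypothetical helper mirroring the patched shape rule: for an input of shape
// [d0, ..., dn-1, hidden], invvar now has shape [d0, ..., dn-1] rather than
// the old flattened shape [d0 * ... * dn-1].
std::vector<int64_t> InvvarShape(const std::vector<int64_t>& x_shape) {
  assert(x_shape.size() >= 2);
  return std::vector<int64_t>(x_shape.begin(), x_shape.end() - 1);
}

int main() {
  // Example: [batch=1, seq=4096, hidden=5120] -> invvar shape [1, 4096].
  const std::vector<int64_t> x_shape = {1, 4096, 5120};
  const std::vector<int64_t> invvar_shape = InvvarShape(x_shape);
  assert(invvar_shape == (std::vector<int64_t>{1, 4096}));
  return 0;
}

Keeping per-axis shape information, rather than a flattened row count, is presumably what allows the newly registered SPMD rules to propagate sharding over the batch and sequence axes of invvar under auto parallel.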