From 57bfad8773baeb9eb0ba3993a3dead532ea3a620 Mon Sep 17 00:00:00 2001
From: Zhong Hui
Date: Mon, 20 May 2024 17:32:55 +0800
Subject: [PATCH] fix ci.

---
 tests/generation/test_synced_gpus.py | 19 ++++++++++---------
 1 file changed, 10 insertions(+), 9 deletions(-)

diff --git a/tests/generation/test_synced_gpus.py b/tests/generation/test_synced_gpus.py
index ffdd1a1887c1..26dca7dcc927 100644
--- a/tests/generation/test_synced_gpus.py
+++ b/tests/generation/test_synced_gpus.py
@@ -13,7 +13,9 @@
 # limitations under the License.
 
 import os
+import sys
 import tempfile
+from pathlib import Path
 
 import paddle
 
@@ -21,17 +23,16 @@
 from paddlenlp.trainer import PdArgumentParser, Trainer, TrainingArguments
 from paddlenlp.transformers import AutoModelForCausalLM, AutoTokenizer
 
-try:
-    from tests.parallel_launch import TestMultipleGpus
-    from tests.transformers.test_modeling_common import ids_tensor
+sys.path.append(str(Path(__file__).parent.parent.parent))
+from tests.parallel_launch import TestMultipleGpus
+from tests.transformers.test_modeling_common import ids_tensor
 
-    class ShardingStage3Tester(TestMultipleGpus):
-        def test_synced_gpus_greedy(self):
-            # test this file
-            self.run_2gpu(__file__)
 
-except:
-    pass
+class ShardingStage3Tester(TestMultipleGpus):
+    def test_synced_gpus_greedy(self):
+        # test this file
+        self.run_2gpu(__file__)
+
 
 if __name__ == "__main__":
     tokenizer = AutoTokenizer.from_pretrained("__internal_testing__/tiny-random-llama")