diff --git a/tests/generation/test_synced_gpus.py b/tests/generation/test_synced_gpus.py index f0a2ffc96185..ffdd1a1887c1 100644 --- a/tests/generation/test_synced_gpus.py +++ b/tests/generation/test_synced_gpus.py @@ -20,15 +20,18 @@ from paddlenlp.generation import GenerationConfig from paddlenlp.trainer import PdArgumentParser, Trainer, TrainingArguments from paddlenlp.transformers import AutoModelForCausalLM, AutoTokenizer -from tests.parallel_launch import TestMultipleGpus -from tests.transformers.test_modeling_common import ids_tensor +try: + from tests.parallel_launch import TestMultipleGpus + from tests.transformers.test_modeling_common import ids_tensor -class ShardingStage3Tester(TestMultipleGpus): - def test_synced_gpus_greedy(self): - # test this file - self.run_2gpu(__file__) + class ShardingStage3Tester(TestMultipleGpus): + def test_synced_gpus_greedy(self): + # test this file + self.run_2gpu(__file__) +except: + pass if __name__ == "__main__": tokenizer = AutoTokenizer.from_pretrained("__internal_testing__/tiny-random-llama")