From 22d04893e45e6a5717cbd8ea6fa44048d868cc95 Mon Sep 17 00:00:00 2001
From: wtmlon
Date: Wed, 22 Jan 2025 17:12:44 +0800
Subject: [PATCH] wrap model when lora is ON and only do evaluation.

---
 paddlenlp/trainer/trainer.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/paddlenlp/trainer/trainer.py b/paddlenlp/trainer/trainer.py
index a240a2f7f71e..d1ce35093084 100644
--- a/paddlenlp/trainer/trainer.py
+++ b/paddlenlp/trainer/trainer.py
@@ -3123,6 +3123,9 @@ def evaluation_loop(
             if self.model is self.model_wrapped and isinstance(self.model_wrapped, PipelineLayer):
                 # NOTE(gongenlei): when do_train=False, do_eval=True, we need to wrap model for pipeline
                 self.model_wrapped = fleet.distributed_model(self.model_wrapped)
+            if isinstance(self.model_wrapped, LoRAModel) and isinstance(self.model_wrapped.model, PipelineLayer):
+                # NOTE(liuting): when do_train=False, do_eval=True, lora=True, we need to wrap model for pipeline
+                self.model_wrapped = fleet.distributed_model(self.model_wrapped.model)
             model = self.model_wrapped
         else:
             model = self.model
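
Note for review: below is a minimal sketch of the wrapping logic in evaluation_loop after this patch, assuming the imports used by paddlenlp/trainer/trainer.py (fleet from paddle.distributed, PipelineLayer from paddle.distributed.fleet.meta_parallel, LoRAModel from paddlenlp.peft). The helper name wrap_model_for_pipeline_eval is hypothetical and exists only for illustration; in the real file this logic is inline in Trainer.evaluation_loop.

    from paddle.distributed import fleet
    from paddle.distributed.fleet.meta_parallel import PipelineLayer
    from paddlenlp.peft import LoRAModel

    def wrap_model_for_pipeline_eval(trainer):
        # Hypothetical helper (illustration only): mirrors the inline branch
        # that Trainer.evaluation_loop runs for pipeline-parallel evaluation.
        #
        # Case 1 (pre-existing): do_train=False, do_eval=True with a bare
        # PipelineLayer -- the model was never wrapped during training, so
        # wrap it here before running evaluation.
        if trainer.model is trainer.model_wrapped and isinstance(trainer.model_wrapped, PipelineLayer):
            trainer.model_wrapped = fleet.distributed_model(trainer.model_wrapped)
        # Case 2 (this patch): with LoRA on, model_wrapped is a LoRAModel and
        # the PipelineLayer sits one level down at .model, so the check above
        # never fires; wrap the inner pipeline model instead.
        if isinstance(trainer.model_wrapped, LoRAModel) and isinstance(trainer.model_wrapped.model, PipelineLayer):
            trainer.model_wrapped = fleet.distributed_model(trainer.model_wrapped.model)
        return trainer.model_wrapped

The second check cannot be folded into the first because a LoRAModel is not itself a PipelineLayer; without it, a LoRA-wrapped pipeline model reaches evaluation unwrapped and fails when only do_eval is set.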