
Commit fbe613b

lugimzzz and wawltor authored
disable lora (#8674)
* disable lora
* lora disable & merge
* fix
* fix unittest
* fix the finetune and predict in llm

Co-authored-by: wawltor <fangzeyang0904@hotmail.com>
1 parent 595c2fb commit fbe613b

File tree

12 files changed: +542, -849 lines


docs/llm/docs/peft.md

Lines changed: 1 addition & 2 deletions
```diff
@@ -42,7 +42,6 @@ Prefix-tuning [paper](https://arxiv.org/abs/2101.00190)
     target_modules=target_modules,
     r=lora_rank,
     lora_alpha=2 * lora_rank,
-    merge_weights=True
 )
 model = LoRAModel(model, lora_config)
 model.mark_only_lora_as_trainable()
@@ -92,7 +91,7 @@ Parameters:
     Defaults to 0.0; the dropout ratio, float type
 
 --merge_weights
-    Defaults to False; whether to merge the base model weights and the LoRA weights at inference time, bool type
+    Defaults to False; this interface is deprecated. Please use model.merge() or model.unmerge() instead.
 
 --trainable_bias
     Specifies the trainable bias; options are ['lora', 'all']
```
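
For reference, a minimal sketch of the fine-tuning flow this doc change describes, with merging now done as an explicit call instead of via `merge_weights` (the model name, target-module patterns, and rank below are placeholder assumptions, not from the commit):

```python
# Minimal sketch, not the exact finetune script: train a LoRA adapter, then
# merge it into the base weights explicitly for inference.
from paddlenlp.peft import LoRAConfig, LoRAModel
from paddlenlp.transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained("facebook/llama-7b")  # placeholder model name

lora_rank = 8  # placeholder rank
lora_config = LoRAConfig(
    target_modules=[".*q_proj.*", ".*v_proj.*"],  # assumed target-module patterns
    r=lora_rank,
    lora_alpha=2 * lora_rank,
    # merge_weights=True is deprecated by this commit and no longer passed here
)
model = LoRAModel(model, lora_config)
model.mark_only_lora_as_trainable()

# ... run fine-tuning ...

model.merge()    # fold the LoRA weights into the base weights before predicting
# ... run inference ...
model.unmerge()  # optional: restore the separate LoRA weights
```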

llm/predict/predictor.py

Lines changed: 1 addition & 1 deletion
```diff
@@ -270,7 +270,6 @@ def __init__(
         if config.lora_path is not None:
             lora_config = LoRAConfig.from_pretrained(config.lora_path)
             dtype = lora_config.dtype
-            lora_config.merge_weights = True
         elif config.prefix_path is not None:
             prefix_config = PrefixConfig.from_pretrained(config.prefix_path)
             dtype = prefix_config.dtype
@@ -292,6 +291,7 @@ def __init__(
             self.model = LoRAModel.from_pretrained(
                 model=self.model, lora_path=config.lora_path, lora_config=lora_config
             )
+            self.model.merge()
         if config.prefix_path is not None:
             prefix_tuning_params = get_prefix_tuning_params(self.model)
             self.model = PrefixModelForCausalLM.from_pretrained(
```
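
Taken on its own, the predictor's new LoRA path looks roughly like the sketch below (checkpoint path and model name are placeholders); the merge is now an explicit step after the adapter is loaded rather than being driven by `merge_weights`:

```python
# Minimal sketch of the inference-side flow after this change; paths are placeholders.
from paddlenlp.peft import LoRAConfig, LoRAModel
from paddlenlp.transformers import AutoModelForCausalLM

lora_path = "./checkpoints/llama_lora_ckpts"  # placeholder LoRA checkpoint directory
lora_config = LoRAConfig.from_pretrained(lora_path)

model = AutoModelForCausalLM.from_pretrained("facebook/llama-7b", dtype=lora_config.dtype)  # placeholder model name
model = LoRAModel.from_pretrained(model=model, lora_path=lora_path, lora_config=lora_config)
model.merge()  # explicitly fold the LoRA weights into the base model
model.eval()
```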

llm/tools/merge_lora_params.py

Lines changed: 6 additions & 2 deletions
```diff
@@ -113,13 +113,17 @@ def lora_process(name, lora_config, state_dict, device, lora_state_dict=None):
 
 
 def merge_old_lora(lora_config, args):
-    lora_config.merge_weight = True
+    lora_config.merge_weights = True
     model = AutoModelForCausalLM.from_pretrained(
         args.model_name_or_path,
         dtype=lora_config.dtype,
     )
     model = LoRAModel.from_pretrained(model, args.lora_path)
-    model.eval()
+    try:
+        model.merge()
+        model.eval()
+    except:
+        model.eval()
     model_state_dict = model.model.state_dict()
     for key in list(model_state_dict):
         if "lora" in key:
```

paddlenlp/peft/lora/__init__.py

Lines changed: 1 addition & 7 deletions
```diff
@@ -13,11 +13,5 @@
 # limitations under the License.
 
 from .lora_config import LoRAConfig
-from .lora_layers import (
-    ColumnParallelLoRALinear,
-    ColumnParallelLoRAMergedLinear,
-    LoRALinear,
-    LoRAMergedLinear,
-    RowParallelLoRALinear,
-)
+from .lora_layers import ColumnParallelLoRALinear, LoRALinear, RowParallelLoRALinear
 from .lora_model import LoRAModel
```

paddlenlp/peft/lora/lora_config.py

Lines changed: 5 additions & 0 deletions
```diff
@@ -94,6 +94,11 @@ def __post_init__(self):
                 "We will automatically set `use_quick_lora` to `False` to avoid potential inconsistencies."
             )
             self.use_quick_lora = False
+        if self.merge_weights:
+            logger.error(
+                "'merge_weights' is deprecated and will be removed in a future version. "
+                "Please apply model.merge() or model.unmerge() to merge/unmerge LoRA weight to base model."
+            )
 
     @property
     def scaling(self):
```
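
With this guard in place, a config that still sets the flag only produces the deprecation log; a short sketch (field values are placeholder assumptions):

```python
# Minimal sketch: setting the deprecated flag triggers the logger.error added
# in __post_init__ above; the recommended replacement is an explicit call on
# the LoRA-wrapped model.
from paddlenlp.peft import LoRAConfig

config = LoRAConfig(r=8, lora_alpha=16, merge_weights=True)  # logs the deprecation error
# Recommended instead:
#   model.merge()    # merge LoRA weights into the base model
#   model.unmerge()  # undo the merge
```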
