From c71fb92c29045f3e278e34a5f886102e55460b8f Mon Sep 17 00:00:00 2001 From: whucsgs Date: Fri, 2 Feb 2024 08:03:30 +0000 Subject: [PATCH 01/46] Add Pipeline Parallel for PPO training. --- examples/RLHF/models/model_pp.py | 396 ++++ examples/RLHF/models/ppo_model.py | 117 ++ examples/RLHF/models/ppo_model_utils.py | 162 ++ examples/RLHF/models/score_model.py | 1 + examples/RLHF/models/score_model_utils.py | 41 +- examples/RLHF/new_ppo_trainer.py | 1805 +++++++++++++++++++ examples/RLHF/ppo_config.json | 13 +- examples/RLHF/ppo_main.py | 121 +- paddlenlp/transformers/llama/modeling.py | 6 +- paddlenlp/transformers/llama/modeling_pp.py | 12 +- paddlenlp/transformers/model_utils.py | 3 +- 11 files changed, 2599 insertions(+), 78 deletions(-) create mode 100644 examples/RLHF/models/model_pp.py create mode 100644 examples/RLHF/models/ppo_model.py create mode 100644 examples/RLHF/models/ppo_model_utils.py create mode 100644 examples/RLHF/new_ppo_trainer.py diff --git a/examples/RLHF/models/model_pp.py b/examples/RLHF/models/model_pp.py new file mode 100644 index 000000000000..c7b567db1988 --- /dev/null +++ b/examples/RLHF/models/model_pp.py @@ -0,0 +1,396 @@ +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import importlib +import types + +import paddle +import paddle.nn as nn +from paddle.distributed.fleet.meta_parallel import LayerDesc + +from paddlenlp.transformers import LlamaForCausalLM, LlamaForCausalLMPipe +from paddlenlp.transformers.llama.modeling_pp import ( + LlamaRMSNormPipe, + parse_args, + return_args, +) + +from .ppo_model_utils import RLHFPPOMixedLoss, RLHFValueLoss +from .score_model_utils import ScoreModelMixin + + +def print_patch(func, output, *args, **kwargs): + return + print("=" * 20, func.__name__, output) + + +def fwd_step_patch(func, output, self, *args, **kwargs): + # training patch + if self.training and self.is_pipeline_last_stage(): + if getattr(self, "_step_losses", None): + self._step_losses.append(output.detach()) + else: + self._step_losses = [output.detach()] + + +# def fwd_step_eval_patch(func, output, self, *args, **kwargs): +# # eval patch for actor/reference model +# logits = output +# # sequence = self. 
+# log_probs = gather_log_probabilities(logits[:, :-1], sequence[:, 1:]) +# if self.is_pipeline_first_stage(): +# if getattr(self, "_step_losses", None): +# self._step_losses.append(output.detach()) +# else: +# self._step_losses = [output.detach()] +# print("=" * 20, "fwd_step_patch", len(self._step_losses)) + + +def make_wrapper(func, pre_patch=None, post_patch=None): + def wrapper(*args, **kwargs): + if pre_patch is not None: + pre_patch(func, None, *args, **kwargs) + output = func(*args, **kwargs) + # print("=" * 20, func.__name__, output) + if post_patch is not None: + post_patch(func, output, *args, **kwargs) + return output + + return wrapper + + +funcs = [ + paddle.distributed.fleet.meta_parallel.pp_utils.p2p_communication.P2pHelper.send_forward_recv_backward, + paddle.distributed.fleet.meta_parallel.pp_utils.p2p_communication.P2pHelper.send_backward_recv_forward, + paddle.distributed.fleet.model.PipelineParallel._backward_step, + paddle.distributed.fleet.meta_parallel.pp_utils.p2p_communication.P2pHelper.recv_backward, + paddle.distributed.fleet.meta_parallel.pp_utils.p2p_communication.P2pHelper.send_backward, + (paddle.distributed.fleet.model.PipelineParallel._forward_step, fwd_step_patch), + paddle.distributed.fleet.meta_parallel.pipeline_parallel.FakeMicroDataset._load_micro_batch, + paddle.distributed.fleet.meta_parallel.pp_utils.p2p_communication.P2pHelper.recv_forward, + paddle.distributed.fleet.meta_parallel.pp_utils.p2p_communication.P2pHelper.send_forward, + paddle.distributed.fleet.meta_parallel.pp_utils.p2p_communication.SendRecvMeta.recv_meta, + paddle.distributed.fleet.meta_parallel.pp_utils.p2p_communication.SendRecvMeta.send_meta, +] + +for func in funcs: + if isinstance(func, tuple): + fun, patch = func + else: + fun, patch = func, print_patch + module = importlib.import_module(fun.__module__) + cls_name = fun.__qualname__[: -len(fun.__name__) - 1] + wrap_fun = make_wrapper(fun, post_patch=patch) + cls_obj = getattr(module, cls_name) + setattr(cls_obj, fun.__name__, wrap_fun) + + +# _raw_load_micro_batch = paddle.distributed.fleet.meta_parallel.pipeline_parallel.FakeMicroDataset._load_micro_batch +# _raw_forward_step = paddle.distributed.fleet.model.PipelineParallel._forward_step +# _raw_recv_forward = paddle.distributed.fleet.meta_parallel.pp_utils.p2p_communication.P2pHelper.recv_forward +# _raw_send_forward = paddle.distributed.fleet.meta_parallel.pp_utils.p2p_communication.P2pHelper.send_forward +# _raw_recv_meta = paddle.distributed.fleet.meta_parallel.pp_utils.p2p_communication.SendRecvMeta.recv_meta +# _raw_send_meta = paddle.distributed.fleet.meta_parallel.pp_utils.p2p_communication.SendRecvMeta.send_meta + + +# def _load_micro_batch(self, micro_step): +# output = _raw_load_micro_batch(self, micro_step) +# print("=" * 20, "_load_micro_batch", output) +# return output + +# def _forward_step(self, input_tensor, micro_dataset, chunk_id=None): +# if True: # self.is_pipeline_first_stage(): +# print("=" * 20, "_forward_step input", input_tensor, self._p2p_helper._use_cache) +# output = _raw_forward_step(self, input_tensor, micro_dataset, chunk_id) +# print("=" * 20, "_forward_step output", output, self._p2p_helper._use_cache) +# return output + + +# def recv_forward(self, pp_first_stage, sync_recv=True): +# input_tensor = _raw_recv_forward(self, pp_first_stage, sync_recv) +# print("=" * 20, "recv_forward", input_tensor) +# return input_tensor + + +# def send_forward(self, output_tensor, pp_last_stage): +# output = _raw_send_forward(self, output_tensor, 
pp_last_stage) +# print("=" * 20, "send_forward", output_tensor) +# return output + + +# def recv_meta(self, group): +# output = _raw_recv_meta(self, group) +# print("=" * 20, "recv_meta", self.recv_shape_message, self.recv_dtype_message) +# return output + + +# def send_meta(self, tensor, group): +# output = _raw_send_meta(self, tensor, group) +# print("=" * 20, "send_meta", self.send_shape_message, self.send_dtype_message) +# return output + +# paddle.distributed.fleet.model.PipelineParallel._forward_step = _forward_step +# paddle.distributed.fleet.meta_parallel.pipeline_parallel.FakeMicroDataset._load_micro_batch = _load_micro_batch +# paddle.distributed.fleet.meta_parallel.pp_utils.p2p_communication.P2pHelper.recv_forward = recv_forward +# paddle.distributed.fleet.meta_parallel.pp_utils.p2p_communication.P2pHelper.send_forward = send_forward +# paddle.distributed.fleet.meta_parallel.pp_utils.p2p_communication.SendRecvMeta.recv_meta = recv_meta +# paddle.distributed.fleet.meta_parallel.pp_utils.p2p_communication.SendRecvMeta.send_meta = send_meta + + +def loss_fwd_wrapper(loss_maker): + def _wrapper(*args, **kwargs): + loss = loss_maker(*args, **kwargs) + ori_fwd = loss.forward + + def _fwd(self, output, label_info): + if isinstance(label_info, tuple): + loss = ori_fwd(self, output, *label_info) + else: + loss = ori_fwd(self, output, label_info) + return loss + + loss.forward = types.MethodType(_fwd, loss) + return loss + + return _wrapper + + +@paddle.no_grad() +def make_position_ids(attention_mask): + attention_mask = attention_mask.cast(paddle.int64) + position_ids = attention_mask.cumsum(-1) - 1 + position_ids = paddle.where(position_ids == -1, attention_mask, position_ids) + return position_ids + + +@paddle.no_grad() +def pad_batches_inputs(inputs, padding_value=0, max_len=None): + """Pad length for tensors shaped [bs, seq_len] to [bs, max(seq_lens)]""" + if max_len is None: + # max_len = max([x.shape[-1] for x in inputs if x is not None]) + max_len = max([x.shape[-1] if isinstance(x, paddle.Tensor) else 0 for x in inputs]) + for i in range(len(inputs)): + x = inputs[i] + # if x is None or x.shape[-1] == max_len: + if not isinstance(x, paddle.Tensor) or x.shape[-1] == max_len: + continue + inputs[i] = paddle.concat( + [x, paddle.full([x.shape[0], max_len - x.shape[-1]], padding_value, dtype=x.dtype)], -1 + ) + return inputs + + +def get_expected_keys(inputs, keys): + ret = tuple([inputs.get(k, None) for k in keys if k in inputs]) + if len(ret) == 1: + ret = ret[0] + return ret + + +# patches for base pipe model +# non-pipe model class, can be used to parse and convert forward args +LlamaForCausalLMPipe._non_pipe_model_class = LlamaForCausalLM + + +def fwd_args_to_dict(fun): + def _impl(self, *args, **kwargs): + try: + return fun(self, *args, **kwargs) + except TypeError: + # otherwise, inputs is any valid format of non_pipe_model forward args, + # convert to dict, to support more args format in prediction_pipeline_step + import inspect + + arg_dict = ( + inspect.signature(self._non_pipe_model_class.forward).bind(*((self,) + args), **kwargs).arguments + ) + arg_dict.pop("self") + return fun(self, arg_dict) + + return _impl + + +class LlamaPolicyPipe(LlamaForCausalLMPipe): + # TODO(guosheng): maybe make a Mixin is better + + @fwd_args_to_dict + def _prepare_pipeline_inputs_func(self, inputs): + first_stage_keys = ["input_ids", "attention_mask"] + # last_stage_keys = [ + # "labels", "input_ids", "log_probs", "advantages", "sequence_mask" + # ] + # TODO(guosheng): make input keys same 
with model arg names, maybe we + # can use inspect and set as global var which can then be used here and + # in PPOTrainer. + last_stage_keys = ["labels", "input_ids", "old_log_probs", "reward_advantages", "sequence_mask"] + + if type(inputs) is dict: + # ppo-loss and ptx-loss need different labels, and data iter provides + # corrensponding data, thus add the not provided fields here. + for key in last_stage_keys: + if key not in inputs: + inputs[key] = None + return [ + get_expected_keys(inputs, first_stage_keys), + get_expected_keys(inputs, last_stage_keys), + ] + + for data in inputs: + for key in last_stage_keys: + if key not in data: + data[key] = None + # keys = list(inputs[0].keys()) + inputs_batch = {key: [data.get(key) for data in inputs] for key in first_stage_keys + last_stage_keys} + # NOTE(guosheng): PipelineParallel requires send/recv tensors among + # micro-batches/accu-steps have the same shape. Thus pad here, maybe + # should make data collator do padding and pad optionally here, since + # padding strategy may not be clear here. + # Some data fields, such as input_ids/attention_mask/labels, should + # have same shape after padding, and each of them cannot pad only + # according to its own max length which might be different since the + # filed value is None for different batches/tasks. + max_len = max([x.shape[-1] for x in inputs_batch["input_ids"]]) + for key, value in inputs_batch.items(): + padding_value = self._ignore_index if key == "labels" else 0 + max_len = max_len if key in ["input_ids", "attention_mask", "labels"] else None + inputs_batch[key] = pad_batches_inputs(value, padding_value, max_len) + return [ + get_expected_keys(inputs_batch, first_stage_keys), + get_expected_keys(inputs_batch, last_stage_keys), + ] + + def __init__(self, config, **kwargs): + # NOTE: make _sequential_layers/_single_to_pp_mapping/_pp_to_single_mapping + # instance attrs instead of class attrs to support more than one pipeline + # models. Maybe make all sequential_layers add once. 
+ self._sequential_layers = [] + self._single_to_pp_mapping = None + self._pp_to_single_mapping = None + # To be consistent with score model init and allow hyper-param be passed + # using __init__/from_pretrained + self._init_kwargs = kwargs + super().__init__(config) + self._ignore_index = self._loss_fn.sft_criterion.ignore_index + + # @loss_fwd_wrapper + def get_loss_fn(self, config): + init_kwargs = self._init_kwargs + return RLHFPPOMixedLoss(config, **init_kwargs) + + @property + def head_out_meta(self): + # None means to use actual data info + return paddle.static.InputSpec(shape=[None, None, self.config.vocab_size], dtype=None) + + +class _LlamaRMSNormPipe(LlamaRMSNormPipe): + def __init__(self, config): + super().__init__(config) + + def forward(self, args): + hidden_states, attention_mask, position_ids, alibi = parse_args(args) + return return_args(self.norm(hidden_states), attention_mask, position_ids) + + +# LayerDesc of PipelineParallel requires head to be a nn.Layer +class ValueHead(nn.Layer, ScoreModelMixin): + def __init__(self, config, **kwargs): + super().__init__() + self.config = config + self.init_score_head(config, hidden_size=config.hidden_size, **kwargs) + + def forward(self, args): + # attention_mask passed from pre-stage is shaped (bs, 1, seq_len, seq_len) + hidden_state, attention_mask, position_ids, alibi = parse_args(args) + outputs = self.get_score( + hidden_state, attention_mask=attention_mask, position_ids=position_ids, return_dict=True + ) + return outputs + + +class LlamaValuePipe(LlamaForCausalLMPipe): + # TODO(guosheng): maybe make a Mixin is better + + @fwd_args_to_dict + def _prepare_pipeline_inputs_func(self, inputs): + # ValueHead/get_score needs original attention_mask or position_ids, + # while attention_mask passed from pre-stage is not the original, thus + # hack for position_ids here. + # Maybe add position_ids into inputs later and use position_ids instead + # of attention_mask to get score not only for pipeline parallel. + first_stage_keys = ["input_ids", "attention_mask", "position_ids"] + # TODO(guosheng): make input keys same with model arg names, maybe we + # can use inspect and set as global var which can then be used here and + # in PPOTrainer. + last_stage_keys = ["old_reward_values", "reward_returns", "sequence_mask"] + + if type(inputs) is dict: + if "position_ids" not in inputs: + inputs["position_ids"] = make_position_ids(inputs["attention_mask"]) + + return [ + get_expected_keys(inputs, first_stage_keys), + get_expected_keys(inputs, last_stage_keys), + ] + + # keys = list(inputs[0].keys()) + inputs_batch = {key: [data.get(key) for data in inputs] for key in first_stage_keys + last_stage_keys} + # NOTE(guosheng): PipelineParallel requires send/recv tensors among + # micro-batches/accu-steps have the same shape. Thus pad here, maybe + # should make data collator do padding and pad optionally here, since + # padding strategy may not be clear here. + for key, value in inputs_batch.items(): + inputs_batch[key] = pad_batches_inputs(value, padding_value=0) + if "position_ids" not in inputs: + inputs_batch["position_ids"] = [ + make_position_ids(attention_mask) for attention_mask in inputs_batch["attention_mask"] + ] + return [ + get_expected_keys(inputs_batch, first_stage_keys), + get_expected_keys(inputs_batch, last_stage_keys), + ] + + def __init__(self, config, **kwargs): + # NOTE: make _sequential_layers/_single_to_pp_mapping/_pp_to_single_mapping + # instance attrs instead of class attrs to support more than one pipeline + # models. 
Maybe make all sequential_layers add once. + self._sequential_layers = [] + self._single_to_pp_mapping = None + self._pp_to_single_mapping = None + # To be consistent with score model init and allow hyper-param be passed + # using __init__/from_pretrained + self._init_kwargs = kwargs + super().__init__(config) + + def add_head(self, config): + init_kwargs = self._init_kwargs + # hack to replace original RMSNormPipe to support ValueHead inputs + norm_prefix = self._sequential_layers.pop(-1)["name_prefix"] + self.add_sequential_layer(LayerDesc(_LlamaRMSNormPipe, config=config), norm_prefix) + self.add_sequential_layer(LayerDesc(ValueHead, config, **init_kwargs), "") + + # @loss_fwd_wrapper + def get_loss_fn(self, config): + init_kwargs = self._init_kwargs + # TODO(guosheng): make wraper for loss to make original loss adapt to + # pipeline only one args + return RLHFValueLoss(config, **init_kwargs) + + @property + def head_out_meta(self): + # None means to use actual data info + return ( + paddle.static.InputSpec(shape=[None, None, 1], dtype=None), + paddle.static.InputSpec(shape=[None, 1], dtype=None), + ) diff --git a/examples/RLHF/models/ppo_model.py b/examples/RLHF/models/ppo_model.py new file mode 100644 index 000000000000..486a1cc822e7 --- /dev/null +++ b/examples/RLHF/models/ppo_model.py @@ -0,0 +1,117 @@ +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from paddlenlp.transformers import LlamaForCausalLM, PretrainedConfig + +from .ppo_model_utils import PolicyOutput, RLHFPPOMixedLoss, RLHFValueLoss, ValueOutput +from .score_model import LlamaModelForScore + + +# TODO(guosheng): create Mixin and make model classes using metaclass. 
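+# A rough, hypothetical sketch of the mixin shape the TODO above refers to
+# (the names RLLossMixin/PolicyModelViaMixin/loss_cls are illustrative only and
+# not part of this patch; the two model classes mainly differ in their base
+# model and loss class):
+#
+#     class RLLossMixin:
+#         loss_cls = None  # e.g. RLHFPPOMixedLoss or RLHFValueLoss
+#
+#         def __init__(self, config):
+#             super().__init__(config)
+#             self.loss_fn = self.loss_cls(config)
+#
+#     class PolicyModelViaMixin(RLLossMixin, LlamaForCausalLM):
+#         loss_cls = RLHFPPOMixedLoss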
+class LlamaPolicyModel(LlamaForCausalLM): + def __init__(self, config: PretrainedConfig): + super().__init__(config) + self.loss_fn = RLHFPPOMixedLoss(config) + + def forward( + self, + input_ids=None, + position_ids=None, + attention_mask=None, + inputs_embeds=None, + labels=None, + use_cache=False, + past_key_values=None, + log_probs=None, + advantages=None, + sequence_mask=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + ): + outputs = super().forward( + input_ids=input_ids, + position_ids=position_ids, + attention_mask=attention_mask, + inputs_embeds=inputs_embeds, + labels=None, + use_cache=use_cache, + past_key_values=past_key_values, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + logits = outputs[0] + loss = None + if labels is not None or advantages is not None: + loss = self.loss_fn(logits, (labels, input_ids, log_probs, advantages, sequence_mask)) + if not return_dict: + return (loss,) + outputs if loss is not None else outputs + + return PolicyOutput( + loss=loss, + logits=logits, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +class LlamaValueModel(LlamaModelForScore): + def __init__(self, config, **kwargs): + super().__init__(config, **kwargs) + self.loss_fn = RLHFValueLoss(config) + + def forward( + self, + input_ids=None, + position_ids=None, + attention_mask=None, + inputs_embeds=None, + use_cache=False, + past_key_values=None, + old_values=None, + returns=None, + sequence_mask=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + ): + outputs = super().forward( + input_ids=input_ids, + position_ids=position_ids, + attention_mask=attention_mask, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + past_key_values=past_key_values, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + reward_values, rewards = outputs + loss = None + if returns is not None: + loss = self.loss_fn(reward_values, old_values, returns, sequence_mask) + if not return_dict: + return (loss,) + outputs if loss is not None else outputs + + return ValueOutput( + loss=loss, + value=reward_values, + reward=rewards, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) diff --git a/examples/RLHF/models/ppo_model_utils.py b/examples/RLHF/models/ppo_model_utils.py new file mode 100644 index 000000000000..cb17dfd70d86 --- /dev/null +++ b/examples/RLHF/models/ppo_model_utils.py @@ -0,0 +1,162 @@ +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +# Copyright 2023 PKU-Alignment Team. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+"""Utilities for score models.""" + +from __future__ import annotations + +from dataclasses import dataclass +from typing import Optional, Tuple + +import paddle +import paddle.nn as nn +import paddle.nn.functional as F + +# use LlamaPretrainingCriterion as common PretrainingCriterion +from paddlenlp.transformers import LlamaPretrainingCriterion as PretrainingCriterion +from paddlenlp.transformers.model_outputs import ModelOutput + + +@dataclass +class PolicyOutput(ModelOutput): + loss: Optional[paddle.Tensor] = None + logits: paddle.Tensor = None + # logits_entropy: Optional[paddle.Tensor] = None + past_key_values: Optional[Tuple[Tuple[paddle.Tensor]]] = None + hidden_states: Optional[Tuple[paddle.Tensor]] = None + attentions: Optional[Tuple[paddle.Tensor]] = None + cross_attentions: Optional[Tuple[paddle.Tensor]] = None + + +@dataclass +class ValueOutput(ModelOutput): + loss: Optional[paddle.Tensor] = None + value: paddle.Tensor = None + reward: paddle.Tensor = None + past_key_values: Optional[Tuple[Tuple[paddle.Tensor]]] = None + hidden_states: Optional[Tuple[paddle.Tensor]] = None + attentions: Optional[Tuple[paddle.Tensor]] = None + cross_attentions: Optional[Tuple[paddle.Tensor]] = None + + +def gather_log_probabilities(logits: paddle.Tensor, labels: paddle.Tensor) -> paddle.Tensor: + """Gather log probabilities of the given labels from the logits.""" + log_probs = F.log_softmax(logits, axis=-1) + log_probs_labels = paddle.take_along_axis(log_probs, axis=-1, indices=labels.unsqueeze(axis=-1)) + return log_probs_labels.squeeze(axis=-1) + + +class RLHFPPOLoss(nn.Layer): + def __init__(self, config, **kwargs): + super().__init__() + self.clip_range_ratio = kwargs.pop("clip_range_ratio", getattr(config, "clip_range_ratio", 0.2)) + self.config = config + + def actor_loss_fn( + self, log_probs: paddle.Tensor, old_log_probs: paddle.Tensor, advantages: paddle.Tensor, mask: paddle.Tensor + ) -> paddle.Tensor: + # policy gradient loss + ratio = paddle.exp(log_probs - old_log_probs) + pg_loss1 = -advantages * ratio + pg_loss2 = -advantages * paddle.clip( + ratio, + 1.0 - self.clip_range_ratio, + 1.0 + self.clip_range_ratio, + ) + return paddle.sum(paddle.maximum(pg_loss1, pg_loss2) * mask) / mask.sum() + + def forward(self, logits, input_ids, old_log_probs, reward_advantages, sequence_mask, start=None): + log_probs = gather_log_probabilities(logits[:, :-1], input_ids[:, 1:]) + if start is not None: + old_log_probs = old_log_probs[:, start:] + sequence_mask = sequence_mask[:, start:] + actor_loss = self.actor_loss_fn( + log_probs[:, -old_log_probs.shape[1] :], + old_log_probs, + reward_advantages, + sequence_mask, + ) + return actor_loss + + +class RLHFPPOMixedLoss(nn.Layer): + """provide two losses, one for PPO loss, the other for SFT loss.""" + + def __init__(self, config, **kwargs): + super(RLHFPPOMixedLoss, self).__init__() + self.ptx_coeff = kwargs.pop("ptx_coeff", getattr(config, "ptx_coeff", 16.0)) + self.ppo_criterion = RLHFPPOLoss(config, **kwargs) + self.sft_criterion = PretrainingCriterion(config) + + def forward(self, logits, label_info): + labels, input_ids, old_log_probs, reward_advantages, sequence_mask = label_info + + loss = None + # sft, pt loss + if labels is not None: + loss = self.ptx_coeff * self.sft_criterion(logits, labels) + # ppo loss + if reward_advantages is not None: + loss = self.ppo_criterion(logits, input_ids, old_log_probs, reward_advantages, sequence_mask) + + return loss + + +class RLHFValueLoss(nn.Layer): + def __init__(self, config, **kwargs): + 
super().__init__() + self.clip_range_value = kwargs.pop("clip_range_value", getattr(config, "clip_range_value", 5.0)) + self.config = config + + def critic_loss_fn( + self, + values: paddle.Tensor, + old_values: paddle.Tensor, + returns: paddle.Tensor, + mask: paddle.Tensor, + ) -> paddle.Tensor: + """Compute critic loss.""" + # TODO(guosheng): use paddle.clip when its min/max can support more than + # 0D Tensor + values_clipped = paddle.minimum( + paddle.maximum(values, old_values - self.clip_range_value), old_values + self.clip_range_value + ) + vf_loss1 = paddle.square(values - returns) + vf_loss2 = paddle.square(values_clipped - returns) + return 0.5 * paddle.sum(paddle.maximum(vf_loss1, vf_loss2) * mask) / mask.sum() + + def forward( + self, + reward_values, + # old_reward_values, + # reward_returns, + # sequence_mask, + # start=None, + label_info, + ): + if not isinstance(reward_values, paddle.Tensor): + reward_values = reward_values[0] + old_reward_values, reward_returns, sequence_mask = label_info + # if start is not None: + # old_reward_values = old_reward_values[:, start:] + # sequence_mask = sequence_mask[:, start:] + reward_values = reward_values.squeeze(axis=-1)[:, :-1] + reward_critic_loss = self.critic_loss_fn( + reward_values[:, -old_reward_values.shape[1] :], + old_reward_values, + reward_returns, + sequence_mask, + ) + + return reward_critic_loss diff --git a/examples/RLHF/models/score_model.py b/examples/RLHF/models/score_model.py index dd3741596a9e..791e2d4a19a2 100644 --- a/examples/RLHF/models/score_model.py +++ b/examples/RLHF/models/score_model.py @@ -88,6 +88,7 @@ def forward( # pylint: disable=too-many-arguments return self.get_score( hidden_states, attention_mask=attention_mask, + position_ids=position_ids, return_dict=return_dict, ) diff --git a/examples/RLHF/models/score_model_utils.py b/examples/RLHF/models/score_model_utils.py index 5d14f7995731..1161e4befb65 100644 --- a/examples/RLHF/models/score_model_utils.py +++ b/examples/RLHF/models/score_model_utils.py @@ -167,25 +167,46 @@ def init_score_head(self, config: PretrainedConfig, hidden_size: int, **kwargs: def get_score( self, hidden_state: paddle.Tensor, # size = (B, L, E) - attention_mask: paddle.Tensor, # size = (B, L) + attention_mask: paddle.Tensor | None = None, # size = (B, L) + position_ids: paddle.Tensor | None = None, # size = (B, L) return_dict: bool | None = None, ) -> ScoreModelOutput: """Forward pass of the score model.""" scores = self.score_head(hidden_state) # size = (B, L, D) - end_score = [] - for i in range(hidden_state.shape[0]): - end_index = attention_mask[i].nonzero()[-1].item() - end_score.append(scores[i, end_index]) # size = (D,) - end_score = paddle.stack(end_score, axis=0) # size = (B, D) + if position_ids is not None: + first_pos = paddle.arange(hidden_state.shape[0]).unsqueeze(-1) + # Take left padding into account, which has 0s in left and max_len + # in right. 
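+ # For example, a left-padded row with two pads has position_ids
+ # [0, 0, 0, 1, 2] (the first real token also gets 0), so
+ # left_pad_mask.sum(-1) - 1 equals the pad count; adding it shifts the
+ # non-pad positions to [3, 4], and the max, 4, is the absolute index of
+ # the last real token whose score is gathered.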
+ left_pad_mask = position_ids == 0 + position_ids = paddle.where( + left_pad_mask, position_ids, position_ids + left_pad_mask.sum(-1, keepdim=True) - 1 + ) + second_pos = paddle.max(position_ids, axis=-1, keepdim=True) + end_pos = paddle.stack([first_pos, second_pos], axis=-1).squeeze(1) + end_score = scores.gather_nd(end_pos) + else: + # attention_mask passed from pipeline pre-stage is shaped (bs, 1, seq_len, seq_len) + assert attention_mask is not None and len(attention_mask.shape) == 2 + end_score = [] + end_pos = [] + for i in range(hidden_state.shape[0]): + end_index = attention_mask[i].nonzero()[-1].item() + end_pos.append((i, end_index)) + end_score.append(scores[i, end_index]) # size = (D,) + end_score = paddle.stack(end_score, axis=0) # size = (B, D) if self.training: if dist.is_initialized(): - # TODO(guosheng): maybe only need nodes in data parallel group - # when support hybird dist parallel. - gathered_end_score_list = [paddle.zeros_like(end_score) for _ in range(dist.get_world_size())] - dist.all_gather(gathered_end_score_list, end_score) + gathered_end_score_list = [] + try: + # gather among data parallel group + hcg = dist.fleet.get_hybrid_communicate_group() + group = hcg.get_sharding_parallel_group() + dist.all_gather(gathered_end_score_list, end_score, group) + except: + dist.all_gather(gathered_end_score_list, end_score) gathered_end_score = paddle.concat(gathered_end_score_list, axis=0) self.normalizer.update(gathered_end_score) else: diff --git a/examples/RLHF/new_ppo_trainer.py b/examples/RLHF/new_ppo_trainer.py new file mode 100644 index 000000000000..d72737f3db17 --- /dev/null +++ b/examples/RLHF/new_ppo_trainer.py @@ -0,0 +1,1805 @@ +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
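+"""PPO training utilities: patches that expose pieces of ``Trainer.train``
+(model/optimizer init, state/log init, and a single ``full_training_step``) as
+reusable helpers, plus ``PolicyTrainer``, ``ValueTrainer``, ``PipeEvalModel``
+and the ``PPOTrainer`` that drives the actor/critic/reference/reward models in
+the RLHF loop."""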
+ +import copy +import itertools +import math +import os +import time +import types +from contextlib import contextmanager +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import numpy as np +import paddle +import paddle.nn as nn +import paddle.nn.functional as F +import tqdm +from data import DummyDataset, PromptOnlyBatch +from paddle.distributed import fleet +from paddle.io import DataLoader, Dataset, DistributedBatchSampler +from paddle.utils import map_structure +from rich.console import Console +from rich.table import Table + +from paddlenlp.data import DataCollator +from paddlenlp.generation import GenerationConfig +from paddlenlp.generation.utils import GenerationMixin +from paddlenlp.trainer.trainer import ( + TRAINER_STATE_NAME, + EvalLoopOutput, + EvalPrediction, + HybridParallelOptimizer, + NlpDistributedBatchSampler, + ShardingOption, + Trainer, + TrainerCallback, + TrainerControl, + TrainerState, + TrainingArguments, + _obtain_optimizer_parameters_list, + distributed_file, + distributed_isfile, + fused_allreduce_gradients, + logger, + reshard_util, + speed_metrics, + split_inputs_sequence_dim, +) +from paddlenlp.transformers import BatchEncoding, PretrainedModel, PretrainedTokenizer +from paddlenlp.transformers.configuration_utils import PretrainedConfig +from paddlenlp.transformers.model_outputs import ModelOutput +from paddlenlp.transformers.tokenizer_utils_base import ( + PaddingStrategy, + TruncationStrategy, +) + + +def batch_retokenize( + input_ids: paddle.Tensor, + src_tokenizer: PretrainedTokenizer, + dest_tokenizer: PretrainedTokenizer, + *, + padding: bool | str | PaddingStrategy = PaddingStrategy.LONGEST, + truncation: bool | str | TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, + skip_special_tokens: bool = True, +) -> BatchEncoding: + """Re-tokenize a batch of input ids from one tokenizer to another.""" + output = dest_tokenizer( + [ + text + dest_tokenizer.eos_token + for text in src_tokenizer.batch_decode( + input_ids, + skip_special_tokens=skip_special_tokens, + ) + ], + padding=padding, + truncation=truncation, + return_tensors="pd", + ) + return output + + +def gather_log_probabilities(logits: paddle.Tensor, labels: paddle.Tensor) -> paddle.Tensor: + """Gather log probabilities of the given labels from the logits.""" + log_probs = F.log_softmax(logits, axis=-1) + log_probs_labels = paddle.take_along_axis(log_probs, axis=-1, indices=labels.unsqueeze(axis=-1)) + return log_probs_labels.squeeze(axis=-1) + + +def init_train_model_opt( + self: Trainer, max_steps: int, resume_from_checkpoint: bool = False, clear_master_weight: bool = False +) -> PretrainedModel: + # Copy of model/optimizer init and resuming related code in `Trainer.train`. + # NOTE: this `_load_from_checkpoint` is indeed to load model states in the + # following elif-else branches, though they are apart away in `Trainer.train`. + if not self.args.should_load_sharding_stage1_model: + self._load_from_checkpoint(resume_from_checkpoint) + + # delay_optimizer_creation = ( + # self.sharding is not None + # and ShardingOption.SHARD_OP in self.args.sharding + # ) + delay_optimizer_creation = False + + if not delay_optimizer_creation: + self.create_optimizer_and_scheduler(num_training_steps=max_steps) + + if self.args.should_load_sharding_stage1_model: + model = self._wrap_model_and_load_sharded_checkpoint(resume_from_checkpoint) + elif self.args.should_save_sharding_stage1_model: + # In the non-sharded mode, should invoke _load_from_checkpoint before _wrap_model. 
+ # In this mode, the rank0 load all params and the _wrap_model implicitly broadcast params from rank0 to the other ranks. + model = self._wrap_model(self.model_wrapped) + if self.sharding_io is not None: + assert delay_optimizer_creation is False, "delay_optimizer_creation should be False" + # the self.optimizer should be wrapped and it is done in _wrap_model + self.sharding_io.set_optimizer(self.optimizer) + # for the rest of this function `model` is the outside model, whether it was wrapped or not + if model is not self.model: + self.model_wrapped = model + if delay_optimizer_creation: + self.create_optimizer_and_scheduler(num_training_steps=max_steps) + self._load_optimizer_and_scheduler(resume_from_checkpoint) + else: + model = self._wrap_model(self.model_wrapped) + # for the rest of this function `model` is the outside model, whether it was wrapped or not + if model is not self.model: + self.model_wrapped = model + if delay_optimizer_creation: + self.create_optimizer_and_scheduler(num_training_steps=max_steps) + self._load_optimizer_and_scheduler(resume_from_checkpoint) + + if ShardingOption.FULL_SHARD in self.args.sharding and clear_master_weight: + # for inference model to use Trainer sharding stage3, clear master_weight + # which is created in GroupShardedStage3.__init__ + self.optimizer._master_weights = None + + if self.args.device == "npu" and self.args.flatten_param_grads: + from .plugins.npu_plugin import npu_accelerate_plugin + + npu_accelerate_plugin(self.optimizer) + + return model + + +def init_train_state( + self: Trainer, + resume_from_checkpoint: bool, + train_dataloader: DataLoader, + max_steps: int, + num_train_epochs: int, + num_update_steps_per_epoch: int, +): + args = self.args + + self.state = TrainerState() + self.state.epoch = 0 + epochs_trained = 0 + steps_trained_in_current_epoch = 0 + steps_trained_progress_bar = None + + # Check if continuing training from a checkpoint + if resume_from_checkpoint is not None and distributed_isfile( + os.path.join(resume_from_checkpoint, TRAINER_STATE_NAME) + ): + self.state = TrainerState.load_from_json( + distributed_file(os.path.join(resume_from_checkpoint, TRAINER_STATE_NAME)) + ) + epochs_trained = self.state.global_step // num_update_steps_per_epoch + if not args.ignore_data_skip: + steps_trained_in_current_epoch = self.state.global_step % (num_update_steps_per_epoch) + steps_trained_in_current_epoch *= args.gradient_accumulation_steps + else: + steps_trained_in_current_epoch = 0 + + logger.info(" Continuing training from checkpoint, will skip to saved global_step") + logger.info(f" Continuing training from epoch {epochs_trained}") + logger.info(f" Continuing training from global step {self.state.global_step}") + if not args.ignore_data_skip: + logger.info( + f" Will skip the first {epochs_trained} epochs then the first {steps_trained_in_current_epoch} " + "batches in the first epoch. If this takes a lot of time, you can add the `--ignore_data_skip` " + "flag to your launch command, but you will resume the training on data already seen by your model." 
+ ) + if self.is_local_process_zero() and not args.disable_tqdm: + steps_trained_progress_bar = tqdm(total=steps_trained_in_current_epoch) + steps_trained_progress_bar.set_description("Skipping the first batches") + if not args.ignore_data_skip: + if isinstance(train_dataloader, paddle.io.DataLoader) and isinstance( + train_dataloader.batch_sampler, NlpDistributedBatchSampler + ): + consumed_samples = ( + self.state.global_step + * args.train_batch_size + * args.gradient_accumulation_steps + * args.dataset_world_size + ) + train_dataloader.batch_sampler.set_epoch(consumed_samples=consumed_samples) + logger.info(f"Set DistributedBatchSampler consumed_samples to {consumed_samples}") + + self.state.max_steps = int(max_steps) + self.state.num_train_epochs = num_train_epochs + self.state.is_local_process_zero = self.is_local_process_zero() + self.state.is_world_process_zero = self.is_world_process_zero() + + return epochs_trained, steps_trained_in_current_epoch, steps_trained_progress_bar + + +def init_train_log( + self: Trainer, + num_examples: int, + num_train_epochs: int, + total_train_batch_size: int, + max_steps: int, + num_train_samples: int, + model: PretrainedModel, +): + args = self.args + + logger.info("***** Running training *****") + logger.info(f" Num examples = {num_examples:,}") + logger.info(f" Num Epochs = {num_train_epochs}") + logger.info(f" Instantaneous batch size per device = {args.per_device_train_batch_size}") + logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_train_batch_size}") + logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") + logger.info(f" Total optimization steps = {max_steps:,}") + logger.info(f" Total num train samples = {num_train_samples:,}") + # per_device_trainable_numel = sum(p.numel().item() for p in model.parameters() if not p.stop_gradient) + # TODO: Temporary fix since Tensor.numel() not supported in distributed mode + per_device_trainable_numel = sum(np.prod(p.shape) for p in model.parameters() if not p.stop_gradient) + logger.info(f" Number of trainable parameters = {per_device_trainable_numel:,} (per device)") + if self.args.use_hybrid_parallel: + # todo fix for pipeline_parallel_degree + parts_num = max(self.args.tensor_parallel_degree, 1) * max(self.args.pipeline_parallel_degree, 1) + if parts_num > 1: + all_reduce_dtype = "int64" + if paddle.get_device().split(":")[0] in ["npu", "xpu"]: + # TODO(duanyanhui): fix when NPU all_reduce supports int64 + all_reduce_dtype = "float32" + trainable_numel_tensor = paddle.to_tensor(per_device_trainable_numel, dtype=all_reduce_dtype) + paddle.distributed.all_reduce(trainable_numel_tensor) + trainable_numel = int(trainable_numel_tensor.item()) // self.args.dataset_world_size + # the numel is roughly, because the tensor parallel still hold own bias or layer_norm weight without splited + # so, the trainable numel is a little bigger than real. + logger.info(f" Number of trainable parameters = {trainable_numel:,} (all devices, roughly)") + + +def full_training_step(self: Trainer, inputs: Dict[str, paddle.Tensor], **kwargs): + """ + Just a copy of single training step complete code in Trainer.train while loop + which including forward+backward+step, while wraps the inputs and outputs to + make the complicated copied code no need to change. Maybe a better way is to + add fine-grained methods including these steps to Trainer which is similar to + DeepSpeed engine. 
+ """ + + # TODO(guosheng): step, steps_trained_in_current_epoch and steps_trained_progress_bar + # should use reference since they would be overwrite. + # for state update + epoch = kwargs.get("epoch", 0) + step = kwargs.get("step", 0) + steps_in_epoch = kwargs.get("steps_in_epoch", 0) + step_control = kwargs.get("step_control", 0) + # for step and progress update when resuming data + train_dataloader = kwargs.get("train_dataloader", None) + resume_from_checkpoint = kwargs.get("resume_from_checkpoint", None) + steps_trained_in_current_epoch = kwargs.get("steps_trained_in_current_epoch", 0) + steps_trained_progress_bar = kwargs.get("steps_trained_progress_bar", None) + # for eval output ignore to gather + ignore_keys_for_eval = kwargs.get("ignore_keys_for_eval", None) + tr_loss = kwargs.get("tr_loss", 0.0) + model = kwargs.get("model", self.model_wrapped) + + args = self.args + + if self.args.use_hybrid_parallel and self.args.sep_parallel_degree > 1: + inputs = split_inputs_sequence_dim(inputs) + self.timers and self.timers("read-data").stop() + os.environ["TRAINER_GLOBAL_STEP"] = str(self.state.global_step) + self.callback_handler.on_load_data_end(args, self.state, self.control, inputs=inputs) + + # Skip past any already trained steps if resuming training + # for paddlenlp.utils.batch_sampler.DistributedBatchSampler + # We use consumed_samples to reset the status + if isinstance(train_dataloader, paddle.io.DataLoader) and isinstance( + train_dataloader.batch_sampler, NlpDistributedBatchSampler + ): + if step == 0: + if steps_trained_progress_bar is not None: + steps_trained_progress_bar.update(steps_trained_in_current_epoch) + steps_trained_progress_bar.close() + steps_trained_progress_bar = None + self._load_rng_state(resume_from_checkpoint) + step += steps_trained_in_current_epoch + elif steps_trained_in_current_epoch > 0: + steps_trained_in_current_epoch -= 1 + if steps_trained_progress_bar is not None: + steps_trained_progress_bar.update(1) + if steps_trained_in_current_epoch == 0: + self._load_rng_state(resume_from_checkpoint) + # continue + final_local_vars = locals() + for k in kwargs.keys(): + if k in final_local_vars: + kwargs[k] = final_local_vars[k] + return kwargs + elif steps_trained_progress_bar is not None: + steps_trained_progress_bar.close() + steps_trained_progress_bar = None + + if step_control % args.gradient_accumulation_steps == 0: + self.control = self.callback_handler.on_step_begin(args, self.state, self.control) + self.timers and self.timers("forward-backward").start() + + dp_enabled = self.args.data_parallel_degree > 1 if self.args.use_hybrid_parallel else args.local_rank != -1 + forbidden_no_sync = False + # stage2 and stage3 should not no_sync, because the is no DDP wrapper and no_sync API + # hybrid_parallel (tp or pp or sharding stage 1) should not no_sync + if self.args.use_hybrid_parallel: + forbidden_no_sync = True + + availiable_no_sync = dp_enabled and not forbidden_no_sync + + is_no_sync = ( + ((step_control + 1) % args.gradient_accumulation_steps != 0) + and availiable_no_sync + and args._no_sync_in_gradient_accumulation + ) or (args.recompute and availiable_no_sync) + # sharding + # stage1. the same as ddp + # stage2. manualy collect gradient on dp group + + dp_master_grad = self.args.world_size > 1 and self.args.amp_master_grad and not self.args.use_hybrid_parallel + if dp_master_grad: + is_no_sync = True + + if is_no_sync: + # Avoid unnecessary DDP synchronization since there will be no backward pass on this example. 
+ with model.no_sync(): + tr_loss_step = self.training_step(model, inputs) + else: + tr_loss_step = self.training_step(model, inputs) + + tr_loss += tr_loss_step + + if (step_control + 1) % args.gradient_accumulation_steps == 0 or ( + # last step in epoch but step is always smaller than gradient_accumulation_steps + steps_in_epoch <= args.gradient_accumulation_steps + and (step + 1) == steps_in_epoch + ): + if self.args.pipeline_parallel_degree <= 1 and self._enable_delay_scale_loss(): + tr_loss /= self.args.gradient_accumulation_steps + + self.timers and self.timers("forward-backward").stop() + # Maunally collect gradients + # Case 1: Use recompute and dp + # Case 2: Hack dp with master_grad + # Case 3: Pipeline or sharding overlap + # local_rank != -1 don't means dp in networks. + self.timers and self.timers("all-reduce").start() + + # Case 1: Use recompute and dp / sharding stage1, + # manualy collect gradient for dp. + if args.recompute and availiable_no_sync: + fused_allreduce_gradients(list(model.parameters()), None) + + # Case 2: hack dp with master_grad + if dp_master_grad and not (args.recompute and availiable_no_sync): + fused_allreduce_gradients(list(model.parameters()), None) + + # Pipeline parallel mode, handle gradient reduce here to overlap + pipeline_parallel_config = ( + set(args.pipeline_parallel_config.split(" ")) if args.pipeline_parallel_degree > 1 else set() + ) + enable_dp_comm_overlap = "enable_dp_comm_overlap" in pipeline_parallel_config + enable_release_grads = "enable_release_grads" in pipeline_parallel_config + + # Case 3: Pipeline parallel mode, overlap with dp + if isinstance(self.optimizer, HybridParallelOptimizer) and not self.do_grad_scaling: + parameters_list = _obtain_optimizer_parameters_list(self.optimizer._inner_opt) + + if not enable_dp_comm_overlap: + if self.optimizer._sharding_enable: + assert reshard_util.is_sharding_opt(self.optimizer) + self.optimizer._inner_opt.reduce_gradients(list(parameters_list), self.optimizer._hcg) + + if self.optimizer._dp_enable or getattr(self.optimizer, "_sep_enable", False): + fused_allreduce_gradients(list(parameters_list), self.optimizer._hcg) + + self.timers and self.timers("all-reduce").stop() + self.timers and self.timers("optimizer-step").start() + + if self.args.gradient_accumulation_steps > 1 and self._enable_delay_scale_loss(): + for p in model._layers.parameters(): + with paddle.no_grad(): + if hasattr(p, "main_grad") and p.main_grad is not None: + assert p.grad is None + p.main_grad.scale_(1.0 / self.args.gradient_accumulation_steps) + elif p.grad is not None: + p.grad.scale_(1.0 / self.args.gradient_accumulation_steps) + + # Optimizer step + self.callback_handler.on_optimizer_begin( + args, self.state, self.control, scaler=self.scaler if self.do_grad_scaling else None + ) + optimizer_was_run = True + if self.do_grad_scaling: + scale_before = paddle.assign(self.scaler._scale) + self.scaler.step(self.optimizer) + self.scaler.update() + scale_after = self.scaler._scale + optimizer_was_run = not self.scaler._cache_founf_inf + if not optimizer_was_run: + scale_before_value = scale_before.cpu().numpy() + scale_after_value = scale_after.cpu().numpy() + logger.warning( + f"optimizer not run, scale_before: {scale_before_value[0]}, scale_after: {scale_after_value[0]}" + ) + elif isinstance(self.optimizer, HybridParallelOptimizer): + self.optimizer._step(parameters_list) + else: + self.optimizer.step() + + self.timers and self.timers("optimizer-step").stop() + + if optimizer_was_run: + self.lr_scheduler.step() + + 
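+ # With `enable_release_grads` under pipeline parallel, gradients are dropped
+ # by releasing the comm-buffer grad storage instead of being zeroed in place,
+ # which is intended to reduce peak memory between optimizer steps.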
if enable_release_grads and args.pipeline_parallel_degree > 1: + self.optimizer.clear_grad(set_to_zero=False) + for _, buffers in model._chunk_2_comm_buffers.items(): + for buffer in buffers: + buffer._clear_grad_storage() + else: + self.optimizer.clear_grad() + + self.callback_handler.on_optimizer_end( + args, self.state, self.control, scaler=self.scaler if self.do_grad_scaling else None + ) + + self.state.global_step += 1 + self.state.epoch = epoch + (step + 1) / steps_in_epoch + self.control = self.callback_handler.on_step_end(args, self.state, self.control) + self._maybe_log_save_evaluate(tr_loss, model, epoch, ignore_keys_for_eval, inputs=inputs) + self._print_timer() + step_control = 0 + else: + self.control = self.callback_handler.on_substep_end(args, self.state, self.control) + step_control += 1 + + if self.control.should_epoch_stop or self.control.should_training_stop: + # break + final_local_vars = locals() + for k in kwargs.keys(): + if k in final_local_vars: + kwargs[k] = final_local_vars[k] + return kwargs + self.timers and self.timers("read-data").start() + + final_local_vars = locals() + for k in kwargs.keys(): + if k in final_local_vars: + kwargs[k] = final_local_vars[k] + return kwargs + + +Trainer.init_train_model_opt = init_train_model_opt +Trainer.init_train_log = init_train_log +Trainer.init_train_state = init_train_state +Trainer.full_training_step = full_training_step + + +class PolicyTrainer(Trainer): + def __init__( + self, + model: Union[PretrainedModel, nn.Layer] = None, + criterion: nn.Layer = None, + args: TrainingArguments = None, + data_collator: Optional[DataCollator] = None, + train_dataset: Optional[Dataset] = None, + eval_dataset: Union[Dataset, Dict[str, Dataset]] = None, + tokenizer: Optional[PretrainedTokenizer] = None, + compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None, + callbacks: Optional[List[TrainerCallback]] = None, + optimizers: Tuple[paddle.optimizer.Optimizer, paddle.optimizer.lr.LRScheduler] = (None, None), + preprocess_logits_for_metrics: Callable[[paddle.Tensor, paddle.Tensor], paddle.Tensor] = None, + ): + + super().__init__( + model, + criterion, + args, + data_collator, + train_dataset, + eval_dataset, + tokenizer, + compute_metrics, + callbacks, + optimizers, + preprocess_logits_for_metrics, + ) + + def actor_loss_fn( + self, + log_probs: paddle.Tensor, + old_log_probs: paddle.Tensor, + advantages: paddle.Tensor, + mask: paddle.Tensor, + ) -> paddle.Tensor: + # policy gradient loss + ratio = paddle.exp(log_probs - old_log_probs) + pg_loss1 = -advantages * ratio + pg_loss2 = -advantages * paddle.clip( + ratio, + 1.0 - self.clip_range_ratio, + 1.0 + self.clip_range_ratio, + ) + return paddle.sum(paddle.maximum(pg_loss1, pg_loss2) * mask) / mask.sum() + + def compute_loss(self, model, inputs, return_outputs=False): + """ + How the loss is computed by Trainer. By default, all models return the loss in the first element. + Subclass and override for custom behavior. + """ + labels = inputs.get("labels", None) + if labels is not None: + labels = inputs.get("labels", None) + outputs = model(**inputs) + ptx_loss = outputs["loss"] if isinstance(outputs, dict) else outputs[0] + ptx_loss = self.ptx_coeff * ptx_loss + return ptx_loss + + input_ids = inputs["input_ids"] + attention_mask = inputs["attention_mask"] + reward_advantages = inputs["reward_advantages"] + # NOTE: TensorParallel model requires non-Tensor inputs to be lists and + # broadcast them, thus do not or optionally use these inputs currently. 
+ # use_cache = inputs["use_cache"] + # return_dict = inputs["return_dict"] + start = inputs.pop("start", None) + old_log_probs = inputs["old_log_probs"][:, start:] if start is not None else inputs["old_log_probs"] + sequence_mask = inputs["sequence_mask"][:, start:] if start is not None else inputs["sequence_mask"] + outputs = model( + input_ids=input_ids, + attention_mask=attention_mask, # use_cache=use_cache, return_dict=return_dict + ) + + logits = outputs["logits"] if isinstance(outputs, dict) else outputs + if isinstance(outputs, dict): + logits = outputs["logits"] + elif isinstance(outputs, tuple): + logits = outputs[0] + + log_probs = gather_log_probabilities(logits[:, :-1], input_ids[:, 1:]) + actor_loss = self.actor_loss_fn( + log_probs[:, -old_log_probs.shape[1] :], + old_log_probs, + reward_advantages, + sequence_mask, + ) + + return actor_loss + + def full_training_step(self: Trainer, inputs: Dict[str, paddle.Tensor], **kwargs): + labels = inputs.get("labels", None) + if labels is not None: # use ptx + loss_name = "ptx_loss" + else: + loss_name = "actor_loss" + kwargs["model"] = kwargs.pop("policy_model") + kwargs["step_control"] = kwargs.pop("policy_step_control") + kwargs["tr_loss"] = kwargs.pop(loss_name) + kwargs = super().full_training_step(inputs, **kwargs) + kwargs["policy_model"] = kwargs.pop("model") + kwargs["policy_step_control"] = kwargs.pop("step_control") + kwargs[loss_name] = kwargs.pop("tr_loss") + return kwargs + + +class ValueTrainer(Trainer): + def __init__( + self, + model: Union[PretrainedModel, nn.Layer] = None, + criterion: nn.Layer = None, + args: TrainingArguments = None, + data_collator: Optional[DataCollator] = None, + train_dataset: Optional[Dataset] = None, + eval_dataset: Union[Dataset, Dict[str, Dataset]] = None, + tokenizer: Optional[PretrainedTokenizer] = None, + compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None, + callbacks: Optional[List[TrainerCallback]] = None, + optimizers: Tuple[paddle.optimizer.Optimizer, paddle.optimizer.lr.LRScheduler] = (None, None), + preprocess_logits_for_metrics: Callable[[paddle.Tensor, paddle.Tensor], paddle.Tensor] = None, + ): + + super().__init__( + model, + criterion, + args, + data_collator, + train_dataset, + eval_dataset, + tokenizer, + compute_metrics, + callbacks, + optimizers, + preprocess_logits_for_metrics, + ) + + def critic_loss_fn( + self, + values: paddle.Tensor, + old_values: paddle.Tensor, + returns: paddle.Tensor, + mask: paddle.Tensor, + ) -> paddle.Tensor: + """Compute critic loss.""" + # TODO(guosheng): use paddle.clip when its min/max can support more than + # 0D Tensor + values_clipped = paddle.minimum( + paddle.maximum(values, old_values - self.clip_range_value), old_values + self.clip_range_value + ) + vf_loss1 = paddle.square(values - returns) + vf_loss2 = paddle.square(values_clipped - returns) + return 0.5 * paddle.sum(paddle.maximum(vf_loss1, vf_loss2) * mask) / mask.sum() + + def compute_loss(self, model, inputs, return_outputs=False): + """ + How the loss is computed by Trainer. By default, all models return the loss in the first element. + Subclass and override for custom behavior. + """ + input_ids = inputs["input_ids"] + attention_mask = inputs["attention_mask"] + reward_returns = inputs["reward_returns"] + # NOTE: TensorParallel model requires non-Tensor inputs to be lists and + # broadcast them, thus do not or optionally use these inputs currently. 
+ # use_cache = inputs["use_cache"] + # return_dict = inputs["return_dict"] + start = inputs.pop("start", None) + old_reward_values = ( + inputs["old_reward_values"][:, start:] if start is not None else inputs["old_reward_values"] + ) + sequence_mask = inputs["sequence_mask"][:, start:] if start is not None else inputs["sequence_mask"] + outputs = model( + input_ids=input_ids, + attention_mask=attention_mask, # use_cache=use_cache, return_dict=return_dict + ) + + # We don't use .loss here since the model may return tuples instead of ModelOutput. + reward_values = outputs["scores"] if isinstance(outputs, dict) else outputs + if isinstance(outputs, dict): + reward_values = outputs["scores"] + elif isinstance(outputs, tuple): + reward_values = outputs[0] + + reward_values = reward_values.squeeze(axis=-1)[:, :-1] + reward_critic_loss = self.critic_loss_fn( + reward_values[:, -old_reward_values.shape[1] :], + old_reward_values, + reward_returns, + sequence_mask, + ) + + return reward_critic_loss + + def full_training_step(self: Trainer, inputs: Dict[str, paddle.Tensor], **kwargs): + # TODO(guosheng): Make these training control vars mapping as class attr, + # then PPOTrainer can extract and reuse them to avoid hard code. + kwargs["model"] = kwargs.pop("value_model") + kwargs["step_control"] = kwargs.pop("value_step_control") + kwargs["tr_loss"] = kwargs.pop("reward_critic_loss") + kwargs = super().full_training_step(inputs, **kwargs) + kwargs["value_model"] = kwargs.pop("model") + kwargs["value_step_control"] = kwargs.pop("step_control") + kwargs["reward_critic_loss"] = kwargs.pop("tr_loss") + return kwargs + + +@contextmanager +def guard_set_args(args, arg_name_values): + for k, v in arg_name_values.items(): + old_value = getattr(args, k, None) + setattr(args, k, v) + arg_name_values[k] = old_value + yield + for k, v in arg_name_values.items(): + old_value = getattr(args, k) + setattr(args, k, v) + arg_name_values[k] = old_value + + +class MuteDefaultFlowCallback(TrainerCallback): + def on_step_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs): + control.should_save = False + control.should_evaluate = False + control.should_log = False + return control + + +def is_same_tokenizer( + tokenizer: PretrainedTokenizer, + other_tokenizer: PretrainedTokenizer, +) -> bool: + """Check if two tokenizers are the same.""" + return tokenizer is other_tokenizer or ( + tokenizer.__class__ == other_tokenizer.__class__ and tokenizer.get_vocab() == other_tokenizer.get_vocab() + ) + + +class PipeEvalModel(GenerationMixin): + def __init__(self, trainer: Trainer): + self.model: fleet.model.PipelineParallel = trainer.model_wrapped + self.config: PretrainedConfig = trainer.model.config + self._is_gen = False + # self.gen_fn = None + # self.fwd_fn = None + # use non-pipe model generetion related methods + self.prepare_inputs_for_generation = types.MethodType( + self.model._layers._non_pipe_model_class.prepare_inputs_for_generation, self + ) + self.update_model_kwargs_for_generation = ( + self.model._layers._non_pipe_model_class.update_model_kwargs_for_generation + ) + + def eval(self): + self.model.eval() + + def train(self): + self.model.train() + + def _broadcast_outputs(self, outputs): + # outputs is PipelineParallel.eval_batch which is a list of batches. 
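+ # Only the last pipeline stage holds real head outputs: other stages allocate
+ # empty tensors shaped from head_out_meta (falling back to the local
+ # placeholder tensor for dynamic dims) and receive the values broadcast from
+ # the last pp rank, so every rank returns identical outputs.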
+ out = [] + outputs = (outputs,) if isinstance(outputs, paddle.Tensor) else outputs + for tensors in outputs: + if not self.model.is_pipeline_last_stage(): + tensor = tensors if isinstance(tensors, paddle.Tensor) else tensors[0] + head_out_meta = ( + (self.model._layers.head_out_meta,) + if isinstance(self.model._layers.head_out_meta, paddle.static.InputSpec) + else self.model._layers.head_out_meta + ) + tensors = tuple( + paddle.empty( + shape=[ + tensor.shape[i] if (meta.shape[i] is None or meta.shape[i] < 0) else meta.shape[i] + for i in range(len(meta.shape)) + ], + dtype=tensor.dtype if meta.dtype is None else meta.dtype, + ) + for meta in head_out_meta + ) + else: + # Currently use tuple instead of ModelOutput and require the + # caller use the return result as tuple. + tensors = ( + (tensors,) + if isinstance(tensors, paddle.Tensor) + else tensors.to_tuple() + if isinstance(tensors, ModelOutput) + else tensors + ) + + # map_structure( + # lambda tensor: paddle.distributed.broadcast( + # tensor, + # src=self.model.pp_group.ranks[-1], + # group=self.model.pp_group), tensors) + for tensor in tensors: + paddle.distributed.broadcast(tensor, src=self.model.pp_group.ranks[-1], group=self.model.pp_group) + out.append(tensors[0] if len(tensors) == 1 else tensors) + return out[0] if len(out) == 1 else out + + def __call__(self, *args, **kwargs): + model = self.model + assert self.model.training is False + # TODO(guosheng): hack for post-process in eval, so we can let last stage + # do more to reduce comm overhead. + if self._is_gen: + # inputs by `prepare_inputs_for_generation` is a dict with following keys: + # "input_ids", "position_ids", "past_key_values", "use_cache", "attention_mask" + # NOTE: cache/past_key_values should be rather than pass like + pass + else: + # use _prepare_pipeline_inputs_func to convert pipeline inputs + inputs, labels = model._prepare_pipeline_inputs_func(*args, **kwargs) + # NOTE(guosheng): bug seems exist. pp.eval_batch(compute_loss=False) + # will set pp._compute_loss to False and would not set it back. Thus + # hack here to set it back. 
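+ # guard_set_args (defined above for argument objects) is reused here on the
+ # model object itself: it temporarily forces PipelineParallel._compute_loss to
+ # False so that eval_batch returns raw head outputs rather than a loss, and
+ # restores the previous value once the `with` block exits.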
+ with guard_set_args(model, {"_compute_loss": False}): + outputs = model.eval_batch([inputs, labels], compute_loss=False) + outputs = self._broadcast_outputs(outputs) + return outputs + + def generate(self, *args, **kwargs): + # when generate, cache should be + self._is_gen = True + super().generate(*args, **kwargs) + self._is_gen = False + + +class PPOTrainer(Trainer): + def __init__( + self, + model: Union[PretrainedModel, nn.Layer] = None, + criterion: nn.Layer = None, + args: TrainingArguments = None, + data_collator: Optional[DataCollator] = None, + train_dataset: Optional[Dataset] = None, + ptx_dataset: Optional[Dataset] = None, + eval_dataset: Union[Dataset, Dict[str, Dataset]] = None, + tokenizer: Optional[PretrainedTokenizer] = None, + compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None, + callbacks: Optional[List[TrainerCallback]] = None, + optimizers: Tuple[paddle.optimizer.Optimizer, paddle.optimizer.lr.LRScheduler] = (None, None), + preprocess_logits_for_metrics: Callable[[paddle.Tensor, paddle.Tensor], paddle.Tensor] = None, + ): + with guard_set_args( + args, + { + "recompute": False, + "fp16_opt_level": "O1", + "pipeline_parallel_degree": 1, # workaround for pipeline parallel model check + }, + ): + # just used to create trival attrs might be used in the training + # process of trainer, while changing some args to avoid model usage + # in __init__ such as recompute and AMP-O2 + super().__init__( + model, + criterion, + args, + data_collator, + train_dataset, + eval_dataset, + tokenizer, + compute_metrics, + callbacks, + optimizers, + preprocess_logits_for_metrics, + ) + + self.train_dataset = train_dataset + self.ptx_dataset = ptx_dataset + self.eval_dataset = eval_dataset + + (policy_model, reference_model, reward_model, value_model) = model + # policy_tokenizer and value_tokenizer should be same + (policy_tokenizer, reference_tokenizer, reward_tokenizer, value_tokenizer) = tokenizer + + policy_training_args = copy.deepcopy(args) + self.use_ptx = self.ptx_dataset is not None + if self.use_ptx: + policy_training_args.gradient_accumulation_steps *= 2 + self.policy_trainer = PolicyTrainer( + policy_model, + criterion, + policy_training_args, + data_collator, + train_dataset, + eval_dataset, + policy_tokenizer, + compute_metrics, + callbacks, + optimizers, + preprocess_logits_for_metrics, + ) + value_training_args = copy.deepcopy(args) + for attr_name in [ + "critic_learning_rate", + "critic_weight_decay", + "critic_lr_scheduler_type", + "critic_warmup_ratio", + "critic_recompute", + ]: + if getattr(value_training_args, attr_name, None) is not None: + setattr(value_training_args, attr_name[len("critic_") :], getattr(value_training_args, attr_name)) + self.value_trainer = ValueTrainer( + value_model, + criterion, + value_training_args, + data_collator, + train_dataset, + eval_dataset, + value_tokenizer, + compute_metrics, + callbacks, + optimizers, + preprocess_logits_for_metrics, + ) + + # use trainer for reference_model/reward_model to enable sharding stage-3 + # maybe we should allow models to use different dist strategies later + if True: # ShardingOption.FULL_SHARD in args.sharding: + self.reference_trainer = Trainer( + reference_model, + criterion, + args, + data_collator, + train_dataset, + eval_dataset, + reference_tokenizer, + compute_metrics, + callbacks, + optimizers, + preprocess_logits_for_metrics, + ) + self.reward_trainer = Trainer( + reward_model, + criterion, + args, + data_collator, + train_dataset, + eval_dataset, + reward_tokenizer, + 
compute_metrics, + callbacks, + optimizers, + preprocess_logits_for_metrics, + ) + # TODO(guosheng): sharding stage3 should create master weight optionally + # instead of creation and clear. + self.reference_trainer.init_train_model_opt(100, None, clear_master_weight=True) # dummy max_steps + self.reward_trainer.init_train_model_opt(100, None, clear_master_weight=True) # dummy max_steps + else: + self._reference_model = reference_model + self._reward_model = reward_model + self.reference_model.eval() + self.reward_model.eval() + + self.reward_tokenizer = reward_tokenizer + self.tokenizer = policy_tokenizer + if is_same_tokenizer(self.tokenizer, self.reward_tokenizer): + self.reward_tokenizer = self.tokenizer + + self.generation_config = GenerationConfig( + max_length=self.args.max_length, + num_return_sequences=self.args.num_return_sequences, + temperature=self.args.temperature, + top_p=self.args.top_p, + # top_k=self.args.top_k, + repetition_penalty=self.args.repetition_penalty, + do_sample=True, + trunc_input=False, + bos_token_id=self.tokenizer.bos_token_id, + eos_token_id=self.tokenizer.eos_token_id, + pad_token_id=self.tokenizer.pad_token_id, + ) + # Those value can be changed + self.kl_coeff = self.args.kl_coeff + self.policy_trainer.clip_range_ratio = self.clip_range_ratio = self.args.clip_range_ratio + self.clip_range_score = self.args.clip_range_score + self.value_trainer.clip_range_value = self.clip_range_value = self.args.clip_range_value + self.policy_trainer.ptx_coeff = self.ptx_coeff = self.args.ptx_coeff + self.gamma = 1.0 + self.gae_lambda = 0.95 + + # dummy class and object for model to be compaible with methods of + # Trainer, such as evaluation_loop + self.DummyPPOModel = type( + "DummyPPOModel", (object,), {"eval": lambda _: self.set_eval(), "train": lambda _: self.set_train()} + ) + self.model = self.model_wrapped = self.DummyPPOModel() + # self.optimizer = self.policy_trainer.optimizer + # self.scaler = self.reference_trainer.scaler = self.reward_trainer.scaler = None + + @property + def reference_model(self): + model = getattr(self, "_reference_model", None) + if model is not None: + return model + # use model with Trainer + if self.reference_trainer.args.pipeline_parallel_degree > 1: + # Only accept wrapped model for pipeline_parallel mode + # model = self.reference_trainer.model_wrapped + model = PipeEvalModel(self.reference_trainer) + self._reference_model = model + else: + model = self.reference_trainer.model + return model + + @property + def reward_model(self): + model = getattr(self, "_reward_model", None) + if model is not None: + return model + # use model with Trainer + if self.reward_trainer.args.pipeline_parallel_degree > 1: + # Only accept wrapped model for pipeline_parallel mode + # model = self.reward_trainer.model_wrapped + model = PipeEvalModel(self.reward_trainer) + self._reward_model = model + else: + model = self.reward_trainer.model + return model + + @property + def actor_model(self): + if self.training: + return self.policy_trainer.model_wrapped + model = getattr(self, "_actor_model", None) + if model is not None: + return model + if self.policy_trainer.args.pipeline_parallel_degree > 1: + # Only accept wrapped model for pipeline_parallel mode + # model = self.policy_trainer.model_wrapped + model = PipeEvalModel(self.policy_trainer) + self._actor_model = model + else: + model = self.policy_trainer.model + return model + + @property + def reward_critic_model(self): + if self.training: + return self.value_trainer.model_wrapped + model = 
getattr(self, "_reward_critic_model", None) + if model is not None: + return model + if self.value_trainer.args.pipeline_parallel_degree > 1: + # Only accept wrapped model for pipeline_parallel mode + # model = self.value_trainer.model_wrapped + model = PipeEvalModel(self.value_trainer) + self._reward_critic_model = model + else: + model = self.value_trainer.model + return model + + def set_train(self, mode: bool = True) -> None: + """Set training mode for all models.""" + if mode: + # self.is_in_train = True + self.training = True + self.actor_model.train() + self.reward_critic_model.train() + else: + self.training = False + self.actor_model.eval() + self.reward_critic_model.eval() + + def set_eval(self) -> None: + """Set model to evaluation mode.""" + self.set_train(mode=False) + + def prediction_step( + self, + model: nn.Layer, + inputs: Dict[str, Union[paddle.Tensor, Any]], + prediction_loss_only: bool, + ignore_keys: Optional[List[str]] = None, + ) -> Tuple[Optional[paddle.Tensor], Optional[paddle.Tensor], Optional[paddle.Tensor]]: + if self.args.pipeline_parallel_degree > 1: + # hack for pipeline mode + inputs = self._prepare_inputs(inputs) + return self.prediction_pipeline_step(model, inputs, prediction_loss_only, ignore_keys) + else: + inputs = self._prepare_inputs(inputs) + + with paddle.no_grad(): + with self.autocast_smart_context_manager(): + seq = self.actor_model.generate( + input_ids=inputs["input_ids"], + attention_mask=inputs["attention_mask"], + generation_config=self.generation_config, + synced_gpus=ShardingOption.FULL_SHARD in self.policy_trainer.args.sharding, + )[0] + attention_mask = paddle.logical_and( + seq != self.tokenizer.pad_token_id, + seq != self.tokenizer.unk_token_id, + ) + if self.reward_tokenizer is not self.tokenizer: + reward_tokenize_output = batch_retokenize( + input_ids=seq, + src_tokenizer=self.tokenizer, + dest_tokenizer=self.reward_tokenizer, + skip_special_tokens=True, + device=self.args.device, + ) + reward_input_ids = reward_tokenize_output["input_ids"] + reward_attention_mask = reward_tokenize_output["attention_mask"] + else: + reward_input_ids = seq + reward_attention_mask = attention_mask + + reward_score = self.reward_model( + reward_input_ids, attention_mask=reward_attention_mask, return_dict=True + ).end_scores.squeeze(axis=-1) + + # keep the first batch of eval output sequence to print and check + prompt = self.tokenizer.batch_decode(inputs["input_ids"], skip_special_tokens=True) + generated = self.tokenizer.batch_decode(seq, skip_special_tokens=True) + for i, text in enumerate(generated): + self._eval_out_file.write(text + "\n") + if getattr(self, "_eval_seq", None) is None: + generated = [text[len(prompt[i]) :] for i, text in enumerate(generated)] + # prompts.extend(prompt) + # generateds.extend(generated) + self._eval_seq = (prompt, generated, reward_score.tolist()) + + return reward_score.cast(paddle.float32).mean(), None, None + + def evaluation_loop( + self, + dataloader: DataLoader, + description: str, + prediction_loss_only: Optional[bool] = None, + ignore_keys: Optional[List[str]] = None, + metric_key_prefix: str = "eval", + max_eval_iters: Optional[int] = -1, + ) -> EvalLoopOutput: + # to save eval generated sequence + eval_out_file = os.path.join( + self.args.output_dir, f"eval_out-step{self.state.global_step}-rank{self.args.local_rank}.txt" + ) + self._eval_out_file = open(eval_out_file, "w") + + output = super().evaluation_loop( + dataloader, description, prediction_loss_only, ignore_keys, metric_key_prefix, max_eval_iters + 
) + output.metrics[f"{metric_key_prefix}/reward"] = output.metrics.pop(f"{metric_key_prefix}_loss") + + columns = ["Prompt", "Generated", "Reward"] + rows = list(zip(*self._eval_seq)) + rows = [[str(item) for item in row] for row in rows] + max_num_rows = 5 + table = Table(title="Evaluating...", show_lines=True, title_justify="left") + for column in columns: + table.add_column(column) + for row in rows[:max_num_rows]: + table.add_row(*row) + Console(soft_wrap=True, markup=False, emoji=False).print(table) + self._eval_seq = None + + self._eval_out_file.close() + + return output + + def get_eval_dataloader(self, eval_dataset: Optional[Dataset] = None) -> DataLoader: + with guard_set_args(self, {"data_collator": self.eval_dataset.get_collator()}): + return super().get_eval_dataloader(eval_dataset) + + def _save_checkpoint(self, model, metrics=None): + # maybe change args.output_dir of policy_trainer/value_trainer directly + with guard_set_args(self.policy_trainer.args, {"output_dir": os.path.join(self.args.output_dir, "policy")}): + self.policy_trainer._save_checkpoint(model, metrics) + with guard_set_args(self.value_trainer.args, {"output_dir": os.path.join(self.args.output_dir, "value")}): + self.value_trainer._save_checkpoint(model, metrics) + + # def _load_from_checkpoint(self, resume_from_checkpoint=None): + # with guard_set_args(self.policy_trainer.args, {"output_dir": os.path.join(self.args.output_dir, "policy")}): + # self.policy_trainer._load_from_checkpoint(resume_from_checkpoint) + # with guard_set_args(self.value_trainer.args, {"output_dir": os.path.join(self.args.output_dir, "value")}): + # self.value_trainer._load_from_checkpoint(resume_from_checkpoint) + + # def _load_optimizer_and_scheduler(self, checkpoint): + # # NOTE: `Trainer._load_optimizer_and_scheduler` would not seek the latest + # # state as in `_load_from_checkpoint``, and it just use `resume_from_checkpoint` + # # as value of `checkpoint` to load. 
+ # self.policy_trainer._load_optimizer_and_scheduler( + # checkpoint if checkpoint is None else os.path.join(checkpoint, "policy") + # ) + # self.value_trainer._load_optimizer_and_scheduler( + # checkpoint if checkpoint is None else os.path.join(checkpoint, "value") + # ) + + def init_train_model_opt( + self: Trainer, max_steps: int, resume_from_checkpoint: bool = False, clear_master_weight: bool = False + ) -> PretrainedModel: + # resume should be triggered here + # maybe change args.output_dir of policy_trainer/value_trainer directly + with guard_set_args(self.policy_trainer.args, {"output_dir": os.path.join(self.args.output_dir, "policy")}): + policy_model = self.policy_trainer.init_train_model_opt( + max_steps, + os.path.join(resume_from_checkpoint, "policy") + if isinstance(resume_from_checkpoint, str) + else resume_from_checkpoint, + ) + with guard_set_args(self.value_trainer.args, {"output_dir": os.path.join(self.args.output_dir, "value")}): + value_model = self.value_trainer.init_train_model_opt( + max_steps, + os.path.join(resume_from_checkpoint, "value") + if isinstance(resume_from_checkpoint, str) + else resume_from_checkpoint, + ) + return policy_model, value_model + + @staticmethod + def load_sing_gen_data(as_batches=True): + import pickle + + from paddle.distributed import fleet + + hcg = fleet.get_hybrid_communicate_group() + data_rank = hcg.get_sharding_parallel_rank() + with open(f"rl_batch-{data_rank}.data", "rb") as f: + data = pickle.load(f) + rl_batch = map_structure(lambda x: paddle.to_tensor(x), data) + rl_batches = [rl_batch] if as_batches else rl_batch + return rl_batches + + @staticmethod + def save_single_gen_data(rl_batch): + import pickle + + import paddle.distributed as dist + + with open(f"rl_batch-{dist.get_rank()}.data", "wb") as f: + rl_batch = map_structure(lambda x: x.numpy(), rl_batch) + pickle.dump(rl_batch, f) + # exit(0) + + def get_epoch_iterator(self): + # TODO(guosheng): support iter dataset + num_prompt_only_batches = len(self.prompt_only_dataloader) + num_ptx_batches = len(self.ptx_dataloader) + num_ptx_replicas = (num_prompt_only_batches + num_ptx_batches - 1) // num_ptx_batches + + def gen_epoch_data(): + for prompt_only_batch, ptx_batch in zip( + self.prompt_only_dataloader, + itertools.chain.from_iterable([self.ptx_dataloader] * num_ptx_replicas), + ): + # generate batches + self.set_eval() + rl_batches = self.split_rl_micro_batches(prompt_only_batch) + # rl_batches = self.load_sing_gen_data(as_batches=True) + if self.use_ptx: + ptx_batches = self.split_ptx_micro_batches(ptx_batch) + else: + ptx_batches = [None for _ in range(len(rl_batches))] + paddle.device.cuda.empty_cache() + + self.set_train() + for _ in range(self.args.update_iters): + for rl_batch, ptx_batch in zip(rl_batches, ptx_batches): + yield rl_batch, ptx_batch + + class EpochIterator: + def __iter__(self): + return gen_epoch_data() + + return EpochIterator() + + def init_train_num(self: Trainer, train_dataloader: DataLoader): + args = self.args + + total_train_batch_size = args.train_batch_size * args.gradient_accumulation_steps * args.dataset_world_size + + len_dataloader = len(train_dataloader) + num_train_sub_steps = ( + len_dataloader + * self.args.update_iters + * self.args.per_device_prompt_batch_size + * self.args.num_return_sequences + // self.args.per_device_train_batch_size + ) + num_update_steps_per_epoch = num_train_sub_steps // args.gradient_accumulation_steps + if args.max_steps > 0: + max_steps = args.max_steps + num_train_epochs = args.max_steps // 
num_update_steps_per_epoch + int( + args.max_steps % num_update_steps_per_epoch > 0 + ) + else: + max_steps = int(num_update_steps_per_epoch * args.num_train_epochs) + num_train_epochs = math.ceil(args.num_train_epochs) + num_examples = num_train_samples = total_train_batch_size * max_steps + + return ( + total_train_batch_size, + len_dataloader, + max_steps, + num_train_epochs, + num_update_steps_per_epoch, + num_examples, + num_train_samples, + ) + + def train( + self, + resume_from_checkpoint: Optional[Union[str, bool]] = None, + ignore_keys_for_eval: Optional[List[str]] = None, + ) -> None: + # ##### The following code try to keep same as the Trainer.train ##### + args = self.args + self.is_in_train = True + + # ##### trainging data and related num setting ##### + # TODO(guosheng): remove the binding method get_collator of dataset + with guard_set_args( + args, {"per_device_train_batch_size": self.args.per_device_prompt_batch_size} + ), guard_set_args( + self, {"train_dataset": self.train_dataset, "data_collator": self.train_dataset.get_collator()} + ): + train_dataloader = self.prompt_only_dataloader = self.get_train_dataloader() + + if self.use_ptx: + with guard_set_args( + args, + { + "per_device_train_batch_size": self.args.per_device_prompt_batch_size + * self.args.num_return_sequences + }, + ), guard_set_args( + self, {"train_dataset": self.ptx_dataset, "data_collator": self.ptx_dataset.get_collator(shift=True)} + ): + self.ptx_dataloader = self.get_train_dataloader() + else: + self.ptx_dataloader = DataLoader(DummyDataset(len(self.prompt_only_dataloader))) + ( + total_train_batch_size, + len_dataloader, + max_steps, + num_train_epochs, + num_update_steps_per_epoch, + num_examples, + num_train_samples, + ) = self.init_train_num(train_dataloader) + + # ##### model and optimizer related setting ##### + # policy_trainer/value_trainer only init train with init_train_model_opt, + # maybe more training setting used in full_training_step should be set here, + # such as trainer.control and trainer.state + # policy_model = self.policy_trainer.init_train_model_opt(max_steps, resume_from_checkpoint) + # value_model = self.value_trainer.init_train_model_opt(max_steps, resume_from_checkpoint) + policy_model, value_model = self.init_train_model_opt(max_steps, resume_from_checkpoint) + paddle.device.cuda.empty_cache() + # disable inner trainers' callback/state/control + self.policy_trainer.add_callback(MuteDefaultFlowCallback) + self.value_trainer.add_callback(MuteDefaultFlowCallback) + + # ##### traing statistic logging ##### + # Number of trainable parameters only account for policy_model + self.init_train_log( + num_examples, num_train_epochs, total_train_batch_size, max_steps, num_train_samples, policy_model + ) + + # ##### set training state and resume ##### + # consumed_samples used to set train_dataloader.batch_sampler may not be + # correct. Thus, data cannot be resumed perfectly when not breaking at epoch end. 
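+ # Worked example of the step accounting in init_train_num above, with
+ # hypothetical settings (not taken from ppo_config.json): 100 prompt-only
+ # batches per epoch, update_iters=1, per_device_prompt_batch_size=16,
+ # num_return_sequences=1, per_device_train_batch_size=8 and
+ # gradient_accumulation_steps=2 give
+ #     num_train_sub_steps = 100 * 1 * 16 * 1 // 8 = 200 micro training steps
+ #     num_update_steps_per_epoch = 200 // 2 = 100 optimizer steps per epoch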
+ epochs_trained, steps_trained_in_current_epoch, steps_trained_progress_bar = self.init_train_state( + resume_from_checkpoint, train_dataloader, max_steps, num_train_epochs, num_update_steps_per_epoch + ) + + epoch_iterator = self.get_epoch_iterator() + steps_in_epoch = num_update_steps_per_epoch * args.gradient_accumulation_steps + + # self.callback_handler.model = self.model + # self.callback_handler.optimizer = self.optimizer + # self.callback_handler.lr_scheduler = self.lr_scheduler + # self.callback_handler.train_dataloader = train_dataloader + self.state.max_steps = int(max_steps) + self.state.num_train_epochs = num_train_epochs + self.state.is_local_process_zero = self.is_local_process_zero() + self.state.is_world_process_zero = self.is_world_process_zero() + + self.control = self.callback_handler.on_train_begin(args, self.state, self.control) + + actor_loss = paddle.to_tensor(0.0) + reward_critic_loss = paddle.to_tensor(0.0) + ptx_loss = paddle.to_tensor(0.0) + # used when logging and last step + self._total_actor_loss_scalar = 0.0 + self._total_reward_critic_loss_scalar = 0.0 + self._total_ptx_loss_scalar = 0.0 + self._globalstep_last_logged = self.state.global_step + + # train_step_kwargs is used to provide arguments more than model inputs + # for full_training_step which is copied from Trainer.train and needs + # these arguments to control training process. + train_step_kwargs = { + "ignore_keys_for_eval": None, # no need + # TODO(guosheng): commented args mean to resume data, not support yet + # "resume_from_checkpoint": resume_from_checkpoint, + # "train_dataloader": train_dataloader, + # "epochs_trained": epochs_trained, + # "steps_trained_in_current_epoch": steps_trained_in_current_epoch, + # "steps_trained_progress_bar": steps_trained_progress_bar, + "steps_in_epoch": steps_in_epoch, # to control training process + # the following args are corresponding to tr_loss and model used in + # Trainer.train, and they would be used as tr_loss and model in + # PolicyTranier and ValueTrainer. 
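+ # (concretely: PolicyTrainer.full_training_step renames "actor_loss"/"ptx_loss"
+ # to "tr_loss" and "policy_model" to "model", while ValueTrainer.full_training_step
+ # does the same for "reward_critic_loss" and "value_model")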
+ "actor_loss": actor_loss, + "reward_critic_loss": reward_critic_loss, + "ptx_loss": ptx_loss, + "policy_model": policy_model, + "value_model": value_model, + } + + start_time = time.time() + self._globalstep_last_start_time = start_time # time.time() + # self.timers and self.timers("read-data").start() + + for epoch in range(epochs_trained, num_train_epochs): + if isinstance(train_dataloader, paddle.io.DataLoader) and isinstance( + train_dataloader.batch_sampler, DistributedBatchSampler + ): + train_dataloader.batch_sampler.set_epoch(epoch) + + step_control = 0 # used in loop control, reset to 0 after every step + train_step_kwargs.update({"policy_step_control": step_control, "value_step_control": step_control}) + self.control = self.callback_handler.on_epoch_begin(args, self.state, self.control) + + for step, inputs in enumerate(epoch_iterator): + # self.timers and self.timers("read-data").stop() + os.environ["TRAINER_GLOBAL_STEP"] = str(self.state.global_step) + self.callback_handler.on_load_data_end(args, self.state, self.control, inputs=inputs) + # epoch, step and steps_in_epoch only mostly used in train_step by + # `self.state.epoch = epoch + (step + 1) / steps_in_epoch` if not + # resume data + train_step_kwargs.update({"epoch": epoch, "step": step}) + rl_batch, ptx_batch = inputs + # TODO(guosheng): make rl_step/ptx_step run with autocast_smart_context_manager + rl_info, train_step_kwargs = self.rl_step(rl_batch, **train_step_kwargs) + paddle.device.cuda.empty_cache() + if self.use_ptx: + ptx_info, train_step_kwargs = self.ptx_step(ptx_batch, **train_step_kwargs) + rl_info.update(ptx_info) + paddle.device.cuda.empty_cache() + + self.state.global_step = self.value_trainer.state.global_step + self.state.epoch = self.value_trainer.state.epoch + if train_step_kwargs["value_step_control"] == 0: + # NOTE: PipelineParallel only returns a accumulated loss after + # accumulated steps, which is a mixed loss of ppo-loss and + # ptx-loss. We hack PipelineParallel._forward_step to record + # loss metrics and postprocess the recorded losses here. + # Maybe better to make the last_stage worker log to reduce + # comm and for simplicity. + if isinstance(policy_model, fleet.model.PipelineParallel): + with paddle.no_grad(): + # TODO(guosheng): maybe move this to model_pp.py and + # using interface here is better + # interleave betweeen ppo-loss and ptx-loss + if policy_model.is_pipeline_last_stage(): + # loss is 0D tensor, use stack rather than concat + mix_loss = paddle.stack(policy_model._step_losses) + policy_model._step_losses = None + else: + # The tessor shape is not policy_model.accumulate_steps + # (args.accu_steps) but policy_trainer.args.accu_steps, + # since policy_model is created with global pp_config + # using global args.accu_steps which is only half of + # policy_trainer.args.accu_steps, and indeed trainer hack + # model.accumulate_steps in training_pipeline_step to use + # trainer.args.accu_steps. The dtype is fp32(to be check), + # thus no need to broadcast. 
+ mix_loss = paddle.empty( + shape=[self.policy_trainer.args.gradient_accumulation_steps], dtype=paddle.float32 + ) + paddle.distributed.broadcast( + mix_loss, src=policy_model.pp_group.ranks[-1], group=policy_model.pp_group + ) + real_actor_loss = mix_loss[0::2].mean() + real_ptx_loss = mix_loss[1::2].mean() + rl_info.update({"train/actor_loss": real_actor_loss, "train/ptx_loss": real_ptx_loss}) + # on_step_end + self.control = self.callback_handler.on_step_end(args, self.state, self.control) + else: + # on_sub_step_end + self.control = self.callback_handler.on_substep_end(args, self.state, self.control) + self._maybe_log_save_evaluate(rl_info, None, epoch, ignore_keys_for_eval, inputs=inputs) + + if step < 0: + logger.warning( + f"There seems to be not a single sample in your epoch_iterator, stopping training at step" + f" {self.state.global_step}! This is expected if you're using an IterableDataset and set" + f" num_steps ({self.state.max_steps}) higher than the number of available samples." + ) + self.control.should_training_stop = True + + self.control = self.callback_handler.on_epoch_end(args, self.state, self.control) + # argument model is not used in _maybe_log_save_evaluate, thus use None + self._maybe_log_save_evaluate(rl_info, None, epoch, ignore_keys_for_eval, inputs=inputs) + + if self.control.should_training_stop: + break + # TODO(guosheng): add epilogue of training + + def _maybe_log_save_evaluate(self, tr_loss, model, epoch, ignore_keys_for_eval, **kwargs): + if self.control.should_log: + + logs: Dict[str, float] = {} + + for k, v in tr_loss.items(): + if isinstance(v, paddle.Tensor) and "lr" not in k and "max_generated_length" not in k: + v_scalar = self._nested_gather(v).mean().item() + # TODO(guosheng): maybe should consider self._enable_delay_scale_loss() + # and maybe should merge with loss postprocess in PP + if "train/actor_loss" == k and "train/ptx_loss" in tr_loss: + # use_ptx would double the gradient_accumulation_steps + # which causes actor_loss and ptx_loss reduced by half + v_scalar = v_scalar * 2 + elif "train/ptx_loss" == k: + # similar to actor_loss and should double, additionally + # it should be divided by ptx_coeff for logging + v_scalar = v_scalar * 2 / self.ptx_coeff + logs[k] = round(v_scalar / (self.state.global_step - self._globalstep_last_logged), 8) + v.subtract_(v) + attr_name = "_total_" + k.split("/")[-1] + "_scalar" + attr_value = getattr(self, attr_name, 0) + setattr(self, attr_name, attr_value + v_scalar) + elif "max_generated_length" in k: + v_scalar = self._nested_gather(v).max().item() + logs[k] = v_scalar + else: + logs[k] = float("{0:.3e}".format(v)) + logs["global_step"] = int(self.state.global_step) + + total_train_batch_size = ( + self.args.train_batch_size * self.args.gradient_accumulation_steps * self.args.dataset_world_size + ) + num_steps = self.state.global_step - self._globalstep_last_logged + logs.update( + speed_metrics( + "interval", + self._globalstep_last_start_time, + num_samples=total_train_batch_size * num_steps, + num_steps=num_steps, + ) + ) + + self._globalstep_last_logged = self.state.global_step + self._globalstep_last_start_time = time.time() + + self.log(logs, **kwargs) + + # To trigger evaluation and save but avoid log again + with guard_set_args(self.control, {"should_log": False}): + super()._maybe_log_save_evaluate(tr_loss, model, epoch, ignore_keys_for_eval) + + def add_kl_divergence_regularization( + self, + prompt: paddle.Tensor, # size = (B, S) # pylint: disable=unused-argument + log_probs: paddle.Tensor, 
# size = (B, L) + ref_log_probs: paddle.Tensor, # size = (B, L) + reward_score: paddle.Tensor, # size = (B,) + sequence_mask: paddle.Tensor, # size = (B, L) + ) -> paddle.Tensor: + kl_divergence_estimate = -self.kl_coeff * (log_probs - ref_log_probs) # size = (B, L) + rewards = kl_divergence_estimate # size = (B, L) + reward_clip = paddle.clip( # size = (B,) + reward_score, + min=-self.clip_range_score, + max=self.clip_range_score, + ) + batch_size = log_probs.shape[0] + for i in range(batch_size): + end_index = sequence_mask[i].nonzero()[-1] + # rewards[i, end_index] += reward_clip[i] + rewards[i, end_index] = rewards[i, end_index] + reward_clip[i] + + return rewards + + def get_advantages_and_returns( + self, + values: paddle.Tensor, + rewards: paddle.Tensor, + sequence_mask: paddle.Tensor, + start: int, + ) -> Tuple[paddle.Tensor, paddle.Tensor]: + """Compute advantages and returns using Generalized Advantage Estimation (GAE).""" + # Modified from https://github.com/CarperAI/trlx/blob/main/trlx/models/modeling_ppo.py + last_gae_lambda = 0.0 + advantages_reversed = [] + values = values * sequence_mask + rewards = rewards * sequence_mask + length = rewards.shape[-1] + for t in reversed(range(start, length)): # pylint: disable=invalid-name + next_values = values[:, t + 1] if t < length - 1 else 0.0 + delta = rewards[:, t] + self.gamma * next_values - values[:, t] + last_gae_lambda = delta + self.gamma * self.gae_lambda * last_gae_lambda + advantages_reversed.append(last_gae_lambda) + advantages = paddle.stack(advantages_reversed[::-1], axis=1) + returns = advantages + values[:, start:] + return advantages.detach(), returns + + def rl_step(self, rl_batch: Dict[str, paddle.Tensor], **kwargs) -> Dict[str, Any]: + prompt = rl_batch["prompt"] + old_log_probs = rl_batch["log_probs"] + ref_log_probs = rl_batch["ref_log_probs"] + rewards = rl_batch["rewards"] + old_reward_values = rl_batch["reward_values"] + input_ids = rl_batch["input_ids"] + attention_mask = rl_batch["attention_mask"] + + start = prompt.shape[-1] - 1 + sequence_mask = attention_mask[:, 1:] + + with paddle.no_grad(): + # maybe these two can also be put into rollout + old_rewards = self.add_kl_divergence_regularization( + prompt, + old_log_probs, + ref_log_probs, + rewards, + sequence_mask, + ) + reward_advantages, reward_returns = self.get_advantages_and_returns( + old_reward_values, + old_rewards, + sequence_mask, + start, + ) + # metric + kl_divergence = ((old_log_probs - ref_log_probs) * sequence_mask)[:, start:].sum(axis=-1).mean() + mean_generated_length = sequence_mask[:, start:].cast(paddle.float32).sum(axis=-1).mean() + max_generated_length = sequence_mask[:, start:].cast(paddle.float32).sum(axis=-1).max() + rewards = rewards.mean() + # trainer inputs + old_log_probs = old_log_probs[:, start:] + old_reward_values = old_reward_values[:, start:] + sequence_mask = sequence_mask[:, start:] + + policy_trainer_inputs = { + "input_ids": input_ids, + "attention_mask": attention_mask, + "old_log_probs": old_log_probs, + "reward_advantages": reward_advantages, + "sequence_mask": sequence_mask, + # "start": start, + # "use_cache": False, + # "return_dict": True, + } + kwargs = self.policy_trainer.full_training_step(policy_trainer_inputs, **kwargs) + + value_trainer_inputs = { + "input_ids": input_ids, + "attention_mask": attention_mask, + "old_reward_values": old_reward_values, + "reward_returns": reward_returns, + "sequence_mask": sequence_mask, + # "start": start, + # "use_cache": False, + # "return_dict": True, + } + kwargs = 
self.value_trainer.full_training_step(value_trainer_inputs, **kwargs) + + return { + "train/actor_loss": kwargs["actor_loss"], + "train/reward_critic_loss": kwargs["reward_critic_loss"], + "train/reward": rewards, + "train/kl_divergence": kl_divergence, + "train/mean_generated_length": mean_generated_length, + "train/max_generated_length": max_generated_length, + "train/actor_lr": self.policy_trainer._get_learning_rate(), + "train/reward_critic_lr": self.value_trainer._get_learning_rate(), + }, kwargs + + def ptx_step(self, ptx_batch: Dict[str, paddle.Tensor], **kwargs) -> Dict[str, Any]: + """Perform a single update step with PTX loss.""" + kwargs = self.policy_trainer.full_training_step(ptx_batch, **kwargs) + return {"train/ptx_loss": kwargs["ptx_loss"]}, kwargs + + def split_ptx_micro_batches( + self, + ptx_batch: Dict[str, paddle.Tensor], + ) -> List[Dict[str, paddle.Tensor]]: + """Split a batch of PTX samples into micro-batches.""" + micro_batches = [] + total_batch_size = ptx_batch["input_ids"].shape[0] + micro_batch_size = self.args.per_device_train_batch_size + for i in range(0, total_batch_size, micro_batch_size): + micro_batch = map_structure( + # pylint: disable-next=cell-var-from-loop + lambda tensor: tensor[i : i + micro_batch_size], # noqa: B023 + ptx_batch, + ) + micro_batches.append(micro_batch) + return micro_batches + + def split_rl_micro_batches( + self, + prompt_only_batch: PromptOnlyBatch, + ) -> List[PromptOnlyBatch]: + """Split a batch of RL samples into micro-batches.""" + total_batch_size = prompt_only_batch["input_ids"].shape[0] + micro_batch_size = self.args.per_device_train_batch_size + micro_batches = [] + for i in range(0, total_batch_size, micro_batch_size): + micro_batch = {} + micro_batch = map_structure( + lambda tensor: tensor[i : i + micro_batch_size], + prompt_only_batch, + ) + micro_batches.extend(self.rollout(micro_batch)) + return micro_batches + + @paddle.no_grad() + def rollout(self, prompt_only_batch: PromptOnlyBatch) -> List[Dict[str, Any]]: + """Rollout a batch of experiences.""" + input_ids = prompt_only_batch["input_ids"] + # NOTE: generation output of paddlenlp do not contain prompt, we should + # change sequences here. 
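+ # For now the actor_model.generate(...) call below is commented out and rollout
+ # consumes sequences pre-dumped via save_single_gen_data/load_sing_gen_data
+ # (defined above): each worker reloads its own rl_batch-*.data pickle instead of
+ # sampling from the current policy, which looks like a bring-up/debugging shortcut.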
+ # sequences = self.actor_model.generate( + # input_ids=input_ids, + # attention_mask=prompt_only_batch["attention_mask"], + # generation_config=self.generation_config, + # synced_gpus=ShardingOption.FULL_SHARD in self.policy_trainer.args.sharding, + # )[0] + # sequences = sequences.reshape([input_ids.shape[0], self.args.num_return_sequences, -1]).transpose([1, 0, 2]) + sequences = [self.load_sing_gen_data(as_batches=False)["input_ids"]] + + return [ + self.post_rollout( + input_ids, + seq, + attention_mask=paddle.logical_and( + seq != self.tokenizer.pad_token_id, + seq != self.tokenizer.unk_token_id, + ), + ) + for seq in sequences + ] + + @paddle.no_grad() + def post_rollout( + self, + prompt: paddle.Tensor, + sequence: paddle.Tensor, + attention_mask: paddle.Tensor, + ) -> Dict[str, Any]: + if self.reward_tokenizer is not self.tokenizer: + reward_tokenize_output = batch_retokenize( + sequence, + src_tokenizer=self.tokenizer, + dest_tokenizer=self.reward_tokenizer, + skip_special_tokens=True, + ) + reward_seq = reward_tokenize_output["input_ids"] + reward_attention_mask = reward_tokenize_output["attention_mask"] + else: + # for text in self.tokenizer.batch_decode( + # sequence, + # skip_special_tokens=True + # ): + # print(text) + reward_seq = sequence + reward_attention_mask = attention_mask + + # pipe model outputs a logits tensor with LMHead, while non-pipe model + # outputs a tuple with logits tensor as the only one element. + logits = self.actor_model( + sequence, + attention_mask=attention_mask, + # return_dict=True, + ) # .logits + if not isinstance(logits, paddle.Tensor): + logits = logits[0] + ref_logits = self.reference_model( + sequence, + attention_mask=attention_mask, + # return_dict=True, + ) # .logits + if not isinstance(ref_logits, paddle.Tensor): + ref_logits = ref_logits[0] + + reward_score = self.reward_model( + reward_seq, + attention_mask=reward_attention_mask, + # return_dict=True, + )[ + 1 + ] # .end_scores + reward_value = self.reward_critic_model( + sequence, + attention_mask=attention_mask, + # return_dict=True, + )[ + 0 + ] # .scores + # TODO(guosheng): move these to model methods such as get_logprobs + reward_score = reward_score.squeeze(axis=-1) + reward_value = reward_value.squeeze(axis=-1)[:, :-1] + log_probs = gather_log_probabilities(logits[:, :-1], sequence[:, 1:]) + ref_log_probs = gather_log_probabilities(ref_logits[:, :-1], sequence[:, 1:]) + return { + "prompt": prompt, + "log_probs": log_probs, + "ref_log_probs": ref_log_probs, + "rewards": reward_score, + "reward_values": reward_value, + "input_ids": sequence, + "attention_mask": attention_mask, + } + + # @paddle.no_grad() + # def post_rollout( + # self, + # prompt: paddle.Tensor, + # sequence: paddle.Tensor, + # attention_mask: paddle.Tensor, + # ) -> Dict[str, Any]: + # if self.reward_tokenizer is not self.tokenizer: + # reward_tokenize_output = batch_retokenize diff --git a/examples/RLHF/ppo_config.json b/examples/RLHF/ppo_config.json index d15331443608..2b88f31a007c 100644 --- a/examples/RLHF/ppo_config.json +++ b/examples/RLHF/ppo_config.json @@ -4,7 +4,9 @@ "ptx_datasets": "alpaca", "actor_model_name_or_path": "PKU-Alignment/alpaca-7b-reproduced", "reward_model_name_or_path": "PKU-Alignment/beaver-7b-v1.0-reward", - "output_dir": "./checkpoints/ppo", + "_actor_model_name_or_path": "facebook/llama-7b", + "_reward_model_name_or_path": "facebook/llama-7b", + "output_dir": "/root/paddlejob/workspace/guosheng/checkpoints/ppo-sd14pp2-test", "max_length": 512, "temperature": 1.0, 
"num_return_sequences":1, @@ -34,7 +36,7 @@ "logging_steps": 1, "evaluation_strategy": "steps", "eval_steps": 100, - "save_strategy": "steps", + "save_strategy": "epoch", "save_steps": 100000, "bf16": true, "fp16_opt_level": "O2", @@ -42,8 +44,11 @@ "do_eval": true, "disable_tqdm": true, "save_total_limit": 1, - "sharding_parallel_degree": 4, - "sharding": "stage3", + "sharding_parallel_degree": 2, + "sharding": "stage1", + "tensor_parallel_degree": 2, + "pipeline_parallel_degree": 2, + "pipeline_parallel_config": "disable_p2p_cache_shape", "comment-PKU_Beaver-max_grad_norm": 1.0, "max_grad_norm": 1.0, "adam_beta1": 0.9, diff --git a/examples/RLHF/ppo_main.py b/examples/RLHF/ppo_main.py index f517288a8679..05829e7d57ff 100644 --- a/examples/RLHF/ppo_main.py +++ b/examples/RLHF/ppo_main.py @@ -19,18 +19,13 @@ import paddle from data import PromptOnlyDataset, SupervisedDataset, parse_dataset -from models import AutoModelForScore -from ppo_trainer import PPOTrainer +from new_ppo_trainer import PPOTrainer from paddlenlp.trainer import PdArgumentParser, TrainingArguments, get_last_checkpoint -from paddlenlp.transformers import ( - AutoConfig, - AutoModelForCausalLM, - AutoTokenizer, - LlamaTokenizer, -) +from paddlenlp.transformers import AutoConfig, AutoTokenizer, LlamaTokenizer from paddlenlp.utils.log import logger + @dataclass class TrainingArguments(TrainingArguments): kl_coeff: float = field( @@ -220,59 +215,67 @@ def main(): training_args.max_length = data_args.max_length if training_args.pipeline_parallel_degree > 1: - raise ValueError("Not support pipeline parallel mode.") + global AutoModelForCausalLM, AutoModelForScore + from models.model_pp import LlamaPolicyPipe, LlamaValuePipe + + AutoModelForCausalLM = LlamaPolicyPipe + AutoModelForScore = LlamaValuePipe else: - # actor model - model_config = AutoConfig.from_pretrained( - model_args.actor_model_name_or_path, - tensor_parallel_output=False, - tensor_parallel_degree=training_args.tensor_parallel_degree, - tensor_parallel_rank=training_args.tensor_parallel_rank, - dtype=dtype, - ) - if hasattr(model_config, "use_flash_attention"): - model_config.use_flash_attention = model_args.use_flash_attention - actor_model = AutoModelForCausalLM.from_pretrained( - model_args.actor_model_name_or_path, - config=model_config, - ) - # reference model - actor_reference_model = AutoModelForCausalLM.from_pretrained( - model_args.actor_model_name_or_path, - config=model_config, - ) - actor_tokenizer = AutoTokenizer.from_pretrained( - model_args.actor_model_name_or_path, model_max_length=data_args.max_length, padding_side="left" - ) + from models import AutoModelForScore - # reward model - model_config = AutoConfig.from_pretrained( - model_args.reward_model_name_or_path, - tensor_parallel_output=False, - tensor_parallel_degree=training_args.tensor_parallel_degree, - tensor_parallel_rank=training_args.tensor_parallel_rank, - dtype=dtype, - ) - if hasattr(model_config, "use_flash_attention"): - model_config.use_flash_attention = model_args.use_flash_attention - reward_model = AutoModelForScore.from_pretrained( - model_args.reward_model_name_or_path, - config=model_config, - score_type="reward", - do_normalize=training_args.normalize_reward, - ) - reward_tokenizer = AutoTokenizer.from_pretrained( - model_args.reward_model_name_or_path, model_max_length=data_args.max_length, padding_side="right" - ) - # critic model - if model_args.reward_critic_model_name_or_path is None: - model_args.reward_critic_model_name_or_path = model_args.reward_model_name_or_path 
- reward_critic_model = AutoModelForScore.from_pretrained( - model_args.reward_critic_model_name_or_path, config=model_config, score_type="critic", do_normalize=False - ) - reward_critic_tokenizer = AutoTokenizer.from_pretrained( - model_args.reward_critic_model_name_or_path, model_max_length=data_args.max_length, padding_side="left" - ) + from paddlenlp.transformers import AutoModelForCausalLM + + # actor model + model_config = AutoConfig.from_pretrained( + model_args.actor_model_name_or_path, + tensor_parallel_output=False, + tensor_parallel_degree=training_args.tensor_parallel_degree, + tensor_parallel_rank=training_args.tensor_parallel_rank, + dtype=dtype, + ) + if hasattr(model_config, "use_flash_attention"): + model_config.use_flash_attention = model_args.use_flash_attention + actor_model = AutoModelForCausalLM.from_pretrained( + model_args.actor_model_name_or_path, + config=model_config, + ) + # reference model + actor_reference_model = AutoModelForCausalLM.from_pretrained( + model_args.actor_model_name_or_path, + config=model_config, + ) + actor_tokenizer = AutoTokenizer.from_pretrained( + model_args.actor_model_name_or_path, model_max_length=data_args.max_length, padding_side="left" + ) + + # reward model + model_config = AutoConfig.from_pretrained( + model_args.reward_model_name_or_path, + tensor_parallel_output=False, + tensor_parallel_degree=training_args.tensor_parallel_degree, + tensor_parallel_rank=training_args.tensor_parallel_rank, + dtype=dtype, + ) + if hasattr(model_config, "use_flash_attention"): + model_config.use_flash_attention = model_args.use_flash_attention + reward_model = AutoModelForScore.from_pretrained( + model_args.reward_model_name_or_path, + config=model_config, + score_type="reward", + do_normalize=training_args.normalize_reward, + ) + reward_tokenizer = AutoTokenizer.from_pretrained( + model_args.reward_model_name_or_path, model_max_length=data_args.max_length, padding_side="right" + ) + # critic model + if model_args.reward_critic_model_name_or_path is None: + model_args.reward_critic_model_name_or_path = model_args.reward_model_name_or_path + reward_critic_model = AutoModelForScore.from_pretrained( + model_args.reward_critic_model_name_or_path, config=model_config, score_type="critic", do_normalize=False + ) + reward_critic_tokenizer = AutoTokenizer.from_pretrained( + model_args.reward_critic_model_name_or_path, model_max_length=data_args.max_length, padding_side="left" + ) for tokenizer in [actor_tokenizer, reward_tokenizer, reward_critic_tokenizer]: if isinstance(tokenizer, LlamaTokenizer) and tokenizer.pad_token_id is None: tokenizer.pad_token_id = tokenizer.eos_token_id diff --git a/paddlenlp/transformers/llama/modeling.py b/paddlenlp/transformers/llama/modeling.py index ebe8ff213d4b..0dfc6b0ca009 100644 --- a/paddlenlp/transformers/llama/modeling.py +++ b/paddlenlp/transformers/llama/modeling.py @@ -1504,7 +1504,11 @@ def __init__(self, config): super(LlamaPretrainingCriterion, self).__init__() self.ignore_index = getattr(config, "ignore_index", -100) self.config = config - self.enable_parallel_cross_entropy = config.tensor_parallel_degree > 1 and config.tensor_parallel_output + self.enable_parallel_cross_entropy = ( + config.tensor_parallel_degree > 1 + and config.vocab_size % config.tensor_parallel_degree == 0 + and config.tensor_parallel_output + ) if self.enable_parallel_cross_entropy: # and False: # and lm_head is distributed self.loss_func = mpu.ParallelCrossEntropy(ignore_index=self.ignore_index) diff --git 
a/paddlenlp/transformers/llama/modeling_pp.py b/paddlenlp/transformers/llama/modeling_pp.py index 73600aa6b420..362b8f4c6cb1 100644 --- a/paddlenlp/transformers/llama/modeling_pp.py +++ b/paddlenlp/transformers/llama/modeling_pp.py @@ -95,7 +95,7 @@ def __init__(self, config): self.config = config self.sequence_parallel = config.sequence_parallel self.hidden_size = config.hidden_size - if config.tensor_parallel_degree > 1: + if config.tensor_parallel_degree > 1 and config.vocab_size % config.tensor_parallel_degree == 0: self.embed_tokens = fleet.meta_parallel.VocabParallelEmbedding( config.vocab_size, config.hidden_size, @@ -245,7 +245,7 @@ def get_hcg(): f"llama.layers.{i}", ) self.add_sequential_layer(LayerDesc(LlamaRMSNormPipe, config=config), "llama") - self.add_sequential_layer(LayerDesc(LlamaLMHead, config=config), "lm_head") + self.add_head(config) recompute_interval = 0 @@ -256,7 +256,7 @@ def get_hcg(): PipelineLayer.__init__( self, layers=self.get_sequential_layers(), - loss_fn=LlamaPretrainingCriterion(config), + loss_fn=self.get_loss_fn(config), topology=get_hcg().topology(), seg_method=seg_method, recompute_interval=recompute_interval, @@ -271,3 +271,9 @@ def get_hcg(): self.apply(self._init_weights) # DON'T init PipelinePretrainedModel # PipelinePretrainedModel.__init__(self.super(), config=config) + + def add_head(self, config): + self.add_sequential_layer(LayerDesc(LlamaLMHead, config=config), "lm_head") + + def get_loss_fn(self, config): + return LlamaPretrainingCriterion(config) diff --git a/paddlenlp/transformers/model_utils.py b/paddlenlp/transformers/model_utils.py index 72e46e08b202..79f28ca65e6d 100644 --- a/paddlenlp/transformers/model_utils.py +++ b/paddlenlp/transformers/model_utils.py @@ -2515,7 +2515,8 @@ def _set_pipeline_name_mapping(self, mappings=None): idx = name_splited[0] # for normal pp layer if idx.isdigit(): - single_name = [prefixes[idx]] + # allow empty prefix + single_name = [] if prefixes[idx] == "" else [prefixes[idx]] single_name.extend(name_splited[1:]) elif idx == "shared_layers": single_name = [self.get_shardlayer_prefix(name_splited)] From 2f2ad5e560b9bead21b610bdf68d64e0bd30795a Mon Sep 17 00:00:00 2001 From: whucsgs Date: Fri, 2 Feb 2024 08:08:24 +0000 Subject: [PATCH 02/46] Move new_ppo_trainer.py to ppo_trainer.py --- examples/RLHF/new_ppo_trainer.py | 1805 ------------------------------ examples/RLHF/ppo_main.py | 2 +- examples/RLHF/ppo_trainer.py | 322 +++++- 3 files changed, 270 insertions(+), 1859 deletions(-) delete mode 100644 examples/RLHF/new_ppo_trainer.py diff --git a/examples/RLHF/new_ppo_trainer.py b/examples/RLHF/new_ppo_trainer.py deleted file mode 100644 index d72737f3db17..000000000000 --- a/examples/RLHF/new_ppo_trainer.py +++ /dev/null @@ -1,1805 +0,0 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import copy -import itertools -import math -import os -import time -import types -from contextlib import contextmanager -from typing import Any, Callable, Dict, List, Optional, Tuple, Union - -import numpy as np -import paddle -import paddle.nn as nn -import paddle.nn.functional as F -import tqdm -from data import DummyDataset, PromptOnlyBatch -from paddle.distributed import fleet -from paddle.io import DataLoader, Dataset, DistributedBatchSampler -from paddle.utils import map_structure -from rich.console import Console -from rich.table import Table - -from paddlenlp.data import DataCollator -from paddlenlp.generation import GenerationConfig -from paddlenlp.generation.utils import GenerationMixin -from paddlenlp.trainer.trainer import ( - TRAINER_STATE_NAME, - EvalLoopOutput, - EvalPrediction, - HybridParallelOptimizer, - NlpDistributedBatchSampler, - ShardingOption, - Trainer, - TrainerCallback, - TrainerControl, - TrainerState, - TrainingArguments, - _obtain_optimizer_parameters_list, - distributed_file, - distributed_isfile, - fused_allreduce_gradients, - logger, - reshard_util, - speed_metrics, - split_inputs_sequence_dim, -) -from paddlenlp.transformers import BatchEncoding, PretrainedModel, PretrainedTokenizer -from paddlenlp.transformers.configuration_utils import PretrainedConfig -from paddlenlp.transformers.model_outputs import ModelOutput -from paddlenlp.transformers.tokenizer_utils_base import ( - PaddingStrategy, - TruncationStrategy, -) - - -def batch_retokenize( - input_ids: paddle.Tensor, - src_tokenizer: PretrainedTokenizer, - dest_tokenizer: PretrainedTokenizer, - *, - padding: bool | str | PaddingStrategy = PaddingStrategy.LONGEST, - truncation: bool | str | TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, - skip_special_tokens: bool = True, -) -> BatchEncoding: - """Re-tokenize a batch of input ids from one tokenizer to another.""" - output = dest_tokenizer( - [ - text + dest_tokenizer.eos_token - for text in src_tokenizer.batch_decode( - input_ids, - skip_special_tokens=skip_special_tokens, - ) - ], - padding=padding, - truncation=truncation, - return_tensors="pd", - ) - return output - - -def gather_log_probabilities(logits: paddle.Tensor, labels: paddle.Tensor) -> paddle.Tensor: - """Gather log probabilities of the given labels from the logits.""" - log_probs = F.log_softmax(logits, axis=-1) - log_probs_labels = paddle.take_along_axis(log_probs, axis=-1, indices=labels.unsqueeze(axis=-1)) - return log_probs_labels.squeeze(axis=-1) - - -def init_train_model_opt( - self: Trainer, max_steps: int, resume_from_checkpoint: bool = False, clear_master_weight: bool = False -) -> PretrainedModel: - # Copy of model/optimizer init and resuming related code in `Trainer.train`. - # NOTE: this `_load_from_checkpoint` is indeed to load model states in the - # following elif-else branches, though they are apart away in `Trainer.train`. - if not self.args.should_load_sharding_stage1_model: - self._load_from_checkpoint(resume_from_checkpoint) - - # delay_optimizer_creation = ( - # self.sharding is not None - # and ShardingOption.SHARD_OP in self.args.sharding - # ) - delay_optimizer_creation = False - - if not delay_optimizer_creation: - self.create_optimizer_and_scheduler(num_training_steps=max_steps) - - if self.args.should_load_sharding_stage1_model: - model = self._wrap_model_and_load_sharded_checkpoint(resume_from_checkpoint) - elif self.args.should_save_sharding_stage1_model: - # In the non-sharded mode, should invoke _load_from_checkpoint before _wrap_model. 
- # In this mode, the rank0 load all params and the _wrap_model implicitly broadcast params from rank0 to the other ranks. - model = self._wrap_model(self.model_wrapped) - if self.sharding_io is not None: - assert delay_optimizer_creation is False, "delay_optimizer_creation should be False" - # the self.optimizer should be wrapped and it is done in _wrap_model - self.sharding_io.set_optimizer(self.optimizer) - # for the rest of this function `model` is the outside model, whether it was wrapped or not - if model is not self.model: - self.model_wrapped = model - if delay_optimizer_creation: - self.create_optimizer_and_scheduler(num_training_steps=max_steps) - self._load_optimizer_and_scheduler(resume_from_checkpoint) - else: - model = self._wrap_model(self.model_wrapped) - # for the rest of this function `model` is the outside model, whether it was wrapped or not - if model is not self.model: - self.model_wrapped = model - if delay_optimizer_creation: - self.create_optimizer_and_scheduler(num_training_steps=max_steps) - self._load_optimizer_and_scheduler(resume_from_checkpoint) - - if ShardingOption.FULL_SHARD in self.args.sharding and clear_master_weight: - # for inference model to use Trainer sharding stage3, clear master_weight - # which is created in GroupShardedStage3.__init__ - self.optimizer._master_weights = None - - if self.args.device == "npu" and self.args.flatten_param_grads: - from .plugins.npu_plugin import npu_accelerate_plugin - - npu_accelerate_plugin(self.optimizer) - - return model - - -def init_train_state( - self: Trainer, - resume_from_checkpoint: bool, - train_dataloader: DataLoader, - max_steps: int, - num_train_epochs: int, - num_update_steps_per_epoch: int, -): - args = self.args - - self.state = TrainerState() - self.state.epoch = 0 - epochs_trained = 0 - steps_trained_in_current_epoch = 0 - steps_trained_progress_bar = None - - # Check if continuing training from a checkpoint - if resume_from_checkpoint is not None and distributed_isfile( - os.path.join(resume_from_checkpoint, TRAINER_STATE_NAME) - ): - self.state = TrainerState.load_from_json( - distributed_file(os.path.join(resume_from_checkpoint, TRAINER_STATE_NAME)) - ) - epochs_trained = self.state.global_step // num_update_steps_per_epoch - if not args.ignore_data_skip: - steps_trained_in_current_epoch = self.state.global_step % (num_update_steps_per_epoch) - steps_trained_in_current_epoch *= args.gradient_accumulation_steps - else: - steps_trained_in_current_epoch = 0 - - logger.info(" Continuing training from checkpoint, will skip to saved global_step") - logger.info(f" Continuing training from epoch {epochs_trained}") - logger.info(f" Continuing training from global step {self.state.global_step}") - if not args.ignore_data_skip: - logger.info( - f" Will skip the first {epochs_trained} epochs then the first {steps_trained_in_current_epoch} " - "batches in the first epoch. If this takes a lot of time, you can add the `--ignore_data_skip` " - "flag to your launch command, but you will resume the training on data already seen by your model." 
- ) - if self.is_local_process_zero() and not args.disable_tqdm: - steps_trained_progress_bar = tqdm(total=steps_trained_in_current_epoch) - steps_trained_progress_bar.set_description("Skipping the first batches") - if not args.ignore_data_skip: - if isinstance(train_dataloader, paddle.io.DataLoader) and isinstance( - train_dataloader.batch_sampler, NlpDistributedBatchSampler - ): - consumed_samples = ( - self.state.global_step - * args.train_batch_size - * args.gradient_accumulation_steps - * args.dataset_world_size - ) - train_dataloader.batch_sampler.set_epoch(consumed_samples=consumed_samples) - logger.info(f"Set DistributedBatchSampler consumed_samples to {consumed_samples}") - - self.state.max_steps = int(max_steps) - self.state.num_train_epochs = num_train_epochs - self.state.is_local_process_zero = self.is_local_process_zero() - self.state.is_world_process_zero = self.is_world_process_zero() - - return epochs_trained, steps_trained_in_current_epoch, steps_trained_progress_bar - - -def init_train_log( - self: Trainer, - num_examples: int, - num_train_epochs: int, - total_train_batch_size: int, - max_steps: int, - num_train_samples: int, - model: PretrainedModel, -): - args = self.args - - logger.info("***** Running training *****") - logger.info(f" Num examples = {num_examples:,}") - logger.info(f" Num Epochs = {num_train_epochs}") - logger.info(f" Instantaneous batch size per device = {args.per_device_train_batch_size}") - logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_train_batch_size}") - logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") - logger.info(f" Total optimization steps = {max_steps:,}") - logger.info(f" Total num train samples = {num_train_samples:,}") - # per_device_trainable_numel = sum(p.numel().item() for p in model.parameters() if not p.stop_gradient) - # TODO: Temporary fix since Tensor.numel() not supported in distributed mode - per_device_trainable_numel = sum(np.prod(p.shape) for p in model.parameters() if not p.stop_gradient) - logger.info(f" Number of trainable parameters = {per_device_trainable_numel:,} (per device)") - if self.args.use_hybrid_parallel: - # todo fix for pipeline_parallel_degree - parts_num = max(self.args.tensor_parallel_degree, 1) * max(self.args.pipeline_parallel_degree, 1) - if parts_num > 1: - all_reduce_dtype = "int64" - if paddle.get_device().split(":")[0] in ["npu", "xpu"]: - # TODO(duanyanhui): fix when NPU all_reduce supports int64 - all_reduce_dtype = "float32" - trainable_numel_tensor = paddle.to_tensor(per_device_trainable_numel, dtype=all_reduce_dtype) - paddle.distributed.all_reduce(trainable_numel_tensor) - trainable_numel = int(trainable_numel_tensor.item()) // self.args.dataset_world_size - # the numel is roughly, because the tensor parallel still hold own bias or layer_norm weight without splited - # so, the trainable numel is a little bigger than real. - logger.info(f" Number of trainable parameters = {trainable_numel:,} (all devices, roughly)") - - -def full_training_step(self: Trainer, inputs: Dict[str, paddle.Tensor], **kwargs): - """ - Just a copy of single training step complete code in Trainer.train while loop - which including forward+backward+step, while wraps the inputs and outputs to - make the complicated copied code no need to change. Maybe a better way is to - add fine-grained methods including these steps to Trainer which is similar to - DeepSpeed engine. 
- """ - - # TODO(guosheng): step, steps_trained_in_current_epoch and steps_trained_progress_bar - # should use reference since they would be overwrite. - # for state update - epoch = kwargs.get("epoch", 0) - step = kwargs.get("step", 0) - steps_in_epoch = kwargs.get("steps_in_epoch", 0) - step_control = kwargs.get("step_control", 0) - # for step and progress update when resuming data - train_dataloader = kwargs.get("train_dataloader", None) - resume_from_checkpoint = kwargs.get("resume_from_checkpoint", None) - steps_trained_in_current_epoch = kwargs.get("steps_trained_in_current_epoch", 0) - steps_trained_progress_bar = kwargs.get("steps_trained_progress_bar", None) - # for eval output ignore to gather - ignore_keys_for_eval = kwargs.get("ignore_keys_for_eval", None) - tr_loss = kwargs.get("tr_loss", 0.0) - model = kwargs.get("model", self.model_wrapped) - - args = self.args - - if self.args.use_hybrid_parallel and self.args.sep_parallel_degree > 1: - inputs = split_inputs_sequence_dim(inputs) - self.timers and self.timers("read-data").stop() - os.environ["TRAINER_GLOBAL_STEP"] = str(self.state.global_step) - self.callback_handler.on_load_data_end(args, self.state, self.control, inputs=inputs) - - # Skip past any already trained steps if resuming training - # for paddlenlp.utils.batch_sampler.DistributedBatchSampler - # We use consumed_samples to reset the status - if isinstance(train_dataloader, paddle.io.DataLoader) and isinstance( - train_dataloader.batch_sampler, NlpDistributedBatchSampler - ): - if step == 0: - if steps_trained_progress_bar is not None: - steps_trained_progress_bar.update(steps_trained_in_current_epoch) - steps_trained_progress_bar.close() - steps_trained_progress_bar = None - self._load_rng_state(resume_from_checkpoint) - step += steps_trained_in_current_epoch - elif steps_trained_in_current_epoch > 0: - steps_trained_in_current_epoch -= 1 - if steps_trained_progress_bar is not None: - steps_trained_progress_bar.update(1) - if steps_trained_in_current_epoch == 0: - self._load_rng_state(resume_from_checkpoint) - # continue - final_local_vars = locals() - for k in kwargs.keys(): - if k in final_local_vars: - kwargs[k] = final_local_vars[k] - return kwargs - elif steps_trained_progress_bar is not None: - steps_trained_progress_bar.close() - steps_trained_progress_bar = None - - if step_control % args.gradient_accumulation_steps == 0: - self.control = self.callback_handler.on_step_begin(args, self.state, self.control) - self.timers and self.timers("forward-backward").start() - - dp_enabled = self.args.data_parallel_degree > 1 if self.args.use_hybrid_parallel else args.local_rank != -1 - forbidden_no_sync = False - # stage2 and stage3 should not no_sync, because the is no DDP wrapper and no_sync API - # hybrid_parallel (tp or pp or sharding stage 1) should not no_sync - if self.args.use_hybrid_parallel: - forbidden_no_sync = True - - availiable_no_sync = dp_enabled and not forbidden_no_sync - - is_no_sync = ( - ((step_control + 1) % args.gradient_accumulation_steps != 0) - and availiable_no_sync - and args._no_sync_in_gradient_accumulation - ) or (args.recompute and availiable_no_sync) - # sharding - # stage1. the same as ddp - # stage2. manualy collect gradient on dp group - - dp_master_grad = self.args.world_size > 1 and self.args.amp_master_grad and not self.args.use_hybrid_parallel - if dp_master_grad: - is_no_sync = True - - if is_no_sync: - # Avoid unnecessary DDP synchronization since there will be no backward pass on this example. 
- with model.no_sync(): - tr_loss_step = self.training_step(model, inputs) - else: - tr_loss_step = self.training_step(model, inputs) - - tr_loss += tr_loss_step - - if (step_control + 1) % args.gradient_accumulation_steps == 0 or ( - # last step in epoch but step is always smaller than gradient_accumulation_steps - steps_in_epoch <= args.gradient_accumulation_steps - and (step + 1) == steps_in_epoch - ): - if self.args.pipeline_parallel_degree <= 1 and self._enable_delay_scale_loss(): - tr_loss /= self.args.gradient_accumulation_steps - - self.timers and self.timers("forward-backward").stop() - # Maunally collect gradients - # Case 1: Use recompute and dp - # Case 2: Hack dp with master_grad - # Case 3: Pipeline or sharding overlap - # local_rank != -1 don't means dp in networks. - self.timers and self.timers("all-reduce").start() - - # Case 1: Use recompute and dp / sharding stage1, - # manualy collect gradient for dp. - if args.recompute and availiable_no_sync: - fused_allreduce_gradients(list(model.parameters()), None) - - # Case 2: hack dp with master_grad - if dp_master_grad and not (args.recompute and availiable_no_sync): - fused_allreduce_gradients(list(model.parameters()), None) - - # Pipeline parallel mode, handle gradient reduce here to overlap - pipeline_parallel_config = ( - set(args.pipeline_parallel_config.split(" ")) if args.pipeline_parallel_degree > 1 else set() - ) - enable_dp_comm_overlap = "enable_dp_comm_overlap" in pipeline_parallel_config - enable_release_grads = "enable_release_grads" in pipeline_parallel_config - - # Case 3: Pipeline parallel mode, overlap with dp - if isinstance(self.optimizer, HybridParallelOptimizer) and not self.do_grad_scaling: - parameters_list = _obtain_optimizer_parameters_list(self.optimizer._inner_opt) - - if not enable_dp_comm_overlap: - if self.optimizer._sharding_enable: - assert reshard_util.is_sharding_opt(self.optimizer) - self.optimizer._inner_opt.reduce_gradients(list(parameters_list), self.optimizer._hcg) - - if self.optimizer._dp_enable or getattr(self.optimizer, "_sep_enable", False): - fused_allreduce_gradients(list(parameters_list), self.optimizer._hcg) - - self.timers and self.timers("all-reduce").stop() - self.timers and self.timers("optimizer-step").start() - - if self.args.gradient_accumulation_steps > 1 and self._enable_delay_scale_loss(): - for p in model._layers.parameters(): - with paddle.no_grad(): - if hasattr(p, "main_grad") and p.main_grad is not None: - assert p.grad is None - p.main_grad.scale_(1.0 / self.args.gradient_accumulation_steps) - elif p.grad is not None: - p.grad.scale_(1.0 / self.args.gradient_accumulation_steps) - - # Optimizer step - self.callback_handler.on_optimizer_begin( - args, self.state, self.control, scaler=self.scaler if self.do_grad_scaling else None - ) - optimizer_was_run = True - if self.do_grad_scaling: - scale_before = paddle.assign(self.scaler._scale) - self.scaler.step(self.optimizer) - self.scaler.update() - scale_after = self.scaler._scale - optimizer_was_run = not self.scaler._cache_founf_inf - if not optimizer_was_run: - scale_before_value = scale_before.cpu().numpy() - scale_after_value = scale_after.cpu().numpy() - logger.warning( - f"optimizer not run, scale_before: {scale_before_value[0]}, scale_after: {scale_after_value[0]}" - ) - elif isinstance(self.optimizer, HybridParallelOptimizer): - self.optimizer._step(parameters_list) - else: - self.optimizer.step() - - self.timers and self.timers("optimizer-step").stop() - - if optimizer_was_run: - self.lr_scheduler.step() - - 
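# For reference, a minimal standalone sketch of the gradient-accumulation pattern the
# step above implements (delay-scale each micro-batch loss by 1/accumulation_steps and
# only step/clear the optimizer on the boundary micro-step). This is illustrative only,
# not part of the patched trainer; all names below are made up for the example.
import paddle

model = paddle.nn.Linear(8, 1)
optimizer = paddle.optimizer.AdamW(learning_rate=1e-3, parameters=model.parameters())
accumulation_steps = 4

for micro_step in range(16):
    x = paddle.randn([2, 8])
    y = paddle.randn([2, 1])
    loss = paddle.mean((model(x) - y) ** 2) / accumulation_steps  # delay-scale the loss
    loss.backward()  # gradients accumulate across micro-batches
    if (micro_step + 1) % accumulation_steps == 0:
        optimizer.step()        # one optimizer update per accumulated global step
        optimizer.clear_grad()  # reset gradients for the next accumulation window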
if enable_release_grads and args.pipeline_parallel_degree > 1: - self.optimizer.clear_grad(set_to_zero=False) - for _, buffers in model._chunk_2_comm_buffers.items(): - for buffer in buffers: - buffer._clear_grad_storage() - else: - self.optimizer.clear_grad() - - self.callback_handler.on_optimizer_end( - args, self.state, self.control, scaler=self.scaler if self.do_grad_scaling else None - ) - - self.state.global_step += 1 - self.state.epoch = epoch + (step + 1) / steps_in_epoch - self.control = self.callback_handler.on_step_end(args, self.state, self.control) - self._maybe_log_save_evaluate(tr_loss, model, epoch, ignore_keys_for_eval, inputs=inputs) - self._print_timer() - step_control = 0 - else: - self.control = self.callback_handler.on_substep_end(args, self.state, self.control) - step_control += 1 - - if self.control.should_epoch_stop or self.control.should_training_stop: - # break - final_local_vars = locals() - for k in kwargs.keys(): - if k in final_local_vars: - kwargs[k] = final_local_vars[k] - return kwargs - self.timers and self.timers("read-data").start() - - final_local_vars = locals() - for k in kwargs.keys(): - if k in final_local_vars: - kwargs[k] = final_local_vars[k] - return kwargs - - -Trainer.init_train_model_opt = init_train_model_opt -Trainer.init_train_log = init_train_log -Trainer.init_train_state = init_train_state -Trainer.full_training_step = full_training_step - - -class PolicyTrainer(Trainer): - def __init__( - self, - model: Union[PretrainedModel, nn.Layer] = None, - criterion: nn.Layer = None, - args: TrainingArguments = None, - data_collator: Optional[DataCollator] = None, - train_dataset: Optional[Dataset] = None, - eval_dataset: Union[Dataset, Dict[str, Dataset]] = None, - tokenizer: Optional[PretrainedTokenizer] = None, - compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None, - callbacks: Optional[List[TrainerCallback]] = None, - optimizers: Tuple[paddle.optimizer.Optimizer, paddle.optimizer.lr.LRScheduler] = (None, None), - preprocess_logits_for_metrics: Callable[[paddle.Tensor, paddle.Tensor], paddle.Tensor] = None, - ): - - super().__init__( - model, - criterion, - args, - data_collator, - train_dataset, - eval_dataset, - tokenizer, - compute_metrics, - callbacks, - optimizers, - preprocess_logits_for_metrics, - ) - - def actor_loss_fn( - self, - log_probs: paddle.Tensor, - old_log_probs: paddle.Tensor, - advantages: paddle.Tensor, - mask: paddle.Tensor, - ) -> paddle.Tensor: - # policy gradient loss - ratio = paddle.exp(log_probs - old_log_probs) - pg_loss1 = -advantages * ratio - pg_loss2 = -advantages * paddle.clip( - ratio, - 1.0 - self.clip_range_ratio, - 1.0 + self.clip_range_ratio, - ) - return paddle.sum(paddle.maximum(pg_loss1, pg_loss2) * mask) / mask.sum() - - def compute_loss(self, model, inputs, return_outputs=False): - """ - How the loss is computed by Trainer. By default, all models return the loss in the first element. - Subclass and override for custom behavior. - """ - labels = inputs.get("labels", None) - if labels is not None: - labels = inputs.get("labels", None) - outputs = model(**inputs) - ptx_loss = outputs["loss"] if isinstance(outputs, dict) else outputs[0] - ptx_loss = self.ptx_coeff * ptx_loss - return ptx_loss - - input_ids = inputs["input_ids"] - attention_mask = inputs["attention_mask"] - reward_advantages = inputs["reward_advantages"] - # NOTE: TensorParallel model requires non-Tensor inputs to be lists and - # broadcast them, thus do not or optionally use these inputs currently. 
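# For reference, a minimal standalone sketch of the clipped PPO policy objective computed
# by actor_loss_fn above. Illustrative only (not part of the patch); it assumes
# log_probs/old_log_probs/advantages/mask are already aligned [B, L] tensors and that
# clip_range_ratio is a plain float.
import paddle

def clipped_policy_loss(log_probs, old_log_probs, advantages, mask, clip_range_ratio=0.2):
    ratio = paddle.exp(log_probs - old_log_probs)  # importance-sampling ratio
    pg_loss1 = -advantages * ratio                 # unclipped surrogate
    pg_loss2 = -advantages * paddle.clip(ratio, 1.0 - clip_range_ratio, 1.0 + clip_range_ratio)
    # take the pessimistic (larger) per-token loss, averaged over valid (masked) tokens
    return paddle.sum(paddle.maximum(pg_loss1, pg_loss2) * mask) / mask.sum()

log_probs = paddle.to_tensor([[-1.0, -0.5], [-0.8, -1.2]])
old_log_probs = paddle.to_tensor([[-1.1, -0.6], [-0.7, -1.0]])
advantages = paddle.to_tensor([[0.5, -0.2], [1.0, 0.3]])
mask = paddle.to_tensor([[1.0, 1.0], [1.0, 0.0]])
print(clipped_policy_loss(log_probs, old_log_probs, advantages, mask))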
- # use_cache = inputs["use_cache"] - # return_dict = inputs["return_dict"] - start = inputs.pop("start", None) - old_log_probs = inputs["old_log_probs"][:, start:] if start is not None else inputs["old_log_probs"] - sequence_mask = inputs["sequence_mask"][:, start:] if start is not None else inputs["sequence_mask"] - outputs = model( - input_ids=input_ids, - attention_mask=attention_mask, # use_cache=use_cache, return_dict=return_dict - ) - - logits = outputs["logits"] if isinstance(outputs, dict) else outputs - if isinstance(outputs, dict): - logits = outputs["logits"] - elif isinstance(outputs, tuple): - logits = outputs[0] - - log_probs = gather_log_probabilities(logits[:, :-1], input_ids[:, 1:]) - actor_loss = self.actor_loss_fn( - log_probs[:, -old_log_probs.shape[1] :], - old_log_probs, - reward_advantages, - sequence_mask, - ) - - return actor_loss - - def full_training_step(self: Trainer, inputs: Dict[str, paddle.Tensor], **kwargs): - labels = inputs.get("labels", None) - if labels is not None: # use ptx - loss_name = "ptx_loss" - else: - loss_name = "actor_loss" - kwargs["model"] = kwargs.pop("policy_model") - kwargs["step_control"] = kwargs.pop("policy_step_control") - kwargs["tr_loss"] = kwargs.pop(loss_name) - kwargs = super().full_training_step(inputs, **kwargs) - kwargs["policy_model"] = kwargs.pop("model") - kwargs["policy_step_control"] = kwargs.pop("step_control") - kwargs[loss_name] = kwargs.pop("tr_loss") - return kwargs - - -class ValueTrainer(Trainer): - def __init__( - self, - model: Union[PretrainedModel, nn.Layer] = None, - criterion: nn.Layer = None, - args: TrainingArguments = None, - data_collator: Optional[DataCollator] = None, - train_dataset: Optional[Dataset] = None, - eval_dataset: Union[Dataset, Dict[str, Dataset]] = None, - tokenizer: Optional[PretrainedTokenizer] = None, - compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None, - callbacks: Optional[List[TrainerCallback]] = None, - optimizers: Tuple[paddle.optimizer.Optimizer, paddle.optimizer.lr.LRScheduler] = (None, None), - preprocess_logits_for_metrics: Callable[[paddle.Tensor, paddle.Tensor], paddle.Tensor] = None, - ): - - super().__init__( - model, - criterion, - args, - data_collator, - train_dataset, - eval_dataset, - tokenizer, - compute_metrics, - callbacks, - optimizers, - preprocess_logits_for_metrics, - ) - - def critic_loss_fn( - self, - values: paddle.Tensor, - old_values: paddle.Tensor, - returns: paddle.Tensor, - mask: paddle.Tensor, - ) -> paddle.Tensor: - """Compute critic loss.""" - # TODO(guosheng): use paddle.clip when its min/max can support more than - # 0D Tensor - values_clipped = paddle.minimum( - paddle.maximum(values, old_values - self.clip_range_value), old_values + self.clip_range_value - ) - vf_loss1 = paddle.square(values - returns) - vf_loss2 = paddle.square(values_clipped - returns) - return 0.5 * paddle.sum(paddle.maximum(vf_loss1, vf_loss2) * mask) / mask.sum() - - def compute_loss(self, model, inputs, return_outputs=False): - """ - How the loss is computed by Trainer. By default, all models return the loss in the first element. - Subclass and override for custom behavior. - """ - input_ids = inputs["input_ids"] - attention_mask = inputs["attention_mask"] - reward_returns = inputs["reward_returns"] - # NOTE: TensorParallel model requires non-Tensor inputs to be lists and - # broadcast them, thus do not or optionally use these inputs currently. 
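# For reference, a minimal standalone sketch of the clipped value (critic) loss computed by
# critic_loss_fn above: predicted values are clamped to stay within clip_range_value of the
# old rollout values, and the worse of the two squared errors is taken per token.
# Illustrative only and not part of the patch.
import paddle

def clipped_value_loss(values, old_values, returns, mask, clip_range_value=5.0):
    values_clipped = paddle.minimum(
        paddle.maximum(values, old_values - clip_range_value), old_values + clip_range_value
    )
    vf_loss1 = paddle.square(values - returns)
    vf_loss2 = paddle.square(values_clipped - returns)
    return 0.5 * paddle.sum(paddle.maximum(vf_loss1, vf_loss2) * mask) / mask.sum()

values = paddle.to_tensor([[0.2, 0.8], [1.5, -0.3]])
old_values = paddle.to_tensor([[0.1, 0.7], [1.0, 0.0]])
returns = paddle.to_tensor([[0.5, 1.0], [1.2, 0.1]])
mask = paddle.to_tensor([[1.0, 1.0], [1.0, 0.0]])
print(clipped_value_loss(values, old_values, returns, mask))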
- # use_cache = inputs["use_cache"] - # return_dict = inputs["return_dict"] - start = inputs.pop("start", None) - old_reward_values = ( - inputs["old_reward_values"][:, start:] if start is not None else inputs["old_reward_values"] - ) - sequence_mask = inputs["sequence_mask"][:, start:] if start is not None else inputs["sequence_mask"] - outputs = model( - input_ids=input_ids, - attention_mask=attention_mask, # use_cache=use_cache, return_dict=return_dict - ) - - # We don't use .loss here since the model may return tuples instead of ModelOutput. - reward_values = outputs["scores"] if isinstance(outputs, dict) else outputs - if isinstance(outputs, dict): - reward_values = outputs["scores"] - elif isinstance(outputs, tuple): - reward_values = outputs[0] - - reward_values = reward_values.squeeze(axis=-1)[:, :-1] - reward_critic_loss = self.critic_loss_fn( - reward_values[:, -old_reward_values.shape[1] :], - old_reward_values, - reward_returns, - sequence_mask, - ) - - return reward_critic_loss - - def full_training_step(self: Trainer, inputs: Dict[str, paddle.Tensor], **kwargs): - # TODO(guosheng): Make these training control vars mapping as class attr, - # then PPOTrainer can extract and reuse them to avoid hard code. - kwargs["model"] = kwargs.pop("value_model") - kwargs["step_control"] = kwargs.pop("value_step_control") - kwargs["tr_loss"] = kwargs.pop("reward_critic_loss") - kwargs = super().full_training_step(inputs, **kwargs) - kwargs["value_model"] = kwargs.pop("model") - kwargs["value_step_control"] = kwargs.pop("step_control") - kwargs["reward_critic_loss"] = kwargs.pop("tr_loss") - return kwargs - - -@contextmanager -def guard_set_args(args, arg_name_values): - for k, v in arg_name_values.items(): - old_value = getattr(args, k, None) - setattr(args, k, v) - arg_name_values[k] = old_value - yield - for k, v in arg_name_values.items(): - old_value = getattr(args, k) - setattr(args, k, v) - arg_name_values[k] = old_value - - -class MuteDefaultFlowCallback(TrainerCallback): - def on_step_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs): - control.should_save = False - control.should_evaluate = False - control.should_log = False - return control - - -def is_same_tokenizer( - tokenizer: PretrainedTokenizer, - other_tokenizer: PretrainedTokenizer, -) -> bool: - """Check if two tokenizers are the same.""" - return tokenizer is other_tokenizer or ( - tokenizer.__class__ == other_tokenizer.__class__ and tokenizer.get_vocab() == other_tokenizer.get_vocab() - ) - - -class PipeEvalModel(GenerationMixin): - def __init__(self, trainer: Trainer): - self.model: fleet.model.PipelineParallel = trainer.model_wrapped - self.config: PretrainedConfig = trainer.model.config - self._is_gen = False - # self.gen_fn = None - # self.fwd_fn = None - # use non-pipe model generetion related methods - self.prepare_inputs_for_generation = types.MethodType( - self.model._layers._non_pipe_model_class.prepare_inputs_for_generation, self - ) - self.update_model_kwargs_for_generation = ( - self.model._layers._non_pipe_model_class.update_model_kwargs_for_generation - ) - - def eval(self): - self.model.eval() - - def train(self): - self.model.train() - - def _broadcast_outputs(self, outputs): - # outputs is PipelineParallel.eval_batch which is a list of batches. 
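# For reference, a minimal usage sketch of the guard_set_args context manager defined above:
# the listed attributes are temporarily overridden inside the `with` block and restored on
# exit. It assumes guard_set_args from this module is in scope; the SimpleNamespace stand-in
# for `args` is only for illustration.
from types import SimpleNamespace

args = SimpleNamespace(recompute=True, fp16_opt_level="O2")
with guard_set_args(args, {"recompute": False, "fp16_opt_level": "O1"}):
    # inside the guard the overrides are visible
    assert args.recompute is False and args.fp16_opt_level == "O1"
# on exit the original values are restored
assert args.recompute is True and args.fp16_opt_level == "O2"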
- out = [] - outputs = (outputs,) if isinstance(outputs, paddle.Tensor) else outputs - for tensors in outputs: - if not self.model.is_pipeline_last_stage(): - tensor = tensors if isinstance(tensors, paddle.Tensor) else tensors[0] - head_out_meta = ( - (self.model._layers.head_out_meta,) - if isinstance(self.model._layers.head_out_meta, paddle.static.InputSpec) - else self.model._layers.head_out_meta - ) - tensors = tuple( - paddle.empty( - shape=[ - tensor.shape[i] if (meta.shape[i] is None or meta.shape[i] < 0) else meta.shape[i] - for i in range(len(meta.shape)) - ], - dtype=tensor.dtype if meta.dtype is None else meta.dtype, - ) - for meta in head_out_meta - ) - else: - # Currently use tuple instead of ModelOutput and require the - # caller use the return result as tuple. - tensors = ( - (tensors,) - if isinstance(tensors, paddle.Tensor) - else tensors.to_tuple() - if isinstance(tensors, ModelOutput) - else tensors - ) - - # map_structure( - # lambda tensor: paddle.distributed.broadcast( - # tensor, - # src=self.model.pp_group.ranks[-1], - # group=self.model.pp_group), tensors) - for tensor in tensors: - paddle.distributed.broadcast(tensor, src=self.model.pp_group.ranks[-1], group=self.model.pp_group) - out.append(tensors[0] if len(tensors) == 1 else tensors) - return out[0] if len(out) == 1 else out - - def __call__(self, *args, **kwargs): - model = self.model - assert self.model.training is False - # TODO(guosheng): hack for post-process in eval, so we can let last stage - # do more to reduce comm overhead. - if self._is_gen: - # inputs by `prepare_inputs_for_generation` is a dict with following keys: - # "input_ids", "position_ids", "past_key_values", "use_cache", "attention_mask" - # NOTE: cache/past_key_values should be rather than pass like - pass - else: - # use _prepare_pipeline_inputs_func to convert pipeline inputs - inputs, labels = model._prepare_pipeline_inputs_func(*args, **kwargs) - # NOTE(guosheng): bug seems exist. pp.eval_batch(compute_loss=False) - # will set pp._compute_loss to False and would not set it back. Thus - # hack here to set it back. 
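# For reference, a minimal single-process sketch of how _broadcast_outputs above builds
# empty receive buffers from InputSpec metadata: dims the spec leaves unknown (None or
# negative) are filled in from a reference tensor's runtime shape. Illustrative only; the
# actual broadcast from the last pipeline stage is omitted here.
import paddle

reference = paddle.randn([4, 16, 32], dtype="float32")      # runtime tensor with known batch/seq dims
spec = paddle.static.InputSpec(shape=[None, None, 32], dtype="float32")

buffer = paddle.empty(
    shape=[
        reference.shape[i] if (spec.shape[i] is None or spec.shape[i] < 0) else spec.shape[i]
        for i in range(len(spec.shape))
    ],
    dtype=reference.dtype if spec.dtype is None else spec.dtype,
)
print(buffer.shape)  # [4, 16, 32], ready to receive broadcast data from the last stage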
- with guard_set_args(model, {"_compute_loss": False}): - outputs = model.eval_batch([inputs, labels], compute_loss=False) - outputs = self._broadcast_outputs(outputs) - return outputs - - def generate(self, *args, **kwargs): - # when generate, cache should be - self._is_gen = True - super().generate(*args, **kwargs) - self._is_gen = False - - -class PPOTrainer(Trainer): - def __init__( - self, - model: Union[PretrainedModel, nn.Layer] = None, - criterion: nn.Layer = None, - args: TrainingArguments = None, - data_collator: Optional[DataCollator] = None, - train_dataset: Optional[Dataset] = None, - ptx_dataset: Optional[Dataset] = None, - eval_dataset: Union[Dataset, Dict[str, Dataset]] = None, - tokenizer: Optional[PretrainedTokenizer] = None, - compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None, - callbacks: Optional[List[TrainerCallback]] = None, - optimizers: Tuple[paddle.optimizer.Optimizer, paddle.optimizer.lr.LRScheduler] = (None, None), - preprocess_logits_for_metrics: Callable[[paddle.Tensor, paddle.Tensor], paddle.Tensor] = None, - ): - with guard_set_args( - args, - { - "recompute": False, - "fp16_opt_level": "O1", - "pipeline_parallel_degree": 1, # workaround for pipeline parallel model check - }, - ): - # just used to create trival attrs might be used in the training - # process of trainer, while changing some args to avoid model usage - # in __init__ such as recompute and AMP-O2 - super().__init__( - model, - criterion, - args, - data_collator, - train_dataset, - eval_dataset, - tokenizer, - compute_metrics, - callbacks, - optimizers, - preprocess_logits_for_metrics, - ) - - self.train_dataset = train_dataset - self.ptx_dataset = ptx_dataset - self.eval_dataset = eval_dataset - - (policy_model, reference_model, reward_model, value_model) = model - # policy_tokenizer and value_tokenizer should be same - (policy_tokenizer, reference_tokenizer, reward_tokenizer, value_tokenizer) = tokenizer - - policy_training_args = copy.deepcopy(args) - self.use_ptx = self.ptx_dataset is not None - if self.use_ptx: - policy_training_args.gradient_accumulation_steps *= 2 - self.policy_trainer = PolicyTrainer( - policy_model, - criterion, - policy_training_args, - data_collator, - train_dataset, - eval_dataset, - policy_tokenizer, - compute_metrics, - callbacks, - optimizers, - preprocess_logits_for_metrics, - ) - value_training_args = copy.deepcopy(args) - for attr_name in [ - "critic_learning_rate", - "critic_weight_decay", - "critic_lr_scheduler_type", - "critic_warmup_ratio", - "critic_recompute", - ]: - if getattr(value_training_args, attr_name, None) is not None: - setattr(value_training_args, attr_name[len("critic_") :], getattr(value_training_args, attr_name)) - self.value_trainer = ValueTrainer( - value_model, - criterion, - value_training_args, - data_collator, - train_dataset, - eval_dataset, - value_tokenizer, - compute_metrics, - callbacks, - optimizers, - preprocess_logits_for_metrics, - ) - - # use trainer for reference_model/reward_model to enable sharding stage-3 - # maybe we should allow models to use different dist strategies later - if True: # ShardingOption.FULL_SHARD in args.sharding: - self.reference_trainer = Trainer( - reference_model, - criterion, - args, - data_collator, - train_dataset, - eval_dataset, - reference_tokenizer, - compute_metrics, - callbacks, - optimizers, - preprocess_logits_for_metrics, - ) - self.reward_trainer = Trainer( - reward_model, - criterion, - args, - data_collator, - train_dataset, - eval_dataset, - reward_tokenizer, - 
compute_metrics, - callbacks, - optimizers, - preprocess_logits_for_metrics, - ) - # TODO(guosheng): sharding stage3 should create master weight optionally - # instead of creation and clear. - self.reference_trainer.init_train_model_opt(100, None, clear_master_weight=True) # dummy max_steps - self.reward_trainer.init_train_model_opt(100, None, clear_master_weight=True) # dummy max_steps - else: - self._reference_model = reference_model - self._reward_model = reward_model - self.reference_model.eval() - self.reward_model.eval() - - self.reward_tokenizer = reward_tokenizer - self.tokenizer = policy_tokenizer - if is_same_tokenizer(self.tokenizer, self.reward_tokenizer): - self.reward_tokenizer = self.tokenizer - - self.generation_config = GenerationConfig( - max_length=self.args.max_length, - num_return_sequences=self.args.num_return_sequences, - temperature=self.args.temperature, - top_p=self.args.top_p, - # top_k=self.args.top_k, - repetition_penalty=self.args.repetition_penalty, - do_sample=True, - trunc_input=False, - bos_token_id=self.tokenizer.bos_token_id, - eos_token_id=self.tokenizer.eos_token_id, - pad_token_id=self.tokenizer.pad_token_id, - ) - # Those value can be changed - self.kl_coeff = self.args.kl_coeff - self.policy_trainer.clip_range_ratio = self.clip_range_ratio = self.args.clip_range_ratio - self.clip_range_score = self.args.clip_range_score - self.value_trainer.clip_range_value = self.clip_range_value = self.args.clip_range_value - self.policy_trainer.ptx_coeff = self.ptx_coeff = self.args.ptx_coeff - self.gamma = 1.0 - self.gae_lambda = 0.95 - - # dummy class and object for model to be compaible with methods of - # Trainer, such as evaluation_loop - self.DummyPPOModel = type( - "DummyPPOModel", (object,), {"eval": lambda _: self.set_eval(), "train": lambda _: self.set_train()} - ) - self.model = self.model_wrapped = self.DummyPPOModel() - # self.optimizer = self.policy_trainer.optimizer - # self.scaler = self.reference_trainer.scaler = self.reward_trainer.scaler = None - - @property - def reference_model(self): - model = getattr(self, "_reference_model", None) - if model is not None: - return model - # use model with Trainer - if self.reference_trainer.args.pipeline_parallel_degree > 1: - # Only accept wrapped model for pipeline_parallel mode - # model = self.reference_trainer.model_wrapped - model = PipeEvalModel(self.reference_trainer) - self._reference_model = model - else: - model = self.reference_trainer.model - return model - - @property - def reward_model(self): - model = getattr(self, "_reward_model", None) - if model is not None: - return model - # use model with Trainer - if self.reward_trainer.args.pipeline_parallel_degree > 1: - # Only accept wrapped model for pipeline_parallel mode - # model = self.reward_trainer.model_wrapped - model = PipeEvalModel(self.reward_trainer) - self._reward_model = model - else: - model = self.reward_trainer.model - return model - - @property - def actor_model(self): - if self.training: - return self.policy_trainer.model_wrapped - model = getattr(self, "_actor_model", None) - if model is not None: - return model - if self.policy_trainer.args.pipeline_parallel_degree > 1: - # Only accept wrapped model for pipeline_parallel mode - # model = self.policy_trainer.model_wrapped - model = PipeEvalModel(self.policy_trainer) - self._actor_model = model - else: - model = self.policy_trainer.model - return model - - @property - def reward_critic_model(self): - if self.training: - return self.value_trainer.model_wrapped - model = 
getattr(self, "_reward_critic_model", None) - if model is not None: - return model - if self.value_trainer.args.pipeline_parallel_degree > 1: - # Only accept wrapped model for pipeline_parallel mode - # model = self.value_trainer.model_wrapped - model = PipeEvalModel(self.value_trainer) - self._reward_critic_model = model - else: - model = self.value_trainer.model - return model - - def set_train(self, mode: bool = True) -> None: - """Set training mode for all models.""" - if mode: - # self.is_in_train = True - self.training = True - self.actor_model.train() - self.reward_critic_model.train() - else: - self.training = False - self.actor_model.eval() - self.reward_critic_model.eval() - - def set_eval(self) -> None: - """Set model to evaluation mode.""" - self.set_train(mode=False) - - def prediction_step( - self, - model: nn.Layer, - inputs: Dict[str, Union[paddle.Tensor, Any]], - prediction_loss_only: bool, - ignore_keys: Optional[List[str]] = None, - ) -> Tuple[Optional[paddle.Tensor], Optional[paddle.Tensor], Optional[paddle.Tensor]]: - if self.args.pipeline_parallel_degree > 1: - # hack for pipeline mode - inputs = self._prepare_inputs(inputs) - return self.prediction_pipeline_step(model, inputs, prediction_loss_only, ignore_keys) - else: - inputs = self._prepare_inputs(inputs) - - with paddle.no_grad(): - with self.autocast_smart_context_manager(): - seq = self.actor_model.generate( - input_ids=inputs["input_ids"], - attention_mask=inputs["attention_mask"], - generation_config=self.generation_config, - synced_gpus=ShardingOption.FULL_SHARD in self.policy_trainer.args.sharding, - )[0] - attention_mask = paddle.logical_and( - seq != self.tokenizer.pad_token_id, - seq != self.tokenizer.unk_token_id, - ) - if self.reward_tokenizer is not self.tokenizer: - reward_tokenize_output = batch_retokenize( - input_ids=seq, - src_tokenizer=self.tokenizer, - dest_tokenizer=self.reward_tokenizer, - skip_special_tokens=True, - device=self.args.device, - ) - reward_input_ids = reward_tokenize_output["input_ids"] - reward_attention_mask = reward_tokenize_output["attention_mask"] - else: - reward_input_ids = seq - reward_attention_mask = attention_mask - - reward_score = self.reward_model( - reward_input_ids, attention_mask=reward_attention_mask, return_dict=True - ).end_scores.squeeze(axis=-1) - - # keep the first batch of eval output sequence to print and check - prompt = self.tokenizer.batch_decode(inputs["input_ids"], skip_special_tokens=True) - generated = self.tokenizer.batch_decode(seq, skip_special_tokens=True) - for i, text in enumerate(generated): - self._eval_out_file.write(text + "\n") - if getattr(self, "_eval_seq", None) is None: - generated = [text[len(prompt[i]) :] for i, text in enumerate(generated)] - # prompts.extend(prompt) - # generateds.extend(generated) - self._eval_seq = (prompt, generated, reward_score.tolist()) - - return reward_score.cast(paddle.float32).mean(), None, None - - def evaluation_loop( - self, - dataloader: DataLoader, - description: str, - prediction_loss_only: Optional[bool] = None, - ignore_keys: Optional[List[str]] = None, - metric_key_prefix: str = "eval", - max_eval_iters: Optional[int] = -1, - ) -> EvalLoopOutput: - # to save eval generated sequence - eval_out_file = os.path.join( - self.args.output_dir, f"eval_out-step{self.state.global_step}-rank{self.args.local_rank}.txt" - ) - self._eval_out_file = open(eval_out_file, "w") - - output = super().evaluation_loop( - dataloader, description, prediction_loss_only, ignore_keys, metric_key_prefix, max_eval_iters - 
) - output.metrics[f"{metric_key_prefix}/reward"] = output.metrics.pop(f"{metric_key_prefix}_loss") - - columns = ["Prompt", "Generated", "Reward"] - rows = list(zip(*self._eval_seq)) - rows = [[str(item) for item in row] for row in rows] - max_num_rows = 5 - table = Table(title="Evaluating...", show_lines=True, title_justify="left") - for column in columns: - table.add_column(column) - for row in rows[:max_num_rows]: - table.add_row(*row) - Console(soft_wrap=True, markup=False, emoji=False).print(table) - self._eval_seq = None - - self._eval_out_file.close() - - return output - - def get_eval_dataloader(self, eval_dataset: Optional[Dataset] = None) -> DataLoader: - with guard_set_args(self, {"data_collator": self.eval_dataset.get_collator()}): - return super().get_eval_dataloader(eval_dataset) - - def _save_checkpoint(self, model, metrics=None): - # maybe change args.output_dir of policy_trainer/value_trainer directly - with guard_set_args(self.policy_trainer.args, {"output_dir": os.path.join(self.args.output_dir, "policy")}): - self.policy_trainer._save_checkpoint(model, metrics) - with guard_set_args(self.value_trainer.args, {"output_dir": os.path.join(self.args.output_dir, "value")}): - self.value_trainer._save_checkpoint(model, metrics) - - # def _load_from_checkpoint(self, resume_from_checkpoint=None): - # with guard_set_args(self.policy_trainer.args, {"output_dir": os.path.join(self.args.output_dir, "policy")}): - # self.policy_trainer._load_from_checkpoint(resume_from_checkpoint) - # with guard_set_args(self.value_trainer.args, {"output_dir": os.path.join(self.args.output_dir, "value")}): - # self.value_trainer._load_from_checkpoint(resume_from_checkpoint) - - # def _load_optimizer_and_scheduler(self, checkpoint): - # # NOTE: `Trainer._load_optimizer_and_scheduler` would not seek the latest - # # state as in `_load_from_checkpoint``, and it just use `resume_from_checkpoint` - # # as value of `checkpoint` to load. 
- # self.policy_trainer._load_optimizer_and_scheduler( - # checkpoint if checkpoint is None else os.path.join(checkpoint, "policy") - # ) - # self.value_trainer._load_optimizer_and_scheduler( - # checkpoint if checkpoint is None else os.path.join(checkpoint, "value") - # ) - - def init_train_model_opt( - self: Trainer, max_steps: int, resume_from_checkpoint: bool = False, clear_master_weight: bool = False - ) -> PretrainedModel: - # resume should be triggered here - # maybe change args.output_dir of policy_trainer/value_trainer directly - with guard_set_args(self.policy_trainer.args, {"output_dir": os.path.join(self.args.output_dir, "policy")}): - policy_model = self.policy_trainer.init_train_model_opt( - max_steps, - os.path.join(resume_from_checkpoint, "policy") - if isinstance(resume_from_checkpoint, str) - else resume_from_checkpoint, - ) - with guard_set_args(self.value_trainer.args, {"output_dir": os.path.join(self.args.output_dir, "value")}): - value_model = self.value_trainer.init_train_model_opt( - max_steps, - os.path.join(resume_from_checkpoint, "value") - if isinstance(resume_from_checkpoint, str) - else resume_from_checkpoint, - ) - return policy_model, value_model - - @staticmethod - def load_sing_gen_data(as_batches=True): - import pickle - - from paddle.distributed import fleet - - hcg = fleet.get_hybrid_communicate_group() - data_rank = hcg.get_sharding_parallel_rank() - with open(f"rl_batch-{data_rank}.data", "rb") as f: - data = pickle.load(f) - rl_batch = map_structure(lambda x: paddle.to_tensor(x), data) - rl_batches = [rl_batch] if as_batches else rl_batch - return rl_batches - - @staticmethod - def save_single_gen_data(rl_batch): - import pickle - - import paddle.distributed as dist - - with open(f"rl_batch-{dist.get_rank()}.data", "wb") as f: - rl_batch = map_structure(lambda x: x.numpy(), rl_batch) - pickle.dump(rl_batch, f) - # exit(0) - - def get_epoch_iterator(self): - # TODO(guosheng): support iter dataset - num_prompt_only_batches = len(self.prompt_only_dataloader) - num_ptx_batches = len(self.ptx_dataloader) - num_ptx_replicas = (num_prompt_only_batches + num_ptx_batches - 1) // num_ptx_batches - - def gen_epoch_data(): - for prompt_only_batch, ptx_batch in zip( - self.prompt_only_dataloader, - itertools.chain.from_iterable([self.ptx_dataloader] * num_ptx_replicas), - ): - # generate batches - self.set_eval() - rl_batches = self.split_rl_micro_batches(prompt_only_batch) - # rl_batches = self.load_sing_gen_data(as_batches=True) - if self.use_ptx: - ptx_batches = self.split_ptx_micro_batches(ptx_batch) - else: - ptx_batches = [None for _ in range(len(rl_batches))] - paddle.device.cuda.empty_cache() - - self.set_train() - for _ in range(self.args.update_iters): - for rl_batch, ptx_batch in zip(rl_batches, ptx_batches): - yield rl_batch, ptx_batch - - class EpochIterator: - def __iter__(self): - return gen_epoch_data() - - return EpochIterator() - - def init_train_num(self: Trainer, train_dataloader: DataLoader): - args = self.args - - total_train_batch_size = args.train_batch_size * args.gradient_accumulation_steps * args.dataset_world_size - - len_dataloader = len(train_dataloader) - num_train_sub_steps = ( - len_dataloader - * self.args.update_iters - * self.args.per_device_prompt_batch_size - * self.args.num_return_sequences - // self.args.per_device_train_batch_size - ) - num_update_steps_per_epoch = num_train_sub_steps // args.gradient_accumulation_steps - if args.max_steps > 0: - max_steps = args.max_steps - num_train_epochs = args.max_steps // 
num_update_steps_per_epoch + int( - args.max_steps % num_update_steps_per_epoch > 0 - ) - else: - max_steps = int(num_update_steps_per_epoch * args.num_train_epochs) - num_train_epochs = math.ceil(args.num_train_epochs) - num_examples = num_train_samples = total_train_batch_size * max_steps - - return ( - total_train_batch_size, - len_dataloader, - max_steps, - num_train_epochs, - num_update_steps_per_epoch, - num_examples, - num_train_samples, - ) - - def train( - self, - resume_from_checkpoint: Optional[Union[str, bool]] = None, - ignore_keys_for_eval: Optional[List[str]] = None, - ) -> None: - # ##### The following code try to keep same as the Trainer.train ##### - args = self.args - self.is_in_train = True - - # ##### trainging data and related num setting ##### - # TODO(guosheng): remove the binding method get_collator of dataset - with guard_set_args( - args, {"per_device_train_batch_size": self.args.per_device_prompt_batch_size} - ), guard_set_args( - self, {"train_dataset": self.train_dataset, "data_collator": self.train_dataset.get_collator()} - ): - train_dataloader = self.prompt_only_dataloader = self.get_train_dataloader() - - if self.use_ptx: - with guard_set_args( - args, - { - "per_device_train_batch_size": self.args.per_device_prompt_batch_size - * self.args.num_return_sequences - }, - ), guard_set_args( - self, {"train_dataset": self.ptx_dataset, "data_collator": self.ptx_dataset.get_collator(shift=True)} - ): - self.ptx_dataloader = self.get_train_dataloader() - else: - self.ptx_dataloader = DataLoader(DummyDataset(len(self.prompt_only_dataloader))) - ( - total_train_batch_size, - len_dataloader, - max_steps, - num_train_epochs, - num_update_steps_per_epoch, - num_examples, - num_train_samples, - ) = self.init_train_num(train_dataloader) - - # ##### model and optimizer related setting ##### - # policy_trainer/value_trainer only init train with init_train_model_opt, - # maybe more training setting used in full_training_step should be set here, - # such as trainer.control and trainer.state - # policy_model = self.policy_trainer.init_train_model_opt(max_steps, resume_from_checkpoint) - # value_model = self.value_trainer.init_train_model_opt(max_steps, resume_from_checkpoint) - policy_model, value_model = self.init_train_model_opt(max_steps, resume_from_checkpoint) - paddle.device.cuda.empty_cache() - # disable inner trainers' callback/state/control - self.policy_trainer.add_callback(MuteDefaultFlowCallback) - self.value_trainer.add_callback(MuteDefaultFlowCallback) - - # ##### traing statistic logging ##### - # Number of trainable parameters only account for policy_model - self.init_train_log( - num_examples, num_train_epochs, total_train_batch_size, max_steps, num_train_samples, policy_model - ) - - # ##### set training state and resume ##### - # consumed_samples used to set train_dataloader.batch_sampler may not be - # correct. Thus, data cannot be resumed perfectly when not breaking at epoch end. 
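# For reference, a worked example of the step arithmetic in init_train_num above, with
# made-up numbers: each prompt-only batch is expanded by num_return_sequences, re-split
# into train micro-batches, and optimizer steps then follow from gradient accumulation.
# Illustrative only, not part of the patch.
len_dataloader = 10                 # prompt-only batches per epoch
update_iters = 1                    # PPO update passes over each rollout
per_device_prompt_batch_size = 8
num_return_sequences = 2
per_device_train_batch_size = 4
gradient_accumulation_steps = 2

num_train_sub_steps = (
    len_dataloader * update_iters * per_device_prompt_batch_size * num_return_sequences
    // per_device_train_batch_size
)                                    # 10 * 1 * 8 * 2 // 4 = 40 micro-steps per epoch
num_update_steps_per_epoch = num_train_sub_steps // gradient_accumulation_steps  # 20 optimizer steps
print(num_train_sub_steps, num_update_steps_per_epoch)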
- epochs_trained, steps_trained_in_current_epoch, steps_trained_progress_bar = self.init_train_state( - resume_from_checkpoint, train_dataloader, max_steps, num_train_epochs, num_update_steps_per_epoch - ) - - epoch_iterator = self.get_epoch_iterator() - steps_in_epoch = num_update_steps_per_epoch * args.gradient_accumulation_steps - - # self.callback_handler.model = self.model - # self.callback_handler.optimizer = self.optimizer - # self.callback_handler.lr_scheduler = self.lr_scheduler - # self.callback_handler.train_dataloader = train_dataloader - self.state.max_steps = int(max_steps) - self.state.num_train_epochs = num_train_epochs - self.state.is_local_process_zero = self.is_local_process_zero() - self.state.is_world_process_zero = self.is_world_process_zero() - - self.control = self.callback_handler.on_train_begin(args, self.state, self.control) - - actor_loss = paddle.to_tensor(0.0) - reward_critic_loss = paddle.to_tensor(0.0) - ptx_loss = paddle.to_tensor(0.0) - # used when logging and last step - self._total_actor_loss_scalar = 0.0 - self._total_reward_critic_loss_scalar = 0.0 - self._total_ptx_loss_scalar = 0.0 - self._globalstep_last_logged = self.state.global_step - - # train_step_kwargs is used to provide arguments more than model inputs - # for full_training_step which is copied from Trainer.train and needs - # these arguments to control training process. - train_step_kwargs = { - "ignore_keys_for_eval": None, # no need - # TODO(guosheng): commented args mean to resume data, not support yet - # "resume_from_checkpoint": resume_from_checkpoint, - # "train_dataloader": train_dataloader, - # "epochs_trained": epochs_trained, - # "steps_trained_in_current_epoch": steps_trained_in_current_epoch, - # "steps_trained_progress_bar": steps_trained_progress_bar, - "steps_in_epoch": steps_in_epoch, # to control training process - # the following args are corresponding to tr_loss and model used in - # Trainer.train, and they would be used as tr_loss and model in - # PolicyTranier and ValueTrainer. 
- "actor_loss": actor_loss, - "reward_critic_loss": reward_critic_loss, - "ptx_loss": ptx_loss, - "policy_model": policy_model, - "value_model": value_model, - } - - start_time = time.time() - self._globalstep_last_start_time = start_time # time.time() - # self.timers and self.timers("read-data").start() - - for epoch in range(epochs_trained, num_train_epochs): - if isinstance(train_dataloader, paddle.io.DataLoader) and isinstance( - train_dataloader.batch_sampler, DistributedBatchSampler - ): - train_dataloader.batch_sampler.set_epoch(epoch) - - step_control = 0 # used in loop control, reset to 0 after every step - train_step_kwargs.update({"policy_step_control": step_control, "value_step_control": step_control}) - self.control = self.callback_handler.on_epoch_begin(args, self.state, self.control) - - for step, inputs in enumerate(epoch_iterator): - # self.timers and self.timers("read-data").stop() - os.environ["TRAINER_GLOBAL_STEP"] = str(self.state.global_step) - self.callback_handler.on_load_data_end(args, self.state, self.control, inputs=inputs) - # epoch, step and steps_in_epoch only mostly used in train_step by - # `self.state.epoch = epoch + (step + 1) / steps_in_epoch` if not - # resume data - train_step_kwargs.update({"epoch": epoch, "step": step}) - rl_batch, ptx_batch = inputs - # TODO(guosheng): make rl_step/ptx_step run with autocast_smart_context_manager - rl_info, train_step_kwargs = self.rl_step(rl_batch, **train_step_kwargs) - paddle.device.cuda.empty_cache() - if self.use_ptx: - ptx_info, train_step_kwargs = self.ptx_step(ptx_batch, **train_step_kwargs) - rl_info.update(ptx_info) - paddle.device.cuda.empty_cache() - - self.state.global_step = self.value_trainer.state.global_step - self.state.epoch = self.value_trainer.state.epoch - if train_step_kwargs["value_step_control"] == 0: - # NOTE: PipelineParallel only returns a accumulated loss after - # accumulated steps, which is a mixed loss of ppo-loss and - # ptx-loss. We hack PipelineParallel._forward_step to record - # loss metrics and postprocess the recorded losses here. - # Maybe better to make the last_stage worker log to reduce - # comm and for simplicity. - if isinstance(policy_model, fleet.model.PipelineParallel): - with paddle.no_grad(): - # TODO(guosheng): maybe move this to model_pp.py and - # using interface here is better - # interleave betweeen ppo-loss and ptx-loss - if policy_model.is_pipeline_last_stage(): - # loss is 0D tensor, use stack rather than concat - mix_loss = paddle.stack(policy_model._step_losses) - policy_model._step_losses = None - else: - # The tessor shape is not policy_model.accumulate_steps - # (args.accu_steps) but policy_trainer.args.accu_steps, - # since policy_model is created with global pp_config - # using global args.accu_steps which is only half of - # policy_trainer.args.accu_steps, and indeed trainer hack - # model.accumulate_steps in training_pipeline_step to use - # trainer.args.accu_steps. The dtype is fp32(to be check), - # thus no need to broadcast. 
- mix_loss = paddle.empty( - shape=[self.policy_trainer.args.gradient_accumulation_steps], dtype=paddle.float32 - ) - paddle.distributed.broadcast( - mix_loss, src=policy_model.pp_group.ranks[-1], group=policy_model.pp_group - ) - real_actor_loss = mix_loss[0::2].mean() - real_ptx_loss = mix_loss[1::2].mean() - rl_info.update({"train/actor_loss": real_actor_loss, "train/ptx_loss": real_ptx_loss}) - # on_step_end - self.control = self.callback_handler.on_step_end(args, self.state, self.control) - else: - # on_sub_step_end - self.control = self.callback_handler.on_substep_end(args, self.state, self.control) - self._maybe_log_save_evaluate(rl_info, None, epoch, ignore_keys_for_eval, inputs=inputs) - - if step < 0: - logger.warning( - f"There seems to be not a single sample in your epoch_iterator, stopping training at step" - f" {self.state.global_step}! This is expected if you're using an IterableDataset and set" - f" num_steps ({self.state.max_steps}) higher than the number of available samples." - ) - self.control.should_training_stop = True - - self.control = self.callback_handler.on_epoch_end(args, self.state, self.control) - # argument model is not used in _maybe_log_save_evaluate, thus use None - self._maybe_log_save_evaluate(rl_info, None, epoch, ignore_keys_for_eval, inputs=inputs) - - if self.control.should_training_stop: - break - # TODO(guosheng): add epilogue of training - - def _maybe_log_save_evaluate(self, tr_loss, model, epoch, ignore_keys_for_eval, **kwargs): - if self.control.should_log: - - logs: Dict[str, float] = {} - - for k, v in tr_loss.items(): - if isinstance(v, paddle.Tensor) and "lr" not in k and "max_generated_length" not in k: - v_scalar = self._nested_gather(v).mean().item() - # TODO(guosheng): maybe should consider self._enable_delay_scale_loss() - # and maybe should merge with loss postprocess in PP - if "train/actor_loss" == k and "train/ptx_loss" in tr_loss: - # use_ptx would double the gradient_accumulation_steps - # which causes actor_loss and ptx_loss reduced by half - v_scalar = v_scalar * 2 - elif "train/ptx_loss" == k: - # similar to actor_loss and should double, additionally - # it should be divided by ptx_coeff for logging - v_scalar = v_scalar * 2 / self.ptx_coeff - logs[k] = round(v_scalar / (self.state.global_step - self._globalstep_last_logged), 8) - v.subtract_(v) - attr_name = "_total_" + k.split("/")[-1] + "_scalar" - attr_value = getattr(self, attr_name, 0) - setattr(self, attr_name, attr_value + v_scalar) - elif "max_generated_length" in k: - v_scalar = self._nested_gather(v).max().item() - logs[k] = v_scalar - else: - logs[k] = float("{0:.3e}".format(v)) - logs["global_step"] = int(self.state.global_step) - - total_train_batch_size = ( - self.args.train_batch_size * self.args.gradient_accumulation_steps * self.args.dataset_world_size - ) - num_steps = self.state.global_step - self._globalstep_last_logged - logs.update( - speed_metrics( - "interval", - self._globalstep_last_start_time, - num_samples=total_train_batch_size * num_steps, - num_steps=num_steps, - ) - ) - - self._globalstep_last_logged = self.state.global_step - self._globalstep_last_start_time = time.time() - - self.log(logs, **kwargs) - - # To trigger evaluation and save but avoid log again - with guard_set_args(self.control, {"should_log": False}): - super()._maybe_log_save_evaluate(tr_loss, model, epoch, ignore_keys_for_eval) - - def add_kl_divergence_regularization( - self, - prompt: paddle.Tensor, # size = (B, S) # pylint: disable=unused-argument - log_probs: paddle.Tensor, 
# size = (B, L) - ref_log_probs: paddle.Tensor, # size = (B, L) - reward_score: paddle.Tensor, # size = (B,) - sequence_mask: paddle.Tensor, # size = (B, L) - ) -> paddle.Tensor: - kl_divergence_estimate = -self.kl_coeff * (log_probs - ref_log_probs) # size = (B, L) - rewards = kl_divergence_estimate # size = (B, L) - reward_clip = paddle.clip( # size = (B,) - reward_score, - min=-self.clip_range_score, - max=self.clip_range_score, - ) - batch_size = log_probs.shape[0] - for i in range(batch_size): - end_index = sequence_mask[i].nonzero()[-1] - # rewards[i, end_index] += reward_clip[i] - rewards[i, end_index] = rewards[i, end_index] + reward_clip[i] - - return rewards - - def get_advantages_and_returns( - self, - values: paddle.Tensor, - rewards: paddle.Tensor, - sequence_mask: paddle.Tensor, - start: int, - ) -> Tuple[paddle.Tensor, paddle.Tensor]: - """Compute advantages and returns using Generalized Advantage Estimation (GAE).""" - # Modified from https://github.com/CarperAI/trlx/blob/main/trlx/models/modeling_ppo.py - last_gae_lambda = 0.0 - advantages_reversed = [] - values = values * sequence_mask - rewards = rewards * sequence_mask - length = rewards.shape[-1] - for t in reversed(range(start, length)): # pylint: disable=invalid-name - next_values = values[:, t + 1] if t < length - 1 else 0.0 - delta = rewards[:, t] + self.gamma * next_values - values[:, t] - last_gae_lambda = delta + self.gamma * self.gae_lambda * last_gae_lambda - advantages_reversed.append(last_gae_lambda) - advantages = paddle.stack(advantages_reversed[::-1], axis=1) - returns = advantages + values[:, start:] - return advantages.detach(), returns - - def rl_step(self, rl_batch: Dict[str, paddle.Tensor], **kwargs) -> Dict[str, Any]: - prompt = rl_batch["prompt"] - old_log_probs = rl_batch["log_probs"] - ref_log_probs = rl_batch["ref_log_probs"] - rewards = rl_batch["rewards"] - old_reward_values = rl_batch["reward_values"] - input_ids = rl_batch["input_ids"] - attention_mask = rl_batch["attention_mask"] - - start = prompt.shape[-1] - 1 - sequence_mask = attention_mask[:, 1:] - - with paddle.no_grad(): - # maybe these two can also be put into rollout - old_rewards = self.add_kl_divergence_regularization( - prompt, - old_log_probs, - ref_log_probs, - rewards, - sequence_mask, - ) - reward_advantages, reward_returns = self.get_advantages_and_returns( - old_reward_values, - old_rewards, - sequence_mask, - start, - ) - # metric - kl_divergence = ((old_log_probs - ref_log_probs) * sequence_mask)[:, start:].sum(axis=-1).mean() - mean_generated_length = sequence_mask[:, start:].cast(paddle.float32).sum(axis=-1).mean() - max_generated_length = sequence_mask[:, start:].cast(paddle.float32).sum(axis=-1).max() - rewards = rewards.mean() - # trainer inputs - old_log_probs = old_log_probs[:, start:] - old_reward_values = old_reward_values[:, start:] - sequence_mask = sequence_mask[:, start:] - - policy_trainer_inputs = { - "input_ids": input_ids, - "attention_mask": attention_mask, - "old_log_probs": old_log_probs, - "reward_advantages": reward_advantages, - "sequence_mask": sequence_mask, - # "start": start, - # "use_cache": False, - # "return_dict": True, - } - kwargs = self.policy_trainer.full_training_step(policy_trainer_inputs, **kwargs) - - value_trainer_inputs = { - "input_ids": input_ids, - "attention_mask": attention_mask, - "old_reward_values": old_reward_values, - "reward_returns": reward_returns, - "sequence_mask": sequence_mask, - # "start": start, - # "use_cache": False, - # "return_dict": True, - } - kwargs = 
self.value_trainer.full_training_step(value_trainer_inputs, **kwargs) - - return { - "train/actor_loss": kwargs["actor_loss"], - "train/reward_critic_loss": kwargs["reward_critic_loss"], - "train/reward": rewards, - "train/kl_divergence": kl_divergence, - "train/mean_generated_length": mean_generated_length, - "train/max_generated_length": max_generated_length, - "train/actor_lr": self.policy_trainer._get_learning_rate(), - "train/reward_critic_lr": self.value_trainer._get_learning_rate(), - }, kwargs - - def ptx_step(self, ptx_batch: Dict[str, paddle.Tensor], **kwargs) -> Dict[str, Any]: - """Perform a single update step with PTX loss.""" - kwargs = self.policy_trainer.full_training_step(ptx_batch, **kwargs) - return {"train/ptx_loss": kwargs["ptx_loss"]}, kwargs - - def split_ptx_micro_batches( - self, - ptx_batch: Dict[str, paddle.Tensor], - ) -> List[Dict[str, paddle.Tensor]]: - """Split a batch of PTX samples into micro-batches.""" - micro_batches = [] - total_batch_size = ptx_batch["input_ids"].shape[0] - micro_batch_size = self.args.per_device_train_batch_size - for i in range(0, total_batch_size, micro_batch_size): - micro_batch = map_structure( - # pylint: disable-next=cell-var-from-loop - lambda tensor: tensor[i : i + micro_batch_size], # noqa: B023 - ptx_batch, - ) - micro_batches.append(micro_batch) - return micro_batches - - def split_rl_micro_batches( - self, - prompt_only_batch: PromptOnlyBatch, - ) -> List[PromptOnlyBatch]: - """Split a batch of RL samples into micro-batches.""" - total_batch_size = prompt_only_batch["input_ids"].shape[0] - micro_batch_size = self.args.per_device_train_batch_size - micro_batches = [] - for i in range(0, total_batch_size, micro_batch_size): - micro_batch = {} - micro_batch = map_structure( - lambda tensor: tensor[i : i + micro_batch_size], - prompt_only_batch, - ) - micro_batches.extend(self.rollout(micro_batch)) - return micro_batches - - @paddle.no_grad() - def rollout(self, prompt_only_batch: PromptOnlyBatch) -> List[Dict[str, Any]]: - """Rollout a batch of experiences.""" - input_ids = prompt_only_batch["input_ids"] - # NOTE: generation output of paddlenlp do not contain prompt, we should - # change sequences here. 
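# For reference, a minimal standalone sketch of the GAE recursion used by
# get_advantages_and_returns above (advantage_t = delta_t + gamma * lambda * advantage_{t+1},
# returns = advantages + values). Illustrative only, with made-up per-token rewards/values
# and start=0; the defaults mirror gamma=1.0 and gae_lambda=0.95 set in __init__.
import paddle

def gae(values, rewards, start=0, gamma=1.0, gae_lambda=0.95):
    last_gae = 0.0
    advantages_reversed = []
    length = rewards.shape[-1]
    for t in reversed(range(start, length)):
        next_values = values[:, t + 1] if t < length - 1 else 0.0
        delta = rewards[:, t] + gamma * next_values - values[:, t]  # TD residual
        last_gae = delta + gamma * gae_lambda * last_gae            # discounted accumulation
        advantages_reversed.append(last_gae)
    advantages = paddle.stack(advantages_reversed[::-1], axis=1)
    returns = advantages + values[:, start:]
    return advantages.detach(), returns

values = paddle.to_tensor([[0.1, 0.2, 0.3, 0.4]])
rewards = paddle.to_tensor([[0.0, 0.0, 0.0, 1.0]])
advantages, returns = gae(values, rewards)
print(advantages.numpy(), returns.numpy())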
- # sequences = self.actor_model.generate( - # input_ids=input_ids, - # attention_mask=prompt_only_batch["attention_mask"], - # generation_config=self.generation_config, - # synced_gpus=ShardingOption.FULL_SHARD in self.policy_trainer.args.sharding, - # )[0] - # sequences = sequences.reshape([input_ids.shape[0], self.args.num_return_sequences, -1]).transpose([1, 0, 2]) - sequences = [self.load_sing_gen_data(as_batches=False)["input_ids"]] - - return [ - self.post_rollout( - input_ids, - seq, - attention_mask=paddle.logical_and( - seq != self.tokenizer.pad_token_id, - seq != self.tokenizer.unk_token_id, - ), - ) - for seq in sequences - ] - - @paddle.no_grad() - def post_rollout( - self, - prompt: paddle.Tensor, - sequence: paddle.Tensor, - attention_mask: paddle.Tensor, - ) -> Dict[str, Any]: - if self.reward_tokenizer is not self.tokenizer: - reward_tokenize_output = batch_retokenize( - sequence, - src_tokenizer=self.tokenizer, - dest_tokenizer=self.reward_tokenizer, - skip_special_tokens=True, - ) - reward_seq = reward_tokenize_output["input_ids"] - reward_attention_mask = reward_tokenize_output["attention_mask"] - else: - # for text in self.tokenizer.batch_decode( - # sequence, - # skip_special_tokens=True - # ): - # print(text) - reward_seq = sequence - reward_attention_mask = attention_mask - - # pipe model outputs a logits tensor with LMHead, while non-pipe model - # outputs a tuple with logits tensor as the only one element. - logits = self.actor_model( - sequence, - attention_mask=attention_mask, - # return_dict=True, - ) # .logits - if not isinstance(logits, paddle.Tensor): - logits = logits[0] - ref_logits = self.reference_model( - sequence, - attention_mask=attention_mask, - # return_dict=True, - ) # .logits - if not isinstance(ref_logits, paddle.Tensor): - ref_logits = ref_logits[0] - - reward_score = self.reward_model( - reward_seq, - attention_mask=reward_attention_mask, - # return_dict=True, - )[ - 1 - ] # .end_scores - reward_value = self.reward_critic_model( - sequence, - attention_mask=attention_mask, - # return_dict=True, - )[ - 0 - ] # .scores - # TODO(guosheng): move these to model methods such as get_logprobs - reward_score = reward_score.squeeze(axis=-1) - reward_value = reward_value.squeeze(axis=-1)[:, :-1] - log_probs = gather_log_probabilities(logits[:, :-1], sequence[:, 1:]) - ref_log_probs = gather_log_probabilities(ref_logits[:, :-1], sequence[:, 1:]) - return { - "prompt": prompt, - "log_probs": log_probs, - "ref_log_probs": ref_log_probs, - "rewards": reward_score, - "reward_values": reward_value, - "input_ids": sequence, - "attention_mask": attention_mask, - } - - # @paddle.no_grad() - # def post_rollout( - # self, - # prompt: paddle.Tensor, - # sequence: paddle.Tensor, - # attention_mask: paddle.Tensor, - # ) -> Dict[str, Any]: - # if self.reward_tokenizer is not self.tokenizer: - # reward_tokenize_output = batch_retokenize diff --git a/examples/RLHF/ppo_main.py b/examples/RLHF/ppo_main.py index 05829e7d57ff..9d3be1dd7240 100644 --- a/examples/RLHF/ppo_main.py +++ b/examples/RLHF/ppo_main.py @@ -19,7 +19,7 @@ import paddle from data import PromptOnlyDataset, SupervisedDataset, parse_dataset -from new_ppo_trainer import PPOTrainer +from ppo_trainer import PPOTrainer from paddlenlp.trainer import PdArgumentParser, TrainingArguments, get_last_checkpoint from paddlenlp.transformers import AutoConfig, AutoTokenizer, LlamaTokenizer diff --git a/examples/RLHF/ppo_trainer.py b/examples/RLHF/ppo_trainer.py index 32a9c148f193..d72737f3db17 100644 --- 
a/examples/RLHF/ppo_trainer.py +++ b/examples/RLHF/ppo_trainer.py @@ -17,6 +17,7 @@ import math import os import time +import types from contextlib import contextmanager from typing import Any, Callable, Dict, List, Optional, Tuple, Union @@ -26,6 +27,7 @@ import paddle.nn.functional as F import tqdm from data import DummyDataset, PromptOnlyBatch +from paddle.distributed import fleet from paddle.io import DataLoader, Dataset, DistributedBatchSampler from paddle.utils import map_structure from rich.console import Console @@ -33,6 +35,7 @@ from paddlenlp.data import DataCollator from paddlenlp.generation import GenerationConfig +from paddlenlp.generation.utils import GenerationMixin from paddlenlp.trainer.trainer import ( TRAINER_STATE_NAME, EvalLoopOutput, @@ -55,6 +58,8 @@ split_inputs_sequence_dim, ) from paddlenlp.transformers import BatchEncoding, PretrainedModel, PretrainedTokenizer +from paddlenlp.transformers.configuration_utils import PretrainedConfig +from paddlenlp.transformers.model_outputs import ModelOutput from paddlenlp.transformers.tokenizer_utils_base import ( PaddingStrategy, TruncationStrategy, @@ -538,14 +543,14 @@ def compute_loss(self, model, inputs, return_outputs=False): input_ids = inputs["input_ids"] attention_mask = inputs["attention_mask"] - old_log_probs = inputs["old_log_probs"] reward_advantages = inputs["reward_advantages"] - sequence_mask = inputs["sequence_mask"] - start = inputs["start"] - # NOTE: TensorParallel model requires non-Tensor inputs to be lists, thus - # do not use these inputs currently. + # NOTE: TensorParallel model requires non-Tensor inputs to be lists and + # broadcast them, thus do not or optionally use these inputs currently. # use_cache = inputs["use_cache"] # return_dict = inputs["return_dict"] + start = inputs.pop("start", None) + old_log_probs = inputs["old_log_probs"][:, start:] if start is not None else inputs["old_log_probs"] + sequence_mask = inputs["sequence_mask"][:, start:] if start is not None else inputs["sequence_mask"] outputs = model( input_ids=input_ids, attention_mask=attention_mask, # use_cache=use_cache, return_dict=return_dict @@ -559,10 +564,10 @@ def compute_loss(self, model, inputs, return_outputs=False): log_probs = gather_log_probabilities(logits[:, :-1], input_ids[:, 1:]) actor_loss = self.actor_loss_fn( - log_probs[:, start:], - old_log_probs[:, start:], + log_probs[:, -old_log_probs.shape[1] :], + old_log_probs, reward_advantages, - sequence_mask[:, start:], + sequence_mask, ) return actor_loss @@ -637,14 +642,16 @@ def compute_loss(self, model, inputs, return_outputs=False): """ input_ids = inputs["input_ids"] attention_mask = inputs["attention_mask"] - old_reward_values = inputs["old_reward_values"] reward_returns = inputs["reward_returns"] - sequence_mask = inputs["sequence_mask"] - start = inputs["start"] - # NOTE: TensorParallel model requires non-Tensor inputs to be lists, thus - # do not use these inputs currently. + # NOTE: TensorParallel model requires non-Tensor inputs to be lists and + # broadcast them, thus do not or optionally use these inputs currently. 
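# For reference, a small example of the slicing change in the hunk above: once
# old_log_probs has already been sliced to the response region, taking the trailing
# old_log_probs.shape[1] positions of the freshly computed log_probs aligns the two
# without passing `start` through the model inputs. Illustrative only.
import paddle

full = paddle.arange(10, dtype="float32").reshape([1, 10])  # stands in for log_probs over the whole sequence
start = 6
pre_sliced = full[:, start:]                                 # stands in for the pre-sliced old_log_probs
aligned = full[:, -pre_sliced.shape[1]:]                     # equivalent suffix of the new log_probs
assert bool((aligned == pre_sliced).all())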
# use_cache = inputs["use_cache"] # return_dict = inputs["return_dict"] + start = inputs.pop("start", None) + old_reward_values = ( + inputs["old_reward_values"][:, start:] if start is not None else inputs["old_reward_values"] + ) + sequence_mask = inputs["sequence_mask"][:, start:] if start is not None else inputs["sequence_mask"] outputs = model( input_ids=input_ids, attention_mask=attention_mask, # use_cache=use_cache, return_dict=return_dict @@ -659,15 +666,17 @@ def compute_loss(self, model, inputs, return_outputs=False): reward_values = reward_values.squeeze(axis=-1)[:, :-1] reward_critic_loss = self.critic_loss_fn( - reward_values[:, start:], - old_reward_values[:, start:], + reward_values[:, -old_reward_values.shape[1] :], + old_reward_values, reward_returns, - sequence_mask[:, start:], + sequence_mask, ) return reward_critic_loss def full_training_step(self: Trainer, inputs: Dict[str, paddle.Tensor], **kwargs): + # TODO(guosheng): Make these training control vars mapping as class attr, + # then PPOTrainer can extract and reuse them to avoid hard code. kwargs["model"] = kwargs.pop("value_model") kwargs["step_control"] = kwargs.pop("value_step_control") kwargs["tr_loss"] = kwargs.pop("reward_critic_loss") @@ -709,6 +718,98 @@ def is_same_tokenizer( ) +class PipeEvalModel(GenerationMixin): + def __init__(self, trainer: Trainer): + self.model: fleet.model.PipelineParallel = trainer.model_wrapped + self.config: PretrainedConfig = trainer.model.config + self._is_gen = False + # self.gen_fn = None + # self.fwd_fn = None + # use non-pipe model generetion related methods + self.prepare_inputs_for_generation = types.MethodType( + self.model._layers._non_pipe_model_class.prepare_inputs_for_generation, self + ) + self.update_model_kwargs_for_generation = ( + self.model._layers._non_pipe_model_class.update_model_kwargs_for_generation + ) + + def eval(self): + self.model.eval() + + def train(self): + self.model.train() + + def _broadcast_outputs(self, outputs): + # outputs is PipelineParallel.eval_batch which is a list of batches. + out = [] + outputs = (outputs,) if isinstance(outputs, paddle.Tensor) else outputs + for tensors in outputs: + if not self.model.is_pipeline_last_stage(): + tensor = tensors if isinstance(tensors, paddle.Tensor) else tensors[0] + head_out_meta = ( + (self.model._layers.head_out_meta,) + if isinstance(self.model._layers.head_out_meta, paddle.static.InputSpec) + else self.model._layers.head_out_meta + ) + tensors = tuple( + paddle.empty( + shape=[ + tensor.shape[i] if (meta.shape[i] is None or meta.shape[i] < 0) else meta.shape[i] + for i in range(len(meta.shape)) + ], + dtype=tensor.dtype if meta.dtype is None else meta.dtype, + ) + for meta in head_out_meta + ) + else: + # Currently use tuple instead of ModelOutput and require the + # caller use the return result as tuple. 
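In _broadcast_outputs above, ranks that are not the last pipeline stage cannot see the head output, so they allocate empty receive buffers whose unknown dimensions are filled in from a tensor they do hold before joining the broadcast. A small runnable illustration of that shape resolution, with an InputSpec standing in for head_out_meta and made-up sizes:

    import paddle
    from paddle.static import InputSpec

    meta = InputSpec(shape=[None, None, 32000], dtype="float32")  # stand-in for head_out_meta
    local = paddle.zeros([2, 7, 1])  # any local tensor carrying the runtime batch/seq dims

    shape = [
        local.shape[i] if (meta.shape[i] is None or meta.shape[i] < 0) else meta.shape[i]
        for i in range(len(meta.shape))
    ]
    buffer = paddle.empty(shape, dtype=local.dtype if meta.dtype is None else meta.dtype)
    print(buffer.shape)  # [2, 7, 32000]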
+ tensors = ( + (tensors,) + if isinstance(tensors, paddle.Tensor) + else tensors.to_tuple() + if isinstance(tensors, ModelOutput) + else tensors + ) + + # map_structure( + # lambda tensor: paddle.distributed.broadcast( + # tensor, + # src=self.model.pp_group.ranks[-1], + # group=self.model.pp_group), tensors) + for tensor in tensors: + paddle.distributed.broadcast(tensor, src=self.model.pp_group.ranks[-1], group=self.model.pp_group) + out.append(tensors[0] if len(tensors) == 1 else tensors) + return out[0] if len(out) == 1 else out + + def __call__(self, *args, **kwargs): + model = self.model + assert self.model.training is False + # TODO(guosheng): hack for post-process in eval, so we can let last stage + # do more to reduce comm overhead. + if self._is_gen: + # inputs by `prepare_inputs_for_generation` is a dict with following keys: + # "input_ids", "position_ids", "past_key_values", "use_cache", "attention_mask" + # NOTE: cache/past_key_values should be rather than pass like + pass + else: + # use _prepare_pipeline_inputs_func to convert pipeline inputs + inputs, labels = model._prepare_pipeline_inputs_func(*args, **kwargs) + # NOTE(guosheng): bug seems exist. pp.eval_batch(compute_loss=False) + # will set pp._compute_loss to False and would not set it back. Thus + # hack here to set it back. + with guard_set_args(model, {"_compute_loss": False}): + outputs = model.eval_batch([inputs, labels], compute_loss=False) + outputs = self._broadcast_outputs(outputs) + return outputs + + def generate(self, *args, **kwargs): + # when generate, cache should be + self._is_gen = True + super().generate(*args, **kwargs) + self._is_gen = False + + class PPOTrainer(Trainer): def __init__( self, @@ -725,7 +826,14 @@ def __init__( optimizers: Tuple[paddle.optimizer.Optimizer, paddle.optimizer.lr.LRScheduler] = (None, None), preprocess_logits_for_metrics: Callable[[paddle.Tensor, paddle.Tensor], paddle.Tensor] = None, ): - with guard_set_args(args, {"recompute": False, "fp16_opt_level": "O1"}): + with guard_set_args( + args, + { + "recompute": False, + "fp16_opt_level": "O1", + "pipeline_parallel_degree": 1, # workaround for pipeline parallel model check + }, + ): # just used to create trival attrs might be used in the training # process of trainer, while changing some args to avoid model usage # in __init__ such as recompute and AMP-O2 @@ -794,7 +902,7 @@ def __init__( # use trainer for reference_model/reward_model to enable sharding stage-3 # maybe we should allow models to use different dist strategies later - if ShardingOption.FULL_SHARD in args.sharding: + if True: # ShardingOption.FULL_SHARD in args.sharding: self.reference_trainer = Trainer( reference_model, criterion, @@ -869,28 +977,30 @@ def __init__( @property def reference_model(self): - # use model without Trainer model = getattr(self, "_reference_model", None) if model is not None: return model # use model with Trainer if self.reference_trainer.args.pipeline_parallel_degree > 1: # Only accept wrapped model for pipeline_parallel mode - model = self.reference_trainer.model_wrapped + # model = self.reference_trainer.model_wrapped + model = PipeEvalModel(self.reference_trainer) + self._reference_model = model else: model = self.reference_trainer.model return model @property def reward_model(self): - # use model without Trainer model = getattr(self, "_reward_model", None) if model is not None: return model # use model with Trainer if self.reward_trainer.args.pipeline_parallel_degree > 1: # Only accept wrapped model for pipeline_parallel mode - 
model = self.reward_trainer.model_wrapped + # model = self.reward_trainer.model_wrapped + model = PipeEvalModel(self.reward_trainer) + self._reward_model = model else: model = self.reward_trainer.model return model @@ -899,9 +1009,14 @@ def reward_model(self): def actor_model(self): if self.training: return self.policy_trainer.model_wrapped + model = getattr(self, "_actor_model", None) + if model is not None: + return model if self.policy_trainer.args.pipeline_parallel_degree > 1: # Only accept wrapped model for pipeline_parallel mode - model = self.policy_trainer.model_wrapped + # model = self.policy_trainer.model_wrapped + model = PipeEvalModel(self.policy_trainer) + self._actor_model = model else: model = self.policy_trainer.model return model @@ -910,9 +1025,14 @@ def actor_model(self): def reward_critic_model(self): if self.training: return self.value_trainer.model_wrapped + model = getattr(self, "_reward_critic_model", None) + if model is not None: + return model if self.value_trainer.args.pipeline_parallel_degree > 1: # Only accept wrapped model for pipeline_parallel mode - model = self.value_trainer.model_wrapped + # model = self.value_trainer.model_wrapped + model = PipeEvalModel(self.value_trainer) + self._reward_critic_model = model else: model = self.value_trainer.model return model @@ -1075,6 +1195,31 @@ def init_train_model_opt( ) return policy_model, value_model + @staticmethod + def load_sing_gen_data(as_batches=True): + import pickle + + from paddle.distributed import fleet + + hcg = fleet.get_hybrid_communicate_group() + data_rank = hcg.get_sharding_parallel_rank() + with open(f"rl_batch-{data_rank}.data", "rb") as f: + data = pickle.load(f) + rl_batch = map_structure(lambda x: paddle.to_tensor(x), data) + rl_batches = [rl_batch] if as_batches else rl_batch + return rl_batches + + @staticmethod + def save_single_gen_data(rl_batch): + import pickle + + import paddle.distributed as dist + + with open(f"rl_batch-{dist.get_rank()}.data", "wb") as f: + rl_batch = map_structure(lambda x: x.numpy(), rl_batch) + pickle.dump(rl_batch, f) + # exit(0) + def get_epoch_iterator(self): # TODO(guosheng): support iter dataset num_prompt_only_batches = len(self.prompt_only_dataloader) @@ -1089,6 +1234,7 @@ def gen_epoch_data(): # generate batches self.set_eval() rl_batches = self.split_rl_micro_batches(prompt_only_batch) + # rl_batches = self.load_sing_gen_data(as_batches=True) if self.use_ptx: ptx_batches = self.split_ptx_micro_batches(ptx_batch) else: @@ -1285,6 +1431,39 @@ def train( self.state.global_step = self.value_trainer.state.global_step self.state.epoch = self.value_trainer.state.epoch if train_step_kwargs["value_step_control"] == 0: + # NOTE: PipelineParallel only returns a accumulated loss after + # accumulated steps, which is a mixed loss of ppo-loss and + # ptx-loss. We hack PipelineParallel._forward_step to record + # loss metrics and postprocess the recorded losses here. + # Maybe better to make the last_stage worker log to reduce + # comm and for simplicity. 
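The postprocessing that follows is just an even/odd split of the per-micro-step losses recorded by the patched _forward_step, since PPO and PTX micro-batches alternate within one accumulation window. A toy illustration with made-up values:

    import paddle

    # suppose 8 accumulation steps alternate ppo-loss, ptx-loss, ppo-loss, ...
    step_losses = [paddle.to_tensor(float(i)) for i in range(8)]
    mix_loss = paddle.stack(step_losses)   # 0-D losses stack into a 1-D tensor
    actor_loss = mix_loss[0::2].mean()     # ppo losses sit at even positions
    ptx_loss = mix_loss[1::2].mean()       # ptx losses sit at odd positions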
+ if isinstance(policy_model, fleet.model.PipelineParallel): + with paddle.no_grad(): + # TODO(guosheng): maybe move this to model_pp.py and + # using interface here is better + # interleave betweeen ppo-loss and ptx-loss + if policy_model.is_pipeline_last_stage(): + # loss is 0D tensor, use stack rather than concat + mix_loss = paddle.stack(policy_model._step_losses) + policy_model._step_losses = None + else: + # The tessor shape is not policy_model.accumulate_steps + # (args.accu_steps) but policy_trainer.args.accu_steps, + # since policy_model is created with global pp_config + # using global args.accu_steps which is only half of + # policy_trainer.args.accu_steps, and indeed trainer hack + # model.accumulate_steps in training_pipeline_step to use + # trainer.args.accu_steps. The dtype is fp32(to be check), + # thus no need to broadcast. + mix_loss = paddle.empty( + shape=[self.policy_trainer.args.gradient_accumulation_steps], dtype=paddle.float32 + ) + paddle.distributed.broadcast( + mix_loss, src=policy_model.pp_group.ranks[-1], group=policy_model.pp_group + ) + real_actor_loss = mix_loss[0::2].mean() + real_ptx_loss = mix_loss[1::2].mean() + rl_info.update({"train/actor_loss": real_actor_loss, "train/ptx_loss": real_ptx_loss}) # on_step_end self.control = self.callback_handler.on_step_end(args, self.state, self.control) else: @@ -1316,6 +1495,8 @@ def _maybe_log_save_evaluate(self, tr_loss, model, epoch, ignore_keys_for_eval, for k, v in tr_loss.items(): if isinstance(v, paddle.Tensor) and "lr" not in k and "max_generated_length" not in k: v_scalar = self._nested_gather(v).mean().item() + # TODO(guosheng): maybe should consider self._enable_delay_scale_loss() + # and maybe should merge with loss postprocess in PP if "train/actor_loss" == k and "train/ptx_loss" in tr_loss: # use_ptx would double the gradient_accumulation_steps # which causes actor_loss and ptx_loss reduced by half @@ -1431,6 +1612,15 @@ def rl_step(self, rl_batch: Dict[str, paddle.Tensor], **kwargs) -> Dict[str, Any sequence_mask, start, ) + # metric + kl_divergence = ((old_log_probs - ref_log_probs) * sequence_mask)[:, start:].sum(axis=-1).mean() + mean_generated_length = sequence_mask[:, start:].cast(paddle.float32).sum(axis=-1).mean() + max_generated_length = sequence_mask[:, start:].cast(paddle.float32).sum(axis=-1).max() + rewards = rewards.mean() + # trainer inputs + old_log_probs = old_log_probs[:, start:] + old_reward_values = old_reward_values[:, start:] + sequence_mask = sequence_mask[:, start:] policy_trainer_inputs = { "input_ids": input_ids, @@ -1438,9 +1628,9 @@ def rl_step(self, rl_batch: Dict[str, paddle.Tensor], **kwargs) -> Dict[str, Any "old_log_probs": old_log_probs, "reward_advantages": reward_advantages, "sequence_mask": sequence_mask, - "start": start, - "use_cache": False, - "return_dict": True, + # "start": start, + # "use_cache": False, + # "return_dict": True, } kwargs = self.policy_trainer.full_training_step(policy_trainer_inputs, **kwargs) @@ -1450,19 +1640,12 @@ def rl_step(self, rl_batch: Dict[str, paddle.Tensor], **kwargs) -> Dict[str, Any "old_reward_values": old_reward_values, "reward_returns": reward_returns, "sequence_mask": sequence_mask, - "start": start, - "use_cache": False, - "return_dict": True, + # "start": start, + # "use_cache": False, + # "return_dict": True, } kwargs = self.value_trainer.full_training_step(value_trainer_inputs, **kwargs) - with paddle.no_grad(): - kl_divergence = ((old_log_probs - ref_log_probs) * sequence_mask)[:, start:].sum(axis=-1).mean() - 
mean_generated_length = sequence_mask[:, start:].cast(paddle.float32).sum(axis=-1).mean() - max_generated_length = sequence_mask[:, start:].cast(paddle.float32).sum(axis=-1).max() - - rewards = rewards.mean() - return { "train/actor_loss": kwargs["actor_loss"], "train/reward_critic_loss": kwargs["reward_critic_loss"], @@ -1519,13 +1702,14 @@ def rollout(self, prompt_only_batch: PromptOnlyBatch) -> List[Dict[str, Any]]: input_ids = prompt_only_batch["input_ids"] # NOTE: generation output of paddlenlp do not contain prompt, we should # change sequences here. - sequences = self.actor_model.generate( - input_ids=input_ids, - attention_mask=prompt_only_batch["attention_mask"], - generation_config=self.generation_config, - synced_gpus=ShardingOption.FULL_SHARD in self.policy_trainer.args.sharding, - )[0] - sequences = sequences.reshape([input_ids.shape[0], self.args.num_return_sequences, -1]).transpose([1, 0, 2]) + # sequences = self.actor_model.generate( + # input_ids=input_ids, + # attention_mask=prompt_only_batch["attention_mask"], + # generation_config=self.generation_config, + # synced_gpus=ShardingOption.FULL_SHARD in self.policy_trainer.args.sharding, + # )[0] + # sequences = sequences.reshape([input_ids.shape[0], self.args.num_return_sequences, -1]).transpose([1, 0, 2]) + sequences = [self.load_sing_gen_data(as_batches=False)["input_ids"]] return [ self.post_rollout( @@ -1556,26 +1740,48 @@ def post_rollout( reward_seq = reward_tokenize_output["input_ids"] reward_attention_mask = reward_tokenize_output["attention_mask"] else: + # for text in self.tokenizer.batch_decode( + # sequence, + # skip_special_tokens=True + # ): + # print(text) reward_seq = sequence reward_attention_mask = attention_mask + # pipe model outputs a logits tensor with LMHead, while non-pipe model + # outputs a tuple with logits tensor as the only one element. 
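Because of that difference, every call site below unwraps the model output the same way; a small helper expressing the pattern (illustrative only, the trainer inlines the isinstance checks instead):

    import paddle
    from paddlenlp.transformers.model_outputs import ModelOutput

    def first_output(out):
        # pipe models return the logits/scores tensor directly; non-pipe models
        # return a tuple (or ModelOutput) whose first element is that tensor
        if isinstance(out, paddle.Tensor):
            return out
        if isinstance(out, ModelOutput):
            return out.to_tuple()[0]
        return out[0]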
logits = self.actor_model( sequence, attention_mask=attention_mask, - return_dict=True, - ).logits - ref_logits = self.reference_model(sequence, attention_mask=attention_mask, return_dict=True).logits - - reward_score = self.reward_model(reward_seq, attention_mask=reward_attention_mask, return_dict=True).end_scores + # return_dict=True, + ) # .logits + if not isinstance(logits, paddle.Tensor): + logits = logits[0] + ref_logits = self.reference_model( + sequence, + attention_mask=attention_mask, + # return_dict=True, + ) # .logits + if not isinstance(ref_logits, paddle.Tensor): + ref_logits = ref_logits[0] + + reward_score = self.reward_model( + reward_seq, + attention_mask=reward_attention_mask, + # return_dict=True, + )[ + 1 + ] # .end_scores reward_value = self.reward_critic_model( sequence, attention_mask=attention_mask, - return_dict=True, - ).scores - + # return_dict=True, + )[ + 0 + ] # .scores + # TODO(guosheng): move these to model methods such as get_logprobs reward_score = reward_score.squeeze(axis=-1) reward_value = reward_value.squeeze(axis=-1)[:, :-1] - log_probs = gather_log_probabilities(logits[:, :-1], sequence[:, 1:]) ref_log_probs = gather_log_probabilities(ref_logits[:, :-1], sequence[:, 1:]) return { @@ -1587,3 +1793,13 @@ def post_rollout( "input_ids": sequence, "attention_mask": attention_mask, } + + # @paddle.no_grad() + # def post_rollout( + # self, + # prompt: paddle.Tensor, + # sequence: paddle.Tensor, + # attention_mask: paddle.Tensor, + # ) -> Dict[str, Any]: + # if self.reward_tokenizer is not self.tokenizer: + # reward_tokenize_output = batch_retokenize From 8e8143e741b5fe6c4a3083cb0dfc27a313c645b9 Mon Sep 17 00:00:00 2001 From: whucsgs Date: Sun, 4 Feb 2024 06:04:44 +0000 Subject: [PATCH 03/46] Fix padding among batches of accumulation steps in _prepare_pipeline_inputs_func. 
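Background for this patch: PipelineParallel requires send/recv tensors to have the same shape across the micro-batches of one accumulation window, so prompt+target fields are right-padded to a common length with a shared pad_len. A minimal sketch of that padding, assuming [batch, seq] integer tensors:

    import paddle

    def right_pad(tensors, pad_len, value=0):
        # pad each [bs, seq] tensor on the right by its own pad_len
        out = []
        for x, n in zip(tensors, pad_len):
            if n > 0:
                x = paddle.concat([x, paddle.full([x.shape[0], n], value, dtype=x.dtype)], axis=-1)
            out.append(x)
        return out

    batches = [paddle.ones([2, 5], dtype="int64"), paddle.ones([2, 7], dtype="int64")]
    max_len = max(x.shape[-1] for x in batches)
    pad_len = [max_len - x.shape[-1] for x in batches]
    batches = right_pad(batches, pad_len, value=0)  # both batches are now [2, 7]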
--- examples/RLHF/models/model_pp.py | 62 +++++++++++++++++++------ examples/RLHF/models/ppo_model_utils.py | 14 +++++- examples/RLHF/ppo_trainer.py | 23 ++++++--- 3 files changed, 77 insertions(+), 22 deletions(-) diff --git a/examples/RLHF/models/model_pp.py b/examples/RLHF/models/model_pp.py index c7b567db1988..220e79bfee07 100644 --- a/examples/RLHF/models/model_pp.py +++ b/examples/RLHF/models/model_pp.py @@ -175,19 +175,20 @@ def make_position_ids(attention_mask): @paddle.no_grad() -def pad_batches_inputs(inputs, padding_value=0, max_len=None): +def pad_batches_inputs(inputs, padding_value=0, max_len=None, pad_len=None): """Pad length for tensors shaped [bs, seq_len] to [bs, max(seq_lens)]""" - if max_len is None: + if pad_len is not None: + pad_len = [pad_len] * len(inputs) if isinstance(pad_len, int) else pad_len + elif max_len is None: # max_len = max([x.shape[-1] for x in inputs if x is not None]) max_len = max([x.shape[-1] if isinstance(x, paddle.Tensor) else 0 for x in inputs]) + pad_len = [max_len - x.shape[-1] if isinstance(x, paddle.Tensor) else 0 for x in inputs] for i in range(len(inputs)): x = inputs[i] # if x is None or x.shape[-1] == max_len: if not isinstance(x, paddle.Tensor) or x.shape[-1] == max_len: continue - inputs[i] = paddle.concat( - [x, paddle.full([x.shape[0], max_len - x.shape[-1]], padding_value, dtype=x.dtype)], -1 - ) + inputs[i] = paddle.concat([x, paddle.full([x.shape[0], pad_len[i]], padding_value, dtype=x.dtype)], -1) return inputs @@ -227,6 +228,7 @@ class LlamaPolicyPipe(LlamaForCausalLMPipe): @fwd_args_to_dict def _prepare_pipeline_inputs_func(self, inputs): first_stage_keys = ["input_ids", "attention_mask"] + # first_stage_keys = ["input_ids", "attention_mask", "position_ids"] # last_stage_keys = [ # "labels", "input_ids", "log_probs", "advantages", "sequence_mask" # ] @@ -238,7 +240,9 @@ def _prepare_pipeline_inputs_func(self, inputs): if type(inputs) is dict: # ppo-loss and ptx-loss need different labels, and data iter provides # corrensponding data, thus add the not provided fields here. + # policy trian and infer has different inputs, infer uses position_ids. for key in last_stage_keys: + # for key in first_stage_keys + last_stage_keys: if key not in inputs: inputs[key] = None return [ @@ -248,6 +252,7 @@ def _prepare_pipeline_inputs_func(self, inputs): for data in inputs: for key in last_stage_keys: + # for key in first_stage_keys + last_stage_keys: if key not in data: data[key] = None # keys = list(inputs[0].keys()) @@ -256,15 +261,36 @@ def _prepare_pipeline_inputs_func(self, inputs): # micro-batches/accu-steps have the same shape. Thus pad here, maybe # should make data collator do padding and pad optionally here, since # padding strategy may not be clear here. + # 1. For input_ids/attention_mask/labels (prompt+target) padding: # Some data fields, such as input_ids/attention_mask/labels, should # have same shape after padding, and each of them cannot pad only # according to its own max length which might be different since the # filed value is None for different batches/tasks. 
+ src_tgt_keys = ["input_ids", "attention_mask", "labels"] max_len = max([x.shape[-1] for x in inputs_batch["input_ids"]]) - for key, value in inputs_batch.items(): + pad_len = [max_len - x.shape[-1] for x in inputs_batch["input_ids"]] + for key in src_tgt_keys: padding_value = self._ignore_index if key == "labels" else 0 - max_len = max_len if key in ["input_ids", "attention_mask", "labels"] else None - inputs_batch[key] = pad_batches_inputs(value, padding_value, max_len) + inputs_batch[key] = pad_batches_inputs(inputs_batch[key], padding_value, pad_len=pad_len) + # 2. For old_log_probs/reward_advantages/sequence_mask (target) padding: + # hard to pad acorss batches, think in some cases one batch might have the + # longest prompt+target length but the shortest target lengh, which might + # cause mismatch between inputs with prompt+target length and labels with + # target length. NOTE: however trick can be used here, label fields with + # target length such as old_log_probs/reward_advantages/sequence_mask do + # not need to join comm and thus there is no need to keep same shape among + # batches of accumulation steps, they just need to pad as prompt+target + # fields such as input_ids. + tgt_keys = ["old_log_probs", "reward_advantages", "sequence_mask"] + for key in tgt_keys: + padding_value = 0 + inputs_batch[key] = pad_batches_inputs(inputs_batch[key], padding_value, pad_len=pad_len) + # for key, value in inputs_batch.items(): + # padding_value = self._ignore_index if key == "labels" else 0 + # max_len = max_len if key in [ + # "input_ids", "attention_mask", "labels" + # ] else None + # inputs_batch[key] = pad_batches_inputs(value, padding_value, max_len) return [ get_expected_keys(inputs_batch, first_stage_keys), get_expected_keys(inputs_batch, last_stage_keys), @@ -346,12 +372,20 @@ def _prepare_pipeline_inputs_func(self, inputs): # keys = list(inputs[0].keys()) inputs_batch = {key: [data.get(key) for data in inputs] for key in first_stage_keys + last_stage_keys} - # NOTE(guosheng): PipelineParallel requires send/recv tensors among - # micro-batches/accu-steps have the same shape. Thus pad here, maybe - # should make data collator do padding and pad optionally here, since - # padding strategy may not be clear here. - for key, value in inputs_batch.items(): - inputs_batch[key] = pad_batches_inputs(value, padding_value=0) + # 1. For input_ids/attention_mask (prompt+target) padding: + src_tgt_keys = ["input_ids", "attention_mask"] + max_len = max([x.shape[-1] for x in inputs_batch["input_ids"]]) + pad_len = [max_len - x.shape[-1] for x in inputs_batch["input_ids"]] + for key in src_tgt_keys: + padding_value = self._ignore_index if key == "labels" else 0 + inputs_batch[key] = pad_batches_inputs(inputs_batch[key], padding_value, pad_len=pad_len) + # 2. 
For old_reward_values/reward_returns/sequence_mask (target) padding: + tgt_keys = ["old_reward_values", "reward_returns", "sequence_mask"] + for key in tgt_keys: + padding_value = 0 + inputs_batch[key] = pad_batches_inputs(inputs_batch[key], padding_value, pad_len=pad_len) + # for key, value in inputs_batch.items(): + # inputs_batch[key] = pad_batches_inputs(value, padding_value=0) if "position_ids" not in inputs: inputs_batch["position_ids"] = [ make_position_ids(attention_mask) for attention_mask in inputs_batch["attention_mask"] diff --git a/examples/RLHF/models/ppo_model_utils.py b/examples/RLHF/models/ppo_model_utils.py index cb17dfd70d86..aeab7371bc24 100644 --- a/examples/RLHF/models/ppo_model_utils.py +++ b/examples/RLHF/models/ppo_model_utils.py @@ -77,12 +77,24 @@ def actor_loss_fn( return paddle.sum(paddle.maximum(pg_loss1, pg_loss2) * mask) / mask.sum() def forward(self, logits, input_ids, old_log_probs, reward_advantages, sequence_mask, start=None): + # When used in pipe mode, batches among accumulation steps should be paded. + # Hard to pad acorss batches, think in some cases one batch might have the + # longest prompt+target length but the shortest target lengh, which might + # cause mismatch between inputs with prompt+target length and labels with + # target length. NOTE: Thus, we might make all fields be prompt+target + # length rather rather than target and company an extra start input. + # However trick can be used in pipe_model._prepare_pipeline_inputs_func, + # label fields with target length such as old_log_probs/reward_advantages/sequence_mask + # not need to join comm and thus there is no need to keep same shape among + # batches of accumulation steps, they just need to pad as prompt+target + # fields such as input_ids. log_probs = gather_log_probabilities(logits[:, :-1], input_ids[:, 1:]) if start is not None: old_log_probs = old_log_probs[:, start:] sequence_mask = sequence_mask[:, start:] + log_probs = log_probs[:, -old_log_probs.shape[1] :] actor_loss = self.actor_loss_fn( - log_probs[:, -old_log_probs.shape[1] :], + log_probs, old_log_probs, reward_advantages, sequence_mask, diff --git a/examples/RLHF/ppo_trainer.py b/examples/RLHF/ppo_trainer.py index d72737f3db17..3a21c253f057 100644 --- a/examples/RLHF/ppo_trainer.py +++ b/examples/RLHF/ppo_trainer.py @@ -562,9 +562,9 @@ def compute_loss(self, model, inputs, return_outputs=False): elif isinstance(outputs, tuple): logits = outputs[0] - log_probs = gather_log_probabilities(logits[:, :-1], input_ids[:, 1:]) + log_probs = gather_log_probabilities(logits[:, :-1], input_ids[:, 1:])[:, -old_log_probs.shape[1] :] actor_loss = self.actor_loss_fn( - log_probs[:, -old_log_probs.shape[1] :], + log_probs, old_log_probs, reward_advantages, sequence_mask, @@ -729,8 +729,8 @@ def __init__(self, trainer: Trainer): self.prepare_inputs_for_generation = types.MethodType( self.model._layers._non_pipe_model_class.prepare_inputs_for_generation, self ) - self.update_model_kwargs_for_generation = ( - self.model._layers._non_pipe_model_class.update_model_kwargs_for_generation + self.update_model_kwargs_for_generation = types.MethodType( + self.model._layers._non_pipe_model_class.update_model_kwargs_for_generation, self ) def eval(self): @@ -790,8 +790,16 @@ def __call__(self, *args, **kwargs): if self._is_gen: # inputs by `prepare_inputs_for_generation` is a dict with following keys: # "input_ids", "position_ids", "past_key_values", "use_cache", "attention_mask" - # NOTE: cache/past_key_values should be rather than pass like - pass + 
# NOTE: 1. cache/past_key_values should be passed across decoding steps + # by using as model attr rather than input args to reduce comm overhead. + # Also, pipe model defined for training not support this cache input. + # 2. ignore use_cache since _check_data_vaild requires tensor if not None. + # 3. attention_mask can reuse _prepare_decoder_attention_mask in LlamaEmbeddingPipe. + # 4. TODO(guosheng): position_ids in _prepare_pipeline_inputs_func cause error, fix. + inputs, labels = model._prepare_pipeline_inputs_func(*args, **kwargs) + with guard_set_args(model, {"_compute_loss": False}): + outputs = model.eval_batch([inputs, labels], compute_loss=False) + outputs = self._broadcast_outputs(outputs) else: # use _prepare_pipeline_inputs_func to convert pipeline inputs inputs, labels = model._prepare_pipeline_inputs_func(*args, **kwargs) @@ -1594,6 +1602,7 @@ def rl_step(self, rl_batch: Dict[str, paddle.Tensor], **kwargs) -> Dict[str, Any input_ids = rl_batch["input_ids"] attention_mask = rl_batch["attention_mask"] + # log_probs has shifted by one for predicted logits start = prompt.shape[-1] - 1 sequence_mask = attention_mask[:, 1:] @@ -1617,7 +1626,7 @@ def rl_step(self, rl_batch: Dict[str, paddle.Tensor], **kwargs) -> Dict[str, Any mean_generated_length = sequence_mask[:, start:].cast(paddle.float32).sum(axis=-1).mean() max_generated_length = sequence_mask[:, start:].cast(paddle.float32).sum(axis=-1).max() rewards = rewards.mean() - # trainer inputs + # trainer inputs with target length old_log_probs = old_log_probs[:, start:] old_reward_values = old_reward_values[:, start:] sequence_mask = sequence_mask[:, start:] From e4d77814e1c4a842ef2dbf006307ca2d1996d5c2 Mon Sep 17 00:00:00 2001 From: whucsgs Date: Tue, 6 Feb 2024 10:14:26 +0000 Subject: [PATCH 04/46] Fix hcg using in TP generation --- paddlenlp/generation/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddlenlp/generation/utils.py b/paddlenlp/generation/utils.py index cac335344d45..64ac22b360e6 100644 --- a/paddlenlp/generation/utils.py +++ b/paddlenlp/generation/utils.py @@ -1229,7 +1229,7 @@ def sample( try: hcg = fleet.get_hybrid_communicate_group() group = hcg.get_model_parallel_group() - src = group.get_model_parallel_group_src_rank() + src = hcg.get_model_parallel_group_src_rank() except: group, src = None, 0 paddle.distributed.broadcast(next_tokens, src=src, group=group) From 4d1641b58578f9c0fe1b5d85a4aea01c3fa00bc7 Mon Sep 17 00:00:00 2001 From: whucsgs Date: Tue, 6 Feb 2024 10:24:15 +0000 Subject: [PATCH 05/46] Try to support generation in PP. And allow extra training args passed from main from_pratrined. 
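Background for this patch: the pipe model defined for training does not take a cache input, so generation support below works by temporarily swapping the decoder layer's forward for one that forces use_cache and stashes the returned KV cache on the layer instance between decoding steps. A framework-free toy showing that patching pattern (names and the integer "cache" are purely illustrative):

    class ToyDecoderLayer:
        def forward(self, x, past_key_value=None, use_cache=False):
            new_cache = (past_key_value or 0) + 1  # stand-in for the real KV cache
            return (x, new_cache) if use_cache else (x,)

    _orig_forward = ToyDecoderLayer.forward

    def cached_forward(layer, x, **kwargs):
        # force use_cache and feed back the cache stashed on the layer instance
        kwargs.update({"use_cache": True, "past_key_value": getattr(layer, "_cache", None)})
        out = _orig_forward(layer, x, **kwargs)
        layer._cache = out[1]  # keep the cache for the next decoding step
        return out[0]

    ToyDecoderLayer.forward = cached_forward
    layer = ToyDecoderLayer()
    for step in range(3):
        layer.forward(step)
    print(layer._cache)  # 3
    ToyDecoderLayer.forward = _orig_forward  # restore afterwards, as guard_set_args does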
--- examples/RLHF/models/model_pp.py | 14 +++++---- examples/RLHF/models/ppo_model.py | 6 ++-- examples/RLHF/ppo_main.py | 15 +++++++++- examples/RLHF/ppo_trainer.py | 50 +++++++++++++++++++++++-------- 4 files changed, 63 insertions(+), 22 deletions(-) diff --git a/examples/RLHF/models/model_pp.py b/examples/RLHF/models/model_pp.py index 220e79bfee07..bd8dbc7c19d9 100644 --- a/examples/RLHF/models/model_pp.py +++ b/examples/RLHF/models/model_pp.py @@ -20,6 +20,7 @@ from paddle.distributed.fleet.meta_parallel import LayerDesc from paddlenlp.transformers import LlamaForCausalLM, LlamaForCausalLMPipe +from paddlenlp.transformers.llama.modeling import LlamaDecoderLayer from paddlenlp.transformers.llama.modeling_pp import ( LlamaRMSNormPipe, parse_args, @@ -202,6 +203,7 @@ def get_expected_keys(inputs, keys): # patches for base pipe model # non-pipe model class, can be used to parse and convert forward args LlamaForCausalLMPipe._non_pipe_model_class = LlamaForCausalLM +LlamaForCausalLMPipe._non_pipe_decoder_layer_class = LlamaDecoderLayer def fwd_args_to_dict(fun): @@ -227,8 +229,8 @@ class LlamaPolicyPipe(LlamaForCausalLMPipe): @fwd_args_to_dict def _prepare_pipeline_inputs_func(self, inputs): - first_stage_keys = ["input_ids", "attention_mask"] - # first_stage_keys = ["input_ids", "attention_mask", "position_ids"] + # first_stage_keys = ["input_ids", "attention_mask"] + first_stage_keys = ["input_ids", "attention_mask", "position_ids"] # last_stage_keys = [ # "labels", "input_ids", "log_probs", "advantages", "sequence_mask" # ] @@ -241,8 +243,8 @@ def _prepare_pipeline_inputs_func(self, inputs): # ppo-loss and ptx-loss need different labels, and data iter provides # corrensponding data, thus add the not provided fields here. # policy trian and infer has different inputs, infer uses position_ids. - for key in last_stage_keys: - # for key in first_stage_keys + last_stage_keys: + # for key in last_stage_keys: + for key in first_stage_keys + last_stage_keys: if key not in inputs: inputs[key] = None return [ @@ -251,8 +253,8 @@ def _prepare_pipeline_inputs_func(self, inputs): ] for data in inputs: - for key in last_stage_keys: - # for key in first_stage_keys + last_stage_keys: + # for key in last_stage_keys: + for key in first_stage_keys + last_stage_keys: if key not in data: data[key] = None # keys = list(inputs[0].keys()) diff --git a/examples/RLHF/models/ppo_model.py b/examples/RLHF/models/ppo_model.py index 486a1cc822e7..720009161022 100644 --- a/examples/RLHF/models/ppo_model.py +++ b/examples/RLHF/models/ppo_model.py @@ -21,9 +21,9 @@ # TODO(guosheng): create Mixin and make model classes using metaclass. 
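The first_stage_keys change earlier in this patch starts feeding position_ids through the pipeline's first stage, built from the attention mask when absent. The repo's make_position_ids helper is not shown in these hunks; a plausible construction, given only as an assumption of its behavior:

    import paddle

    def make_position_ids_sketch(attention_mask):
        # positions count valid tokens left to right; padded slots are clamped to 0
        # (assumption: the actual helper in model_pp.py may differ)
        position_ids = paddle.cumsum(attention_mask.cast("int64"), axis=-1) - 1
        return paddle.clip(position_ids, min=0)

    mask = paddle.to_tensor([[0, 0, 1, 1, 1], [1, 1, 1, 1, 1]])
    print(make_position_ids_sketch(mask))
    # [[0, 0, 0, 1, 2], [0, 1, 2, 3, 4]]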
class LlamaPolicyModel(LlamaForCausalLM): - def __init__(self, config: PretrainedConfig): + def __init__(self, config: PretrainedConfig, **kwargs): super().__init__(config) - self.loss_fn = RLHFPPOMixedLoss(config) + self.loss_fn = RLHFPPOMixedLoss(config, **kwargs) def forward( self, @@ -72,7 +72,7 @@ def forward( class LlamaValueModel(LlamaModelForScore): def __init__(self, config, **kwargs): super().__init__(config, **kwargs) - self.loss_fn = RLHFValueLoss(config) + self.loss_fn = RLHFValueLoss(config, **kwargs) def forward( self, diff --git a/examples/RLHF/ppo_main.py b/examples/RLHF/ppo_main.py index 9d3be1dd7240..af191303809b 100644 --- a/examples/RLHF/ppo_main.py +++ b/examples/RLHF/ppo_main.py @@ -220,11 +220,17 @@ def main(): AutoModelForCausalLM = LlamaPolicyPipe AutoModelForScore = LlamaValuePipe + extra_args = { + "ptx_coeff": training_args.ptx_coeff, + "clip_range_ratio": training_args.clip_range_ratio, + } else: from models import AutoModelForScore from paddlenlp.transformers import AutoModelForCausalLM + extra_args = {} + # actor model model_config = AutoConfig.from_pretrained( model_args.actor_model_name_or_path, @@ -238,6 +244,9 @@ def main(): actor_model = AutoModelForCausalLM.from_pretrained( model_args.actor_model_name_or_path, config=model_config, + **extra_args, + # ptx_coeff=training_args.ptx_coeff, + # clip_range_ratio=training_args.clip_range_ratio, ) # reference model actor_reference_model = AutoModelForCausalLM.from_pretrained( @@ -271,7 +280,11 @@ def main(): if model_args.reward_critic_model_name_or_path is None: model_args.reward_critic_model_name_or_path = model_args.reward_model_name_or_path reward_critic_model = AutoModelForScore.from_pretrained( - model_args.reward_critic_model_name_or_path, config=model_config, score_type="critic", do_normalize=False + model_args.reward_critic_model_name_or_path, + config=model_config, + score_type="critic", + do_normalize=False, + clip_range_value=training_args.clip_range_value, ) reward_critic_tokenizer = AutoTokenizer.from_pretrained( model_args.reward_critic_model_name_or_path, model_max_length=data_args.max_length, padding_side="left" diff --git a/examples/RLHF/ppo_trainer.py b/examples/RLHF/ppo_trainer.py index 3a21c253f057..1785d4be45e3 100644 --- a/examples/RLHF/ppo_trainer.py +++ b/examples/RLHF/ppo_trainer.py @@ -729,8 +729,8 @@ def __init__(self, trainer: Trainer): self.prepare_inputs_for_generation = types.MethodType( self.model._layers._non_pipe_model_class.prepare_inputs_for_generation, self ) - self.update_model_kwargs_for_generation = types.MethodType( - self.model._layers._non_pipe_model_class.update_model_kwargs_for_generation, self + self.update_model_kwargs_for_generation = ( + self.model._layers._non_pipe_model_class.update_model_kwargs_for_generation ) def eval(self): @@ -812,10 +812,26 @@ def __call__(self, *args, **kwargs): return outputs def generate(self, *args, **kwargs): - # when generate, cache should be self._is_gen = True - super().generate(*args, **kwargs) + # patch DecoderLayerPipe to use cache, DecoderLayerPipe is subclass of + # DecoderLayer, and would call super().forward + ori_decoder_layer_forward = self.model._layers._non_pipe_decoder_layer_class.forward + + def decoder_layer_forward(self, *args, **kwargs): + kwargs.update({"use_cache": True, "past_key_value": getattr(self, "_cache", None)}) + outputs = ori_decoder_layer_forward(self, *args, **kwargs) + output = outputs[0] + self._cache = outputs[1] + return output + + with 
guard_set_args(self.model._layers._non_pipe_decoder_layer_class, {"forward": decoder_layer_forward}): + super().generate(*args, **kwargs) self._is_gen = False + # clear cache of decoder layers, sublayers is incursive thus suitable + # to both 1F1B and interleave + for layer in self.model._layers.sublayers(): + if isinstance(layer, self.model._layers._non_pipe_decoder_layer_class): + layer._cache = None class PPOTrainer(Trainer): @@ -1203,27 +1219,35 @@ def init_train_model_opt( ) return policy_model, value_model - @staticmethod - def load_sing_gen_data(as_batches=True): + def load_sing_gen_data(self, as_batches=True, use_counter=False): + if use_counter: + iter_counter = getattr(self, "iter_counter", 0) + self.iter_counter = iter_counter + 1 + else: + iter_counter = "" import pickle from paddle.distributed import fleet hcg = fleet.get_hybrid_communicate_group() data_rank = hcg.get_sharding_parallel_rank() - with open(f"rl_batch-{data_rank}.data", "rb") as f: + with open(f"{iter_counter}rl_batch-{data_rank}.data", "rb") as f: data = pickle.load(f) rl_batch = map_structure(lambda x: paddle.to_tensor(x), data) rl_batches = [rl_batch] if as_batches else rl_batch return rl_batches - @staticmethod - def save_single_gen_data(rl_batch): + def save_single_gen_data(self, rl_batch, use_counter=False): + if use_counter: + iter_counter = getattr(self, "iter_counter", 0) + self.iter_counter = iter_counter + 1 + else: + iter_counter = "" import pickle import paddle.distributed as dist - with open(f"rl_batch-{dist.get_rank()}.data", "wb") as f: + with open(f"{iter_counter}rl_batch-{dist.get_rank()}.data", "wb") as f: rl_batch = map_structure(lambda x: x.numpy(), rl_batch) pickle.dump(rl_batch, f) # exit(0) @@ -1242,7 +1266,8 @@ def gen_epoch_data(): # generate batches self.set_eval() rl_batches = self.split_rl_micro_batches(prompt_only_batch) - # rl_batches = self.load_sing_gen_data(as_batches=True) + # rl_batches = self.load_sing_gen_data(as_batches=True, + # use_counter=True) if self.use_ptx: ptx_batches = self.split_ptx_micro_batches(ptx_batch) else: @@ -1252,6 +1277,7 @@ def gen_epoch_data(): self.set_train() for _ in range(self.args.update_iters): for rl_batch, ptx_batch in zip(rl_batches, ptx_batches): + # self.save_single_gen_data(rl_batch, use_counter=True) yield rl_batch, ptx_batch class EpochIterator: @@ -1718,7 +1744,7 @@ def rollout(self, prompt_only_batch: PromptOnlyBatch) -> List[Dict[str, Any]]: # synced_gpus=ShardingOption.FULL_SHARD in self.policy_trainer.args.sharding, # )[0] # sequences = sequences.reshape([input_ids.shape[0], self.args.num_return_sequences, -1]).transpose([1, 0, 2]) - sequences = [self.load_sing_gen_data(as_batches=False)["input_ids"]] + sequences = [self.load_sing_gen_data(as_batches=False, use_counter=False)["input_ids"]] return [ self.post_rollout( From 34d4cd115b16dd14a27c1a67cec2ef960896caa1 Mon Sep 17 00:00:00 2001 From: whucsgs Date: Tue, 20 Feb 2024 06:46:47 +0000 Subject: [PATCH 06/46] Support PP generation. --- examples/RLHF/ppo_trainer.py | 82 +++++++++++++++------ paddlenlp/generation/utils.py | 9 +++ paddlenlp/transformers/llama/modeling_pp.py | 16 +++- 3 files changed, 81 insertions(+), 26 deletions(-) diff --git a/examples/RLHF/ppo_trainer.py b/examples/RLHF/ppo_trainer.py index 1785d4be45e3..6798297a8667 100644 --- a/examples/RLHF/ppo_trainer.py +++ b/examples/RLHF/ppo_trainer.py @@ -13,11 +13,11 @@ # limitations under the License. 
import copy +import inspect import itertools import math import os import time -import types from contextlib import contextmanager from typing import Any, Callable, Dict, List, Optional, Tuple, Union @@ -726,13 +726,17 @@ def __init__(self, trainer: Trainer): # self.gen_fn = None # self.fwd_fn = None # use non-pipe model generetion related methods - self.prepare_inputs_for_generation = types.MethodType( - self.model._layers._non_pipe_model_class.prepare_inputs_for_generation, self - ) + # self.prepare_inputs_for_generation = types.MethodType( + # self.model._layers._non_pipe_model_class.prepare_inputs_for_generation, self + # ) self.update_model_kwargs_for_generation = ( self.model._layers._non_pipe_model_class.update_model_kwargs_for_generation ) + @property + def pp_group(self): + return self.model.pp_group + def eval(self): self.model.eval() @@ -785,8 +789,6 @@ def _broadcast_outputs(self, outputs): def __call__(self, *args, **kwargs): model = self.model assert self.model.training is False - # TODO(guosheng): hack for post-process in eval, so we can let last stage - # do more to reduce comm overhead. if self._is_gen: # inputs by `prepare_inputs_for_generation` is a dict with following keys: # "input_ids", "position_ids", "past_key_values", "use_cache", "attention_mask" @@ -799,6 +801,16 @@ def __call__(self, *args, **kwargs): inputs, labels = model._prepare_pipeline_inputs_func(*args, **kwargs) with guard_set_args(model, {"_compute_loss": False}): outputs = model.eval_batch([inputs, labels], compute_loss=False) + # TODO(guosheng): Broadcasted logits are used to get next_scores, remove + # it to reduce comm overhead. Also note that we still need broadcast + # next_tokens though logits are broadcasted since pp ranks' seeds differs. + # Currently, just slice the last token to reduce comm overhead. 
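Only the final position's logits are needed to choose the next token, which is why the slice taken just below is sufficient; a minimal sampling sketch with made-up sizes:

    import paddle
    import paddle.nn.functional as F

    logits = paddle.randn([2, 7, 32000])  # [batch, seq, vocab] produced by the last stage
    next_token_logits = logits[:, -1, :]  # only the last position drives sampling
    probs = F.softmax(next_token_logits, axis=-1)
    next_tokens = paddle.multinomial(probs, num_samples=1)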
+ outputs = [ + micro_batch_output[:, -1, :].unsqueeze(1) + if isinstance(micro_batch_output, paddle.Tensor) + else micro_batch_output[0][:, -1, :].unsqueeze(1) + for micro_batch_output in outputs + ] outputs = self._broadcast_outputs(outputs) else: # use _prepare_pipeline_inputs_func to convert pipeline inputs @@ -817,21 +829,45 @@ def generate(self, *args, **kwargs): # DecoderLayer, and would call super().forward ori_decoder_layer_forward = self.model._layers._non_pipe_decoder_layer_class.forward - def decoder_layer_forward(self, *args, **kwargs): - kwargs.update({"use_cache": True, "past_key_value": getattr(self, "_cache", None)}) - outputs = ori_decoder_layer_forward(self, *args, **kwargs) + def decoder_layer_forward(layer_self, *args, **kwargs): + kwargs.update({"use_cache": True, "past_key_value": getattr(layer_self, "_cache", None)}) + outputs = ori_decoder_layer_forward(layer_self, *args, **kwargs) output = outputs[0] - self._cache = outputs[1] + layer_self._cache = outputs[1] + self._has_cache = True return output with guard_set_args(self.model._layers._non_pipe_decoder_layer_class, {"forward": decoder_layer_forward}): - super().generate(*args, **kwargs) + outputs = super().generate(*args, **kwargs) self._is_gen = False # clear cache of decoder layers, sublayers is incursive thus suitable # to both 1F1B and interleave for layer in self.model._layers.sublayers(): if isinstance(layer, self.model._layers._non_pipe_decoder_layer_class): layer._cache = None + self._has_cache = False + return outputs + + def prepare_inputs_for_generation(self, *args, **kwargs): + arg_bind = inspect.signature(self.model._layers._non_pipe_model_class.prepare_inputs_for_generation).bind( + *((self,) + args), **kwargs + ) + arg_bind.apply_defaults() + arg_dict = arg_bind.arguments + last_arg_name, last_arg_value = arg_dict.popitem() + if arg_bind.signature.parameters[last_arg_name].kind == inspect.Parameter.VAR_KEYWORD: + arg_dict.update(last_arg_value) + else: + arg_dict[last_arg_name] = last_arg_value + arg_dict.pop("self") + past_key_values = arg_dict.get("past_key_values", None) + # prepare_inputs_for_generation use past_key_values to discrimate prefill + # or decode and slice inputs accordingly. 
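The signature binding above only folds positional and keyword arguments into one dict so past_key_values can be intercepted before delegating to the non-pipe prepare_inputs_for_generation. A pure-Python illustration of the same trick on a toy function:

    import inspect

    def prepare(self, input_ids, past_key_values=None, use_cache=False, **kwargs):
        return {"input_ids": input_ids, "past_key_values": past_key_values, **kwargs}

    bound = inspect.signature(prepare).bind(None, [1, 2, 3], attention_mask="m")
    bound.apply_defaults()
    args = dict(bound.arguments)
    last_name, last_value = args.popitem()  # the **kwargs bucket comes last
    if inspect.signature(prepare).parameters[last_name].kind is inspect.Parameter.VAR_KEYWORD:
        args.update(last_value)
    else:
        args[last_name] = last_value
    args.pop("self")
    print(args)
    # {'input_ids': [1, 2, 3], 'past_key_values': None, 'use_cache': False, 'attention_mask': 'm'}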
+ if getattr(self, "_has_cache", False): + arg_dict.update({"past_key_values": True}) + model_inputs = self.model._layers._non_pipe_model_class.prepare_inputs_for_generation(self, **arg_dict) + model_inputs.update({"past_key_values": past_key_values}) + return model_inputs class PPOTrainer(Trainer): @@ -1219,7 +1255,7 @@ def init_train_model_opt( ) return policy_model, value_model - def load_sing_gen_data(self, as_batches=True, use_counter=False): + def load_sing_gen_data(self, as_batches=True, use_counter=False, data_dir="pkl_data"): if use_counter: iter_counter = getattr(self, "iter_counter", 0) self.iter_counter = iter_counter + 1 @@ -1231,13 +1267,13 @@ def load_sing_gen_data(self, as_batches=True, use_counter=False): hcg = fleet.get_hybrid_communicate_group() data_rank = hcg.get_sharding_parallel_rank() - with open(f"{iter_counter}rl_batch-{data_rank}.data", "rb") as f: + with open(os.path.join(data_dir, f"{iter_counter}rl_batch-{data_rank}.data"), "rb") as f: data = pickle.load(f) rl_batch = map_structure(lambda x: paddle.to_tensor(x), data) rl_batches = [rl_batch] if as_batches else rl_batch return rl_batches - def save_single_gen_data(self, rl_batch, use_counter=False): + def save_single_gen_data(self, rl_batch, use_counter=False, data_dir="pkl_data"): if use_counter: iter_counter = getattr(self, "iter_counter", 0) self.iter_counter = iter_counter + 1 @@ -1247,7 +1283,7 @@ def save_single_gen_data(self, rl_batch, use_counter=False): import paddle.distributed as dist - with open(f"{iter_counter}rl_batch-{dist.get_rank()}.data", "wb") as f: + with open(os.path.join(data_dir, f"{iter_counter}rl_batch-{dist.get_rank()}.data"), "wb") as f: rl_batch = map_structure(lambda x: x.numpy(), rl_batch) pickle.dump(rl_batch, f) # exit(0) @@ -1737,14 +1773,14 @@ def rollout(self, prompt_only_batch: PromptOnlyBatch) -> List[Dict[str, Any]]: input_ids = prompt_only_batch["input_ids"] # NOTE: generation output of paddlenlp do not contain prompt, we should # change sequences here. 
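As noted, paddlenlp's generate() returns only the continuation, and it returns num_return_sequences rows per prompt; one illustrative way to stitch the prompt back on and regroup the result (toy tensors, not the trainer's exact handling):

    import paddle

    batch_size, num_return_sequences = 2, 3
    prompt = paddle.randint(0, 100, [batch_size, 5])
    generated = paddle.randint(0, 100, [batch_size * num_return_sequences, 4])  # continuation only

    prompt_tiled = paddle.repeat_interleave(prompt, num_return_sequences, axis=0)
    sequences = paddle.concat([prompt_tiled, generated], axis=-1)  # prompt + continuation
    sequences = sequences.reshape([batch_size, num_return_sequences, -1]).transpose([1, 0, 2])
    print(sequences.shape)  # [3, 2, 9]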
- # sequences = self.actor_model.generate( - # input_ids=input_ids, - # attention_mask=prompt_only_batch["attention_mask"], - # generation_config=self.generation_config, - # synced_gpus=ShardingOption.FULL_SHARD in self.policy_trainer.args.sharding, - # )[0] - # sequences = sequences.reshape([input_ids.shape[0], self.args.num_return_sequences, -1]).transpose([1, 0, 2]) - sequences = [self.load_sing_gen_data(as_batches=False, use_counter=False)["input_ids"]] + sequences = self.actor_model.generate( + input_ids=input_ids, + attention_mask=prompt_only_batch["attention_mask"], + generation_config=self.generation_config, + synced_gpus=ShardingOption.FULL_SHARD in self.policy_trainer.args.sharding, + )[0] + sequences = sequences.reshape([input_ids.shape[0], self.args.num_return_sequences, -1]).transpose([1, 0, 2]) + # sequences = [self.load_sing_gen_data(as_batches=False, use_counter=False)["input_ids"]] return [ self.post_rollout( diff --git a/paddlenlp/generation/utils.py b/paddlenlp/generation/utils.py index 64ac22b360e6..0b4d0583e412 100644 --- a/paddlenlp/generation/utils.py +++ b/paddlenlp/generation/utils.py @@ -1233,6 +1233,15 @@ def sample( except: group, src = None, 0 paddle.distributed.broadcast(next_tokens, src=src, group=group) + # config does not include tensor_parallel_degree, and pipeline parallel + # uses trainer.model_wrapped to run in both train and predict mode + # which has pp_group as a attribute + # TODO(guosheng): only let the last stage of pipeline to do softmax + # and sampling, and then broadcast to avoid broadcast logits. + if hasattr(self, "pp_group"): + paddle.distributed.broadcast( + next_tokens, src=self.pp_group.ranks[-1], group=self.pp_group # use rank 0 for same seed to check + ) next_scores = paddle.index_sample(origin_probs, next_tokens) diff --git a/paddlenlp/transformers/llama/modeling_pp.py b/paddlenlp/transformers/llama/modeling_pp.py index 362b8f4c6cb1..5dbe15d1ea9f 100644 --- a/paddlenlp/transformers/llama/modeling_pp.py +++ b/paddlenlp/transformers/llama/modeling_pp.py @@ -177,15 +177,25 @@ def forward(self, args): if self.enable_recompute and self.config.recompute_granularity == "full" and has_gradient: if attention_mask is not None or alibi is not None: hidden_states = recompute( - super().forward, hidden_states, attention_mask=attention_mask, alibi=alibi, use_reentrant=False + super().forward, + hidden_states, + position_ids=position_ids, + attention_mask=attention_mask, + alibi=alibi, + use_reentrant=False, ) else: # for pretrain hidden_states = recompute( - super().forward, hidden_states, use_reentrant=self.config.recompute_use_reentrant + super().forward, + hidden_states, + position_ids=position_ids, + use_reentrant=self.config.recompute_use_reentrant, ) else: - hidden_states = super().forward(hidden_states, attention_mask=attention_mask, alibi=alibi) + hidden_states = super().forward( + hidden_states, position_ids=position_ids, attention_mask=attention_mask, alibi=alibi + ) return return_args(hidden_states, attention_mask, position_ids, alibi) From 665fee2a0480c072804a4ca061ccb2b3d5dee486 Mon Sep 17 00:00:00 2001 From: whucsgs Date: Tue, 20 Feb 2024 10:02:21 +0000 Subject: [PATCH 07/46] Fix PP eval by unify prediction_step --- examples/RLHF/ppo_trainer.py | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/examples/RLHF/ppo_trainer.py b/examples/RLHF/ppo_trainer.py index 6798297a8667..4425fa562e74 100644 --- a/examples/RLHF/ppo_trainer.py +++ b/examples/RLHF/ppo_trainer.py @@ -797,9 +797,10 @@ def 
__call__(self, *args, **kwargs): # Also, pipe model defined for training not support this cache input. # 2. ignore use_cache since _check_data_vaild requires tensor if not None. # 3. attention_mask can reuse _prepare_decoder_attention_mask in LlamaEmbeddingPipe. - # 4. TODO(guosheng): position_ids in _prepare_pipeline_inputs_func cause error, fix. + # 4. position_ids pass through _prepare_pipeline_inputs_func and PipeLayer. inputs, labels = model._prepare_pipeline_inputs_func(*args, **kwargs) - with guard_set_args(model, {"_compute_loss": False}): + # currently, set accumulate_steps to 1 to avoid multi-batch eval/gen + with guard_set_args(model, {"_compute_loss": False, "accumulate_steps": 1}): outputs = model.eval_batch([inputs, labels], compute_loss=False) # TODO(guosheng): Broadcasted logits are used to get next_scores, remove # it to reduce comm overhead. Also note that we still need broadcast @@ -818,7 +819,7 @@ def __call__(self, *args, **kwargs): # NOTE(guosheng): bug seems exist. pp.eval_batch(compute_loss=False) # will set pp._compute_loss to False and would not set it back. Thus # hack here to set it back. - with guard_set_args(model, {"_compute_loss": False}): + with guard_set_args(model, {"_compute_loss": False, "accumulate_steps": 1}): outputs = model.eval_batch([inputs, labels], compute_loss=False) outputs = self._broadcast_outputs(outputs) return outputs @@ -1120,12 +1121,7 @@ def prediction_step( prediction_loss_only: bool, ignore_keys: Optional[List[str]] = None, ) -> Tuple[Optional[paddle.Tensor], Optional[paddle.Tensor], Optional[paddle.Tensor]]: - if self.args.pipeline_parallel_degree > 1: - # hack for pipeline mode - inputs = self._prepare_inputs(inputs) - return self.prediction_pipeline_step(model, inputs, prediction_loss_only, ignore_keys) - else: - inputs = self._prepare_inputs(inputs) + inputs = self._prepare_inputs(inputs) with paddle.no_grad(): with self.autocast_smart_context_manager(): @@ -1153,9 +1149,15 @@ def prediction_step( reward_input_ids = seq reward_attention_mask = attention_mask + # unify PP with others since PP always return tuple reward_score = self.reward_model( - reward_input_ids, attention_mask=reward_attention_mask, return_dict=True - ).end_scores.squeeze(axis=-1) + reward_input_ids, + attention_mask=reward_attention_mask, + # return_dict=True, + )[ + 1 + ] # .end_scores + reward_score = reward_score.squeeze(axis=-1) # keep the first batch of eval output sequence to print and check prompt = self.tokenizer.batch_decode(inputs["input_ids"], skip_special_tokens=True) From a2e970231684b1ab3ce1b21f301c7b0da10405d5 Mon Sep 17 00:00:00 2001 From: whucsgs Date: Tue, 20 Feb 2024 12:02:32 +0000 Subject: [PATCH 08/46] Fix reward value showing error cased by BF16 dtype when eval --- examples/RLHF/ppo_trainer.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/RLHF/ppo_trainer.py b/examples/RLHF/ppo_trainer.py index 4425fa562e74..07dc1e0af11c 100644 --- a/examples/RLHF/ppo_trainer.py +++ b/examples/RLHF/ppo_trainer.py @@ -1157,7 +1157,7 @@ def prediction_step( )[ 1 ] # .end_scores - reward_score = reward_score.squeeze(axis=-1) + reward_score = reward_score.squeeze(axis=-1).cast(paddle.float32) # keep the first batch of eval output sequence to print and check prompt = self.tokenizer.batch_decode(inputs["input_ids"], skip_special_tokens=True) @@ -1170,7 +1170,7 @@ def prediction_step( # generateds.extend(generated) self._eval_seq = (prompt, generated, reward_score.tolist()) - return reward_score.cast(paddle.float32).mean(), None, 
None + return reward_score.mean(), None, None def evaluation_loop( self, From 6c8441cd6b9d32f21664f25c4477c285690ce2e5 Mon Sep 17 00:00:00 2001 From: Zhong Hui Date: Thu, 22 Feb 2024 11:57:38 +0800 Subject: [PATCH 09/46] fix all --- examples/RLHF/models/score_model.py | 4 + examples/RLHF/models/score_model_utils.py | 7 +- examples/RLHF/ppo_config.json | 5 +- examples/RLHF/ppo_main.py | 42 ++++- examples/RLHF/ppo_trainer.py | 219 +++++++++++++++++++++- examples/RLHF/reward_main.py | 8 + paddlenlp/transformers/model_utils.py | 27 +-- 7 files changed, 288 insertions(+), 24 deletions(-) diff --git a/examples/RLHF/models/score_model.py b/examples/RLHF/models/score_model.py index dd3741596a9e..8239f845300d 100644 --- a/examples/RLHF/models/score_model.py +++ b/examples/RLHF/models/score_model.py @@ -17,6 +17,7 @@ import paddle from paddle import nn +import paddlenlp from paddlenlp.transformers import ( LlamaConfig, LlamaModel, @@ -131,3 +132,6 @@ def _get_name_mappings(cls, config: LlamaConfig) -> list[StateDictNameMapping]: mappings = [StateDictNameMapping(*mapping, index=index) for index, mapping in enumerate(model_mappings)] return mappings + + +paddlenlp.transformers.LlamaModelForScore = LlamaModelForScore diff --git a/examples/RLHF/models/score_model_utils.py b/examples/RLHF/models/score_model_utils.py index 5d14f7995731..47ded671e6fa 100644 --- a/examples/RLHF/models/score_model_utils.py +++ b/examples/RLHF/models/score_model_utils.py @@ -49,9 +49,10 @@ class AutoModelForScore(_BaseAutoModelClass): _score_module_name: str = "models.score_model" @classmethod - def _get_model_class_from_config(cls, pretrained_model_name_or_path, config_file_path): - with io.open(config_file_path, encoding="utf-8") as f: - config = json.load(f) + def _get_model_class_from_config(cls, pretrained_model_name_or_path, config_file_path, config=None): + if config is None: + with io.open(config_file_path, encoding="utf-8") as f: + config = json.load(f) # Get class name corresponds to this configuration if is_standard_config(config): diff --git a/examples/RLHF/ppo_config.json b/examples/RLHF/ppo_config.json index d15331443608..df3dc8db17a6 100644 --- a/examples/RLHF/ppo_config.json +++ b/examples/RLHF/ppo_config.json @@ -42,8 +42,9 @@ "do_eval": true, "disable_tqdm": true, "save_total_limit": 1, - "sharding_parallel_degree": 4, - "sharding": "stage3", + "sharding_parallel_degree": -1, + "tensor_parallel_degree": 2, + "sharding": "", "comment-PKU_Beaver-max_grad_norm": 1.0, "max_grad_norm": 1.0, "adam_beta1": 0.9, diff --git a/examples/RLHF/ppo_main.py b/examples/RLHF/ppo_main.py index f517288a8679..f49d0dd2469d 100644 --- a/examples/RLHF/ppo_main.py +++ b/examples/RLHF/ppo_main.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
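The eval-model construction added to ppo_main.py below clones the training config and turns tensor parallelism off before from_config, apparently so weights can later be merged into a single unsharded copy (see export_evaluate_model later in this patch). A minimal stand-in for the config handling; SimpleNamespace replaces the real PretrainedConfig, and the from_config call is left commented since it needs the real model class and config:

    import copy
    from types import SimpleNamespace

    train_config = SimpleNamespace(hidden_size=4096, tensor_parallel_degree=2, tensor_parallel_rank=1)

    eval_config = copy.deepcopy(train_config)
    eval_config.tensor_parallel_degree = -1  # build the eval copy as a single, unsharded model
    eval_config.tensor_parallel_rank = 0
    # eval_model = AutoModelForCausalLM.from_config(eval_config)  # as done in the hunk below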
+import copy import os import sys from dataclasses import dataclass, field @@ -20,6 +21,7 @@ import paddle from data import PromptOnlyDataset, SupervisedDataset, parse_dataset from models import AutoModelForScore +from models.score_model import LlamaModelForScore # noqa from ppo_trainer import PPOTrainer from paddlenlp.trainer import PdArgumentParser, TrainingArguments, get_last_checkpoint @@ -31,6 +33,17 @@ ) from paddlenlp.utils.log import logger +# launch would unset http_proxy +# export https_proxy=http://172.19.57.45:3128 +# os.environ["http_proxy"] = "http://172.19.56.199:3128" +# os.environ["https_proxy"] = "http://172.19.56.199:3128" +# os.environ["http_proxy"] = "http://172.19.57.45:3128" +# os.environ["https_proxy"] = "http://172.19.57.45:3128" +os.environ["http_proxy"] = "http://10.162.37.16:8128" +os.environ["https_proxy"] = "http://10.162.37.16:8128" +os.environ["no_proxy"] = "localhost,bcebos.com" + + @dataclass class TrainingArguments(TrainingArguments): kl_coeff: float = field( @@ -230,12 +243,19 @@ def main(): tensor_parallel_rank=training_args.tensor_parallel_rank, dtype=dtype, ) + model_config.num_hidden_layers = 2 if hasattr(model_config, "use_flash_attention"): model_config.use_flash_attention = model_args.use_flash_attention actor_model = AutoModelForCausalLM.from_pretrained( model_args.actor_model_name_or_path, config=model_config, ) + + config = copy.deepcopy(actor_model.config) + config.tensor_parallel_degree = -1 + config.tensor_parallel_rank = 0 + actor_eval_model = AutoModelForCausalLM.from_config(config) + # reference model actor_reference_model = AutoModelForCausalLM.from_pretrained( model_args.actor_model_name_or_path, @@ -253,6 +273,7 @@ def main(): tensor_parallel_rank=training_args.tensor_parallel_rank, dtype=dtype, ) + model_config.num_hidden_layers = 2 if hasattr(model_config, "use_flash_attention"): model_config.use_flash_attention = model_args.use_flash_attention reward_model = AutoModelForScore.from_pretrained( @@ -264,6 +285,7 @@ def main(): reward_tokenizer = AutoTokenizer.from_pretrained( model_args.reward_model_name_or_path, model_max_length=data_args.max_length, padding_side="right" ) + # critic model if model_args.reward_critic_model_name_or_path is None: model_args.reward_critic_model_name_or_path = model_args.reward_model_name_or_path @@ -273,6 +295,12 @@ def main(): reward_critic_tokenizer = AutoTokenizer.from_pretrained( model_args.reward_critic_model_name_or_path, model_max_length=data_args.max_length, padding_side="left" ) + + config = copy.deepcopy(reward_critic_model.config) + config.tensor_parallel_degree = -1 + config.tensor_parallel_rank = 0 + reward_critic_eval_model = AutoModelForScore.from_config(config) + for tokenizer in [actor_tokenizer, reward_tokenizer, reward_critic_tokenizer]: if isinstance(tokenizer, LlamaTokenizer) and tokenizer.pad_token_id is None: tokenizer.pad_token_id = tokenizer.eos_token_id @@ -292,7 +320,19 @@ def main(): ) trainer = PPOTrainer( - model=(actor_model, actor_reference_model, reward_model, reward_critic_model), + # (policy_model, reference_model, reward_model, value_model) + # policy_model, sft_model, reward_model, value_model + # (policy_model, reference_model, reward_model, value_model, + # (policy_model, reference_model, reward_model, value_model, policy_eval_model, value_eval_model + # (actor_model, actor_reference_model, reward_model, reward_critic_model, actor_eval_model, reward_critic_eval_model + model=( + actor_model, + actor_reference_model, + reward_model, + reward_critic_model, + 
actor_eval_model, + reward_critic_eval_model, + ), args=training_args, train_dataset=train_ds, eval_dataset=dev_ds, diff --git a/examples/RLHF/ppo_trainer.py b/examples/RLHF/ppo_trainer.py index 32a9c148f193..3185d3f29150 100644 --- a/examples/RLHF/ppo_trainer.py +++ b/examples/RLHF/ppo_trainer.py @@ -22,10 +22,12 @@ import numpy as np import paddle +import paddle.distributed as dist import paddle.nn as nn import paddle.nn.functional as F import tqdm from data import DummyDataset, PromptOnlyBatch +from paddle.distributed import fleet from paddle.io import DataLoader, Dataset, DistributedBatchSampler from paddle.utils import map_structure from rich.console import Console @@ -33,6 +35,7 @@ from paddlenlp.data import DataCollator from paddlenlp.generation import GenerationConfig +from paddlenlp.trainer.plugins.unified_checkpoint import flatten_list from paddlenlp.trainer.trainer import ( TRAINER_STATE_NAME, EvalLoopOutput, @@ -59,6 +62,7 @@ PaddingStrategy, TruncationStrategy, ) +from paddlenlp.utils.distributed import distributed_gather def batch_retokenize( @@ -470,11 +474,165 @@ def full_training_step(self: Trainer, inputs: Dict[str, paddle.Tensor], **kwargs return kwargs +def offload_model(self: Trainer, **kwargs): + for k, v in self.model.state_dict(): + v = paddle.copy_(v, paddle.CPUPlace()) + + +def reload_model(self: Trainer, **kwargs): + for k, v in self.model.state_dict(): + v = paddle.copy_(v, paddle.GPUPlace()) + + +def export_evaluate_model(self: Trainer, train_model, eval_model, **kwargs): + with_offload = kwargs.pop("with_offload", False) + train_tp_size = max(train_model.config.tensor_parallel_degree, 1) + eval_tp_size = max(eval_model.config.tensor_parallel_degree, 1) + + hcg = fleet.get_hybrid_communicate_group() + tp_group = hcg.get_model_parallel_group() + pp_group = hcg.get_pipe_parallel_group() + sd_group = hcg.get_sharding_parallel_group() + dp_group = hcg.get_data_parallel_group() + + global_rank = paddle.distributed.get_rank() + + train_state_dict = train_model.state_dict() + eval_state_dict = eval_model.state_dict() + + print(sd_group) + + if dp_group.rank <= 0 and sd_group.rank <= 0: + train_pp_size = pp_group.nranks + if eval_tp_size > 1 and train_tp_size != eval_tp_size: + raise ValueError("Only support for the same tensor_parallel_degree for train and eval model for now.") + + # 单卡情况 + # tp->single + # tp+pp -> single + if eval_tp_size == 1: + if train_pp_size == 1 and train_tp_size > 1: + # tp ->single + logger.error("using tp to single eval model.") + # state = train_model.merge_tensor_parallel() + tp_actions = train_model.get_tensor_parallel_convert_actions( + train_model.config, + loaded_state_dict_keys=eval_state_dict.keys(), + is_split=False, + ignore_error=False, + ) + + # print(tp_actions.keys()) + + is_dst = global_rank == 0 + for key in eval_state_dict.keys(): + # print(f"get key {key}") + tensor = train_state_dict[key] + if key in tp_actions: + ret = distributed_gather(tensor, dst=0, group=tp_group, offload=False) + action = tp_actions.pop(key) + tensor = action(ret) if is_dst else None + # print(f"merge {key}") + else: + tensor = tensor._copy_to(paddle.CPUPlace(), False) if is_dst else None + + if tensor is not None: + # print(tensor.shape) + eval_state_dict[key].set_value(tensor) + + if with_offload: + train_state_dict[key].copy_(train_state_dict[key]._copy_to(paddle.CPUPlace(), False)) + else: + # single to single + # tp+pp -> single + raise ValueError("Not support yet.") + + def create_send_recv_table(all_keys, train_keys, eval_keys): + recv_table 
= [] + send_table = [] + if pp_group.rank == 0: + for key in eval_keys: + recv_table.append((key, global_rank)) + + for key in train_keys: + recv_table.append((key, global_rank)) + + all_recv, all_send = [], [] + paddle.distributed.all_gather_object(all_recv, [recv_table], group=pp_group) + paddle.distributed.all_gather_object(all_send, [send_table], group=pp_group) + all_recv = flatten_list(all_recv) + all_send = flatten_list(all_send) + + send_dict = {} + for k, v in all_send: + send_dict[k] = v + table = [] + for k, v in all_recv: + # key, send, recv + table.append([k, send_dict.pop(k), v]) + assert len(send_dict) == 0, f"Some key can't be recv {send_dict.keys()}" + return table + + # pp0tp0 -> pp0tp0 + # pp0tp1 -> pp0tp1 + # pp1tp0 -> pp0tp0 + # pp1tp1 -> pp0tp1 + + # tp情况 + # tp+pp->tp + if eval_tp_size > 1 and train_pp_size > 1: + table = create_send_recv_table(train_state_dict.keys(), eval_state_dict.keys()) + + for key, src_rank, dst_rank in table: + # Init tensor for model is cleaned + if global_rank == dst_rank and not eval_state_dict[key]._is_initialized(): + eval_state_dict[key] = paddle.random() + + if global_rank == src_rank: + dist.stream.send(train_state_dict[key], dst=dst_rank) + + if global_rank == dst_rank: + dist.stream.recv(eval_state_dict[key], dst=dst_rank) + + # Offload train model if need + if global_rank == src_rank and with_offload: + train_state_dict[key].copy_(train_state_dict[key]._copy_to(paddle.CPUPlace(), blocking=False)) + else: + # 其他 DP rank 的state dict, 适配 offload 和初始化 + if with_offload: + for key in list(train_state_dict.keys()): + train_state_dict[key].copy_(train_state_dict[key]._copy_to(paddle.CPUPlace(), blocking=False)) + for k, v in eval_state_dict.items(): + if not v._is_initialized(): + v.set_value(paddle.random()) + + paddle.distributed.barrier() + if eval_tp_size == 1: + for _, tensor in eval_state_dict.items(): + paddle.distributed.broadcast(tensor, src=0, group=None, sync_op=True) + else: + if sd_group.nranks > 1: + paddle.distributed.parallel.sync_params_buffers(eval_model, comm_group=sd_group, fuse_params=False) + if dp_group.nranks > 1: + print(dp_group) + paddle.distributed.parallel.sync_params_buffers( + eval_model, comm_group=dp_group, src_rank=dp_group.rank, fuse_params=False + ) + + +def cleanup_model(sefl: Trainer, **kwargs): + pass + + Trainer.init_train_model_opt = init_train_model_opt Trainer.init_train_log = init_train_log Trainer.init_train_state = init_train_state Trainer.full_training_step = full_training_step +Trainer.offload = offload_model +Trainer.reload = reload_model +Trainer.export_evaluate_model = export_evaluate_model + class PolicyTrainer(Trainer): def __init__( @@ -747,7 +905,10 @@ def __init__( self.ptx_dataset = ptx_dataset self.eval_dataset = eval_dataset - (policy_model, reference_model, reward_model, value_model) = model + (policy_model, reference_model, reward_model, value_model, policy_model_eval, value_model_eval) = model + self._policy_model_eval = policy_model_eval + self._value_model_eval = value_model_eval + # policy_tokenizer and value_tokenizer should be same (policy_tokenizer, reference_tokenizer, reward_tokenizer, value_tokenizer) = tokenizer @@ -837,7 +998,7 @@ def __init__( self.reward_tokenizer = self.tokenizer self.generation_config = GenerationConfig( - max_length=self.args.max_length, + max_new_tokens=self.args.max_length, num_return_sequences=self.args.num_return_sequences, temperature=self.args.temperature, top_p=self.args.top_p, @@ -1088,6 +1249,21 @@ def gen_epoch_data(): ): # generate 
batches self.set_eval() + + policy_model_train = self.policy_trainer.model + self.policy_trainer.export_evaluate_model( + policy_model_train, self._policy_model_eval, with_offload=True + ) + # todo: zhui + # self.optimizer.offload() + self.value_trainer.export_evaluate_model( + self.value_trainer.model, self._value_model_eval, with_offload=True + ) + + # self.reference_model.reload() + # self.reward_model.reload() + + # 生成数据 rl_batches = self.split_rl_micro_batches(prompt_only_batch) if self.use_ptx: ptx_batches = self.split_ptx_micro_batches(ptx_batch) @@ -1095,6 +1271,12 @@ def gen_epoch_data(): ptx_batches = [None for _ in range(len(rl_batches))] paddle.device.cuda.empty_cache() + # # 数据造好, 开始训练 + # self.reference_model.offload() + # self.reward_model.offload() + # policy_model_eval.cleanup() + # value_model_eval.cleanup() + self.set_train() for _ in range(self.args.update_iters): for rl_batch, ptx_batch in zip(rl_batches, ptx_batches): @@ -1275,7 +1457,12 @@ def train( train_step_kwargs.update({"epoch": epoch, "step": step}) rl_batch, ptx_batch = inputs # TODO(guosheng): make rl_step/ptx_step run with autocast_smart_context_manager + + # policy_model.reload() + # value_model.reload() + rl_info, train_step_kwargs = self.rl_step(rl_batch, **train_step_kwargs) + paddle.device.cuda.empty_cache() if self.use_ptx: ptx_info, train_step_kwargs = self.ptx_step(ptx_batch, **train_step_kwargs) @@ -1511,6 +1698,7 @@ def split_rl_micro_batches( prompt_only_batch, ) micro_batches.extend(self.rollout(micro_batch)) + return micro_batches @paddle.no_grad() @@ -1519,7 +1707,14 @@ def rollout(self, prompt_only_batch: PromptOnlyBatch) -> List[Dict[str, Any]]: input_ids = prompt_only_batch["input_ids"] # NOTE: generation output of paddlenlp do not contain prompt, we should # change sequences here. 
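# Context for the NOTE above: the patch relies on generate() returning only the newly
# generated tokens, so the PPO rollout has to reassemble prompt + response into one
# sequence before the later log-prob and value passes. A minimal sketch with toy shapes
# (illustrative only, not the exact code of this patch):
import paddle

prompt = paddle.to_tensor([[101, 7, 9]], dtype="int64")    # [batch, prompt_len]
response = paddle.to_tensor([[12, 13, 2]], dtype="int64")  # [batch, new_tokens] from generate()
sequence = paddle.concat([prompt, response], axis=-1)      # [batch, prompt_len + new_tokens]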
- sequences = self.actor_model.generate( + + # todo, fixme zhui, self.actor_model.generate + if self._policy_model_eval is not None: + actor_model_in_use = self._policy_model_eval + else: + actor_model_in_use = self.actor_model + + sequences = actor_model_in_use.generate( input_ids=input_ids, attention_mask=prompt_only_batch["attention_mask"], generation_config=self.generation_config, @@ -1559,15 +1754,29 @@ def post_rollout( reward_seq = sequence reward_attention_mask = attention_mask - logits = self.actor_model( + # todo, fixme zhui, self.actor_model forward + if self._policy_model_eval is not None: + actor_model_in_use = self._policy_model_eval + else: + actor_model_in_use = self.actor_model + + if self._value_model_eval is not None: + reward_critic_model_in_use = self._value_model_eval + else: + reward_critic_model_in_use = self.reward_critic_model + + logits = actor_model_in_use( sequence, attention_mask=attention_mask, return_dict=True, ).logits + # todo, fixme zhui, self.reference_model forward ref_logits = self.reference_model(sequence, attention_mask=attention_mask, return_dict=True).logits + # todo, fixme zhui, self.reward_model forward reward_score = self.reward_model(reward_seq, attention_mask=reward_attention_mask, return_dict=True).end_scores - reward_value = self.reward_critic_model( + # todo, fixme zhui, self.reward_critic_model forward + reward_value = reward_critic_model_in_use( sequence, attention_mask=attention_mask, return_dict=True, diff --git a/examples/RLHF/reward_main.py b/examples/RLHF/reward_main.py index f52415452b2e..6bcd4485ccfa 100644 --- a/examples/RLHF/reward_main.py +++ b/examples/RLHF/reward_main.py @@ -32,6 +32,14 @@ from paddlenlp.transformers import AutoConfig, AutoTokenizer, LlamaTokenizer from paddlenlp.utils.log import logger +# launch would unset http_proxy +# export https_proxy=http://172.19.57.45:3128 +# os.environ["http_proxy"] = "http://172.19.57.45:3128" +# os.environ["https_proxy"] = "http://172.19.57.45:3128" +os.environ["http_proxy"] = "http://10.162.37.16:8128" +os.environ["https_proxy"] = "http://10.162.37.16:8128" + + @dataclass class TrainingArguments(TrainingArguments): loss_type: Literal["token-wise", "sequence-wise"] = field( diff --git a/paddlenlp/transformers/model_utils.py b/paddlenlp/transformers/model_utils.py index 72e46e08b202..c07216e5a702 100644 --- a/paddlenlp/transformers/model_utils.py +++ b/paddlenlp/transformers/model_utils.py @@ -2256,19 +2256,20 @@ def from_pretrained(cls, pretrained_model_name_or_path, *args, **kwargs): if "quant_weight" in key: quantization_linear_list.append(key[:-13]) - model, missing_keys, unexpected_keys, mismatched_keys = cls._load_pretrained_model( - model=model, - state_dict=state_dict, - loaded_keys=loaded_state_dict_keys, - resolved_archive_file=resolved_sharded_files if is_sharded else resolved_archive_file, - pretrained_model_name_or_path=pretrained_model_name_or_path, - config=config, - ignore_mismatched_sizes=ignore_mismatched_sizes, - low_cpu_mem_usage=low_cpu_mem_usage, - dtype=dtype, - keep_in_fp32_modules=keep_in_fp32_modules, - quantization_linear_list=quantization_linear_list, - ) + model, missing_keys, unexpected_keys, mismatched_keys = model, [], [], [] + # model, missing_keys, unexpected_keys, mismatched_keys = cls._load_pretrained_model( + # model=model, + # state_dict=state_dict, + # loaded_keys=loaded_state_dict_keys, + # resolved_archive_file=resolved_sharded_files if is_sharded else resolved_archive_file, + # pretrained_model_name_or_path=pretrained_model_name_or_path, + # 
config=config, + # ignore_mismatched_sizes=ignore_mismatched_sizes, + # low_cpu_mem_usage=low_cpu_mem_usage, + # dtype=dtype, + # keep_in_fp32_modules=keep_in_fp32_modules, + # quantization_linear_list=quantization_linear_list, + # ) # load generation_config.json if model.can_generate() and pretrained_model_name_or_path is not None: From d295d1123d14211519bb74df7118cc4752d43dfe Mon Sep 17 00:00:00 2001 From: whucsgs Date: Thu, 22 Feb 2024 08:11:26 +0000 Subject: [PATCH 10/46] Make non-PipelineParallel models use the same loss layer with PipeModel to unify. --- examples/RLHF/models/ppo_model_utils.py | 46 +++++++++++---- examples/RLHF/ppo_trainer.py | 74 +++++++++++++++++++++---- 2 files changed, 98 insertions(+), 22 deletions(-) diff --git a/examples/RLHF/models/ppo_model_utils.py b/examples/RLHF/models/ppo_model_utils.py index aeab7371bc24..601d63d560ca 100644 --- a/examples/RLHF/models/ppo_model_utils.py +++ b/examples/RLHF/models/ppo_model_utils.py @@ -16,6 +16,7 @@ from __future__ import annotations +import inspect from dataclasses import dataclass from typing import Optional, Tuple @@ -77,6 +78,8 @@ def actor_loss_fn( return paddle.sum(paddle.maximum(pg_loss1, pg_loss2) * mask) / mask.sum() def forward(self, logits, input_ids, old_log_probs, reward_advantages, sequence_mask, start=None): + # tgt_mask or sequence_mask according to length + # When used in pipe mode, batches among accumulation steps should be paded. # Hard to pad acorss batches, think in some cases one batch might have the # longest prompt+target length but the shortest target lengh, which might @@ -102,6 +105,27 @@ def forward(self, logits, input_ids, old_log_probs, reward_advantages, sequence_ return actor_loss +def merge_fwd_labels(loss_cls): + """ + PipelineParallel and trainer.criterion both use labels as tuple, thus wrap. + """ + ori_fwd = loss_cls.forward + + def loss_fwd(self, predict, labels): + return ori_fwd(self, predict, *labels) + + fwd_params = inspect.signature(ori_fwd).parameters + # forward(self, predict, label1, label2, ...) 
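# A self-contained sketch of what the introspection just below derives from a loss
# class's forward signature (toy class, mirrors the logic; not part of this patch):
import inspect

class _ToyLoss:
    def forward(self, predict, labels, old_log_probs, sequence_mask=None):
        return predict

_params = inspect.signature(_ToyLoss.forward).parameters
_label_names = list(_params.keys())[2:]  # skip self and predict
_label_defaults = {
    name: _params[name].default
    for name in _label_names
    if _params[name].default is not inspect.Parameter.empty
}
assert _label_names == ["labels", "old_log_probs", "sequence_mask"]
assert _label_defaults == {"sequence_mask": None}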
+ loss_cls.label_names = list(fwd_params.keys())[2:] + loss_cls.label_default_values = {} + for label_name in loss_cls.label_names: + if fwd_params[label_name].default is not inspect.Parameter.empty: + loss_cls.label_default_values[label_name] = fwd_params[label_name].default + loss_cls.forward = loss_fwd + return loss_cls + + +@merge_fwd_labels class RLHFPPOMixedLoss(nn.Layer): """provide two losses, one for PPO loss, the other for SFT loss.""" @@ -111,9 +135,11 @@ def __init__(self, config, **kwargs): self.ppo_criterion = RLHFPPOLoss(config, **kwargs) self.sft_criterion = PretrainingCriterion(config) - def forward(self, logits, label_info): - labels, input_ids, old_log_probs, reward_advantages, sequence_mask = label_info + def forward(self, logits, labels, input_ids, old_log_probs, reward_advantages, sequence_mask): + # def forward(self, logits, label_info): + # labels, input_ids, old_log_probs, reward_advantages, sequence_mask = label_info + logits = logits if isinstance(logits, paddle.Tensor) else logits[0] loss = None # sft, pt loss if labels is not None: @@ -125,6 +151,7 @@ def forward(self, logits, label_info): return loss +@merge_fwd_labels class RLHFValueLoss(nn.Layer): def __init__(self, config, **kwargs): super().__init__() @@ -151,15 +178,14 @@ def critic_loss_fn( def forward( self, reward_values, - # old_reward_values, - # reward_returns, - # sequence_mask, - # start=None, - label_info, + old_reward_values, + reward_returns, + sequence_mask, + start=None, + # label_info, ): - if not isinstance(reward_values, paddle.Tensor): - reward_values = reward_values[0] - old_reward_values, reward_returns, sequence_mask = label_info + # old_reward_values, reward_returns, sequence_mask = label_info + reward_values = reward_values if isinstance(reward_values, paddle.Tensor) else reward_values[0] # if start is not None: # old_reward_values = old_reward_values[:, start:] # sequence_mask = sequence_mask[:, start:] diff --git a/examples/RLHF/ppo_trainer.py b/examples/RLHF/ppo_trainer.py index 07dc1e0af11c..6594b751abba 100644 --- a/examples/RLHF/ppo_trainer.py +++ b/examples/RLHF/ppo_trainer.py @@ -27,6 +27,7 @@ import paddle.nn.functional as F import tqdm from data import DummyDataset, PromptOnlyBatch +from models.ppo_model_utils import RLHFPPOMixedLoss, RLHFValueLoss from paddle.distributed import fleet from paddle.io import DataLoader, Dataset, DistributedBatchSampler from paddle.utils import map_structure @@ -496,7 +497,8 @@ def __init__( optimizers: Tuple[paddle.optimizer.Optimizer, paddle.optimizer.lr.LRScheduler] = (None, None), preprocess_logits_for_metrics: Callable[[paddle.Tensor, paddle.Tensor], paddle.Tensor] = None, ): - + # only used for non-PipelineParallel models + criterion = RLHFPPOMixedLoss(model.config, ptx_coeff=args.ptx_coeff) super().__init__( model, criterion, @@ -511,6 +513,27 @@ def __init__( preprocess_logits_for_metrics, ) + def _prepare_inputs(self, inputs: Dict[str, Union[paddle.Tensor, Any]]) -> Dict[str, Union[paddle.Tensor, Any]]: + inputs = super()._prepare_input(inputs) + label_names = self.criterion.__class__.label_names + # some data fields are used both in model and loss + shared_fields = set(["input_ids", "attention_mask"]) + labels = [] + for name in label_names: + if name not in inputs: + label = self.criterion.__class__.label_default_values.get(name, None) + elif name in shared_fields: + label = inputs[name] + else: + label = inputs.pop(name) + labels.append(label) + # "labels" is the pre-defined label name in Trainer + inputs["labels"] = labels + # 
NOTE: TensorParallel model requires non-Tensor inputs to be lists and + # broadcast them, thus do not or optionally use these inputs. labels use + # in criterion not send to model can workaround this. + return inputs + def actor_loss_fn( self, log_probs: paddle.Tensor, @@ -528,7 +551,7 @@ def actor_loss_fn( ) return paddle.sum(paddle.maximum(pg_loss1, pg_loss2) * mask) / mask.sum() - def compute_loss(self, model, inputs, return_outputs=False): + def _compute_loss(self, model, inputs, return_outputs=False): """ How the loss is computed by Trainer. By default, all models return the loss in the first element. Subclass and override for custom behavior. @@ -603,7 +626,7 @@ def __init__( optimizers: Tuple[paddle.optimizer.Optimizer, paddle.optimizer.lr.LRScheduler] = (None, None), preprocess_logits_for_metrics: Callable[[paddle.Tensor, paddle.Tensor], paddle.Tensor] = None, ): - + criterion = RLHFValueLoss(model.config, clip_range_value=args.clip_range_value) super().__init__( model, criterion, @@ -618,6 +641,27 @@ def __init__( preprocess_logits_for_metrics, ) + def _prepare_inputs(self, inputs: Dict[str, Union[paddle.Tensor, Any]]) -> Dict[str, Union[paddle.Tensor, Any]]: + inputs = super()._prepare_input(inputs) + label_names = self.criterion.__class__.label_names + # some data fields are used both in model and loss + shared_fields = set(["input_ids", "attention_mask"]) + labels = [] + for name in label_names: + if name not in inputs: + label = self.criterion.__class__.label_default_values.get(name, None) + elif name in shared_fields: + label = inputs[name] + else: + label = inputs.pop(name) + labels.append(label) + # "labels" is the pre-defined label name in Trainer + inputs["labels"] = labels + # NOTE: TensorParallel model requires non-Tensor inputs to be lists and + # broadcast them, thus do not or optionally use these inputs. labels use + # in criterion not send to model can workaround this. + return inputs + def critic_loss_fn( self, values: paddle.Tensor, @@ -635,7 +679,7 @@ def critic_loss_fn( vf_loss2 = paddle.square(values_clipped - returns) return 0.5 * paddle.sum(paddle.maximum(vf_loss1, vf_loss2) * mask) / mask.sum() - def compute_loss(self, model, inputs, return_outputs=False): + def _compute_loss(self, model, inputs, return_outputs=False): """ How the loss is computed by Trainer. By default, all models return the loss in the first element. Subclass and override for custom behavior. @@ -1667,6 +1711,9 @@ def rl_step(self, rl_batch: Dict[str, paddle.Tensor], **kwargs) -> Dict[str, Any attention_mask = rl_batch["attention_mask"] # log_probs has shifted by one for predicted logits + # TODO(guosheng): When using flash_attn with casual mask and right padding + # inputs, responses of batch input cannot be got by sliced from start. And + # use sequences (as labels) with full length instead of target length. 
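# Background for the start/sequence_mask slicing below: logits[:, t] predicts token
# t + 1, so per-token log-probs come from logits[:, :-1] gathered against labels
# sequence[:, 1:], and the response region of the prompt+response sequence begins at
# index prompt_len - 1 in that shifted view. A sketch of the gather step; the repo's
# gather_log_probabilities helper plays this role, and the exact gather op used here
# is an assumption:
import paddle
import paddle.nn.functional as F

def gather_log_probs(logits, labels):
    # logits: [batch, seq_len, vocab_size], labels: [batch, seq_len]
    log_probs = F.log_softmax(logits, axis=-1)
    picked = paddle.take_along_axis(log_probs, labels.unsqueeze(axis=-1), axis=-1)
    return picked.squeeze(axis=-1)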
start = prompt.shape[-1] - 1 sequence_mask = attention_mask[:, 1:] @@ -1803,15 +1850,16 @@ def post_rollout( sequence: paddle.Tensor, attention_mask: paddle.Tensor, ) -> Dict[str, Any]: - if self.reward_tokenizer is not self.tokenizer: + if False: # self.reward_tokenizer is not self.tokenizer: + # right padding reward_tokenize_output = batch_retokenize( sequence, src_tokenizer=self.tokenizer, dest_tokenizer=self.reward_tokenizer, skip_special_tokens=True, ) - reward_seq = reward_tokenize_output["input_ids"] - reward_attention_mask = reward_tokenize_output["attention_mask"] + reward_seq = sequence = reward_tokenize_output["input_ids"] + reward_attention_mask = attention_mask = reward_tokenize_output["attention_mask"] else: # for text in self.tokenizer.batch_decode( # sequence, @@ -1854,17 +1902,19 @@ def post_rollout( ] # .scores # TODO(guosheng): move these to model methods such as get_logprobs reward_score = reward_score.squeeze(axis=-1) - reward_value = reward_value.squeeze(axis=-1)[:, :-1] + reward_value = reward_value.squeeze(axis=-1) + + reward_value = reward_value[:, :-1] log_probs = gather_log_probabilities(logits[:, :-1], sequence[:, 1:]) ref_log_probs = gather_log_probabilities(ref_logits[:, :-1], sequence[:, 1:]) return { "prompt": prompt, - "log_probs": log_probs, - "ref_log_probs": ref_log_probs, - "rewards": reward_score, - "reward_values": reward_value, "input_ids": sequence, "attention_mask": attention_mask, + "rewards": reward_score, + "reward_values": reward_value, + "log_probs": log_probs, + "ref_log_probs": ref_log_probs, } # @paddle.no_grad() From 38cc1a73b6c8dee50fa742c12642dcf7e9397da8 Mon Sep 17 00:00:00 2001 From: Zhong Hui Date: Thu, 22 Feb 2024 16:44:10 +0800 Subject: [PATCH 11/46] add offload. --- examples/RLHF/ppo_main.py | 3 ++ examples/RLHF/ppo_trainer.py | 59 +++++++++++++++++++++++++++--------- 2 files changed, 47 insertions(+), 15 deletions(-) diff --git a/examples/RLHF/ppo_main.py b/examples/RLHF/ppo_main.py index f49d0dd2469d..30f6b791223d 100644 --- a/examples/RLHF/ppo_main.py +++ b/examples/RLHF/ppo_main.py @@ -319,6 +319,9 @@ def main(): else None ) + # offload + # cleanup actor_eval_model, reward_critic_eval_model + # offload actor_reference_model reward_model trainer = PPOTrainer( # (policy_model, reference_model, reward_model, value_model) # policy_model, sft_model, reward_model, value_model diff --git a/examples/RLHF/ppo_trainer.py b/examples/RLHF/ppo_trainer.py index 3185d3f29150..e94f481bc083 100644 --- a/examples/RLHF/ppo_trainer.py +++ b/examples/RLHF/ppo_trainer.py @@ -64,6 +64,8 @@ ) from paddlenlp.utils.distributed import distributed_gather +global_dev_id = 0 if paddle.get_device() == "cpu" else int(paddle.get_device().split(":")[1]) + def batch_retokenize( input_ids: paddle.Tensor, @@ -474,14 +476,38 @@ def full_training_step(self: Trainer, inputs: Dict[str, paddle.Tensor], **kwargs return kwargs -def offload_model(self: Trainer, **kwargs): - for k, v in self.model.state_dict(): - v = paddle.copy_(v, paddle.CPUPlace()) +def offload_tensor_to_cpu(tensor): + if isinstance(tensor, dict): + for _, v in tensor.items(): + offload_tensor_to_cpu(v) + elif isinstance(tensor, paddle.Tensor): + if not tensor.place.is_cpu_place(): + cpu_tensor = tensor._copy_to(paddle.CPUPlace(), True) + tensor.value().get_tensor()._share_data_with(cpu_tensor.value().get_tensor()) + else: + raise ValueError(f"Can't parse for type {type(tensor)}") -def reload_model(self: Trainer, **kwargs): - for k, v in self.model.state_dict(): - v = paddle.copy_(v, paddle.GPUPlace()) 
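# Why the helpers above are rewritten just below: iterating a state_dict directly yields
# key strings, so `for k, v in self.model.state_dict():` cannot unpack as intended, and
# even with .items() rebinding the loop variable would not move any parameter (the
# replacement also switches to paddle.CUDAPlace(global_dev_id) for the device place).
# The new helpers copy each tensor to the target place and share the copied storage back
# into the parameter in place. A pure-Python analogue of the rebinding pitfall:
state = {"weight": [1.0, 2.0]}
for name, value in state.items():
    value = ["somewhere else"]        # rebinds the local name only
assert state["weight"] == [1.0, 2.0]  # the dict entry is untouched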
+def reload_tensor_to_gpu(tensor): + if isinstance(tensor, dict): + for _, v in tensor.items(): + reload_tensor_to_gpu(v) + elif isinstance(tensor, paddle.Tensor): + if not tensor.place.is_gpu_place(): + gpu_tensor = tensor._copy_to(paddle.CUDAPlace(global_dev_id), True) + tensor.value().get_tensor()._share_data_with(gpu_tensor.value().get_tensor()) + else: + raise ValueError(f"Can't parse for type {type(tensor)}") + + +def cleanup_tensor_space(tensor): + if isinstance(tensor, dict): + for _, v in tensor.items(): + cleanup_tensor_space(v) + elif isinstance(tensor, paddle.Tensor): + tensor._clear_data() + else: + raise ValueError(f"Can't parse for type {type(tensor)}") def export_evaluate_model(self: Trainer, train_model, eval_model, **kwargs): @@ -541,7 +567,7 @@ def export_evaluate_model(self: Trainer, train_model, eval_model, **kwargs): eval_state_dict[key].set_value(tensor) if with_offload: - train_state_dict[key].copy_(train_state_dict[key]._copy_to(paddle.CPUPlace(), False)) + offload_tensor_to_cpu(train_state_dict[key]) else: # single to single # tp+pp -> single @@ -596,12 +622,12 @@ def create_send_recv_table(all_keys, train_keys, eval_keys): # Offload train model if need if global_rank == src_rank and with_offload: - train_state_dict[key].copy_(train_state_dict[key]._copy_to(paddle.CPUPlace(), blocking=False)) + offload_tensor_to_cpu(train_state_dict[key]) else: # 其他 DP rank 的state dict, 适配 offload 和初始化 if with_offload: for key in list(train_state_dict.keys()): - train_state_dict[key].copy_(train_state_dict[key]._copy_to(paddle.CPUPlace(), blocking=False)) + offload_tensor_to_cpu(train_state_dict[key]) for k, v in eval_state_dict.items(): if not v._is_initialized(): v.set_value(paddle.random()) @@ -620,17 +646,11 @@ def create_send_recv_table(all_keys, train_keys, eval_keys): ) -def cleanup_model(sefl: Trainer, **kwargs): - pass - - Trainer.init_train_model_opt = init_train_model_opt Trainer.init_train_log = init_train_log Trainer.init_train_state = init_train_state Trainer.full_training_step = full_training_step -Trainer.offload = offload_model -Trainer.reload = reload_model Trainer.export_evaluate_model = export_evaluate_model @@ -883,6 +903,7 @@ def __init__( optimizers: Tuple[paddle.optimizer.Optimizer, paddle.optimizer.lr.LRScheduler] = (None, None), preprocess_logits_for_metrics: Callable[[paddle.Tensor, paddle.Tensor], paddle.Tensor] = None, ): + with guard_set_args(args, {"recompute": False, "fp16_opt_level": "O1"}): # just used to create trival attrs might be used in the training # process of trainer, while changing some args to avoid model usage @@ -1262,6 +1283,8 @@ def gen_epoch_data(): # self.reference_model.reload() # self.reward_model.reload() + # reload_tensor_to_gpu(self.reference_model.state_dict()) + # reload_tensor_to_gpu(self.reward_model.state_dict()) # 生成数据 rl_batches = self.split_rl_micro_batches(prompt_only_batch) @@ -1276,6 +1299,10 @@ def gen_epoch_data(): # self.reward_model.offload() # policy_model_eval.cleanup() # value_model_eval.cleanup() + # offload_tensor_to_cpu(self.reference_model.state_dict()) + # offload_tensor_to_cpu(self.reward_model.state_dict()) + # cleanup_tensor_space(self._policy_model_eval.state_dict()) + # cleanup_tensor_space(self._value_model_eval.state_dict()) self.set_train() for _ in range(self.args.update_iters): @@ -1460,6 +1487,8 @@ def train( # policy_model.reload() # value_model.reload() + reload_tensor_to_gpu(self.actor_model.state_dict()) + reload_tensor_to_gpu(self.reward_critic_model.state_dict()) rl_info, 
train_step_kwargs = self.rl_step(rl_batch, **train_step_kwargs) From 6ff38c8219eaaec82b2617d738fd663c9faf6485 Mon Sep 17 00:00:00 2001 From: whucsgs Date: Thu, 22 Feb 2024 11:07:46 +0000 Subject: [PATCH 12/46] Use create_loss to unify Pipe and non-Pipe usage. --- examples/RLHF/models/model_pp.py | 14 +-- examples/RLHF/models/ppo_model_utils.py | 65 +++++++------ examples/RLHF/ppo_trainer.py | 124 +----------------------- 3 files changed, 46 insertions(+), 157 deletions(-) diff --git a/examples/RLHF/models/model_pp.py b/examples/RLHF/models/model_pp.py index bd8dbc7c19d9..82a5d9020ee0 100644 --- a/examples/RLHF/models/model_pp.py +++ b/examples/RLHF/models/model_pp.py @@ -13,6 +13,7 @@ # limitations under the License. import importlib +import inspect import types import paddle @@ -27,7 +28,7 @@ return_args, ) -from .ppo_model_utils import RLHFPPOMixedLoss, RLHFValueLoss +from .ppo_model_utils import RLHFPPOMixedLoss, RLHFValueLoss, create_loss from .score_model_utils import ScoreModelMixin @@ -213,7 +214,6 @@ def _impl(self, *args, **kwargs): except TypeError: # otherwise, inputs is any valid format of non_pipe_model forward args, # convert to dict, to support more args format in prediction_pipeline_step - import inspect arg_dict = ( inspect.signature(self._non_pipe_model_class.forward).bind(*((self,) + args), **kwargs).arguments @@ -311,10 +311,8 @@ def __init__(self, config, **kwargs): super().__init__(config) self._ignore_index = self._loss_fn.sft_criterion.ignore_index - # @loss_fwd_wrapper def get_loss_fn(self, config): - init_kwargs = self._init_kwargs - return RLHFPPOMixedLoss(config, **init_kwargs) + return create_loss(RLHFPPOMixedLoss, config, self._init_kwargs) @property def head_out_meta(self): @@ -416,12 +414,8 @@ def add_head(self, config): self.add_sequential_layer(LayerDesc(_LlamaRMSNormPipe, config=config), norm_prefix) self.add_sequential_layer(LayerDesc(ValueHead, config, **init_kwargs), "") - # @loss_fwd_wrapper def get_loss_fn(self, config): - init_kwargs = self._init_kwargs - # TODO(guosheng): make wraper for loss to make original loss adapt to - # pipeline only one args - return RLHFValueLoss(config, **init_kwargs) + return create_loss(RLHFValueLoss, config, self._init_kwargs) @property def head_out_meta(self): diff --git a/examples/RLHF/models/ppo_model_utils.py b/examples/RLHF/models/ppo_model_utils.py index 601d63d560ca..cb605ca29a62 100644 --- a/examples/RLHF/models/ppo_model_utils.py +++ b/examples/RLHF/models/ppo_model_utils.py @@ -51,6 +51,37 @@ class ValueOutput(ModelOutput): cross_attentions: Optional[Tuple[paddle.Tensor]] = None +def merge_fwd_labels(loss_cls): + """ + PipelineParallel and trainer.criterion both use labels as tuple, thus wrap. + """ + ori_fwd = loss_cls.forward + + def loss_fwd(self, predict, labels): + return ori_fwd(self, predict, *labels) + + fwd_params = inspect.signature(ori_fwd).parameters + # forward(self, predict, label1, label2, ...) + loss_cls.label_names = list(fwd_params.keys())[2:] + loss_cls.label_default_values = {} + for label_name in loss_cls.label_names: + if fwd_params[label_name].default is not inspect.Parameter.empty: + loss_cls.label_default_values[label_name] = fwd_params[label_name].default + loss_cls.forward = loss_fwd + return loss_cls + + +def create_loss(loss_cls, config, extra_args): + # forward(self, predict, label1, label2, ...) 
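# Note: the signature inspected just below is loss_cls.__init__ (its arguments after
# `self` and `config` are the tunable loss hyper-parameters). A runnable sketch of the
# filtering create_loss performs, with a toy class standing in for the real losses:
import inspect

class _ToyPPOLoss:
    def __init__(self, config, clip_range_ratio=0.2, ptx_coeff=16):
        self.config = config
        self.clip_range_ratio = clip_range_ratio
        self.ptx_coeff = ptx_coeff

_arg_names = list(inspect.signature(_ToyPPOLoss.__init__).parameters.keys())[2:]
_extra_args = {"clip_range_ratio": 0.1, "ptx_coeff": 8, "unrelated_field": 42}
_kwargs = {name: _extra_args[name] for name in _arg_names if name in _extra_args}
_loss = _ToyPPOLoss(config=None, **_kwargs)  # picks up clip_range_ratio=0.1 and ptx_coeff=8 only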
+ loss_arg_names = list(inspect.signature(loss_cls.__init__).parameters.keys())[2:] + if isinstance(extra_args, dict): + loss_kwargs = dict([(name, extra_args[name]) for name in loss_arg_names if name in extra_args]) + else: + # create from TrainingArguments + loss_kwargs = dict([(name, getattr(extra_args, name)) for name in loss_arg_names if hasattr(extra_args, name)]) + return loss_cls(config, **loss_kwargs) + + def gather_log_probabilities(logits: paddle.Tensor, labels: paddle.Tensor) -> paddle.Tensor: """Gather log probabilities of the given labels from the logits.""" log_probs = F.log_softmax(logits, axis=-1) @@ -59,9 +90,9 @@ def gather_log_probabilities(logits: paddle.Tensor, labels: paddle.Tensor) -> pa class RLHFPPOLoss(nn.Layer): - def __init__(self, config, **kwargs): + def __init__(self, config, clip_range_ratio=0.2): super().__init__() - self.clip_range_ratio = kwargs.pop("clip_range_ratio", getattr(config, "clip_range_ratio", 0.2)) + self.clip_range_ratio = clip_range_ratio self.config = config def actor_loss_fn( @@ -105,34 +136,14 @@ def forward(self, logits, input_ids, old_log_probs, reward_advantages, sequence_ return actor_loss -def merge_fwd_labels(loss_cls): - """ - PipelineParallel and trainer.criterion both use labels as tuple, thus wrap. - """ - ori_fwd = loss_cls.forward - - def loss_fwd(self, predict, labels): - return ori_fwd(self, predict, *labels) - - fwd_params = inspect.signature(ori_fwd).parameters - # forward(self, predict, label1, label2, ...) - loss_cls.label_names = list(fwd_params.keys())[2:] - loss_cls.label_default_values = {} - for label_name in loss_cls.label_names: - if fwd_params[label_name].default is not inspect.Parameter.empty: - loss_cls.label_default_values[label_name] = fwd_params[label_name].default - loss_cls.forward = loss_fwd - return loss_cls - - @merge_fwd_labels class RLHFPPOMixedLoss(nn.Layer): """provide two losses, one for PPO loss, the other for SFT loss.""" - def __init__(self, config, **kwargs): + def __init__(self, config, ptx_coeff=16, clip_range_ratio=0.2): super(RLHFPPOMixedLoss, self).__init__() - self.ptx_coeff = kwargs.pop("ptx_coeff", getattr(config, "ptx_coeff", 16.0)) - self.ppo_criterion = RLHFPPOLoss(config, **kwargs) + self.ptx_coeff = ptx_coeff + self.ppo_criterion = RLHFPPOLoss(config, clip_range_ratio) self.sft_criterion = PretrainingCriterion(config) def forward(self, logits, labels, input_ids, old_log_probs, reward_advantages, sequence_mask): @@ -153,9 +164,9 @@ def forward(self, logits, labels, input_ids, old_log_probs, reward_advantages, s @merge_fwd_labels class RLHFValueLoss(nn.Layer): - def __init__(self, config, **kwargs): + def __init__(self, config, clip_range_value=5.0): super().__init__() - self.clip_range_value = kwargs.pop("clip_range_value", getattr(config, "clip_range_value", 5.0)) + self.clip_range_value = clip_range_value self.config = config def critic_loss_fn( diff --git a/examples/RLHF/ppo_trainer.py b/examples/RLHF/ppo_trainer.py index 6594b751abba..7570c0ab3f9d 100644 --- a/examples/RLHF/ppo_trainer.py +++ b/examples/RLHF/ppo_trainer.py @@ -27,7 +27,7 @@ import paddle.nn.functional as F import tqdm from data import DummyDataset, PromptOnlyBatch -from models.ppo_model_utils import RLHFPPOMixedLoss, RLHFValueLoss +from models.ppo_model_utils import RLHFPPOMixedLoss, RLHFValueLoss, create_loss from paddle.distributed import fleet from paddle.io import DataLoader, Dataset, DistributedBatchSampler from paddle.utils import map_structure @@ -498,7 +498,7 @@ def __init__( 
preprocess_logits_for_metrics: Callable[[paddle.Tensor, paddle.Tensor], paddle.Tensor] = None, ): # only used for non-PipelineParallel models - criterion = RLHFPPOMixedLoss(model.config, ptx_coeff=args.ptx_coeff) + criterion = create_loss(RLHFPPOMixedLoss, model.config, args) super().__init__( model, criterion, @@ -534,67 +534,6 @@ def _prepare_inputs(self, inputs: Dict[str, Union[paddle.Tensor, Any]]) -> Dict[ # in criterion not send to model can workaround this. return inputs - def actor_loss_fn( - self, - log_probs: paddle.Tensor, - old_log_probs: paddle.Tensor, - advantages: paddle.Tensor, - mask: paddle.Tensor, - ) -> paddle.Tensor: - # policy gradient loss - ratio = paddle.exp(log_probs - old_log_probs) - pg_loss1 = -advantages * ratio - pg_loss2 = -advantages * paddle.clip( - ratio, - 1.0 - self.clip_range_ratio, - 1.0 + self.clip_range_ratio, - ) - return paddle.sum(paddle.maximum(pg_loss1, pg_loss2) * mask) / mask.sum() - - def _compute_loss(self, model, inputs, return_outputs=False): - """ - How the loss is computed by Trainer. By default, all models return the loss in the first element. - Subclass and override for custom behavior. - """ - labels = inputs.get("labels", None) - if labels is not None: - labels = inputs.get("labels", None) - outputs = model(**inputs) - ptx_loss = outputs["loss"] if isinstance(outputs, dict) else outputs[0] - ptx_loss = self.ptx_coeff * ptx_loss - return ptx_loss - - input_ids = inputs["input_ids"] - attention_mask = inputs["attention_mask"] - reward_advantages = inputs["reward_advantages"] - # NOTE: TensorParallel model requires non-Tensor inputs to be lists and - # broadcast them, thus do not or optionally use these inputs currently. - # use_cache = inputs["use_cache"] - # return_dict = inputs["return_dict"] - start = inputs.pop("start", None) - old_log_probs = inputs["old_log_probs"][:, start:] if start is not None else inputs["old_log_probs"] - sequence_mask = inputs["sequence_mask"][:, start:] if start is not None else inputs["sequence_mask"] - outputs = model( - input_ids=input_ids, - attention_mask=attention_mask, # use_cache=use_cache, return_dict=return_dict - ) - - logits = outputs["logits"] if isinstance(outputs, dict) else outputs - if isinstance(outputs, dict): - logits = outputs["logits"] - elif isinstance(outputs, tuple): - logits = outputs[0] - - log_probs = gather_log_probabilities(logits[:, :-1], input_ids[:, 1:])[:, -old_log_probs.shape[1] :] - actor_loss = self.actor_loss_fn( - log_probs, - old_log_probs, - reward_advantages, - sequence_mask, - ) - - return actor_loss - def full_training_step(self: Trainer, inputs: Dict[str, paddle.Tensor], **kwargs): labels = inputs.get("labels", None) if labels is not None: # use ptx @@ -626,7 +565,8 @@ def __init__( optimizers: Tuple[paddle.optimizer.Optimizer, paddle.optimizer.lr.LRScheduler] = (None, None), preprocess_logits_for_metrics: Callable[[paddle.Tensor, paddle.Tensor], paddle.Tensor] = None, ): - criterion = RLHFValueLoss(model.config, clip_range_value=args.clip_range_value) + # only used for non-PipelineParallel models + criterion = create_loss(RLHFValueLoss, model.config, args) super().__init__( model, criterion, @@ -662,62 +602,6 @@ def _prepare_inputs(self, inputs: Dict[str, Union[paddle.Tensor, Any]]) -> Dict[ # in criterion not send to model can workaround this. 
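# A dict-only sketch of the packing done in _prepare_inputs above: the criterion's label
# names are pulled out of the feature dict (shared fields stay visible to the model) and
# the resulting list is stored under the reserved "labels" key. Toy values, label names
# taken from RLHFValueLoss.forward; no paddle required:
_label_names = ["old_reward_values", "reward_returns", "sequence_mask", "start"]
_label_defaults = {"start": None}
_shared_fields = {"input_ids", "attention_mask"}

_inputs = {"input_ids": [1, 2, 3], "attention_mask": [1, 1, 1],
           "old_reward_values": [0.3], "reward_returns": [0.7], "sequence_mask": [1]}
_labels = []
for _name in _label_names:
    if _name not in _inputs:
        _labels.append(_label_defaults.get(_name, None))
    elif _name in _shared_fields:
        _labels.append(_inputs[_name])
    else:
        _labels.append(_inputs.pop(_name))
_inputs["labels"] = _labels
# _inputs now carries only model arguments plus "labels"; the criterion unpacks the list.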
return inputs - def critic_loss_fn( - self, - values: paddle.Tensor, - old_values: paddle.Tensor, - returns: paddle.Tensor, - mask: paddle.Tensor, - ) -> paddle.Tensor: - """Compute critic loss.""" - # TODO(guosheng): use paddle.clip when its min/max can support more than - # 0D Tensor - values_clipped = paddle.minimum( - paddle.maximum(values, old_values - self.clip_range_value), old_values + self.clip_range_value - ) - vf_loss1 = paddle.square(values - returns) - vf_loss2 = paddle.square(values_clipped - returns) - return 0.5 * paddle.sum(paddle.maximum(vf_loss1, vf_loss2) * mask) / mask.sum() - - def _compute_loss(self, model, inputs, return_outputs=False): - """ - How the loss is computed by Trainer. By default, all models return the loss in the first element. - Subclass and override for custom behavior. - """ - input_ids = inputs["input_ids"] - attention_mask = inputs["attention_mask"] - reward_returns = inputs["reward_returns"] - # NOTE: TensorParallel model requires non-Tensor inputs to be lists and - # broadcast them, thus do not or optionally use these inputs currently. - # use_cache = inputs["use_cache"] - # return_dict = inputs["return_dict"] - start = inputs.pop("start", None) - old_reward_values = ( - inputs["old_reward_values"][:, start:] if start is not None else inputs["old_reward_values"] - ) - sequence_mask = inputs["sequence_mask"][:, start:] if start is not None else inputs["sequence_mask"] - outputs = model( - input_ids=input_ids, - attention_mask=attention_mask, # use_cache=use_cache, return_dict=return_dict - ) - - # We don't use .loss here since the model may return tuples instead of ModelOutput. - reward_values = outputs["scores"] if isinstance(outputs, dict) else outputs - if isinstance(outputs, dict): - reward_values = outputs["scores"] - elif isinstance(outputs, tuple): - reward_values = outputs[0] - - reward_values = reward_values.squeeze(axis=-1)[:, :-1] - reward_critic_loss = self.critic_loss_fn( - reward_values[:, -old_reward_values.shape[1] :], - old_reward_values, - reward_returns, - sequence_mask, - ) - - return reward_critic_loss - def full_training_step(self: Trainer, inputs: Dict[str, paddle.Tensor], **kwargs): # TODO(guosheng): Make these training control vars mapping as class attr, # then PPOTrainer can extract and reuse them to avoid hard code. From 6e49431fc77fba62fc7cc3a76a44aedbeae6ceff Mon Sep 17 00:00:00 2001 From: Zhong Hui Date: Fri, 23 Feb 2024 14:20:23 +0800 Subject: [PATCH 13/46] Add eval mode and offload level. 
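A short note on how the two knobs added by this patch interact (names taken from the
diff below; behavior paraphrased as a sketch, not authoritative):

    # eval_mode: None (reuse the train model), "single", or "tensor_parallel"
    # offload_level: any combination of "eval" and "reward"
    def resolve_offload_level(eval_mode, offload_level):
        # Without a separate eval model there is nothing to clean up for "eval",
        # so that part of the flag is dropped (mirrors the gating in ppo_main.py).
        if eval_mode is None and offload_level is not None:
            offload_level = offload_level.replace("eval", "")
        return offload_level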
--- examples/RLHF/ppo_config.json | 4 ++- examples/RLHF/ppo_main.py | 49 ++++++++++++++++++++++++++++------- examples/RLHF/ppo_trainer.py | 33 ++++++++++++++++------- 3 files changed, 67 insertions(+), 19 deletions(-) diff --git a/examples/RLHF/ppo_config.json b/examples/RLHF/ppo_config.json index df3dc8db17a6..9946dfcb91d4 100644 --- a/examples/RLHF/ppo_config.json +++ b/examples/RLHF/ppo_config.json @@ -48,5 +48,7 @@ "comment-PKU_Beaver-max_grad_norm": 1.0, "max_grad_norm": 1.0, "adam_beta1": 0.9, - "adam_beta2": 0.95 + "adam_beta2": 0.95, + "eval_mode": "single", + "offload_level": "eval reward" } diff --git a/examples/RLHF/ppo_main.py b/examples/RLHF/ppo_main.py index 30f6b791223d..d931c70ed43c 100644 --- a/examples/RLHF/ppo_main.py +++ b/examples/RLHF/ppo_main.py @@ -22,7 +22,7 @@ from data import PromptOnlyDataset, SupervisedDataset, parse_dataset from models import AutoModelForScore from models.score_model import LlamaModelForScore # noqa -from ppo_trainer import PPOTrainer +from ppo_trainer import PPOTrainer, cleanup_tensor_space, offload_tensor_to_cpu from paddlenlp.trainer import PdArgumentParser, TrainingArguments, get_last_checkpoint from paddlenlp.transformers import ( @@ -126,6 +126,18 @@ class TrainingArguments(TrainingArguments): default=16, metadata={"help": "Batch size (per device) for the training dataloader."}, ) + eval_mode: str = field( + default=None, + metadata={ + "help": "eval mode for actor model and reward_critic_model, optional for: None, single, tensor_parallel." + }, + ) + + offload_level: str = field( + default=None, + metadata={"help": "Offload model, optional for: eval, reward, eval reward, ."}, + ) + # save_generation_output: bool = field( # default=False, # metadata={"help": "Whether to save generated text to file when eval"}, @@ -197,6 +209,8 @@ def main(): model_args, data_args, training_args = parser.parse_args_into_dataclasses() training_args.print_config(model_args, "Model") training_args.print_config(data_args, "Data") + if training_args.eval_mode is None and training_args.offload_level is not None: + training_args.offload_level = training_args.offload_level.replace("eval", "") # Setup GPU & distributed training paddle.set_device(training_args.device) @@ -251,10 +265,14 @@ def main(): config=model_config, ) - config = copy.deepcopy(actor_model.config) - config.tensor_parallel_degree = -1 - config.tensor_parallel_rank = 0 - actor_eval_model = AutoModelForCausalLM.from_config(config) + if training_args.eval_mode is not None: + config = copy.deepcopy(actor_model.config) + if training_args.eval_mode == "single": + config.tensor_parallel_degree = -1 + config.tensor_parallel_rank = 0 + actor_eval_model = AutoModelForCausalLM.from_config(config) + else: + actor_eval_model = None # reference model actor_reference_model = AutoModelForCausalLM.from_pretrained( @@ -296,10 +314,14 @@ def main(): model_args.reward_critic_model_name_or_path, model_max_length=data_args.max_length, padding_side="left" ) - config = copy.deepcopy(reward_critic_model.config) - config.tensor_parallel_degree = -1 - config.tensor_parallel_rank = 0 - reward_critic_eval_model = AutoModelForScore.from_config(config) + if training_args.eval_mode is not None: + config = copy.deepcopy(reward_critic_model.config) + if training_args.eval_mode == "single": + config.tensor_parallel_degree = -1 + config.tensor_parallel_rank = 0 + reward_critic_eval_model = AutoModelForScore.from_config(config) + else: + reward_critic_eval_model = None for tokenizer in [actor_tokenizer, reward_tokenizer, 
reward_critic_tokenizer]: if isinstance(tokenizer, LlamaTokenizer) and tokenizer.pad_token_id is None: @@ -322,6 +344,15 @@ def main(): # offload # cleanup actor_eval_model, reward_critic_eval_model # offload actor_reference_model reward_model + + if training_args.offload_level is not None: + if "eval" in training_args.offload_level: + cleanup_tensor_space(actor_eval_model.state_dict()) + cleanup_tensor_space(reward_critic_eval_model.state_dict()) + if "reward" in training_args.offload_level: + offload_tensor_to_cpu(actor_reference_model.state_dict()) + offload_tensor_to_cpu(reward_model.state_dict()) + trainer = PPOTrainer( # (policy_model, reference_model, reward_model, value_model) # policy_model, sft_model, reward_model, value_model diff --git a/examples/RLHF/ppo_trainer.py b/examples/RLHF/ppo_trainer.py index e94f481bc083..72211c9dfbbe 100644 --- a/examples/RLHF/ppo_trainer.py +++ b/examples/RLHF/ppo_trainer.py @@ -511,6 +511,9 @@ def cleanup_tensor_space(tensor): def export_evaluate_model(self: Trainer, train_model, eval_model, **kwargs): + if eval_model is None: + return None + with_offload = kwargs.pop("with_offload", False) train_tp_size = max(train_model.config.tensor_parallel_degree, 1) eval_tp_size = max(eval_model.config.tensor_parallel_degree, 1) @@ -566,6 +569,11 @@ def export_evaluate_model(self: Trainer, train_model, eval_model, **kwargs): # print(tensor.shape) eval_state_dict[key].set_value(tensor) + if not eval_state_dict[key]._is_initialized(): + v = eval_state_dict[key] + t = paddle._C_ops.full_like(v, 0, v.dtype, paddle.CUDAPlace(global_dev_id)) + v.get_tensor()._share_data_with(t.get_tensor()) + if with_offload: offload_tensor_to_cpu(train_state_dict[key]) else: @@ -612,7 +620,9 @@ def create_send_recv_table(all_keys, train_keys, eval_keys): for key, src_rank, dst_rank in table: # Init tensor for model is cleaned if global_rank == dst_rank and not eval_state_dict[key]._is_initialized(): - eval_state_dict[key] = paddle.random() + v = eval_state_dict[key] + t = paddle._C_ops.full_like(v, 0, v.dtype, paddle.CUDAPlace(global_dev_id)) + v.get_tensor()._share_data_with(t.get_tensor()) if global_rank == src_rank: dist.stream.send(train_state_dict[key], dst=dst_rank) @@ -630,7 +640,8 @@ def create_send_recv_table(all_keys, train_keys, eval_keys): offload_tensor_to_cpu(train_state_dict[key]) for k, v in eval_state_dict.items(): if not v._is_initialized(): - v.set_value(paddle.random()) + t = paddle._C_ops.full_like(v, 0, v.dtype, paddle.CUDAPlace(global_dev_id)) + v.get_tensor()._share_data_with(t.get_tensor()) paddle.distributed.barrier() if eval_tp_size == 1: @@ -1271,14 +1282,15 @@ def gen_epoch_data(): # generate batches self.set_eval() - policy_model_train = self.policy_trainer.model self.policy_trainer.export_evaluate_model( - policy_model_train, self._policy_model_eval, with_offload=True + self.policy_trainer.model, + self._policy_model_eval, + with_offload=self.args.offload_level is not None, ) # todo: zhui # self.optimizer.offload() self.value_trainer.export_evaluate_model( - self.value_trainer.model, self._value_model_eval, with_offload=True + self.value_trainer.model, self._value_model_eval, with_offload=self.args.offload_level is not None ) # self.reference_model.reload() @@ -1299,10 +1311,13 @@ def gen_epoch_data(): # self.reward_model.offload() # policy_model_eval.cleanup() # value_model_eval.cleanup() - # offload_tensor_to_cpu(self.reference_model.state_dict()) - # offload_tensor_to_cpu(self.reward_model.state_dict()) - # 
cleanup_tensor_space(self._policy_model_eval.state_dict()) - # cleanup_tensor_space(self._value_model_eval.state_dict()) + if self.args.offload_level is not None: + if "eval" in self.args.offload_level: + cleanup_tensor_space(self._policy_model_eval.state_dict()) + cleanup_tensor_space(self._value_model_eval.state_dict()) + if "reward" in self.args.offload_level: + offload_tensor_to_cpu(self.reference_model.state_dict()) + offload_tensor_to_cpu(self.reward_model.state_dict()) self.set_train() for _ in range(self.args.update_iters): From c421af7514db8523b2be6db4d7dbb50b67614b70 Mon Sep 17 00:00:00 2001 From: Zhong Hui Date: Fri, 23 Feb 2024 15:18:34 +0800 Subject: [PATCH 14/46] merge --- examples/RLHF/ppo_config.json | 2 +- examples/RLHF/ppo_main.py | 5 +++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/examples/RLHF/ppo_config.json b/examples/RLHF/ppo_config.json index 2b88f31a007c..c528d6232d6f 100644 --- a/examples/RLHF/ppo_config.json +++ b/examples/RLHF/ppo_config.json @@ -6,7 +6,7 @@ "reward_model_name_or_path": "PKU-Alignment/beaver-7b-v1.0-reward", "_actor_model_name_or_path": "facebook/llama-7b", "_reward_model_name_or_path": "facebook/llama-7b", - "output_dir": "/root/paddlejob/workspace/guosheng/checkpoints/ppo-sd14pp2-test", + "output_dir": "./ppo-sd14pp2-test", "max_length": 512, "temperature": 1.0, "num_return_sequences":1, diff --git a/examples/RLHF/ppo_main.py b/examples/RLHF/ppo_main.py index af191303809b..e483eae1a2ee 100644 --- a/examples/RLHF/ppo_main.py +++ b/examples/RLHF/ppo_main.py @@ -14,6 +14,11 @@ import os import sys + +os.environ["http_proxy"] = "http://10.162.37.16:8128" +os.environ["https_proxy"] = "http://10.162.37.16:8128" +os.environ["no_proxy"] = "localhost,bcebos.com" + from dataclasses import dataclass, field from typing import Any, Dict, Tuple From 63df4fd8308eac324fcfd0f3123a3ddca4b85b6f Mon Sep 17 00:00:00 2001 From: Zhong Hui Date: Mon, 26 Feb 2024 16:37:10 +0800 Subject: [PATCH 15/46] support tp+pp --- examples/RLHF/ppo_config.json | 4 +- examples/RLHF/ppo_main.py | 206 +++++++++++++++++++--------------- examples/RLHF/ppo_trainer.py | 49 +++++--- 3 files changed, 148 insertions(+), 111 deletions(-) diff --git a/examples/RLHF/ppo_config.json b/examples/RLHF/ppo_config.json index 3cfcf821fa0d..19539bc452cc 100644 --- a/examples/RLHF/ppo_config.json +++ b/examples/RLHF/ppo_config.json @@ -53,6 +53,6 @@ "max_grad_norm": 1.0, "adam_beta1": 0.9, "adam_beta2": 0.95, - "eval_mode": "", - "offload_level": "" + "eval_mode": "tensor_parallel", + "offload_level": "eval" } diff --git a/examples/RLHF/ppo_main.py b/examples/RLHF/ppo_main.py index 00b1be1abc29..cdbeceab091b 100644 --- a/examples/RLHF/ppo_main.py +++ b/examples/RLHF/ppo_main.py @@ -16,9 +16,24 @@ import os import sys -os.environ["http_proxy"] = "http://10.162.37.16:8128" -os.environ["https_proxy"] = "http://10.162.37.16:8128" +# os.environ["http_proxy"] = "http://10.162.37.16:8128" +# os.environ["https_proxy"] = "http://10.162.37.16:8128" os.environ["no_proxy"] = "localhost,bcebos.com" +# launch would unset http_proxy +# export https_proxy=http://172.19.57.45:3128 + +# os.environ["http_proxy"] = "http://172.19.56.199:3128" +# os.environ["https_proxy"] = "http://172.19.56.199:3128" + +# os.environ["http_proxy"] = "http://172.19.57.45:3128" +# os.environ["https_proxy"] = "http://172.19.57.45:3128" + +# os.environ["http_proxy"] = "http://10.162.37.16:8128" +# os.environ["https_proxy"] = "http://10.162.37.16:8128" +# os.environ["no_proxy"] = "localhost,bcebos.com" + +# 
os.environ["http_proxy"] = "agent.baidu.com:8118" +# os.environ["https_proxy"] = "agent.baidu.com:8118" from dataclasses import dataclass, field from typing import Any, Dict, Tuple @@ -30,18 +45,23 @@ from ppo_trainer import PPOTrainer, cleanup_tensor_space, offload_tensor_to_cpu from paddlenlp.trainer import PdArgumentParser, TrainingArguments, get_last_checkpoint -from paddlenlp.transformers import AutoConfig, AutoTokenizer, LlamaTokenizer +from paddlenlp.transformers import ( + AutoConfig, + AutoModelForCausalLM, + AutoTokenizer, + LlamaTokenizer, +) from paddlenlp.utils.log import logger # launch would unset http_proxy # export https_proxy=http://172.19.57.45:3128 # os.environ["http_proxy"] = "http://172.19.56.199:3128" # os.environ["https_proxy"] = "http://172.19.56.199:3128" -# os.environ["http_proxy"] = "http://172.19.57.45:3128" -# os.environ["https_proxy"] = "http://172.19.57.45:3128" -os.environ["http_proxy"] = "http://10.162.37.16:8128" -os.environ["https_proxy"] = "http://10.162.37.16:8128" -os.environ["no_proxy"] = "localhost,bcebos.com" +os.environ["http_proxy"] = "http://172.19.57.45:3128" +os.environ["https_proxy"] = "http://172.19.57.45:3128" +# os.environ["http_proxy"] = "http://10.162.37.16:8128" +# os.environ["https_proxy"] = "http://10.162.37.16:8128" +# os.environ["no_proxy"] = "localhost,bcebos.com" @dataclass @@ -248,21 +268,17 @@ def main(): dtype = "float32" training_args.max_length = data_args.max_length + model_class_lm, model_class_score = AutoModelForCausalLM, AutoModelForScore if training_args.pipeline_parallel_degree > 1: - global AutoModelForCausalLM, AutoModelForScore from models.model_pp import LlamaPolicyPipe, LlamaValuePipe - AutoModelForCausalLM = LlamaPolicyPipe - AutoModelForScore = LlamaValuePipe + model_class_lm = LlamaPolicyPipe + model_class_score = LlamaValuePipe extra_args = { "ptx_coeff": training_args.ptx_coeff, "clip_range_ratio": training_args.clip_range_ratio, } else: - from models import AutoModelForScore - - from paddlenlp.transformers import AutoModelForCausalLM - extra_args = {} # actor model @@ -276,7 +292,9 @@ def main(): if hasattr(model_config, "use_flash_attention"): model_config.use_flash_attention = model_args.use_flash_attention - actor_model = AutoModelForCausalLM.from_pretrained( + model_config.num_hidden_layers = 2 + + actor_model = model_class_lm.from_pretrained( model_args.actor_model_name_or_path, config=model_config, **extra_args, @@ -293,7 +311,7 @@ def main(): actor_eval_model = None # reference model - actor_reference_model = AutoModelForCausalLM.from_pretrained( + actor_reference_model = model_class_lm.from_pretrained( model_args.actor_model_name_or_path, config=model_config, ) @@ -311,7 +329,9 @@ def main(): ) if hasattr(model_config, "use_flash_attention"): model_config.use_flash_attention = model_args.use_flash_attention - reward_model = AutoModelForScore.from_pretrained( + model_config.num_hidden_layers = 2 + + reward_model = model_class_score.from_pretrained( model_args.reward_model_name_or_path, config=model_config, score_type="reward", @@ -323,7 +343,7 @@ def main(): # critic model if model_args.reward_critic_model_name_or_path is None: model_args.reward_critic_model_name_or_path = model_args.reward_model_name_or_path - reward_critic_model = AutoModelForScore.from_pretrained( + reward_critic_model = model_class_score.from_pretrained( model_args.reward_critic_model_name_or_path, config=model_config, score_type="critic", @@ -342,80 +362,79 @@ def main(): else: reward_critic_eval_model = None - -# # actor model 
-# model_config = AutoConfig.from_pretrained( -# model_args.actor_model_name_or_path, -# tensor_parallel_output=False, -# tensor_parallel_degree=training_args.tensor_parallel_degree, -# tensor_parallel_rank=training_args.tensor_parallel_rank, -# dtype=dtype, -# ) -# model_config.num_hidden_layers = 2 -# if hasattr(model_config, "use_flash_attention"): -# model_config.use_flash_attention = model_args.use_flash_attention -# actor_model = AutoModelForCausalLM.from_pretrained( -# model_args.actor_model_name_or_path, -# config=model_config, -# ) -# -# if training_args.eval_mode is not None: -# config = copy.deepcopy(actor_model.config) -# if training_args.eval_mode == "single": -# config.tensor_parallel_degree = -1 -# config.tensor_parallel_rank = 0 -# actor_eval_model = AutoModelForCausalLM.from_config(config) -# else: -# actor_eval_model = None -# -# # reference model -# actor_reference_model = AutoModelForCausalLM.from_pretrained( -# model_args.actor_model_name_or_path, -# config=model_config, -# ) -# actor_tokenizer = AutoTokenizer.from_pretrained( -# model_args.actor_model_name_or_path, model_max_length=data_args.max_length, padding_side="left" -# ) -# -# # reward model -# model_config = AutoConfig.from_pretrained( -# model_args.reward_model_name_or_path, -# tensor_parallel_output=False, -# tensor_parallel_degree=training_args.tensor_parallel_degree, -# tensor_parallel_rank=training_args.tensor_parallel_rank, -# dtype=dtype, -# ) -# model_config.num_hidden_layers = 2 -# if hasattr(model_config, "use_flash_attention"): -# model_config.use_flash_attention = model_args.use_flash_attention -# reward_model = AutoModelForScore.from_pretrained( -# model_args.reward_model_name_or_path, -# config=model_config, -# score_type="reward", -# do_normalize=training_args.normalize_reward, -# ) -# reward_tokenizer = AutoTokenizer.from_pretrained( -# model_args.reward_model_name_or_path, model_max_length=data_args.max_length, padding_side="right" -# ) -# -# # critic model -# if model_args.reward_critic_model_name_or_path is None: -# model_args.reward_critic_model_name_or_path = model_args.reward_model_name_or_path -# reward_critic_model = AutoModelForScore.from_pretrained( -# model_args.reward_critic_model_name_or_path, config=model_config, score_type="critic", do_normalize=False -# ) -# reward_critic_tokenizer = AutoTokenizer.from_pretrained( -# model_args.reward_critic_model_name_or_path, model_max_length=data_args.max_length, padding_side="left" -# ) -# -# if training_args.eval_mode is not None: -# config = copy.deepcopy(reward_critic_model.config) -# if training_args.eval_mode == "single": -# config.tensor_parallel_degree = -1 -# config.tensor_parallel_rank = 0 -# reward_critic_eval_model = AutoModelForScore.from_config(config) -# else: -# reward_critic_eval_model = None + # # actor model + # model_config = AutoConfig.from_pretrained( + # model_args.actor_model_name_or_path, + # tensor_parallel_output=False, + # tensor_parallel_degree=training_args.tensor_parallel_degree, + # tensor_parallel_rank=training_args.tensor_parallel_rank, + # dtype=dtype, + # ) + # model_config.num_hidden_layers = 2 + # if hasattr(model_config, "use_flash_attention"): + # model_config.use_flash_attention = model_args.use_flash_attention + # actor_model = AutoModelForCausalLM.from_pretrained( + # model_args.actor_model_name_or_path, + # config=model_config, + # ) + # + # if training_args.eval_mode is not None: + # config = copy.deepcopy(actor_model.config) + # if training_args.eval_mode == "single": + # 
config.tensor_parallel_degree = -1 + # config.tensor_parallel_rank = 0 + # actor_eval_model = AutoModelForCausalLM.from_config(config) + # else: + # actor_eval_model = None + # + # # reference model + # actor_reference_model = AutoModelForCausalLM.from_pretrained( + # model_args.actor_model_name_or_path, + # config=model_config, + # ) + # actor_tokenizer = AutoTokenizer.from_pretrained( + # model_args.actor_model_name_or_path, model_max_length=data_args.max_length, padding_side="left" + # ) + # + # # reward model + # model_config = AutoConfig.from_pretrained( + # model_args.reward_model_name_or_path, + # tensor_parallel_output=False, + # tensor_parallel_degree=training_args.tensor_parallel_degree, + # tensor_parallel_rank=training_args.tensor_parallel_rank, + # dtype=dtype, + # ) + # model_config.num_hidden_layers = 2 + # if hasattr(model_config, "use_flash_attention"): + # model_config.use_flash_attention = model_args.use_flash_attention + # reward_model = AutoModelForScore.from_pretrained( + # model_args.reward_model_name_or_path, + # config=model_config, + # score_type="reward", + # do_normalize=training_args.normalize_reward, + # ) + # reward_tokenizer = AutoTokenizer.from_pretrained( + # model_args.reward_model_name_or_path, model_max_length=data_args.max_length, padding_side="right" + # ) + # + # # critic model + # if model_args.reward_critic_model_name_or_path is None: + # model_args.reward_critic_model_name_or_path = model_args.reward_model_name_or_path + # reward_critic_model = AutoModelForScore.from_pretrained( + # model_args.reward_critic_model_name_or_path, config=model_config, score_type="critic", do_normalize=False + # ) + # reward_critic_tokenizer = AutoTokenizer.from_pretrained( + # model_args.reward_critic_model_name_or_path, model_max_length=data_args.max_length, padding_side="left" + # ) + # + # if training_args.eval_mode is not None: + # config = copy.deepcopy(reward_critic_model.config) + # if training_args.eval_mode == "single": + # config.tensor_parallel_degree = -1 + # config.tensor_parallel_rank = 0 + # reward_critic_eval_model = AutoModelForScore.from_config(config) + # else: + # reward_critic_eval_model = None for tokenizer in [actor_tokenizer, reward_tokenizer, reward_critic_tokenizer]: if isinstance(tokenizer, LlamaTokenizer) and tokenizer.pad_token_id is None: @@ -444,6 +463,7 @@ def main(): cleanup_tensor_space(actor_eval_model.state_dict()) cleanup_tensor_space(reward_critic_eval_model.state_dict()) if "reward" in training_args.offload_level: + # if pp mode, should lazy offload offload_tensor_to_cpu(actor_reference_model.state_dict()) offload_tensor_to_cpu(reward_model.state_dict()) diff --git a/examples/RLHF/ppo_trainer.py b/examples/RLHF/ppo_trainer.py index c90f3a875b63..281b0d3c3de1 100644 --- a/examples/RLHF/ppo_trainer.py +++ b/examples/RLHF/ppo_trainer.py @@ -490,7 +490,8 @@ def offload_tensor_to_cpu(tensor): cpu_tensor = tensor._copy_to(paddle.CPUPlace(), True) tensor.value().get_tensor()._share_data_with(cpu_tensor.value().get_tensor()) else: - raise ValueError(f"Can't parse for type {type(tensor)}") + logger.warning(f"Can't parse for type {type(tensor)}") + return tensor def reload_tensor_to_gpu(tensor): @@ -502,7 +503,8 @@ def reload_tensor_to_gpu(tensor): gpu_tensor = tensor._copy_to(paddle.CUDAPlace(global_dev_id), True) tensor.value().get_tensor()._share_data_with(gpu_tensor.value().get_tensor()) else: - raise ValueError(f"Can't parse for type {type(tensor)}") + logger.warning(f"Can't parse for type {type(tensor)}") + return tensor def 
cleanup_tensor_space(tensor): @@ -512,7 +514,8 @@ def cleanup_tensor_space(tensor): elif isinstance(tensor, paddle.Tensor): tensor._clear_data() else: - raise ValueError(f"Can't parse for type {type(tensor)}") + logger.warning(f"Can't parse for type {type(tensor)}") + return tensor def export_evaluate_model(self: Trainer, train_model, eval_model, **kwargs): @@ -586,7 +589,7 @@ def export_evaluate_model(self: Trainer, train_model, eval_model, **kwargs): # tp+pp -> single raise ValueError("Not support yet.") - def create_send_recv_table(all_keys, train_keys, eval_keys): + def create_send_recv_table(train_keys, eval_keys): recv_table = [] send_table = [] if pp_group.rank == 0: @@ -594,7 +597,7 @@ def create_send_recv_table(all_keys, train_keys, eval_keys): recv_table.append((key, global_rank)) for key in train_keys: - recv_table.append((key, global_rank)) + send_table.append((key, global_rank)) all_recv, all_send = [], [] paddle.distributed.all_gather_object(all_recv, [recv_table], group=pp_group) @@ -605,6 +608,7 @@ def create_send_recv_table(all_keys, train_keys, eval_keys): send_dict = {} for k, v in all_send: send_dict[k] = v + table = [] for k, v in all_recv: # key, send, recv @@ -621,19 +625,27 @@ def create_send_recv_table(all_keys, train_keys, eval_keys): # tp+pp->tp if eval_tp_size > 1 and train_pp_size > 1: table = create_send_recv_table(train_state_dict.keys(), eval_state_dict.keys()) + # print(table) for key, src_rank, dst_rank in table: # Init tensor for model is cleaned - if global_rank == dst_rank and not eval_state_dict[key]._is_initialized(): + # print(key, src_rank, dst_rank, eval_state_dict[key]._is_initialized()) + # if key in train_state_dict: + # print(train_state_dict[key]._is_initialized()) + + if not eval_state_dict[key]._is_initialized(): v = eval_state_dict[key] t = paddle._C_ops.full_like(v, 0, v.dtype, paddle.CUDAPlace(global_dev_id)) v.get_tensor()._share_data_with(t.get_tensor()) - if global_rank == src_rank: - dist.stream.send(train_state_dict[key], dst=dst_rank) + if src_rank == dst_rank and global_rank == src_rank: + eval_state_dict[key].copy_(train_state_dict[key], True) + else: + if global_rank == src_rank: + dist.stream.send(train_state_dict[key], dst=dst_rank) - if global_rank == dst_rank: - dist.stream.recv(eval_state_dict[key], dst=dst_rank) + if global_rank == dst_rank: + dist.stream.recv(eval_state_dict[key], src=src_rank) # Offload train model if need if global_rank == src_rank and with_offload: @@ -645,6 +657,7 @@ def create_send_recv_table(all_keys, train_keys, eval_keys): offload_tensor_to_cpu(train_state_dict[key]) for k, v in eval_state_dict.items(): if not v._is_initialized(): + # print(f"init {k}") t = paddle._C_ops.full_like(v, 0, v.dtype, paddle.CUDAPlace(global_dev_id)) v.get_tensor()._share_data_with(t.get_tensor()) @@ -654,11 +667,13 @@ def create_send_recv_table(all_keys, train_keys, eval_keys): paddle.distributed.broadcast(tensor, src=0, group=None, sync_op=True) else: if sd_group.nranks > 1: - paddle.distributed.parallel.sync_params_buffers(eval_model, comm_group=sd_group, fuse_params=False) + if dp_group.rank <= 0: + paddle.distributed.parallel.sync_params_buffers( + eval_model, comm_group=sd_group, src_rank=sd_group.ranks[0], fuse_params=False + ) if dp_group.nranks > 1: - print(dp_group) paddle.distributed.parallel.sync_params_buffers( - eval_model, comm_group=dp_group, src_rank=dp_group.rank, fuse_params=False + eval_model, comm_group=dp_group, src_rank=dp_group.ranks[0], fuse_params=False ) @@ -1423,17 +1438,20 @@ def 
gen_epoch_data(): # generate batches self.set_eval() + # self.optimizer.offload() + if self.eval_mode is not None and "optimizer" in self.args.offload_level: + offload_tensor_to_cpu(self.policy_trainer.optimizer.state_dict()) + offload_tensor_to_cpu(self.value_trainer.optimizer.state_dict()) + self.policy_trainer.export_evaluate_model( self.policy_trainer.model, self._policy_model_eval, with_offload=self.args.offload_level is not None, ) # todo: zhui - # self.optimizer.offload() self.value_trainer.export_evaluate_model( self.value_trainer.model, self._value_model_eval, with_offload=self.args.offload_level is not None ) - # self.reference_model.reload() # self.reward_model.reload() # reload_tensor_to_gpu(self.reference_model.state_dict()) @@ -2001,7 +2019,6 @@ def post_rollout( else: reward_critic_model_in_use = self.reward_critic_model - # pipe model outputs a logits tensor with LMHead, while non-pipe model # outputs a tuple with logits tensor as the only one element. logits = actor_model_in_use( From c9e5cad8278f7e3d0c7a70af5f398245cf407efa Mon Sep 17 00:00:00 2001 From: Zhong Hui Date: Tue, 27 Feb 2024 20:37:37 +0800 Subject: [PATCH 16/46] fix data split. --- examples/RLHF/ppo_main.py | 65 ++++++----- examples/RLHF/ppo_trainer.py | 215 ++++++++++++++++++++++++++--------- 2 files changed, 198 insertions(+), 82 deletions(-) diff --git a/examples/RLHF/ppo_main.py b/examples/RLHF/ppo_main.py index cdbeceab091b..024ad1090bf7 100644 --- a/examples/RLHF/ppo_main.py +++ b/examples/RLHF/ppo_main.py @@ -18,19 +18,19 @@ # os.environ["http_proxy"] = "http://10.162.37.16:8128" # os.environ["https_proxy"] = "http://10.162.37.16:8128" -os.environ["no_proxy"] = "localhost,bcebos.com" +# os.environ["no_proxy"] = "localhost,bcebos.com" # launch would unset http_proxy # export https_proxy=http://172.19.57.45:3128 -# os.environ["http_proxy"] = "http://172.19.56.199:3128" -# os.environ["https_proxy"] = "http://172.19.56.199:3128" +os.environ["http_proxy"] = "http://172.19.56.199:3128" +os.environ["https_proxy"] = "http://172.19.56.199:3128" # os.environ["http_proxy"] = "http://172.19.57.45:3128" # os.environ["https_proxy"] = "http://172.19.57.45:3128" # os.environ["http_proxy"] = "http://10.162.37.16:8128" # os.environ["https_proxy"] = "http://10.162.37.16:8128" -# os.environ["no_proxy"] = "localhost,bcebos.com" +os.environ["no_proxy"] = "localhost,bcebos.com" # os.environ["http_proxy"] = "agent.baidu.com:8118" # os.environ["https_proxy"] = "agent.baidu.com:8118" @@ -53,16 +53,6 @@ ) from paddlenlp.utils.log import logger -# launch would unset http_proxy -# export https_proxy=http://172.19.57.45:3128 -# os.environ["http_proxy"] = "http://172.19.56.199:3128" -# os.environ["https_proxy"] = "http://172.19.56.199:3128" -os.environ["http_proxy"] = "http://172.19.57.45:3128" -os.environ["https_proxy"] = "http://172.19.57.45:3128" -# os.environ["http_proxy"] = "http://10.162.37.16:8128" -# os.environ["https_proxy"] = "http://10.162.37.16:8128" -# os.environ["no_proxy"] = "localhost,bcebos.com" - @dataclass class TrainingArguments(TrainingArguments): @@ -310,11 +300,22 @@ def main(): else: actor_eval_model = None - # reference model - actor_reference_model = model_class_lm.from_pretrained( - model_args.actor_model_name_or_path, - config=model_config, - ) + # todo reference model + if training_args.eval_mode is not None: + config = copy.deepcopy(model_config) + if training_args.eval_mode == "single": + config.tensor_parallel_degree = -1 + config.tensor_parallel_rank = 0 + actor_reference_model = 
AutoModelForCausalLM.from_pretrained( + model_args.actor_model_name_or_path, + config=config, + ) + else: + actor_reference_model = model_class_lm.from_pretrained( + model_args.actor_model_name_or_path, + config=model_config, + ) + actor_tokenizer = AutoTokenizer.from_pretrained( model_args.actor_model_name_or_path, model_max_length=data_args.max_length, padding_side="left" ) @@ -330,13 +331,25 @@ def main(): if hasattr(model_config, "use_flash_attention"): model_config.use_flash_attention = model_args.use_flash_attention model_config.num_hidden_layers = 2 - - reward_model = model_class_score.from_pretrained( - model_args.reward_model_name_or_path, - config=model_config, - score_type="reward", - do_normalize=training_args.normalize_reward, - ) + # todo + if training_args.eval_mode is not None: + config = copy.deepcopy(model_config) + if training_args.eval_mode == "single": + config.tensor_parallel_degree = -1 + config.tensor_parallel_rank = 0 + reward_model = AutoModelForScore.from_pretrained( + model_args.reward_model_name_or_path, + config=config, + score_type="reward", + do_normalize=training_args.normalize_reward, + ) + else: + reward_model = model_class_score.from_pretrained( + model_args.reward_model_name_or_path, + config=model_config, + score_type="reward", + do_normalize=training_args.normalize_reward, + ) reward_tokenizer = AutoTokenizer.from_pretrained( model_args.reward_model_name_or_path, model_max_length=data_args.max_length, padding_side="right" ) diff --git a/examples/RLHF/ppo_trainer.py b/examples/RLHF/ppo_trainer.py index 281b0d3c3de1..10d7cb72b7ec 100644 --- a/examples/RLHF/ppo_trainer.py +++ b/examples/RLHF/ppo_trainer.py @@ -481,41 +481,80 @@ def full_training_step(self: Trainer, inputs: Dict[str, paddle.Tensor], **kwargs return kwargs -def offload_tensor_to_cpu(tensor): - if isinstance(tensor, dict): - for _, v in tensor.items(): +def offload_tensor_to_cpu(tensors): + if isinstance(tensors, dict): + for _, v in tensors.items(): offload_tensor_to_cpu(v) - elif isinstance(tensor, paddle.Tensor): - if not tensor.place.is_cpu_place(): - cpu_tensor = tensor._copy_to(paddle.CPUPlace(), True) - tensor.value().get_tensor()._share_data_with(cpu_tensor.value().get_tensor()) + elif isinstance(tensors, paddle.Tensor): + if not tensors.place.is_cpu_place(): + cpu_tensor = tensors._copy_to(paddle.CPUPlace(), True) + tensors.value().get_tensor()._share_data_with(cpu_tensor.value().get_tensor()) else: - logger.warning(f"Can't parse for type {type(tensor)}") - return tensor + logger.warning(f"Can't parse for type {type(tensors)}") + return tensors -def reload_tensor_to_gpu(tensor): - if isinstance(tensor, dict): - for _, v in tensor.items(): +def reload_tensor_to_gpu(tensors): + if isinstance(tensors, dict): + for _, v in tensors.items(): reload_tensor_to_gpu(v) - elif isinstance(tensor, paddle.Tensor): - if not tensor.place.is_gpu_place(): - gpu_tensor = tensor._copy_to(paddle.CUDAPlace(global_dev_id), True) - tensor.value().get_tensor()._share_data_with(gpu_tensor.value().get_tensor()) + elif isinstance(tensors, paddle.Tensor): + if not tensors.place.is_gpu_place(): + gpu_tensor = tensors._copy_to(paddle.CUDAPlace(global_dev_id), True) + tensors.value().get_tensor()._share_data_with(gpu_tensor.value().get_tensor()) else: - logger.warning(f"Can't parse for type {type(tensor)}") - return tensor + logger.warning(f"Can't parse for type {type(tensors)}") + return tensors -def cleanup_tensor_space(tensor): - if isinstance(tensor, dict): - for _, v in tensor.items(): +def 
cleanup_tensor_space(tensors): + if isinstance(tensors, dict): + for _, v in tensors.items(): cleanup_tensor_space(v) - elif isinstance(tensor, paddle.Tensor): - tensor._clear_data() + elif isinstance(tensors, paddle.Tensor): + tensors._clear_data() else: - logger.warning(f"Can't parse for type {type(tensor)}") - return tensor + logger.warning(f"Can't parse for type {type(tensors)}") + return tensors + + +def data_group_split(tensors, group): + if group is None: + return tensors + if isinstance(tensors, (list, tuple)): + return type(tensors)(data_group_split(t, group) for t in tensors) + elif isinstance(tensors, dict): + new_dict = {} + for k, v in tensors.items(): + new_dict[k] = data_group_split(v, group) + return new_dict + elif isinstance(tensors, paddle.Tensor): + print("Spliting ", tensors) + return tensors.split(group.nranks)[group.rank] + else: + logger.warning(f"Can't parse for type {type(tensors)}") + return tensors + + +def data_group_merge(tensors, group): + if group is None: + return tensors + + if isinstance(tensors, (list, tuple)): + return type(tensors)(data_group_merge(t, group) for t in tensors) + elif isinstance(tensors, dict): + new_dict = {} + for k, v in tensors.items(): + new_dict[k] = data_group_merge(v, group) + return new_dict + elif isinstance(tensors, paddle.Tensor): + tensor_list = [] + print("Mergeing ", tensors) + paddle.distributed.all_gather(tensor_list, tensors, group=group) + return paddle.concat(tensor_list) + else: + logger.warning(f"Can't parse for type {type(tensors)}") + return tensors def export_evaluate_model(self: Trainer, train_model, eval_model, **kwargs): @@ -676,6 +715,32 @@ def create_send_recv_table(train_keys, eval_keys): eval_model, comm_group=dp_group, src_rank=dp_group.ranks[0], fuse_params=False ) + group_nums = self.args.logical_process_index // eval_tp_size + + all_split_table = [] + paddle.distributed.all_gather_object(all_split_table, [(global_rank, group_nums)]) + all_split_table = flatten_list(all_split_table) + split_dict = {} + for k, v in all_split_table: + split_dict[k] = v + + split_ranks = {} + for k, v in all_split_table: + if v in split_ranks: + split_ranks[v].append(k) + else: + split_ranks[v] = [k] + + group = None + for k, ranks in split_ranks.items(): + gp = paddle.distributed.new_group(ranks=ranks) + if global_rank in ranks: + group = gp + + print("all_split_table:", all_split_table) + print("export", group) + return group + Trainer.init_train_model_opt = init_train_model_opt Trainer.init_train_log = init_train_log @@ -1098,39 +1163,59 @@ def __init__( # use trainer for reference_model/reward_model to enable sharding stage-3 # maybe we should allow models to use different dist strategies later if True: # ShardingOption.FULL_SHARD in args.sharding: - self.reference_trainer = Trainer( - reference_model, - criterion, - args, - data_collator, - train_dataset, - eval_dataset, - reference_tokenizer, - compute_metrics, - callbacks, - optimizers, - preprocess_logits_for_metrics, - ) - self.reward_trainer = Trainer( - reward_model, - criterion, + from paddle.distributed.fleet.meta_parallel import PipelineLayer + + with guard_set_args( args, - data_collator, - train_dataset, - eval_dataset, - reward_tokenizer, - compute_metrics, - callbacks, - optimizers, - preprocess_logits_for_metrics, - ) - # TODO(guosheng): sharding stage3 should create master weight optionally - # instead of creation and clear. 
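# Illustrative sketch (not part of the patch): `guard_set_args`, used in the hunk
# above to build the reference/reward trainers, is assumed to behave like the
# context manager below -- temporarily overriding attributes on the TrainingArguments
# object (e.g. forcing pipeline_parallel_degree to 1 for non-pipeline models) and
# restoring the original values on exit. The real helper in the repo may differ.
from contextlib import contextmanager

@contextmanager
def guard_set_args_sketch(args, overrides):
    saved = {name: getattr(args, name, None) for name in overrides}
    for name, value in overrides.items():
        setattr(args, name, value)
    try:
        yield args
    finally:
        for name, value in saved.items():
            setattr(args, name, value)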
- self.reference_trainer.init_train_model_opt(100, None, clear_master_weight=True) # dummy max_steps - self.reward_trainer.init_train_model_opt(100, None, clear_master_weight=True) # dummy max_steps + { + "recompute": False, + "fp16_opt_level": "O1", + "pipeline_parallel_degree": args.pipeline_parallel_degree + if isinstance(reference_model, PipelineLayer) + else 1, # workaround for pipeline parallel model check + }, + ): + + assert args.pipeline_parallel_degree == 1, "error" + self.reference_trainer = Trainer( + reference_model, + criterion, + copy.deepcopy(args), + data_collator, + train_dataset, + eval_dataset, + reference_tokenizer, + compute_metrics, + callbacks, + optimizers, + preprocess_logits_for_metrics, + ) + + assert args.pipeline_parallel_degree == 1, "error" + self.reward_trainer = Trainer( + reward_model, + criterion, + copy.deepcopy(args), + data_collator, + train_dataset, + eval_dataset, + reward_tokenizer, + compute_metrics, + callbacks, + optimizers, + preprocess_logits_for_metrics, + ) + # TODO(guosheng): sharding stage3 should create master weight optionally + # instead of creation and clear. + from paddlenlp.trainer.trainer_utils import ShardingOption + + if args.pipeline_parallel_degree > 1 or ShardingOption.FULL_SHARD in args.sharding: + self.reference_trainer.init_train_model_opt(100, None, clear_master_weight=True) # dummy max_steps + self.reward_trainer.init_train_model_opt(100, None, clear_master_weight=True) # dummy max_steps else: self._reference_model = reference_model self._reward_model = reward_model + self.reference_model.eval() self.reward_model.eval() @@ -1439,11 +1524,11 @@ def gen_epoch_data(): self.set_eval() # self.optimizer.offload() - if self.eval_mode is not None and "optimizer" in self.args.offload_level: + if self.args.eval_mode is not None and "optimizer" in self.args.offload_level: offload_tensor_to_cpu(self.policy_trainer.optimizer.state_dict()) offload_tensor_to_cpu(self.value_trainer.optimizer.state_dict()) - self.policy_trainer.export_evaluate_model( + gp = self.policy_trainer.export_evaluate_model( self.policy_trainer.model, self._policy_model_eval, with_offload=self.args.offload_level is not None, @@ -1457,14 +1542,27 @@ def gen_epoch_data(): # reload_tensor_to_gpu(self.reference_model.state_dict()) # reload_tensor_to_gpu(self.reward_model.state_dict()) + # todo, split prompt_only_batch + # pp2tp2dp2 -> dp4tp2 prompt_only_batch + print("create gp", gp) + prompt_only_batch = data_group_split(prompt_only_batch, group=gp) + print("prompt_only_batch =", prompt_only_batch) # 生成数据 rl_batches = self.split_rl_micro_batches(prompt_only_batch) # rl_batches = self.load_sing_gen_data(as_batches=True, # use_counter=True) if self.use_ptx: + ptx_batch = data_group_split(ptx_batch, group=gp) + print("ptx_batch =", ptx_batch) ptx_batches = self.split_ptx_micro_batches(ptx_batch) + print("ptx_batchs =", ptx_batches) + ptx_batches = data_group_merge(ptx_batches, group=gp) else: ptx_batches = [None for _ in range(len(rl_batches))] + + print("rl_batches =", rl_batches) + # todo, merge data + rl_batches = data_group_merge(rl_batches, group=gp) paddle.device.cuda.empty_cache() # # 数据造好, 开始训练 @@ -2019,6 +2117,7 @@ def post_rollout( else: reward_critic_model_in_use = self.reward_critic_model + logger.error("Get Here!!") # pipe model outputs a logits tensor with LMHead, while non-pipe model # outputs a tuple with logits tensor as the only one element. 
logits = actor_model_in_use( @@ -2026,6 +2125,7 @@ def post_rollout( attention_mask=attention_mask, # return_dict=True, ) # .logits + logger.error("Get Here 1.0!!") if not isinstance(logits, paddle.Tensor): logits = logits[0] ref_logits = self.reference_model( @@ -2033,6 +2133,7 @@ def post_rollout( attention_mask=attention_mask, # return_dict=True, ) # .logits + logger.error("Get Here 2.0!!") if not isinstance(ref_logits, paddle.Tensor): ref_logits = ref_logits[0] @@ -2043,6 +2144,8 @@ def post_rollout( )[ 1 ] # .end_scores + + logger.error("Get Here 3.0!!") reward_value = reward_critic_model_in_use( sequence, attention_mask=attention_mask, From 5979507f45948cd83568b290b9462308edced390 Mon Sep 17 00:00:00 2001 From: whucsgs Date: Wed, 28 Feb 2024 08:24:56 +0000 Subject: [PATCH 17/46] Fix position_ids in generation/eval/train. --- examples/RLHF/models/model_pp.py | 22 ++++++++++++++++------ examples/RLHF/ppo_trainer.py | 26 +++++++++++++++++++++++++- 2 files changed, 41 insertions(+), 7 deletions(-) diff --git a/examples/RLHF/models/model_pp.py b/examples/RLHF/models/model_pp.py index 82a5d9020ee0..65c93e0401a0 100644 --- a/examples/RLHF/models/model_pp.py +++ b/examples/RLHF/models/model_pp.py @@ -240,6 +240,9 @@ def _prepare_pipeline_inputs_func(self, inputs): last_stage_keys = ["labels", "input_ids", "old_log_probs", "reward_advantages", "sequence_mask"] if type(inputs) is dict: + # for left padding, position_ids is nececessary + if "position_ids" not in inputs: + inputs["position_ids"] = make_position_ids(inputs["attention_mask"]) # ppo-loss and ptx-loss need different labels, and data iter provides # corrensponding data, thus add the not provided fields here. # policy trian and infer has different inputs, infer uses position_ids. @@ -256,6 +259,9 @@ def _prepare_pipeline_inputs_func(self, inputs): # for key in last_stage_keys: for key in first_stage_keys + last_stage_keys: if key not in data: + if key == "position_ids": + data[key] = make_position_ids(data["attention_mask"]) + continue data[key] = None # keys = list(inputs[0].keys()) inputs_batch = {key: [data.get(key) for data in inputs] for key in first_stage_keys + last_stage_keys} @@ -268,7 +274,7 @@ def _prepare_pipeline_inputs_func(self, inputs): # have same shape after padding, and each of them cannot pad only # according to its own max length which might be different since the # filed value is None for different batches/tasks. - src_tgt_keys = ["input_ids", "attention_mask", "labels"] + src_tgt_keys = ["input_ids", "attention_mask", "labels", "position_ids"] max_len = max([x.shape[-1] for x in inputs_batch["input_ids"]]) pad_len = [max_len - x.shape[-1] for x in inputs_batch["input_ids"]] for key in src_tgt_keys: @@ -370,10 +376,14 @@ def _prepare_pipeline_inputs_func(self, inputs): get_expected_keys(inputs, last_stage_keys), ] + for data in inputs: + if "position_ids" not in data: + data["position_ids"] = make_position_ids(data["attention_mask"]) # keys = list(inputs[0].keys()) inputs_batch = {key: [data.get(key) for data in inputs] for key in first_stage_keys + last_stage_keys} # 1. 
For input_ids/attention_mask (prompt+target) padding: - src_tgt_keys = ["input_ids", "attention_mask"] + # src_tgt_keys = ["input_ids", "attention_mask"] + src_tgt_keys = ["input_ids", "attention_mask", "position_ids"] max_len = max([x.shape[-1] for x in inputs_batch["input_ids"]]) pad_len = [max_len - x.shape[-1] for x in inputs_batch["input_ids"]] for key in src_tgt_keys: @@ -386,10 +396,10 @@ def _prepare_pipeline_inputs_func(self, inputs): inputs_batch[key] = pad_batches_inputs(inputs_batch[key], padding_value, pad_len=pad_len) # for key, value in inputs_batch.items(): # inputs_batch[key] = pad_batches_inputs(value, padding_value=0) - if "position_ids" not in inputs: - inputs_batch["position_ids"] = [ - make_position_ids(attention_mask) for attention_mask in inputs_batch["attention_mask"] - ] + # if "position_ids" not in inputs: + # inputs_batch["position_ids"] = [ + # make_position_ids(attention_mask) for attention_mask in inputs_batch["attention_mask"] + # ] return [ get_expected_keys(inputs_batch, first_stage_keys), get_expected_keys(inputs_batch, last_stage_keys), diff --git a/examples/RLHF/ppo_trainer.py b/examples/RLHF/ppo_trainer.py index 7570c0ab3f9d..683b4cfe4066 100644 --- a/examples/RLHF/ppo_trainer.py +++ b/examples/RLHF/ppo_trainer.py @@ -27,6 +27,7 @@ import paddle.nn.functional as F import tqdm from data import DummyDataset, PromptOnlyBatch +from models.model_pp import make_position_ids from models.ppo_model_utils import RLHFPPOMixedLoss, RLHFValueLoss, create_loss from paddle.distributed import fleet from paddle.io import DataLoader, Dataset, DistributedBatchSampler @@ -1056,6 +1057,9 @@ def prediction_step( seq = self.actor_model.generate( input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"], + position_ids=inputs["position_ids"] + if "position_ids" in inputs + else make_position_ids(inputs["attention_mask"]), generation_config=self.generation_config, synced_gpus=ShardingOption.FULL_SHARD in self.policy_trainer.args.sharding, )[0] @@ -1625,10 +1629,13 @@ def rl_step(self, rl_batch: Dict[str, paddle.Tensor], **kwargs) -> Dict[str, Any old_log_probs = old_log_probs[:, start:] old_reward_values = old_reward_values[:, start:] sequence_mask = sequence_mask[:, start:] + # position_ids is necessayr for left padding + position_ids = make_position_ids(attention_mask) policy_trainer_inputs = { "input_ids": input_ids, "attention_mask": attention_mask, + "position_ids": position_ids, "old_log_probs": old_log_probs, "reward_advantages": reward_advantages, "sequence_mask": sequence_mask, @@ -1641,6 +1648,7 @@ def rl_step(self, rl_batch: Dict[str, paddle.Tensor], **kwargs) -> Dict[str, Any value_trainer_inputs = { "input_ids": input_ids, "attention_mask": attention_mask, + "position_ids": position_ids, "old_reward_values": old_reward_values, "reward_returns": reward_returns, "sequence_mask": sequence_mask, @@ -1704,11 +1712,18 @@ def split_rl_micro_batches( def rollout(self, prompt_only_batch: PromptOnlyBatch) -> List[Dict[str, Any]]: """Rollout a batch of experiences.""" input_ids = prompt_only_batch["input_ids"] + attention_mask = prompt_only_batch["attention_mask"] + position_ids = ( + prompt_only_batch["position_ids"] + if "position_ids" in prompt_only_batch + else make_position_ids(attention_mask) + ) # NOTE: generation output of paddlenlp do not contain prompt, we should # change sequences here. 
sequences = self.actor_model.generate( input_ids=input_ids, - attention_mask=prompt_only_batch["attention_mask"], + attention_mask=attention_mask, + position_ids=position_ids, generation_config=self.generation_config, synced_gpus=ShardingOption.FULL_SHARD in self.policy_trainer.args.sharding, )[0] @@ -1716,6 +1731,10 @@ def rollout(self, prompt_only_batch: PromptOnlyBatch) -> List[Dict[str, Any]]: # sequences = [self.load_sing_gen_data(as_batches=False, use_counter=False)["input_ids"]] return [ + # TODO(guosheng): move post_rollout out to split_rl_micro_batches + # to allow infer model generate multi times consecutively and then + # convert weights, otherwise we have to convert weights multi times + # when need multi batch rollout data. self.post_rollout( input_ids, seq, @@ -1752,12 +1771,14 @@ def post_rollout( # print(text) reward_seq = sequence reward_attention_mask = attention_mask + position_ids = make_position_ids(attention_mask) # pipe model outputs a logits tensor with LMHead, while non-pipe model # outputs a tuple with logits tensor as the only one element. logits = self.actor_model( sequence, attention_mask=attention_mask, + position_ids=position_ids, # return_dict=True, ) # .logits if not isinstance(logits, paddle.Tensor): @@ -1765,6 +1786,7 @@ def post_rollout( ref_logits = self.reference_model( sequence, attention_mask=attention_mask, + position_ids=position_ids, # return_dict=True, ) # .logits if not isinstance(ref_logits, paddle.Tensor): @@ -1773,6 +1795,7 @@ def post_rollout( reward_score = self.reward_model( reward_seq, attention_mask=reward_attention_mask, + position_ids=position_ids, # return_dict=True, )[ 1 @@ -1780,6 +1803,7 @@ def post_rollout( reward_value = self.reward_critic_model( sequence, attention_mask=attention_mask, + position_ids=position_ids, # return_dict=True, )[ 0 From 16d886a594bd64315c0a55789885fa8f81a67517 Mon Sep 17 00:00:00 2001 From: Zhong Hui Date: Fri, 1 Mar 2024 16:31:56 +0800 Subject: [PATCH 18/46] fix data group. 
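For reference, the position_ids handling that this series threads through generation, evaluation and training boils down to deriving positions from the attention mask so that left padding does not shift token positions. Below is a minimal standalone sketch mirroring make_position_ids for the plain left-padding case (the reward-model source-offset special case added in a later patch is omitted):

import paddle

def make_position_ids_sketch(attention_mask):
    mask = attention_mask.cast(paddle.int64)
    # running index over attended tokens; left-padding slots become -1
    position_ids = mask.cumsum(-1) - 1
    # reset the -1 padding slots to 0 so they stay harmless
    return paddle.where(position_ids == -1, mask, position_ids)

# e.g. attention_mask [[0, 0, 1, 1, 1]] gives position_ids [[0, 0, 0, 1, 2]]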
--- examples/RLHF/ppo_config.json | 2 +- examples/RLHF/ppo_main.py | 16 ++++++++++------ examples/RLHF/ppo_trainer.py | 26 +++++++++++++++++--------- paddlenlp/transformers/model_utils.py | 27 +++++++++++++-------------- 4 files changed, 41 insertions(+), 30 deletions(-) diff --git a/examples/RLHF/ppo_config.json b/examples/RLHF/ppo_config.json index 19539bc452cc..672434a9e12f 100644 --- a/examples/RLHF/ppo_config.json +++ b/examples/RLHF/ppo_config.json @@ -7,7 +7,7 @@ "_actor_model_name_or_path": "facebook/llama-7b", "_reward_model_name_or_path": "facebook/llama-7b", "output_dir": "./ppo-sd14pp2-test", - "max_length": 512, + "max_length": 100, "temperature": 1.0, "num_return_sequences":1, "repetition_penalty": 1.0, diff --git a/examples/RLHF/ppo_main.py b/examples/RLHF/ppo_main.py index 024ad1090bf7..76af7608105b 100644 --- a/examples/RLHF/ppo_main.py +++ b/examples/RLHF/ppo_main.py @@ -22,14 +22,14 @@ # launch would unset http_proxy # export https_proxy=http://172.19.57.45:3128 -os.environ["http_proxy"] = "http://172.19.56.199:3128" -os.environ["https_proxy"] = "http://172.19.56.199:3128" +# os.environ["http_proxy"] = "http://172.19.56.199:3128" +# os.environ["https_proxy"] = "http://172.19.56.199:3128" # os.environ["http_proxy"] = "http://172.19.57.45:3128" # os.environ["https_proxy"] = "http://172.19.57.45:3128" -# os.environ["http_proxy"] = "http://10.162.37.16:8128" -# os.environ["https_proxy"] = "http://10.162.37.16:8128" +os.environ["http_proxy"] = "http://10.162.37.16:8128" +os.environ["https_proxy"] = "http://10.162.37.16:8128" os.environ["no_proxy"] = "localhost,bcebos.com" # os.environ["http_proxy"] = "agent.baidu.com:8118" @@ -282,7 +282,7 @@ def main(): if hasattr(model_config, "use_flash_attention"): model_config.use_flash_attention = model_args.use_flash_attention - model_config.num_hidden_layers = 2 + # model_config.num_hidden_layers = 2 actor_model = model_class_lm.from_pretrained( model_args.actor_model_name_or_path, @@ -297,6 +297,7 @@ def main(): config.tensor_parallel_degree = -1 config.tensor_parallel_rank = 0 actor_eval_model = AutoModelForCausalLM.from_config(config) + # actor_eval_model = AutoModelForCausalLM.from_pretrained(model_args.actor_model_name_or_path, config=config) else: actor_eval_model = None @@ -330,7 +331,7 @@ def main(): ) if hasattr(model_config, "use_flash_attention"): model_config.use_flash_attention = model_args.use_flash_attention - model_config.num_hidden_layers = 2 + # model_config.num_hidden_layers = 2 # todo if training_args.eval_mode is not None: config = copy.deepcopy(model_config) @@ -372,6 +373,9 @@ def main(): config.tensor_parallel_degree = -1 config.tensor_parallel_rank = 0 reward_critic_eval_model = AutoModelForScore.from_config(config) + # reward_critic_eval_model = AutoModelForScore.from_pretrained( + # model_args.reward_critic_model_name_or_path,config=model_config + # ) else: reward_critic_eval_model = None diff --git a/examples/RLHF/ppo_trainer.py b/examples/RLHF/ppo_trainer.py index 10d7cb72b7ec..75e5976fab41 100644 --- a/examples/RLHF/ppo_trainer.py +++ b/examples/RLHF/ppo_trainer.py @@ -564,6 +564,7 @@ def export_evaluate_model(self: Trainer, train_model, eval_model, **kwargs): with_offload = kwargs.pop("with_offload", False) train_tp_size = max(train_model.config.tensor_parallel_degree, 1) eval_tp_size = max(eval_model.config.tensor_parallel_degree, 1) + eval_tp_rank = max(eval_model.config.tensor_parallel_rank, 0) hcg = fleet.get_hybrid_communicate_group() tp_group = hcg.get_model_parallel_group() @@ -715,8 +716,15 @@ 
def create_send_recv_table(train_keys, eval_keys): eval_model, comm_group=dp_group, src_rank=dp_group.ranks[0], fuse_params=False ) - group_nums = self.args.logical_process_index // eval_tp_size + old_dp_workers = self.args.world_size // (max(sd_group.nranks, 1) * max(dp_group.nranks, 1)) + group_nums = self.args.logical_process_index // old_dp_workers * eval_tp_size + eval_tp_rank + gp = create_data_trans_group(global_rank, group_nums) + + return gp + + +def create_data_trans_group(global_rank, group_nums): all_split_table = [] paddle.distributed.all_gather_object(all_split_table, [(global_rank, group_nums)]) all_split_table = flatten_list(all_split_table) @@ -737,8 +745,8 @@ def create_send_recv_table(train_keys, eval_keys): if global_rank in ranks: group = gp - print("all_split_table:", all_split_table) - print("export", group) + # print("all_split_table:", all_split_table) + print("export_group", group) return group @@ -1533,10 +1541,12 @@ def gen_epoch_data(): self._policy_model_eval, with_offload=self.args.offload_level is not None, ) - # todo: zhui + # gp = create_data_trans_group(self.args.logical_process_index, paddle.distributed.get_rank(), self._policy_model_eval.config.tensor_parallel_degree) + # # todo: zhui self.value_trainer.export_evaluate_model( self.value_trainer.model, self._value_model_eval, with_offload=self.args.offload_level is not None ) + # self.reference_model.reload() # self.reward_model.reload() # reload_tensor_to_gpu(self.reference_model.state_dict()) @@ -2098,11 +2108,9 @@ def post_rollout( reward_seq = sequence = reward_tokenize_output["input_ids"] reward_attention_mask = attention_mask = reward_tokenize_output["attention_mask"] else: - # for text in self.tokenizer.batch_decode( - # sequence, - # skip_special_tokens=True - # ): - # print(text) + # actor_model_in_use gen + for text in self.tokenizer.batch_decode(sequence, skip_special_tokens=True): + print(text) reward_seq = sequence reward_attention_mask = attention_mask diff --git a/paddlenlp/transformers/model_utils.py b/paddlenlp/transformers/model_utils.py index 1b85dcc06780..79f28ca65e6d 100644 --- a/paddlenlp/transformers/model_utils.py +++ b/paddlenlp/transformers/model_utils.py @@ -2256,20 +2256,19 @@ def from_pretrained(cls, pretrained_model_name_or_path, *args, **kwargs): if "quant_weight" in key: quantization_linear_list.append(key[:-13]) - model, missing_keys, unexpected_keys, mismatched_keys = model, [], [], [] - # model, missing_keys, unexpected_keys, mismatched_keys = cls._load_pretrained_model( - # model=model, - # state_dict=state_dict, - # loaded_keys=loaded_state_dict_keys, - # resolved_archive_file=resolved_sharded_files if is_sharded else resolved_archive_file, - # pretrained_model_name_or_path=pretrained_model_name_or_path, - # config=config, - # ignore_mismatched_sizes=ignore_mismatched_sizes, - # low_cpu_mem_usage=low_cpu_mem_usage, - # dtype=dtype, - # keep_in_fp32_modules=keep_in_fp32_modules, - # quantization_linear_list=quantization_linear_list, - # ) + model, missing_keys, unexpected_keys, mismatched_keys = cls._load_pretrained_model( + model=model, + state_dict=state_dict, + loaded_keys=loaded_state_dict_keys, + resolved_archive_file=resolved_sharded_files if is_sharded else resolved_archive_file, + pretrained_model_name_or_path=pretrained_model_name_or_path, + config=config, + ignore_mismatched_sizes=ignore_mismatched_sizes, + low_cpu_mem_usage=low_cpu_mem_usage, + dtype=dtype, + keep_in_fp32_modules=keep_in_fp32_modules, + quantization_linear_list=quantization_linear_list, + ) 
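# Illustrative note (not part of the patch): the generation data flow introduced in
# this series is, roughly,
#
#   gp = policy_trainer.export_evaluate_model(train_model, eval_model, ...)  # data-transfer comm group
#   local_prompts = data_group_split(prompt_only_batch, group=gp)   # each rank keeps its 1/nranks shard
#   local_rollouts = self.split_rl_micro_batches(local_prompts)     # generate on the local shard only
#   rl_batches = data_group_merge(local_rollouts, group=gp)         # all-gather the shards back
#
# i.e. prompts are split across the group built by create_data_trans_group, rolled out
# with the exported eval model, and the resulting batches are merged again for training.
# Names are the ones defined earlier in this patch; the call sites live in gen_epoch_data.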
# load generation_config.json if model.can_generate() and pretrained_model_name_or_path is not None: From 17863578ab09c83e234a2196bb67c265c76f287d Mon Sep 17 00:00:00 2001 From: Zhong Hui Date: Tue, 5 Mar 2024 15:21:56 +0800 Subject: [PATCH 19/46] add tp rank guard --- examples/RLHF/ppo_trainer.py | 80 +++++++++++++++++++++++++++++++++++- 1 file changed, 79 insertions(+), 1 deletion(-) diff --git a/examples/RLHF/ppo_trainer.py b/examples/RLHF/ppo_trainer.py index 75e5976fab41..3ff00b484ab6 100644 --- a/examples/RLHF/ppo_trainer.py +++ b/examples/RLHF/ppo_trainer.py @@ -17,6 +17,7 @@ import itertools import math import os +import sys import time from contextlib import contextmanager from typing import Any, Callable, Dict, List, Optional, Tuple, Union @@ -550,13 +551,90 @@ def data_group_merge(tensors, group): elif isinstance(tensors, paddle.Tensor): tensor_list = [] print("Mergeing ", tensors) - paddle.distributed.all_gather(tensor_list, tensors, group=group) + # paddle.distributed.all_gather(tensor_list, tensors, group=group) + all_gather_nd(tensor_list, tensors, group=group) return paddle.concat(tensor_list) else: logger.warning(f"Can't parse for type {type(tensors)}") return tensors +# https://stackoverflow.com/questions/12594148/skipping-execution-of-with-block +class SkipWithBlock(Exception): + pass + + +class SkipContextManager: + def __init__(self, skip): + self.skip = skip + + def __enter__(self): + if self.skip: + sys.settrace(lambda *args, **keys: None) + frame = sys._getframe(1) + frame.f_trace = self.trace + + def trace(self, frame, event, arg): + raise SkipWithBlock() + + def __exit__(self, type, value, traceback): + if type is None: + return # No exception + if issubclass(type, SkipWithBlock): + return True # Suppress special SkipWithBlock exception + + +# with SkipContextManager(skip=True): +# print('In the with block') # Won't be called +# print('Out of the with block') + +# with SkipContextManager(skip=tp_group.rank!=0): +# print('In the with block') # Won't be called +# dist.barrier() + + +def all_gather_nd(tensor_list, tensor, group=None): + """ + Gathers tensor arrays of different lengths in a list. + The length dimension is 0. This supports any number of extra dimensions in the tensors. + All the other dimensions should be equal between the tensors. + + Args: + tensor (Tensor): Tensor to be broadcast from current process. 
+ + Returns: + (Tensor): output list of tensors that can be of different sizes + """ + world_size = group.nranks + local_size = paddle.to_tensor(tensor.shape, place=tensor.place) + all_sizes = [paddle.zeros_like(local_size) for _ in range(world_size)] + dist.all_gather(all_sizes, local_size, group=group) + + # max_length = max(size[0] for size in all_sizes) + + # length_diff = max_length.item() - local_size[0].item() + # if length_diff: + # pad_size = (length_diff, *tensor.size()[1:]) + # padding = paddle.zeros(pad_size, place=tensor.place(), dtype=tensor.dtype) + # tensor = padle.concat((tensor, padding)) + + max_length = max(size[-1] for size in all_sizes) + + length_diff = max_length.item() - local_size[-1].item() + if length_diff: + pad_size = (*tensor.shape[:-1], length_diff) + padding = paddle.zeros(pad_size, dtype=tensor.dtype) + tensor = paddle.concat([tensor, padding], axis=-1) + + all_tensors_padded = [] + dist.all_gather(all_tensors_padded, tensor, group=group) + # all_tensors = [] + for tensor_, size in zip(all_tensors_padded, all_sizes): + pass + tensor_list.append(tensor_[..., : size[-1]]) + return tensor_list + + def export_evaluate_model(self: Trainer, train_model, eval_model, **kwargs): if eval_model is None: return None From 3bc48cb8518d205129e88e2dd426b9b37af9390e Mon Sep 17 00:00:00 2001 From: whucsgs Date: Wed, 6 Mar 2024 05:47:19 +0000 Subject: [PATCH 20/46] Support rollout label data both with target length or source+target length. --- examples/RLHF/models/model_pp.py | 27 ++++- examples/RLHF/models/ppo_model_utils.py | 40 ++++++- examples/RLHF/ppo_trainer.py | 150 +++++++++++++++++++++++- 3 files changed, 205 insertions(+), 12 deletions(-) diff --git a/examples/RLHF/models/model_pp.py b/examples/RLHF/models/model_pp.py index 65c93e0401a0..f955ad4bc6a3 100644 --- a/examples/RLHF/models/model_pp.py +++ b/examples/RLHF/models/model_pp.py @@ -169,9 +169,24 @@ def _fwd(self, output, label_info): @paddle.no_grad() -def make_position_ids(attention_mask): +def make_position_ids(attention_mask, source=None): + attention_mask_bool = attention_mask attention_mask = attention_mask.cast(paddle.int64) position_ids = attention_mask.cumsum(-1) - 1 + # Make padding positions in source be 0, since reward model use position_ids + # plus with padding size (number of 0s) in source to calculate end offsets. + # It does not matter when source is left padding and target is right padding + # which is the output of non-FuseMT generation, while when using FuseMT whose + # output is right padding source and right padding target, we have to set + # padding positions in source be 0 to make compatible. + if source is not None: + src_len = position_ids[:, source.shape[-1] - 1].unsqueeze(-1) + position_ids = paddle.where( + paddle.logical_and(paddle.logical_not(attention_mask_bool), position_ids <= src_len), + attention_mask, + position_ids, + ) + return position_ids position_ids = paddle.where(position_ids == -1, attention_mask, position_ids) return position_ids @@ -278,7 +293,9 @@ def _prepare_pipeline_inputs_func(self, inputs): max_len = max([x.shape[-1] for x in inputs_batch["input_ids"]]) pad_len = [max_len - x.shape[-1] for x in inputs_batch["input_ids"]] for key in src_tgt_keys: - padding_value = self._ignore_index if key == "labels" else 0 + # Do not pad position_ids with 0 since 0s in position_ids has special + # usage in reward model. We use 1 to pad. 
+ padding_value = self._ignore_index if key == "labels" else 1 if key == "position_ids" else 0 inputs_batch[key] = pad_batches_inputs(inputs_batch[key], padding_value, pad_len=pad_len) # 2. For old_log_probs/reward_advantages/sequence_mask (target) padding: # hard to pad acorss batches, think in some cases one batch might have the @@ -387,7 +404,9 @@ def _prepare_pipeline_inputs_func(self, inputs): max_len = max([x.shape[-1] for x in inputs_batch["input_ids"]]) pad_len = [max_len - x.shape[-1] for x in inputs_batch["input_ids"]] for key in src_tgt_keys: - padding_value = self._ignore_index if key == "labels" else 0 + # Do not pad position_ids with 0 since 0s in position_ids has special + # usage in reward model. We use 1 to pad. + padding_value = self._ignore_index if key == "labels" else 1 if key == "position_ids" else 0 inputs_batch[key] = pad_batches_inputs(inputs_batch[key], padding_value, pad_len=pad_len) # 2. For old_reward_values/reward_returns/sequence_mask (target) padding: tgt_keys = ["old_reward_values", "reward_returns", "sequence_mask"] @@ -396,7 +415,7 @@ def _prepare_pipeline_inputs_func(self, inputs): inputs_batch[key] = pad_batches_inputs(inputs_batch[key], padding_value, pad_len=pad_len) # for key, value in inputs_batch.items(): # inputs_batch[key] = pad_batches_inputs(value, padding_value=0) - # if "position_ids" not in inputs: + # if "position_ids" not in inputs[0]: # inputs_batch["position_ids"] = [ # make_position_ids(attention_mask) for attention_mask in inputs_batch["attention_mask"] # ] diff --git a/examples/RLHF/models/ppo_model_utils.py b/examples/RLHF/models/ppo_model_utils.py index cb605ca29a62..4d8dd8f7d903 100644 --- a/examples/RLHF/models/ppo_model_utils.py +++ b/examples/RLHF/models/ppo_model_utils.py @@ -123,10 +123,22 @@ def forward(self, logits, input_ids, old_log_probs, reward_advantages, sequence_ # batches of accumulation steps, they just need to pad as prompt+target # fields such as input_ids. 
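# Illustrative sketch (not part of the patch): `gather_log_probabilities`, used just
# below, is assumed to pick each label token's log-probability out of the logits;
# the callers do the shift-by-one themselves (logits[:, :-1] with input_ids[:, 1:]).
import paddle
import paddle.nn.functional as F

def gather_log_probabilities_sketch(logits, labels):
    log_probs = F.log_softmax(logits, axis=-1)        # [batch, seq, vocab]
    return paddle.take_along_axis(                    # select the label ids
        log_probs, labels.unsqueeze(-1), axis=-1
    ).squeeze(-1)                                     # [batch, seq]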
log_probs = gather_log_probabilities(logits[:, :-1], input_ids[:, 1:]) - if start is not None: - old_log_probs = old_log_probs[:, start:] - sequence_mask = sequence_mask[:, start:] - log_probs = log_probs[:, -old_log_probs.shape[1] :] + if log_probs.shape[1] == old_log_probs.shape[1]: + # labels (old_log_probs, reward_advantages, sequence_mask) has + # src+tgt-1 length, valid length is determined by sequence_mask + pass + elif log_probs.shape[1] < old_log_probs.shape[1]: + # labels (old_log_probs, reward_advantages, sequence_mask) has + # src+tgt length and the last one is a padding to be consistent + # with input_ids + assert log_probs.shape[1] == old_log_probs.shape[1] - 1 + log_probs = paddle.concat([log_probs, paddle.zeros([log_probs.shape[0], 1], dtype=log_probs.dtype)], -1) + else: + # labels (old_log_probs, reward_advantages, sequence_mask) has tgt length + log_probs = log_probs[:, -old_log_probs.shape[1] :] + # if start is not None: + # old_log_probs = old_log_probs[:, start:] + # sequence_mask = sequence_mask[:, start:] actor_loss = self.actor_loss_fn( log_probs, old_log_probs, @@ -197,12 +209,28 @@ def forward( ): # old_reward_values, reward_returns, sequence_mask = label_info reward_values = reward_values if isinstance(reward_values, paddle.Tensor) else reward_values[0] + reward_values = reward_values.squeeze(axis=-1)[:, :-1] + if reward_values.shape[1] == old_reward_values.shape[1]: + # labels (old_reward_values, reward_returns, sequence_mask) has + # src+tgt-1 length, valid length is determined by sequence_mask + pass + elif reward_values.shape[1] < old_reward_values.shape[1]: + # labels (old_reward_values, reward_returns, sequence_mask) has + # src+tgt length and the last one is a padding to be consistent + # with input_ids + assert reward_values.shape[1] == old_reward_values.shape[1] - 1 + reward_values = paddle.concat( + [reward_values, paddle.zeros([reward_values.shape[0], 1], dtype=reward_values.dtype)], -1 + ) + else: + # labels (old_reward_values, reward_returns, sequence_mask) has + # tgt length + reward_values = reward_values[:, -old_reward_values.shape[1] :] # if start is not None: # old_reward_values = old_reward_values[:, start:] # sequence_mask = sequence_mask[:, start:] - reward_values = reward_values.squeeze(axis=-1)[:, :-1] reward_critic_loss = self.critic_loss_fn( - reward_values[:, -old_reward_values.shape[1] :], + reward_values, old_reward_values, reward_returns, sequence_mask, diff --git a/examples/RLHF/ppo_trainer.py b/examples/RLHF/ppo_trainer.py index 683b4cfe4066..a168d3222459 100644 --- a/examples/RLHF/ppo_trainer.py +++ b/examples/RLHF/ppo_trainer.py @@ -1572,6 +1572,7 @@ def get_advantages_and_returns( rewards: paddle.Tensor, sequence_mask: paddle.Tensor, start: int, + use_tgt_len_return: bool = True, ) -> Tuple[paddle.Tensor, paddle.Tensor]: """Compute advantages and returns using Generalized Advantage Estimation (GAE).""" # Modified from https://github.com/CarperAI/trlx/blob/main/trlx/models/modeling_ppo.py @@ -1580,6 +1581,16 @@ def get_advantages_and_returns( values = values * sequence_mask rewards = rewards * sequence_mask length = rewards.shape[-1] + if use_tgt_len_return and start > 0: + # consistent with Beaver + # values length is src+tgt-1, start is src-1, return length is tgt + pass + elif use_tgt_len_return: + # values length is tgt, start is 0, return length is tgt + assert start == 0 + else: + # values length is src+tgt-1, start is src-1, return length is src+tgt-1 + pass for t in reversed(range(start, length)): # pylint: 
disable=invalid-name next_values = values[:, t + 1] if t < length - 1 else 0.0 delta = rewards[:, t] + self.gamma * next_values - values[:, t] @@ -1587,9 +1598,14 @@ def get_advantages_and_returns( advantages_reversed.append(last_gae_lambda) advantages = paddle.stack(advantages_reversed[::-1], axis=1) returns = advantages + values[:, start:] + if not use_tgt_len_return: + advantages = paddle.concat( + [paddle.zeros([advantages.shape[0], start], dtype=advantages.dtype), advantages], -1 + ) + returns = paddle.concat([paddle.zeros([returns.shape[0], start], dtype=returns.dtype), returns], -1) return advantages.detach(), returns - def rl_step(self, rl_batch: Dict[str, paddle.Tensor], **kwargs) -> Dict[str, Any]: + def _rl_step(self, rl_batch: Dict[str, paddle.Tensor], **kwargs) -> Dict[str, Any]: prompt = rl_batch["prompt"] old_log_probs = rl_batch["log_probs"] ref_log_probs = rl_batch["ref_log_probs"] @@ -1771,6 +1787,10 @@ def post_rollout( # print(text) reward_seq = sequence reward_attention_mask = attention_mask + # position_ids is necessary for non-right padding + # If using right padding source + left padding target, make padding positions + # in source be 0, since reward model use position_ids plus with padding size + # (number of 0s) in source to calculate end offsets. position_ids = make_position_ids(attention_mask) # pipe model outputs a logits tensor with LMHead, while non-pipe model @@ -1815,15 +1835,141 @@ def post_rollout( reward_value = reward_value[:, :-1] log_probs = gather_log_probabilities(logits[:, :-1], sequence[:, 1:]) ref_log_probs = gather_log_probabilities(ref_logits[:, :-1], sequence[:, 1:]) - return { + rollout_data = { "prompt": prompt, "input_ids": sequence, + "position_ids": position_ids, "attention_mask": attention_mask, "rewards": reward_score, "reward_values": reward_value, "log_probs": log_probs, "ref_log_probs": ref_log_probs, } + rollout_data = self.normalize_data(rollout_data, use_tgt_len_value=False) + return rollout_data + + @paddle.no_grad() + def normalize_data( + self, + rl_batch: Dict[str, paddle.Tensor], + use_tgt_len_value: bool = False, + ) -> Dict[str, Any]: + """ + data dispatch comm among devices needs padding, while the lengths of + all data fields are different and related, and it's hard to pad. + """ + prompt = rl_batch["prompt"] # length: src + attention_mask = rl_batch["attention_mask"] # length: src + tgt + old_log_probs = rl_batch["log_probs"] # length: src + tgt -1 + ref_log_probs = rl_batch["ref_log_probs"] # length: src + tgt -1 + rewards = rl_batch["rewards"] # length: 1 + old_reward_values = rl_batch["reward_values"] # length: src + tgt -1 + + # Beaver uses label data with target length, while we do not slice from + # inputs and use label data with target length: + # 1. Sometimes we cannot use label data with target length, mostly because + # it is hard to pad acorss batches. Think in some cases one batch might + # have the longest prompt+target length but the shortest target lengh, which + # might cause mismatch between inputs with prompt+target length and labels + # with target length. Padding acorss batches is needed in PP and data comm. + # 2. Additionally, when using flash_attn with casual mask and right padding + # we cannot use label data with target length. 
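# For reference, the recursion implemented by get_advantages_and_returns in this file
# is standard GAE, computed right-to-left over the span selected by start/sequence_mask:
#     delta_t = r_t + gamma * V_{t+1} - V_t
#     A_t     = delta_t + gamma * lambda * A_{t+1}
#     R_t     = A_t + V_t            (the reward_returns used by the value loss)
# use_tgt_len_return only controls whether the leading source positions are kept
# (zero-filled) or sliced away from the returned advantages/returns.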
+ start = prompt.shape[-1] - 1 + # sequence_mask is for label masking, make source be masked out + # clone to avoid to change attention_mask + sequence_mask = attention_mask[:, 1:].clone() # length: src + tgt -1 + sequence_mask[:, :start] = False + if use_tgt_len_value: + ref_log_probs = ref_log_probs[:, start:] + old_log_probs = old_log_probs[:, start:] + old_reward_values = old_reward_values[:, start:] + sequence_mask = sequence_mask[:, start:] + old_rewards = self.add_kl_divergence_regularization( + None, # prompt, + old_log_probs, + ref_log_probs, + rewards, + sequence_mask, + ) # length: tgt if use_tgt_len_value src + tgt -1 + reward_advantages, reward_returns = self.get_advantages_and_returns( + old_reward_values, + old_rewards, + sequence_mask, + start=0 if use_tgt_len_value else start, + use_tgt_len_return=use_tgt_len_value, + ) # length: tgt if use_tgt_len_value src + tgt -1 + # metric + kl_divergence = ((old_log_probs - ref_log_probs) * sequence_mask).sum(axis=-1).mean() + mean_generated_length = sequence_mask.cast(paddle.float32).sum(axis=-1).mean() + max_generated_length = sequence_mask.cast(paddle.float32).sum(axis=-1).max() + rewards = rewards.mean() + rl_batch.update( + { + "log_probs": old_log_probs, + "reward_values": old_reward_values, + "reward_advantages": reward_advantages, + "reward_returns": reward_returns, + "sequence_mask": sequence_mask, + "kl_divergence": kl_divergence, + "rewards": rewards, + "mean_generated_length": mean_generated_length, + "max_generated_length": max_generated_length, + } + ) + # pop out to reduce data dispatch comm overhead + rl_batch.pop("prompt") + rl_batch.pop("ref_log_probs") + return rl_batch + + def rl_step(self, rl_batch: Dict[str, paddle.Tensor], **kwargs) -> Dict[str, Any]: + # inputs shared by policy and value trainer + input_ids = rl_batch["input_ids"] # length: src+tgt + attention_mask = rl_batch["attention_mask"] # length: src+tgt + position_ids = rl_batch["position_ids"] # length: src+tgt + sequence_mask = rl_batch["sequence_mask"] # length: src+tgt(-1) + # inputs used by policy trainer + old_log_probs = rl_batch["log_probs"] # length: src+tgt(-1) + reward_advantages = rl_batch["reward_advantages"] # length: src+tgt(-1) + # inputs used by value trainer + old_reward_values = rl_batch["reward_values"] # length: src+tgt(-1) + reward_returns = rl_batch["reward_returns"] # length: src+tgt(-1) + # metrics. 
Now it depends on sequence_mask instead of prompt length and + # thus would not be bothered by prompt padding, maybe move these metrics + # calculation to here + rewards = rl_batch["rewards"] + kl_divergence = rl_batch["kl_divergence"] + mean_generated_length = rl_batch["mean_generated_length"] + max_generated_length = rl_batch["max_generated_length"] + + policy_trainer_inputs = { + "input_ids": input_ids, + "attention_mask": attention_mask, + "position_ids": position_ids, + "old_log_probs": old_log_probs, + "reward_advantages": reward_advantages, + "sequence_mask": sequence_mask, + } + kwargs = self.policy_trainer.full_training_step(policy_trainer_inputs, **kwargs) + + value_trainer_inputs = { + "input_ids": input_ids, + "attention_mask": attention_mask, + "position_ids": position_ids, + "old_reward_values": old_reward_values, + "reward_returns": reward_returns, + "sequence_mask": sequence_mask, + } + kwargs = self.value_trainer.full_training_step(value_trainer_inputs, **kwargs) + return { + "train/actor_loss": kwargs["actor_loss"], + "train/reward_critic_loss": kwargs["reward_critic_loss"], + "train/reward": rewards, + "train/kl_divergence": kl_divergence, + "train/mean_generated_length": mean_generated_length, + "train/max_generated_length": max_generated_length, + "train/actor_lr": self.policy_trainer._get_learning_rate(), + "train/reward_critic_lr": self.value_trainer._get_learning_rate(), + }, kwargs # @paddle.no_grad() # def post_rollout( From 1b5086977b646b683724670ae8b9a30b8cb32097 Mon Sep 17 00:00:00 2001 From: whucsgs Date: Thu, 7 Mar 2024 08:10:37 +0000 Subject: [PATCH 21/46] Move metric calculation to rl_step to avoid comm. --- examples/RLHF/ppo_trainer.py | 27 +++++++++++---------------- 1 file changed, 11 insertions(+), 16 deletions(-) diff --git a/examples/RLHF/ppo_trainer.py b/examples/RLHF/ppo_trainer.py index a168d3222459..ca923adf779e 100644 --- a/examples/RLHF/ppo_trainer.py +++ b/examples/RLHF/ppo_trainer.py @@ -1898,11 +1898,7 @@ def normalize_data( start=0 if use_tgt_len_value else start, use_tgt_len_return=use_tgt_len_value, ) # length: tgt if use_tgt_len_value src + tgt -1 - # metric - kl_divergence = ((old_log_probs - ref_log_probs) * sequence_mask).sum(axis=-1).mean() - mean_generated_length = sequence_mask.cast(paddle.float32).sum(axis=-1).mean() - max_generated_length = sequence_mask.cast(paddle.float32).sum(axis=-1).max() - rewards = rewards.mean() + rl_batch.update( { "log_probs": old_log_probs, @@ -1910,15 +1906,12 @@ def normalize_data( "reward_advantages": reward_advantages, "reward_returns": reward_returns, "sequence_mask": sequence_mask, - "kl_divergence": kl_divergence, + "ref_log_probs": ref_log_probs, "rewards": rewards, - "mean_generated_length": mean_generated_length, - "max_generated_length": max_generated_length, } ) # pop out to reduce data dispatch comm overhead rl_batch.pop("prompt") - rl_batch.pop("ref_log_probs") return rl_batch def rl_step(self, rl_batch: Dict[str, paddle.Tensor], **kwargs) -> Dict[str, Any]: @@ -1933,13 +1926,6 @@ def rl_step(self, rl_batch: Dict[str, paddle.Tensor], **kwargs) -> Dict[str, Any # inputs used by value trainer old_reward_values = rl_batch["reward_values"] # length: src+tgt(-1) reward_returns = rl_batch["reward_returns"] # length: src+tgt(-1) - # metrics. 
Now it depends on sequence_mask instead of prompt length and - # thus would not be bothered by prompt padding, maybe move these metrics - # calculation to here - rewards = rl_batch["rewards"] - kl_divergence = rl_batch["kl_divergence"] - mean_generated_length = rl_batch["mean_generated_length"] - max_generated_length = rl_batch["max_generated_length"] policy_trainer_inputs = { "input_ids": input_ids, @@ -1960,6 +1946,15 @@ def rl_step(self, rl_batch: Dict[str, paddle.Tensor], **kwargs) -> Dict[str, Any "sequence_mask": sequence_mask, } kwargs = self.value_trainer.full_training_step(value_trainer_inputs, **kwargs) + + # metric + rewards = rl_batch["rewards"] + rewards = rewards.mean() + ref_log_probs = rl_batch["ref_log_probs"] + kl_divergence = ((old_log_probs - ref_log_probs) * sequence_mask).sum(axis=-1).mean() + mean_generated_length = sequence_mask.cast(paddle.float32).sum(axis=-1).mean() + max_generated_length = sequence_mask.cast(paddle.float32).sum(axis=-1).max() + return { "train/actor_loss": kwargs["actor_loss"], "train/reward_critic_loss": kwargs["reward_critic_loss"], From bc80256070a7347697268d80b5df73f0258b65aa Mon Sep 17 00:00:00 2001 From: Zhong Hui Date: Thu, 7 Mar 2024 17:11:50 +0800 Subject: [PATCH 22/46] fix pad --- examples/RLHF/ppo_trainer.py | 38 ++++++++++++++++++++++++++++++++---- 1 file changed, 34 insertions(+), 4 deletions(-) diff --git a/examples/RLHF/ppo_trainer.py b/examples/RLHF/ppo_trainer.py index 6f34370ad407..c3d0fdd05bdc 100644 --- a/examples/RLHF/ppo_trainer.py +++ b/examples/RLHF/ppo_trainer.py @@ -553,13 +553,27 @@ def data_group_merge(tensors, group): tensor_list = [] print("Mergeing ", tensors) # paddle.distributed.all_gather(tensor_list, tensors, group=group) - all_gather_nd(tensor_list, tensors, group=group) + all_gather_nd(tensor_list, tensors, group=group, padded=True) return paddle.concat(tensor_list) else: logger.warning(f"Can't parse for type {type(tensors)}") return tensors +def repad_rl_batches(batches, input_lengths): + if "position_ids" in batches: + v = batches["position_ids"] + for x in range(v.shape[0]): + v[x, input_lengths[x] :] = 1 + batches["position_ids"] = v + for key in list(batches.keys()): + if batches[key].shape == input_lengths.shape: + print(key) + batches[key] = batches[key].mean() + + return batches + + # https://stackoverflow.com/questions/12594148/skipping-execution-of-with-block class SkipWithBlock(Exception): pass @@ -594,7 +608,7 @@ def __exit__(self, type, value, traceback): # dist.barrier() -def all_gather_nd(tensor_list, tensor, group=None): +def all_gather_nd(tensor_list, tensor, group=None, padded=False): """ Gathers tensor arrays of different lengths in a list. The length dimension is 0. This supports any number of extra dimensions in the tensors. 
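# A minimal sketch of the gather-with-padding scheme that the padded=True path
# relies on (gather_varlen_last_dim is an illustrative helper, not the patched
# function; it assumes the gathered tensors differ only in their last dimension
# and that `group` is an initialized paddle.distributed communication group):
import paddle
import paddle.distributed as dist

def gather_varlen_last_dim(tensor, group):
    # 1. exchange shapes so every rank knows the longest last dimension
    sizes = []
    dist.all_gather(sizes, paddle.to_tensor(tensor.shape), group=group)
    max_len = int(max(int(s[-1]) for s in sizes))
    # 2. pad the local tensor up to max_len, then gather the padded tensors
    pad = max_len - tensor.shape[-1]
    if pad > 0:
        filler = paddle.zeros(tensor.shape[:-1] + [pad], dtype=tensor.dtype)
        tensor = paddle.concat([tensor, filler], axis=-1)
    gathered = []
    dist.all_gather(gathered, tensor, group=group)
    # 3. keep the padded results so callers can concat them directly (as
    #    data_group_merge does), or slice gathered[i][..., : sizes[i][-1]] back
    return gathered, sizes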
@@ -606,6 +620,11 @@ def all_gather_nd(tensor_list, tensor, group=None): Returns: (Tensor): output list of tensors that can be of different sizes """ + if len(tensor.shape) == 0: + tensor = tensor.reshape([1]) + dist.all_gather(tensor_list, tensor, group=group) + return tensor_list + world_size = group.nranks local_size = paddle.to_tensor(tensor.shape, place=tensor.place) all_sizes = [paddle.zeros_like(local_size) for _ in range(world_size)] @@ -630,8 +649,11 @@ def all_gather_nd(tensor_list, tensor, group=None): all_tensors_padded = [] dist.all_gather(all_tensors_padded, tensor, group=group) # all_tensors = [] + if padded: + tensor_list.extend(all_tensors_padded) + return all_tensors_padded + for tensor_, size in zip(all_tensors_padded, all_sizes): - pass tensor_list.append(tensor_[..., : size[-1]]) return tensor_list @@ -1654,7 +1676,15 @@ def gen_epoch_data(): print("rl_batches =", rl_batches) # todo, merge data - rl_batches = data_group_merge(rl_batches, group=gp) + if gp is not None: + input_ids_length = rl_batches[0]["input_ids"].shape[-1] + rl_batches[0]["input_ids_length"] = paddle.to_tensor( + [input_ids_length] * rl_batches[0]["input_ids"].shape[0], dtype="int64" + ) + rl_batches = data_group_merge(rl_batches, group=gp) + input_ids_length_batchs = rl_batches[0].pop("input_ids_length") + rl_batches[0] = repad_rl_batches(rl_batches[0], input_ids_length_batchs) + paddle.device.cuda.empty_cache() # # 数据造好, 开始训练 From b3f22c2de4ad14b321315bd1200f6cfe7cacc21a Mon Sep 17 00:00:00 2001 From: Zhong Hui Date: Thu, 7 Mar 2024 19:33:26 +0800 Subject: [PATCH 23/46] fix create group. --- examples/RLHF/ppo_trainer.py | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/examples/RLHF/ppo_trainer.py b/examples/RLHF/ppo_trainer.py index f62a03de5559..e1a063d42979 100644 --- a/examples/RLHF/ppo_trainer.py +++ b/examples/RLHF/ppo_trainer.py @@ -531,7 +531,7 @@ def data_group_split(tensors, group): new_dict[k] = data_group_split(v, group) return new_dict elif isinstance(tensors, paddle.Tensor): - print("Spliting ", tensors) + print("Spliting ", tensors.shape, tensors.dtype) return tensors.split(group.nranks)[group.rank] else: logger.warning(f"Can't parse for type {type(tensors)}") @@ -551,7 +551,7 @@ def data_group_merge(tensors, group): return new_dict elif isinstance(tensors, paddle.Tensor): tensor_list = [] - print("Mergeing ", tensors) + print("Mergeing ", tensors.shape, tensors.dtype) # paddle.distributed.all_gather(tensor_list, tensors, group=group) all_gather_nd(tensor_list, tensors, group=group, padded=True) return paddle.concat(tensor_list) @@ -567,8 +567,8 @@ def repad_rl_batches(batches, input_lengths): v[x, input_lengths[x] :] = 1 batches["position_ids"] = v for key in list(batches.keys()): - if batches[key].shape == input_lengths.shape: - print(key) + if batches[key].shape[0] != input_lengths.shape[0]: + print("set mean", key, batches[key]) batches[key] = batches[key].mean() return batches @@ -820,9 +820,10 @@ def create_send_recv_table(train_keys, eval_keys): old_dp_workers = self.args.world_size // (max(sd_group.nranks, 1) * max(dp_group.nranks, 1)) group_nums = self.args.logical_process_index // old_dp_workers * eval_tp_size + eval_tp_rank - gp = create_data_trans_group(global_rank, group_nums) + if self._policy_model_eval_group is None: + self._policy_model_eval_group = create_data_trans_group(global_rank, group_nums) - return gp + return None def create_data_trans_group(global_rank, group_nums): @@ -1223,6 +1224,7 @@ def __init__( (policy_model, 
reference_model, reward_model, value_model, policy_model_eval, value_model_eval) = model self._policy_model_eval = policy_model_eval + self._policy_model_eval_group = None self._value_model_eval = value_model_eval # policy_tokenizer and value_tokenizer should be same @@ -1640,11 +1642,12 @@ def gen_epoch_data(): offload_tensor_to_cpu(self.policy_trainer.optimizer.state_dict()) offload_tensor_to_cpu(self.value_trainer.optimizer.state_dict()) - gp = self.policy_trainer.export_evaluate_model( + self.policy_trainer.export_evaluate_model( self.policy_trainer.model, self._policy_model_eval, with_offload=self.args.offload_level is not None, ) + gp = self._policy_model_eval_group # gp = create_data_trans_group(self.args.logical_process_index, paddle.distributed.get_rank(), self._policy_model_eval.config.tensor_parallel_degree) # # todo: zhui self.value_trainer.export_evaluate_model( From 8c7e6128e09bf9c335319b6e0bf5a986bc7d7bdc Mon Sep 17 00:00:00 2001 From: Zhong Hui Date: Thu, 7 Mar 2024 20:31:38 +0800 Subject: [PATCH 24/46] no print --- examples/RLHF/ppo_config.json | 2 +- examples/RLHF/ppo_trainer.py | 25 ++++++++++++------------- 2 files changed, 13 insertions(+), 14 deletions(-) diff --git a/examples/RLHF/ppo_config.json b/examples/RLHF/ppo_config.json index 672434a9e12f..19539bc452cc 100644 --- a/examples/RLHF/ppo_config.json +++ b/examples/RLHF/ppo_config.json @@ -7,7 +7,7 @@ "_actor_model_name_or_path": "facebook/llama-7b", "_reward_model_name_or_path": "facebook/llama-7b", "output_dir": "./ppo-sd14pp2-test", - "max_length": 100, + "max_length": 512, "temperature": 1.0, "num_return_sequences":1, "repetition_penalty": 1.0, diff --git a/examples/RLHF/ppo_trainer.py b/examples/RLHF/ppo_trainer.py index e1a063d42979..1c38c26807fb 100644 --- a/examples/RLHF/ppo_trainer.py +++ b/examples/RLHF/ppo_trainer.py @@ -531,7 +531,7 @@ def data_group_split(tensors, group): new_dict[k] = data_group_split(v, group) return new_dict elif isinstance(tensors, paddle.Tensor): - print("Spliting ", tensors.shape, tensors.dtype) + # print("Spliting ", tensors.shape, tensors.dtype) return tensors.split(group.nranks)[group.rank] else: logger.warning(f"Can't parse for type {type(tensors)}") @@ -551,7 +551,7 @@ def data_group_merge(tensors, group): return new_dict elif isinstance(tensors, paddle.Tensor): tensor_list = [] - print("Mergeing ", tensors.shape, tensors.dtype) + # print("Mergeing ", tensors.shape, tensors.dtype) # paddle.distributed.all_gather(tensor_list, tensors, group=group) all_gather_nd(tensor_list, tensors, group=group, padded=True) return paddle.concat(tensor_list) @@ -568,7 +568,7 @@ def repad_rl_batches(batches, input_lengths): batches["position_ids"] = v for key in list(batches.keys()): if batches[key].shape[0] != input_lengths.shape[0]: - print("set mean", key, batches[key]) + # print("set mean", key, batches[key]) batches[key] = batches[key].mean() return batches @@ -678,7 +678,7 @@ def export_evaluate_model(self: Trainer, train_model, eval_model, **kwargs): train_state_dict = train_model.state_dict() eval_state_dict = eval_model.state_dict() - print(sd_group) + # print(sd_group) if dp_group.rank <= 0 and sd_group.rank <= 0: train_pp_size = pp_group.nranks @@ -820,7 +820,7 @@ def create_send_recv_table(train_keys, eval_keys): old_dp_workers = self.args.world_size // (max(sd_group.nranks, 1) * max(dp_group.nranks, 1)) group_nums = self.args.logical_process_index // old_dp_workers * eval_tp_size + eval_tp_rank - if self._policy_model_eval_group is None: + if not hasattr(self, 
"_policy_model_eval_group") or self._policy_model_eval_group is None: self._policy_model_eval_group = create_data_trans_group(global_rank, group_nums) return None @@ -848,7 +848,7 @@ def create_data_trans_group(global_rank, group_nums): group = gp # print("all_split_table:", all_split_table) - print("export_group", group) + # print("export_group", group) return group @@ -1224,7 +1224,6 @@ def __init__( (policy_model, reference_model, reward_model, value_model, policy_model_eval, value_model_eval) = model self._policy_model_eval = policy_model_eval - self._policy_model_eval_group = None self._value_model_eval = value_model_eval # policy_tokenizer and value_tokenizer should be same @@ -1647,7 +1646,7 @@ def gen_epoch_data(): self._policy_model_eval, with_offload=self.args.offload_level is not None, ) - gp = self._policy_model_eval_group + gp = self.policy_trainer._policy_model_eval_group # gp = create_data_trans_group(self.args.logical_process_index, paddle.distributed.get_rank(), self._policy_model_eval.config.tensor_parallel_degree) # # todo: zhui self.value_trainer.export_evaluate_model( @@ -1661,23 +1660,23 @@ def gen_epoch_data(): # todo, split prompt_only_batch # pp2tp2dp2 -> dp4tp2 prompt_only_batch - print("create gp", gp) + # print("create gp", gp) prompt_only_batch = data_group_split(prompt_only_batch, group=gp) - print("prompt_only_batch =", prompt_only_batch) + # print("prompt_only_batch =", prompt_only_batch) # 生成数据 rl_batches = self.split_rl_micro_batches(prompt_only_batch) # rl_batches = self.load_sing_gen_data(as_batches=True, # use_counter=True) if self.use_ptx: ptx_batch = data_group_split(ptx_batch, group=gp) - print("ptx_batch =", ptx_batch) + # print("ptx_batch =", ptx_batch) ptx_batches = self.split_ptx_micro_batches(ptx_batch) - print("ptx_batchs =", ptx_batches) + # print("ptx_batchs =", ptx_batches) ptx_batches = data_group_merge(ptx_batches, group=gp) else: ptx_batches = [None for _ in range(len(rl_batches))] - print("rl_batches =", rl_batches) + # print("rl_batches =", rl_batches) # todo, merge data if gp is not None: input_ids_length = rl_batches[0]["input_ids"].shape[-1] From 2e3bf85dd612da97774629974cf22156dbcfc926 Mon Sep 17 00:00:00 2001 From: whucsgs Date: Thu, 7 Mar 2024 13:03:26 +0000 Subject: [PATCH 25/46] Suppport inference model generation. --- examples/RLHF/ppo_trainer.py | 407 +++++++++++++++++++++++++++++++++-- 1 file changed, 393 insertions(+), 14 deletions(-) diff --git a/examples/RLHF/ppo_trainer.py b/examples/RLHF/ppo_trainer.py index ca923adf779e..1fca672aa3f0 100644 --- a/examples/RLHF/ppo_trainer.py +++ b/examples/RLHF/ppo_trainer.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+import contextlib import copy import inspect import itertools @@ -31,7 +32,7 @@ from models.ppo_model_utils import RLHFPPOMixedLoss, RLHFValueLoss, create_loss from paddle.distributed import fleet from paddle.io import DataLoader, Dataset, DistributedBatchSampler -from paddle.utils import map_structure +from paddle.utils import map_structure, try_import from rich.console import Console from rich.table import Table @@ -62,6 +63,7 @@ from paddlenlp.transformers import BatchEncoding, PretrainedModel, PretrainedTokenizer from paddlenlp.transformers.configuration_utils import PretrainedConfig from paddlenlp.transformers.model_outputs import ModelOutput +from paddlenlp.transformers.model_utils import dtype_guard from paddlenlp.transformers.tokenizer_utils_base import ( PaddingStrategy, TruncationStrategy, @@ -800,6 +802,382 @@ def prepare_inputs_for_generation(self, *args, **kwargs): return model_inputs +import types + +from predictor import DygraphInferencePredictor, InferencePredictorMixin + + +class Timer: + def __init__(self, name): + self.name = name + self.tic_time = None + self.run_times = 0 + + def tic(self): + self.tic_time = time.time() + + def toc(self): + self.run_times += time.time() - self.tic_time + + +class Predictor(DygraphInferencePredictor): + def __init__(self, config, model: PretrainedModel = None, tokenizer: PretrainedTokenizer = None): + self.model_config = model.config + self.config = config + self.tokenizer = tokenizer + self.model = model + self.is_available = False + self._weights_mapping = None + # use multi stream to load weights + # self._stream_num = 4 + # self._streams = [] + # for i in range(self._stream_num): + # stream = paddle.device.cuda.Stream() + # self._streams.append(stream) + # self._param_counter = 0 + # print("=" * 20, "cache shape", self.cache_kvs_shape, list(self.__dict__.keys())) + # exit(0) + + @staticmethod + def create_predictor(trainer): + from predictor import ( + PdArgumentParser, + PredictorArgument, + get_model_max_position_embeddings, + ) + + # create infer model + # NOTE:infer model use static name param_attr to create and cannot be + # created multiple times. + def create_infer_model(model, dtype, set_state=False): + # patches for inference model to make FuseMT adapt + import paddlenlp_ops + + # should patch before infer model import + paddlenlp_ops.save_with_output = lambda *args, **kwargs: None + # TODO(guosheng): update the custom op code directly. + ori_set_ends = paddlenlp_ops.set_stop_value_multi_ends + + def _set_ends(topk_ids, stop_flags, end_ids, mode): + # infer model uses eos_token_id to pad and discriminate ending, + # patch to use pad_token_id to pad to unify with non-infer model. 
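+                # (illustrative values: with eos_token_id == 2 and pad_token_id == 0, any
+                # position whose stop_flag is already set receives 0 instead of 2, so the
+                # sampled sequences follow the same padding convention as the non-infer
+                # generation path.)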
+ topk_ids_out, stop_flags_out = ori_set_ends(topk_ids, stop_flags, end_ids, mode) + if trainer.tokenizer.pad_token_id != trainer.tokenizer.eos_token_id: + topk_ids_out = paddle.where(stop_flags, trainer.tokenizer.pad_token_id, topk_ids_out) + return topk_ids_out, stop_flags_out + + paddlenlp_ops.set_stop_value_multi_ends = _set_ends + from models.infer_model_utils import _update_model_kwargs + + import paddlenlp.experimental.transformers as infer_transformers + + config = copy.deepcopy(model.config) + hcg = fleet.get_hybrid_communicate_group() # may differ with training + config.tensor_parallel_degree = hcg.get_model_parallel_world_size() + config.tensor_parallel_rank = hcg.get_model_parallel_rank() + config.weight_only_quant_bits = -1 + config.quant_type = None + config.use_cachekv_int8 = False + config.single_card_ptq = True + infer_model_cls = getattr(infer_transformers, model.__class__.__name__ + "InferenceModel") + with dtype_guard(dtype): + infer_model = infer_model_cls(config) + # apply patches + infer_model.update_model_kwargs_for_generation = types.MethodType(_update_model_kwargs, infer_model) + if set_state: + state_dict = {} + for k, v in model.state_dict().items(): + # state_dict[k] = np.from_dlpack(paddle.utils.dlpack.to_dlpack(v)) + state_dict[k] = v.numpy() + infer_model.set_state_dict(state_dict) + return infer_model + + # to avoid oom, clear param of infer_model imediately + ori_creat_param = paddle.nn.Layer.create_parameter + + def _create_param(self, *args, **kwargs): + param = ori_creat_param(self, *args, **kwargs) + param._clear_data() + return param + + paddle.nn.Layer.create_parameter = _create_param + infer_model = create_infer_model(trainer.model, dtype=trainer.amp_dtype) + paddle.nn.Layer.create_parameter = ori_creat_param + + # create predictor + parser = PdArgumentParser((PredictorArgument,)) + predictor_args = parser.parse_dict( + { + "src_length": get_model_max_position_embeddings( # can be changed dynamically by predictor.input_length + trainer.model.config + ), + "max_length": trainer.args.max_length, + "dtype": trainer.amp_dtype, + "batch_size": trainer.args.per_device_train_batch_size, + # infer model do not support top_k, and differ with non-infer model + # generation which gets default top_K=50 using generation_config.top_k + "top_p": 0.8, + # trainer.args.top_p, + "temperature": trainer.args.temperature, + "repetition_penalty": trainer.args.repetition_penalty, + } + )[0] + policy_predictor = Predictor(predictor_args, model=infer_model, tokenizer=trainer.tokenizer) + return policy_predictor + + def _create_caches(self): + """inputs can be reused among multiple predictions, such as cache""" + if hasattr(self, "cache_kvs_shape"): # has created cache + input_length = getattr(self, "input_length", 0) + if input_length <= self.config.src_length: # reuse cahce + return + else: # create longer cache + self._clear_caches() + self.config.src_length = getattr(self, "input_length", self.config.src_length) + if not hasattr(self, "_buffer_attrs"): + pre_attrs = set(self.__dict__.keys()) + self.cache_kvs_shape = self.model.get_cache_kvs_shape( + self.model_config, self.config.batch_size, self.config.total_max_length + ) + # TODO: remove GenerationConfig.from_pretrained + InferencePredictorMixin.__init__(self, self.config, self.tokenizer) + if not hasattr(self, "_buffer_attrs"): + self._buffer_attrs = set(self.__dict__.keys()) - pre_attrs + + def _clear_caches(self): + # del or offload + for attr in self._buffer_attrs: + delattr(self, attr) + + def disable(self, 
model, onload_model=True): + # clear caches + self._clear_caches() + # clear params + for _, param in self.model.state_dict().items(): + param._clear_data() + if onload_model: + model.to(paddle.device.get_device()) + self.is_available = False + + def enable(self, model, offload_model=True): + if self.is_available: + return + # set params + self.set_state_dict(model, offload_model) + self.is_available = True + + @paddle.no_grad() + def set_state_dict(self, model, offload_model=True): + offload_place = paddle.CUDAPinnedPlace() + state_dict = {} + for k, v in model.state_dict().items(): + # maybe use dlpack or some other zero-copy methods + state_dict[k] = v # .numpy() + # state_dict[k] = v.to(offload_place) + # self.model.set_state_dict(state_dict) + # return + + if getattr(self, "_weights_mapping", None) is None: + self._weights_mapping = self.model.get_weights_mapping() + convert_timer = Timer("cpu-convert") + set_timer = Timer("cpu-convert") + set_timer.tic() + # import nvtx + + # set_rng = nvtx.start_range(message=f"set_state_dict", color="yellow") + + for k, v in self._weights_mapping.items(): + # with paddle.device.cuda.stream_guard( + # self._streams[self._param_counter % self._stream_num]): + with contextlib.nullcontext(): + # set_param_rng = nvtx.start_range(message=f"set_param", + # color="green") + param, (convert_fun, args) = k, v + args = [state_dict[name] for name in args] + # maybe use thread pool to speedup cpu convert + # value = paddle.to_tensor(convert_fun(*args)) + convert_timer.tic() + # with device_guard("cpu"): + # op with pinmemory input tensors get gpu output tensor + value = convert_fun(*args) + if offload_model: + for arg in args: + # shared params no need to offload + if value is not arg: + arg.to(offload_place, blocking=False) + convert_timer.toc() + if not isinstance(value, paddle.Tensor): + param.set_value(value) + elif isinstance(value.place, paddle.CUDAPlace): + # param.get_tensor()._share_data_with(value) + value._share_buffer_to(param) + else: + param.copy_(value, False) + # nvtx.end_range(set_param_rng) + # self._param_counter += 1 + paddle.device.cuda.synchronize() + set_timer.toc() + # nvtx.end_range(set_rng) + print("=" * 20, "cpu-convert time", convert_timer.run_times, set_timer.run_times) + # exit(0) + # print("=" * 20, "lm_head.weight", self.model.lm_head.weight) + # print("=" * 20, "llama.embed_tokens.weight", + # self.model.llama.embed_tokens.weight) + # print("=" * 20, "llama.transformer_block.qkv_weights", + # self.model.llama.transformer_block.qkv_weights[0]) + # print("=" * 20, "llama.transformer_block.ffn1_weights", + # self.model.llama.transformer_block.ffn1_weights[0]) + # print("=" * 20, "llama.transformer_block.linear_weights", + # self.model.llama.transformer_block.linear_weights[0]) + + def _preprocess(self, source): + # make cache when infer happens to get actual shape to save memory + self._create_caches() + return super()._preprocess(source) + + @paddle.no_grad() + def _infer(self, inputs): + for key in inputs.keys(): + if paddle.is_tensor(inputs[key]): + continue + if isinstance(inputs[key], list): + if paddle.is_tensor(inputs[key]): + continue + inputs[key] = [paddle.to_tensor(item) for item in inputs[key]] + else: + inputs[key] = paddle.to_tensor(inputs[key]) + + inputs["cache_kvs"] = self.cache_kvs + print("=" * 20, "infer input_ids", inputs["input_ids"]) + return self.model.generate(**inputs) + + def _postprocess(self, predictions): + return predictions + + +policy_predictor: Predictor = None + + +def check_memory_usage(msg=""): 
+ import paddle + + max_memory_allocated_size = paddle.device.cuda.max_memory_allocated() / (1024 * 1024 * 1024) + max_memory_reserved_size = paddle.device.cuda.max_memory_reserved() / (1024 * 1024 * 1024) + memory_allocated_size = paddle.device.cuda.memory_allocated() / (1024 * 1024 * 1024) + memory_reserved_size = paddle.device.cuda.memory_reserved() / (1024 * 1024 * 1024) + mem = { + f"{msg}_max_memory_allocated_size": max_memory_allocated_size, + f"{msg}_max_memory_reserved_size": max_memory_reserved_size, + f"{msg}_memory_allocated_size": memory_allocated_size, + f"{msg}_memory_reserved_size": memory_reserved_size, + } + print(mem) + + +@contextmanager +def infer_guard(trainer, offload_model=True): + try: + try_import("paddlenlp_ops") + except: + yield + return + check_memory_usage("before infer generation") + global policy_predictor + # offload training params before infer model creation + model = trainer.model + import time + + # start_time = time.time() + # model.to(paddle.CUDAPinnedPlace()) + # print("=" * 20, "offload time", time.time() - start_time) + start_time = time.time() + if policy_predictor is None: + policy_predictor = Predictor.create_predictor(trainer) + if not policy_predictor.is_available: + policy_predictor.enable(model, offload_model=offload_model) + print("=" * 20, "create infer time", time.time() - start_time) + # TODO(guosheng): patch for dist.all_recude to use tp group, fix it later + import paddle.distributed as dist + + ori_all_reduce = dist.all_reduce + ori_broadcast = dist.broadcast + hcg = fleet.get_hybrid_communicate_group() + dist.all_reduce = lambda x: ori_all_reduce(x, group=hcg.get_model_parallel_group()) + dist.broadcast = lambda x, rank: ori_broadcast( + x, src=hcg.get_model_parallel_group_src_rank(), group=hcg.get_model_parallel_group() + ) + check_memory_usage("begin infer generation") + yield + dist.all_reduce = ori_all_reduce + dist.broadcast = ori_broadcast + print("=" * 20, "infer generation finished") + import sys + + print("=" * 20, "predictor refcount", sys.getrefcount(policy_predictor)) + # policy_predictor = None + # policy_predictor.model = None + policy_predictor.disable(model, onload_model=offload_model) + # start_time = time.time() + # model.to(paddle.device.get_device()) + # print("=" * 20, "onload time", time.time() - start_time) + check_memory_usage("end infer generation") + + +class InferEvalModel: + """For faster generation, not support PipelineParallel yet.""" + + def __init__(self, trainer: Trainer): + self.model: PretrainedModel = trainer.model + self.tokenizer: PretrainedTokenizer = trainer.tokenizer + + def eval(self): + self.model.eval() + + def train(self): + self.model.train() + + def __call__(self, *args, **kwargs): + # assert model is on GPU + assert policy_predictor is None or not policy_predictor.is_available + return self.model(*args, **kwargs) + + def generate(self, *args, **kwargs): + if policy_predictor is None or not policy_predictor.is_available: + return self.model.generate(*args, **kwargs) + + arg_dict = inspect.signature(self.model.generate).bind(*args, **kwargs).arguments + input_ids = arg_dict["input_ids"] + generation_config = arg_dict["generation_config"] + # convert text and tokenize again to convert left padding to right padding + # remove this if inputs is right padding + print("=" * 20, "raw input_ids", input_ids) + # TODO(guosheng): allow to use right padding to infer directly + prompts = self.tokenizer.batch_decode(input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False) + print("=" 
* 20, "prompts", prompts) + # decoded prompts has been applied with chat_template + # NOTE(guosheng): Whether to add special token should be checked, None + # chat_template would not add special token in predictor, since it assumes + # chat_template includes special tokens. While Beaver dataset tokenization + # does not use chat_template, it uses hard coded template which excludes + # special tokens. + with guard_set_args( + policy_predictor.tokenizer, + { + # predictor use right padding for infer model by default + # "padding_side": "right", + # "chat_template": None + }, + ): + policy_predictor.input_length = input_ids.shape[-1] + outputs = policy_predictor.predict(prompts) + outputs = (outputs[0][:, input_ids.shape[-1] :],) if generation_config.trunc_input else (outputs[0],) + if self.tokenizer.padding_side == "left": + # convert back to left padding inputs + outputs[0][:, : input_ids.shape[-1]] = input_ids + print("=" * 20, "infer output_ids", outputs[0]) + return outputs + + class PPOTrainer(Trainer): def __init__( self, @@ -1008,7 +1386,9 @@ def actor_model(self): model = PipeEvalModel(self.policy_trainer) self._actor_model = model else: - model = self.policy_trainer.model + # model = self.policy_trainer.model + model = InferEvalModel(self.policy_trainer) + self._actor_model = model return model @property @@ -1736,13 +2116,15 @@ def rollout(self, prompt_only_batch: PromptOnlyBatch) -> List[Dict[str, Any]]: ) # NOTE: generation output of paddlenlp do not contain prompt, we should # change sequences here. - sequences = self.actor_model.generate( - input_ids=input_ids, - attention_mask=attention_mask, - position_ids=position_ids, - generation_config=self.generation_config, - synced_gpus=ShardingOption.FULL_SHARD in self.policy_trainer.args.sharding, - )[0] + with infer_guard(self.policy_trainer): + # with contextlib.nullcontext(): + sequences = self.actor_model.generate( + input_ids=input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + generation_config=self.generation_config, + synced_gpus=ShardingOption.FULL_SHARD in self.policy_trainer.args.sharding, + )[0] sequences = sequences.reshape([input_ids.shape[0], self.args.num_return_sequences, -1]).transpose([1, 0, 2]) # sequences = [self.load_sing_gen_data(as_batches=False, use_counter=False)["input_ids"]] @@ -1780,11 +2162,8 @@ def post_rollout( reward_seq = sequence = reward_tokenize_output["input_ids"] reward_attention_mask = attention_mask = reward_tokenize_output["attention_mask"] else: - # for text in self.tokenizer.batch_decode( - # sequence, - # skip_special_tokens=True - # ): - # print(text) + for text in self.tokenizer.batch_decode(sequence, skip_special_tokens=True): + print(text) reward_seq = sequence reward_attention_mask = attention_mask # position_ids is necessary for non-right padding From df452d12a10533d344499a4a9c125b4c00d13a20 Mon Sep 17 00:00:00 2001 From: Zhong Hui Date: Fri, 8 Mar 2024 14:57:43 +0800 Subject: [PATCH 26/46] fix compatible for no eval model. 
--- examples/RLHF/ppo_trainer.py | 10 +++++++--- examples/RLHF/run.sh | 15 +++++++++++++++ 2 files changed, 22 insertions(+), 3 deletions(-) create mode 100644 examples/RLHF/run.sh diff --git a/examples/RLHF/ppo_trainer.py b/examples/RLHF/ppo_trainer.py index 1c38c26807fb..6de1d97652ec 100644 --- a/examples/RLHF/ppo_trainer.py +++ b/examples/RLHF/ppo_trainer.py @@ -1286,7 +1286,7 @@ def __init__( }, ): - assert args.pipeline_parallel_degree == 1, "error" + # assert args.pipeline_parallel_degree == 1, "error" self.reference_trainer = Trainer( reference_model, criterion, @@ -1301,7 +1301,7 @@ def __init__( preprocess_logits_for_metrics, ) - assert args.pipeline_parallel_degree == 1, "error" + # assert args.pipeline_parallel_degree == 1, "error" self.reward_trainer = Trainer( reward_model, criterion, @@ -1646,7 +1646,11 @@ def gen_epoch_data(): self._policy_model_eval, with_offload=self.args.offload_level is not None, ) - gp = self.policy_trainer._policy_model_eval_group + gp = ( + self.policy_trainer._policy_model_eval_group + if hasattr(self.policy_trainer, "_policy_model_eval_group") + else None + ) # gp = create_data_trans_group(self.args.logical_process_index, paddle.distributed.get_rank(), self._policy_model_eval.config.tensor_parallel_degree) # # todo: zhui self.value_trainer.export_evaluate_model( diff --git a/examples/RLHF/run.sh b/examples/RLHF/run.sh new file mode 100644 index 000000000000..a4bdca9e974e --- /dev/null +++ b/examples/RLHF/run.sh @@ -0,0 +1,15 @@ +# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +PYTHONPATH=../../ GLOG_minloglevel=2 python3.10 -m paddle.distributed.launch --gpus "0,1,2,3,4,5,6,7" ppo_main.py ppo_config.json From d73b8a35205625f2a0571a8ee76290fcfe969b8a Mon Sep 17 00:00:00 2001 From: Zhong Hui Date: Fri, 8 Mar 2024 16:50:47 +0800 Subject: [PATCH 27/46] fix pp sync. 
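When the train model is pipeline parallel, the merged eval weights initially live
only on the source rank of the pipeline group, so export_evaluate_model now
broadcasts them to the other pipeline ranks via sync_params_buffers. In spirit the
synchronization is (sync_eval_params is an illustrative sketch, not the call used
in the patch):

    import paddle.distributed as dist

    def sync_eval_params(eval_model, pp_group):
        # illustrative sketch; the patch itself uses
        # paddle.distributed.parallel.sync_params_buffers
        if pp_group.nranks <= 1:
            return
        for param in eval_model.parameters():
            dist.broadcast(param, src=pp_group.ranks[0], group=pp_group)
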
--- examples/RLHF/ppo_trainer.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/examples/RLHF/ppo_trainer.py b/examples/RLHF/ppo_trainer.py index 6de1d97652ec..af866a0376f0 100644 --- a/examples/RLHF/ppo_trainer.py +++ b/examples/RLHF/ppo_trainer.py @@ -791,6 +791,11 @@ def create_send_recv_table(train_keys, eval_keys): # Offload train model if need if global_rank == src_rank and with_offload: offload_tensor_to_cpu(train_state_dict[key]) + + if pp_group.nranks > 1: + paddle.distributed.parallel.sync_params_buffers( + eval_model, comm_group=pp_group, src_rank=pp_group.ranks[0], fuse_params=False + ) else: # 其他 DP rank 的state dict, 适配 offload 和初始化 if with_offload: @@ -816,6 +821,10 @@ def create_send_recv_table(train_keys, eval_keys): paddle.distributed.parallel.sync_params_buffers( eval_model, comm_group=dp_group, src_rank=dp_group.ranks[0], fuse_params=False ) + # paddle.save(eval_state_dict, f"./tmp/eval_{sd_group.rank}_tp_{eval_tp_rank}_pp_{pp_group.rank}.pdparams") + # paddle.save(train_state_dict, f"./tmp/train_{sd_group.rank}_tp_{tp_group.rank}_pp_{pp_group.rank}.pdparams") + # paddle.distributed.barrier() + # exit(-1) old_dp_workers = self.args.world_size // (max(sd_group.nranks, 1) * max(dp_group.nranks, 1)) group_nums = self.args.logical_process_index // old_dp_workers * eval_tp_size + eval_tp_rank From e14e04b1f77c7f9244419563b7fe3e2cae7fe042 Mon Sep 17 00:00:00 2001 From: Zhong Hui Date: Fri, 8 Mar 2024 16:56:40 +0800 Subject: [PATCH 28/46] remove debug info --- examples/RLHF/ppo_trainer.py | 36 ------------------------------------ 1 file changed, 36 deletions(-) diff --git a/examples/RLHF/ppo_trainer.py b/examples/RLHF/ppo_trainer.py index af866a0376f0..ed1a52a1e14c 100644 --- a/examples/RLHF/ppo_trainer.py +++ b/examples/RLHF/ppo_trainer.py @@ -531,7 +531,6 @@ def data_group_split(tensors, group): new_dict[k] = data_group_split(v, group) return new_dict elif isinstance(tensors, paddle.Tensor): - # print("Spliting ", tensors.shape, tensors.dtype) return tensors.split(group.nranks)[group.rank] else: logger.warning(f"Can't parse for type {type(tensors)}") @@ -551,8 +550,6 @@ def data_group_merge(tensors, group): return new_dict elif isinstance(tensors, paddle.Tensor): tensor_list = [] - # print("Mergeing ", tensors.shape, tensors.dtype) - # paddle.distributed.all_gather(tensor_list, tensors, group=group) all_gather_nd(tensor_list, tensors, group=group, padded=True) return paddle.concat(tensor_list) else: @@ -568,7 +565,6 @@ def repad_rl_batches(batches, input_lengths): batches["position_ids"] = v for key in list(batches.keys()): if batches[key].shape[0] != input_lengths.shape[0]: - # print("set mean", key, batches[key]) batches[key] = batches[key].mean() return batches @@ -599,15 +595,6 @@ def __exit__(self, type, value, traceback): return True # Suppress special SkipWithBlock exception -# with SkipContextManager(skip=True): -# print('In the with block') # Won't be called -# print('Out of the with block') - -# with SkipContextManager(skip=tp_group.rank!=0): -# print('In the with block') # Won't be called -# dist.barrier() - - def all_gather_nd(tensor_list, tensor, group=None, padded=False): """ Gathers tensor arrays of different lengths in a list. 
@@ -678,8 +665,6 @@ def export_evaluate_model(self: Trainer, train_model, eval_model, **kwargs): train_state_dict = train_model.state_dict() eval_state_dict = eval_model.state_dict() - # print(sd_group) - if dp_group.rank <= 0 and sd_group.rank <= 0: train_pp_size = pp_group.nranks if eval_tp_size > 1 and train_tp_size != eval_tp_size: @@ -700,22 +685,17 @@ def export_evaluate_model(self: Trainer, train_model, eval_model, **kwargs): ignore_error=False, ) - # print(tp_actions.keys()) - is_dst = global_rank == 0 for key in eval_state_dict.keys(): - # print(f"get key {key}") tensor = train_state_dict[key] if key in tp_actions: ret = distributed_gather(tensor, dst=0, group=tp_group, offload=False) action = tp_actions.pop(key) tensor = action(ret) if is_dst else None - # print(f"merge {key}") else: tensor = tensor._copy_to(paddle.CPUPlace(), False) if is_dst else None if tensor is not None: - # print(tensor.shape) eval_state_dict[key].set_value(tensor) if not eval_state_dict[key]._is_initialized(): @@ -766,14 +746,9 @@ def create_send_recv_table(train_keys, eval_keys): # tp+pp->tp if eval_tp_size > 1 and train_pp_size > 1: table = create_send_recv_table(train_state_dict.keys(), eval_state_dict.keys()) - # print(table) for key, src_rank, dst_rank in table: # Init tensor for model is cleaned - # print(key, src_rank, dst_rank, eval_state_dict[key]._is_initialized()) - # if key in train_state_dict: - # print(train_state_dict[key]._is_initialized()) - if not eval_state_dict[key]._is_initialized(): v = eval_state_dict[key] t = paddle._C_ops.full_like(v, 0, v.dtype, paddle.CUDAPlace(global_dev_id)) @@ -803,7 +778,6 @@ def create_send_recv_table(train_keys, eval_keys): offload_tensor_to_cpu(train_state_dict[key]) for k, v in eval_state_dict.items(): if not v._is_initialized(): - # print(f"init {k}") t = paddle._C_ops.full_like(v, 0, v.dtype, paddle.CUDAPlace(global_dev_id)) v.get_tensor()._share_data_with(t.get_tensor()) @@ -856,8 +830,6 @@ def create_data_trans_group(global_rank, group_nums): if global_rank in ranks: group = gp - # print("all_split_table:", all_split_table) - # print("export_group", group) return group @@ -1673,23 +1645,18 @@ def gen_epoch_data(): # todo, split prompt_only_batch # pp2tp2dp2 -> dp4tp2 prompt_only_batch - # print("create gp", gp) prompt_only_batch = data_group_split(prompt_only_batch, group=gp) - # print("prompt_only_batch =", prompt_only_batch) # 生成数据 rl_batches = self.split_rl_micro_batches(prompt_only_batch) # rl_batches = self.load_sing_gen_data(as_batches=True, # use_counter=True) if self.use_ptx: ptx_batch = data_group_split(ptx_batch, group=gp) - # print("ptx_batch =", ptx_batch) ptx_batches = self.split_ptx_micro_batches(ptx_batch) - # print("ptx_batchs =", ptx_batches) ptx_batches = data_group_merge(ptx_batches, group=gp) else: ptx_batches = [None for _ in range(len(rl_batches))] - # print("rl_batches =", rl_batches) # todo, merge data if gp is not None: input_ids_length = rl_batches[0]["input_ids"].shape[-1] @@ -2288,7 +2255,6 @@ def post_rollout( else: reward_critic_model_in_use = self.reward_critic_model - logger.error("Get Here!!") # pipe model outputs a logits tensor with LMHead, while non-pipe model # outputs a tuple with logits tensor as the only one element. 
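        # (i.e. PipeEvalModel returns the logits tensor directly, while the plain
        # causal-LM call returns a tuple whose first element is the logits; the
        # isinstance checks below normalize both cases to a bare tensor.)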
logits = actor_model_in_use( @@ -2297,7 +2263,6 @@ def post_rollout( position_ids=position_ids, # return_dict=True, ) # .logits - logger.error("Get Here 1.0!!") if not isinstance(logits, paddle.Tensor): logits = logits[0] ref_logits = self.reference_model( @@ -2306,7 +2271,6 @@ def post_rollout( position_ids=position_ids, # return_dict=True, ) # .logits - logger.error("Get Here 2.0!!") if not isinstance(ref_logits, paddle.Tensor): ref_logits = ref_logits[0] From 8015c8bb5b21ac62472846584afcf7a39693a064 Mon Sep 17 00:00:00 2001 From: whucsgs Date: Tue, 12 Mar 2024 12:32:05 +0000 Subject: [PATCH 29/46] Refacor PPO training using StepTrainer. --- examples/RLHF/infer_utils.py | 304 +++ examples/RLHF/models/infer_model_utils.py | 185 ++ examples/RLHF/models/model_pp.py | 220 +- examples/RLHF/models/pp_model_utils.py | 92 + examples/RLHF/models/ppo_model_utils.py | 79 +- examples/RLHF/ppo_trainer.py | 1763 ++++------------- examples/RLHF/trainer_utils.py | 645 ++++++ .../transformers/llama/modeling.py | 2 +- 8 files changed, 1618 insertions(+), 1672 deletions(-) create mode 100644 examples/RLHF/infer_utils.py create mode 100644 examples/RLHF/models/infer_model_utils.py create mode 100644 examples/RLHF/models/pp_model_utils.py create mode 100644 examples/RLHF/trainer_utils.py diff --git a/examples/RLHF/infer_utils.py b/examples/RLHF/infer_utils.py new file mode 100644 index 000000000000..60f32dd2d0ab --- /dev/null +++ b/examples/RLHF/infer_utils.py @@ -0,0 +1,304 @@ +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import copy +import inspect +import types +from contextlib import contextmanager + +import paddle +import paddle.distributed as dist +from paddle.utils import try_import +from trainer_utils import guard_set_args + +import paddlenlp +from paddlenlp.trainer.trainer import Trainer, logger +from paddlenlp.transformers import PretrainedModel, PretrainedTokenizer +from paddlenlp.transformers.model_utils import dtype_guard + + +class Predictor: + def __init__(self, config, model: PretrainedModel = None, tokenizer: PretrainedTokenizer = None): + self.model_config = model.config + self.config = config + self.tokenizer = tokenizer + self.model = model + self.is_available = False + self._weights_mapping = None + # TODO(guosheng): Removde dependency on llm.Predictor + # 1. buffer_maker creates caches and other buffer inputs can be shared + # among multi time prediction. define caches and extra inputs creation + # method instead of using predictor.__init__ + # 2. inputs_processer creates caches and other inputs can be shared among + # multi time prediction. 
define caches and extra inputs creation method + # instead of using predictor.__init__ + from predictor import InferencePredictorMixin + + self._buffer_maker = types.MethodType(InferencePredictorMixin.__init__, self) + self._inputs_processer = types.MethodType(InferencePredictorMixin._preprocess, self) + + @staticmethod + def create_predictor(trainer): + from predictor import ( + PdArgumentParser, + PredictorArgument, + get_model_max_position_embeddings, + ) + + # create infer model + # NOTE: infer model use static name param_attr to create and cannot be + # created multiple times. + def create_infer_model(model, dtype, set_state=False): + from models.infer_model_utils import patch_infer_generate + + # apply patches to make FuseMT adapt + patch_infer_generate( + eos_token_id=trainer.tokenizer.eos_token_id, pad_token_id=trainer.tokenizer.pad_token_id + ) + config = copy.deepcopy(model.config) + hcg = dist.fleet.get_hybrid_communicate_group() # may differ with training + config.tensor_parallel_degree = hcg.get_model_parallel_world_size() + config.tensor_parallel_rank = hcg.get_model_parallel_rank() + config.weight_only_quant_bits = -1 + config.quant_type = None + config.use_cachekv_int8 = False + config.single_card_ptq = True + infer_model_cls = getattr(paddlenlp.experimental.transformers, model.__class__.__name__ + "InferenceModel") + with dtype_guard(dtype): + infer_model = infer_model_cls(config) + + if set_state: + state_dict = {} + for k, v in model.state_dict().items(): + # state_dict[k] = np.from_dlpack(paddle.utils.dlpack.to_dlpack(v)) + state_dict[k] = v.numpy() + infer_model.set_state_dict(state_dict) + return infer_model + + # to avoid oom, clear param of infer_model imediately + ori_creat_param = paddle.nn.Layer.create_parameter + + def _create_param(self, *args, **kwargs): + param = ori_creat_param(self, *args, **kwargs) + param._clear_data() + return param + + paddle.nn.Layer.create_parameter = _create_param + infer_model = create_infer_model(trainer.model, dtype=trainer.amp_dtype) + paddle.nn.Layer.create_parameter = ori_creat_param + + # create predictor + parser = PdArgumentParser((PredictorArgument,)) + predictor_args = parser.parse_dict( + { + "src_length": get_model_max_position_embeddings( # can be changed dynamically by predictor.input_length + trainer.model.config + ), + "max_length": trainer.args.max_length, + "dtype": trainer.amp_dtype, + "batch_size": trainer.args.per_device_train_batch_size, + # infer model do not support top_k, and differ with non-infer model + # generation which gets default top_K=50 using generation_config.top_k + "top_p": 0.0, + # trainer.args.top_p, + "temperature": trainer.args.temperature, + "repetition_penalty": trainer.args.repetition_penalty, + } + )[0] + policy_predictor = Predictor(predictor_args, model=infer_model, tokenizer=trainer.tokenizer) + return policy_predictor + + def _create_caches(self): + """inputs can be reused among multiple predictions, such as cache""" + if hasattr(self, "cache_kvs_shape"): # has created cache + input_length = getattr(self, "input_length", 0) + if input_length <= self.config.src_length: # reuse cahce + return + else: # create longer cache + self._clear_caches() + self.config.src_length = getattr(self, "input_length", self.config.src_length) + if not hasattr(self, "_buffer_attrs"): + pre_attrs = set(self.__dict__.keys()) + self.cache_kvs_shape = self.model.get_cache_kvs_shape( + self.model_config, self.config.batch_size, self.config.total_max_length + ) + self._buffer_maker(self.config, self.tokenizer) + 
if not hasattr(self, "_buffer_attrs"): + self._buffer_attrs = set(self.__dict__.keys()) - pre_attrs + + def _clear_caches(self): + # del or offload + for attr in self._buffer_attrs: + delattr(self, attr) + + def disable(self, model, onload_model=True): + # clear caches + self._clear_caches() + # clear params + for _, param in self.model.state_dict().items(): + param._clear_data() + if onload_model: + model.to(paddle.device.get_device()) + self.is_available = False + + def enable(self, model, offload_model=True): + if self.is_available: + return + # set params + self.set_state_dict(model, offload_model) + self.is_available = True + + @paddle.no_grad() + def set_state_dict(self, model, offload_model=True): + offload_place = paddle.CUDAPinnedPlace() + state_dict = {} + for k, v in model.state_dict().items(): + state_dict[k] = v + + if getattr(self, "_weights_mapping", None) is None: + self._weights_mapping = self.model.get_weights_mapping() + + for k, v in self._weights_mapping.items(): + param, (convert_fun, args) = k, v + args = [state_dict[name] for name in args] + value = convert_fun(*args) + if offload_model: + for arg in args: + # shared params no need to offload + if value is not arg: + arg.to(offload_place, blocking=False) + if not isinstance(value, paddle.Tensor): + param.set_value(value) + elif isinstance(value.place, paddle.CUDAPlace): + value._share_buffer_to(param) + else: + param.copy_(value, False) + paddle.device.cuda.synchronize() + + def _preprocess(self, source): + # make cache when infer happens to get actual shape to save memory + self._create_caches() + return self._inputs_processer(source) + + @paddle.no_grad() + def _infer(self, inputs): + for key in inputs.keys(): + if paddle.is_tensor(inputs[key]): + continue + if isinstance(inputs[key], list): + if paddle.is_tensor(inputs[key]): + continue + inputs[key] = [paddle.to_tensor(item) for item in inputs[key]] + else: + inputs[key] = paddle.to_tensor(inputs[key]) + + inputs["cache_kvs"] = self.cache_kvs + return self.model.generate(**inputs) + + def _postprocess(self, predictions): + return predictions + + @paddle.no_grad() + def predict(self, input_texts: str | list[str]): + tokenized_source = self._preprocess(input_texts) + predictions = self._infer(tokenized_source) + decoded_predictions = self._postprocess(predictions) + return decoded_predictions + + +policy_predictor: Predictor = None + + +@contextmanager +def infer_guard(trainer, offload_model=True): + try: + try_import("paddlenlp_ops") + except: + logger.warning("paddlenlp_ops does not exist, please install paddlenlp_ops for generation speedup.") + yield + return + + global policy_predictor + model = trainer.model + if policy_predictor is None: + policy_predictor = Predictor.create_predictor(trainer) + if not policy_predictor.is_available: + policy_predictor.enable(model, offload_model=offload_model) + + # TODO(guosheng): patch for dist.all_recude to use tp group, fix it later + ori_all_reduce = dist.all_reduce + ori_broadcast = dist.broadcast + hcg = dist.fleet.get_hybrid_communicate_group() + dist.all_reduce = lambda x: ori_all_reduce(x, group=hcg.get_model_parallel_group()) + dist.broadcast = lambda x, rank: ori_broadcast( + x, src=hcg.get_model_parallel_group_src_rank(), group=hcg.get_model_parallel_group() + ) + yield + dist.all_reduce = ori_all_reduce + dist.broadcast = ori_broadcast + + policy_predictor.disable(model, onload_model=offload_model) + + +class InferEvalModel: + """For faster generation, not support PipelineParallel yet.""" + + def __init__(self, 
trainer: Trainer): + self.model: PretrainedModel = trainer.model + self.tokenizer: PretrainedTokenizer = trainer.tokenizer + + def eval(self): + self.model.eval() + + def train(self): + self.model.train() + + def __call__(self, *args, **kwargs): + # assert model is on GPU + assert policy_predictor is None or not policy_predictor.is_available + return self.model(*args, **kwargs) + + def generate(self, *args, **kwargs): + if policy_predictor is None or not policy_predictor.is_available: + return self.model.generate(*args, **kwargs) + + arg_dict = inspect.signature(self.model.generate).bind(*args, **kwargs).arguments + input_ids = arg_dict["input_ids"] + generation_config = arg_dict["generation_config"] + # convert text and tokenize again to convert left padding to right padding + # remove this if inputs is right padding + # TODO(guosheng): allow to use right padding to infer directly + prompts = self.tokenizer.batch_decode(input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False) + # decoded prompts has been applied with chat_template + # NOTE(guosheng): Whether to add special token should be checked, None + # chat_template would not add special token in predictor, since it assumes + # chat_template includes special tokens. While Beaver dataset tokenization + # does not use chat_template, it uses hard coded template which excludes + # special tokens. + with guard_set_args( + policy_predictor.tokenizer, + { + # predictor use right padding for infer model by default + # "padding_side": "right", + # "chat_template": None + }, + ): + policy_predictor.input_length = input_ids.shape[-1] + outputs = policy_predictor.predict(prompts) + outputs = (outputs[0][:, input_ids.shape[-1] :],) if generation_config.trunc_input else (outputs[0],) + if self.tokenizer.padding_side == "left": + # convert back to left padding inputs + outputs[0][:, : input_ids.shape[-1]] = input_ids + return outputs diff --git a/examples/RLHF/models/infer_model_utils.py b/examples/RLHF/models/infer_model_utils.py new file mode 100644 index 000000000000..3d63fe52aa9b --- /dev/null +++ b/examples/RLHF/models/infer_model_utils.py @@ -0,0 +1,185 @@ +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +# Copyright 2023 PKU-Alignment Team. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Utilities for inference model.""" + +import numpy as np +import paddle + + +def patch_paddlenlp_ops(eos_token_id, pad_token_id): + import paddlenlp_ops + + paddlenlp_ops.save_with_output = lambda *args, **kwargs: None + + # TODO(guosheng): update the custom op code directly. + ori_set_ends = paddlenlp_ops.set_stop_value_multi_ends + + def _set_ends(topk_ids, stop_flags, end_ids, mode): + # infer model uses eos_token_id to pad and discriminate ending, + # patch to use pad_token_id to pad to unify with non-infer model. 
+ topk_ids_out, stop_flags_out = ori_set_ends(topk_ids, stop_flags, end_ids, mode) + if pad_token_id != eos_token_id: + topk_ids_out = paddle.where(stop_flags, pad_token_id, topk_ids_out) + return topk_ids_out, stop_flags_out + + paddlenlp_ops.set_stop_value_multi_ends = _set_ends + + +def patch_infer_generate(eos_token_id, pad_token_id): + """patches for inference model to make FuseMT adapt""" + # patch paddlenlp_ops, maybe update the custom op code directly later + # NOTE: should patch paddlenlp_ops before infer model import + patch_paddlenlp_ops(eos_token_id, pad_token_id) + + # patch get_weights_mapping for InferenceModel + patch_infer_model() + + # patch GenerationInferenceModel.sample + from paddlenlp.experimental.transformers.generation_utils import ( + GenerationInferenceModel, + ) + + ori_update_model_kwargs = GenerationInferenceModel.update_model_kwargs_for_generation + + def _update_model_kwargs(self, *args, **kwargs): + # update_model_kwargs_for_generation only returns , hack to. + model_kwargs = ori_update_model_kwargs(self, *args, **kwargs) + next_tokens = model_kwargs["next_tokens"] + all_input_ids = paddle.concat([model_kwargs["all_input_ids"], next_tokens], axis=1) + model_kwargs["next_tokens"] = all_input_ids + model_kwargs["all_input_ids"] = None + return model_kwargs + + GenerationInferenceModel.update_model_kwargs_for_generation = _update_model_kwargs + + +_model_weights_mapping_dict = {} + + +def register_model(model_cls_name): + def mark_cls_name(func): + # Do not register here although we can, otherwise infer model would import + # before paddlenlp_ops. + _model_weights_mapping_dict[model_cls_name] = func + return func + + return mark_cls_name + + +def patch_infer_model(): + import paddlenlp.experimental.transformers as infer_transformers + + for model_cls_name, get_weights_mapping in _model_weights_mapping_dict.items(): + model_cls = getattr(infer_transformers, model_cls_name) + model_cls.get_weights_mapping = get_weights_mapping + + +@register_model("LlamaForCausalLMInferenceModel") +def get_weights_mapping(self): + """model to infer model""" + head_size = self.config.hidden_size // self.config.num_attention_heads + + def _concat_qkv(q, k, v): + if isinstance(q, paddle.Tensor): + concated_qkv_weight = paddle.concat([q, k, v], axis=-1).T.reshape( + [ + 3 * (self.config.num_attention_heads // self.config.tensor_parallel_degree) * (head_size), + self.config.hidden_size, + ] + ) + else: + concated_qkv_weight = ( + np.concatenate( + [q, k, v], + axis=-1, + ) + .transpose(1, 0) + .reshape( + 3 * (self.config.num_attention_heads // self.config.tensor_parallel_degree) * (head_size), + self.config.hidden_size, + ) + ) + + return concated_qkv_weight + + def _concat_ffn1(w1, w2): + if isinstance(w1, paddle.Tensor): + concated_ffn1_weight = paddle.concat([w1, w2], axis=-1) + else: + concated_ffn1_weight = np.concatenate([w1, w2], axis=-1) + return concated_ffn1_weight + + identity = lambda x: x + + weight_mapping = {} + weight_mapping[self.lm_head.weight] = [ + identity, + [ + "lm_head.weight", + ], + ] + weight_mapping[self.llama.embed_tokens.weight] = [ + identity, + [ + "llama.embed_tokens.weight", + ], + ] + weight_mapping[self.llama.norm.weight] = [ + identity, + [ + "llama.norm.weight", + ], + ] + for idx in range(self.config.num_hidden_layers): + weight_mapping[self.llama.transformer_block.qkv_weights[idx]] = [ + _concat_qkv, + [ + f"llama.layers.{idx}.self_attn.q_proj.weight", + f"llama.layers.{idx}.self_attn.k_proj.weight", + 
f"llama.layers.{idx}.self_attn.v_proj.weight", + ], + ] + weight_mapping[self.llama.transformer_block.ffn1_weights[idx]] = [ + _concat_ffn1, + [ + f"llama.layers.{idx}.mlp.gate_proj.weight", + f"llama.layers.{idx}.mlp.up_proj.weight", + ], + ] + weight_mapping[self.llama.transformer_block.linear_weights[idx]] = [ + identity, + [ + f"llama.layers.{idx}.self_attn.o_proj.weight", + ], + ] + weight_mapping[self.llama.transformer_block.ffn2_weights[idx]] = [ + identity, + [ + f"llama.layers.{idx}.mlp.down_proj.weight", + ], + ] + weight_mapping[self.llama.transformer_block.ln_scales[idx]] = [ + identity, + [ + f"llama.layers.{idx}.input_layernorm.weight", + ], + ] + weight_mapping[self.llama.transformer_block.ffn_ln_scales[idx]] = [ + identity, + [ + f"llama.layers.{idx}.post_attention_layernorm.weight", + ], + ] + return weight_mapping diff --git a/examples/RLHF/models/model_pp.py b/examples/RLHF/models/model_pp.py index f955ad4bc6a3..57bcf9f465cf 100644 --- a/examples/RLHF/models/model_pp.py +++ b/examples/RLHF/models/model_pp.py @@ -12,10 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import importlib -import inspect -import types - import paddle import paddle.nn as nn from paddle.distributed.fleet.meta_parallel import LayerDesc @@ -28,217 +24,22 @@ return_args, ) -from .ppo_model_utils import RLHFPPOMixedLoss, RLHFValueLoss, create_loss +from .pp_model_utils import fwd_args_to_dict, get_expected_keys, pad_batches_inputs +from .ppo_model_utils import ( + RLHFPPOMixedLoss, + RLHFValueLoss, + create_loss, + make_position_ids, +) from .score_model_utils import ScoreModelMixin - -def print_patch(func, output, *args, **kwargs): - return - print("=" * 20, func.__name__, output) - - -def fwd_step_patch(func, output, self, *args, **kwargs): - # training patch - if self.training and self.is_pipeline_last_stage(): - if getattr(self, "_step_losses", None): - self._step_losses.append(output.detach()) - else: - self._step_losses = [output.detach()] - - -# def fwd_step_eval_patch(func, output, self, *args, **kwargs): -# # eval patch for actor/reference model -# logits = output -# # sequence = self. 
-# log_probs = gather_log_probabilities(logits[:, :-1], sequence[:, 1:]) -# if self.is_pipeline_first_stage(): -# if getattr(self, "_step_losses", None): -# self._step_losses.append(output.detach()) -# else: -# self._step_losses = [output.detach()] -# print("=" * 20, "fwd_step_patch", len(self._step_losses)) - - -def make_wrapper(func, pre_patch=None, post_patch=None): - def wrapper(*args, **kwargs): - if pre_patch is not None: - pre_patch(func, None, *args, **kwargs) - output = func(*args, **kwargs) - # print("=" * 20, func.__name__, output) - if post_patch is not None: - post_patch(func, output, *args, **kwargs) - return output - - return wrapper - - -funcs = [ - paddle.distributed.fleet.meta_parallel.pp_utils.p2p_communication.P2pHelper.send_forward_recv_backward, - paddle.distributed.fleet.meta_parallel.pp_utils.p2p_communication.P2pHelper.send_backward_recv_forward, - paddle.distributed.fleet.model.PipelineParallel._backward_step, - paddle.distributed.fleet.meta_parallel.pp_utils.p2p_communication.P2pHelper.recv_backward, - paddle.distributed.fleet.meta_parallel.pp_utils.p2p_communication.P2pHelper.send_backward, - (paddle.distributed.fleet.model.PipelineParallel._forward_step, fwd_step_patch), - paddle.distributed.fleet.meta_parallel.pipeline_parallel.FakeMicroDataset._load_micro_batch, - paddle.distributed.fleet.meta_parallel.pp_utils.p2p_communication.P2pHelper.recv_forward, - paddle.distributed.fleet.meta_parallel.pp_utils.p2p_communication.P2pHelper.send_forward, - paddle.distributed.fleet.meta_parallel.pp_utils.p2p_communication.SendRecvMeta.recv_meta, - paddle.distributed.fleet.meta_parallel.pp_utils.p2p_communication.SendRecvMeta.send_meta, -] - -for func in funcs: - if isinstance(func, tuple): - fun, patch = func - else: - fun, patch = func, print_patch - module = importlib.import_module(fun.__module__) - cls_name = fun.__qualname__[: -len(fun.__name__) - 1] - wrap_fun = make_wrapper(fun, post_patch=patch) - cls_obj = getattr(module, cls_name) - setattr(cls_obj, fun.__name__, wrap_fun) - - -# _raw_load_micro_batch = paddle.distributed.fleet.meta_parallel.pipeline_parallel.FakeMicroDataset._load_micro_batch -# _raw_forward_step = paddle.distributed.fleet.model.PipelineParallel._forward_step -# _raw_recv_forward = paddle.distributed.fleet.meta_parallel.pp_utils.p2p_communication.P2pHelper.recv_forward -# _raw_send_forward = paddle.distributed.fleet.meta_parallel.pp_utils.p2p_communication.P2pHelper.send_forward -# _raw_recv_meta = paddle.distributed.fleet.meta_parallel.pp_utils.p2p_communication.SendRecvMeta.recv_meta -# _raw_send_meta = paddle.distributed.fleet.meta_parallel.pp_utils.p2p_communication.SendRecvMeta.send_meta - - -# def _load_micro_batch(self, micro_step): -# output = _raw_load_micro_batch(self, micro_step) -# print("=" * 20, "_load_micro_batch", output) -# return output - -# def _forward_step(self, input_tensor, micro_dataset, chunk_id=None): -# if True: # self.is_pipeline_first_stage(): -# print("=" * 20, "_forward_step input", input_tensor, self._p2p_helper._use_cache) -# output = _raw_forward_step(self, input_tensor, micro_dataset, chunk_id) -# print("=" * 20, "_forward_step output", output, self._p2p_helper._use_cache) -# return output - - -# def recv_forward(self, pp_first_stage, sync_recv=True): -# input_tensor = _raw_recv_forward(self, pp_first_stage, sync_recv) -# print("=" * 20, "recv_forward", input_tensor) -# return input_tensor - - -# def send_forward(self, output_tensor, pp_last_stage): -# output = _raw_send_forward(self, output_tensor, 
pp_last_stage) -# print("=" * 20, "send_forward", output_tensor) -# return output - - -# def recv_meta(self, group): -# output = _raw_recv_meta(self, group) -# print("=" * 20, "recv_meta", self.recv_shape_message, self.recv_dtype_message) -# return output - - -# def send_meta(self, tensor, group): -# output = _raw_send_meta(self, tensor, group) -# print("=" * 20, "send_meta", self.send_shape_message, self.send_dtype_message) -# return output - -# paddle.distributed.fleet.model.PipelineParallel._forward_step = _forward_step -# paddle.distributed.fleet.meta_parallel.pipeline_parallel.FakeMicroDataset._load_micro_batch = _load_micro_batch -# paddle.distributed.fleet.meta_parallel.pp_utils.p2p_communication.P2pHelper.recv_forward = recv_forward -# paddle.distributed.fleet.meta_parallel.pp_utils.p2p_communication.P2pHelper.send_forward = send_forward -# paddle.distributed.fleet.meta_parallel.pp_utils.p2p_communication.SendRecvMeta.recv_meta = recv_meta -# paddle.distributed.fleet.meta_parallel.pp_utils.p2p_communication.SendRecvMeta.send_meta = send_meta - - -def loss_fwd_wrapper(loss_maker): - def _wrapper(*args, **kwargs): - loss = loss_maker(*args, **kwargs) - ori_fwd = loss.forward - - def _fwd(self, output, label_info): - if isinstance(label_info, tuple): - loss = ori_fwd(self, output, *label_info) - else: - loss = ori_fwd(self, output, label_info) - return loss - - loss.forward = types.MethodType(_fwd, loss) - return loss - - return _wrapper - - -@paddle.no_grad() -def make_position_ids(attention_mask, source=None): - attention_mask_bool = attention_mask - attention_mask = attention_mask.cast(paddle.int64) - position_ids = attention_mask.cumsum(-1) - 1 - # Make padding positions in source be 0, since reward model use position_ids - # plus with padding size (number of 0s) in source to calculate end offsets. - # It does not matter when source is left padding and target is right padding - # which is the output of non-FuseMT generation, while when using FuseMT whose - # output is right padding source and right padding target, we have to set - # padding positions in source be 0 to make compatible. 
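# Illustrative sketch (hypothetical, not part of this patch): the position-id
# construction described in the comments above, shown for the simple case with
# no `source` argument. For a left-padded mask, cumsum-minus-one yields -1 at
# padding slots, which is then reset to 0.
import paddle

mask = paddle.to_tensor([[0, 0, 1, 1, 1]], dtype="int64")   # left padding
position_ids = mask.cumsum(-1) - 1                          # [[-1, -1, 0, 1, 2]]
position_ids = paddle.where(position_ids == -1, mask, position_ids)
print(position_ids.tolist())                                # [[0, 0, 0, 1, 2]]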
- if source is not None: - src_len = position_ids[:, source.shape[-1] - 1].unsqueeze(-1) - position_ids = paddle.where( - paddle.logical_and(paddle.logical_not(attention_mask_bool), position_ids <= src_len), - attention_mask, - position_ids, - ) - return position_ids - position_ids = paddle.where(position_ids == -1, attention_mask, position_ids) - return position_ids - - -@paddle.no_grad() -def pad_batches_inputs(inputs, padding_value=0, max_len=None, pad_len=None): - """Pad length for tensors shaped [bs, seq_len] to [bs, max(seq_lens)]""" - if pad_len is not None: - pad_len = [pad_len] * len(inputs) if isinstance(pad_len, int) else pad_len - elif max_len is None: - # max_len = max([x.shape[-1] for x in inputs if x is not None]) - max_len = max([x.shape[-1] if isinstance(x, paddle.Tensor) else 0 for x in inputs]) - pad_len = [max_len - x.shape[-1] if isinstance(x, paddle.Tensor) else 0 for x in inputs] - for i in range(len(inputs)): - x = inputs[i] - # if x is None or x.shape[-1] == max_len: - if not isinstance(x, paddle.Tensor) or x.shape[-1] == max_len: - continue - inputs[i] = paddle.concat([x, paddle.full([x.shape[0], pad_len[i]], padding_value, dtype=x.dtype)], -1) - return inputs - - -def get_expected_keys(inputs, keys): - ret = tuple([inputs.get(k, None) for k in keys if k in inputs]) - if len(ret) == 1: - ret = ret[0] - return ret - - # patches for base pipe model # non-pipe model class, can be used to parse and convert forward args +# mainly used for generation with PipelienParallel model LlamaForCausalLMPipe._non_pipe_model_class = LlamaForCausalLM LlamaForCausalLMPipe._non_pipe_decoder_layer_class = LlamaDecoderLayer -def fwd_args_to_dict(fun): - def _impl(self, *args, **kwargs): - try: - return fun(self, *args, **kwargs) - except TypeError: - # otherwise, inputs is any valid format of non_pipe_model forward args, - # convert to dict, to support more args format in prediction_pipeline_step - - arg_dict = ( - inspect.signature(self._non_pipe_model_class.forward).bind(*((self,) + args), **kwargs).arguments - ) - arg_dict.pop("self") - return fun(self, arg_dict) - - return _impl - - class LlamaPolicyPipe(LlamaForCausalLMPipe): # TODO(guosheng): maybe make a Mixin is better @@ -339,11 +140,16 @@ def get_loss_fn(self, config): @property def head_out_meta(self): + """mainly for eval/generation with PipelineParallel""" # None means to use actual data info return paddle.static.InputSpec(shape=[None, None, self.config.vocab_size], dtype=None) class _LlamaRMSNormPipe(LlamaRMSNormPipe): + """ + We need position_ids for reward model, so wrap LlamaRMSNormPipe to pass position_ids + """ + def __init__(self, config): super().__init__(config) diff --git a/examples/RLHF/models/pp_model_utils.py b/examples/RLHF/models/pp_model_utils.py new file mode 100644 index 000000000000..1444cdbdd2e2 --- /dev/null +++ b/examples/RLHF/models/pp_model_utils.py @@ -0,0 +1,92 @@ +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import importlib +import inspect + +import paddle + + +def fwd_step_patch(func, output, self, *args, **kwargs): + # training patch + if self.training and self.is_pipeline_last_stage(): + if getattr(self, "_step_losses", None): + self._step_losses.append(output.detach()) + else: + self._step_losses = [output.detach()] + + +def make_wrapper(func, pre_patch=None, post_patch=None): + def wrapper(*args, **kwargs): + if pre_patch is not None: + pre_patch(func, None, *args, **kwargs) + output = func(*args, **kwargs) + if post_patch is not None: + post_patch(func, output, *args, **kwargs) + return output + + return wrapper + + +funcs = [(paddle.distributed.fleet.model.PipelineParallel._forward_step, fwd_step_patch)] + +for func in funcs: + fun, patch = func + module = importlib.import_module(fun.__module__) + cls_name = fun.__qualname__[: -len(fun.__name__) - 1] + wrap_fun = make_wrapper(fun, post_patch=patch) + cls_obj = getattr(module, cls_name) + setattr(cls_obj, fun.__name__, wrap_fun) + + +@paddle.no_grad() +def pad_batches_inputs(inputs, padding_value=0, max_len=None, pad_len=None): + """Pad length for tensors shaped [bs, seq_len] to [bs, max(seq_lens)]""" + if pad_len is not None: + pad_len = [pad_len] * len(inputs) if isinstance(pad_len, int) else pad_len + elif max_len is None: + # max_len = max([x.shape[-1] for x in inputs if x is not None]) + max_len = max([x.shape[-1] if isinstance(x, paddle.Tensor) else 0 for x in inputs]) + pad_len = [max_len - x.shape[-1] if isinstance(x, paddle.Tensor) else 0 for x in inputs] + for i in range(len(inputs)): + x = inputs[i] + # if x is None or x.shape[-1] == max_len: + if not isinstance(x, paddle.Tensor) or x.shape[-1] == max_len: + continue + inputs[i] = paddle.concat([x, paddle.full([x.shape[0], pad_len[i]], padding_value, dtype=x.dtype)], -1) + return inputs + + +def get_expected_keys(inputs, keys): + ret = tuple([inputs.get(k, None) for k in keys if k in inputs]) + if len(ret) == 1: + ret = ret[0] + return ret + + +def fwd_args_to_dict(fun): + def _impl(self, *args, **kwargs): + try: + return fun(self, *args, **kwargs) + except TypeError: + # otherwise, inputs is any valid format of non_pipe_model forward args, + # convert to dict, to support more args format in prediction_pipeline_step + # assume no arg is inspect.Parameter.VAR_KEYWORD + arg_dict = ( + inspect.signature(self._non_pipe_model_class.forward).bind(*((self,) + args), **kwargs).arguments + ) + arg_dict.pop("self") + return fun(self, arg_dict) + + return _impl diff --git a/examples/RLHF/models/ppo_model_utils.py b/examples/RLHF/models/ppo_model_utils.py index 4d8dd8f7d903..e7146adaa9f7 100644 --- a/examples/RLHF/models/ppo_model_utils.py +++ b/examples/RLHF/models/ppo_model_utils.py @@ -71,7 +71,22 @@ def loss_fwd(self, predict, labels): return loss_cls -def create_loss(loss_cls, config, extra_args): +def create_loss(loss_cls, config, extra_args, merge_labels=None): + """ + loss_cls(paddle.nn.Layer): loss class + config(PratrainedConfig): model config, to be consistent with loss defined + in transformers + extra_args(dict): create loss with more args not in config + merge_labels: use a wrapped loss_cls whose label args are merged into one arg, + this is useful to PipelineParallel and trainer.criterion since they only + support loss format corresponding to this format. 
+ """ + # TODO(guosheng): merge_labels if loss_cls not + ori_fwd = loss_cls.forward + if merge_labels: + fwd_params = inspect.signature(ori_fwd).parameters + if len(fwd_params.keys()) > 3: # merge_fwd_labels has not done + loss_cls = merge_fwd_labels(loss_cls) # forward(self, predict, label1, label2, ...) loss_arg_names = list(inspect.signature(loss_cls.__init__).parameters.keys())[2:] if isinstance(extra_args, dict): @@ -79,7 +94,32 @@ def create_loss(loss_cls, config, extra_args): else: # create from TrainingArguments loss_kwargs = dict([(name, getattr(extra_args, name)) for name in loss_arg_names if hasattr(extra_args, name)]) - return loss_cls(config, **loss_kwargs) + loss = loss_cls(config, **loss_kwargs) + loss_cls.forward = ori_fwd + return loss + + +@paddle.no_grad() +def make_position_ids(attention_mask, source=None): + attention_mask_bool = attention_mask + attention_mask = attention_mask.cast(paddle.int64) + position_ids = attention_mask.cumsum(-1) - 1 + # Make padding positions in source be 0, since reward model use position_ids + # plus with padding size (number of 0s) in source to calculate end offsets. + # It does not matter when source is left padding and target is right padding + # which is the output of non-FuseMT generation, while when using FuseMT whose + # output is right padding source and right padding target, we have to set + # padding positions in source be 0 to make compatible. + if source is not None: + src_len = position_ids[:, source.shape[-1] - 1].unsqueeze(-1) + position_ids = paddle.where( + paddle.logical_and(paddle.logical_not(attention_mask_bool), position_ids <= src_len), + attention_mask, + position_ids, + ) + return position_ids + position_ids = paddle.where(position_ids == -1, attention_mask, position_ids) + return position_ids def gather_log_probabilities(logits: paddle.Tensor, labels: paddle.Tensor) -> paddle.Tensor: @@ -108,20 +148,7 @@ def actor_loss_fn( ) return paddle.sum(paddle.maximum(pg_loss1, pg_loss2) * mask) / mask.sum() - def forward(self, logits, input_ids, old_log_probs, reward_advantages, sequence_mask, start=None): - # tgt_mask or sequence_mask according to length - - # When used in pipe mode, batches among accumulation steps should be paded. - # Hard to pad acorss batches, think in some cases one batch might have the - # longest prompt+target length but the shortest target lengh, which might - # cause mismatch between inputs with prompt+target length and labels with - # target length. NOTE: Thus, we might make all fields be prompt+target - # length rather rather than target and company an extra start input. - # However trick can be used in pipe_model._prepare_pipeline_inputs_func, - # label fields with target length such as old_log_probs/reward_advantages/sequence_mask - # not need to join comm and thus there is no need to keep same shape among - # batches of accumulation steps, they just need to pad as prompt+target - # fields such as input_ids. 
+ def forward(self, logits, input_ids, old_log_probs, reward_advantages, sequence_mask): log_probs = gather_log_probabilities(logits[:, :-1], input_ids[:, 1:]) if log_probs.shape[1] == old_log_probs.shape[1]: # labels (old_log_probs, reward_advantages, sequence_mask) has @@ -136,9 +163,6 @@ def forward(self, logits, input_ids, old_log_probs, reward_advantages, sequence_ else: # labels (old_log_probs, reward_advantages, sequence_mask) has tgt length log_probs = log_probs[:, -old_log_probs.shape[1] :] - # if start is not None: - # old_log_probs = old_log_probs[:, start:] - # sequence_mask = sequence_mask[:, start:] actor_loss = self.actor_loss_fn( log_probs, old_log_probs, @@ -159,9 +183,6 @@ def __init__(self, config, ptx_coeff=16, clip_range_ratio=0.2): self.sft_criterion = PretrainingCriterion(config) def forward(self, logits, labels, input_ids, old_log_probs, reward_advantages, sequence_mask): - # def forward(self, logits, label_info): - # labels, input_ids, old_log_probs, reward_advantages, sequence_mask = label_info - logits = logits if isinstance(logits, paddle.Tensor) else logits[0] loss = None # sft, pt loss @@ -198,16 +219,7 @@ def critic_loss_fn( vf_loss2 = paddle.square(values_clipped - returns) return 0.5 * paddle.sum(paddle.maximum(vf_loss1, vf_loss2) * mask) / mask.sum() - def forward( - self, - reward_values, - old_reward_values, - reward_returns, - sequence_mask, - start=None, - # label_info, - ): - # old_reward_values, reward_returns, sequence_mask = label_info + def forward(self, reward_values, old_reward_values, reward_returns, sequence_mask): reward_values = reward_values if isinstance(reward_values, paddle.Tensor) else reward_values[0] reward_values = reward_values.squeeze(axis=-1)[:, :-1] if reward_values.shape[1] == old_reward_values.shape[1]: @@ -226,9 +238,6 @@ def forward( # labels (old_reward_values, reward_returns, sequence_mask) has # tgt length reward_values = reward_values[:, -old_reward_values.shape[1] :] - # if start is not None: - # old_reward_values = old_reward_values[:, start:] - # sequence_mask = sequence_mask[:, start:] reward_critic_loss = self.critic_loss_fn( reward_values, old_reward_values, diff --git a/examples/RLHF/ppo_trainer.py b/examples/RLHF/ppo_trainer.py index 1fca672aa3f0..f784ffdd16f8 100644 --- a/examples/RLHF/ppo_trainer.py +++ b/examples/RLHF/ppo_trainer.py @@ -12,480 +12,82 @@ # See the License for the specific language governing permissions and # limitations under the License. 
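# Illustrative sketch (hypothetical, not part of this patch): a compact
# restatement of the clipped value loss computed by RLHFValueLoss.critic_loss_fn
# above. The clip_range_value of 5.0 and all shapes below are made-up numbers;
# the real value comes from the loss constructor.
import paddle

def clipped_value_loss(values, old_values, returns, mask, clip_range_value=5.0):
    # equivalent to clipping `values` into [old - eps, old + eps]
    values_clipped = old_values + paddle.clip(values - old_values, -clip_range_value, clip_range_value)
    vf_loss1 = paddle.square(values - returns)
    vf_loss2 = paddle.square(values_clipped - returns)
    return 0.5 * paddle.sum(paddle.maximum(vf_loss1, vf_loss2) * mask) / mask.sum()

values = paddle.randn([2, 4])
loss = clipped_value_loss(values, values + 0.1, paddle.zeros([2, 4]), paddle.ones([2, 4]))
print(float(loss))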
-import contextlib import copy -import inspect import itertools import math import os import time -from contextlib import contextmanager from typing import Any, Callable, Dict, List, Optional, Tuple, Union -import numpy as np import paddle import paddle.nn as nn -import paddle.nn.functional as F -import tqdm from data import DummyDataset, PromptOnlyBatch -from models.model_pp import make_position_ids -from models.ppo_model_utils import RLHFPPOMixedLoss, RLHFValueLoss, create_loss +from infer_utils import InferEvalModel, infer_guard +from models.ppo_model_utils import ( + RLHFPPOMixedLoss, + RLHFValueLoss, + create_loss, + gather_log_probabilities, + make_position_ids, +) from paddle.distributed import fleet from paddle.io import DataLoader, Dataset, DistributedBatchSampler -from paddle.utils import map_structure, try_import +from paddle.utils import map_structure from rich.console import Console from rich.table import Table +from trainer_utils import ( + MuteDefaultFlowCallback, + PipeEvalModel, + batch_retokenize, + guard_set_args, + is_same_tokenizer, +) from paddlenlp.data import DataCollator from paddlenlp.generation import GenerationConfig -from paddlenlp.generation.utils import GenerationMixin from paddlenlp.trainer.trainer import ( - TRAINER_STATE_NAME, EvalLoopOutput, EvalPrediction, - HybridParallelOptimizer, - NlpDistributedBatchSampler, ShardingOption, Trainer, TrainerCallback, - TrainerControl, - TrainerState, TrainingArguments, - _obtain_optimizer_parameters_list, - distributed_file, - distributed_isfile, - fused_allreduce_gradients, logger, - reshard_util, speed_metrics, - split_inputs_sequence_dim, -) -from paddlenlp.transformers import BatchEncoding, PretrainedModel, PretrainedTokenizer -from paddlenlp.transformers.configuration_utils import PretrainedConfig -from paddlenlp.transformers.model_outputs import ModelOutput -from paddlenlp.transformers.model_utils import dtype_guard -from paddlenlp.transformers.tokenizer_utils_base import ( - PaddingStrategy, - TruncationStrategy, ) +from paddlenlp.transformers import PretrainedModel, PretrainedTokenizer -def batch_retokenize( - input_ids: paddle.Tensor, - src_tokenizer: PretrainedTokenizer, - dest_tokenizer: PretrainedTokenizer, - *, - padding: bool | str | PaddingStrategy = PaddingStrategy.LONGEST, - truncation: bool | str | TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, - skip_special_tokens: bool = True, -) -> BatchEncoding: - """Re-tokenize a batch of input ids from one tokenizer to another.""" - output = dest_tokenizer( - [ - text + dest_tokenizer.eos_token - for text in src_tokenizer.batch_decode( - input_ids, - skip_special_tokens=skip_special_tokens, - ) - ], - padding=padding, - truncation=truncation, - return_tensors="pd", - ) - return output - - -def gather_log_probabilities(logits: paddle.Tensor, labels: paddle.Tensor) -> paddle.Tensor: - """Gather log probabilities of the given labels from the logits.""" - log_probs = F.log_softmax(logits, axis=-1) - log_probs_labels = paddle.take_along_axis(log_probs, axis=-1, indices=labels.unsqueeze(axis=-1)) - return log_probs_labels.squeeze(axis=-1) - - -def init_train_model_opt( - self: Trainer, max_steps: int, resume_from_checkpoint: bool = False, clear_master_weight: bool = False -) -> PretrainedModel: - # Copy of model/optimizer init and resuming related code in `Trainer.train`. - # NOTE: this `_load_from_checkpoint` is indeed to load model states in the - # following elif-else branches, though they are apart away in `Trainer.train`. 
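# Illustrative sketch (hypothetical, not part of this patch): the per-token
# log-probability gathering that gather_log_probabilities (now imported from
# models.ppo_model_utils above) performs; shapes below are toy values.
import paddle
import paddle.nn.functional as F

def gather_log_probs(logits, labels):
    log_probs = F.log_softmax(logits, axis=-1)
    picked = paddle.take_along_axis(log_probs, axis=-1, indices=labels.unsqueeze(axis=-1))
    return picked.squeeze(axis=-1)

logits = paddle.randn([1, 5, 8])               # [batch, seq_len, vocab]
labels = paddle.randint(0, 8, shape=[1, 5])    # sampled token ids
print(gather_log_probs(logits, labels).shape)  # [1, 5]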
- if not self.args.should_load_sharding_stage1_model: - self._load_from_checkpoint(resume_from_checkpoint) - - # delay_optimizer_creation = ( - # self.sharding is not None - # and ShardingOption.SHARD_OP in self.args.sharding - # ) - delay_optimizer_creation = False - - if not delay_optimizer_creation: - self.create_optimizer_and_scheduler(num_training_steps=max_steps) - - if self.args.should_load_sharding_stage1_model: - model = self._wrap_model_and_load_sharded_checkpoint(resume_from_checkpoint) - elif self.args.should_save_sharding_stage1_model: - # In the non-sharded mode, should invoke _load_from_checkpoint before _wrap_model. - # In this mode, the rank0 load all params and the _wrap_model implicitly broadcast params from rank0 to the other ranks. - model = self._wrap_model(self.model_wrapped) - if self.sharding_io is not None: - assert delay_optimizer_creation is False, "delay_optimizer_creation should be False" - # the self.optimizer should be wrapped and it is done in _wrap_model - self.sharding_io.set_optimizer(self.optimizer) - # for the rest of this function `model` is the outside model, whether it was wrapped or not - if model is not self.model: - self.model_wrapped = model - if delay_optimizer_creation: - self.create_optimizer_and_scheduler(num_training_steps=max_steps) - self._load_optimizer_and_scheduler(resume_from_checkpoint) - else: - model = self._wrap_model(self.model_wrapped) - # for the rest of this function `model` is the outside model, whether it was wrapped or not - if model is not self.model: - self.model_wrapped = model - if delay_optimizer_creation: - self.create_optimizer_and_scheduler(num_training_steps=max_steps) - self._load_optimizer_and_scheduler(resume_from_checkpoint) - - if ShardingOption.FULL_SHARD in self.args.sharding and clear_master_weight: - # for inference model to use Trainer sharding stage3, clear master_weight - # which is created in GroupShardedStage3.__init__ - self.optimizer._master_weights = None - - if self.args.device == "npu" and self.args.flatten_param_grads: - from .plugins.npu_plugin import npu_accelerate_plugin - - npu_accelerate_plugin(self.optimizer) - - return model - - -def init_train_state( - self: Trainer, - resume_from_checkpoint: bool, - train_dataloader: DataLoader, - max_steps: int, - num_train_epochs: int, - num_update_steps_per_epoch: int, -): - args = self.args - - self.state = TrainerState() - self.state.epoch = 0 - epochs_trained = 0 - steps_trained_in_current_epoch = 0 - steps_trained_progress_bar = None - - # Check if continuing training from a checkpoint - if resume_from_checkpoint is not None and distributed_isfile( - os.path.join(resume_from_checkpoint, TRAINER_STATE_NAME) - ): - self.state = TrainerState.load_from_json( - distributed_file(os.path.join(resume_from_checkpoint, TRAINER_STATE_NAME)) - ) - epochs_trained = self.state.global_step // num_update_steps_per_epoch - if not args.ignore_data_skip: - steps_trained_in_current_epoch = self.state.global_step % (num_update_steps_per_epoch) - steps_trained_in_current_epoch *= args.gradient_accumulation_steps - else: - steps_trained_in_current_epoch = 0 - - logger.info(" Continuing training from checkpoint, will skip to saved global_step") - logger.info(f" Continuing training from epoch {epochs_trained}") - logger.info(f" Continuing training from global step {self.state.global_step}") - if not args.ignore_data_skip: - logger.info( - f" Will skip the first {epochs_trained} epochs then the first {steps_trained_in_current_epoch} " - "batches in the first epoch. 
If this takes a lot of time, you can add the `--ignore_data_skip` " - "flag to your launch command, but you will resume the training on data already seen by your model." - ) - if self.is_local_process_zero() and not args.disable_tqdm: - steps_trained_progress_bar = tqdm(total=steps_trained_in_current_epoch) - steps_trained_progress_bar.set_description("Skipping the first batches") - if not args.ignore_data_skip: - if isinstance(train_dataloader, paddle.io.DataLoader) and isinstance( - train_dataloader.batch_sampler, NlpDistributedBatchSampler - ): - consumed_samples = ( - self.state.global_step - * args.train_batch_size - * args.gradient_accumulation_steps - * args.dataset_world_size - ) - train_dataloader.batch_sampler.set_epoch(consumed_samples=consumed_samples) - logger.info(f"Set DistributedBatchSampler consumed_samples to {consumed_samples}") - - self.state.max_steps = int(max_steps) - self.state.num_train_epochs = num_train_epochs - self.state.is_local_process_zero = self.is_local_process_zero() - self.state.is_world_process_zero = self.is_world_process_zero() - - return epochs_trained, steps_trained_in_current_epoch, steps_trained_progress_bar - - -def init_train_log( - self: Trainer, - num_examples: int, - num_train_epochs: int, - total_train_batch_size: int, - max_steps: int, - num_train_samples: int, - model: PretrainedModel, -): - args = self.args - - logger.info("***** Running training *****") - logger.info(f" Num examples = {num_examples:,}") - logger.info(f" Num Epochs = {num_train_epochs}") - logger.info(f" Instantaneous batch size per device = {args.per_device_train_batch_size}") - logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_train_batch_size}") - logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") - logger.info(f" Total optimization steps = {max_steps:,}") - logger.info(f" Total num train samples = {num_train_samples:,}") - # per_device_trainable_numel = sum(p.numel().item() for p in model.parameters() if not p.stop_gradient) - # TODO: Temporary fix since Tensor.numel() not supported in distributed mode - per_device_trainable_numel = sum(np.prod(p.shape) for p in model.parameters() if not p.stop_gradient) - logger.info(f" Number of trainable parameters = {per_device_trainable_numel:,} (per device)") - if self.args.use_hybrid_parallel: - # todo fix for pipeline_parallel_degree - parts_num = max(self.args.tensor_parallel_degree, 1) * max(self.args.pipeline_parallel_degree, 1) - if parts_num > 1: - all_reduce_dtype = "int64" - if paddle.get_device().split(":")[0] in ["npu", "xpu"]: - # TODO(duanyanhui): fix when NPU all_reduce supports int64 - all_reduce_dtype = "float32" - trainable_numel_tensor = paddle.to_tensor(per_device_trainable_numel, dtype=all_reduce_dtype) - paddle.distributed.all_reduce(trainable_numel_tensor) - trainable_numel = int(trainable_numel_tensor.item()) // self.args.dataset_world_size - # the numel is roughly, because the tensor parallel still hold own bias or layer_norm weight without splited - # so, the trainable numel is a little bigger than real. - logger.info(f" Number of trainable parameters = {trainable_numel:,} (all devices, roughly)") - - -def full_training_step(self: Trainer, inputs: Dict[str, paddle.Tensor], **kwargs): +class StepTrainer(Trainer): """ - Just a copy of single training step complete code in Trainer.train while loop - which including forward+backward+step, while wraps the inputs and outputs to - make the complicated copied code no need to change. 
Maybe a better way is to - add fine-grained methods including these steps to Trainer which is similar to - DeepSpeed engine. + Trainer enhanced with step-level training combining with patches of Trianer. + We can use this to do training whose step is composed of multi models (by + multiple instances of StepTrainer, such as PPO. Additionally, using a mixed + loss and get the separated loss metrics is supported. """ - # TODO(guosheng): step, steps_trained_in_current_epoch and steps_trained_progress_bar - # should use reference since they would be overwrite. - # for state update - epoch = kwargs.get("epoch", 0) - step = kwargs.get("step", 0) - steps_in_epoch = kwargs.get("steps_in_epoch", 0) - step_control = kwargs.get("step_control", 0) - # for step and progress update when resuming data - train_dataloader = kwargs.get("train_dataloader", None) - resume_from_checkpoint = kwargs.get("resume_from_checkpoint", None) - steps_trained_in_current_epoch = kwargs.get("steps_trained_in_current_epoch", 0) - steps_trained_progress_bar = kwargs.get("steps_trained_progress_bar", None) - # for eval output ignore to gather - ignore_keys_for_eval = kwargs.get("ignore_keys_for_eval", None) - tr_loss = kwargs.get("tr_loss", 0.0) - model = kwargs.get("model", self.model_wrapped) - - args = self.args - - if self.args.use_hybrid_parallel and self.args.sep_parallel_degree > 1: - inputs = split_inputs_sequence_dim(inputs) - self.timers and self.timers("read-data").stop() - os.environ["TRAINER_GLOBAL_STEP"] = str(self.state.global_step) - self.callback_handler.on_load_data_end(args, self.state, self.control, inputs=inputs) - - # Skip past any already trained steps if resuming training - # for paddlenlp.utils.batch_sampler.DistributedBatchSampler - # We use consumed_samples to reset the status - if isinstance(train_dataloader, paddle.io.DataLoader) and isinstance( - train_dataloader.batch_sampler, NlpDistributedBatchSampler - ): - if step == 0: - if steps_trained_progress_bar is not None: - steps_trained_progress_bar.update(steps_trained_in_current_epoch) - steps_trained_progress_bar.close() - steps_trained_progress_bar = None - self._load_rng_state(resume_from_checkpoint) - step += steps_trained_in_current_epoch - elif steps_trained_in_current_epoch > 0: - steps_trained_in_current_epoch -= 1 - if steps_trained_progress_bar is not None: - steps_trained_progress_bar.update(1) - if steps_trained_in_current_epoch == 0: - self._load_rng_state(resume_from_checkpoint) - # continue - final_local_vars = locals() - for k in kwargs.keys(): - if k in final_local_vars: - kwargs[k] = final_local_vars[k] - return kwargs - elif steps_trained_progress_bar is not None: - steps_trained_progress_bar.close() - steps_trained_progress_bar = None - - if step_control % args.gradient_accumulation_steps == 0: - self.control = self.callback_handler.on_step_begin(args, self.state, self.control) - self.timers and self.timers("forward-backward").start() - - dp_enabled = self.args.data_parallel_degree > 1 if self.args.use_hybrid_parallel else args.local_rank != -1 - forbidden_no_sync = False - # stage2 and stage3 should not no_sync, because the is no DDP wrapper and no_sync API - # hybrid_parallel (tp or pp or sharding stage 1) should not no_sync - if self.args.use_hybrid_parallel: - forbidden_no_sync = True - - availiable_no_sync = dp_enabled and not forbidden_no_sync - - is_no_sync = ( - ((step_control + 1) % args.gradient_accumulation_steps != 0) - and availiable_no_sync - and args._no_sync_in_gradient_accumulation - ) or (args.recompute and 
availiable_no_sync) - # sharding - # stage1. the same as ddp - # stage2. manualy collect gradient on dp group - - dp_master_grad = self.args.world_size > 1 and self.args.amp_master_grad and not self.args.use_hybrid_parallel - if dp_master_grad: - is_no_sync = True - - if is_no_sync: - # Avoid unnecessary DDP synchronization since there will be no backward pass on this example. - with model.no_sync(): - tr_loss_step = self.training_step(model, inputs) - else: - tr_loss_step = self.training_step(model, inputs) - - tr_loss += tr_loss_step - - if (step_control + 1) % args.gradient_accumulation_steps == 0 or ( - # last step in epoch but step is always smaller than gradient_accumulation_steps - steps_in_epoch <= args.gradient_accumulation_steps - and (step + 1) == steps_in_epoch - ): - if self.args.pipeline_parallel_degree <= 1 and self._enable_delay_scale_loss(): - tr_loss /= self.args.gradient_accumulation_steps - - self.timers and self.timers("forward-backward").stop() - # Maunally collect gradients - # Case 1: Use recompute and dp - # Case 2: Hack dp with master_grad - # Case 3: Pipeline or sharding overlap - # local_rank != -1 don't means dp in networks. - self.timers and self.timers("all-reduce").start() - - # Case 1: Use recompute and dp / sharding stage1, - # manualy collect gradient for dp. - if args.recompute and availiable_no_sync: - fused_allreduce_gradients(list(model.parameters()), None) - - # Case 2: hack dp with master_grad - if dp_master_grad and not (args.recompute and availiable_no_sync): - fused_allreduce_gradients(list(model.parameters()), None) - - # Pipeline parallel mode, handle gradient reduce here to overlap - pipeline_parallel_config = ( - set(args.pipeline_parallel_config.split(" ")) if args.pipeline_parallel_degree > 1 else set() - ) - enable_dp_comm_overlap = "enable_dp_comm_overlap" in pipeline_parallel_config - enable_release_grads = "enable_release_grads" in pipeline_parallel_config - - # Case 3: Pipeline parallel mode, overlap with dp - if isinstance(self.optimizer, HybridParallelOptimizer) and not self.do_grad_scaling: - parameters_list = _obtain_optimizer_parameters_list(self.optimizer._inner_opt) - - if not enable_dp_comm_overlap: - if self.optimizer._sharding_enable: - assert reshard_util.is_sharding_opt(self.optimizer) - self.optimizer._inner_opt.reduce_gradients(list(parameters_list), self.optimizer._hcg) - - if self.optimizer._dp_enable or getattr(self.optimizer, "_sep_enable", False): - fused_allreduce_gradients(list(parameters_list), self.optimizer._hcg) - - self.timers and self.timers("all-reduce").stop() - self.timers and self.timers("optimizer-step").start() - - if self.args.gradient_accumulation_steps > 1 and self._enable_delay_scale_loss(): - for p in model._layers.parameters(): - with paddle.no_grad(): - if hasattr(p, "main_grad") and p.main_grad is not None: - assert p.grad is None - p.main_grad.scale_(1.0 / self.args.gradient_accumulation_steps) - elif p.grad is not None: - p.grad.scale_(1.0 / self.args.gradient_accumulation_steps) - - # Optimizer step - self.callback_handler.on_optimizer_begin( - args, self.state, self.control, scaler=self.scaler if self.do_grad_scaling else None - ) - optimizer_was_run = True - if self.do_grad_scaling: - scale_before = paddle.assign(self.scaler._scale) - self.scaler.step(self.optimizer) - self.scaler.update() - scale_after = self.scaler._scale - optimizer_was_run = not self.scaler._cache_founf_inf - if not optimizer_was_run: - scale_before_value = scale_before.cpu().numpy() - scale_after_value = 
scale_after.cpu().numpy() - logger.warning( - f"optimizer not run, scale_before: {scale_before_value[0]}, scale_after: {scale_after_value[0]}" - ) - elif isinstance(self.optimizer, HybridParallelOptimizer): - self.optimizer._step(parameters_list) - else: - self.optimizer.step() - - self.timers and self.timers("optimizer-step").stop() - - if optimizer_was_run: - self.lr_scheduler.step() - - if enable_release_grads and args.pipeline_parallel_degree > 1: - self.optimizer.clear_grad(set_to_zero=False) - for _, buffers in model._chunk_2_comm_buffers.items(): - for buffer in buffers: - buffer._clear_grad_storage() - else: - self.optimizer.clear_grad() + # used to create criterion for trainer + loss_cls: type + # Moreover, a model/StepTrainer instance may use a mixed loss which uses a + # different loss for different step and inputs, while we often want to get + # the separated loss metric. We use a callable discriminator using inputs + # (dict) as arguments and returning corresponding loss name to identify + # current loss. NOTE: please make the loss name ends with "_loss". `tr_loss` + # is the default loss name used in trainer.train. + loss_identifier: callable + # refer to mark_step_loss. NOTE: This is transparent to users + loss_step_indice: Dict + # When using multiple instances of StepTrainer collaborate to do one training + # step, each should use its own vars such as loss/model/step_control which are + # local vars in Trainer.train, we define these vars by `train_step_vars`. They + # are vars needed by full_training_step for training control, as following: + # tr_loss, model, epoch, step, step_control. NOTE: This is transparent to users. + # some vars such as `epoch` are meaningless, they are needed just because + # full_training_step copies code from Trainer.train which is designed for + # complete training process. + # TODO(guosheng): use namedtuple or dataclass to make it more readable. 
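# Illustrative sketch (hypothetical, not part of this patch): one possible shape
# of the dataclass hinted at by the TODO above; the field names mirror the dict
# keys used by get_train_step_vars below, nothing here is the actual code.
from dataclasses import dataclass, field
from typing import Any, Dict

@dataclass
class TrainStepVars:
    epoch: int = 0                # meaningless for step training
    step: int = 0                 # meaningless for step training
    steps_in_epoch: int = 100000  # meaningless for step training
    step_control: int = 0         # controls gradient accumulation boundaries
    model: Any = None             # the wrapped (distributed) model
    losses: Dict[str, Any] = field(default_factory=dict)  # per-name loss vars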
+ train_step_vars: Dict - self.callback_handler.on_optimizer_end( - args, self.state, self.control, scaler=self.scaler if self.do_grad_scaling else None - ) - - self.state.global_step += 1 - self.state.epoch = epoch + (step + 1) / steps_in_epoch - self.control = self.callback_handler.on_step_end(args, self.state, self.control) - self._maybe_log_save_evaluate(tr_loss, model, epoch, ignore_keys_for_eval, inputs=inputs) - self._print_timer() - step_control = 0 - else: - self.control = self.callback_handler.on_substep_end(args, self.state, self.control) - step_control += 1 - - if self.control.should_epoch_stop or self.control.should_training_stop: - # break - final_local_vars = locals() - for k in kwargs.keys(): - if k in final_local_vars: - kwargs[k] = final_local_vars[k] - return kwargs - self.timers and self.timers("read-data").start() - - final_local_vars = locals() - for k in kwargs.keys(): - if k in final_local_vars: - kwargs[k] = final_local_vars[k] - return kwargs - - -Trainer.init_train_model_opt = init_train_model_opt -Trainer.init_train_log = init_train_log -Trainer.init_train_state = init_train_state -Trainer.full_training_step = full_training_step - - -class PolicyTrainer(Trainer): def __init__( self, model: Union[PretrainedModel, nn.Layer] = None, @@ -500,8 +102,6 @@ def __init__( optimizers: Tuple[paddle.optimizer.Optimizer, paddle.optimizer.lr.LRScheduler] = (None, None), preprocess_logits_for_metrics: Callable[[paddle.Tensor, paddle.Tensor], paddle.Tensor] = None, ): - # only used for non-PipelineParallel models - criterion = create_loss(RLHFPPOMixedLoss, model.config, args) super().__init__( model, criterion, @@ -515,77 +115,114 @@ def __init__( optimizers, preprocess_logits_for_metrics, ) + # criterion is only used for non-PipelineParallel models. criterion is + # included in model for PipelineParallel. + if getattr(self, "loss_cls", None) and self.criterion is None: + self.criterion = self.create_criterion() - def _prepare_inputs(self, inputs: Dict[str, Union[paddle.Tensor, Any]]) -> Dict[str, Union[paddle.Tensor, Any]]: - inputs = super()._prepare_input(inputs) - label_names = self.criterion.__class__.label_names - # some data fields are used both in model and loss - shared_fields = set(["input_ids", "attention_mask"]) - labels = [] - for name in label_names: - if name not in inputs: - label = self.criterion.__class__.label_default_values.get(name, None) - elif name in shared_fields: - label = inputs[name] - else: - label = inputs.pop(name) - labels.append(label) - # "labels" is the pre-defined label name in Trainer - inputs["labels"] = labels - # NOTE: TensorParallel model requires non-Tensor inputs to be lists and - # broadcast them, thus do not or optionally use these inputs. labels use - # in criterion not send to model can workaround this. - return inputs + def create_criterion(self): + """loss creator for trainer.""" + criterion = create_loss(self.loss_cls, self.model.config, self.args, merge_labels=True) + return criterion - def full_training_step(self: Trainer, inputs: Dict[str, paddle.Tensor], **kwargs): - labels = inputs.get("labels", None) - if labels is not None: # use ptx - loss_name = "ptx_loss" + def loss_identifier(self, inputs: Dict) -> str: + """ + Moreover, a model/StepTrainer instance may use a mixed loss which uses a + different loss for different step and inputs, while we often want to get + the separated loss metric. We use a callable discriminator using inputs + (dict) as arguments and returning corresponding loss name to identify + current loss. 
NOTE: please make the loss name ends with "_loss". `tr_loss` + is the default loss name used in trainer.train. + """ + return "tr_loss" + + def get_model(self, train=False): + """ + model visitor wrapps PipelineParalle and Inference model to do evaulation + and generation. + """ + if train: + return self.model_wrapped + model = getattr(self, "_eval_model", None) + if model is not None: + return model + if self.args.pipeline_parallel_degree > 1: + # Only accept wrapped model for pipeline_parallel mode + model = PipeEvalModel(self) + self._eval_model = model else: - loss_name = "actor_loss" - kwargs["model"] = kwargs.pop("policy_model") - kwargs["step_control"] = kwargs.pop("policy_step_control") - kwargs["tr_loss"] = kwargs.pop(loss_name) - kwargs = super().full_training_step(inputs, **kwargs) - kwargs["policy_model"] = kwargs.pop("model") - kwargs["policy_step_control"] = kwargs.pop("step_control") - kwargs[loss_name] = kwargs.pop("tr_loss") - return kwargs + model = InferEvalModel(self) + self._eval_model = model + return model + def get_train_step_vars(self, vars: Dict = None) -> Dict: + """ + return `train_step_vars`. If not exists, create it first. If `vars` is + not None, update `train_step_vars` with it. + """ + if not hasattr(self, "train_step_vars"): + # should be called after model is wrapped since the model field should + # use model_wrapped. + + assert self.model is not self.model_wrapped + self.train_step_vars = { + # meaningless vars can pass from outter, dummy value is enough + "epoch": 0, # meaningless for step training + "step": 0, # meaningless for step training + "steps_in_epoch": 100000, # meaningless for step training + "step_control": 0, # to control training process + "model": self.model_wrapped, + # "tr_loss": paddle.to_tensor(0.0), # lazy create + } + if vars: + self.train_step_vars.update(vars) + return self.train_step_vars -class ValueTrainer(Trainer): - def __init__( - self, - model: Union[PretrainedModel, nn.Layer] = None, - criterion: nn.Layer = None, - args: TrainingArguments = None, - data_collator: Optional[DataCollator] = None, - train_dataset: Optional[Dataset] = None, - eval_dataset: Union[Dataset, Dict[str, Dataset]] = None, - tokenizer: Optional[PretrainedTokenizer] = None, - compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None, - callbacks: Optional[List[TrainerCallback]] = None, - optimizers: Tuple[paddle.optimizer.Optimizer, paddle.optimizer.lr.LRScheduler] = (None, None), - preprocess_logits_for_metrics: Callable[[paddle.Tensor, paddle.Tensor], paddle.Tensor] = None, - ): - # only used for non-PipelineParallel models - criterion = create_loss(RLHFValueLoss, model.config, args) - super().__init__( - model, - criterion, - args, - data_collator, - train_dataset, - eval_dataset, - tokenizer, - compute_metrics, - callbacks, - optimizers, - preprocess_logits_for_metrics, + def full_training_step(self, **inputs) -> paddle.Tensor: + """ + Accept any valid key word arguments of model and loss as inputs, they + would be sent to model and then loss. Mostly it is similar to output from + data collator. + Return loss var. However when using PipelienParallel, the loss returned + is 0 when not reach accumulated step and the loss returned at accumulated + step is a mixed loss. We can use `get_step_loss` to get the actual loss. + """ + # if model has multi losses which are combined into one mixed criterion, + # loss statistic var may change for different training steps according + # to inputs. 
+ train_step_vars = self.get_train_step_vars() + loss_name = self.loss_identifier(inputs) + loss_var = train_step_vars.get(loss_name, None) + if loss_var is None: + loss_var = paddle.to_tensor(0.0) + train_step_vars[loss_name] = loss_var + # trainer.train use `tr_loss` as loss var + train_step_vars["tr_loss"] = loss_var + + new_train_step_vars = super().full_training_step(inputs, **train_step_vars) + + # minimally update + train_step_vars = self.get_train_step_vars( + {"step_control": new_train_step_vars["step_control"], loss_name: new_train_step_vars["tr_loss"]} ) + if loss_name != "tr_loss": + train_step_vars.pop("tr_loss") + + self.mark_step_loss(loss_name) + + return train_step_vars[loss_name] def _prepare_inputs(self, inputs: Dict[str, Union[paddle.Tensor, Any]]) -> Dict[str, Union[paddle.Tensor, Any]]: + """ + trainer.criterion only support criterion(prediction, labels), so we need + to reorganize the inputs to extract label data into one argument. This is + only used in non-PipelineParallel model training since loss is included + in PipelineLayer. + """ inputs = super()._prepare_input(inputs) + if self.criterion is None or getattr(self.criterion, "label_names", None) is None: + return inputs + # criterion created by create_loss has `label_names` and `label_default_values` label_names = self.criterion.__class__.label_names # some data fields are used both in model and loss shared_fields = set(["input_ids", "attention_mask"]) @@ -605,577 +242,104 @@ def _prepare_inputs(self, inputs: Dict[str, Union[paddle.Tensor, Any]]) -> Dict[ # in criterion not send to model can workaround this. return inputs - def full_training_step(self: Trainer, inputs: Dict[str, paddle.Tensor], **kwargs): - # TODO(guosheng): Make these training control vars mapping as class attr, - # then PPOTrainer can extract and reuse them to avoid hard code. 
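# Illustrative sketch (hypothetical, not part of this patch): the label-extraction
# idea behind _prepare_inputs above, using plain dicts instead of a trainer.
# Fields named by the criterion's label_names are gathered into inputs["labels"],
# and are popped unless the model also consumes them (shared fields).
def split_labels(inputs, label_names, shared_fields=("input_ids", "attention_mask")):
    labels = []
    for name in label_names:
        if name in shared_fields:
            labels.append(inputs.get(name))
        else:
            labels.append(inputs.pop(name, None))
    inputs["labels"] = labels
    return inputs

batch = {"input_ids": [1, 2, 3], "attention_mask": [1, 1, 1], "old_log_probs": [0.1]}
out = split_labels(batch, ["input_ids", "old_log_probs", "reward_advantages"])
print(sorted(out.keys()))  # ['attention_mask', 'input_ids', 'labels']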
- kwargs["model"] = kwargs.pop("value_model") - kwargs["step_control"] = kwargs.pop("value_step_control") - kwargs["tr_loss"] = kwargs.pop("reward_critic_loss") - kwargs = super().full_training_step(inputs, **kwargs) - kwargs["value_model"] = kwargs.pop("model") - kwargs["value_step_control"] = kwargs.pop("step_control") - kwargs["reward_critic_loss"] = kwargs.pop("tr_loss") - return kwargs - - -@contextmanager -def guard_set_args(args, arg_name_values): - for k, v in arg_name_values.items(): - old_value = getattr(args, k, None) - setattr(args, k, v) - arg_name_values[k] = old_value - yield - for k, v in arg_name_values.items(): - old_value = getattr(args, k) - setattr(args, k, v) - arg_name_values[k] = old_value - - -class MuteDefaultFlowCallback(TrainerCallback): - def on_step_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs): - control.should_save = False - control.should_evaluate = False - control.should_log = False - return control - - -def is_same_tokenizer( - tokenizer: PretrainedTokenizer, - other_tokenizer: PretrainedTokenizer, -) -> bool: - """Check if two tokenizers are the same.""" - return tokenizer is other_tokenizer or ( - tokenizer.__class__ == other_tokenizer.__class__ and tokenizer.get_vocab() == other_tokenizer.get_vocab() - ) - - -class PipeEvalModel(GenerationMixin): - def __init__(self, trainer: Trainer): - self.model: fleet.model.PipelineParallel = trainer.model_wrapped - self.config: PretrainedConfig = trainer.model.config - self._is_gen = False - # self.gen_fn = None - # self.fwd_fn = None - # use non-pipe model generetion related methods - # self.prepare_inputs_for_generation = types.MethodType( - # self.model._layers._non_pipe_model_class.prepare_inputs_for_generation, self - # ) - self.update_model_kwargs_for_generation = ( - self.model._layers._non_pipe_model_class.update_model_kwargs_for_generation - ) - - @property - def pp_group(self): - return self.model.pp_group - - def eval(self): - self.model.eval() - - def train(self): - self.model.train() - - def _broadcast_outputs(self, outputs): - # outputs is PipelineParallel.eval_batch which is a list of batches. - out = [] - outputs = (outputs,) if isinstance(outputs, paddle.Tensor) else outputs - for tensors in outputs: - if not self.model.is_pipeline_last_stage(): - tensor = tensors if isinstance(tensors, paddle.Tensor) else tensors[0] - head_out_meta = ( - (self.model._layers.head_out_meta,) - if isinstance(self.model._layers.head_out_meta, paddle.static.InputSpec) - else self.model._layers.head_out_meta - ) - tensors = tuple( - paddle.empty( - shape=[ - tensor.shape[i] if (meta.shape[i] is None or meta.shape[i] < 0) else meta.shape[i] - for i in range(len(meta.shape)) - ], - dtype=tensor.dtype if meta.dtype is None else meta.dtype, - ) - for meta in head_out_meta - ) - else: - # Currently use tuple instead of ModelOutput and require the - # caller use the return result as tuple. - tensors = ( - (tensors,) - if isinstance(tensors, paddle.Tensor) - else tensors.to_tuple() - if isinstance(tensors, ModelOutput) - else tensors - ) + def mark_step_loss(self, loss_name): + """ + When using a mixed loss we often want to get the separated loss metrics, + thus we mark loss type of each training step to separate them. This is + not necessary since the loss would be returnd after each training step. + However when using PipelienParallel, the loss returned is 0 when not reach + accumulated step and the loss returned at accumulated step is a mixed loss. 
+ To separate loss metrics in PipelienParallel: + 1. We hack PipelineParallel._forward_step to record actual loss for each + step in a list. + 2. We mark the loss type only once for each step using `loss_step_indice` + (dict), then wen can check out the corresponding loss metrics from the + loss list. + We assume a static order of multi-losses and mark the loss indice only once. + """ + self.loss_step_indice = getattr(self, "loss_step_indice", {}) + if loss_name not in self.loss_step_indice: + self.loss_step_indice[loss_name] = len(self.loss_step_indice) - # map_structure( - # lambda tensor: paddle.distributed.broadcast( - # tensor, - # src=self.model.pp_group.ranks[-1], - # group=self.model.pp_group), tensors) - for tensor in tensors: - paddle.distributed.broadcast(tensor, src=self.model.pp_group.ranks[-1], group=self.model.pp_group) - out.append(tensors[0] if len(tensors) == 1 else tensors) - return out[0] if len(out) == 1 else out - - def __call__(self, *args, **kwargs): - model = self.model - assert self.model.training is False - if self._is_gen: - # inputs by `prepare_inputs_for_generation` is a dict with following keys: - # "input_ids", "position_ids", "past_key_values", "use_cache", "attention_mask" - # NOTE: 1. cache/past_key_values should be passed across decoding steps - # by using as model attr rather than input args to reduce comm overhead. - # Also, pipe model defined for training not support this cache input. - # 2. ignore use_cache since _check_data_vaild requires tensor if not None. - # 3. attention_mask can reuse _prepare_decoder_attention_mask in LlamaEmbeddingPipe. - # 4. position_ids pass through _prepare_pipeline_inputs_func and PipeLayer. - inputs, labels = model._prepare_pipeline_inputs_func(*args, **kwargs) - # currently, set accumulate_steps to 1 to avoid multi-batch eval/gen - with guard_set_args(model, {"_compute_loss": False, "accumulate_steps": 1}): - outputs = model.eval_batch([inputs, labels], compute_loss=False) - # TODO(guosheng): Broadcasted logits are used to get next_scores, remove - # it to reduce comm overhead. Also note that we still need broadcast - # next_tokens though logits are broadcasted since pp ranks' seeds differs. - # Currently, just slice the last token to reduce comm overhead. - outputs = [ - micro_batch_output[:, -1, :].unsqueeze(1) - if isinstance(micro_batch_output, paddle.Tensor) - else micro_batch_output[0][:, -1, :].unsqueeze(1) - for micro_batch_output in outputs - ] - outputs = self._broadcast_outputs(outputs) - else: - # use _prepare_pipeline_inputs_func to convert pipeline inputs - inputs, labels = model._prepare_pipeline_inputs_func(*args, **kwargs) - # NOTE(guosheng): bug seems exist. pp.eval_batch(compute_loss=False) - # will set pp._compute_loss to False and would not set it back. Thus - # hack here to set it back. 
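# Illustrative sketch (hypothetical, not part of this patch): the attribute-guard
# pattern used around eval_batch below; the real helper is guard_set_args, now
# kept in trainer_utils.py, which works on a dict of attribute names and values.
import contextlib

@contextlib.contextmanager
def guard_attrs(obj, **overrides):
    old = {k: getattr(obj, k, None) for k in overrides}
    try:
        for k, v in overrides.items():
            setattr(obj, k, v)
        yield obj
    finally:
        for k, v in old.items():
            setattr(obj, k, v)

class _Dummy:
    _compute_loss = True
    accumulate_steps = 8

m = _Dummy()
with guard_attrs(m, _compute_loss=False, accumulate_steps=1):
    assert m._compute_loss is False and m.accumulate_steps == 1
assert m._compute_loss is True and m.accumulate_steps == 8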
- with guard_set_args(model, {"_compute_loss": False, "accumulate_steps": 1}): - outputs = model.eval_batch([inputs, labels], compute_loss=False) - outputs = self._broadcast_outputs(outputs) - return outputs - - def generate(self, *args, **kwargs): - self._is_gen = True - # patch DecoderLayerPipe to use cache, DecoderLayerPipe is subclass of - # DecoderLayer, and would call super().forward - ori_decoder_layer_forward = self.model._layers._non_pipe_decoder_layer_class.forward - - def decoder_layer_forward(layer_self, *args, **kwargs): - kwargs.update({"use_cache": True, "past_key_value": getattr(layer_self, "_cache", None)}) - outputs = ori_decoder_layer_forward(layer_self, *args, **kwargs) - output = outputs[0] - layer_self._cache = outputs[1] - self._has_cache = True - return output - - with guard_set_args(self.model._layers._non_pipe_decoder_layer_class, {"forward": decoder_layer_forward}): - outputs = super().generate(*args, **kwargs) - self._is_gen = False - # clear cache of decoder layers, sublayers is incursive thus suitable - # to both 1F1B and interleave - for layer in self.model._layers.sublayers(): - if isinstance(layer, self.model._layers._non_pipe_decoder_layer_class): - layer._cache = None - self._has_cache = False - return outputs - - def prepare_inputs_for_generation(self, *args, **kwargs): - arg_bind = inspect.signature(self.model._layers._non_pipe_model_class.prepare_inputs_for_generation).bind( - *((self,) + args), **kwargs - ) - arg_bind.apply_defaults() - arg_dict = arg_bind.arguments - last_arg_name, last_arg_value = arg_dict.popitem() - if arg_bind.signature.parameters[last_arg_name].kind == inspect.Parameter.VAR_KEYWORD: - arg_dict.update(last_arg_value) + def get_step_loss(self, loss_prefix: str = "") -> Dict: + """ + Return a dict mapping loss name to value of current training step. This + is mainly to get loss for metric logging, and it would not affect the + training. Overwrite it when we want to change the logging value. + """ + model = self.get_model(train=True) + if not hasattr(self, "loss_dict"): + self.loss_dict = {} + for var_name, value in self.get_train_step_vars().items(): + if var_name.endswith("_loss"): + self.loss_dict[var_name] = value + loss_dict = {} # return a new dict because of new metric names + if isinstance(model, fleet.model.PipelineParallel) and len(self.loss_dict) > 1: + # NOTE: PipelineParallel only returns a accumulated loss after + # accumulated steps, which is a mixed loss of ppo-loss and + # ptx-loss. We hack PipelineParallel._forward_step to record + # loss metrics and postprocess the recorded losses here. + # Maybe better to make the last_stage worker log to reduce + # comm and for simplicity. + with paddle.no_grad(): + if model.is_pipeline_last_stage(): + # loss is 0D tensor, use stack rather than concat + mix_loss = paddle.stack(model._step_losses) + model._step_losses = None + else: + # The tessor shape is not policy_model.accumulate_steps + # (args.accu_steps) but policy_trainer.args.accu_steps, + # since policy_model is created with global pp_config + # using global args.accu_steps which is only half of + # policy_trainer.args.accu_steps, and indeed trainer hack + # model.accumulate_steps in training_pipeline_step to use + # trainer.args.accu_steps. The dtype is fp32(to be check), + # thus no need to broadcast. 
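# Illustrative sketch (hypothetical, not part of this patch): how the broadcast
# mix_loss below is split back into named losses. Assuming, for illustration
# only, that ptx_loss comes first in every accumulation step, strided slicing by
# each loss's recorded indice recovers the separate metrics.
import paddle

mix_loss = paddle.to_tensor([1.0, 10.0, 2.0, 20.0, 3.0, 30.0])  # ptx, actor, ptx, actor, ...
loss_step_indice = {"ptx_loss": 0, "actor_loss": 1}
for name, idx in loss_step_indice.items():
    print(name, float(mix_loss[idx::len(loss_step_indice)].mean()))
# ptx_loss 2.0, actor_loss 20.0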
+ mix_loss = paddle.empty(shape=[self.args.gradient_accumulation_steps], dtype=paddle.float32) + paddle.distributed.broadcast(mix_loss, src=model.pp_group.ranks[-1], group=model.pp_group) + for loss_name in self.loss_dict: + # We assume a static order of multi-losses and mark the loss + # indice only once. + value = mix_loss[self.loss_step_indice[loss_name] :: len(self.loss_dict)].mean() + loss_name = loss_prefix + loss_name if loss_prefix else loss_name + loss_dict[loss_name] = value + return loss_dict + + for loss_name in self.loss_dict: + value = self.get_train_step_vars()[loss_name] + loss_name = loss_prefix + loss_name if loss_prefix else loss_name + loss_dict[loss_name] = value + return loss_dict + + +class PolicyTrainer(StepTrainer): + loss_cls = RLHFPPOMixedLoss + + def loss_identifier(self, inputs: Dict) -> str: + labels = inputs.get("labels", None) + if labels is not None: # use ptx + loss_name = "ptx_loss" else: - arg_dict[last_arg_name] = last_arg_value - arg_dict.pop("self") - past_key_values = arg_dict.get("past_key_values", None) - # prepare_inputs_for_generation use past_key_values to discrimate prefill - # or decode and slice inputs accordingly. - if getattr(self, "_has_cache", False): - arg_dict.update({"past_key_values": True}) - model_inputs = self.model._layers._non_pipe_model_class.prepare_inputs_for_generation(self, **arg_dict) - model_inputs.update({"past_key_values": past_key_values}) - return model_inputs - - -import types - -from predictor import DygraphInferencePredictor, InferencePredictorMixin - - -class Timer: - def __init__(self, name): - self.name = name - self.tic_time = None - self.run_times = 0 - - def tic(self): - self.tic_time = time.time() - - def toc(self): - self.run_times += time.time() - self.tic_time - - -class Predictor(DygraphInferencePredictor): - def __init__(self, config, model: PretrainedModel = None, tokenizer: PretrainedTokenizer = None): - self.model_config = model.config - self.config = config - self.tokenizer = tokenizer - self.model = model - self.is_available = False - self._weights_mapping = None - # use multi stream to load weights - # self._stream_num = 4 - # self._streams = [] - # for i in range(self._stream_num): - # stream = paddle.device.cuda.Stream() - # self._streams.append(stream) - # self._param_counter = 0 - # print("=" * 20, "cache shape", self.cache_kvs_shape, list(self.__dict__.keys())) - # exit(0) - - @staticmethod - def create_predictor(trainer): - from predictor import ( - PdArgumentParser, - PredictorArgument, - get_model_max_position_embeddings, - ) + loss_name = "actor_loss" + return loss_name - # create infer model - # NOTE:infer model use static name param_attr to create and cannot be - # created multiple times. - def create_infer_model(model, dtype, set_state=False): - # patches for inference model to make FuseMT adapt - import paddlenlp_ops - - # should patch before infer model import - paddlenlp_ops.save_with_output = lambda *args, **kwargs: None - # TODO(guosheng): update the custom op code directly. - ori_set_ends = paddlenlp_ops.set_stop_value_multi_ends - - def _set_ends(topk_ids, stop_flags, end_ids, mode): - # infer model uses eos_token_id to pad and discriminate ending, - # patch to use pad_token_id to pad to unify with non-infer model. 
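# Illustrative sketch (hypothetical, not part of this patch): the eos-to-pad
# replacement described in the comments above for the patched
# set_stop_value_multi_ends; token ids and stop flags are made-up values
# (eos=2 is replaced by pad=0 on finished rows).
import paddle

pad_token_id = 0
topk_ids = paddle.to_tensor([5, 2, 7, 2], dtype="int64")
stop_flags = paddle.to_tensor([False, True, False, True])
padded = paddle.where(stop_flags, paddle.full_like(topk_ids, pad_token_id), topk_ids)
print(padded.tolist())  # [5, 0, 7, 0]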
- topk_ids_out, stop_flags_out = ori_set_ends(topk_ids, stop_flags, end_ids, mode) - if trainer.tokenizer.pad_token_id != trainer.tokenizer.eos_token_id: - topk_ids_out = paddle.where(stop_flags, trainer.tokenizer.pad_token_id, topk_ids_out) - return topk_ids_out, stop_flags_out - - paddlenlp_ops.set_stop_value_multi_ends = _set_ends - from models.infer_model_utils import _update_model_kwargs - - import paddlenlp.experimental.transformers as infer_transformers - - config = copy.deepcopy(model.config) - hcg = fleet.get_hybrid_communicate_group() # may differ with training - config.tensor_parallel_degree = hcg.get_model_parallel_world_size() - config.tensor_parallel_rank = hcg.get_model_parallel_rank() - config.weight_only_quant_bits = -1 - config.quant_type = None - config.use_cachekv_int8 = False - config.single_card_ptq = True - infer_model_cls = getattr(infer_transformers, model.__class__.__name__ + "InferenceModel") - with dtype_guard(dtype): - infer_model = infer_model_cls(config) - # apply patches - infer_model.update_model_kwargs_for_generation = types.MethodType(_update_model_kwargs, infer_model) - if set_state: - state_dict = {} - for k, v in model.state_dict().items(): - # state_dict[k] = np.from_dlpack(paddle.utils.dlpack.to_dlpack(v)) - state_dict[k] = v.numpy() - infer_model.set_state_dict(state_dict) - return infer_model - - # to avoid oom, clear param of infer_model imediately - ori_creat_param = paddle.nn.Layer.create_parameter - - def _create_param(self, *args, **kwargs): - param = ori_creat_param(self, *args, **kwargs) - param._clear_data() - return param - - paddle.nn.Layer.create_parameter = _create_param - infer_model = create_infer_model(trainer.model, dtype=trainer.amp_dtype) - paddle.nn.Layer.create_parameter = ori_creat_param - - # create predictor - parser = PdArgumentParser((PredictorArgument,)) - predictor_args = parser.parse_dict( - { - "src_length": get_model_max_position_embeddings( # can be changed dynamically by predictor.input_length - trainer.model.config - ), - "max_length": trainer.args.max_length, - "dtype": trainer.amp_dtype, - "batch_size": trainer.args.per_device_train_batch_size, - # infer model do not support top_k, and differ with non-infer model - # generation which gets default top_K=50 using generation_config.top_k - "top_p": 0.8, - # trainer.args.top_p, - "temperature": trainer.args.temperature, - "repetition_penalty": trainer.args.repetition_penalty, - } - )[0] - policy_predictor = Predictor(predictor_args, model=infer_model, tokenizer=trainer.tokenizer) - return policy_predictor - - def _create_caches(self): - """inputs can be reused among multiple predictions, such as cache""" - if hasattr(self, "cache_kvs_shape"): # has created cache - input_length = getattr(self, "input_length", 0) - if input_length <= self.config.src_length: # reuse cahce - return - else: # create longer cache - self._clear_caches() - self.config.src_length = getattr(self, "input_length", self.config.src_length) - if not hasattr(self, "_buffer_attrs"): - pre_attrs = set(self.__dict__.keys()) - self.cache_kvs_shape = self.model.get_cache_kvs_shape( - self.model_config, self.config.batch_size, self.config.total_max_length - ) - # TODO: remove GenerationConfig.from_pretrained - InferencePredictorMixin.__init__(self, self.config, self.tokenizer) - if not hasattr(self, "_buffer_attrs"): - self._buffer_attrs = set(self.__dict__.keys()) - pre_attrs - - def _clear_caches(self): - # del or offload - for attr in self._buffer_attrs: - delattr(self, attr) - - def disable(self, 
model, onload_model=True): - # clear caches - self._clear_caches() - # clear params - for _, param in self.model.state_dict().items(): - param._clear_data() - if onload_model: - model.to(paddle.device.get_device()) - self.is_available = False - - def enable(self, model, offload_model=True): - if self.is_available: - return - # set params - self.set_state_dict(model, offload_model) - self.is_available = True + def get_step_loss(self, loss_prefix: str = "") -> Dict: + loss_dict = super().get_step_loss(loss_prefix=loss_prefix) + # use_ptx would double the gradient_accumulation_steps which causes + # actor_loss and ptx_loss reduced by half. Moreover, ptx_loss should + # be divided by ptx_coeff for logging. + # TODO(guosheng): maybe should consider self._enable_delay_scale_loss() + # if "ptx_loss" in loss_dict: + # loss_dict[loss_prefix + "ptx_loss"] = loss_dict[ + # "ptx_loss"] * 2 / self.criterion.ptx_coeff + # loss_dict[loss_prefix + "actor_loss"] = loss_dict["actor_loss"] * 2 + return loss_dict - @paddle.no_grad() - def set_state_dict(self, model, offload_model=True): - offload_place = paddle.CUDAPinnedPlace() - state_dict = {} - for k, v in model.state_dict().items(): - # maybe use dlpack or some other zero-copy methods - state_dict[k] = v # .numpy() - # state_dict[k] = v.to(offload_place) - # self.model.set_state_dict(state_dict) - # return - - if getattr(self, "_weights_mapping", None) is None: - self._weights_mapping = self.model.get_weights_mapping() - convert_timer = Timer("cpu-convert") - set_timer = Timer("cpu-convert") - set_timer.tic() - # import nvtx - - # set_rng = nvtx.start_range(message=f"set_state_dict", color="yellow") - - for k, v in self._weights_mapping.items(): - # with paddle.device.cuda.stream_guard( - # self._streams[self._param_counter % self._stream_num]): - with contextlib.nullcontext(): - # set_param_rng = nvtx.start_range(message=f"set_param", - # color="green") - param, (convert_fun, args) = k, v - args = [state_dict[name] for name in args] - # maybe use thread pool to speedup cpu convert - # value = paddle.to_tensor(convert_fun(*args)) - convert_timer.tic() - # with device_guard("cpu"): - # op with pinmemory input tensors get gpu output tensor - value = convert_fun(*args) - if offload_model: - for arg in args: - # shared params no need to offload - if value is not arg: - arg.to(offload_place, blocking=False) - convert_timer.toc() - if not isinstance(value, paddle.Tensor): - param.set_value(value) - elif isinstance(value.place, paddle.CUDAPlace): - # param.get_tensor()._share_data_with(value) - value._share_buffer_to(param) - else: - param.copy_(value, False) - # nvtx.end_range(set_param_rng) - # self._param_counter += 1 - paddle.device.cuda.synchronize() - set_timer.toc() - # nvtx.end_range(set_rng) - print("=" * 20, "cpu-convert time", convert_timer.run_times, set_timer.run_times) - # exit(0) - # print("=" * 20, "lm_head.weight", self.model.lm_head.weight) - # print("=" * 20, "llama.embed_tokens.weight", - # self.model.llama.embed_tokens.weight) - # print("=" * 20, "llama.transformer_block.qkv_weights", - # self.model.llama.transformer_block.qkv_weights[0]) - # print("=" * 20, "llama.transformer_block.ffn1_weights", - # self.model.llama.transformer_block.ffn1_weights[0]) - # print("=" * 20, "llama.transformer_block.linear_weights", - # self.model.llama.transformer_block.linear_weights[0]) - - def _preprocess(self, source): - # make cache when infer happens to get actual shape to save memory - self._create_caches() - return super()._preprocess(source) - 
@paddle.no_grad() - def _infer(self, inputs): - for key in inputs.keys(): - if paddle.is_tensor(inputs[key]): - continue - if isinstance(inputs[key], list): - if paddle.is_tensor(inputs[key]): - continue - inputs[key] = [paddle.to_tensor(item) for item in inputs[key]] - else: - inputs[key] = paddle.to_tensor(inputs[key]) - - inputs["cache_kvs"] = self.cache_kvs - print("=" * 20, "infer input_ids", inputs["input_ids"]) - return self.model.generate(**inputs) - - def _postprocess(self, predictions): - return predictions - - -policy_predictor: Predictor = None - - -def check_memory_usage(msg=""): - import paddle - - max_memory_allocated_size = paddle.device.cuda.max_memory_allocated() / (1024 * 1024 * 1024) - max_memory_reserved_size = paddle.device.cuda.max_memory_reserved() / (1024 * 1024 * 1024) - memory_allocated_size = paddle.device.cuda.memory_allocated() / (1024 * 1024 * 1024) - memory_reserved_size = paddle.device.cuda.memory_reserved() / (1024 * 1024 * 1024) - mem = { - f"{msg}_max_memory_allocated_size": max_memory_allocated_size, - f"{msg}_max_memory_reserved_size": max_memory_reserved_size, - f"{msg}_memory_allocated_size": memory_allocated_size, - f"{msg}_memory_reserved_size": memory_reserved_size, - } - print(mem) - - -@contextmanager -def infer_guard(trainer, offload_model=True): - try: - try_import("paddlenlp_ops") - except: - yield - return - check_memory_usage("before infer generation") - global policy_predictor - # offload training params before infer model creation - model = trainer.model - import time - - # start_time = time.time() - # model.to(paddle.CUDAPinnedPlace()) - # print("=" * 20, "offload time", time.time() - start_time) - start_time = time.time() - if policy_predictor is None: - policy_predictor = Predictor.create_predictor(trainer) - if not policy_predictor.is_available: - policy_predictor.enable(model, offload_model=offload_model) - print("=" * 20, "create infer time", time.time() - start_time) - # TODO(guosheng): patch for dist.all_recude to use tp group, fix it later - import paddle.distributed as dist - - ori_all_reduce = dist.all_reduce - ori_broadcast = dist.broadcast - hcg = fleet.get_hybrid_communicate_group() - dist.all_reduce = lambda x: ori_all_reduce(x, group=hcg.get_model_parallel_group()) - dist.broadcast = lambda x, rank: ori_broadcast( - x, src=hcg.get_model_parallel_group_src_rank(), group=hcg.get_model_parallel_group() - ) - check_memory_usage("begin infer generation") - yield - dist.all_reduce = ori_all_reduce - dist.broadcast = ori_broadcast - print("=" * 20, "infer generation finished") - import sys - - print("=" * 20, "predictor refcount", sys.getrefcount(policy_predictor)) - # policy_predictor = None - # policy_predictor.model = None - policy_predictor.disable(model, onload_model=offload_model) - # start_time = time.time() - # model.to(paddle.device.get_device()) - # print("=" * 20, "onload time", time.time() - start_time) - check_memory_usage("end infer generation") - - -class InferEvalModel: - """For faster generation, not support PipelineParallel yet.""" - - def __init__(self, trainer: Trainer): - self.model: PretrainedModel = trainer.model - self.tokenizer: PretrainedTokenizer = trainer.tokenizer - - def eval(self): - self.model.eval() - - def train(self): - self.model.train() - - def __call__(self, *args, **kwargs): - # assert model is on GPU - assert policy_predictor is None or not policy_predictor.is_available - return self.model(*args, **kwargs) - - def generate(self, *args, **kwargs): - if policy_predictor is None or not 
policy_predictor.is_available: - return self.model.generate(*args, **kwargs) - - arg_dict = inspect.signature(self.model.generate).bind(*args, **kwargs).arguments - input_ids = arg_dict["input_ids"] - generation_config = arg_dict["generation_config"] - # convert text and tokenize again to convert left padding to right padding - # remove this if inputs is right padding - print("=" * 20, "raw input_ids", input_ids) - # TODO(guosheng): allow to use right padding to infer directly - prompts = self.tokenizer.batch_decode(input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False) - print("=" * 20, "prompts", prompts) - # decoded prompts has been applied with chat_template - # NOTE(guosheng): Whether to add special token should be checked, None - # chat_template would not add special token in predictor, since it assumes - # chat_template includes special tokens. While Beaver dataset tokenization - # does not use chat_template, it uses hard coded template which excludes - # special tokens. - with guard_set_args( - policy_predictor.tokenizer, - { - # predictor use right padding for infer model by default - # "padding_side": "right", - # "chat_template": None - }, - ): - policy_predictor.input_length = input_ids.shape[-1] - outputs = policy_predictor.predict(prompts) - outputs = (outputs[0][:, input_ids.shape[-1] :],) if generation_config.trunc_input else (outputs[0],) - if self.tokenizer.padding_side == "left": - # convert back to left padding inputs - outputs[0][:, : input_ids.shape[-1]] = input_ids - print("=" * 20, "infer output_ids", outputs[0]) - return outputs +class ValueTrainer(StepTrainer): + loss_cls = RLHFValueLoss + # define loss name + loss_identifier = lambda self, inputs: "reward_critic_loss" class PPOTrainer(Trainer): @@ -1267,43 +431,43 @@ def __init__( optimizers, preprocess_logits_for_metrics, ) + # disable inner trainers' callback/state/control + self.policy_trainer.add_callback(MuteDefaultFlowCallback) + self.value_trainer.add_callback(MuteDefaultFlowCallback) # use trainer for reference_model/reward_model to enable sharding stage-3 - # maybe we should allow models to use different dist strategies later - if True: # ShardingOption.FULL_SHARD in args.sharding: - self.reference_trainer = Trainer( - reference_model, - criterion, - args, - data_collator, - train_dataset, - eval_dataset, - reference_tokenizer, - compute_metrics, - callbacks, - optimizers, - preprocess_logits_for_metrics, - ) - self.reward_trainer = Trainer( - reward_model, - criterion, - args, - data_collator, - train_dataset, - eval_dataset, - reward_tokenizer, - compute_metrics, - callbacks, - optimizers, - preprocess_logits_for_metrics, - ) - # TODO(guosheng): sharding stage3 should create master weight optionally - # instead of creation and clear. - self.reference_trainer.init_train_model_opt(100, None, clear_master_weight=True) # dummy max_steps - self.reward_trainer.init_train_model_opt(100, None, clear_master_weight=True) # dummy max_steps - else: - self._reference_model = reference_model - self._reward_model = reward_model + # and PipelineParallel. 
maybe we should allow models to use different dist + # strategies later + self.reference_trainer = StepTrainer( + reference_model, + criterion, + args, + data_collator, + train_dataset, + eval_dataset, + reference_tokenizer, + compute_metrics, + callbacks, + optimizers, + preprocess_logits_for_metrics, + ) + self.reward_trainer = StepTrainer( + reward_model, + criterion, + args, + data_collator, + train_dataset, + eval_dataset, + reward_tokenizer, + compute_metrics, + callbacks, + optimizers, + preprocess_logits_for_metrics, + ) + # TODO(guosheng): sharding stage3 should create master weight optionally + # instead of creation and clear. + self.reference_trainer.init_train_model_opt(100, None, clear_master_weight=True) # dummy max_steps + self.reward_trainer.init_train_model_opt(100, None, clear_master_weight=True) # dummy max_steps self.reference_model.eval() self.reward_model.eval() @@ -1320,6 +484,7 @@ def __init__( # top_k=self.args.top_k, repetition_penalty=self.args.repetition_penalty, do_sample=True, + # allow generation output to contain input trunc_input=False, bos_token_id=self.tokenizer.bos_token_id, eos_token_id=self.tokenizer.eos_token_id, @@ -1327,9 +492,7 @@ def __init__( ) # Those value can be changed self.kl_coeff = self.args.kl_coeff - self.policy_trainer.clip_range_ratio = self.clip_range_ratio = self.args.clip_range_ratio self.clip_range_score = self.args.clip_range_score - self.value_trainer.clip_range_value = self.clip_range_value = self.args.clip_range_value self.policy_trainer.ptx_coeff = self.ptx_coeff = self.args.ptx_coeff self.gamma = 1.0 self.gae_lambda = 0.95 @@ -1340,77 +503,26 @@ def __init__( "DummyPPOModel", (object,), {"eval": lambda _: self.set_eval(), "train": lambda _: self.set_train()} ) self.model = self.model_wrapped = self.DummyPPOModel() - # self.optimizer = self.policy_trainer.optimizer - # self.scaler = self.reference_trainer.scaler = self.reward_trainer.scaler = None @property def reference_model(self): - model = getattr(self, "_reference_model", None) - if model is not None: - return model - # use model with Trainer - if self.reference_trainer.args.pipeline_parallel_degree > 1: - # Only accept wrapped model for pipeline_parallel mode - # model = self.reference_trainer.model_wrapped - model = PipeEvalModel(self.reference_trainer) - self._reference_model = model - else: - model = self.reference_trainer.model - return model + return self.reference_trainer.get_model(train=False) @property def reward_model(self): - model = getattr(self, "_reward_model", None) - if model is not None: - return model - # use model with Trainer - if self.reward_trainer.args.pipeline_parallel_degree > 1: - # Only accept wrapped model for pipeline_parallel mode - # model = self.reward_trainer.model_wrapped - model = PipeEvalModel(self.reward_trainer) - self._reward_model = model - else: - model = self.reward_trainer.model - return model + return self.reward_trainer.get_model(train=False) @property def actor_model(self): - if self.training: - return self.policy_trainer.model_wrapped - model = getattr(self, "_actor_model", None) - if model is not None: - return model - if self.policy_trainer.args.pipeline_parallel_degree > 1: - # Only accept wrapped model for pipeline_parallel mode - # model = self.policy_trainer.model_wrapped - model = PipeEvalModel(self.policy_trainer) - self._actor_model = model - else: - # model = self.policy_trainer.model - model = InferEvalModel(self.policy_trainer) - self._actor_model = model - return model + return 
self.policy_trainer.get_model(train=self.training) @property def reward_critic_model(self): - if self.training: - return self.value_trainer.model_wrapped - model = getattr(self, "_reward_critic_model", None) - if model is not None: - return model - if self.value_trainer.args.pipeline_parallel_degree > 1: - # Only accept wrapped model for pipeline_parallel mode - # model = self.value_trainer.model_wrapped - model = PipeEvalModel(self.value_trainer) - self._reward_critic_model = model - else: - model = self.value_trainer.model - return model + return self.value_trainer.get_model(train=self.training) def set_train(self, mode: bool = True) -> None: """Set training mode for all models.""" if mode: - # self.is_in_train = True self.training = True self.actor_model.train() self.reward_critic_model.train() @@ -1531,23 +643,6 @@ def _save_checkpoint(self, model, metrics=None): with guard_set_args(self.value_trainer.args, {"output_dir": os.path.join(self.args.output_dir, "value")}): self.value_trainer._save_checkpoint(model, metrics) - # def _load_from_checkpoint(self, resume_from_checkpoint=None): - # with guard_set_args(self.policy_trainer.args, {"output_dir": os.path.join(self.args.output_dir, "policy")}): - # self.policy_trainer._load_from_checkpoint(resume_from_checkpoint) - # with guard_set_args(self.value_trainer.args, {"output_dir": os.path.join(self.args.output_dir, "value")}): - # self.value_trainer._load_from_checkpoint(resume_from_checkpoint) - - # def _load_optimizer_and_scheduler(self, checkpoint): - # # NOTE: `Trainer._load_optimizer_and_scheduler` would not seek the latest - # # state as in `_load_from_checkpoint``, and it just use `resume_from_checkpoint` - # # as value of `checkpoint` to load. - # self.policy_trainer._load_optimizer_and_scheduler( - # checkpoint if checkpoint is None else os.path.join(checkpoint, "policy") - # ) - # self.value_trainer._load_optimizer_and_scheduler( - # checkpoint if checkpoint is None else os.path.join(checkpoint, "value") - # ) - def init_train_model_opt( self: Trainer, max_steps: int, resume_from_checkpoint: bool = False, clear_master_weight: bool = False ) -> PretrainedModel: @@ -1569,39 +664,6 @@ def init_train_model_opt( ) return policy_model, value_model - def load_sing_gen_data(self, as_batches=True, use_counter=False, data_dir="pkl_data"): - if use_counter: - iter_counter = getattr(self, "iter_counter", 0) - self.iter_counter = iter_counter + 1 - else: - iter_counter = "" - import pickle - - from paddle.distributed import fleet - - hcg = fleet.get_hybrid_communicate_group() - data_rank = hcg.get_sharding_parallel_rank() - with open(os.path.join(data_dir, f"{iter_counter}rl_batch-{data_rank}.data"), "rb") as f: - data = pickle.load(f) - rl_batch = map_structure(lambda x: paddle.to_tensor(x), data) - rl_batches = [rl_batch] if as_batches else rl_batch - return rl_batches - - def save_single_gen_data(self, rl_batch, use_counter=False, data_dir="pkl_data"): - if use_counter: - iter_counter = getattr(self, "iter_counter", 0) - self.iter_counter = iter_counter + 1 - else: - iter_counter = "" - import pickle - - import paddle.distributed as dist - - with open(os.path.join(data_dir, f"{iter_counter}rl_batch-{dist.get_rank()}.data"), "wb") as f: - rl_batch = map_structure(lambda x: x.numpy(), rl_batch) - pickle.dump(rl_batch, f) - # exit(0) - def get_epoch_iterator(self): # TODO(guosheng): support iter dataset num_prompt_only_batches = len(self.prompt_only_dataloader) @@ -1616,8 +678,6 @@ def gen_epoch_data(): # generate batches self.set_eval() 
rl_batches = self.split_rl_micro_batches(prompt_only_batch) - # rl_batches = self.load_sing_gen_data(as_batches=True, - # use_counter=True) if self.use_ptx: ptx_batches = self.split_ptx_micro_batches(ptx_batch) else: @@ -1627,7 +687,6 @@ def gen_epoch_data(): self.set_train() for _ in range(self.args.update_iters): for rl_batch, ptx_batch in zip(rl_batches, ptx_batches): - # self.save_single_gen_data(rl_batch, use_counter=True) yield rl_batch, ptx_batch class EpochIterator: @@ -1670,6 +729,18 @@ def init_train_num(self: Trainer, train_dataloader: DataLoader): num_train_samples, ) + def is_step_end(self): + # reach accumulation_steps, value trainer has the same step_control and + # gradient_accumulation_steps as PPO trainer. + # if (step_control + 1) % args.gradient_accumulation_steps == 0 + return self.value_trainer.get_train_step_vars()["step_control"] == 0 + + def get_step_loss(self, loss_prefix: str = "") -> Dict: + rl_loss = self.policy_trainer.get_step_loss(loss_prefix) + value_loss = self.value_trainer.get_step_loss(loss_prefix) + rl_loss.update(value_loss) + return rl_loss + def train( self, resume_from_checkpoint: Optional[Union[str, bool]] = None, @@ -1712,16 +783,8 @@ def train( ) = self.init_train_num(train_dataloader) # ##### model and optimizer related setting ##### - # policy_trainer/value_trainer only init train with init_train_model_opt, - # maybe more training setting used in full_training_step should be set here, - # such as trainer.control and trainer.state - # policy_model = self.policy_trainer.init_train_model_opt(max_steps, resume_from_checkpoint) - # value_model = self.value_trainer.init_train_model_opt(max_steps, resume_from_checkpoint) policy_model, value_model = self.init_train_model_opt(max_steps, resume_from_checkpoint) paddle.device.cuda.empty_cache() - # disable inner trainers' callback/state/control - self.policy_trainer.add_callback(MuteDefaultFlowCallback) - self.value_trainer.add_callback(MuteDefaultFlowCallback) # ##### traing statistic logging ##### # Number of trainable parameters only account for policy_model @@ -1750,39 +813,10 @@ def train( self.control = self.callback_handler.on_train_begin(args, self.state, self.control) - actor_loss = paddle.to_tensor(0.0) - reward_critic_loss = paddle.to_tensor(0.0) - ptx_loss = paddle.to_tensor(0.0) - # used when logging and last step - self._total_actor_loss_scalar = 0.0 - self._total_reward_critic_loss_scalar = 0.0 - self._total_ptx_loss_scalar = 0.0 self._globalstep_last_logged = self.state.global_step - # train_step_kwargs is used to provide arguments more than model inputs - # for full_training_step which is copied from Trainer.train and needs - # these arguments to control training process. - train_step_kwargs = { - "ignore_keys_for_eval": None, # no need - # TODO(guosheng): commented args mean to resume data, not support yet - # "resume_from_checkpoint": resume_from_checkpoint, - # "train_dataloader": train_dataloader, - # "epochs_trained": epochs_trained, - # "steps_trained_in_current_epoch": steps_trained_in_current_epoch, - # "steps_trained_progress_bar": steps_trained_progress_bar, - "steps_in_epoch": steps_in_epoch, # to control training process - # the following args are corresponding to tr_loss and model used in - # Trainer.train, and they would be used as tr_loss and model in - # PolicyTranier and ValueTrainer. 
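`is_step_end` above piggybacks on the `step_control` bookkeeping done by `full_training_step` (see trainer_utils.py below): `step_control` is reset to 0 right after an optimizer step and incremented while still accumulating. A toy trace of that counter (illustration only), assuming gradient_accumulation_steps = 2:

gradient_accumulation_steps = 2
step_control = 0
for micro_step in range(4):
    if (step_control + 1) % gradient_accumulation_steps == 0:
        step_control = 0   # optimizer stepped; is_step_end() would now return True
    else:
        step_control += 1  # still accumulating; is_step_end() would return False
    print(micro_step, step_control)  # -> (0, 1), (1, 0), (2, 1), (3, 0)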
- "actor_loss": actor_loss, - "reward_critic_loss": reward_critic_loss, - "ptx_loss": ptx_loss, - "policy_model": policy_model, - "value_model": value_model, - } - start_time = time.time() - self._globalstep_last_start_time = start_time # time.time() + self._globalstep_last_start_time = start_time # self.timers and self.timers("read-data").start() for epoch in range(epochs_trained, num_train_epochs): @@ -1791,63 +825,25 @@ def train( ): train_dataloader.batch_sampler.set_epoch(epoch) - step_control = 0 # used in loop control, reset to 0 after every step - train_step_kwargs.update({"policy_step_control": step_control, "value_step_control": step_control}) self.control = self.callback_handler.on_epoch_begin(args, self.state, self.control) for step, inputs in enumerate(epoch_iterator): # self.timers and self.timers("read-data").stop() - os.environ["TRAINER_GLOBAL_STEP"] = str(self.state.global_step) - self.callback_handler.on_load_data_end(args, self.state, self.control, inputs=inputs) - # epoch, step and steps_in_epoch only mostly used in train_step by - # `self.state.epoch = epoch + (step + 1) / steps_in_epoch` if not - # resume data - train_step_kwargs.update({"epoch": epoch, "step": step}) + # os.environ["TRAINER_GLOBAL_STEP"] = str(self.state.global_step) + # self.callback_handler.on_load_data_end(args, self.state, self.control, inputs=inputs) rl_batch, ptx_batch = inputs # TODO(guosheng): make rl_step/ptx_step run with autocast_smart_context_manager - rl_info, train_step_kwargs = self.rl_step(rl_batch, **train_step_kwargs) + rl_info = self.rl_step(rl_batch) paddle.device.cuda.empty_cache() if self.use_ptx: - ptx_info, train_step_kwargs = self.ptx_step(ptx_batch, **train_step_kwargs) + ptx_info = self.ptx_step(ptx_batch) rl_info.update(ptx_info) paddle.device.cuda.empty_cache() - self.state.global_step = self.value_trainer.state.global_step - self.state.epoch = self.value_trainer.state.epoch - if train_step_kwargs["value_step_control"] == 0: - # NOTE: PipelineParallel only returns a accumulated loss after - # accumulated steps, which is a mixed loss of ppo-loss and - # ptx-loss. We hack PipelineParallel._forward_step to record - # loss metrics and postprocess the recorded losses here. - # Maybe better to make the last_stage worker log to reduce - # comm and for simplicity. - if isinstance(policy_model, fleet.model.PipelineParallel): - with paddle.no_grad(): - # TODO(guosheng): maybe move this to model_pp.py and - # using interface here is better - # interleave betweeen ppo-loss and ptx-loss - if policy_model.is_pipeline_last_stage(): - # loss is 0D tensor, use stack rather than concat - mix_loss = paddle.stack(policy_model._step_losses) - policy_model._step_losses = None - else: - # The tessor shape is not policy_model.accumulate_steps - # (args.accu_steps) but policy_trainer.args.accu_steps, - # since policy_model is created with global pp_config - # using global args.accu_steps which is only half of - # policy_trainer.args.accu_steps, and indeed trainer hack - # model.accumulate_steps in training_pipeline_step to use - # trainer.args.accu_steps. The dtype is fp32(to be check), - # thus no need to broadcast. 
- mix_loss = paddle.empty( - shape=[self.policy_trainer.args.gradient_accumulation_steps], dtype=paddle.float32 - ) - paddle.distributed.broadcast( - mix_loss, src=policy_model.pp_group.ranks[-1], group=policy_model.pp_group - ) - real_actor_loss = mix_loss[0::2].mean() - real_ptx_loss = mix_loss[1::2].mean() - rl_info.update({"train/actor_loss": real_actor_loss, "train/ptx_loss": real_ptx_loss}) + self.state.global_step += 1 + self.state.epoch = epoch + (step + 1) / steps_in_epoch + if self.is_step_end(): + rl_info.update(self.get_step_loss(loss_prefix="train/")) # on_step_end self.control = self.callback_handler.on_step_end(args, self.state, self.control) else: @@ -1985,48 +981,18 @@ def get_advantages_and_returns( returns = paddle.concat([paddle.zeros([returns.shape[0], start], dtype=returns.dtype), returns], -1) return advantages.detach(), returns - def _rl_step(self, rl_batch: Dict[str, paddle.Tensor], **kwargs) -> Dict[str, Any]: - prompt = rl_batch["prompt"] - old_log_probs = rl_batch["log_probs"] - ref_log_probs = rl_batch["ref_log_probs"] - rewards = rl_batch["rewards"] - old_reward_values = rl_batch["reward_values"] - input_ids = rl_batch["input_ids"] - attention_mask = rl_batch["attention_mask"] - - # log_probs has shifted by one for predicted logits - # TODO(guosheng): When using flash_attn with casual mask and right padding - # inputs, responses of batch input cannot be got by sliced from start. And - # use sequences (as labels) with full length instead of target length. - start = prompt.shape[-1] - 1 - sequence_mask = attention_mask[:, 1:] - - with paddle.no_grad(): - # maybe these two can also be put into rollout - old_rewards = self.add_kl_divergence_regularization( - prompt, - old_log_probs, - ref_log_probs, - rewards, - sequence_mask, - ) - reward_advantages, reward_returns = self.get_advantages_and_returns( - old_reward_values, - old_rewards, - sequence_mask, - start, - ) - # metric - kl_divergence = ((old_log_probs - ref_log_probs) * sequence_mask)[:, start:].sum(axis=-1).mean() - mean_generated_length = sequence_mask[:, start:].cast(paddle.float32).sum(axis=-1).mean() - max_generated_length = sequence_mask[:, start:].cast(paddle.float32).sum(axis=-1).max() - rewards = rewards.mean() - # trainer inputs with target length - old_log_probs = old_log_probs[:, start:] - old_reward_values = old_reward_values[:, start:] - sequence_mask = sequence_mask[:, start:] - # position_ids is necessayr for left padding - position_ids = make_position_ids(attention_mask) + def rl_step(self, rl_batch: Dict[str, paddle.Tensor]) -> Dict[str, Any]: + # inputs shared by policy and value trainer + input_ids = rl_batch["input_ids"] # length: src+tgt + attention_mask = rl_batch["attention_mask"] # length: src+tgt + position_ids = rl_batch["position_ids"] # length: src+tgt + sequence_mask = rl_batch["sequence_mask"] # length: src+tgt(-1) + # inputs used by policy trainer + old_log_probs = rl_batch["log_probs"] # length: src+tgt(-1) + reward_advantages = rl_batch["reward_advantages"] # length: src+tgt(-1) + # inputs used by value trainer + old_reward_values = rl_batch["reward_values"] # length: src+tgt(-1) + reward_returns = rl_batch["reward_returns"] # length: src+tgt(-1) policy_trainer_inputs = { "input_ids": input_ids, @@ -2035,11 +1001,8 @@ def _rl_step(self, rl_batch: Dict[str, paddle.Tensor], **kwargs) -> Dict[str, An "old_log_probs": old_log_probs, "reward_advantages": reward_advantages, "sequence_mask": sequence_mask, - # "start": start, - # "use_cache": False, - # "return_dict": True, } - 
kwargs = self.policy_trainer.full_training_step(policy_trainer_inputs, **kwargs) + actor_loss = self.policy_trainer.full_training_step(**policy_trainer_inputs) value_trainer_inputs = { "input_ids": input_ids, @@ -2048,27 +1011,37 @@ def _rl_step(self, rl_batch: Dict[str, paddle.Tensor], **kwargs) -> Dict[str, An "old_reward_values": old_reward_values, "reward_returns": reward_returns, "sequence_mask": sequence_mask, - # "start": start, - # "use_cache": False, - # "return_dict": True, } - kwargs = self.value_trainer.full_training_step(value_trainer_inputs, **kwargs) + reward_critic_loss = self.value_trainer.full_training_step(**value_trainer_inputs) + + # metric + rewards = rl_batch["rewards"] + rewards = rewards.mean() + ref_log_probs = rl_batch["ref_log_probs"] + kl_divergence = ((old_log_probs - ref_log_probs) * sequence_mask).sum(axis=-1).mean() + mean_generated_length = sequence_mask.cast(paddle.float32).sum(axis=-1).mean() + max_generated_length = sequence_mask.cast(paddle.float32).sum(axis=-1).max() return { - "train/actor_loss": kwargs["actor_loss"], - "train/reward_critic_loss": kwargs["reward_critic_loss"], + # when using PipelienParallel, the loss returned is 0 when not reach + # accumulated step and the loss returned at accumulated step is a + # mixed loss. + "train/actor_loss": actor_loss, + "train/reward_critic_loss": reward_critic_loss, "train/reward": rewards, "train/kl_divergence": kl_divergence, "train/mean_generated_length": mean_generated_length, "train/max_generated_length": max_generated_length, "train/actor_lr": self.policy_trainer._get_learning_rate(), "train/reward_critic_lr": self.value_trainer._get_learning_rate(), - }, kwargs + } - def ptx_step(self, ptx_batch: Dict[str, paddle.Tensor], **kwargs) -> Dict[str, Any]: + def ptx_step(self, ptx_batch: Dict[str, paddle.Tensor]) -> Dict[str, Any]: """Perform a single update step with PTX loss.""" - kwargs = self.policy_trainer.full_training_step(ptx_batch, **kwargs) - return {"train/ptx_loss": kwargs["ptx_loss"]}, kwargs + ptx_loss = self.policy_trainer.full_training_step(**ptx_batch) + return { + "train/ptx_loss": ptx_loss, + } def split_ptx_micro_batches( self, @@ -2114,10 +1087,7 @@ def rollout(self, prompt_only_batch: PromptOnlyBatch) -> List[Dict[str, Any]]: if "position_ids" in prompt_only_batch else make_position_ids(attention_mask) ) - # NOTE: generation output of paddlenlp do not contain prompt, we should - # change sequences here. 
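The metric block in `rl_step` above reduces per-token log probabilities and the target-length sequence mask to scalar logging values. A self-contained example with toy tensors (numbers are illustrative only):

import paddle

old_log_probs = paddle.to_tensor([[-1.0, -2.0, -0.5], [-0.8, -1.5, -1.1]])
ref_log_probs = paddle.to_tensor([[-1.2, -1.8, -0.5], [-0.9, -1.4, -1.1]])
sequence_mask = paddle.to_tensor([[1.0, 1.0, 0.0], [1.0, 1.0, 1.0]])  # 1.0 on generated tokens

kl_divergence = ((old_log_probs - ref_log_probs) * sequence_mask).sum(axis=-1).mean()
mean_generated_length = sequence_mask.cast(paddle.float32).sum(axis=-1).mean()
max_generated_length = sequence_mask.cast(paddle.float32).sum(axis=-1).max()
print(kl_divergence.item(), mean_generated_length.item(), max_generated_length.item())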
with infer_guard(self.policy_trainer): - # with contextlib.nullcontext(): sequences = self.actor_model.generate( input_ids=input_ids, attention_mask=attention_mask, @@ -2126,7 +1096,6 @@ def rollout(self, prompt_only_batch: PromptOnlyBatch) -> List[Dict[str, Any]]: synced_gpus=ShardingOption.FULL_SHARD in self.policy_trainer.args.sharding, )[0] sequences = sequences.reshape([input_ids.shape[0], self.args.num_return_sequences, -1]).transpose([1, 0, 2]) - # sequences = [self.load_sing_gen_data(as_batches=False, use_counter=False)["input_ids"]] return [ # TODO(guosheng): move post_rollout out to split_rl_micro_batches @@ -2151,7 +1120,7 @@ def post_rollout( sequence: paddle.Tensor, attention_mask: paddle.Tensor, ) -> Dict[str, Any]: - if False: # self.reward_tokenizer is not self.tokenizer: + if self.reward_tokenizer is not self.tokenizer: # right padding reward_tokenize_output = batch_retokenize( sequence, @@ -2159,11 +1128,11 @@ def post_rollout( dest_tokenizer=self.reward_tokenizer, skip_special_tokens=True, ) - reward_seq = sequence = reward_tokenize_output["input_ids"] - reward_attention_mask = attention_mask = reward_tokenize_output["attention_mask"] + reward_seq = reward_tokenize_output["input_ids"] + reward_attention_mask = reward_tokenize_output["attention_mask"] else: - for text in self.tokenizer.batch_decode(sequence, skip_special_tokens=True): - print(text) + # for text in self.tokenizer.batch_decode(sequence, skip_special_tokens=True): + # print(text) reward_seq = sequence reward_attention_mask = attention_mask # position_ids is necessary for non-right padding @@ -2207,10 +1176,8 @@ def post_rollout( )[ 0 ] # .scores - # TODO(guosheng): move these to model methods such as get_logprobs reward_score = reward_score.squeeze(axis=-1) reward_value = reward_value.squeeze(axis=-1) - reward_value = reward_value[:, :-1] log_probs = gather_log_probabilities(logits[:, :-1], sequence[:, 1:]) ref_log_probs = gather_log_probabilities(ref_logits[:, :-1], sequence[:, 1:]) @@ -2292,65 +1259,3 @@ def normalize_data( # pop out to reduce data dispatch comm overhead rl_batch.pop("prompt") return rl_batch - - def rl_step(self, rl_batch: Dict[str, paddle.Tensor], **kwargs) -> Dict[str, Any]: - # inputs shared by policy and value trainer - input_ids = rl_batch["input_ids"] # length: src+tgt - attention_mask = rl_batch["attention_mask"] # length: src+tgt - position_ids = rl_batch["position_ids"] # length: src+tgt - sequence_mask = rl_batch["sequence_mask"] # length: src+tgt(-1) - # inputs used by policy trainer - old_log_probs = rl_batch["log_probs"] # length: src+tgt(-1) - reward_advantages = rl_batch["reward_advantages"] # length: src+tgt(-1) - # inputs used by value trainer - old_reward_values = rl_batch["reward_values"] # length: src+tgt(-1) - reward_returns = rl_batch["reward_returns"] # length: src+tgt(-1) - - policy_trainer_inputs = { - "input_ids": input_ids, - "attention_mask": attention_mask, - "position_ids": position_ids, - "old_log_probs": old_log_probs, - "reward_advantages": reward_advantages, - "sequence_mask": sequence_mask, - } - kwargs = self.policy_trainer.full_training_step(policy_trainer_inputs, **kwargs) - - value_trainer_inputs = { - "input_ids": input_ids, - "attention_mask": attention_mask, - "position_ids": position_ids, - "old_reward_values": old_reward_values, - "reward_returns": reward_returns, - "sequence_mask": sequence_mask, - } - kwargs = self.value_trainer.full_training_step(value_trainer_inputs, **kwargs) - - # metric - rewards = rl_batch["rewards"] - rewards = 
rewards.mean() - ref_log_probs = rl_batch["ref_log_probs"] - kl_divergence = ((old_log_probs - ref_log_probs) * sequence_mask).sum(axis=-1).mean() - mean_generated_length = sequence_mask.cast(paddle.float32).sum(axis=-1).mean() - max_generated_length = sequence_mask.cast(paddle.float32).sum(axis=-1).max() - - return { - "train/actor_loss": kwargs["actor_loss"], - "train/reward_critic_loss": kwargs["reward_critic_loss"], - "train/reward": rewards, - "train/kl_divergence": kl_divergence, - "train/mean_generated_length": mean_generated_length, - "train/max_generated_length": max_generated_length, - "train/actor_lr": self.policy_trainer._get_learning_rate(), - "train/reward_critic_lr": self.value_trainer._get_learning_rate(), - }, kwargs - - # @paddle.no_grad() - # def post_rollout( - # self, - # prompt: paddle.Tensor, - # sequence: paddle.Tensor, - # attention_mask: paddle.Tensor, - # ) -> Dict[str, Any]: - # if self.reward_tokenizer is not self.tokenizer: - # reward_tokenize_output = batch_retokenize diff --git a/examples/RLHF/trainer_utils.py b/examples/RLHF/trainer_utils.py new file mode 100644 index 000000000000..43ba33be2318 --- /dev/null +++ b/examples/RLHF/trainer_utils.py @@ -0,0 +1,645 @@ +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +import os +from contextlib import contextmanager +from typing import Dict + +import numpy as np +import paddle +import tqdm +from paddle.distributed import fleet +from paddle.io import DataLoader + +from paddlenlp.generation.utils import GenerationMixin +from paddlenlp.trainer.trainer import ( + TRAINER_STATE_NAME, + HybridParallelOptimizer, + NlpDistributedBatchSampler, + ShardingOption, + Trainer, + TrainerCallback, + TrainerControl, + TrainerState, + TrainingArguments, + _obtain_optimizer_parameters_list, + distributed_file, + distributed_isfile, + fused_allreduce_gradients, + logger, + reshard_util, + split_inputs_sequence_dim, +) +from paddlenlp.transformers import BatchEncoding, PretrainedModel, PretrainedTokenizer +from paddlenlp.transformers.configuration_utils import PretrainedConfig +from paddlenlp.transformers.model_outputs import ModelOutput +from paddlenlp.transformers.tokenizer_utils_base import ( + PaddingStrategy, + TruncationStrategy, +) + + +# ########## patches for Trianer ########## +def init_train_model_opt( + self: Trainer, max_steps: int, resume_from_checkpoint: bool = False, clear_master_weight: bool = False +) -> PretrainedModel: + # Copy of model/optimizer init and resuming related code in `Trainer.train`. + # NOTE: this `_load_from_checkpoint` is indeed to load model states in the + # following elif-else branches, though they are apart away in `Trainer.train`. 
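trainer_utils.py defines `init_train_model_opt` and the other helpers as module-level functions taking `self: Trainer` and binds them onto `Trainer` near the end of the file (`Trainer.init_train_model_opt = init_train_model_opt`, etc.). A minimal sketch of that patching pattern with a throwaway class (names are purely illustrative):

class ToyTrainer:
    def __init__(self):
        self.max_steps = 0

def init_extra_state(self: "ToyTrainer", max_steps: int) -> int:
    # once assigned on the class below, this behaves like a regular method
    self.max_steps = max_steps
    return self.max_steps

ToyTrainer.init_extra_state = init_extra_state
assert ToyTrainer().init_extra_state(100) == 100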
+ if not self.args.should_load_sharding_stage1_model: + self._load_from_checkpoint(resume_from_checkpoint) + + # delay_optimizer_creation = ( + # self.sharding is not None + # and ShardingOption.SHARD_OP in self.args.sharding + # ) + delay_optimizer_creation = False + + if not delay_optimizer_creation: + self.create_optimizer_and_scheduler(num_training_steps=max_steps) + + if self.args.should_load_sharding_stage1_model: + model = self._wrap_model_and_load_sharded_checkpoint(resume_from_checkpoint) + elif self.args.should_save_sharding_stage1_model: + # In the non-sharded mode, should invoke _load_from_checkpoint before _wrap_model. + # In this mode, the rank0 load all params and the _wrap_model implicitly broadcast params from rank0 to the other ranks. + model = self._wrap_model(self.model_wrapped) + if self.sharding_io is not None: + assert delay_optimizer_creation is False, "delay_optimizer_creation should be False" + # the self.optimizer should be wrapped and it is done in _wrap_model + self.sharding_io.set_optimizer(self.optimizer) + # for the rest of this function `model` is the outside model, whether it was wrapped or not + if model is not self.model: + self.model_wrapped = model + if delay_optimizer_creation: + self.create_optimizer_and_scheduler(num_training_steps=max_steps) + self._load_optimizer_and_scheduler(resume_from_checkpoint) + else: + model = self._wrap_model(self.model_wrapped) + # for the rest of this function `model` is the outside model, whether it was wrapped or not + if model is not self.model: + self.model_wrapped = model + if delay_optimizer_creation: + self.create_optimizer_and_scheduler(num_training_steps=max_steps) + self._load_optimizer_and_scheduler(resume_from_checkpoint) + + if ShardingOption.FULL_SHARD in self.args.sharding and clear_master_weight: + # for inference model to use Trainer sharding stage3, clear master_weight + # which is created in GroupShardedStage3.__init__ + self.optimizer._master_weights = None + + if self.args.device == "npu" and self.args.flatten_param_grads: + from .plugins.npu_plugin import npu_accelerate_plugin + + npu_accelerate_plugin(self.optimizer) + + return model + + +def init_train_state( + self: Trainer, + resume_from_checkpoint: bool, + train_dataloader: DataLoader, + max_steps: int, + num_train_epochs: int, + num_update_steps_per_epoch: int, +): + args = self.args + + self.state = TrainerState() + self.state.epoch = 0 + epochs_trained = 0 + steps_trained_in_current_epoch = 0 + steps_trained_progress_bar = None + + # Check if continuing training from a checkpoint + if resume_from_checkpoint is not None and distributed_isfile( + os.path.join(resume_from_checkpoint, TRAINER_STATE_NAME) + ): + self.state = TrainerState.load_from_json( + distributed_file(os.path.join(resume_from_checkpoint, TRAINER_STATE_NAME)) + ) + epochs_trained = self.state.global_step // num_update_steps_per_epoch + if not args.ignore_data_skip: + steps_trained_in_current_epoch = self.state.global_step % (num_update_steps_per_epoch) + steps_trained_in_current_epoch *= args.gradient_accumulation_steps + else: + steps_trained_in_current_epoch = 0 + + logger.info(" Continuing training from checkpoint, will skip to saved global_step") + logger.info(f" Continuing training from epoch {epochs_trained}") + logger.info(f" Continuing training from global step {self.state.global_step}") + if not args.ignore_data_skip: + logger.info( + f" Will skip the first {epochs_trained} epochs then the first {steps_trained_in_current_epoch} " + "batches in the first epoch. 
If this takes a lot of time, you can add the `--ignore_data_skip` " + "flag to your launch command, but you will resume the training on data already seen by your model." + ) + if self.is_local_process_zero() and not args.disable_tqdm: + steps_trained_progress_bar = tqdm(total=steps_trained_in_current_epoch) + steps_trained_progress_bar.set_description("Skipping the first batches") + if not args.ignore_data_skip: + if isinstance(train_dataloader, paddle.io.DataLoader) and isinstance( + train_dataloader.batch_sampler, NlpDistributedBatchSampler + ): + consumed_samples = ( + self.state.global_step + * args.train_batch_size + * args.gradient_accumulation_steps + * args.dataset_world_size + ) + train_dataloader.batch_sampler.set_epoch(consumed_samples=consumed_samples) + logger.info(f"Set DistributedBatchSampler consumed_samples to {consumed_samples}") + + self.state.max_steps = int(max_steps) + self.state.num_train_epochs = num_train_epochs + self.state.is_local_process_zero = self.is_local_process_zero() + self.state.is_world_process_zero = self.is_world_process_zero() + + return epochs_trained, steps_trained_in_current_epoch, steps_trained_progress_bar + + +def init_train_log( + self: Trainer, + num_examples: int, + num_train_epochs: int, + total_train_batch_size: int, + max_steps: int, + num_train_samples: int, + model: PretrainedModel, +): + args = self.args + + logger.info("***** Running training *****") + logger.info(f" Num examples = {num_examples:,}") + logger.info(f" Num Epochs = {num_train_epochs}") + logger.info(f" Instantaneous batch size per device = {args.per_device_train_batch_size}") + logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_train_batch_size}") + logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") + logger.info(f" Total optimization steps = {max_steps:,}") + logger.info(f" Total num train samples = {num_train_samples:,}") + # per_device_trainable_numel = sum(p.numel().item() for p in model.parameters() if not p.stop_gradient) + # TODO: Temporary fix since Tensor.numel() not supported in distributed mode + per_device_trainable_numel = sum(np.prod(p.shape) for p in model.parameters() if not p.stop_gradient) + logger.info(f" Number of trainable parameters = {per_device_trainable_numel:,} (per device)") + if self.args.use_hybrid_parallel: + # todo fix for pipeline_parallel_degree + parts_num = max(self.args.tensor_parallel_degree, 1) * max(self.args.pipeline_parallel_degree, 1) + if parts_num > 1: + all_reduce_dtype = "int64" + if paddle.get_device().split(":")[0] in ["npu", "xpu"]: + # TODO(duanyanhui): fix when NPU all_reduce supports int64 + all_reduce_dtype = "float32" + trainable_numel_tensor = paddle.to_tensor(per_device_trainable_numel, dtype=all_reduce_dtype) + paddle.distributed.all_reduce(trainable_numel_tensor) + trainable_numel = int(trainable_numel_tensor.item()) // self.args.dataset_world_size + # the numel is roughly, because the tensor parallel still hold own bias or layer_norm weight without splited + # so, the trainable numel is a little bigger than real. + logger.info(f" Number of trainable parameters = {trainable_numel:,} (all devices, roughly)") + + +def full_training_step(self: Trainer, inputs: Dict[str, paddle.Tensor], **kwargs): + """ + Just a copy of single training step complete code in Trainer.train while loop + which including forward+backward+step, while wraps the inputs and outputs to + make the complicated copied code no need to change. 
Maybe a better way is to + add fine-grained methods including these steps to Trainer which is similar to + DeepSpeed engine. + """ + + # TODO(guosheng): step, steps_trained_in_current_epoch and steps_trained_progress_bar + # should use reference since they would be overwrite. + # for state update + epoch = kwargs.get("epoch", 0) + step = kwargs.get("step", 0) + steps_in_epoch = kwargs.get("steps_in_epoch", 0) + step_control = kwargs.get("step_control", 0) + # for step and progress update when resuming data + train_dataloader = kwargs.get("train_dataloader", None) + resume_from_checkpoint = kwargs.get("resume_from_checkpoint", None) + steps_trained_in_current_epoch = kwargs.get("steps_trained_in_current_epoch", 0) + steps_trained_progress_bar = kwargs.get("steps_trained_progress_bar", None) + # for eval output ignore to gather + ignore_keys_for_eval = kwargs.get("ignore_keys_for_eval", None) + tr_loss = kwargs.get("tr_loss", 0.0) + model = kwargs.get("model", self.model_wrapped) + + args = self.args + + if self.args.use_hybrid_parallel and self.args.sep_parallel_degree > 1: + inputs = split_inputs_sequence_dim(inputs) + self.timers and self.timers("read-data").stop() + os.environ["TRAINER_GLOBAL_STEP"] = str(self.state.global_step) + self.callback_handler.on_load_data_end(args, self.state, self.control, inputs=inputs) + + # Skip past any already trained steps if resuming training + # for paddlenlp.utils.batch_sampler.DistributedBatchSampler + # We use consumed_samples to reset the status + if isinstance(train_dataloader, paddle.io.DataLoader) and isinstance( + train_dataloader.batch_sampler, NlpDistributedBatchSampler + ): + if step == 0: + if steps_trained_progress_bar is not None: + steps_trained_progress_bar.update(steps_trained_in_current_epoch) + steps_trained_progress_bar.close() + steps_trained_progress_bar = None + self._load_rng_state(resume_from_checkpoint) + step += steps_trained_in_current_epoch + elif steps_trained_in_current_epoch > 0: + steps_trained_in_current_epoch -= 1 + if steps_trained_progress_bar is not None: + steps_trained_progress_bar.update(1) + if steps_trained_in_current_epoch == 0: + self._load_rng_state(resume_from_checkpoint) + # continue + final_local_vars = locals() + for k in kwargs.keys(): + if k in final_local_vars: + kwargs[k] = final_local_vars[k] + return kwargs + elif steps_trained_progress_bar is not None: + steps_trained_progress_bar.close() + steps_trained_progress_bar = None + + if step_control % args.gradient_accumulation_steps == 0: + self.control = self.callback_handler.on_step_begin(args, self.state, self.control) + self.timers and self.timers("forward-backward").start() + + dp_enabled = self.args.data_parallel_degree > 1 if self.args.use_hybrid_parallel else args.local_rank != -1 + forbidden_no_sync = False + # stage2 and stage3 should not no_sync, because the is no DDP wrapper and no_sync API + # hybrid_parallel (tp or pp or sharding stage 1) should not no_sync + if self.args.use_hybrid_parallel: + forbidden_no_sync = True + + availiable_no_sync = dp_enabled and not forbidden_no_sync + + is_no_sync = ( + ((step_control + 1) % args.gradient_accumulation_steps != 0) + and availiable_no_sync + and args._no_sync_in_gradient_accumulation + ) or (args.recompute and availiable_no_sync) + # sharding + # stage1. the same as ddp + # stage2. 
manualy collect gradient on dp group + + dp_master_grad = self.args.world_size > 1 and self.args.amp_master_grad and not self.args.use_hybrid_parallel + if dp_master_grad: + is_no_sync = True + + if is_no_sync: + # Avoid unnecessary DDP synchronization since there will be no backward pass on this example. + with model.no_sync(): + tr_loss_step = self.training_step(model, inputs) + else: + tr_loss_step = self.training_step(model, inputs) + + tr_loss += tr_loss_step + + if (step_control + 1) % args.gradient_accumulation_steps == 0 or ( + # last step in epoch but step is always smaller than gradient_accumulation_steps + steps_in_epoch <= args.gradient_accumulation_steps + and (step + 1) == steps_in_epoch + ): + if self.args.pipeline_parallel_degree <= 1 and self._enable_delay_scale_loss(): + tr_loss /= self.args.gradient_accumulation_steps + + self.timers and self.timers("forward-backward").stop() + # Maunally collect gradients + # Case 1: Use recompute and dp + # Case 2: Hack dp with master_grad + # Case 3: Pipeline or sharding overlap + # local_rank != -1 don't means dp in networks. + self.timers and self.timers("all-reduce").start() + + # Case 1: Use recompute and dp / sharding stage1, + # manualy collect gradient for dp. + if args.recompute and availiable_no_sync: + fused_allreduce_gradients(list(model.parameters()), None) + + # Case 2: hack dp with master_grad + if dp_master_grad and not (args.recompute and availiable_no_sync): + fused_allreduce_gradients(list(model.parameters()), None) + + # Pipeline parallel mode, handle gradient reduce here to overlap + pipeline_parallel_config = ( + set(args.pipeline_parallel_config.split(" ")) if args.pipeline_parallel_degree > 1 else set() + ) + enable_dp_comm_overlap = "enable_dp_comm_overlap" in pipeline_parallel_config + enable_release_grads = "enable_release_grads" in pipeline_parallel_config + + # Case 3: Pipeline parallel mode, overlap with dp + if isinstance(self.optimizer, HybridParallelOptimizer) and not self.do_grad_scaling: + parameters_list = _obtain_optimizer_parameters_list(self.optimizer._inner_opt) + + if not enable_dp_comm_overlap: + if self.optimizer._sharding_enable: + assert reshard_util.is_sharding_opt(self.optimizer) + self.optimizer._inner_opt.reduce_gradients(list(parameters_list), self.optimizer._hcg) + + if self.optimizer._dp_enable or getattr(self.optimizer, "_sep_enable", False): + fused_allreduce_gradients(list(parameters_list), self.optimizer._hcg) + + self.timers and self.timers("all-reduce").stop() + self.timers and self.timers("optimizer-step").start() + + if self.args.gradient_accumulation_steps > 1 and self._enable_delay_scale_loss(): + for p in model._layers.parameters(): + with paddle.no_grad(): + if hasattr(p, "main_grad") and p.main_grad is not None: + assert p.grad is None + p.main_grad.scale_(1.0 / self.args.gradient_accumulation_steps) + elif p.grad is not None: + p.grad.scale_(1.0 / self.args.gradient_accumulation_steps) + + # Optimizer step + self.callback_handler.on_optimizer_begin( + args, self.state, self.control, scaler=self.scaler if self.do_grad_scaling else None + ) + optimizer_was_run = True + if self.do_grad_scaling: + scale_before = paddle.assign(self.scaler._scale) + self.scaler.step(self.optimizer) + self.scaler.update() + scale_after = self.scaler._scale + optimizer_was_run = not self.scaler._cache_founf_inf + if not optimizer_was_run: + scale_before_value = scale_before.cpu().numpy() + scale_after_value = scale_after.cpu().numpy() + logger.warning( + f"optimizer not run, scale_before: 
{scale_before_value[0]}, scale_after: {scale_after_value[0]}" + ) + elif isinstance(self.optimizer, HybridParallelOptimizer): + self.optimizer._step(parameters_list) + else: + self.optimizer.step() + + self.timers and self.timers("optimizer-step").stop() + + if optimizer_was_run: + self.lr_scheduler.step() + + if enable_release_grads and args.pipeline_parallel_degree > 1: + self.optimizer.clear_grad(set_to_zero=False) + for _, buffers in model._chunk_2_comm_buffers.items(): + for buffer in buffers: + buffer._clear_grad_storage() + else: + self.optimizer.clear_grad() + + self.callback_handler.on_optimizer_end( + args, self.state, self.control, scaler=self.scaler if self.do_grad_scaling else None + ) + + self.state.global_step += 1 + self.state.epoch = epoch + (step + 1) / steps_in_epoch + self.control = self.callback_handler.on_step_end(args, self.state, self.control) + self._maybe_log_save_evaluate(tr_loss, model, epoch, ignore_keys_for_eval, inputs=inputs) + self._print_timer() + step_control = 0 + else: + self.control = self.callback_handler.on_substep_end(args, self.state, self.control) + step_control += 1 + + if self.control.should_epoch_stop or self.control.should_training_stop: + # break + final_local_vars = locals() + for k in kwargs.keys(): + if k in final_local_vars: + kwargs[k] = final_local_vars[k] + return kwargs + self.timers and self.timers("read-data").start() + + final_local_vars = locals() + for k in kwargs.keys(): + if k in final_local_vars: + kwargs[k] = final_local_vars[k] + return kwargs + + +Trainer.init_train_model_opt = init_train_model_opt +Trainer.init_train_log = init_train_log +Trainer.init_train_state = init_train_state +Trainer.full_training_step = full_training_step +# ########## patches for Trianer ########## + + +class MuteDefaultFlowCallback(TrainerCallback): + """ + Add this callback can cencel logging/evaluation/saving by DefaultFlowCallback. + Use this when having multi trainer. + """ + + def on_step_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs): + control.should_save = False + control.should_evaluate = False + control.should_log = False + return control + + +@contextmanager +def guard_set_args(args, arg_name_values): + for k, v in arg_name_values.items(): + old_value = getattr(args, k, None) + setattr(args, k, v) + arg_name_values[k] = old_value + yield + for k, v in arg_name_values.items(): + old_value = getattr(args, k) + setattr(args, k, v) + arg_name_values[k] = old_value + + +class PipeEvalModel(GenerationMixin): + """ + Wrapper for PipelineParallel to do evaluate and generate. Currently only + support . + """ + + def __init__(self, trainer: Trainer): + self.model: fleet.model.PipelineParallel = trainer.model_wrapped + self.config: PretrainedConfig = trainer.model.config + self._is_gen = False + self.update_model_kwargs_for_generation = ( + self.model._layers._non_pipe_model_class.update_model_kwargs_for_generation + ) + + @property + def pp_group(self): + return self.model.pp_group + + def eval(self): + self.model.eval() + + def train(self): + self.model.train() + + def _broadcast_outputs(self, outputs): + # outputs is PipelineParallel.eval_batch which is a list of batches. 
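`guard_set_args` above temporarily overrides attributes and restores the previous values on exit, which is how the PPO trainer redirects `output_dir` for its sub-trainers when checkpointing. A small usage example with a stand-in args object (assuming the `guard_set_args` defined above is in scope):

from types import SimpleNamespace

args = SimpleNamespace(output_dir="ckpts", logging_steps=10)
with guard_set_args(args, {"output_dir": "ckpts/policy"}):
    assert args.output_dir == "ckpts/policy"  # overridden inside the block
assert args.output_dir == "ckpts"             # restored afterwards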
+ out = [] + outputs = (outputs,) if isinstance(outputs, paddle.Tensor) else outputs + for tensors in outputs: + if not self.model.is_pipeline_last_stage(): + tensor = tensors if isinstance(tensors, paddle.Tensor) else tensors[0] + head_out_meta = ( + (self.model._layers.head_out_meta,) + if isinstance(self.model._layers.head_out_meta, paddle.static.InputSpec) + else self.model._layers.head_out_meta + ) + tensors = tuple( + paddle.empty( + shape=[ + tensor.shape[i] if (meta.shape[i] is None or meta.shape[i] < 0) else meta.shape[i] + for i in range(len(meta.shape)) + ], + dtype=tensor.dtype if meta.dtype is None else meta.dtype, + ) + for meta in head_out_meta + ) + else: + # Currently use tuple instead of ModelOutput and require the + # caller use the return result as tuple. + tensors = ( + (tensors,) + if isinstance(tensors, paddle.Tensor) + else tensors.to_tuple() + if isinstance(tensors, ModelOutput) + else tensors + ) + + # use map_structure seems hung + for tensor in tensors: + paddle.distributed.broadcast(tensor, src=self.model.pp_group.ranks[-1], group=self.model.pp_group) + out.append(tensors[0] if len(tensors) == 1 else tensors) + return out[0] if len(out) == 1 else out + + def __call__(self, *args, **kwargs): + model = self.model + assert self.model.training is False + if self._is_gen: + # inputs by `prepare_inputs_for_generation` is a dict with following keys: + # "input_ids", "position_ids", "past_key_values", "use_cache", "attention_mask" + # NOTE: 1. cache/past_key_values should be passed across decoding steps + # by using as model attr rather than input args to reduce comm overhead. + # Also, pipe model defined for training not support this cache input. + # 2. ignore use_cache since _check_data_vaild requires tensor if not None. + # 3. attention_mask can reuse _prepare_decoder_attention_mask in LlamaEmbeddingPipe. + # 4. position_ids pass through _prepare_pipeline_inputs_func and PipeLayer. + inputs, labels = model._prepare_pipeline_inputs_func(*args, **kwargs) + # currently, set accumulate_steps to 1 to avoid multi-batch eval/gen + with guard_set_args(model, {"_compute_loss": False, "accumulate_steps": 1}): + outputs = model.eval_batch([inputs, labels], compute_loss=False) + # TODO(guosheng): Broadcasted logits are used to get next_scores, remove + # it to reduce comm overhead. Also note that we still need broadcast + # next_tokens though logits are broadcasted since pp ranks' seeds differs. + # Currently, just slice the last token to reduce comm overhead. + outputs = [ + micro_batch_output[:, -1, :].unsqueeze(1) + if isinstance(micro_batch_output, paddle.Tensor) + else micro_batch_output[0][:, -1, :].unsqueeze(1) + for micro_batch_output in outputs + ] + outputs = self._broadcast_outputs(outputs) + else: + # use _prepare_pipeline_inputs_func to convert pipeline inputs + inputs, labels = model._prepare_pipeline_inputs_func(*args, **kwargs) + # NOTE(guosheng): bug seems exist. pp.eval_batch(compute_loss=False) + # will set pp._compute_loss to False and would not set it back. Thus + # hack here to set it back. 
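# The workaround below leans on `guard_set_args` (defined earlier in this file) to
# temporarily flip attributes and restore them on exit. A self-contained sketch of
# that set-and-restore idea; the helper name and the namespace object here are
# illustrative only, not the real PipelineParallel instance:
from contextlib import contextmanager
from types import SimpleNamespace

@contextmanager
def _tmp_set_attrs(obj, overrides):
    # remember old values, apply overrides, then restore no matter what happens
    old = {k: getattr(obj, k, None) for k in overrides}
    for k, v in overrides.items():
        setattr(obj, k, v)
    try:
        yield
    finally:
        for k, v in old.items():
            setattr(obj, k, v)

_pp = SimpleNamespace(_compute_loss=True, accumulate_steps=8)
with _tmp_set_attrs(_pp, {"_compute_loss": False, "accumulate_steps": 1}):
    assert _pp._compute_loss is False and _pp.accumulate_steps == 1
assert _pp._compute_loss is True and _pp.accumulate_steps == 8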
+ with guard_set_args(model, {"_compute_loss": False, "accumulate_steps": 1}): + outputs = model.eval_batch([inputs, labels], compute_loss=False) + outputs = self._broadcast_outputs(outputs) + return outputs + + def generate(self, *args, **kwargs): + self._is_gen = True + # patch DecoderLayerPipe to use cache, DecoderLayerPipe is subclass of + # DecoderLayer, and would call super().forward + ori_decoder_layer_forward = self.model._layers._non_pipe_decoder_layer_class.forward + + def decoder_layer_forward(layer_self, *args, **kwargs): + kwargs.update({"use_cache": True, "past_key_value": getattr(layer_self, "_cache", None)}) + outputs = ori_decoder_layer_forward(layer_self, *args, **kwargs) + output = outputs[0] + layer_self._cache = outputs[1] + self._has_cache = True + return output + + with guard_set_args(self.model._layers._non_pipe_decoder_layer_class, {"forward": decoder_layer_forward}): + outputs = super().generate(*args, **kwargs) + self._is_gen = False + # clear cache of decoder layers, sublayers is incursive thus suitable + # to both 1F1B and interleave + for layer in self.model._layers.sublayers(): + if isinstance(layer, self.model._layers._non_pipe_decoder_layer_class): + layer._cache = None + self._has_cache = False + return outputs + + def prepare_inputs_for_generation(self, *args, **kwargs): + arg_bind = inspect.signature(self.model._layers._non_pipe_model_class.prepare_inputs_for_generation).bind( + *((self,) + args), **kwargs + ) + arg_bind.apply_defaults() + arg_dict = arg_bind.arguments + last_arg_name, last_arg_value = arg_dict.popitem() + if arg_bind.signature.parameters[last_arg_name].kind == inspect.Parameter.VAR_KEYWORD: + arg_dict.update(last_arg_value) + else: + arg_dict[last_arg_name] = last_arg_value + arg_dict.pop("self") + past_key_values = arg_dict.get("past_key_values", None) + # prepare_inputs_for_generation use past_key_values to discrimate prefill + # or decode and slice inputs accordingly. 
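# The argument normalization above relies on inspect.signature(...).bind plus
# apply_defaults() to flatten whatever mix of positional/keyword arguments the
# caller used into one dict. A standalone sketch with a dummy function; the names
# here are illustrative, not the real model API:
import inspect

def _dummy_prepare(self, input_ids, use_cache=True, past_key_values=None, **kwargs):
    pass

_bound = inspect.signature(_dummy_prepare).bind(None, [[1, 2, 3]], attention_mask="m")
_bound.apply_defaults()
_args = dict(_bound.arguments)
_last_name, _last_value = _args.popitem()          # the **kwargs bucket comes last
if inspect.signature(_dummy_prepare).parameters[_last_name].kind == inspect.Parameter.VAR_KEYWORD:
    _args.update(_last_value)                      # merge it back as plain keys
_args.pop("self")
assert _args == {"input_ids": [[1, 2, 3]], "use_cache": True,
                 "past_key_values": None, "attention_mask": "m"}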
+ if getattr(self, "_has_cache", False): + arg_dict.update({"past_key_values": True}) + model_inputs = self.model._layers._non_pipe_model_class.prepare_inputs_for_generation(self, **arg_dict) + model_inputs.update({"past_key_values": past_key_values}) + return model_inputs + + +def is_same_tokenizer( + tokenizer: PretrainedTokenizer, + other_tokenizer: PretrainedTokenizer, +) -> bool: + """Check if two tokenizers are the same.""" + return tokenizer is other_tokenizer or ( + tokenizer.__class__ == other_tokenizer.__class__ and tokenizer.get_vocab() == other_tokenizer.get_vocab() + ) + + +def batch_retokenize( + input_ids: paddle.Tensor, + src_tokenizer: PretrainedTokenizer, + dest_tokenizer: PretrainedTokenizer, + *, + padding: bool | str | PaddingStrategy = PaddingStrategy.LONGEST, + truncation: bool | str | TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, + skip_special_tokens: bool = True, +) -> BatchEncoding: + """Re-tokenize a batch of input ids from one tokenizer to another.""" + output = dest_tokenizer( + [ + text + dest_tokenizer.eos_token + for text in src_tokenizer.batch_decode( + input_ids, + skip_special_tokens=skip_special_tokens, + ) + ], + padding=padding, + truncation=truncation, + return_tensors="pd", + ) + return output diff --git a/paddlenlp/experimental/transformers/llama/modeling.py b/paddlenlp/experimental/transformers/llama/modeling.py index 6923ba0db0ec..c85c1def54ea 100644 --- a/paddlenlp/experimental/transformers/llama/modeling.py +++ b/paddlenlp/experimental/transformers/llama/modeling.py @@ -126,7 +126,7 @@ def __init__(self, config: LlamaConfig): self.quant_type ) - if config.tensor_parallel_degree > 1: + if config.tensor_parallel_degree > 1 and config.vocab_size % config.tensor_parallel_degree == 0: self.embed_tokens = fleet.meta_parallel.VocabParallelEmbedding( self.vocab_size, self.hidden_size, From 860e61d0a5c2a2ddac931f6cca2aed38bbfb9af6 Mon Sep 17 00:00:00 2001 From: whucsgs Date: Wed, 13 Mar 2024 03:10:22 +0000 Subject: [PATCH 30/46] Open PolicyTrainer loss logging postprocess. More StepTrainer docs. --- examples/RLHF/ppo_trainer.py | 81 ++++++++++++++++-------------------- 1 file changed, 36 insertions(+), 45 deletions(-) diff --git a/examples/RLHF/ppo_trainer.py b/examples/RLHF/ppo_trainer.py index f784ffdd16f8..dff39c2ab5ba 100644 --- a/examples/RLHF/ppo_trainer.py +++ b/examples/RLHF/ppo_trainer.py @@ -61,32 +61,15 @@ class StepTrainer(Trainer): """ Trainer enhanced with step-level training combining with patches of Trianer. - We can use this to do training whose step is composed of multi models (by + We can use this to do training whose step is composed of multi models via multiple instances of StepTrainer, such as PPO. Additionally, using a mixed - loss and get the separated loss metrics is supported. + loss and get the separated loss metrics is supported, which is helpful to + PipelienParallel with a mixed loss. """ - # used to create criterion for trainer + # used to create criterion for trainer, please refer to `create_criterion` + # for details. loss_cls: type - # Moreover, a model/StepTrainer instance may use a mixed loss which uses a - # different loss for different step and inputs, while we often want to get - # the separated loss metric. We use a callable discriminator using inputs - # (dict) as arguments and returning corresponding loss name to identify - # current loss. NOTE: please make the loss name ends with "_loss". `tr_loss` - # is the default loss name used in trainer.train. 
- loss_identifier: callable - # refer to mark_step_loss. NOTE: This is transparent to users - loss_step_indice: Dict - # When using multiple instances of StepTrainer collaborate to do one training - # step, each should use its own vars such as loss/model/step_control which are - # local vars in Trainer.train, we define these vars by `train_step_vars`. They - # are vars needed by full_training_step for training control, as following: - # tr_loss, model, epoch, step, step_control. NOTE: This is transparent to users. - # some vars such as `epoch` are meaningless, they are needed just because - # full_training_step copies code from Trainer.train which is designed for - # complete training process. - # TODO(guosheng): use namedtuple or dataclass to make it more readable. - train_step_vars: Dict def __init__( self, @@ -121,7 +104,11 @@ def __init__( self.criterion = self.create_criterion() def create_criterion(self): - """loss creator for trainer.""" + """ + create loss using `loss_cls` for trainer. It would use a wrapped loss_cls + whose label arguments are merged into one argument, this is useful to + PipelineParallel and trainer.criterion which limit loss format. + """ criterion = create_loss(self.loss_cls, self.model.config, self.args, merge_labels=True) return criterion @@ -157,8 +144,20 @@ def get_model(self, train=False): def get_train_step_vars(self, vars: Dict = None) -> Dict: """ - return `train_step_vars`. If not exists, create it first. If `vars` is - not None, update `train_step_vars` with it. + NOTE: This is transparent to users. + When using multiple instances of StepTrainer collaborate to do one training + step, each should use its own vars such as loss/model/step_control which are + local vars in Trainer.train, we define these vars by `train_step_vars`. They + are vars needed by full_training_step for training control, as following: + tr_loss, model, epoch, step, step_control. + some vars such as `epoch` are meaningless, they are needed just because + full_training_step copies code from Trainer.train which is designed for + complete training process. + + return `train_step_vars` (dict). If not exists, create it first. If `vars` + is not None, update `train_step_vars` with it. + + TODO(guosheng): use namedtuple or dataclass to make it more readable. """ if not hasattr(self, "train_step_vars"): # should be called after model is wrapped since the model field should @@ -244,6 +243,7 @@ def _prepare_inputs(self, inputs: Dict[str, Union[paddle.Tensor, Any]]) -> Dict[ def mark_step_loss(self, loss_name): """ + NOTE: This is transparent to users. When using a mixed loss we often want to get the separated loss metrics, thus we mark loss type of each training step to separate them. This is not necessary since the loss would be returnd after each training step. @@ -265,7 +265,10 @@ def get_step_loss(self, loss_prefix: str = "") -> Dict: """ Return a dict mapping loss name to value of current training step. This is mainly to get loss for metric logging, and it would not affect the - training. Overwrite it when we want to change the logging value. + training. This is mostly helpful to PipelienParallel with a mixed loss + in which the loss returned is 0 when not reach accumulated step and the + loss returned at accumulated step is a mixed loss. + NOTE: Overwrite it when we want to change the logging value. 
""" model = self.get_model(train=True) if not hasattr(self, "loss_dict"): @@ -324,21 +327,20 @@ def loss_identifier(self, inputs: Dict) -> str: return loss_name def get_step_loss(self, loss_prefix: str = "") -> Dict: - loss_dict = super().get_step_loss(loss_prefix=loss_prefix) + loss_dict = super().get_step_loss(loss_prefix="") # use_ptx would double the gradient_accumulation_steps which causes # actor_loss and ptx_loss reduced by half. Moreover, ptx_loss should # be divided by ptx_coeff for logging. # TODO(guosheng): maybe should consider self._enable_delay_scale_loss() - # if "ptx_loss" in loss_dict: - # loss_dict[loss_prefix + "ptx_loss"] = loss_dict[ - # "ptx_loss"] * 2 / self.criterion.ptx_coeff - # loss_dict[loss_prefix + "actor_loss"] = loss_dict["actor_loss"] * 2 + if "ptx_loss" in loss_dict: + loss_dict[loss_prefix + "ptx_loss"] = loss_dict["ptx_loss"] * 2 / self.criterion.ptx_coeff + loss_dict[loss_prefix + "actor_loss"] = loss_dict["actor_loss"] * 2 return loss_dict class ValueTrainer(StepTrainer): loss_cls = RLHFValueLoss - # define loss name + # define loss name for logging loss_identifier = lambda self, inputs: "reward_critic_loss" @@ -493,7 +495,6 @@ def __init__( # Those value can be changed self.kl_coeff = self.args.kl_coeff self.clip_range_score = self.args.clip_range_score - self.policy_trainer.ptx_coeff = self.ptx_coeff = self.args.ptx_coeff self.gamma = 1.0 self.gae_lambda = 0.95 @@ -873,24 +874,14 @@ def _maybe_log_save_evaluate(self, tr_loss, model, epoch, ignore_keys_for_eval, logs: Dict[str, float] = {} for k, v in tr_loss.items(): - if isinstance(v, paddle.Tensor) and "lr" not in k and "max_generated_length" not in k: + if isinstance(v, paddle.Tensor) and "lr" not in k and "max" not in k: v_scalar = self._nested_gather(v).mean().item() - # TODO(guosheng): maybe should consider self._enable_delay_scale_loss() - # and maybe should merge with loss postprocess in PP - if "train/actor_loss" == k and "train/ptx_loss" in tr_loss: - # use_ptx would double the gradient_accumulation_steps - # which causes actor_loss and ptx_loss reduced by half - v_scalar = v_scalar * 2 - elif "train/ptx_loss" == k: - # similar to actor_loss and should double, additionally - # it should be divided by ptx_coeff for logging - v_scalar = v_scalar * 2 / self.ptx_coeff logs[k] = round(v_scalar / (self.state.global_step - self._globalstep_last_logged), 8) v.subtract_(v) attr_name = "_total_" + k.split("/")[-1] + "_scalar" attr_value = getattr(self, attr_name, 0) setattr(self, attr_name, attr_value + v_scalar) - elif "max_generated_length" in k: + elif isinstance(v, paddle.Tensor) and "max" in k: v_scalar = self._nested_gather(v).max().item() logs[k] = v_scalar else: From afa1b5384b58a48319078d64da17609534998328 Mon Sep 17 00:00:00 2001 From: Zhong Hui Date: Fri, 15 Mar 2024 15:31:55 +0800 Subject: [PATCH 31/46] more timer. 
--- examples/RLHF/ppo_main.py | 2 +- examples/RLHF/ppo_trainer.py | 89 ++++++++++++++++++++++++++---- paddlenlp/trainer/plugins/timer.py | 1 + 3 files changed, 80 insertions(+), 12 deletions(-) diff --git a/examples/RLHF/ppo_main.py b/examples/RLHF/ppo_main.py index 76af7608105b..f925fb6603e7 100644 --- a/examples/RLHF/ppo_main.py +++ b/examples/RLHF/ppo_main.py @@ -145,7 +145,7 @@ class TrainingArguments(TrainingArguments): offload_level: str = field( default=None, - metadata={"help": "Offload model, optional for: eval, reward, eval reward, ."}, + metadata={"help": "Offload model, optional for: eval, reward, optimizer, train_model"}, ) # save_generation_output: bool = field( diff --git a/examples/RLHF/ppo_trainer.py b/examples/RLHF/ppo_trainer.py index ed1a52a1e14c..c95fa565572c 100644 --- a/examples/RLHF/ppo_trainer.py +++ b/examples/RLHF/ppo_trainer.py @@ -294,7 +294,7 @@ def full_training_step(self: Trainer, inputs: Dict[str, paddle.Tensor], **kwargs if self.args.use_hybrid_parallel and self.args.sep_parallel_degree > 1: inputs = split_inputs_sequence_dim(inputs) - self.timers and self.timers("read-data").stop() + # self.timers and self.timers("read-data").stop() os.environ["TRAINER_GLOBAL_STEP"] = str(self.state.global_step) self.callback_handler.on_load_data_end(args, self.state, self.control, inputs=inputs) @@ -474,7 +474,7 @@ def full_training_step(self: Trainer, inputs: Dict[str, paddle.Tensor], **kwargs if k in final_local_vars: kwargs[k] = final_local_vars[k] return kwargs - self.timers and self.timers("read-data").start() + # self.timers and self.timers("read-data").start() final_local_vars = locals() for k in kwargs.keys(): @@ -488,8 +488,8 @@ def offload_tensor_to_cpu(tensors): for _, v in tensors.items(): offload_tensor_to_cpu(v) elif isinstance(tensors, paddle.Tensor): - if not tensors.place.is_cpu_place(): - cpu_tensor = tensors._copy_to(paddle.CPUPlace(), True) + if tensors.place.is_gpu_place(): + cpu_tensor = tensors._copy_to(paddle.CUDAPinnedPlace(), False) tensors.value().get_tensor()._share_data_with(cpu_tensor.value().get_tensor()) else: logger.warning(f"Can't parse for type {type(tensors)}") @@ -502,7 +502,7 @@ def reload_tensor_to_gpu(tensors): reload_tensor_to_gpu(v) elif isinstance(tensors, paddle.Tensor): if not tensors.place.is_gpu_place(): - gpu_tensor = tensors._copy_to(paddle.CUDAPlace(global_dev_id), True) + gpu_tensor = tensors._copy_to(paddle.CUDAPlace(global_dev_id), False) tensors.value().get_tensor()._share_data_with(gpu_tensor.value().get_tensor()) else: logger.warning(f"Can't parse for type {type(tensors)}") @@ -744,6 +744,7 @@ def create_send_recv_table(train_keys, eval_keys): # tp情况 # tp+pp->tp + self.timers and self.timers("export-merge-pp").start() if eval_tp_size > 1 and train_pp_size > 1: table = create_send_recv_table(train_state_dict.keys(), eval_state_dict.keys()) @@ -767,12 +768,16 @@ def create_send_recv_table(train_keys, eval_keys): if global_rank == src_rank and with_offload: offload_tensor_to_cpu(train_state_dict[key]) + self.timers and self.timers("export-merge-pp").stop() + self.timers and self.timers("export-broadcast-pp").start() if pp_group.nranks > 1: paddle.distributed.parallel.sync_params_buffers( eval_model, comm_group=pp_group, src_rank=pp_group.ranks[0], fuse_params=False ) + self.timers and self.timers("export-broadcast-pp").stop() else: # 其他 DP rank 的state dict, 适配 offload 和初始化 + self.timers and self.timers("export-offload-and-init").start() if with_offload: for key in list(train_state_dict.keys()): 
offload_tensor_to_cpu(train_state_dict[key]) @@ -780,8 +785,10 @@ def create_send_recv_table(train_keys, eval_keys): if not v._is_initialized(): t = paddle._C_ops.full_like(v, 0, v.dtype, paddle.CUDAPlace(global_dev_id)) v.get_tensor()._share_data_with(t.get_tensor()) + self.timers and self.timers("export-offload-and-init").stop() paddle.distributed.barrier() + self.timers and self.timers("export-broadcast-sd-dp").start() if eval_tp_size == 1: for _, tensor in eval_state_dict.items(): paddle.distributed.broadcast(tensor, src=0, group=None, sync_op=True) @@ -795,6 +802,7 @@ def create_send_recv_table(train_keys, eval_keys): paddle.distributed.parallel.sync_params_buffers( eval_model, comm_group=dp_group, src_rank=dp_group.ranks[0], fuse_params=False ) + self.timers and self.timers("export-broadcast-sd-dp").stop() # paddle.save(eval_state_dict, f"./tmp/eval_{sd_group.rank}_tp_{eval_tp_rank}_pp_{pp_group.rank}.pdparams") # paddle.save(train_state_dict, f"./tmp/train_{sd_group.rank}_tp_{tp_group.rank}_pp_{pp_group.rank}.pdparams") # paddle.distributed.barrier() @@ -1619,13 +1627,17 @@ def gen_epoch_data(): # self.optimizer.offload() if self.args.eval_mode is not None and "optimizer" in self.args.offload_level: + self.timers and self.timers("offload-optimizer").start() offload_tensor_to_cpu(self.policy_trainer.optimizer.state_dict()) offload_tensor_to_cpu(self.value_trainer.optimizer.state_dict()) + self.timers and self.timers("offload-optimizer").stop() + + self.timers and self.timers("export-evaluate-model").start() self.policy_trainer.export_evaluate_model( self.policy_trainer.model, self._policy_model_eval, - with_offload=self.args.offload_level is not None, + with_offload="train_model" in self.args.offload_level, ) gp = ( self.policy_trainer._policy_model_eval_group @@ -1635,21 +1647,38 @@ def gen_epoch_data(): # gp = create_data_trans_group(self.args.logical_process_index, paddle.distributed.get_rank(), self._policy_model_eval.config.tensor_parallel_degree) # # todo: zhui self.value_trainer.export_evaluate_model( - self.value_trainer.model, self._value_model_eval, with_offload=self.args.offload_level is not None + self.value_trainer.model, + self._value_model_eval, + with_offload="train_model" in self.args.offload_level, ) + self.timers and self.timers("export-evaluate-model").stop() # self.reference_model.reload() # self.reward_model.reload() - # reload_tensor_to_gpu(self.reference_model.state_dict()) - # reload_tensor_to_gpu(self.reward_model.state_dict()) + if "reward" in self.args.offload_level: + self.timers and self.timers("reload-reward").start() + reload_tensor_to_gpu(self.reference_model.state_dict()) + reload_tensor_to_gpu(self.reward_model.state_dict()) + self.timers and self.timers("reload-reward").stop() # todo, split prompt_only_batch # pp2tp2dp2 -> dp4tp2 prompt_only_batch + self.timers and self.timers("resplit-data").start() prompt_only_batch = data_group_split(prompt_only_batch, group=gp) + self.timers and self.timers("resplit-data").stop() + + self.timers and self.timers("split-rl-micro-batches").start() # 生成数据 + # per_train 4, accu 8 + # prompt 32 + + # 32? 
[4,4,4,4,4,4,4] rl_batches = self.split_rl_micro_batches(prompt_only_batch) # rl_batches = self.load_sing_gen_data(as_batches=True, # use_counter=True) + self.timers and self.timers("split-rl-micro-batches").stop() + + self.timers and self.timers("ptx-batch").start() if self.use_ptx: ptx_batch = data_group_split(ptx_batch, group=gp) ptx_batches = self.split_ptx_micro_batches(ptx_batch) @@ -1657,6 +1686,9 @@ def gen_epoch_data(): else: ptx_batches = [None for _ in range(len(rl_batches))] + self.timers and self.timers("ptx-batch").stop() + + self.timers and self.timers("merge-data").start() # todo, merge data if gp is not None: input_ids_length = rl_batches[0]["input_ids"].shape[-1] @@ -1668,6 +1700,7 @@ def gen_epoch_data(): rl_batches[0] = repad_rl_batches(rl_batches[0], input_ids_length_batchs) paddle.device.cuda.empty_cache() + self.timers and self.timers("merge-data").stop() # # 数据造好, 开始训练 # self.reference_model.offload() @@ -1676,11 +1709,15 @@ def gen_epoch_data(): # value_model_eval.cleanup() if self.args.offload_level is not None: if "eval" in self.args.offload_level: + self.timers and self.timers("offload-eval").start() cleanup_tensor_space(self._policy_model_eval.state_dict()) cleanup_tensor_space(self._value_model_eval.state_dict()) + self.timers and self.timers("offload-eval").stop() if "reward" in self.args.offload_level: + self.timers and self.timers("offload-reward").start() offload_tensor_to_cpu(self.reference_model.state_dict()) offload_tensor_to_cpu(self.reward_model.state_dict()) + self.timers and self.timers("offload-reward").stop() self.set_train() for _ in range(self.args.update_iters): @@ -1866,16 +1903,32 @@ def train( # policy_model.reload() # value_model.reload() + self.timers and self.timers("offload-reload").start() reload_tensor_to_gpu(self.actor_model.state_dict()) reload_tensor_to_gpu(self.reward_critic_model.state_dict()) + self.timers and self.timers("offload-reload").stop() + logger.info("Doing rl step...") + self.timers and self.timers("rl_step").start() rl_info, train_step_kwargs = self.rl_step(rl_batch, **train_step_kwargs) - paddle.device.cuda.empty_cache() + self.timers and self.timers("rl_step").stop() + if self.args.eval_mode is not None and "optimizer" in self.args.offload_level: + self.timers and self.timers("offload-value-optimizer").start() + offload_tensor_to_cpu(self.value_trainer.optimizer.state_dict()) + self.timers and self.timers("offload-value-optimizer").stop() + if self.use_ptx: + logger.info("Doing ptx step...") + self.timers and self.timers("ptx_step").start() ptx_info, train_step_kwargs = self.ptx_step(ptx_batch, **train_step_kwargs) rl_info.update(ptx_info) paddle.device.cuda.empty_cache() + self.timers and self.timers("ptx_step").stop() + + self.timers and self.timers("offload-policy-optimizer").start() + offload_tensor_to_cpu(self.policy_trainer.optimizer.state_dict()) + self.timers and self.timers("offload-policy-optimizer").stop() self.state.global_step = self.value_trainer.state.global_step self.state.epoch = self.value_trainer.state.epoch @@ -1919,6 +1972,7 @@ def train( # on_sub_step_end self.control = self.callback_handler.on_substep_end(args, self.state, self.control) self._maybe_log_save_evaluate(rl_info, None, epoch, ignore_keys_for_eval, inputs=inputs) + self._print_timer() if step < 0: logger.warning( @@ -1931,6 +1985,7 @@ def train( self.control = self.callback_handler.on_epoch_end(args, self.state, self.control) # argument model is not used in _maybe_log_save_evaluate, thus use None 
self._maybe_log_save_evaluate(rl_info, None, epoch, ignore_keys_for_eval, inputs=inputs) + self._print_timer() if self.control.should_training_stop: break @@ -2189,6 +2244,11 @@ def rollout(self, prompt_only_batch: PromptOnlyBatch) -> List[Dict[str, Any]]: else: actor_model_in_use = self.actor_model + # state = actor_model_in_use.state_dict() + # for k in list(state.keys())[:3]: + # print(k, state[k]) + + self.timers and self.timers("actor-model-generate").start() sequences = actor_model_in_use.generate( input_ids=input_ids, attention_mask=attention_mask, @@ -2196,6 +2256,7 @@ def rollout(self, prompt_only_batch: PromptOnlyBatch) -> List[Dict[str, Any]]: generation_config=self.generation_config, synced_gpus=ShardingOption.FULL_SHARD in self.policy_trainer.args.sharding, )[0] + self.timers and self.timers("actor-model-generate").stop() sequences = sequences.reshape([input_ids.shape[0], self.args.num_return_sequences, -1]).transpose([1, 0, 2]) # sequences = [self.load_sing_gen_data(as_batches=False, use_counter=False)["input_ids"]] @@ -2257,23 +2318,28 @@ def post_rollout( # pipe model outputs a logits tensor with LMHead, while non-pipe model # outputs a tuple with logits tensor as the only one element. + self.timers and self.timers("actor-model-logit").start() logits = actor_model_in_use( sequence, attention_mask=attention_mask, position_ids=position_ids, # return_dict=True, ) # .logits + self.timers and self.timers("actor-model-logit").stop() if not isinstance(logits, paddle.Tensor): logits = logits[0] + self.timers and self.timers("reference-model-logit").start() ref_logits = self.reference_model( sequence, attention_mask=attention_mask, position_ids=position_ids, # return_dict=True, ) # .logits + self.timers and self.timers("reference-model-logit").stop() if not isinstance(ref_logits, paddle.Tensor): ref_logits = ref_logits[0] + self.timers and self.timers("reward-model-score").start() reward_score = self.reward_model( reward_seq, attention_mask=reward_attention_mask, @@ -2283,7 +2349,6 @@ def post_rollout( 1 ] # .end_scores - logger.error("Get Here 3.0!!") reward_value = reward_critic_model_in_use( sequence, attention_mask=attention_mask, @@ -2296,6 +2361,8 @@ def post_rollout( reward_score = reward_score.squeeze(axis=-1) reward_value = reward_value.squeeze(axis=-1) + self.timers and self.timers("reward-model-score").stop() + reward_value = reward_value[:, :-1] log_probs = gather_log_probabilities(logits[:, :-1], sequence[:, 1:]) ref_log_probs = gather_log_probabilities(ref_logits[:, :-1], sequence[:, 1:]) diff --git a/paddlenlp/trainer/plugins/timer.py b/paddlenlp/trainer/plugins/timer.py index dc2cf1245800..0f918de3ec42 100644 --- a/paddlenlp/trainer/plugins/timer.py +++ b/paddlenlp/trainer/plugins/timer.py @@ -116,6 +116,7 @@ def log(self, names, normalizer=1.0, reset=True): assert normalizer > 0.0 # string = "time (ms) / rate" string = "time (ms)" + names = sorted(list(names)) time_dict = {} for name in names: From 757d3a73047f62fb3e3242f1e62e1b6594397542 Mon Sep 17 00:00:00 2001 From: Zhong Hui Date: Tue, 19 Mar 2024 14:49:05 +0800 Subject: [PATCH 32/46] fix bugs. 
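For reference, the `gather_log_probabilities(logits[:, :-1], sequence[:, 1:])`
calls in post_rollout above pick the log-probability of each observed next token
out of the model logits. A minimal re-implementation sketch, illustrative only
and not the project's helper:

import paddle
import paddle.nn.functional as F

def _gather_log_probs(logits, labels):
    # log_softmax over the vocab, then select the entry of the observed token
    log_probs = F.log_softmax(logits, axis=-1)
    return paddle.take_along_axis(log_probs, labels.unsqueeze(-1), axis=-1).squeeze(-1)

logits = paddle.randn([2, 5, 32])            # [batch, seq, vocab]
labels = paddle.randint(0, 32, [2, 5])       # next-token ids
assert _gather_log_probs(logits, labels).shape == [2, 5]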
--- examples/RLHF/ppo_trainer.py | 75 +++++++++++++++++++++------------- examples/RLHF/trainer_utils.py | 4 +- 2 files changed, 48 insertions(+), 31 deletions(-) diff --git a/examples/RLHF/ppo_trainer.py b/examples/RLHF/ppo_trainer.py index b9a691f19271..e56f051b2ada 100644 --- a/examples/RLHF/ppo_trainer.py +++ b/examples/RLHF/ppo_trainer.py @@ -806,36 +806,53 @@ def __init__( # use trainer for reference_model/reward_model to enable sharding stage-3 # and PipelineParallel. maybe we should allow models to use different dist # strategies later - self.reference_trainer = StepTrainer( - reference_model, - criterion, - args, - data_collator, - train_dataset, - eval_dataset, - reference_tokenizer, - compute_metrics, - callbacks, - optimizers, - preprocess_logits_for_metrics, - ) - self.reward_trainer = StepTrainer( - reward_model, - criterion, + + from paddle.distributed.fleet.meta_parallel import PipelineLayer + + with guard_set_args( args, - data_collator, - train_dataset, - eval_dataset, - reward_tokenizer, - compute_metrics, - callbacks, - optimizers, - preprocess_logits_for_metrics, - ) - # TODO(guosheng): sharding stage3 should create master weight optionally - # instead of creation and clear. - self.reference_trainer.init_train_model_opt(100, None, clear_master_weight=True) # dummy max_steps - self.reward_trainer.init_train_model_opt(100, None, clear_master_weight=True) # dummy max_steps + { + "recompute": False, + "fp16_opt_level": "O1", + "pipeline_parallel_degree": args.pipeline_parallel_degree + if isinstance(reference_model, PipelineLayer) + else 1, # workaround for pipeline parallel model check + }, + ): + + self.reference_trainer = StepTrainer( + reference_model, + criterion, + copy.deepcopy(args), + data_collator, + train_dataset, + eval_dataset, + reference_tokenizer, + compute_metrics, + callbacks, + optimizers, + preprocess_logits_for_metrics, + ) + self.reward_trainer = StepTrainer( + reward_model, + criterion, + copy.deepcopy(args), + data_collator, + train_dataset, + eval_dataset, + reward_tokenizer, + compute_metrics, + callbacks, + optimizers, + preprocess_logits_for_metrics, + ) + # TODO(guosheng): sharding stage3 should create master weight optionally + # instead of creation and clear. 
+ from paddlenlp.trainer.trainer_utils import ShardingOption + + if args.pipeline_parallel_degree > 1 or ShardingOption.FULL_SHARD in args.sharding: + self.reference_trainer.init_train_model_opt(100, None, clear_master_weight=True) # dummy max_steps + self.reward_trainer.init_train_model_opt(100, None, clear_master_weight=True) # dummy max_steps self.reference_model.eval() self.reward_model.eval() diff --git a/examples/RLHF/trainer_utils.py b/examples/RLHF/trainer_utils.py index 43ba33be2318..c8c7b003c2c3 100644 --- a/examples/RLHF/trainer_utils.py +++ b/examples/RLHF/trainer_utils.py @@ -240,7 +240,7 @@ def full_training_step(self: Trainer, inputs: Dict[str, paddle.Tensor], **kwargs if self.args.use_hybrid_parallel and self.args.sep_parallel_degree > 1: inputs = split_inputs_sequence_dim(inputs) - self.timers and self.timers("read-data").stop() + # self.timers and self.timers("read-data").stop() os.environ["TRAINER_GLOBAL_STEP"] = str(self.state.global_step) self.callback_handler.on_load_data_end(args, self.state, self.control, inputs=inputs) @@ -420,7 +420,7 @@ def full_training_step(self: Trainer, inputs: Dict[str, paddle.Tensor], **kwargs if k in final_local_vars: kwargs[k] = final_local_vars[k] return kwargs - self.timers and self.timers("read-data").start() + # self.timers and self.timers("read-data").start() final_local_vars = locals() for k in kwargs.keys(): From 1448b7357dfb2633024d23fecdb60bc42ee176de Mon Sep 17 00:00:00 2001 From: whucsgs Date: Thu, 21 Mar 2024 04:33:31 +0000 Subject: [PATCH 33/46] Add EMA and PPOMetric --- examples/RLHF/infer_utils.py | 14 +- examples/RLHF/ppo_main.py | 7 +- examples/RLHF/ppo_trainer.py | 390 ++++++++++++++++++++++++++------- examples/RLHF/trainer_utils.py | 3 +- 4 files changed, 327 insertions(+), 87 deletions(-) diff --git a/examples/RLHF/infer_utils.py b/examples/RLHF/infer_utils.py index 60f32dd2d0ab..eec3d783481e 100644 --- a/examples/RLHF/infer_utils.py +++ b/examples/RLHF/infer_utils.py @@ -96,7 +96,9 @@ def _create_param(self, *args, **kwargs): return param paddle.nn.Layer.create_parameter = _create_param - infer_model = create_infer_model(trainer.model, dtype=trainer.amp_dtype) + # trainer might use an extra model instead of trainer.model for eval + eval_model = getattr(trainer, "_inner_eval_model", None) + infer_model = create_infer_model(trainer.model if eval_model is None else eval_model, dtype=trainer.amp_dtype) paddle.nn.Layer.create_parameter = ori_creat_param # create predictor @@ -104,7 +106,7 @@ def _create_param(self, *args, **kwargs): predictor_args = parser.parse_dict( { "src_length": get_model_max_position_embeddings( # can be changed dynamically by predictor.input_length - trainer.model.config + trainer.model.config if eval_model is None else eval_model.config ), "max_length": trainer.args.max_length, "dtype": trainer.amp_dtype, @@ -231,7 +233,9 @@ def infer_guard(trainer, offload_model=True): return global policy_predictor - model = trainer.model + # trainer might use an extra model instead of trainer.model for eval + eval_model = getattr(trainer, "_inner_eval_model", None) + model = trainer.model if eval_model is None else eval_model if policy_predictor is None: policy_predictor = Predictor.create_predictor(trainer) if not policy_predictor.is_available: @@ -256,7 +260,9 @@ class InferEvalModel: """For faster generation, not support PipelineParallel yet.""" def __init__(self, trainer: Trainer): - self.model: PretrainedModel = trainer.model + # trainer might use an extra model instead of trainer.model for eval + 
eval_model = getattr(trainer, "_inner_eval_model", None) + self.model: PretrainedModel = trainer.model if eval_model is None else eval_model self.tokenizer: PretrainedTokenizer = trainer.tokenizer def eval(self): diff --git a/examples/RLHF/ppo_main.py b/examples/RLHF/ppo_main.py index f925fb6603e7..6b1a1811e08a 100644 --- a/examples/RLHF/ppo_main.py +++ b/examples/RLHF/ppo_main.py @@ -144,7 +144,7 @@ class TrainingArguments(TrainingArguments): ) offload_level: str = field( - default=None, + default="", metadata={"help": "Offload model, optional for: eval, reward, optimizer, train_model"}, ) @@ -269,6 +269,8 @@ def main(): "clip_range_ratio": training_args.clip_range_ratio, } else: + # non-pipe modelForCausalLM does not accept extra_args and use other ways + # (StepTrainer.create_criterion) to set hyper-parameters extra_args = {} # actor model @@ -297,6 +299,9 @@ def main(): config.tensor_parallel_degree = -1 config.tensor_parallel_rank = 0 actor_eval_model = AutoModelForCausalLM.from_config(config) + # TODO(guosheng): AutoModel (in `_get_model_class_from_config`) pop out + # architecture which is necessary for infer predictor currently + config.architectures = actor_model.config.architectures # actor_eval_model = AutoModelForCausalLM.from_pretrained(model_args.actor_model_name_or_path, config=config) else: actor_eval_model = None diff --git a/examples/RLHF/ppo_trainer.py b/examples/RLHF/ppo_trainer.py index e56f051b2ada..cb7c8eeb6cc2 100644 --- a/examples/RLHF/ppo_trainer.py +++ b/examples/RLHF/ppo_trainer.py @@ -427,13 +427,19 @@ def create_data_trans_group(global_rank, group_nums): class StepTrainer(Trainer): """ - Trainer enhanced with step-level training combining with patches of Trianer. - We can use this to do training whose step is composed of multi models via - multiple instances of StepTrainer, such as PPO. Additionally, using a mixed - loss and get the separated loss metrics is supported, which is helpful to - PipelienParallel with a mixed loss. + Features of StepTrainer: + 1. Trainer enhanced with step-level training combining with patches of + Trianer. We can use this to do training whose step is composed of multi + models via multiple instances of StepTrainer, such as PPO. + 2. Additionally, using a mixed loss and get the separated loss metrics is + supported, which is helpful to PipelienParallel with a mixed loss. + 3. EMA is supported. """ + # used to create criterion for trainer, please refer to `create_criterion` + # for details. + loss_cls: type + def __init__( self, model: Union[PretrainedModel, nn.Layer] = None, @@ -466,6 +472,12 @@ def __init__( if getattr(self, "loss_cls", None) and self.criterion is None: self.criterion = self.create_criterion() + # ablout 4s slower than infer generation without ema + self.use_ema = getattr(args, "use_ema", False) + self.shard_ema = getattr(args, "shard_ema", False) + self.offload_ema = getattr(args, "offload_ema", True) + self.ema_beta = getattr(args, "ema_beta", 0.992) + def create_criterion(self): """ create loss using `loss_cls` for trainer. 
It would use a wrapped loss_cls @@ -486,6 +498,12 @@ def loss_identifier(self, inputs: Dict) -> str: """ return "tr_loss" + def set_eval_model(self, model): + if model is None: + logger.warning("use None to set eval model for trainer and it would be ignored") + else: + self._inner_eval_model = model + def get_model(self, train=False): """ model visitor wrapps PipelineParalle and Inference model to do evaulation @@ -496,7 +514,10 @@ def get_model(self, train=False): model = getattr(self, "_eval_model", None) if model is not None: return model - if self.args.pipeline_parallel_degree > 1: + inner_eval_model = getattr(self, "_inner_eval_model", None) + if (self.args.pipeline_parallel_degree > 1 and inner_eval_model is None) or isinstance( + inner_eval_model, fleet.model.PipelineParallel + ): # Only accept wrapped model for pipeline_parallel mode model = PipeEvalModel(self) self._eval_model = model @@ -540,6 +561,13 @@ def get_train_step_vars(self, vars: Dict = None) -> Dict: self.train_step_vars.update(vars) return self.train_step_vars + @property + def loss_names(self): + if not hasattr(self, "_loss_names"): + self._loss_names = [var_name for var_name in self.get_train_step_vars() if var_name.endswith("_loss")] + assert len(self._loss_names) > 0 + return self._loss_names + def full_training_step(self, **inputs) -> paddle.Tensor: """ Accept any valid key word arguments of model and loss as inputs, they @@ -555,10 +583,22 @@ def full_training_step(self, **inputs) -> paddle.Tensor: train_step_vars = self.get_train_step_vars() loss_name = self.loss_identifier(inputs) loss_var = train_step_vars.get(loss_name, None) - if loss_var is None: + # trainer.train use `tr_loss` as loss var to accumulate loss. + # NOTE: `tr_loss` in trainer.train not only accumulate mean loss for + # steps in one `gradient_accumulation_steps`, but also accumulate for + # one logging intervel which may contains more than one accumulated steps. + # However, in StepTrainer we only want to use `tr_loss` to accumulate + # mean loss for steps in a `gradient_accumulation_steps` range. As for + # logging intervel loss accumulation is not take into account here and + # should be considered in outter. + if loss_var is None: # the first step of current loss type loss_var = paddle.to_tensor(0.0) train_step_vars[loss_name] = loss_var - # trainer.train use `tr_loss` as loss var + elif self.is_accumulation_step: # begin a new accumulation step intervel + for name in self.loss_names: + train_step_vars[name] = paddle.to_tensor(0.0) + loss_var = train_step_vars[loss_name] + train_step_vars["tr_loss"] = loss_var new_train_step_vars = super().full_training_step(inputs, **train_step_vars) @@ -572,6 +612,11 @@ def full_training_step(self, **inputs) -> paddle.Tensor: self.mark_step_loss(loss_name) + if self.use_ema and self.is_accumulation_step: + # TODO(guosheng): assume rollout next thus make ema weights on gpu, + # but may not, maybe need a way to specify it. + self.ema_update(beta=self.ema_beta, offload_ema=self.offload_ema, offload_model=not self.offload_ema) + return train_step_vars[loss_name] def _prepare_inputs(self, inputs: Dict[str, Union[paddle.Tensor, Any]]) -> Dict[str, Union[paddle.Tensor, Any]]: @@ -614,7 +659,7 @@ def mark_step_loss(self, loss_name): accumulated step and the loss returned at accumulated step is a mixed loss. To separate loss metrics in PipelienParallel: 1. We hack PipelineParallel._forward_step to record actual loss for each - step in a list. + step in a list (only in training and not in evaluation currently). 2. 
We mark the loss type only once for each step using `loss_step_indice` (dict), then wen can check out the corresponding loss metrics from the loss list. @@ -624,23 +669,24 @@ def mark_step_loss(self, loss_name): if loss_name not in self.loss_step_indice: self.loss_step_indice[loss_name] = len(self.loss_step_indice) - def get_step_loss(self, loss_prefix: str = "") -> Dict: + @paddle.no_grad() + def get_step_loss(self, loss_prefix: str = "", loss_accumulator: Dict = {}) -> Dict[str, paddle.Tensor]: """ Return a dict mapping loss name to value of current training step. This is mainly to get loss for metric logging, and it would not affect the training. This is mostly helpful to PipelienParallel with a mixed loss in which the loss returned is 0 when not reach accumulated step and the loss returned at accumulated step is a mixed loss. - NOTE: Overwrite it when we want to change the logging value. + NOTE: 1. Only when reaching accumulated step the losses returned are + accurate, and each loss is a mean loss of steps among one accumulated + steps range. """ + if not self.is_accumulation_step: + msg = "The loss returned may not be accurate when not reaching accumulated step." + logger.error(msg) model = self.get_model(train=True) - if not hasattr(self, "loss_dict"): - self.loss_dict = {} - for var_name, value in self.get_train_step_vars().items(): - if var_name.endswith("_loss"): - self.loss_dict[var_name] = value - loss_dict = {} # return a new dict because of new metric names - if isinstance(model, fleet.model.PipelineParallel) and len(self.loss_dict) > 1: + loss_dict = loss_accumulator if loss_accumulator else {} + if isinstance(model, fleet.model.PipelineParallel) and len(self.loss_names) > 1: # NOTE: PipelineParallel only returns a accumulated loss after # accumulated steps, which is a mixed loss of ppo-loss and # ptx-loss. We hack PipelineParallel._forward_step to record @@ -663,20 +709,155 @@ def get_step_loss(self, loss_prefix: str = "") -> Dict: # thus no need to broadcast. mix_loss = paddle.empty(shape=[self.args.gradient_accumulation_steps], dtype=paddle.float32) paddle.distributed.broadcast(mix_loss, src=model.pp_group.ranks[-1], group=model.pp_group) - for loss_name in self.loss_dict: + for loss_name in self.loss_names: # We assume a static order of multi-losses and mark the loss # indice only once. 
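# For intuition, a tiny numeric sketch of the de-interleaving done below (plain
# Python, made-up numbers): with the fixed per-micro-step order [actor, ptx] and
# gradient_accumulation_steps = 4, the recorded per-step losses interleave, so a
# strided slice recovers the mean of each loss type.
_mix = [0.50, 8.0, 0.54, 7.0]                     # actor at indices 0::2, ptx at 1::2
_actor_mean = sum(_mix[0::2]) / len(_mix[0::2])   # 0.52
_ptx_mean = sum(_mix[1::2]) / len(_mix[1::2])     # 7.5
assert abs(_actor_mean - 0.52) < 1e-9 and abs(_ptx_mean - 7.5) < 1e-9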
- value = mix_loss[self.loss_step_indice[loss_name] :: len(self.loss_dict)].mean() + value = mix_loss[self.loss_step_indice[loss_name] :: len(self.loss_names)].mean() loss_name = loss_prefix + loss_name if loss_prefix else loss_name - loss_dict[loss_name] = value + loss_dict[loss_name] = loss_dict[loss_name].add_(value) if loss_name in loss_dict else value return loss_dict + elif isinstance(model, fleet.model.PipelineParallel): + model._step_losses = None - for loss_name in self.loss_dict: + for loss_name in self.loss_names: value = self.get_train_step_vars()[loss_name] loss_name = loss_prefix + loss_name if loss_prefix else loss_name - loss_dict[loss_name] = value + loss_dict[loss_name] = loss_dict[loss_name].add_(value) if loss_name in loss_dict else value return loss_dict + @property + def is_accumulation_step(self): + """Indicate whether accumulation steps' training is done.""" + return self.get_train_step_vars()["step_control"] == 0 + + def get_sharding_master_weight_structured_names(self, model, optimizer): + rank_param_names = [p.name for p in optimizer._rank2params[optimizer._sharding_rank]] + structured_names = [] + # for pipeline model, use `model.state_dict()` would auto map param name + # for name, p in model.named_parameters(): + for name, p in model.state_dict().items(): + if p.name in rank_param_names: + structured_names.append(name) + return structured_names + + def get_master_weight_state_dict(self, model, optimizer): + if self.amp_dtype in ["float16", "bfloat16"] and hasattr(optimizer, "_master_weights"): + master_weights = dict(optimizer._master_weights) + result = {} + # for pipeline model, use `model.state_dict()` would auto map param name + # for name, p in model.named_parameters(): + for name, p in model.state_dict().items(): + if p.name in master_weights: + result[name] = master_weights[p.name] + return result + else: + return model.state_dict() + + def ema_init(self, offload_ema=True, offload_model=False, shard_ema=True): + """should be called after model and optimizer are created and wrapped""" + self.ema_state_dict = {} + self.bak_state_dict = {} + hcg = fleet.get_hybrid_communicate_group() + sharding_size = hcg.get_sharding_parallel_world_size() + # NOTE: use optimizer.master_weight instead of model.state_dict to set + # ema_state_dict would make ema coupled with master_weight reshard. + structured_names = ( + self.get_sharding_master_weight_structured_names(self.model, self.optimizer) + if sharding_size > 1 and shard_ema + else None + ) + # print("=" * 20, "structured_names", structured_names) + # for pipeline model, use `model.state_dict()` would auto map param name + # for name, p in self.model.named_parameters(): + for name, p in self.model.state_dict().items(): + if structured_names is None or name in structured_names: + ema_p = p.detach().cast(dtype=paddle.float32) + if offload_ema: + ema_p = ema_p.pin_memory() + # print("="*20, "ema name", name) + self.ema_state_dict[name] = ema_p + if offload_model: + cpu_p = p.pin_memory() + cpu_p._share_buffer_to(p) + self.bak_state_dict[name] = p + if getattr(self.model, "tie_word_embeddings", False): + raise NotImplementedError + + @paddle.no_grad() + def ema_update(self, beta=0.992, offload_ema=True, offload_model=False): + """ + This would be called automatically in `full_training_step` if `use_ema` + is True to update ema state when ending an accumulated step intervel. 
+ """ + model_keys = list(self.ema_state_dict.keys()) + hcg = fleet.get_hybrid_communicate_group() + sharding_size = hcg.get_sharding_parallel_world_size() + trainer_state_dict = ( + self.get_master_weight_state_dict(self.model, self.optimizer) + if sharding_size > 1 and self.shard_ema + else self.model.state_dict() + ) + for key in model_keys: + if getattr(self.model, "tie_word_embeddings", False) and "lm_head" in key: + raise NotImplementedError + trainer_data = trainer_state_dict[key].cuda() + if trainer_data.dtype != paddle.float32: + # use model state dict instead of master weights + trainer_data = trainer_data.cast(dtype=paddle.float32) + ema_data = self.ema_state_dict[key].cuda() + # update ema & offload ema + ema_result = (beta * ema_data) + (1.0 - beta) * trainer_data + self.ema_state_dict[key] = ema_result.pin_memory() if offload_ema else ema_result + if offload_model: + cpu_p = trainer_data.pin_memory() + cpu_p._share_buffer_to(trainer_data) + if getattr(self.model, "tie_word_embeddings", False): + raise NotImplementedError + + def ema_apply(self): + """ + If use sharding and `shard_ema` is true, `ema_state_dict` only includes + sharded weights, thus we need the completed ema state to apply it to model + and ema would be coupled with reshard, then we need to reshard here. + """ + # TODO(guosheng): `bak_state_dict` is indeed trainer.model, allow to use + # a new model instead of trainer.model as target model. + # NOTE: if `shard_ema` is True, `ema_state_dict` is just a subset (sharded + # part) of model state_dict, and ema would coupled with reshard. + for k, v in self.bak_state_dict.items(): + # TODO(guosheng): reshard here + value = self.ema_state_dict[k].cuda().cast(dtype=v.dtype) + value._share_buffer_to(v) + + def ema_restore(self): + for k, v in self.bak_state_dict.items(): + value = v.cuda() + value._share_buffer_to(v) + if self.offload_ema: # ema weights always in pin_memory in fact + ema_v = self.ema_state_dict[k] + ema_value = ema_v.pin_memory() + ema_value._share_buffer_to(ema_v) + + +class ema(paddle.no_grad.__mro__[1]): + def __init__(self, trainer: StepTrainer): + self.trainer = trainer + + def __enter__(self): + trainer = self.trainer + if trainer.use_ema and not hasattr(trainer, "ema_state_dict"): + # call ema_init here since it should be called after model and + # optimizer are created and wrapped + trainer.ema_init( + offload_ema=trainer.offload_ema, offload_model=not trainer.offload_ema, shard_ema=trainer.shard_ema + ) + if self.trainer.use_ema: + self.trainer.ema_apply() + + def __exit__(self, *args): + if self.trainer.use_ema: + self.trainer.ema_restore() + class PolicyTrainer(StepTrainer): loss_cls = RLHFPPOMixedLoss @@ -689,17 +870,6 @@ def loss_identifier(self, inputs: Dict) -> str: loss_name = "actor_loss" return loss_name - def get_step_loss(self, loss_prefix: str = "") -> Dict: - loss_dict = super().get_step_loss(loss_prefix="") - # use_ptx would double the gradient_accumulation_steps which causes - # actor_loss and ptx_loss reduced by half. Moreover, ptx_loss should - # be divided by ptx_coeff for logging. 
- # TODO(guosheng): maybe should consider self._enable_delay_scale_loss() - if "ptx_loss" in loss_dict: - loss_dict[loss_prefix + "ptx_loss"] = loss_dict["ptx_loss"] * 2 / self.criterion.ptx_coeff - loss_dict[loss_prefix + "actor_loss"] = loss_dict["actor_loss"] * 2 - return loss_dict - class ValueTrainer(StepTrainer): loss_cls = RLHFValueLoss @@ -707,6 +877,73 @@ class ValueTrainer(StepTrainer): loss_identifier = lambda self, inputs: "reward_critic_loss" +class PPOMetric: + metric_names = [ + "train/" + name + for name in [ + "actor_loss", + "ptx_loss", + "reward_critic_loss", + "reward", + "kl_divergence", + "mean_generated_length", + "max_generated_length", + ] + ] + metric_ops = ["mean", "mean", "mean", "mean", "mean", "mean", "max"] + + def __init__(self, freq, use_stack=True): + self.freq = freq + self.counter = 0 + self.use_stack = use_stack + if use_stack: + self.metrics = paddle.zeros([freq, len(self.metric_names)], dtype=paddle.float32) + else: + self.metrics = [None] * len(self.metric_names) + for i in range(len(self.metrics)): + self.metrics[i] = paddle.zeros([freq], dtype=paddle.float32) + + @paddle.no_grad() + def update(self, metrics: Dict[str, paddle.Tensor]) -> Union[None, Dict[str, float]]: + """ + If has updated for`freq` times then return metrics (results reduced from + all worker) and reset metric states, otherwise return `None`. + """ + for name in self.metric_names: + # PipelineParallel broadcast loss with shape [1] + if len(metrics[name].shape) != 0: + metrics[name] = metrics[name].squeeze() + if metrics[name].dtype != paddle.float32: + metrics[name] = metrics[name].cast(paddle.float32) + if self.use_stack: + self.metrics[self.counter] = paddle.stack([metrics[name] for name in self.metric_names]) + else: + for i, name in enumerate(self.metric_names): + self.metrics[i][self.counter] = metrics[name] + if self.counter + 1 == self.freq: + from paddlenlp.trainer.utils import distributed_concat + + metrics = distributed_concat(self.metrics) + out_metrics = {} + if self.use_stack: + mean_metric = metrics.mean(0) + max_metric = metrics.max(0) + for i, (name, op) in enumerate(zip(self.metric_names, self.metric_ops)): + if op == "max": + out_metrics[name] = max_metric[i].item() if self.use_stack else metrics[i].max().item() + else: + out_metrics[name] = mean_metric[i].item() if self.use_stack else metrics[i].mean().item() + + # reset + self.counter = 0 + if self.use_stack: + self.metrics.fill_(0.0) + else: + for i, name in enumerate(self.metric_names): + self.metrics[i].fill_(0.0) + return out_metrics + + class PPOTrainer(Trainer): def __init__( self, @@ -799,6 +1036,8 @@ def __init__( optimizers, preprocess_logits_for_metrics, ) + self.policy_trainer.set_eval_model(policy_model_eval) + self.value_trainer.set_eval_model(value_model_eval) # disable inner trainers' callback/state/control self.policy_trainer.add_callback(MuteDefaultFlowCallback) self.value_trainer.add_callback(MuteDefaultFlowCallback) @@ -867,7 +1106,7 @@ def __init__( num_return_sequences=self.args.num_return_sequences, temperature=self.args.temperature, top_p=self.args.top_p, - # top_k=self.args.top_k, + # top_k=0, # to disable top_k sampling, default is 50 repetition_penalty=self.args.repetition_penalty, do_sample=True, # allow generation output to contain input @@ -879,6 +1118,7 @@ def __init__( # Those value can be changed self.kl_coeff = self.args.kl_coeff self.clip_range_score = self.args.clip_range_score + self.ptx_coeff = self.args.ptx_coeff self.gamma = 1.0 self.gae_lambda = 0.95 @@ -996,9 +1236,20 @@ 
def evaluation_loop( ) self._eval_out_file = open(eval_out_file, "w") - output = super().evaluation_loop( - dataloader, description, prediction_loss_only, ignore_keys, metric_key_prefix, max_eval_iters - ) + # TODO(guosheng): use _inner_eval_model (if trainer has one) instead of + # original trainer model to eval, especially when using sharded EMA + # NOTE: use here rather than in prediction_step since actor_model would + # be set to eval out of prediction_step + with guard_set_args( + self.policy_trainer, # disable _inner_eval_model + { + "_eval_model": None, # otherwise would use cached _eval_model + "_inner_eval_model": None, # otherwise would use _inner_eval_model to create _eval_model + }, + ): + output = super().evaluation_loop( + dataloader, description, prediction_loss_only, ignore_keys, metric_key_prefix, max_eval_iters + ) output.metrics[f"{metric_key_prefix}/reward"] = output.metrics.pop(f"{metric_key_prefix}_loss") columns = ["Prompt", "Generated", "Reward"] @@ -1055,6 +1306,8 @@ def get_epoch_iterator(self): num_ptx_batches = len(self.ptx_dataloader) num_ptx_replicas = (num_prompt_only_batches + num_ptx_batches - 1) // num_ptx_batches + @ema(self.policy_trainer) + @ema(self.value_trainer) def gen_epoch_data(): for prompt_only_batch, ptx_batch in zip( self.prompt_only_dataloader, @@ -1206,7 +1459,7 @@ def is_step_end(self): # reach accumulation_steps, value trainer has the same step_control and # gradient_accumulation_steps as PPO trainer. # if (step_control + 1) % args.gradient_accumulation_steps == 0 - return self.value_trainer.get_train_step_vars()["step_control"] == 0 + return self.value_trainer.is_accumulation_step def get_step_loss(self, loss_prefix: str = "") -> Dict: rl_loss = self.policy_trainer.get_step_loss(loss_prefix) @@ -1287,6 +1540,7 @@ def train( self.control = self.callback_handler.on_train_begin(args, self.state, self.control) self._globalstep_last_logged = self.state.global_step + metric = PPOMetric(freq=self.args.logging_steps) start_time = time.time() self._globalstep_last_start_time = start_time @@ -1340,6 +1594,7 @@ def train( self.state.epoch = epoch + (step + 1) / steps_in_epoch if self.is_step_end(): rl_info.update(self.get_step_loss(loss_prefix="train/")) + rl_info = metric.update(rl_info) # on_step_end self.control = self.callback_handler.on_step_end(args, self.state, self.control) else: @@ -1369,21 +1624,16 @@ def _maybe_log_save_evaluate(self, tr_loss, model, epoch, ignore_keys_for_eval, if self.control.should_log: logs: Dict[str, float] = {} - - for k, v in tr_loss.items(): - if isinstance(v, paddle.Tensor) and "lr" not in k and "max" not in k: - v_scalar = self._nested_gather(v).mean().item() - logs[k] = round(v_scalar / (self.state.global_step - self._globalstep_last_logged), 8) - v.subtract_(v) - attr_name = "_total_" + k.split("/")[-1] + "_scalar" - attr_value = getattr(self, attr_name, 0) - setattr(self, attr_name, attr_value + v_scalar) - elif isinstance(v, paddle.Tensor) and "max" in k: - v_scalar = self._nested_gather(v).max().item() - logs[k] = v_scalar - else: - logs[k] = float("{0:.3e}".format(v)) + # use_ptx would double the gradient_accumulation_steps which causes + # actor_loss and ptx_loss reduced by half. Moreover, ptx_loss should + # be divided by ptx_coeff for logging. 
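# A rough numeric illustration of the rescaling below (plain Python, made-up
# numbers): with use_ptx every update runs an actor micro-step plus a ptx
# micro-step, so averaging over the doubled accumulation steps halves each
# individual loss, and ptx_loss additionally carries the ptx_coeff factor that
# the criterion applied.
_ptx_coeff = 16.0
_actor_steps = [0.50, 0.54]                        # actor loss per actor micro-step
_ptx_steps = [2.0 * _ptx_coeff, 2.0 * _ptx_coeff]  # criterion already scaled by ptx_coeff
_accum = len(_actor_steps) + len(_ptx_steps)       # doubled by use_ptx
_logged_actor = sum(_actor_steps) / _accum         # 0.26, half of the true mean 0.52
_true_actor = _logged_actor * 2                    # 0.52
_true_ptx = (sum(_ptx_steps) / _accum) * 2 / _ptx_coeff  # back to the raw 2.0
assert abs(_true_actor - 0.52) < 1e-9 and abs(_true_ptx - 2.0) < 1e-9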
+ if "train/ptx_loss" in tr_loss: + tr_loss["train/actor_loss"] = tr_loss["train/actor_loss"] * 2 + tr_loss["train/ptx_loss"] = tr_loss["train/ptx_loss"] * 2 / self.ptx_coeff + logs.update(tr_loss) logs["global_step"] = int(self.state.global_step) + logs["train/actor_lr"] = float("{0:.3e}".format(self.policy_trainer._get_learning_rate())) + logs["train/reward_critic_lr"] = float("{0:.3e}".format(self.value_trainer._get_learning_rate())) total_train_batch_size = ( self.args.train_batch_size * self.args.gradient_accumulation_steps * self.args.dataset_world_size @@ -1422,6 +1672,7 @@ def add_kl_divergence_regularization( min=-self.clip_range_score, max=self.clip_range_score, ) + # TODO(guosheng): use scatter_add/put_along_axis batch_size = log_probs.shape[0] for i in range(batch_size): end_index = sequence_mask[i].nonzero()[-1] @@ -1520,8 +1771,6 @@ def rl_step(self, rl_batch: Dict[str, paddle.Tensor]) -> Dict[str, Any]: "train/kl_divergence": kl_divergence, "train/mean_generated_length": mean_generated_length, "train/max_generated_length": max_generated_length, - "train/actor_lr": self.policy_trainer._get_learning_rate(), - "train/reward_critic_lr": self.value_trainer._get_learning_rate(), } def ptx_step(self, ptx_batch: Dict[str, paddle.Tensor]) -> Dict[str, Any]: @@ -1579,19 +1828,9 @@ def rollout(self, prompt_only_batch: PromptOnlyBatch) -> List[Dict[str, Any]]: # NOTE: generation output of paddlenlp do not contain prompt, we should # change sequences here. - # todo, fixme zhui, self.actor_model.generate - if self._policy_model_eval is not None: - actor_model_in_use = self._policy_model_eval - else: - actor_model_in_use = self.actor_model - - # state = actor_model_in_use.state_dict() - # for k in list(state.keys())[:3]: - # print(k, state[k]) - self.timers and self.timers("actor-model-generate").start() with infer_guard(self.policy_trainer): - sequences = actor_model_in_use.generate( + sequences = self.actor_model.generate( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, @@ -1637,8 +1876,8 @@ def post_rollout( reward_attention_mask = reward_tokenize_output["attention_mask"] else: # actor_model_in_use gen - for text in self.tokenizer.batch_decode(sequence, skip_special_tokens=True): - print(text) + # for text in self.tokenizer.batch_decode(sequence, skip_special_tokens=True): + # print(text) reward_seq = sequence reward_attention_mask = attention_mask # position_ids is necessary for non-right padding @@ -1647,21 +1886,10 @@ def post_rollout( # (number of 0s) in source to calculate end offsets. position_ids = make_position_ids(attention_mask) - # todo, fixme zhui, self.actor_model forward - if self._policy_model_eval is not None: - actor_model_in_use = self._policy_model_eval - else: - actor_model_in_use = self.actor_model - - if self._value_model_eval is not None: - reward_critic_model_in_use = self._value_model_eval - else: - reward_critic_model_in_use = self.reward_critic_model - # pipe model outputs a logits tensor with LMHead, while non-pipe model # outputs a tuple with logits tensor as the only one element. 
self.timers and self.timers("actor-model-logit").start() - logits = actor_model_in_use( + logits = self.actor_model( sequence, attention_mask=attention_mask, position_ids=position_ids, @@ -1691,7 +1919,7 @@ def post_rollout( 1 ] # .end_scores - reward_value = reward_critic_model_in_use( + reward_value = self.reward_critic_model( sequence, attention_mask=attention_mask, position_ids=position_ids, diff --git a/examples/RLHF/trainer_utils.py b/examples/RLHF/trainer_utils.py index c8c7b003c2c3..a8858cf0b85a 100644 --- a/examples/RLHF/trainer_utils.py +++ b/examples/RLHF/trainer_utils.py @@ -469,7 +469,8 @@ class PipeEvalModel(GenerationMixin): """ def __init__(self, trainer: Trainer): - self.model: fleet.model.PipelineParallel = trainer.model_wrapped + eval_model = getattr(trainer, "_inner_eval_model", None) + self.model: fleet.model.PipelineParallel = trainer.model_wrapped if eval_model is None else eval_model self.config: PretrainedConfig = trainer.model.config self._is_gen = False self.update_model_kwargs_for_generation = ( From b80963110525d0e9452de77b60b2683322902c27 Mon Sep 17 00:00:00 2001 From: Zhong Hui Date: Thu, 21 Mar 2024 19:04:13 +0800 Subject: [PATCH 34/46] add tests --- examples/RLHF/ppo_config.json | 12 ++-- examples/RLHF/tests/run_model.py | 106 +++++++++++++++++++++++++++++ examples/RLHF/tests/test_export.py | 57 ++++++++++++++++ 3 files changed, 171 insertions(+), 4 deletions(-) create mode 100644 examples/RLHF/tests/run_model.py create mode 100644 examples/RLHF/tests/test_export.py diff --git a/examples/RLHF/ppo_config.json b/examples/RLHF/ppo_config.json index 19539bc452cc..bd6da3cbde18 100644 --- a/examples/RLHF/ppo_config.json +++ b/examples/RLHF/ppo_config.json @@ -13,8 +13,8 @@ "repetition_penalty": 1.0, "num_train_epochs": 1, "update_iters": 1, - "per_device_prompt_batch_size": 16, - "per_device_train_batch_size": 16, + "per_device_prompt_batch_size": 64, + "per_device_train_batch_size": 64, "gradient_accumulation_steps": 1, "learning_rate": 1e-5, "weight_decay": 0.01, @@ -32,7 +32,7 @@ "clip_range_score": 50.0, "clip_range_value": 5.0, "ptx_coeff": 16.0, - "per_device_eval_batch_size": 16, + "per_device_eval_batch_size": 64, "logging_steps": 1, "evaluation_strategy": "steps", "eval_steps": 100, @@ -53,6 +53,10 @@ "max_grad_norm": 1.0, "adam_beta1": 0.9, "adam_beta2": 0.95, + "overwrite_output_dir": 1, + "skip_profile_timer": 0, + "skip_memory_metric": 0, + "dataloader_drop_last": true, "eval_mode": "tensor_parallel", - "offload_level": "eval" + "offload_level": "eval optimizer" } diff --git a/examples/RLHF/tests/run_model.py b/examples/RLHF/tests/run_model.py new file mode 100644 index 000000000000..1377a4f592bd --- /dev/null +++ b/examples/RLHF/tests/run_model.py @@ -0,0 +1,106 @@ +# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
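The new tests/run_model.py that follows exercises export_evaluate_model together with the data_group_split/data_group_merge helpers, comparing exported weights by _md5sum and merged logits against a ground-truth model. As a conceptual sketch of the split/merge round trip those helpers provide (hedged: it assumes exactly two ranks launched via paddle.distributed.launch and that the examples/RLHF directory is importable):

import paddle
import paddle.distributed as dist
from ppo_trainer import data_group_split, data_group_merge

dist.init_parallel_env()
group = dist.new_group(ranks=[0, 1])

# The same tensor exists on every rank; split keeps this rank's slice
# along axis 0, merge all-gathers the slices back in rank order.
x = paddle.arange(8, dtype="int64").reshape([4, 2])
part = data_group_split(x, group=group)
merged = data_group_merge(part, group=group)
assert bool((merged == x).all())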
+ +import copy +from dataclasses import dataclass, field + +import numpy +import paddle +from paddle.distributed import fleet +from ppo_trainer import Trainer, data_group_merge, data_group_split + +from paddlenlp.trainer import PdArgumentParser, TrainingArguments +from paddlenlp.transformers import ( + AutoConfig, + AutoModelForCausalLM, + AutoModelForCausalLMPipe, +) + + +@dataclass +class ModelArgument: + model_name_or_path: str = field( + default=None, metadata={"help": "Build-in pretrained model name or the path to local model."} + ) + + +def main(): + # Arguments + parser = PdArgumentParser((ModelArgument, TrainingArguments)) + model_args, training_args = parser.parse_args_into_dataclasses() + + model_config = AutoConfig.from_pretrained( + model_args.model_name_or_path, + tensor_parallel_output=False, + tensor_parallel_degree=training_args.tensor_parallel_degree, + tensor_parallel_rank=training_args.tensor_parallel_rank, + dtype="float32", + ) + + model_class = AutoModelForCausalLM + if training_args.pipeline_parallel_degree > 1: + model_class = AutoModelForCausalLMPipe + + actor_model = model_class.from_pretrained( + model_args.model_name_or_path, + config=model_config, + ) + + if True: # test export_evaluate_model + # 随机初始化 + config = copy.deepcopy(model_config) + if training_args.pipeline_parallel_degree <= 1: + config.tensor_parallel_degree = -1 + config.tensor_parallel_rank = 0 + + actor_eval_model = AutoModelForCausalLM.from_config(config) + # ground truth模型 + actor_gt_model = AutoModelForCausalLM.from_pretrained(model_args.model_name_or_path, config=config) + + trainer = Trainer( + model=actor_model, + args=training_args, + ) + trainer.export_evaluate_model(actor_model, actor_eval_model) + + gp_state = actor_gt_model.state_dict() + export_state = actor_eval_model.state_dict() + + for k, v in gp_state.items(): + assert ( + v._md5sum() == export_state[k]._md5sum() + ), f"{k} groud_truth: {v.shape}, export: {export_state[k].shape}" + + hcg = fleet.get_hybrid_communicate_group() + pp_group = hcg.get_pipe_parallel_group() + tp_group = hcg.get_model_parallel_group() + + split_group = tp_group + if training_args.pipeline_parallel_degree > 1: + split_group = pp_group + + input_ids = paddle.randint(low=1, high=50, shape=[8, 64]) + paddle.distributed.broadcast(input_ids, src=0) + + split_input_ids = data_group_split(input_ids, group=split_group) + ret = actor_eval_model(input_ids=split_input_ids, return_dict=True) + eval_loggits = data_group_merge(ret.logits, group=split_group) + + gt_ret = actor_gt_model(input_ids=input_ids, return_dict=True) + gt_loggits = gt_ret.logits + numpy.testing.assert_almost_equal(eval_loggits.numpy(), gt_loggits.numpy(), decimal=5) + + +if __name__ == "__main__": + main() diff --git a/examples/RLHF/tests/test_export.py b/examples/RLHF/tests/test_export.py new file mode 100644 index 000000000000..bde1d05c33ea --- /dev/null +++ b/examples/RLHF/tests/test_export.py @@ -0,0 +1,57 @@ +# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import os + +from tests.parallel_launch import TestMultipleGpus + +environment_variables = { + "NCCL_ALGO": "Tree", + "NVIDIA_TF32_OVERRIDE": "0", + "NCCL_IB_TIMEOUT": "22", + "NCCL_DEBUG": "INFO", + "FLAGS_embedding_deterministic": "1", + "FLAGS_cudnn_deterministic": "1", + "Flags_mp_aysnc_allreduce": "1", + "Flags_skip_mp_c_identity": "1", + "FLAGS_shard_norm_align_dp": "0", + "FLAGS_shard_use_reduce": "1", + "test_ci_no_save_model": "1", +} + + +class TestExportEvalModel(TestMultipleGpus): + def setUp(self): + os.environ.update(environment_variables) + super().setUp() + + def test_pptp_to_tp(self): + config = { + "output_dir": "./tmp", + "model_name_or_path": "__internal_testing__/tiny-random-llama", + "tensor_parallel_degree": 2, + "pipeline_parallel_degree": 2, + } + scripts = "tests/run_model.py" + self.run_4gpu(scripts, **config) + + def test_tp_to_single(self): + config = { + "output_dir": "./tmp", + "model_name_or_path": "__internal_testing__/tiny-random-llama", + "tensor_parallel_degree": 2, + "pipeline_parallel_degree": 1, + } + scripts = "tests/run_model.py" + self.run_2gpu(scripts, **config) From 2f8d0327cc34131056a9834c90a2b5679c83bbdb Mon Sep 17 00:00:00 2001 From: Zhong Hui Date: Fri, 22 Mar 2024 14:59:48 +0800 Subject: [PATCH 35/46] add unit test for rank guard. --- examples/RLHF/ppo_trainer.py | 18 ++++++++++++++++ examples/RLHF/tests/run_model.py | 29 +++++++++++++++++++++----- examples/RLHF/tests/test_export.py | 11 ++++++++++ paddlenlp/trainer/utils/helper.py | 33 ++++++++++++++++++++++++++++++ 4 files changed, 86 insertions(+), 5 deletions(-) diff --git a/examples/RLHF/ppo_trainer.py b/examples/RLHF/ppo_trainer.py index e56f051b2ada..e63a432aab10 100644 --- a/examples/RLHF/ppo_trainer.py +++ b/examples/RLHF/ppo_trainer.py @@ -58,6 +58,7 @@ logger, speed_metrics, ) +from paddlenlp.trainer.utils.helper import nested_broadcast_tensor_with_empty from paddlenlp.utils.distributed import distributed_gather global_dev_id = 0 if paddle.get_device() == "cpu" else int(paddle.get_device().split(":")[1]) @@ -138,6 +139,23 @@ def data_group_merge(tensors, group): return tensors +def group_rank_guard(group, rank=0): + def decorator(func): + def wrapper_func(*args, **kwargs): + if group.rank == rank: + ret = func(*args, **kwargs) + dist.barrier() + else: + ret = None + dist.barrier() + ret = nested_broadcast_tensor_with_empty(ret, group=group) + return ret + + return wrapper_func + + return decorator + + def repad_rl_batches(batches, input_lengths): if "position_ids" in batches: v = batches["position_ids"] diff --git a/examples/RLHF/tests/run_model.py b/examples/RLHF/tests/run_model.py index 1377a4f592bd..19bb2176d5e2 100644 --- a/examples/RLHF/tests/run_model.py +++ b/examples/RLHF/tests/run_model.py @@ -18,7 +18,7 @@ import numpy import paddle from paddle.distributed import fleet -from ppo_trainer import Trainer, data_group_merge, data_group_split +from ppo_trainer import Trainer, data_group_merge, data_group_split, group_rank_guard from paddlenlp.trainer import PdArgumentParser, TrainingArguments from paddlenlp.transformers import ( @@ -33,6 +33,21 @@ class ModelArgument: model_name_or_path: str = field( default=None, metadata={"help": "Build-in pretrained model name or the path to local model."} ) + test_mode: str = field(default="export", metadata={"help": "export data_split or rank_guard."}) + + +def test_group_rank_guard(group): + @group_rank_guard(group=group, rank=0) + 
def func(): + tensor = paddle.randn([4, 64]) + return tensor + + t = func() + ret = [] + paddle.distributed.stream.all_gather(ret, t, group=group) + + for x in ret: + assert x._md5sum() == t._md5sum(), f"{x} {t}" def main(): @@ -40,6 +55,14 @@ def main(): parser = PdArgumentParser((ModelArgument, TrainingArguments)) model_args, training_args = parser.parse_args_into_dataclasses() + hcg = fleet.get_hybrid_communicate_group() + pp_group = hcg.get_pipe_parallel_group() + tp_group = hcg.get_model_parallel_group() + + if model_args.test_mode == "rank_guard": + test_group_rank_guard(tp_group) + return 0 + model_config = AutoConfig.from_pretrained( model_args.model_name_or_path, tensor_parallel_output=False, @@ -82,10 +105,6 @@ def main(): v._md5sum() == export_state[k]._md5sum() ), f"{k} groud_truth: {v.shape}, export: {export_state[k].shape}" - hcg = fleet.get_hybrid_communicate_group() - pp_group = hcg.get_pipe_parallel_group() - tp_group = hcg.get_model_parallel_group() - split_group = tp_group if training_args.pipeline_parallel_degree > 1: split_group = pp_group diff --git a/examples/RLHF/tests/test_export.py b/examples/RLHF/tests/test_export.py index bde1d05c33ea..6254427e89cd 100644 --- a/examples/RLHF/tests/test_export.py +++ b/examples/RLHF/tests/test_export.py @@ -55,3 +55,14 @@ def test_tp_to_single(self): } scripts = "tests/run_model.py" self.run_2gpu(scripts, **config) + + def test_group_rank_guard(self): + config = { + "output_dir": "./tmp", + "model_name_or_path": "__internal_testing__/tiny-random-llama", + "tensor_parallel_degree": 2, + "pipeline_parallel_degree": 1, + "test_mode": "rank_guard", + } + scripts = "tests/run_model.py" + self.run_2gpu(scripts, **config) diff --git a/paddlenlp/trainer/utils/helper.py b/paddlenlp/trainer/utils/helper.py index ff68e51f127b..8d2957d7a650 100644 --- a/paddlenlp/trainer/utils/helper.py +++ b/paddlenlp/trainer/utils/helper.py @@ -226,6 +226,39 @@ def nested_broadcast_tensor(tensor, src=0, group=None): return tensor +def nested_broadcast_tensor_with_empty(tensor, src=0, group=None): + # src should src rank in the group, not global rank. + process_rank = paddle.distributed.get_rank() + + if group is not None: + src_rank = group.ranks[src] + if process_rank == src_rank: + if tensor is None: + logger.warning( + f"Your local rank {paddle.distributed.get_rank()} must have a state_dict. dp_rank:{process_rank}, src_rank:{src_rank}" + ) + fake_tensor = [nested_reduce_tensor(tensor)] + else: + if tensor is not None: + logger.warning( + f"Your local rank {paddle.distributed.get_rank()} are forbidden to have a state_dict. dp_rank:{process_rank}, src_rank:{src_rank}" + ) + fake_tensor = [None] + + paddle.distributed.broadcast_object_list( + fake_tensor, + src=src_rank, + group=group, + ) + fake_tensor = fake_tensor[0] + + if process_rank != src_rank: + tensor = nested_empty_tensor(fake_tensor) + + tensor = nested_broadcast_tensor(tensor, src=src_rank, group=group) + return tensor + + def broadcast_dp_optimizer(state_dict): if paddle.distributed.get_world_size() <= 1: return state_dict From fbb9ac3e9eef4cfd32dfc68b3db72bf46883258b Mon Sep 17 00:00:00 2001 From: whucsgs Date: Mon, 25 Mar 2024 02:21:52 +0000 Subject: [PATCH 36/46] Fix reshard zero3 and reshard infer. 
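For reference, the group_rank_guard decorator added in the previous patch runs the wrapped function on a single rank of the given group and then uses nested_broadcast_tensor_with_empty to hand the identical result to every other rank. A hedged usage sketch follows (it assumes a two-rank launch; at this point the decorator lives in examples/RLHF/ppo_trainer.py and is later moved to comm_utils.py):

import paddle
import paddle.distributed as dist
from ppo_trainer import group_rank_guard

dist.init_parallel_env()
group = dist.new_group(ranks=[0, 1])

@group_rank_guard(group=group, rank=0)
def sample_rows():
    # Only rank 0 executes this body; the broadcast inside the guard
    # makes every rank return the same tensor (same _md5sum on all ranks).
    return paddle.randn([4, 64])

t = sample_rows()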
--- examples/RLHF/infer_utils.py | 43 ++++++++-- examples/RLHF/ppo_trainer.py | 160 +++++++++++++++++------------------ 2 files changed, 117 insertions(+), 86 deletions(-) diff --git a/examples/RLHF/infer_utils.py b/examples/RLHF/infer_utils.py index eec3d783481e..b11d27b0a432 100644 --- a/examples/RLHF/infer_utils.py +++ b/examples/RLHF/infer_utils.py @@ -126,6 +126,8 @@ def _create_caches(self): """inputs can be reused among multiple predictions, such as cache""" if hasattr(self, "cache_kvs_shape"): # has created cache input_length = getattr(self, "input_length", 0) + # TODO(guosheng): better way to get history max cahce length, we can + # not get cahce length form cache tensor when not know cache layout if input_length <= self.config.src_length: # reuse cahce return else: # create longer cache @@ -192,7 +194,13 @@ def set_state_dict(self, model, offload_model=True): def _preprocess(self, source): # make cache when infer happens to get actual shape to save memory self._create_caches() - return self._inputs_processer(source) + with guard_set_args(self.config, {"src_length": getattr(self, "input_length", self.config.src_length)}): + inputs = self._inputs_processer(source) + # We want to use a defined input_length to create cache and input_ids. + # However predictor could not use a specified length to pad currently. + # Thus we use this way to let get the actual input length. + self.infer_input_length = inputs["input_ids"].shape[-1] + return inputs @paddle.no_grad() def _infer(self, inputs): @@ -225,6 +233,17 @@ def predict(self, input_texts: str | list[str]): @contextmanager def infer_guard(trainer, offload_model=True): + # trainer might use an extra model instead of trainer.model for eval + eval_model = getattr(trainer, "_inner_eval_model", None) + model = trainer.model if eval_model is None else eval_model + + # PipelineParallel does not support inference speedup + if not getattr(trainer, "use_fusemt", False) or isinstance( + model, (dist.fleet.meta_parallel.PipelineLayer, dist.fleet.model.PipelineParallel) + ): + yield + return + try: try_import("paddlenlp_ops") except: @@ -233,9 +252,6 @@ def infer_guard(trainer, offload_model=True): return global policy_predictor - # trainer might use an extra model instead of trainer.model for eval - eval_model = getattr(trainer, "_inner_eval_model", None) - model = trainer.model if eval_model is None else eval_model if policy_predictor is None: policy_predictor = Predictor.create_predictor(trainer) if not policy_predictor.is_available: @@ -301,9 +317,26 @@ def generate(self, *args, **kwargs): # "chat_template": None }, ): + # NOTE: right padding in predictor according to prompt might have a + # different length with input_ids, espically when input_ids has more + # paddings than the necessary. Thus pass input_length to predictor to: + # 1. use a consistent length to replace input_ids back to output to + # keep the same padding format. however predictor could not use a + # specified length to pad currently + # 2. 
allow to use a dynamic length for memory efficiency (by a smaller + # cache) policy_predictor.input_length = input_ids.shape[-1] outputs = policy_predictor.predict(prompts) - outputs = (outputs[0][:, input_ids.shape[-1] :],) if generation_config.trunc_input else (outputs[0],) + + if generation_config.trunc_input: + outputs = (outputs[0][:, policy_predictor.infer_input_length :],) + return outputs + + if policy_predictor.input_length != policy_predictor.infer_input_length: + outputs = (paddle.concat([input_ids, outputs[0][:, policy_predictor.infer_input_length :]], axis=-1),) + return outputs + + outputs = (outputs[0],) if self.tokenizer.padding_side == "left": # convert back to left padding inputs outputs[0][:, : input_ids.shape[-1]] = input_ids diff --git a/examples/RLHF/ppo_trainer.py b/examples/RLHF/ppo_trainer.py index cb7c8eeb6cc2..da5a8fcf5710 100644 --- a/examples/RLHF/ppo_trainer.py +++ b/examples/RLHF/ppo_trainer.py @@ -82,7 +82,7 @@ def reload_tensor_to_gpu(tensors): for _, v in tensors.items(): reload_tensor_to_gpu(v) elif isinstance(tensors, paddle.Tensor): - if not tensors.place.is_gpu_place(): + if tensors._is_initialized() and not tensors.place.is_gpu_place(): gpu_tensor = tensors._copy_to(paddle.CUDAPlace(global_dev_id), False) tensors.value().get_tensor()._share_data_with(gpu_tensor.value().get_tensor()) else: @@ -472,6 +472,7 @@ def __init__( if getattr(self, "loss_cls", None) and self.criterion is None: self.criterion = self.create_criterion() + self.use_fusemt = getattr(args, "use_fusemt", False) # ablout 4s slower than infer generation without ema self.use_ema = getattr(args, "use_ema", False) self.shard_ema = getattr(args, "shard_ema", False) @@ -766,7 +767,6 @@ def ema_init(self, offload_ema=True, offload_model=False, shard_ema=True): if sharding_size > 1 and shard_ema else None ) - # print("=" * 20, "structured_names", structured_names) # for pipeline model, use `model.state_dict()` would auto map param name # for name, p in self.model.named_parameters(): for name, p in self.model.state_dict().items(): @@ -774,7 +774,6 @@ def ema_init(self, offload_ema=True, offload_model=False, shard_ema=True): ema_p = p.detach().cast(dtype=paddle.float32) if offload_ema: ema_p = ema_p.pin_memory() - # print("="*20, "ema name", name) self.ema_state_dict[name] = ema_p if offload_model: cpu_p = p.pin_memory() @@ -1306,8 +1305,6 @@ def get_epoch_iterator(self): num_ptx_batches = len(self.ptx_dataloader) num_ptx_replicas = (num_prompt_only_batches + num_ptx_batches - 1) // num_ptx_batches - @ema(self.policy_trainer) - @ema(self.value_trainer) def gen_epoch_data(): for prompt_only_batch, ptx_batch in zip( self.prompt_only_dataloader, @@ -1317,63 +1314,65 @@ def gen_epoch_data(): self.set_eval() # self.optimizer.offload() - if self.args.eval_mode is not None and "optimizer" in self.args.offload_level: - self.timers and self.timers("offload-optimizer").start() - offload_tensor_to_cpu(self.policy_trainer.optimizer.state_dict()) - offload_tensor_to_cpu(self.value_trainer.optimizer.state_dict()) - self.timers and self.timers("offload-optimizer").stop() - - self.timers and self.timers("export-evaluate-model").start() + # if self.args.eval_mode is not None and "optimizer" in self.args.offload_level: + # self.timers and self.timers("offload-optimizer").start() + # offload_tensor_to_cpu(self.policy_trainer.optimizer.state_dict()) + # offload_tensor_to_cpu(self.value_trainer.optimizer.state_dict()) + # self.timers and self.timers("offload-optimizer").stop() + + with ema(self.policy_trainer), 
ema(self.value_trainer): + if self.args.eval_mode is not None: + self.timers and self.timers("export-evaluate-model").start() + + self.policy_trainer.export_evaluate_model( + self.policy_trainer.model, + self._policy_model_eval, + with_offload="train_model" in self.args.offload_level, + ) + self.value_trainer.export_evaluate_model( + self.value_trainer.model, + self._value_model_eval, + with_offload="train_model" in self.args.offload_level, + ) + gp = ( + self.policy_trainer._policy_model_eval_group + if hasattr(self.policy_trainer, "_policy_model_eval_group") + else None + ) + # gp = create_data_trans_group(self.args.logical_process_index, paddle.distributed.get_rank(), self._policy_model_eval.config.tensor_parallel_degree) + # # todo: zhui + self.timers and self.timers("export-evaluate-model").stop() - self.policy_trainer.export_evaluate_model( - self.policy_trainer.model, - self._policy_model_eval, - with_offload="train_model" in self.args.offload_level, - ) - gp = ( - self.policy_trainer._policy_model_eval_group - if hasattr(self.policy_trainer, "_policy_model_eval_group") - else None - ) - # gp = create_data_trans_group(self.args.logical_process_index, paddle.distributed.get_rank(), self._policy_model_eval.config.tensor_parallel_degree) - # # todo: zhui - self.value_trainer.export_evaluate_model( - self.value_trainer.model, - self._value_model_eval, - with_offload="train_model" in self.args.offload_level, - ) - self.timers and self.timers("export-evaluate-model").stop() - - # self.reference_model.reload() - # self.reward_model.reload() - if "reward" in self.args.offload_level: - self.timers and self.timers("reload-reward").start() - reload_tensor_to_gpu(self.reference_model.state_dict()) - reload_tensor_to_gpu(self.reward_model.state_dict()) - self.timers and self.timers("reload-reward").stop() - - # todo, split prompt_only_batch - # pp2tp2dp2 -> dp4tp2 prompt_only_batch - self.timers and self.timers("resplit-data").start() - prompt_only_batch = data_group_split(prompt_only_batch, group=gp) - self.timers and self.timers("resplit-data").stop() - - self.timers and self.timers("split-rl-micro-batches").start() - # 生成数据 - # per_train 4, accu 8 - # prompt 32 - - # 32? [4,4,4,4,4,4,4] - rl_batches = self.split_rl_micro_batches(prompt_only_batch) - # rl_batches = self.load_sing_gen_data(as_batches=True, - # use_counter=True) - self.timers and self.timers("split-rl-micro-batches").stop() + # self.reference_model.reload() + # self.reward_model.reload() + if "reward" in self.args.offload_level: + self.timers and self.timers("reload-reward").start() + reload_tensor_to_gpu(self.reference_model.state_dict()) + reload_tensor_to_gpu(self.reward_model.state_dict()) + self.timers and self.timers("reload-reward").stop() + + # todo, split prompt_only_batch + # pp2tp2dp2 -> dp4tp2 prompt_only_batch + self.timers and self.timers("resplit-data").start() + prompt_only_batch = data_group_split(prompt_only_batch, group=gp) + self.timers and self.timers("resplit-data").stop() + + self.timers and self.timers("split-rl-micro-batches").start() + # 生成数据 + # per_train 4, accu 8 + # prompt 32 + + # 32? 
[4,4,4,4,4,4,4] + rl_batches = self.split_rl_micro_batches(prompt_only_batch) + # rl_batches = self.load_sing_gen_data(as_batches=True, + # use_counter=True) + self.timers and self.timers("split-rl-micro-batches").stop() self.timers and self.timers("ptx-batch").start() if self.use_ptx: - ptx_batch = data_group_split(ptx_batch, group=gp) + # ptx_batch = data_group_split(ptx_batch, group=gp) ptx_batches = self.split_ptx_micro_batches(ptx_batch) - ptx_batches = data_group_merge(ptx_batches, group=gp) + # ptx_batches = data_group_merge(ptx_batches, group=gp) else: ptx_batches = [None for _ in range(len(rl_batches))] @@ -1399,7 +1398,7 @@ def gen_epoch_data(): # policy_model_eval.cleanup() # value_model_eval.cleanup() if self.args.offload_level is not None: - if "eval" in self.args.offload_level: + if self.args.eval_mode is not None and "eval" in self.args.offload_level: self.timers and self.timers("offload-eval").start() cleanup_tensor_space(self._policy_model_eval.state_dict()) cleanup_tensor_space(self._value_model_eval.state_dict()) @@ -1805,18 +1804,21 @@ def split_rl_micro_batches( total_batch_size = prompt_only_batch["input_ids"].shape[0] micro_batch_size = self.args.per_device_train_batch_size micro_batches = [] - for i in range(0, total_batch_size, micro_batch_size): - micro_batch = {} - micro_batch = map_structure( - lambda tensor: tensor[i : i + micro_batch_size], - prompt_only_batch, - ) - micro_batches.extend(self.rollout(micro_batch)) - + with infer_guard(self.policy_trainer): + for i in range(0, total_batch_size, micro_batch_size): + micro_batch = {} + micro_batch = map_structure( + lambda tensor: tensor[i : i + micro_batch_size], + prompt_only_batch, + ) + micro_batches.extend(self.generate(micro_batch)) + # size of micro_batches (num of training batch) would be: + # per_device_prompt_batch_size * num_return_sequences // per_device_train_batch_size + micro_batches = [self.post_rollout(*micro_batch) for micro_batch in micro_batches] return micro_batches @paddle.no_grad() - def rollout(self, prompt_only_batch: PromptOnlyBatch) -> List[Dict[str, Any]]: + def generate(self, prompt_only_batch: PromptOnlyBatch) -> List[Dict[str, Any]]: """Rollout a batch of experiences.""" input_ids = prompt_only_batch["input_ids"] attention_mask = prompt_only_batch["attention_mask"] @@ -1829,27 +1831,23 @@ def rollout(self, prompt_only_batch: PromptOnlyBatch) -> List[Dict[str, Any]]: # change sequences here. self.timers and self.timers("actor-model-generate").start() - with infer_guard(self.policy_trainer): - sequences = self.actor_model.generate( - input_ids=input_ids, - attention_mask=attention_mask, - position_ids=position_ids, - generation_config=self.generation_config, - synced_gpus=ShardingOption.FULL_SHARD in self.policy_trainer.args.sharding, - )[0] + sequences = self.actor_model.generate( + input_ids=input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + generation_config=self.generation_config, + synced_gpus=ShardingOption.FULL_SHARD in self.policy_trainer.args.sharding, + )[0] self.timers and self.timers("actor-model-generate").stop() sequences = sequences.reshape([input_ids.shape[0], self.args.num_return_sequences, -1]).transpose([1, 0, 2]) + # prompt, sequence, attention_mask return [ - # TODO(guosheng): move post_rollout out to split_rl_micro_batches - # to allow infer model generate multi times consecutively and then - # convert weights, otherwise we have to convert weights multi times - # when need multi batch rollout data. 
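(Worked example for the micro-batch sizing noted in split_rl_micro_batches above: with per_device_prompt_batch_size 16, num_return_sequences 1 and per_device_train_batch_size 16 as in ppo_config.json, each prompt batch yields a single training micro-batch; raising num_return_sequences to 4 would yield 16 * 4 // 16 = 4 micro-batches per prompt batch.)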
- self.post_rollout( + ( input_ids, seq, - attention_mask=paddle.logical_and( + paddle.logical_and( seq != self.tokenizer.pad_token_id, seq != self.tokenizer.unk_token_id, ), From cb6e4ff23fa0ac69001575fc0e96e5d5a397ab95 Mon Sep 17 00:00:00 2001 From: whucsgs Date: Tue, 26 Mar 2024 05:22:20 +0000 Subject: [PATCH 37/46] Revert #7818 for llama and remove position_ids for gen/train/eval to align. --- examples/RLHF/ppo_config.json | 28 ++++----- examples/RLHF/ppo_trainer.py | 79 +++++++++--------------- paddlenlp/generation/utils.py | 28 ++++----- paddlenlp/transformers/llama/modeling.py | 5 +- 4 files changed, 61 insertions(+), 79 deletions(-) diff --git a/examples/RLHF/ppo_config.json b/examples/RLHF/ppo_config.json index bd6da3cbde18..96f0e26f0be5 100644 --- a/examples/RLHF/ppo_config.json +++ b/examples/RLHF/ppo_config.json @@ -6,15 +6,15 @@ "reward_model_name_or_path": "PKU-Alignment/beaver-7b-v1.0-reward", "_actor_model_name_or_path": "facebook/llama-7b", "_reward_model_name_or_path": "facebook/llama-7b", - "output_dir": "./ppo-sd14pp2-test", + "output_dir": "/root/paddlejob/workspace/guosheng/ckpts/ppo-reshard-sd38", "max_length": 512, "temperature": 1.0, "num_return_sequences":1, "repetition_penalty": 1.0, "num_train_epochs": 1, "update_iters": 1, - "per_device_prompt_batch_size": 64, - "per_device_train_batch_size": 64, + "per_device_prompt_batch_size": 16, + "per_device_train_batch_size": 16, "gradient_accumulation_steps": 1, "learning_rate": 1e-5, "weight_decay": 0.01, @@ -32,7 +32,7 @@ "clip_range_score": 50.0, "clip_range_value": 5.0, "ptx_coeff": 16.0, - "per_device_eval_batch_size": 64, + "per_device_eval_batch_size": 16, "logging_steps": 1, "evaluation_strategy": "steps", "eval_steps": 100, @@ -44,19 +44,19 @@ "do_eval": true, "disable_tqdm": true, "save_total_limit": 1, - "sharding_parallel_degree": 2, - "sharding": "stage1", - "tensor_parallel_degree": 2, - "pipeline_parallel_degree": 2, + "sharding_parallel_degree": 8, + "sharding": "stage3", + "tensor_parallel_degree": 1, + "pipeline_parallel_degree": 1, "pipeline_parallel_config": "disable_p2p_cache_shape", "comment-PKU_Beaver-max_grad_norm": 1.0, "max_grad_norm": 1.0, "adam_beta1": 0.9, "adam_beta2": 0.95, - "overwrite_output_dir": 1, - "skip_profile_timer": 0, - "skip_memory_metric": 0, - "dataloader_drop_last": true, - "eval_mode": "tensor_parallel", - "offload_level": "eval optimizer" + "_overwrite_output_dir": 1, + "_skip_profile_timer": 0, + "_skip_memory_metric": 0, + "_dataloader_drop_last": true, + "_eval_mode": "tensor_parallel", + "_offload_level": "eval optimizer" } diff --git a/examples/RLHF/ppo_trainer.py b/examples/RLHF/ppo_trainer.py index 89455468c24a..4e3f78b6562e 100644 --- a/examples/RLHF/ppo_trainer.py +++ b/examples/RLHF/ppo_trainer.py @@ -25,12 +25,11 @@ import paddle.nn as nn from data import DummyDataset, PromptOnlyBatch from infer_utils import InferEvalModel, infer_guard -from models.ppo_model_utils import ( +from models.ppo_model_utils import ( # make_position_ids, RLHFPPOMixedLoss, RLHFValueLoss, create_loss, gather_log_probabilities, - make_position_ids, ) from paddle.distributed import fleet from paddle.io import DataLoader, Dataset, DistributedBatchSampler @@ -1065,11 +1064,12 @@ def __init__( from paddle.distributed.fleet.meta_parallel import PipelineLayer + # allow reference_model/reward_model to use different dist strategy with guard_set_args( args, { "recompute": False, - "fp16_opt_level": "O1", + # "fp16_opt_level": "O1", "pipeline_parallel_degree": args.pipeline_parallel_degree if 
isinstance(reference_model, PipelineLayer) else 1, # workaround for pipeline parallel model check @@ -1191,9 +1191,9 @@ def prediction_step( seq = self.actor_model.generate( input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"], - position_ids=inputs["position_ids"] - if "position_ids" in inputs - else make_position_ids(inputs["attention_mask"]), + # position_ids=inputs["position_ids"] + # if "position_ids" in inputs + # else make_position_ids(inputs["attention_mask"]), generation_config=self.generation_config, synced_gpus=ShardingOption.FULL_SHARD in self.policy_trainer.args.sharding, )[0] @@ -1369,35 +1369,14 @@ def gen_epoch_data(): reload_tensor_to_gpu(self.reward_model.state_dict()) self.timers and self.timers("reload-reward").stop() - # todo, split prompt_only_batch - # pp2tp2dp2 -> dp4tp2 prompt_only_batch + # TODO(guosheng): guard for data split/merge self.timers and self.timers("resplit-data").start() prompt_only_batch = data_group_split(prompt_only_batch, group=gp) self.timers and self.timers("resplit-data").stop() - self.timers and self.timers("split-rl-micro-batches").start() - # 生成数据 - # per_train 4, accu 8 - # prompt 32 - - # 32? [4,4,4,4,4,4,4] rl_batches = self.split_rl_micro_batches(prompt_only_batch) - # rl_batches = self.load_sing_gen_data(as_batches=True, - # use_counter=True) self.timers and self.timers("split-rl-micro-batches").stop() - - self.timers and self.timers("ptx-batch").start() - if self.use_ptx: - # ptx_batch = data_group_split(ptx_batch, group=gp) - ptx_batches = self.split_ptx_micro_batches(ptx_batch) - # ptx_batches = data_group_merge(ptx_batches, group=gp) - else: - ptx_batches = [None for _ in range(len(rl_batches))] - - self.timers and self.timers("ptx-batch").stop() - self.timers and self.timers("merge-data").start() - # todo, merge data if gp is not None: input_ids_length = rl_batches[0]["input_ids"].shape[-1] rl_batches[0]["input_ids_length"] = paddle.to_tensor( @@ -1406,15 +1385,17 @@ def gen_epoch_data(): rl_batches = data_group_merge(rl_batches, group=gp) input_ids_length_batchs = rl_batches[0].pop("input_ids_length") rl_batches[0] = repad_rl_batches(rl_batches[0], input_ids_length_batchs) + self.timers and self.timers("merge-data").stop() + + self.timers and self.timers("ptx-batch").start() + if self.use_ptx: + ptx_batches = self.split_ptx_micro_batches(ptx_batch) + else: + ptx_batches = [None for _ in range(len(rl_batches))] + self.timers and self.timers("ptx-batch").stop() paddle.device.cuda.empty_cache() - self.timers and self.timers("merge-data").stop() - # # 数据造好, 开始训练 - # self.reference_model.offload() - # self.reward_model.offload() - # policy_model_eval.cleanup() - # value_model_eval.cleanup() if self.args.offload_level is not None: if self.args.eval_mode is not None and "eval" in self.args.offload_level: self.timers and self.timers("offload-eval").start() @@ -1580,17 +1561,18 @@ def train( # policy_model.reload() # value_model.reload() - self.timers and self.timers("offload-reload").start() - reload_tensor_to_gpu(self.actor_model.state_dict()) - reload_tensor_to_gpu(self.reward_critic_model.state_dict()) - self.timers and self.timers("offload-reload").stop() + # self.timers and self.timers("offload-reload").start() + # reload_tensor_to_gpu(self.actor_model.state_dict()) + # reload_tensor_to_gpu(self.reward_critic_model.state_dict()) + # self.timers and self.timers("offload-reload").stop() logger.info("Doing rl step...") self.timers and self.timers("rl_step").start() rl_info = self.rl_step(rl_batch) 
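A note on the input_ids_length bookkeeping earlier in this hunk: data_group_merge pads every gathered rollout batch to the longest sequence across the group, so the original length is attached as a tensor before the merge, and repad_rl_batches then uses those lengths to overwrite position_ids in the padded tail (setting them to a constant) once the batches are back together.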
paddle.device.cuda.empty_cache() self.timers and self.timers("rl_step").stop() - if self.args.eval_mode is not None and "optimizer" in self.args.offload_level: + + if "optimizer" in self.args.offload_level: self.timers and self.timers("offload-value-optimizer").start() offload_tensor_to_cpu(self.value_trainer.optimizer.state_dict()) self.timers and self.timers("offload-value-optimizer").stop() @@ -1603,6 +1585,7 @@ def train( paddle.device.cuda.empty_cache() self.timers and self.timers("ptx_step").stop() + if "optimizer" in self.args.offload_level: self.timers and self.timers("offload-policy-optimizer").start() offload_tensor_to_cpu(self.policy_trainer.optimizer.state_dict()) self.timers and self.timers("offload-policy-optimizer").stop() @@ -1741,7 +1724,7 @@ def rl_step(self, rl_batch: Dict[str, paddle.Tensor]) -> Dict[str, Any]: # inputs shared by policy and value trainer input_ids = rl_batch["input_ids"] # length: src+tgt attention_mask = rl_batch["attention_mask"] # length: src+tgt - position_ids = rl_batch["position_ids"] # length: src+tgt + position_ids = None # rl_batch["position_ids"] # length: src+tgt sequence_mask = rl_batch["sequence_mask"] # length: src+tgt(-1) # inputs used by policy trainer old_log_probs = rl_batch["log_probs"] # length: src+tgt(-1) @@ -1840,19 +1823,17 @@ def generate(self, prompt_only_batch: PromptOnlyBatch) -> List[Dict[str, Any]]: """Rollout a batch of experiences.""" input_ids = prompt_only_batch["input_ids"] attention_mask = prompt_only_batch["attention_mask"] - position_ids = ( - prompt_only_batch["position_ids"] - if "position_ids" in prompt_only_batch - else make_position_ids(attention_mask) - ) - # NOTE: generation output of paddlenlp do not contain prompt, we should - # change sequences here. + # position_ids = ( + # prompt_only_batch["position_ids"] + # if "position_ids" in prompt_only_batch + # else make_position_ids(attention_mask) + # ) self.timers and self.timers("actor-model-generate").start() sequences = self.actor_model.generate( input_ids=input_ids, attention_mask=attention_mask, - position_ids=position_ids, + # position_ids=position_ids, generation_config=self.generation_config, synced_gpus=ShardingOption.FULL_SHARD in self.policy_trainer.args.sharding, )[0] @@ -1900,7 +1881,7 @@ def post_rollout( # If using right padding source + left padding target, make padding positions # in source be 0, since reward model use position_ids plus with padding size # (number of 0s) in source to calculate end offsets. - position_ids = make_position_ids(attention_mask) + position_ids = None # make_position_ids(attention_mask) # pipe model outputs a logits tensor with LMHead, while non-pipe model # outputs a tuple with logits tensor as the only one element. 
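Since the comment above recurs in post_rollout, a tiny illustrative helper makes the convention explicit (an assumption about how call sites could wrap it, not code from the patch): pipeline-parallel LMHead models hand back the logits tensor directly, while non-pipeline models wrap it in a tuple.

import paddle

def unwrap_logits(model_outputs):
    # Pipeline-parallel models return the logits tensor itself; non-pipeline
    # models return a tuple whose first element is the logits.
    return model_outputs if isinstance(model_outputs, paddle.Tensor) else model_outputs[0]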
diff --git a/paddlenlp/generation/utils.py b/paddlenlp/generation/utils.py index 0b4d0583e412..fbcd93fdc12d 100644 --- a/paddlenlp/generation/utils.py +++ b/paddlenlp/generation/utils.py @@ -1208,19 +1208,19 @@ def sample( probs = TopPProcess(probs, top_p, min_tokens_to_keep) # multinomial already support fp16 and bf16 currently, fix issue: https://github.com/PaddlePaddle/Paddle/issues/51852 - next_tokens = paddle.multinomial(probs) - # # multinomial not support fp16 and bf16 currently, issue: https://github.com/PaddlePaddle/Paddle/issues/51852 - # if probs.dtype == paddle.bfloat16 and top_k == 1: - # probs = probs.astype("float32") - # next_tokens = paddle.unsqueeze(paddle.argmax(probs, axis=-1), -1) - # else: - # # next_tokens = paddle.multinomial(probs) - # probs = probs.cpu() - # from paddlenlp.transformers.utils import device_guard - - # with device_guard("cpu"): - # next_tokens = paddle.multinomial(probs) - # next_tokens = next_tokens.cuda() + # next_tokens = paddle.multinomial(probs) + # multinomial not support fp16 and bf16 currently, issue: https://github.com/PaddlePaddle/Paddle/issues/51852 + if probs.dtype == paddle.bfloat16 and top_k == 1: + probs = probs.astype("float32") + next_tokens = paddle.unsqueeze(paddle.argmax(probs, axis=-1), -1) + else: + # next_tokens = paddle.multinomial(probs) + probs = probs.cpu() + from paddlenlp.transformers.utils import device_guard + + with device_guard("cpu"): + next_tokens = paddle.multinomial(probs) + next_tokens = next_tokens.cuda() if self.config.tensor_parallel_degree > 1: # Maybe no need to broadcast if seed is set correclty. @@ -1240,7 +1240,7 @@ def sample( # and sampling, and then broadcast to avoid broadcast logits. if hasattr(self, "pp_group"): paddle.distributed.broadcast( - next_tokens, src=self.pp_group.ranks[-1], group=self.pp_group # use rank 0 for same seed to check + next_tokens, src=self.pp_group.ranks[0], group=self.pp_group # use rank 0 for same seed to check ) next_scores = paddle.index_sample(origin_probs, next_tokens) diff --git a/paddlenlp/transformers/llama/modeling.py b/paddlenlp/transformers/llama/modeling.py index 0dfc6b0ca009..346f8adaef05 100644 --- a/paddlenlp/transformers/llama/modeling.py +++ b/paddlenlp/transformers/llama/modeling.py @@ -366,8 +366,9 @@ def forward(self, hidden_states): if paddle.in_dynamic_mode(): with paddle.amp.auto_cast(False): - hidden_states = hidden_states.astype("float32") - variance = hidden_states.pow(2).mean(-1, keepdim=True) + # hidden_states = hidden_states.astype("float32") + # variance = hidden_states.pow(2).mean(-1, keepdim=True) + variance = hidden_states.astype("float32").pow(2).mean(-1, keepdim=True) hidden_states = paddle.rsqrt(variance + self.variance_epsilon) * hidden_states else: hidden_states = hidden_states.astype("float32") From 4ddd4156dbf1550e19e78aa02f393ed56b88f0ff Mon Sep 17 00:00:00 2001 From: whucsgs Date: Mon, 1 Apr 2024 02:44:35 +0000 Subject: [PATCH 38/46] Move reload/clean/data_group to comm_utils and use guard to decorate them. --- examples/RLHF/comm_utils.py | 403 +++++++++++++ examples/RLHF/infer_utils.py | 26 + examples/RLHF/models/score_model_utils.py | 11 +- examples/RLHF/ppo_trainer.py | 704 ++++++++-------------- 4 files changed, 683 insertions(+), 461 deletions(-) create mode 100644 examples/RLHF/comm_utils.py diff --git a/examples/RLHF/comm_utils.py b/examples/RLHF/comm_utils.py new file mode 100644 index 000000000000..de077c65db31 --- /dev/null +++ b/examples/RLHF/comm_utils.py @@ -0,0 +1,403 @@ +# Copyright (c) 2023 PaddlePaddle Authors. 
All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys + +import paddle +import paddle.distributed as dist + +from paddlenlp.trainer.plugins.unified_checkpoint import flatten_list +from paddlenlp.trainer.trainer import Trainer, logger +from paddlenlp.trainer.utils.helper import nested_broadcast_tensor_with_empty +from paddlenlp.utils.distributed import distributed_gather + +global_dev_id = 0 if paddle.get_device() == "cpu" else int(paddle.get_device().split(":")[1]) + + +def offload_tensor_to_cpu(tensors): + if isinstance(tensors, dict): + for _, v in tensors.items(): + offload_tensor_to_cpu(v) + elif isinstance(tensors, paddle.Tensor): + if tensors.place.is_gpu_place(): + cpu_tensor = tensors._copy_to(paddle.CUDAPinnedPlace(), False) + tensors.value().get_tensor()._share_data_with(cpu_tensor.value().get_tensor()) + else: + logger.warning(f"Can't parse for type {type(tensors)}") + return tensors + + +def reload_tensor_to_gpu(tensors): + if isinstance(tensors, dict): + for _, v in tensors.items(): + reload_tensor_to_gpu(v) + elif isinstance(tensors, paddle.Tensor): + if tensors._is_initialized() and not tensors.place.is_gpu_place(): + gpu_tensor = tensors._copy_to(paddle.CUDAPlace(global_dev_id), False) + tensors.value().get_tensor()._share_data_with(gpu_tensor.value().get_tensor()) + else: + logger.warning(f"Can't parse for type {type(tensors)}") + return tensors + + +def cleanup_tensor_space(tensors): + if isinstance(tensors, dict): + for _, v in tensors.items(): + cleanup_tensor_space(v) + elif isinstance(tensors, paddle.Tensor): + tensors._clear_data() + else: + logger.warning(f"Can't parse for type {type(tensors)}") + return tensors + + +def data_group_split(tensors, group): + if group is None: + return tensors + if isinstance(tensors, (list, tuple)): + return type(tensors)(data_group_split(t, group) for t in tensors) + elif isinstance(tensors, dict): + new_dict = {} + for k, v in tensors.items(): + new_dict[k] = data_group_split(v, group) + return new_dict + elif isinstance(tensors, paddle.Tensor): + return tensors.split(group.nranks)[group.rank] + else: + logger.warning(f"Can't parse for type {type(tensors)}") + return tensors + + +def data_group_merge(tensors, group): + if group is None: + return tensors + + if isinstance(tensors, (list, tuple)): + return type(tensors)(data_group_merge(t, group) for t in tensors) + elif isinstance(tensors, dict): + new_dict = {} + for k, v in tensors.items(): + new_dict[k] = data_group_merge(v, group) + return new_dict + elif isinstance(tensors, paddle.Tensor): + tensor_list = [] + all_gather_nd(tensor_list, tensors, group=group, padded=True) + return paddle.concat(tensor_list) + else: + logger.warning(f"Can't parse for type {type(tensors)}") + return tensors + + +def group_rank_guard(group, rank=0): + def decorator(func): + def wrapper_func(*args, **kwargs): + if group.rank == rank: + ret = func(*args, **kwargs) + dist.barrier() + else: + ret = None + dist.barrier() + ret = nested_broadcast_tensor_with_empty(ret, 
group=group) + return ret + + return wrapper_func + + return decorator + + +def repad_rl_batches(batches, input_lengths): + if batches.get("position_ids", None) is not None: + v = batches["position_ids"] + for x in range(v.shape[0]): + v[x, input_lengths[x] :] = 1 + batches["position_ids"] = v + for key in list(batches.keys()): + if batches[key].shape[0] != input_lengths.shape[0]: + batches[key] = batches[key].mean() + + return batches + + +# https://stackoverflow.com/questions/12594148/skipping-execution-of-with-block +class SkipWithBlock(Exception): + pass + + +class SkipContextManager: + def __init__(self, skip): + self.skip = skip + + def __enter__(self): + if self.skip: + sys.settrace(lambda *args, **keys: None) + frame = sys._getframe(1) + frame.f_trace = self.trace + + def trace(self, frame, event, arg): + raise SkipWithBlock() + + def __exit__(self, type, value, traceback): + if type is None: + return # No exception + if issubclass(type, SkipWithBlock): + return True # Suppress special SkipWithBlock exception + + +def all_gather_nd(tensor_list, tensor, group=None, padded=False): + """ + Gathers tensor arrays of different lengths in a list. + The length dimension is 0. This supports any number of extra dimensions in the tensors. + All the other dimensions should be equal between the tensors. + + Args: + tensor (Tensor): Tensor to be broadcast from current process. + + Returns: + (Tensor): output list of tensors that can be of different sizes + """ + if len(tensor.shape) == 0: + tensor = tensor.reshape([1]) + dist.all_gather(tensor_list, tensor, group=group) + return tensor_list + + world_size = group.nranks + local_size = paddle.to_tensor(tensor.shape, place=tensor.place) + all_sizes = [paddle.zeros_like(local_size) for _ in range(world_size)] + dist.all_gather(all_sizes, local_size, group=group) + + # max_length = max(size[0] for size in all_sizes) + + # length_diff = max_length.item() - local_size[0].item() + # if length_diff: + # pad_size = (length_diff, *tensor.size()[1:]) + # padding = paddle.zeros(pad_size, place=tensor.place(), dtype=tensor.dtype) + # tensor = padle.concat((tensor, padding)) + + max_length = max(size[-1] for size in all_sizes) + + length_diff = max_length.item() - local_size[-1].item() + if length_diff: + pad_size = (*tensor.shape[:-1], length_diff) + padding = paddle.zeros(pad_size, dtype=tensor.dtype) + tensor = paddle.concat([tensor, padding], axis=-1) + + all_tensors_padded = [] + dist.all_gather(all_tensors_padded, tensor, group=group) + # all_tensors = [] + if padded: + tensor_list.extend(all_tensors_padded) + return all_tensors_padded + + for tensor_, size in zip(all_tensors_padded, all_sizes): + tensor_list.append(tensor_[..., : size[-1]]) + return tensor_list + + +def export_evaluate_model(self: Trainer, train_model, eval_model, **kwargs): + if eval_model is None: + return None + + with_offload = kwargs.pop("with_offload", False) + train_tp_size = max(train_model.config.tensor_parallel_degree, 1) + eval_tp_size = max(eval_model.config.tensor_parallel_degree, 1) + eval_tp_rank = max(eval_model.config.tensor_parallel_rank, 0) + + hcg = dist.fleet.get_hybrid_communicate_group() + tp_group = hcg.get_model_parallel_group() + pp_group = hcg.get_pipe_parallel_group() + sd_group = hcg.get_sharding_parallel_group() + dp_group = hcg.get_data_parallel_group() + + global_rank = paddle.distributed.get_rank() + + train_state_dict = train_model.state_dict() + eval_state_dict = eval_model.state_dict() + + if dp_group.rank <= 0 and sd_group.rank <= 0: + train_pp_size 
= pp_group.nranks + if eval_tp_size > 1 and train_tp_size != eval_tp_size: + raise ValueError("Only support for the same tensor_parallel_degree for train and eval model for now.") + + # 单卡情况 + # tp->single + # tp+pp -> single + if eval_tp_size == 1: + if train_pp_size == 1 and train_tp_size > 1: + # tp ->single + logger.error("using tp to single eval model.") + # state = train_model.merge_tensor_parallel() + tp_actions = train_model.get_tensor_parallel_convert_actions( + train_model.config, + loaded_state_dict_keys=eval_state_dict.keys(), + is_split=False, + ignore_error=False, + ) + + is_dst = global_rank == 0 + for key in eval_state_dict.keys(): + tensor = train_state_dict[key] + if key in tp_actions: + ret = distributed_gather(tensor, dst=0, group=tp_group, offload=False) + action = tp_actions.pop(key) + tensor = action(ret) if is_dst else None + else: + tensor = tensor._copy_to(paddle.CPUPlace(), False) if is_dst else None + + if tensor is not None: + eval_state_dict[key].set_value(tensor) + + if not eval_state_dict[key]._is_initialized(): + v = eval_state_dict[key] + t = paddle._C_ops.full_like(v, 0, v.dtype, paddle.CUDAPlace(global_dev_id)) + v.get_tensor()._share_data_with(t.get_tensor()) + + if with_offload: + offload_tensor_to_cpu(train_state_dict[key]) + else: + # single to single + # tp+pp -> single + raise ValueError("Not support yet.") + + def create_send_recv_table(train_keys, eval_keys): + recv_table = [] + send_table = [] + if pp_group.rank == 0: + for key in eval_keys: + recv_table.append((key, global_rank)) + + for key in train_keys: + send_table.append((key, global_rank)) + + all_recv, all_send = [], [] + paddle.distributed.all_gather_object(all_recv, [recv_table], group=pp_group) + paddle.distributed.all_gather_object(all_send, [send_table], group=pp_group) + all_recv = flatten_list(all_recv) + all_send = flatten_list(all_send) + + send_dict = {} + for k, v in all_send: + send_dict[k] = v + + table = [] + for k, v in all_recv: + # key, send, recv + table.append([k, send_dict.pop(k), v]) + assert len(send_dict) == 0, f"Some key can't be recv {send_dict.keys()}" + return table + + # pp0tp0 -> pp0tp0 + # pp0tp1 -> pp0tp1 + # pp1tp0 -> pp0tp0 + # pp1tp1 -> pp0tp1 + + # tp情况 + # tp+pp->tp + self.timers and self.timers("export-merge-pp").start() + if eval_tp_size > 1 and train_pp_size > 1: + table = create_send_recv_table(train_state_dict.keys(), eval_state_dict.keys()) + + for key, src_rank, dst_rank in table: + # Init tensor for model is cleaned + if not eval_state_dict[key]._is_initialized(): + v = eval_state_dict[key] + t = paddle._C_ops.full_like(v, 0, v.dtype, paddle.CUDAPlace(global_dev_id)) + v.get_tensor()._share_data_with(t.get_tensor()) + + if src_rank == dst_rank and global_rank == src_rank: + eval_state_dict[key].copy_(train_state_dict[key], True) + else: + if global_rank == src_rank: + dist.stream.send(train_state_dict[key], dst=dst_rank) + + if global_rank == dst_rank: + dist.stream.recv(eval_state_dict[key], src=src_rank) + + # Offload train model if need + if global_rank == src_rank and with_offload: + offload_tensor_to_cpu(train_state_dict[key]) + + self.timers and self.timers("export-merge-pp").stop() + self.timers and self.timers("export-broadcast-pp").start() + if pp_group.nranks > 1: + paddle.distributed.parallel.sync_params_buffers( + eval_model, comm_group=pp_group, src_rank=pp_group.ranks[0], fuse_params=False + ) + self.timers and self.timers("export-broadcast-pp").stop() + else: + # 其他 DP rank 的state dict, 适配 offload 和初始化 + self.timers and 
self.timers("export-offload-and-init").start() + if with_offload: + for key in list(train_state_dict.keys()): + offload_tensor_to_cpu(train_state_dict[key]) + for k, v in eval_state_dict.items(): + if not v._is_initialized(): + t = paddle._C_ops.full_like(v, 0, v.dtype, paddle.CUDAPlace(global_dev_id)) + v.get_tensor()._share_data_with(t.get_tensor()) + self.timers and self.timers("export-offload-and-init").stop() + + paddle.distributed.barrier() + self.timers and self.timers("export-broadcast-sd-dp").start() + if eval_tp_size == 1: + for _, tensor in eval_state_dict.items(): + paddle.distributed.broadcast(tensor, src=0, group=None, sync_op=True) + else: + if sd_group.nranks > 1: + if dp_group.rank <= 0: + paddle.distributed.parallel.sync_params_buffers( + eval_model, comm_group=sd_group, src_rank=sd_group.ranks[0], fuse_params=False + ) + if dp_group.nranks > 1: + paddle.distributed.parallel.sync_params_buffers( + eval_model, comm_group=dp_group, src_rank=dp_group.ranks[0], fuse_params=False + ) + self.timers and self.timers("export-broadcast-sd-dp").stop() + # paddle.save(eval_state_dict, f"./tmp/eval_{sd_group.rank}_tp_{eval_tp_rank}_pp_{pp_group.rank}.pdparams") + # paddle.save(train_state_dict, f"./tmp/train_{sd_group.rank}_tp_{tp_group.rank}_pp_{pp_group.rank}.pdparams") + # paddle.distributed.barrier() + # exit(-1) + + old_dp_workers = self.args.world_size // (max(sd_group.nranks, 1) * max(dp_group.nranks, 1)) + group_nums = self.args.logical_process_index // old_dp_workers * eval_tp_size + eval_tp_rank + + if not hasattr(self, "_policy_model_eval_group") or self._policy_model_eval_group is None: + self._policy_model_eval_group = create_data_trans_group(global_rank, group_nums) + + return None + + +def create_data_trans_group(global_rank, group_nums): + all_split_table = [] + paddle.distributed.all_gather_object(all_split_table, [(global_rank, group_nums)]) + all_split_table = flatten_list(all_split_table) + split_dict = {} + for k, v in all_split_table: + split_dict[k] = v + + split_ranks = {} + for k, v in all_split_table: + if v in split_ranks: + split_ranks[v].append(k) + else: + split_ranks[v] = [k] + + group = None + for k, ranks in split_ranks.items(): + gp = paddle.distributed.new_group(ranks=ranks) + if global_rank in ranks: + group = gp + + return group + + +Trainer.export_evaluate_model = export_evaluate_model diff --git a/examples/RLHF/infer_utils.py b/examples/RLHF/infer_utils.py index b11d27b0a432..d91141532411 100644 --- a/examples/RLHF/infer_utils.py +++ b/examples/RLHF/infer_utils.py @@ -20,6 +20,7 @@ import paddle import paddle.distributed as dist +from comm_utils import cleanup_tensor_space, offload_tensor_to_cpu, reload_tensor_to_gpu from paddle.utils import try_import from trainer_utils import guard_set_args @@ -280,6 +281,31 @@ def __init__(self, trainer: Trainer): eval_model = getattr(trainer, "_inner_eval_model", None) self.model: PretrainedModel = trainer.model if eval_model is None else eval_model self.tokenizer: PretrainedTokenizer = trainer.tokenizer + self.trainer = trainer + + def enable(self): + trainer = self.trainer + if trainer.model is not self.model: + trainer.export_evaluate_model( + trainer.model, + self.model, + with_offload="train_model" in trainer.args.offload_level, + ) + else: + reload_tensor_to_gpu(self.model.state_dict()) + + def disable(self): + trainer = self.trainer + if trainer.model is not self.model: + cleanup_tensor_space(self.model.state_dict()) + else: + offload_tensor_to_cpu(self.model.state_dict()) + + def __getattr__(self, 
name): + try: + return super().__getattr__(name) + except AttributeError: + return getattr(self.model, name) def eval(self): self.model.eval() diff --git a/examples/RLHF/models/score_model_utils.py b/examples/RLHF/models/score_model_utils.py index bd9854f169aa..73dc551b28e3 100644 --- a/examples/RLHF/models/score_model_utils.py +++ b/examples/RLHF/models/score_model_utils.py @@ -180,9 +180,16 @@ def get_score( # Take left padding into account, which has 0s in left and max_len # in right. left_pad_mask = position_ids == 0 - position_ids = paddle.where( - left_pad_mask, position_ids, position_ids + left_pad_mask.sum(-1, keepdim=True) - 1 + # position_ids = paddle.where( + # left_pad_mask, position_ids, position_ids + left_pad_mask.sum(-1, keepdim=True) - 1 + # ) + # the above limits right padding must not be 0s, the following suits + # to both left and right padding with 0s + left_pad_num = ( + paddle.where(left_pad_mask, position_ids.shape[-1] + 100, position_ids).argmin(axis=-1, keepdim=True) + - 1 ) + position_ids = left_pad_num + position_ids second_pos = paddle.max(position_ids, axis=-1, keepdim=True) end_pos = paddle.stack([first_pos, second_pos], axis=-1).squeeze(1) end_score = scores.gather_nd(end_pos) diff --git a/examples/RLHF/ppo_trainer.py b/examples/RLHF/ppo_trainer.py index 4e3f78b6562e..483066974c54 100644 --- a/examples/RLHF/ppo_trainer.py +++ b/examples/RLHF/ppo_trainer.py @@ -16,20 +16,28 @@ import itertools import math import os -import sys import time from typing import Any, Callable, Dict, List, Optional, Tuple, Union import paddle import paddle.distributed as dist import paddle.nn as nn +from comm_utils import ( # noqa + cleanup_tensor_space, + create_data_trans_group, + data_group_merge, + data_group_split, + offload_tensor_to_cpu, + reload_tensor_to_gpu, +) from data import DummyDataset, PromptOnlyBatch from infer_utils import InferEvalModel, infer_guard -from models.ppo_model_utils import ( # make_position_ids, +from models.ppo_model_utils import ( RLHFPPOMixedLoss, RLHFValueLoss, create_loss, gather_log_probabilities, + make_position_ids, ) from paddle.distributed import fleet from paddle.io import DataLoader, Dataset, DistributedBatchSampler @@ -46,7 +54,6 @@ from paddlenlp.data import DataCollator from paddlenlp.generation import GenerationConfig -from paddlenlp.trainer.plugins.unified_checkpoint import flatten_list from paddlenlp.trainer.trainer import ( EvalLoopOutput, EvalPrediction, @@ -57,391 +64,9 @@ logger, speed_metrics, ) -from paddlenlp.trainer.utils.helper import nested_broadcast_tensor_with_empty -from paddlenlp.utils.distributed import distributed_gather - -global_dev_id = 0 if paddle.get_device() == "cpu" else int(paddle.get_device().split(":")[1]) from paddlenlp.transformers import PretrainedModel, PretrainedTokenizer -def offload_tensor_to_cpu(tensors): - if isinstance(tensors, dict): - for _, v in tensors.items(): - offload_tensor_to_cpu(v) - elif isinstance(tensors, paddle.Tensor): - if tensors.place.is_gpu_place(): - cpu_tensor = tensors._copy_to(paddle.CUDAPinnedPlace(), False) - tensors.value().get_tensor()._share_data_with(cpu_tensor.value().get_tensor()) - else: - logger.warning(f"Can't parse for type {type(tensors)}") - return tensors - - -def reload_tensor_to_gpu(tensors): - if isinstance(tensors, dict): - for _, v in tensors.items(): - reload_tensor_to_gpu(v) - elif isinstance(tensors, paddle.Tensor): - if tensors._is_initialized() and not tensors.place.is_gpu_place(): - gpu_tensor = tensors._copy_to(paddle.CUDAPlace(global_dev_id), 
False) - tensors.value().get_tensor()._share_data_with(gpu_tensor.value().get_tensor()) - else: - logger.warning(f"Can't parse for type {type(tensors)}") - return tensors - - -def cleanup_tensor_space(tensors): - if isinstance(tensors, dict): - for _, v in tensors.items(): - cleanup_tensor_space(v) - elif isinstance(tensors, paddle.Tensor): - tensors._clear_data() - else: - logger.warning(f"Can't parse for type {type(tensors)}") - return tensors - - -def data_group_split(tensors, group): - if group is None: - return tensors - if isinstance(tensors, (list, tuple)): - return type(tensors)(data_group_split(t, group) for t in tensors) - elif isinstance(tensors, dict): - new_dict = {} - for k, v in tensors.items(): - new_dict[k] = data_group_split(v, group) - return new_dict - elif isinstance(tensors, paddle.Tensor): - return tensors.split(group.nranks)[group.rank] - else: - logger.warning(f"Can't parse for type {type(tensors)}") - return tensors - - -def data_group_merge(tensors, group): - if group is None: - return tensors - - if isinstance(tensors, (list, tuple)): - return type(tensors)(data_group_merge(t, group) for t in tensors) - elif isinstance(tensors, dict): - new_dict = {} - for k, v in tensors.items(): - new_dict[k] = data_group_merge(v, group) - return new_dict - elif isinstance(tensors, paddle.Tensor): - tensor_list = [] - all_gather_nd(tensor_list, tensors, group=group, padded=True) - return paddle.concat(tensor_list) - else: - logger.warning(f"Can't parse for type {type(tensors)}") - return tensors - - -def group_rank_guard(group, rank=0): - def decorator(func): - def wrapper_func(*args, **kwargs): - if group.rank == rank: - ret = func(*args, **kwargs) - dist.barrier() - else: - ret = None - dist.barrier() - ret = nested_broadcast_tensor_with_empty(ret, group=group) - return ret - - return wrapper_func - - return decorator - - -def repad_rl_batches(batches, input_lengths): - if "position_ids" in batches: - v = batches["position_ids"] - for x in range(v.shape[0]): - v[x, input_lengths[x] :] = 1 - batches["position_ids"] = v - for key in list(batches.keys()): - if batches[key].shape[0] != input_lengths.shape[0]: - batches[key] = batches[key].mean() - - return batches - - -# https://stackoverflow.com/questions/12594148/skipping-execution-of-with-block -class SkipWithBlock(Exception): - pass - - -class SkipContextManager: - def __init__(self, skip): - self.skip = skip - - def __enter__(self): - if self.skip: - sys.settrace(lambda *args, **keys: None) - frame = sys._getframe(1) - frame.f_trace = self.trace - - def trace(self, frame, event, arg): - raise SkipWithBlock() - - def __exit__(self, type, value, traceback): - if type is None: - return # No exception - if issubclass(type, SkipWithBlock): - return True # Suppress special SkipWithBlock exception - - -def all_gather_nd(tensor_list, tensor, group=None, padded=False): - """ - Gathers tensor arrays of different lengths in a list. - The length dimension is 0. This supports any number of extra dimensions in the tensors. - All the other dimensions should be equal between the tensors. - - Args: - tensor (Tensor): Tensor to be broadcast from current process. 
- - Returns: - (Tensor): output list of tensors that can be of different sizes - """ - if len(tensor.shape) == 0: - tensor = tensor.reshape([1]) - dist.all_gather(tensor_list, tensor, group=group) - return tensor_list - - world_size = group.nranks - local_size = paddle.to_tensor(tensor.shape, place=tensor.place) - all_sizes = [paddle.zeros_like(local_size) for _ in range(world_size)] - dist.all_gather(all_sizes, local_size, group=group) - - # max_length = max(size[0] for size in all_sizes) - - # length_diff = max_length.item() - local_size[0].item() - # if length_diff: - # pad_size = (length_diff, *tensor.size()[1:]) - # padding = paddle.zeros(pad_size, place=tensor.place(), dtype=tensor.dtype) - # tensor = padle.concat((tensor, padding)) - - max_length = max(size[-1] for size in all_sizes) - - length_diff = max_length.item() - local_size[-1].item() - if length_diff: - pad_size = (*tensor.shape[:-1], length_diff) - padding = paddle.zeros(pad_size, dtype=tensor.dtype) - tensor = paddle.concat([tensor, padding], axis=-1) - - all_tensors_padded = [] - dist.all_gather(all_tensors_padded, tensor, group=group) - # all_tensors = [] - if padded: - tensor_list.extend(all_tensors_padded) - return all_tensors_padded - - for tensor_, size in zip(all_tensors_padded, all_sizes): - tensor_list.append(tensor_[..., : size[-1]]) - return tensor_list - - -def export_evaluate_model(self: Trainer, train_model, eval_model, **kwargs): - if eval_model is None: - return None - - with_offload = kwargs.pop("with_offload", False) - train_tp_size = max(train_model.config.tensor_parallel_degree, 1) - eval_tp_size = max(eval_model.config.tensor_parallel_degree, 1) - eval_tp_rank = max(eval_model.config.tensor_parallel_rank, 0) - - hcg = fleet.get_hybrid_communicate_group() - tp_group = hcg.get_model_parallel_group() - pp_group = hcg.get_pipe_parallel_group() - sd_group = hcg.get_sharding_parallel_group() - dp_group = hcg.get_data_parallel_group() - - global_rank = paddle.distributed.get_rank() - - train_state_dict = train_model.state_dict() - eval_state_dict = eval_model.state_dict() - - if dp_group.rank <= 0 and sd_group.rank <= 0: - train_pp_size = pp_group.nranks - if eval_tp_size > 1 and train_tp_size != eval_tp_size: - raise ValueError("Only support for the same tensor_parallel_degree for train and eval model for now.") - - # 单卡情况 - # tp->single - # tp+pp -> single - if eval_tp_size == 1: - if train_pp_size == 1 and train_tp_size > 1: - # tp ->single - logger.error("using tp to single eval model.") - # state = train_model.merge_tensor_parallel() - tp_actions = train_model.get_tensor_parallel_convert_actions( - train_model.config, - loaded_state_dict_keys=eval_state_dict.keys(), - is_split=False, - ignore_error=False, - ) - - is_dst = global_rank == 0 - for key in eval_state_dict.keys(): - tensor = train_state_dict[key] - if key in tp_actions: - ret = distributed_gather(tensor, dst=0, group=tp_group, offload=False) - action = tp_actions.pop(key) - tensor = action(ret) if is_dst else None - else: - tensor = tensor._copy_to(paddle.CPUPlace(), False) if is_dst else None - - if tensor is not None: - eval_state_dict[key].set_value(tensor) - - if not eval_state_dict[key]._is_initialized(): - v = eval_state_dict[key] - t = paddle._C_ops.full_like(v, 0, v.dtype, paddle.CUDAPlace(global_dev_id)) - v.get_tensor()._share_data_with(t.get_tensor()) - - if with_offload: - offload_tensor_to_cpu(train_state_dict[key]) - else: - # single to single - # tp+pp -> single - raise ValueError("Not support yet.") - - def 
create_send_recv_table(train_keys, eval_keys): - recv_table = [] - send_table = [] - if pp_group.rank == 0: - for key in eval_keys: - recv_table.append((key, global_rank)) - - for key in train_keys: - send_table.append((key, global_rank)) - - all_recv, all_send = [], [] - paddle.distributed.all_gather_object(all_recv, [recv_table], group=pp_group) - paddle.distributed.all_gather_object(all_send, [send_table], group=pp_group) - all_recv = flatten_list(all_recv) - all_send = flatten_list(all_send) - - send_dict = {} - for k, v in all_send: - send_dict[k] = v - - table = [] - for k, v in all_recv: - # key, send, recv - table.append([k, send_dict.pop(k), v]) - assert len(send_dict) == 0, f"Some key can't be recv {send_dict.keys()}" - return table - - # pp0tp0 -> pp0tp0 - # pp0tp1 -> pp0tp1 - # pp1tp0 -> pp0tp0 - # pp1tp1 -> pp0tp1 - - # tp情况 - # tp+pp->tp - self.timers and self.timers("export-merge-pp").start() - if eval_tp_size > 1 and train_pp_size > 1: - table = create_send_recv_table(train_state_dict.keys(), eval_state_dict.keys()) - - for key, src_rank, dst_rank in table: - # Init tensor for model is cleaned - if not eval_state_dict[key]._is_initialized(): - v = eval_state_dict[key] - t = paddle._C_ops.full_like(v, 0, v.dtype, paddle.CUDAPlace(global_dev_id)) - v.get_tensor()._share_data_with(t.get_tensor()) - - if src_rank == dst_rank and global_rank == src_rank: - eval_state_dict[key].copy_(train_state_dict[key], True) - else: - if global_rank == src_rank: - dist.stream.send(train_state_dict[key], dst=dst_rank) - - if global_rank == dst_rank: - dist.stream.recv(eval_state_dict[key], src=src_rank) - - # Offload train model if need - if global_rank == src_rank and with_offload: - offload_tensor_to_cpu(train_state_dict[key]) - - self.timers and self.timers("export-merge-pp").stop() - self.timers and self.timers("export-broadcast-pp").start() - if pp_group.nranks > 1: - paddle.distributed.parallel.sync_params_buffers( - eval_model, comm_group=pp_group, src_rank=pp_group.ranks[0], fuse_params=False - ) - self.timers and self.timers("export-broadcast-pp").stop() - else: - # 其他 DP rank 的state dict, 适配 offload 和初始化 - self.timers and self.timers("export-offload-and-init").start() - if with_offload: - for key in list(train_state_dict.keys()): - offload_tensor_to_cpu(train_state_dict[key]) - for k, v in eval_state_dict.items(): - if not v._is_initialized(): - t = paddle._C_ops.full_like(v, 0, v.dtype, paddle.CUDAPlace(global_dev_id)) - v.get_tensor()._share_data_with(t.get_tensor()) - self.timers and self.timers("export-offload-and-init").stop() - - paddle.distributed.barrier() - self.timers and self.timers("export-broadcast-sd-dp").start() - if eval_tp_size == 1: - for _, tensor in eval_state_dict.items(): - paddle.distributed.broadcast(tensor, src=0, group=None, sync_op=True) - else: - if sd_group.nranks > 1: - if dp_group.rank <= 0: - paddle.distributed.parallel.sync_params_buffers( - eval_model, comm_group=sd_group, src_rank=sd_group.ranks[0], fuse_params=False - ) - if dp_group.nranks > 1: - paddle.distributed.parallel.sync_params_buffers( - eval_model, comm_group=dp_group, src_rank=dp_group.ranks[0], fuse_params=False - ) - self.timers and self.timers("export-broadcast-sd-dp").stop() - # paddle.save(eval_state_dict, f"./tmp/eval_{sd_group.rank}_tp_{eval_tp_rank}_pp_{pp_group.rank}.pdparams") - # paddle.save(train_state_dict, f"./tmp/train_{sd_group.rank}_tp_{tp_group.rank}_pp_{pp_group.rank}.pdparams") - # paddle.distributed.barrier() - # exit(-1) - - old_dp_workers = self.args.world_size // 
(max(sd_group.nranks, 1) * max(dp_group.nranks, 1)) - group_nums = self.args.logical_process_index // old_dp_workers * eval_tp_size + eval_tp_rank - - if not hasattr(self, "_policy_model_eval_group") or self._policy_model_eval_group is None: - self._policy_model_eval_group = create_data_trans_group(global_rank, group_nums) - - return None - - -def create_data_trans_group(global_rank, group_nums): - all_split_table = [] - paddle.distributed.all_gather_object(all_split_table, [(global_rank, group_nums)]) - all_split_table = flatten_list(all_split_table) - split_dict = {} - for k, v in all_split_table: - split_dict[k] = v - - split_ranks = {} - for k, v in all_split_table: - if v in split_ranks: - split_ranks[v].append(k) - else: - split_ranks[v] = [k] - - group = None - for k, ranks in split_ranks.items(): - gp = paddle.distributed.new_group(ranks=ranks) - if global_rank in ranks: - group = gp - - return group - - -Trainer.export_evaluate_model = export_evaluate_model - - class StepTrainer(Trainer): """ Features of StepTrainer: @@ -517,10 +142,31 @@ def loss_identifier(self, inputs: Dict) -> str: return "tr_loss" def set_eval_model(self, model): + """ + To avoid eval/generation with PipelineParallel when training with PP, we + allow to use an extra eval model to do eval/generation, which would need + to reshard parameters and dispatch data according to model's distributed + topo. Currently, the eval model should cancel PP setting and keep the same + TP setting with training. + """ if model is None: logger.warning("use None to set eval model for trainer and it would be ignored") + return else: self._inner_eval_model = model + # bind a new comm group for eval model data dispatch + # param dispatch is binded in `InferEvalModel.enable` + hcg = fleet.get_hybrid_communicate_group() + sd_group = hcg.get_sharding_parallel_group() + dp_group = hcg.get_data_parallel_group() + global_rank = dist.get_rank() + eval_tp_size = max(model.config.tensor_parallel_degree, 1) + eval_tp_rank = max(model.config.tensor_parallel_rank, 0) + old_dp_workers = self.args.world_size // (max(sd_group.nranks, 1) * max(dp_group.nranks, 1)) + group_nums = self.args.logical_process_index // old_dp_workers * eval_tp_size + eval_tp_rank + self._data_trans_group = create_data_trans_group(global_rank, group_nums) + # just for compatiable with old code + self._policy_model_eval_group = self._data_trans_group def get_model(self, train=False): """ @@ -875,6 +521,27 @@ def __exit__(self, *args): self.trainer.ema_restore() +class enable(paddle.no_grad.__mro__[1]): + """offload""" + + def __init__(self, *args): + self.objs = args + + def __enter__(self): + for obj in self.objs: + if hasattr(obj, "enable"): + obj.enable() + else: + reload_tensor_to_gpu(obj.state_dict()) + + def __exit__(self, *args): + for obj in self.objs: + if hasattr(obj, "disable"): + obj.disable() + else: + offload_tensor_to_cpu(obj.state_dict()) + + class PolicyTrainer(StepTrainer): loss_cls = RLHFPPOMixedLoss @@ -1339,53 +1006,54 @@ def gen_epoch_data(): # self.timers and self.timers("offload-optimizer").stop() with ema(self.policy_trainer), ema(self.value_trainer): - if self.args.eval_mode is not None: - self.timers and self.timers("export-evaluate-model").start() - - self.policy_trainer.export_evaluate_model( - self.policy_trainer.model, - self._policy_model_eval, - with_offload="train_model" in self.args.offload_level, - ) - self.value_trainer.export_evaluate_model( - self.value_trainer.model, - self._value_model_eval, - with_offload="train_model" in 
self.args.offload_level, - ) - gp = ( - self.policy_trainer._policy_model_eval_group - if hasattr(self.policy_trainer, "_policy_model_eval_group") - else None - ) + # if self.args.eval_mode is not None: + # self.timers and self.timers("export-evaluate-model").start() + + # self.policy_trainer.export_evaluate_model( + # self.policy_trainer.model, + # self._policy_model_eval, + # with_offload="train_model" in self.args.offload_level, + # ) + # self.value_trainer.export_evaluate_model( + # self.value_trainer.model, + # self._value_model_eval, + # with_offload="train_model" in self.args.offload_level, + # ) + # gp = ( + # self.policy_trainer._policy_model_eval_group + # if hasattr(self.policy_trainer, "_policy_model_eval_group") + # else None + # ) + # gp = getattr(self.policy_trainer, "_data_trans_group", None) # gp = create_data_trans_group(self.args.logical_process_index, paddle.distributed.get_rank(), self._policy_model_eval.config.tensor_parallel_degree) # # todo: zhui - self.timers and self.timers("export-evaluate-model").stop() + # self.timers and self.timers("export-evaluate-model").stop() # self.reference_model.reload() # self.reward_model.reload() - if "reward" in self.args.offload_level: - self.timers and self.timers("reload-reward").start() - reload_tensor_to_gpu(self.reference_model.state_dict()) - reload_tensor_to_gpu(self.reward_model.state_dict()) - self.timers and self.timers("reload-reward").stop() + # if "reward" in self.args.offload_level: + # self.timers and self.timers("reload-reward").start() + # reload_tensor_to_gpu(self.reference_model.state_dict()) + # reload_tensor_to_gpu(self.reward_model.state_dict()) + # self.timers and self.timers("reload-reward").stop() # TODO(guosheng): guard for data split/merge - self.timers and self.timers("resplit-data").start() - prompt_only_batch = data_group_split(prompt_only_batch, group=gp) - self.timers and self.timers("resplit-data").stop() - self.timers and self.timers("split-rl-micro-batches").start() + # self.timers and self.timers("resplit-data").start() + # prompt_only_batch = data_group_split(prompt_only_batch, group=gp) + # self.timers and self.timers("resplit-data").stop() + # self.timers and self.timers("split-rl-micro-batches").start() rl_batches = self.split_rl_micro_batches(prompt_only_batch) - self.timers and self.timers("split-rl-micro-batches").stop() - self.timers and self.timers("merge-data").start() - if gp is not None: - input_ids_length = rl_batches[0]["input_ids"].shape[-1] - rl_batches[0]["input_ids_length"] = paddle.to_tensor( - [input_ids_length] * rl_batches[0]["input_ids"].shape[0], dtype="int64" - ) - rl_batches = data_group_merge(rl_batches, group=gp) - input_ids_length_batchs = rl_batches[0].pop("input_ids_length") - rl_batches[0] = repad_rl_batches(rl_batches[0], input_ids_length_batchs) - self.timers and self.timers("merge-data").stop() + # self.timers and self.timers("split-rl-micro-batches").stop() + # self.timers and self.timers("merge-data").start() + # if gp is not None: + # # input_ids_length = rl_batches[0]["input_ids"].shape[-1] + # # rl_batches[0]["input_ids_length"] = paddle.to_tensor( + # # [input_ids_length] * rl_batches[0]["input_ids"].shape[0], dtype="int64" + # # ) + # rl_batches = data_group_merge(rl_batches, group=gp) + # # input_ids_length_batchs = rl_batches[0].pop("input_ids_length") + # # rl_batches[0] = repad_rl_batches(rl_batches[0], input_ids_length_batchs) + # self.timers and self.timers("merge-data").stop() self.timers and self.timers("ptx-batch").start() if self.use_ptx: @@ 
-1396,17 +1064,17 @@ def gen_epoch_data(): paddle.device.cuda.empty_cache() - if self.args.offload_level is not None: - if self.args.eval_mode is not None and "eval" in self.args.offload_level: - self.timers and self.timers("offload-eval").start() - cleanup_tensor_space(self._policy_model_eval.state_dict()) - cleanup_tensor_space(self._value_model_eval.state_dict()) - self.timers and self.timers("offload-eval").stop() - if "reward" in self.args.offload_level: - self.timers and self.timers("offload-reward").start() - offload_tensor_to_cpu(self.reference_model.state_dict()) - offload_tensor_to_cpu(self.reward_model.state_dict()) - self.timers and self.timers("offload-reward").stop() + # if self.args.offload_level is not None: + # if self.args.eval_mode is not None and "eval" in self.args.offload_level: + # self.timers and self.timers("offload-eval").start() + # cleanup_tensor_space(self._policy_model_eval.state_dict()) + # cleanup_tensor_space(self._value_model_eval.state_dict()) + # self.timers and self.timers("offload-eval").stop() + # if "reward" in self.args.offload_level: + # self.timers and self.timers("offload-reward").start() + # offload_tensor_to_cpu(self.reference_model.state_dict()) + # offload_tensor_to_cpu(self.reward_model.state_dict()) + # self.timers and self.timers("offload-reward").stop() self.set_train() for _ in range(self.args.update_iters): @@ -1568,14 +1236,15 @@ def train( logger.info("Doing rl step...") self.timers and self.timers("rl_step").start() - rl_info = self.rl_step(rl_batch) + with self.enable(self.value_trainer.optimizer): + rl_info = self.rl_step(rl_batch) paddle.device.cuda.empty_cache() self.timers and self.timers("rl_step").stop() - if "optimizer" in self.args.offload_level: - self.timers and self.timers("offload-value-optimizer").start() - offload_tensor_to_cpu(self.value_trainer.optimizer.state_dict()) - self.timers and self.timers("offload-value-optimizer").stop() + # if "optimizer" in self.args.offload_level: + # self.timers and self.timers("offload-value-optimizer").start() + # offload_tensor_to_cpu(self.value_trainer.optimizer.state_dict()) + # self.timers and self.timers("offload-value-optimizer").stop() if self.use_ptx: logger.info("Doing ptx step...") @@ -1585,10 +1254,10 @@ def train( paddle.device.cuda.empty_cache() self.timers and self.timers("ptx_step").stop() - if "optimizer" in self.args.offload_level: - self.timers and self.timers("offload-policy-optimizer").start() - offload_tensor_to_cpu(self.policy_trainer.optimizer.state_dict()) - self.timers and self.timers("offload-policy-optimizer").stop() + # if "optimizer" in self.args.offload_level: + # self.timers and self.timers("offload-policy-optimizer").start() + # offload_tensor_to_cpu(self.policy_trainer.optimizer.state_dict()) + # self.timers and self.timers("offload-policy-optimizer").stop() self.state.global_step += 1 self.state.epoch = epoch + (step + 1) / steps_in_epoch @@ -1724,7 +1393,7 @@ def rl_step(self, rl_batch: Dict[str, paddle.Tensor]) -> Dict[str, Any]: # inputs shared by policy and value trainer input_ids = rl_batch["input_ids"] # length: src+tgt attention_mask = rl_batch["attention_mask"] # length: src+tgt - position_ids = None # rl_batch["position_ids"] # length: src+tgt + position_ids = rl_batch["position_ids"] # length: src+tgt sequence_mask = rl_batch["sequence_mask"] # length: src+tgt(-1) # inputs used by policy trainer old_log_probs = rl_batch["log_probs"] # length: src+tgt(-1) @@ -1775,11 +1444,27 @@ def rl_step(self, rl_batch: Dict[str, paddle.Tensor]) -> 
Dict[str, Any]: def ptx_step(self, ptx_batch: Dict[str, paddle.Tensor]) -> Dict[str, Any]: """Perform a single update step with PTX loss.""" + # sft inputs use right padding, position_ids is optional + # ptx_batch["position_ids"] = ptx_batch.get( + # "position_ids", make_position_ids(ptx_batch["attention_mask"])) ptx_loss = self.policy_trainer.full_training_step(**ptx_batch) return { "train/ptx_loss": ptx_loss, } + def enable(self, *args): + enable_map = { + # maybe use `model: (pattern, enable_method, disable_method)`` + self.actor_model: "eval", + self.reward_critic_model: "eval", + self.reference_model: "reward", + self.reward_model: "reward", + self.policy_trainer.optimizer: "optimizer", + self.value_trainer.optimizer: "optimizer", + } + objs = [arg for arg in args if enable_map.get(arg, "") in self.args.offload_level] + return enable(*objs) + def split_ptx_micro_batches( self, ptx_batch: Dict[str, paddle.Tensor], @@ -1797,6 +1482,18 @@ def split_ptx_micro_batches( micro_batches.append(micro_batch) return micro_batches + @staticmethod + def data_dispatch(fun): + def _impl(self, data): + gp = getattr(self.policy_trainer, "_data_trans_group", None) + data = data_group_split(data, group=gp) + data = fun(self, data) + data = data_group_merge(data, group=gp) + return data + + return _impl + + @data_dispatch def split_rl_micro_batches( self, prompt_only_batch: PromptOnlyBatch, @@ -1805,7 +1502,13 @@ def split_rl_micro_batches( total_batch_size = prompt_only_batch["input_ids"].shape[0] micro_batch_size = self.args.per_device_train_batch_size micro_batches = [] - with infer_guard(self.policy_trainer): + + # TODO(guosheng): clean get_epoch_iterator: + # 1. scope guard for offload, we would split post_rollout into multiple + # sub-methods to offload in-time + # 2. decorate split_rl_micro_batches to automatically split/merge data + with self.enable(self.actor_model, self.reference_model), infer_guard(self.policy_trainer): + # generate for multi batches and then disable FuseMT model for i in range(0, total_batch_size, micro_batch_size): micro_batch = {} micro_batch = map_structure( @@ -1813,9 +1516,25 @@ def split_rl_micro_batches( prompt_only_batch, ) micro_batches.extend(self.generate(micro_batch)) + # get log_probs for multi batches and then disable actor/refer rmodel + for micro_batch in micro_batches: + # position_ids is necessary for non-right padding + # If using right padding source + left padding target, make padding positions + # in source be 0, since reward model use position_ids plus with padding size + # (number of 0s) in source to calculate end offsets. 
+ micro_batch["position_ids"] = make_position_ids(micro_batch["attention_mask"]) + micro_batch.update(self.rollout_logprob(**micro_batch)) + + # get reward/value for multi batches and then disable reward/value model + with self.enable(self.reward_critic_model, self.reward_model): + for micro_batch in micro_batches: + micro_batch.update(self.rollout_reward_value(**micro_batch)) + + # + micro_batches = [self.normalize_data(micro_batch, use_tgt_len_value=False) for micro_batch in micro_batches] # size of micro_batches (num of training batch) would be: # per_device_prompt_batch_size * num_return_sequences // per_device_train_batch_size - micro_batches = [self.post_rollout(*micro_batch) for micro_batch in micro_batches] + # micro_batches = [self.post_rollout(**micro_batch) for micro_batch in micro_batches] return micro_batches @paddle.no_grad() @@ -1823,17 +1542,14 @@ def generate(self, prompt_only_batch: PromptOnlyBatch) -> List[Dict[str, Any]]: """Rollout a batch of experiences.""" input_ids = prompt_only_batch["input_ids"] attention_mask = prompt_only_batch["attention_mask"] - # position_ids = ( - # prompt_only_batch["position_ids"] - # if "position_ids" in prompt_only_batch - # else make_position_ids(attention_mask) - # ) self.timers and self.timers("actor-model-generate").start() sequences = self.actor_model.generate( input_ids=input_ids, attention_mask=attention_mask, - # position_ids=position_ids, + position_ids=prompt_only_batch["position_ids"] + if "position_ids" in prompt_only_batch + else make_position_ids(attention_mask), generation_config=self.generation_config, synced_gpus=ShardingOption.FULL_SHARD in self.policy_trainer.args.sharding, )[0] @@ -1843,17 +1559,87 @@ def generate(self, prompt_only_batch: PromptOnlyBatch) -> List[Dict[str, Any]]: # prompt, sequence, attention_mask return [ - ( - input_ids, - seq, - paddle.logical_and( + { + "prompt": input_ids, + "input_ids": seq, # "sequence": + "attention_mask": paddle.logical_and( seq != self.tokenizer.pad_token_id, seq != self.tokenizer.unk_token_id, ), - ) + } for seq in sequences ] + @paddle.no_grad() + def rollout_logprob( + self, input_ids: paddle.Tensor, attention_mask: paddle.Tensor, position_ids: paddle.Tensor = None, **kwargs + ) -> Dict[str, paddle.Tensor]: + # pipe model outputs a logits tensor with LMHead, while non-pipe model + # outputs a tuple with logits tensor as the only one element. 
+ logits = self.actor_model( + input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + # return_dict=True, + ) # .logits + if not isinstance(logits, paddle.Tensor): + logits = logits[0] + ref_logits = self.reference_model( + input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + # return_dict=True, + ) # .logits + if not isinstance(ref_logits, paddle.Tensor): + ref_logits = ref_logits[0] + log_probs = gather_log_probabilities(logits[:, :-1], input_ids[:, 1:]) + ref_log_probs = gather_log_probabilities(ref_logits[:, :-1], input_ids[:, 1:]) + return {"log_probs": log_probs, "ref_log_probs": ref_log_probs} + + @paddle.no_grad() + def rollout_reward_value( + self, input_ids: paddle.Tensor, attention_mask: paddle.Tensor, position_ids: paddle.Tensor = None, **kwargs + ) -> Dict[str, paddle.Tensor]: + if self.reward_tokenizer is not self.tokenizer: + # right padding + reward_tokenize_output = batch_retokenize( + input_ids, + src_tokenizer=self.tokenizer, + dest_tokenizer=self.reward_tokenizer, + skip_special_tokens=True, + ) + reward_input_ids = reward_tokenize_output["input_ids"] + reward_attention_mask = reward_tokenize_output["attention_mask"] + reward_position_ids = make_position_ids(reward_attention_mask) + else: + # for text in self.tokenizer.batch_decode(sequence, skip_special_tokens=True): + # print(text) + reward_input_ids = input_ids + reward_attention_mask = attention_mask + reward_position_ids = position_ids + reward_score = self.reward_model( + reward_input_ids, + attention_mask=reward_attention_mask, + position_ids=reward_position_ids, + # return_dict=True, + )[ + 1 + ] # .end_scores + + reward_value = self.reward_critic_model( + input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + # return_dict=True, + )[ + 0 + ] # .scores + reward_score = reward_score.squeeze(axis=-1) + reward_value = reward_value.squeeze(axis=-1) + + reward_value = reward_value[:, :-1] + return {"rewards": reward_score, "reward_values": reward_value} + @paddle.no_grad() def post_rollout( self, @@ -1881,7 +1667,7 @@ def post_rollout( # If using right padding source + left padding target, make padding positions # in source be 0, since reward model use position_ids plus with padding size # (number of 0s) in source to calculate end offsets. - position_ids = None # make_position_ids(attention_mask) + position_ids = make_position_ids(attention_mask) # pipe model outputs a logits tensor with LMHead, while non-pipe model # outputs a tuple with logits tensor as the only one element. From b68cb0d5e75e53d204e848f45394905f2feb841d Mon Sep 17 00:00:00 2001 From: whucsgs Date: Tue, 7 May 2024 07:56:13 +0000 Subject: [PATCH 39/46] Offload sync and other data reuse fix. 
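
Parameter offload in this series relies on non-blocking copies (`_copy_to(..., blocking=False)` followed by `_share_buffer_to`), so this patch adds explicit `paddle.device.cuda.synchronize()` calls once per enable/disable batch instead of blocking per tensor. A minimal sketch of that pattern, assuming a dict of initialized GPU parameters; the helper name below is illustrative and not part of this tree:

    import paddle

    def offload_state_dict_async(state_dict):
        # Issue non-blocking D2H copies into pinned host memory and re-point
        # each parameter at the host buffer so GPU memory can be reused.
        for param in state_dict.values():
            if param._is_initialized() and param.place.is_gpu_place():
                host = param._copy_to(paddle.CUDAPinnedPlace(), False)  # blocking=False
                host._share_buffer_to(param)
        # The copies above are still in flight; wait once for the whole batch
        # of tensors rather than synchronizing per parameter.
        paddle.device.cuda.synchronize()

The same copy-then-share trick is applied to the inference model weights in set_state_dict below, where weights shared with the train model are skipped and only the non-shared source tensors are staged to host memory.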
--- examples/RLHF/infer_utils.py | 66 ++++++++++- examples/RLHF/models/ppo_model_utils.py | 1 - examples/RLHF/ppo_trainer.py | 150 +++++++++++++++++------- examples/RLHF/trainer_utils.py | 6 +- 4 files changed, 175 insertions(+), 48 deletions(-) diff --git a/examples/RLHF/infer_utils.py b/examples/RLHF/infer_utils.py index d91141532411..7ea19b50bb11 100644 --- a/examples/RLHF/infer_utils.py +++ b/examples/RLHF/infer_utils.py @@ -77,8 +77,11 @@ def create_infer_model(model, dtype, set_state=False): config.use_cachekv_int8 = False config.single_card_ptq = True infer_model_cls = getattr(paddlenlp.experimental.transformers, model.__class__.__name__ + "InferenceModel") + # ori_init_weights = infer_model_cls.init_weights + # infer_model_cls.init_weights = lambda self: None with dtype_guard(dtype): infer_model = infer_model_cls(config) + # infer_model_cls.init_weights = ori_init_weights if set_state: state_dict = {} @@ -94,6 +97,7 @@ def create_infer_model(model, dtype, set_state=False): def _create_param(self, *args, **kwargs): param = ori_creat_param(self, *args, **kwargs) param._clear_data() + # param._clear() return param paddle.nn.Layer.create_parameter = _create_param @@ -101,6 +105,8 @@ def _create_param(self, *args, **kwargs): eval_model = getattr(trainer, "_inner_eval_model", None) infer_model = create_infer_model(trainer.model if eval_model is None else eval_model, dtype=trainer.amp_dtype) paddle.nn.Layer.create_parameter = ori_creat_param + # for k, v in infer_model.state_dict().items(): + # v._clear() # create predictor parser = PdArgumentParser((PredictorArgument,)) @@ -114,7 +120,7 @@ def _create_param(self, *args, **kwargs): "batch_size": trainer.args.per_device_train_batch_size, # infer model do not support top_k, and differ with non-infer model # generation which gets default top_K=50 using generation_config.top_k - "top_p": 0.0, + "top_p": 0.8, # trainer.args.top_p, "temperature": trainer.args.temperature, "repetition_penalty": trainer.args.repetition_penalty, @@ -154,6 +160,7 @@ def disable(self, model, onload_model=True): # clear params for _, param in self.model.state_dict().items(): param._clear_data() + # param._clear() if onload_model: model.to(paddle.device.get_device()) self.is_available = False @@ -175,21 +182,63 @@ def set_state_dict(self, model, offload_model=True): if getattr(self, "_weights_mapping", None) is None: self._weights_mapping = self.model.get_weights_mapping() + # non_share_params = [] for k, v in self._weights_mapping.items(): param, (convert_fun, args) = k, v args = [state_dict[name] for name in args] value = convert_fun(*args) + # non_share_params = [] + # for arg in args: + # # shared params no need to offload + # if value is not arg: + # non_share_params.append(arg) + # print("=" * 20, "name", v[1], + # [(arg.shape, arg.place, arg.dtype) for arg in args], + # value.shape, value.place, value.dtype, + # isinstance(value.place, paddle.CUDAPlace), + # value.place.is_gpu_place()) if offload_model: for arg in args: # shared params no need to offload if value is not arg: - arg.to(offload_place, blocking=False) + # arg.to(offload_place, blocking=True) + # cpu_arg = arg.pin_memory() + cpu_arg = arg._copy_to(offload_place, blocking=False) + cpu_arg._share_buffer_to(arg) + # print("=" * 20, "not share param name", v[1], + # value.place.is_gpu_place(), + # value._is_initialized(), param._is_initialized()) + # else: + # print("=" * 20, "share param name", v[1], + # value.place.is_gpu_place(), + # value._is_initialized(), param._is_initialized()) + # print("=" * 
20, "name", v[1], + # [(arg.shape, arg.place, arg.dtype) for arg in args], + # value.shape, value.place, value.dtype, + # isinstance(value.place, paddle.CUDAPlace), + # value.place.is_gpu_place()) if not isinstance(value, paddle.Tensor): param.set_value(value) - elif isinstance(value.place, paddle.CUDAPlace): - value._share_buffer_to(param) + # elif isinstance(value.place, paddle.CUDAPlace): + elif value.place.is_gpu_place(): + # NOTE: _share_buffer_to seems do not work + # value._share_buffer_to(param) + # value._share_underline_tensor_to(param) + param.get_tensor()._share_data_with(value.get_tensor()) else: - param.copy_(value, False) + param.copy_(value, True) + # if offload_model: + # if value is not args[0]: + # value._clear_data() + # else: + # value.to(offload_place, blocking=True) + + # if offload_model: + # for param in non_share_params: + # param.to(offload_place, blocking=True) + # if offload_model: + # for param in non_share_params: + # param.to(offload_place, blocking=False) paddle.device.cuda.synchronize() def _preprocess(self, source): @@ -256,7 +305,9 @@ def infer_guard(trainer, offload_model=True): if policy_predictor is None: policy_predictor = Predictor.create_predictor(trainer) if not policy_predictor.is_available: + # print("="*20, "enable predictor begin") policy_predictor.enable(model, offload_model=offload_model) + # print("="*20, "enable predictor end") # TODO(guosheng): patch for dist.all_recude to use tp group, fix it later ori_all_reduce = dist.all_reduce @@ -291,15 +342,19 @@ def enable(self): self.model, with_offload="train_model" in trainer.args.offload_level, ) + # print("=" * 20, "enable export_evaluate_model") else: reload_tensor_to_gpu(self.model.state_dict()) + # print("=" * 20, "enable reload_tensor_to_gpu") def disable(self): trainer = self.trainer if trainer.model is not self.model: cleanup_tensor_space(self.model.state_dict()) + # print("=" * 20, "disable cleanup_tensor_space") else: offload_tensor_to_cpu(self.model.state_dict()) + # print("=" * 20, "disable offload_tensor_to_cpu") def __getattr__(self, name): try: @@ -353,6 +408,7 @@ def generate(self, *args, **kwargs): # cache) policy_predictor.input_length = input_ids.shape[-1] outputs = policy_predictor.predict(prompts) + print("=" * 20, "output shape", outputs[0].shape) if generation_config.trunc_input: outputs = (outputs[0][:, policy_predictor.infer_input_length :],) diff --git a/examples/RLHF/models/ppo_model_utils.py b/examples/RLHF/models/ppo_model_utils.py index e7146adaa9f7..5fa1b4e1a6ed 100644 --- a/examples/RLHF/models/ppo_model_utils.py +++ b/examples/RLHF/models/ppo_model_utils.py @@ -95,7 +95,6 @@ def create_loss(loss_cls, config, extra_args, merge_labels=None): # create from TrainingArguments loss_kwargs = dict([(name, getattr(extra_args, name)) for name in loss_arg_names if hasattr(extra_args, name)]) loss = loss_cls(config, **loss_kwargs) - loss_cls.forward = ori_fwd return loss diff --git a/examples/RLHF/ppo_trainer.py b/examples/RLHF/ppo_trainer.py index 483066974c54..c1fe67e6cf9d 100644 --- a/examples/RLHF/ppo_trainer.py +++ b/examples/RLHF/ppo_trainer.py @@ -17,6 +17,7 @@ import math import os import time +import types from typing import Any, Callable, Dict, List, Optional, Tuple, Union import paddle @@ -61,6 +62,7 @@ Trainer, TrainerCallback, TrainingArguments, + check_memory_usage, logger, speed_metrics, ) @@ -114,7 +116,7 @@ def __init__( if getattr(self, "loss_cls", None) and self.criterion is None: self.criterion = self.create_criterion() - self.use_fusemt = getattr(args, 
"use_fusemt", False) + self.use_fusemt = getattr(args, "use_fusemt", True) # ablout 4s slower than infer generation without ema self.use_ema = getattr(args, "use_ema", False) self.shard_ema = getattr(args, "shard_ema", False) @@ -533,6 +535,11 @@ def __enter__(self): obj.enable() else: reload_tensor_to_gpu(obj.state_dict()) + # print("=" * 20, "enable reload_tensor_to_gpu") + # offload_tensor_to_cpu/reload_tensor_to_gpu use non-blocking copy + if len(self.objs) > 0: + paddle.device.cuda.synchronize() + check_memory_usage("enable memory") def __exit__(self, *args): for obj in self.objs: @@ -540,6 +547,11 @@ def __exit__(self, *args): obj.disable() else: offload_tensor_to_cpu(obj.state_dict()) + # print("=" * 20, "disable offload_tensor_to_cpu") + # offload_tensor_to_cpu/reload_tensor_to_gpu use non-blocking copy + if len(self.objs) > 0: + paddle.device.cuda.synchronize() + check_memory_usage("disable memory") class PolicyTrainer(StepTrainer): @@ -997,6 +1009,7 @@ def gen_epoch_data(): ): # generate batches self.set_eval() + # print("=" * 20, "gen data begin") # self.optimizer.offload() # if self.args.eval_mode is not None and "optimizer" in self.args.offload_level: @@ -1042,7 +1055,9 @@ def gen_epoch_data(): # prompt_only_batch = data_group_split(prompt_only_batch, group=gp) # self.timers and self.timers("resplit-data").stop() # self.timers and self.timers("split-rl-micro-batches").start() + check_memory_usage("split_rl_micro_batches begin memory") rl_batches = self.split_rl_micro_batches(prompt_only_batch) + check_memory_usage("clear_cache split_rl_micro_batches memory") # self.timers and self.timers("split-rl-micro-batches").stop() # self.timers and self.timers("merge-data").start() # if gp is not None: @@ -1062,7 +1077,9 @@ def gen_epoch_data(): ptx_batches = [None for _ in range(len(rl_batches))] self.timers and self.timers("ptx-batch").stop() + check_memory_usage("gen data memory") paddle.device.cuda.empty_cache() + check_memory_usage("clear_cache gen data memory") # if self.args.offload_level is not None: # if self.args.eval_mode is not None and "eval" in self.args.offload_level: @@ -1212,6 +1229,28 @@ def train( self._globalstep_last_start_time = start_time # self.timers and self.timers("read-data").start() + policy_opt = self.policy_trainer.optimizer._inner_opt._inner_opt._create_accumulators + + def _policy_opt(self, block, parameters): + check_memory_usage("before policy_trainer create accumulators") + policy_opt(block, parameters) + check_memory_usage("after policy_trainer create accumulators") + + self.policy_trainer.optimizer._inner_opt._inner_opt._create_accumulators = types.MethodType( + _policy_opt, self.policy_trainer.optimizer._inner_opt._inner_opt + ) + + value_opt = self.value_trainer.optimizer._inner_opt._inner_opt._create_accumulators + + def _value_opt(self, block, parameters): + check_memory_usage("before value_trainer create accumulators") + value_opt(block, parameters) + check_memory_usage("after value_trainer create accumulators") + + self.value_trainer.optimizer._inner_opt._inner_opt._create_accumulators = types.MethodType( + _value_opt, self.value_trainer.optimizer._inner_opt._inner_opt + ) + for epoch in range(epochs_trained, num_train_epochs): if isinstance(train_dataloader, paddle.io.DataLoader) and isinstance( train_dataloader.batch_sampler, DistributedBatchSampler @@ -1236,23 +1275,31 @@ def train( logger.info("Doing rl step...") self.timers and self.timers("rl_step").start() - with self.enable(self.value_trainer.optimizer): - rl_info = 
self.rl_step(rl_batch) - paddle.device.cuda.empty_cache() - self.timers and self.timers("rl_step").stop() - - # if "optimizer" in self.args.offload_level: - # self.timers and self.timers("offload-value-optimizer").start() - # offload_tensor_to_cpu(self.value_trainer.optimizer.state_dict()) - # self.timers and self.timers("offload-value-optimizer").stop() - - if self.use_ptx: - logger.info("Doing ptx step...") - self.timers and self.timers("ptx_step").start() - ptx_info = self.ptx_step(ptx_batch) - rl_info.update(ptx_info) + check_memory_usage("startup memory") + with self.enable(self.policy_trainer.optimizer): + # with self.enable(self.value_trainer.optimizer): + with self.enable(): + check_memory_usage("startup enable memory") + rl_info = self.rl_step(rl_batch) + check_memory_usage("rl_step end memory") paddle.device.cuda.empty_cache() - self.timers and self.timers("ptx_step").stop() + check_memory_usage("clear_cache rl_step memory") + self.timers and self.timers("rl_step").stop() + + # if "optimizer" in self.args.offload_level: + # self.timers and self.timers("offload-value-optimizer").start() + # offload_tensor_to_cpu(self.value_trainer.optimizer.state_dict()) + # self.timers and self.timers("offload-value-optimizer").stop() + + if self.use_ptx: + logger.info("Doing ptx step...") + self.timers and self.timers("ptx_step").start() + ptx_info = self.ptx_step(ptx_batch) + rl_info.update(ptx_info) + self.timers and self.timers("ptx_step").stop() + check_memory_usage("ptx_step end memory") + paddle.device.cuda.empty_cache() + check_memory_usage("clear_cache ptx_step memory") # if "optimizer" in self.args.offload_level: # self.timers and self.timers("offload-policy-optimizer").start() @@ -1266,9 +1313,11 @@ def train( rl_info = metric.update(rl_info) # on_step_end self.control = self.callback_handler.on_step_end(args, self.state, self.control) + # print("="*20, "step end") else: # on_sub_step_end self.control = self.callback_handler.on_substep_end(args, self.state, self.control) + # print("=" * 20, "sub step end") self._maybe_log_save_evaluate(rl_info, None, epoch, ignore_keys_for_eval, inputs=inputs) self._print_timer() @@ -1402,33 +1451,35 @@ def rl_step(self, rl_batch: Dict[str, paddle.Tensor]) -> Dict[str, Any]: old_reward_values = rl_batch["reward_values"] # length: src+tgt(-1) reward_returns = rl_batch["reward_returns"] # length: src+tgt(-1) - policy_trainer_inputs = { + value_trainer_inputs = { "input_ids": input_ids, "attention_mask": attention_mask, "position_ids": position_ids, - "old_log_probs": old_log_probs, - "reward_advantages": reward_advantages, + "old_reward_values": old_reward_values, + "reward_returns": reward_returns, "sequence_mask": sequence_mask, } - actor_loss = self.policy_trainer.full_training_step(**policy_trainer_inputs) + with self.enable(self.value_trainer.optimizer): + reward_critic_loss = self.value_trainer.full_training_step(**value_trainer_inputs) - value_trainer_inputs = { + policy_trainer_inputs = { "input_ids": input_ids, "attention_mask": attention_mask, "position_ids": position_ids, - "old_reward_values": old_reward_values, - "reward_returns": reward_returns, + "old_log_probs": old_log_probs, + "reward_advantages": reward_advantages, "sequence_mask": sequence_mask, } - reward_critic_loss = self.value_trainer.full_training_step(**value_trainer_inputs) + actor_loss = self.policy_trainer.full_training_step(**policy_trainer_inputs) # metric - rewards = rl_batch["rewards"] - rewards = rewards.mean() - ref_log_probs = rl_batch["ref_log_probs"] - kl_divergence 
= ((old_log_probs - ref_log_probs) * sequence_mask).sum(axis=-1).mean() - mean_generated_length = sequence_mask.cast(paddle.float32).sum(axis=-1).mean() - max_generated_length = sequence_mask.cast(paddle.float32).sum(axis=-1).max() + with paddle.no_grad(): + rewards = rl_batch["rewards"] + rewards = rewards.mean() + ref_log_probs = rl_batch["ref_log_probs"] + kl_divergence = ((old_log_probs - ref_log_probs) * sequence_mask).sum(axis=-1).mean() + mean_generated_length = sequence_mask.cast(paddle.float32).sum(axis=-1).mean() + max_generated_length = sequence_mask.cast(paddle.float32).sum(axis=-1).max() return { # when using PipelienParallel, the loss returned is 0 when not reach @@ -1453,6 +1504,8 @@ def ptx_step(self, ptx_batch: Dict[str, paddle.Tensor]) -> Dict[str, Any]: } def enable(self, *args): + # note: must keep the same model since actor_model, reward_model etc. + # are property enable_map = { # maybe use `model: (pattern, enable_method, disable_method)`` self.actor_model: "eval", @@ -1462,7 +1515,15 @@ def enable(self, *args): self.policy_trainer.optimizer: "optimizer", self.value_trainer.optimizer: "optimizer", } + # if use an extra eval model to do eval/generation, switch on actor_model + # and reward_critic_model; otherwise no need to switch + if getattr(self.policy_trainer, "_inner_eval_model", None) is not None: + enable_map.pop(self.actor_model) + if getattr(self.value_trainer, "_inner_eval_model", None) is not None: + enable_map.pop(self.reward_critic_model) objs = [arg for arg in args if enable_map.get(arg, "") in self.args.offload_level] + # print("=" * 20, "enable", self.args.offload_level, len(objs), + # [arg for arg in args if enable_map.get(arg, "")]) return enable(*objs) def split_ptx_micro_batches( @@ -1493,6 +1554,7 @@ def _impl(self, data): return _impl + @paddle.no_grad() @data_dispatch def split_rl_micro_batches( self, @@ -1507,15 +1569,20 @@ def split_rl_micro_batches( # 1. scope guard for offload, we would split post_rollout into multiple # sub-methods to offload in-time # 2. decorate split_rl_micro_batches to automatically split/merge data - with self.enable(self.actor_model, self.reference_model), infer_guard(self.policy_trainer): + with self.enable(self.actor_model, self.reference_model): # generate for multi batches and then disable FuseMT model - for i in range(0, total_batch_size, micro_batch_size): - micro_batch = {} - micro_batch = map_structure( - lambda tensor: tensor[i : i + micro_batch_size], - prompt_only_batch, - ) - micro_batches.extend(self.generate(micro_batch)) + with infer_guard(self.policy_trainer): + # dist.barrier() + # print("="*20, "begin generate") + for i in range(0, total_batch_size, micro_batch_size): + micro_batch = {} + micro_batch = map_structure( + lambda tensor: tensor[i : i + micro_batch_size], + prompt_only_batch, + ) + micro_batches.extend(self.generate(micro_batch)) + # dist.barrier() + # paddle.device.cuda.synchronize() # get log_probs for multi batches and then disable actor/refer rmodel for micro_batch in micro_batches: # position_ids is necessary for non-right padding @@ -1524,6 +1591,7 @@ def split_rl_micro_batches( # (number of 0s) in source to calculate end offsets. 
micro_batch["position_ids"] = make_position_ids(micro_batch["attention_mask"]) micro_batch.update(self.rollout_logprob(**micro_batch)) + # print("="*20, "micro_batch", micro_batch) # get reward/value for multi batches and then disable reward/value model with self.enable(self.reward_critic_model, self.reward_model): @@ -1612,8 +1680,8 @@ def rollout_reward_value( reward_attention_mask = reward_tokenize_output["attention_mask"] reward_position_ids = make_position_ids(reward_attention_mask) else: - # for text in self.tokenizer.batch_decode(sequence, skip_special_tokens=True): - # print(text) + for text in self.tokenizer.batch_decode(input_ids, skip_special_tokens=True): + print(text) reward_input_ids = input_ids reward_attention_mask = attention_mask reward_position_ids = position_ids diff --git a/examples/RLHF/trainer_utils.py b/examples/RLHF/trainer_utils.py index a8858cf0b85a..51ee1017be42 100644 --- a/examples/RLHF/trainer_utils.py +++ b/examples/RLHF/trainer_utils.py @@ -14,6 +14,7 @@ import inspect import os +import time from contextlib import contextmanager from typing import Dict @@ -235,6 +236,9 @@ def full_training_step(self: Trainer, inputs: Dict[str, paddle.Tensor], **kwargs ignore_keys_for_eval = kwargs.get("ignore_keys_for_eval", None) tr_loss = kwargs.get("tr_loss", 0.0) model = kwargs.get("model", self.model_wrapped) + # needed in _maybe_log_save_evaluate + self._globalstep_last_logged = getattr(self, "_globalstep_last_logged", 0) + self._globalstep_last_start_time = getattr(self, "_globalstep_last_start_time", time.time()) args = self.args @@ -397,7 +401,7 @@ def full_training_step(self: Trainer, inputs: Dict[str, paddle.Tensor], **kwargs for buffer in buffers: buffer._clear_grad_storage() else: - self.optimizer.clear_grad() + self.optimizer.clear_grad(set_to_zero=False) self.callback_handler.on_optimizer_end( args, self.state, self.control, scaler=self.scaler if self.do_grad_scaling else None From 5e46ab652e3e3a6435913c2714e28832cae22d86 Mon Sep 17 00:00:00 2001 From: whucsgs Date: Thu, 9 May 2024 02:30:38 +0000 Subject: [PATCH 40/46] Clead code --- examples/RLHF/infer_utils.py | 44 +----------- examples/RLHF/ppo_trainer.py | 129 +---------------------------------- 2 files changed, 3 insertions(+), 170 deletions(-) diff --git a/examples/RLHF/infer_utils.py b/examples/RLHF/infer_utils.py index 7ea19b50bb11..2eecfb09bb38 100644 --- a/examples/RLHF/infer_utils.py +++ b/examples/RLHF/infer_utils.py @@ -187,36 +187,12 @@ def set_state_dict(self, model, offload_model=True): param, (convert_fun, args) = k, v args = [state_dict[name] for name in args] value = convert_fun(*args) - # non_share_params = [] - # for arg in args: - # # shared params no need to offload - # if value is not arg: - # non_share_params.append(arg) - # print("=" * 20, "name", v[1], - # [(arg.shape, arg.place, arg.dtype) for arg in args], - # value.shape, value.place, value.dtype, - # isinstance(value.place, paddle.CUDAPlace), - # value.place.is_gpu_place()) if offload_model: for arg in args: # shared params no need to offload if value is not arg: - # arg.to(offload_place, blocking=True) - # cpu_arg = arg.pin_memory() cpu_arg = arg._copy_to(offload_place, blocking=False) cpu_arg._share_buffer_to(arg) - # print("=" * 20, "not share param name", v[1], - # value.place.is_gpu_place(), - # value._is_initialized(), param._is_initialized()) - # else: - # print("=" * 20, "share param name", v[1], - # value.place.is_gpu_place(), - # value._is_initialized(), param._is_initialized()) - # print("=" * 20, "name", v[1], - # 
[(arg.shape, arg.place, arg.dtype) for arg in args], - # value.shape, value.place, value.dtype, - # isinstance(value.place, paddle.CUDAPlace), - # value.place.is_gpu_place()) if not isinstance(value, paddle.Tensor): param.set_value(value) # elif isinstance(value.place, paddle.CUDAPlace): @@ -227,18 +203,7 @@ def set_state_dict(self, model, offload_model=True): param.get_tensor()._share_data_with(value.get_tensor()) else: param.copy_(value, True) - # if offload_model: - # if value is not args[0]: - # value._clear_data() - # else: - # value.to(offload_place, blocking=True) - - # if offload_model: - # for param in non_share_params: - # param.to(offload_place, blocking=True) - # if offload_model: - # for param in non_share_params: - # param.to(offload_place, blocking=False) + paddle.device.cuda.synchronize() def _preprocess(self, source): @@ -305,9 +270,7 @@ def infer_guard(trainer, offload_model=True): if policy_predictor is None: policy_predictor = Predictor.create_predictor(trainer) if not policy_predictor.is_available: - # print("="*20, "enable predictor begin") policy_predictor.enable(model, offload_model=offload_model) - # print("="*20, "enable predictor end") # TODO(guosheng): patch for dist.all_recude to use tp group, fix it later ori_all_reduce = dist.all_reduce @@ -342,19 +305,15 @@ def enable(self): self.model, with_offload="train_model" in trainer.args.offload_level, ) - # print("=" * 20, "enable export_evaluate_model") else: reload_tensor_to_gpu(self.model.state_dict()) - # print("=" * 20, "enable reload_tensor_to_gpu") def disable(self): trainer = self.trainer if trainer.model is not self.model: cleanup_tensor_space(self.model.state_dict()) - # print("=" * 20, "disable cleanup_tensor_space") else: offload_tensor_to_cpu(self.model.state_dict()) - # print("=" * 20, "disable offload_tensor_to_cpu") def __getattr__(self, name): try: @@ -408,7 +367,6 @@ def generate(self, *args, **kwargs): # cache) policy_predictor.input_length = input_ids.shape[-1] outputs = policy_predictor.predict(prompts) - print("=" * 20, "output shape", outputs[0].shape) if generation_config.trunc_input: outputs = (outputs[0][:, policy_predictor.infer_input_length :],) diff --git a/examples/RLHF/ppo_trainer.py b/examples/RLHF/ppo_trainer.py index c1fe67e6cf9d..fd1d8c33c5af 100644 --- a/examples/RLHF/ppo_trainer.py +++ b/examples/RLHF/ppo_trainer.py @@ -17,7 +17,6 @@ import math import os import time -import types from typing import Any, Callable, Dict, List, Optional, Tuple, Union import paddle @@ -62,7 +61,6 @@ Trainer, TrainerCallback, TrainingArguments, - check_memory_usage, logger, speed_metrics, ) @@ -535,11 +533,9 @@ def __enter__(self): obj.enable() else: reload_tensor_to_gpu(obj.state_dict()) - # print("=" * 20, "enable reload_tensor_to_gpu") # offload_tensor_to_cpu/reload_tensor_to_gpu use non-blocking copy if len(self.objs) > 0: paddle.device.cuda.synchronize() - check_memory_usage("enable memory") def __exit__(self, *args): for obj in self.objs: @@ -547,11 +543,9 @@ def __exit__(self, *args): obj.disable() else: offload_tensor_to_cpu(obj.state_dict()) - # print("=" * 20, "disable offload_tensor_to_cpu") # offload_tensor_to_cpu/reload_tensor_to_gpu use non-blocking copy if len(self.objs) > 0: paddle.device.cuda.synchronize() - check_memory_usage("disable memory") class PolicyTrainer(StepTrainer): @@ -1009,66 +1003,9 @@ def gen_epoch_data(): ): # generate batches self.set_eval() - # print("=" * 20, "gen data begin") - - # self.optimizer.offload() - # if self.args.eval_mode is not None and "optimizer" in 
self.args.offload_level: - # self.timers and self.timers("offload-optimizer").start() - # offload_tensor_to_cpu(self.policy_trainer.optimizer.state_dict()) - # offload_tensor_to_cpu(self.value_trainer.optimizer.state_dict()) - # self.timers and self.timers("offload-optimizer").stop() with ema(self.policy_trainer), ema(self.value_trainer): - # if self.args.eval_mode is not None: - # self.timers and self.timers("export-evaluate-model").start() - - # self.policy_trainer.export_evaluate_model( - # self.policy_trainer.model, - # self._policy_model_eval, - # with_offload="train_model" in self.args.offload_level, - # ) - # self.value_trainer.export_evaluate_model( - # self.value_trainer.model, - # self._value_model_eval, - # with_offload="train_model" in self.args.offload_level, - # ) - # gp = ( - # self.policy_trainer._policy_model_eval_group - # if hasattr(self.policy_trainer, "_policy_model_eval_group") - # else None - # ) - # gp = getattr(self.policy_trainer, "_data_trans_group", None) - # gp = create_data_trans_group(self.args.logical_process_index, paddle.distributed.get_rank(), self._policy_model_eval.config.tensor_parallel_degree) - # # todo: zhui - # self.timers and self.timers("export-evaluate-model").stop() - - # self.reference_model.reload() - # self.reward_model.reload() - # if "reward" in self.args.offload_level: - # self.timers and self.timers("reload-reward").start() - # reload_tensor_to_gpu(self.reference_model.state_dict()) - # reload_tensor_to_gpu(self.reward_model.state_dict()) - # self.timers and self.timers("reload-reward").stop() - - # TODO(guosheng): guard for data split/merge - # self.timers and self.timers("resplit-data").start() - # prompt_only_batch = data_group_split(prompt_only_batch, group=gp) - # self.timers and self.timers("resplit-data").stop() - # self.timers and self.timers("split-rl-micro-batches").start() - check_memory_usage("split_rl_micro_batches begin memory") rl_batches = self.split_rl_micro_batches(prompt_only_batch) - check_memory_usage("clear_cache split_rl_micro_batches memory") - # self.timers and self.timers("split-rl-micro-batches").stop() - # self.timers and self.timers("merge-data").start() - # if gp is not None: - # # input_ids_length = rl_batches[0]["input_ids"].shape[-1] - # # rl_batches[0]["input_ids_length"] = paddle.to_tensor( - # # [input_ids_length] * rl_batches[0]["input_ids"].shape[0], dtype="int64" - # # ) - # rl_batches = data_group_merge(rl_batches, group=gp) - # # input_ids_length_batchs = rl_batches[0].pop("input_ids_length") - # # rl_batches[0] = repad_rl_batches(rl_batches[0], input_ids_length_batchs) - # self.timers and self.timers("merge-data").stop() self.timers and self.timers("ptx-batch").start() if self.use_ptx: @@ -1077,21 +1014,7 @@ def gen_epoch_data(): ptx_batches = [None for _ in range(len(rl_batches))] self.timers and self.timers("ptx-batch").stop() - check_memory_usage("gen data memory") paddle.device.cuda.empty_cache() - check_memory_usage("clear_cache gen data memory") - - # if self.args.offload_level is not None: - # if self.args.eval_mode is not None and "eval" in self.args.offload_level: - # self.timers and self.timers("offload-eval").start() - # cleanup_tensor_space(self._policy_model_eval.state_dict()) - # cleanup_tensor_space(self._value_model_eval.state_dict()) - # self.timers and self.timers("offload-eval").stop() - # if "reward" in self.args.offload_level: - # self.timers and self.timers("offload-reward").start() - # offload_tensor_to_cpu(self.reference_model.state_dict()) - # 
offload_tensor_to_cpu(self.reward_model.state_dict()) - # self.timers and self.timers("offload-reward").stop() self.set_train() for _ in range(self.args.update_iters): @@ -1229,28 +1152,6 @@ def train( self._globalstep_last_start_time = start_time # self.timers and self.timers("read-data").start() - policy_opt = self.policy_trainer.optimizer._inner_opt._inner_opt._create_accumulators - - def _policy_opt(self, block, parameters): - check_memory_usage("before policy_trainer create accumulators") - policy_opt(block, parameters) - check_memory_usage("after policy_trainer create accumulators") - - self.policy_trainer.optimizer._inner_opt._inner_opt._create_accumulators = types.MethodType( - _policy_opt, self.policy_trainer.optimizer._inner_opt._inner_opt - ) - - value_opt = self.value_trainer.optimizer._inner_opt._inner_opt._create_accumulators - - def _value_opt(self, block, parameters): - check_memory_usage("before value_trainer create accumulators") - value_opt(block, parameters) - check_memory_usage("after value_trainer create accumulators") - - self.value_trainer.optimizer._inner_opt._inner_opt._create_accumulators = types.MethodType( - _value_opt, self.value_trainer.optimizer._inner_opt._inner_opt - ) - for epoch in range(epochs_trained, num_train_epochs): if isinstance(train_dataloader, paddle.io.DataLoader) and isinstance( train_dataloader.batch_sampler, DistributedBatchSampler @@ -1265,46 +1166,22 @@ def _value_opt(self, block, parameters): # self.callback_handler.on_load_data_end(args, self.state, self.control, inputs=inputs) rl_batch, ptx_batch = inputs # TODO(guosheng): make rl_step/ptx_step run with autocast_smart_context_manager - - # policy_model.reload() - # value_model.reload() - # self.timers and self.timers("offload-reload").start() - # reload_tensor_to_gpu(self.actor_model.state_dict()) - # reload_tensor_to_gpu(self.reward_critic_model.state_dict()) - # self.timers and self.timers("offload-reload").stop() - logger.info("Doing rl step...") self.timers and self.timers("rl_step").start() - check_memory_usage("startup memory") with self.enable(self.policy_trainer.optimizer): # with self.enable(self.value_trainer.optimizer): with self.enable(): - check_memory_usage("startup enable memory") rl_info = self.rl_step(rl_batch) - check_memory_usage("rl_step end memory") paddle.device.cuda.empty_cache() - check_memory_usage("clear_cache rl_step memory") self.timers and self.timers("rl_step").stop() - # if "optimizer" in self.args.offload_level: - # self.timers and self.timers("offload-value-optimizer").start() - # offload_tensor_to_cpu(self.value_trainer.optimizer.state_dict()) - # self.timers and self.timers("offload-value-optimizer").stop() - if self.use_ptx: logger.info("Doing ptx step...") self.timers and self.timers("ptx_step").start() ptx_info = self.ptx_step(ptx_batch) rl_info.update(ptx_info) self.timers and self.timers("ptx_step").stop() - check_memory_usage("ptx_step end memory") paddle.device.cuda.empty_cache() - check_memory_usage("clear_cache ptx_step memory") - - # if "optimizer" in self.args.offload_level: - # self.timers and self.timers("offload-policy-optimizer").start() - # offload_tensor_to_cpu(self.policy_trainer.optimizer.state_dict()) - # self.timers and self.timers("offload-policy-optimizer").stop() self.state.global_step += 1 self.state.epoch = epoch + (step + 1) / steps_in_epoch @@ -1313,11 +1190,9 @@ def _value_opt(self, block, parameters): rl_info = metric.update(rl_info) # on_step_end self.control = self.callback_handler.on_step_end(args, self.state, 
self.control) - # print("="*20, "step end") else: # on_sub_step_end self.control = self.callback_handler.on_substep_end(args, self.state, self.control) - # print("=" * 20, "sub step end") self._maybe_log_save_evaluate(rl_info, None, epoch, ignore_keys_for_eval, inputs=inputs) self._print_timer() @@ -1680,8 +1555,8 @@ def rollout_reward_value( reward_attention_mask = reward_tokenize_output["attention_mask"] reward_position_ids = make_position_ids(reward_attention_mask) else: - for text in self.tokenizer.batch_decode(input_ids, skip_special_tokens=True): - print(text) + # for text in self.tokenizer.batch_decode(input_ids, skip_special_tokens=True): + # print(text) reward_input_ids = input_ids reward_attention_mask = attention_mask reward_position_ids = position_ids From d5389175e0f94bbc68075eb70f224bf841cf68db Mon Sep 17 00:00:00 2001 From: whucsgs Date: Thu, 16 May 2024 10:57:34 +0000 Subject: [PATCH 41/46] Update README --- examples/RLHF/README.md | 18 ++++++++++++------ examples/RLHF/infer_utils.py | 3 +-- 2 files changed, 13 insertions(+), 8 deletions(-) diff --git a/examples/RLHF/README.md b/examples/RLHF/README.md index 732a626061d6..86ad71c64e92 100644 --- a/examples/RLHF/README.md +++ b/examples/RLHF/README.md @@ -1,6 +1,6 @@ # RLHF PPO -提供了基于强化学习 PPO 算法对 LLM 进行人类偏好对齐的代码及完整使用示例。其中 PPO 代码实现细节参考了 [PKU-Alignment/safe-rlhf](https://github.com/PKU-Alignment/safe-rlhf)(PKU Beaver) 中的 PPO 实现,支持reward normalization、pretraining loss等常用的 PPO 稳定训练策略;示例使用 PKU-Alignment/safe-rlhf 提供的部分数据集和模型。后续将持续完善扩展,支持更好效果、更低成本、更高性能、更大规模的 RLHF 能力。 +提供了基于强化学习 PPO 算法对 LLM 进行人类偏好对齐的代码及完整使用示例,支持**3D 分布式并行训练以及 rollout 阶段使用预测优化进行生成加速**。其中 PPO 代码实现细节参考了 [PKU-Alignment/safe-rlhf](https://github.com/PKU-Alignment/safe-rlhf)(PKU Beaver) 中的 PPO 实现,支持reward normalization、pretraining loss等常用的 PPO 稳定训练策略;示例使用 PKU-Alignment/safe-rlhf 提供的部分数据集和模型。后续将持续完善扩展,支持更好效果、更低成本、更高性能、更大规模的 RLHF 能力。 ## 快速开始 @@ -14,6 +14,9 @@ ├── ppo_main.py # RLHF训练脚本 ├── ppo_config.json # RLHF训练配置文件 ├── ppo_trainer.py # RLHF训练执行器py脚本 +├── ppo_config.json # RLHF训练配置文件 +├── trainer_utils.py # Trainer补丁及工具py脚本 +├── infer_utils.py # 生成加速工具py脚本 ├── data # 数据集相关目录 │ └── base.py # 数据集基类及工具py文件 │ └── alpaca.py # alpaca(raw)数据集py文件 @@ -24,6 +27,10 @@ ├── models # 模型相关目录 │ └── score_model_utils.py # score model基类及工具py文件 │ └── score_model.py # score model模型定义py文件 +│ └── ppo_model_utils.py # PPO loss等模型策略py文件 +│ └── pp_model_utils.py # 流水线并行补丁及工具py文件 +│ └── model_pp.py # 流水线并行模型py文件 +│ └── infer_model_utils.py # 预测加速模型补丁及工具py文件 └── README.md ``` @@ -31,9 +38,9 @@ - Python >= 3.10 - PaddlePaddle >= 2.6.0 -- PaddleNLP >= 2.6.0 +- PaddleNLP 最新版本 -此外还需要安装以下依赖:`pip install rich` +如需使用生成加速功能,需要安装 [paddlenlp_ops](https://github.com/PaddlePaddle/PaddleNLP/tree/develop/csrc) ,请使用 `git clone https://github.com/PaddlePaddle/PaddleNLP.git` 克隆 PaddleNLP 代码库并且将 PaddleNLP/llm 目录的路径加入 PYTHONPATH(后续将进行完善)。安装 paddlenlp_ops 后训练时将直接开启生成加速(开启流水线并行时不支持生成加速),否则使用原生动态图进行生成。 ### 数据准备 @@ -184,7 +191,8 @@ python -u -m paddle.distributed.launch reward_main.py ./reward_config.json RLHF 阶段需要 actor model、reference model、critic model、reward model 四个模型;actor-model/reference-model 使用 SFT 模型进行 initialize/frozen;critic-model/reward-model 使用 reward 模型进行 initialize/frozen (另外注意若 SFT 使用 LoRA 请先将 LoRA 权重合并)。这里使用 PKU-Alignment/PKU-SafeRLHF 提供的 SFT 模型([PKU-Alignment/alpaca-7b-reproduced](https://huggingface.co/PKU-Alignment/alpaca-7b-reproduced))和 reward 模型([PKU-Alignment/beaver-7b-v1.0-reward](https://huggingface.co/PKU-Alignment/beaver-7b-v1.0-reward),注意该模型只关注 helpful 未考量 harmless)作为示例,使用 `ppo_main.py` 
脚本根据 `ppo_config.json` 进行 RLHF 训练。 ``` -python -u -m paddle.distributed.launch ppo_main.py ./ppo_config.json +# 类型提升 warning 暂时通过 loglevel 屏蔽,待后续修复 +GLOG_minloglevel=2 python -u -m paddle.distributed.launch ppo_main.py ./ppo_config.json ``` `ppo_config.json` 中的绝大部分参数释义同[LLM 精调](https://github.com/PaddlePaddle/PaddleNLP/tree/develop/llm#2-%E7%B2%BE%E8%B0%83),不再赘述,重点给出以下参数配置及释义(使用 PKU-Alignment/PKU-SafeRLHF 中的默认值): @@ -210,8 +218,6 @@ python -u -m paddle.distributed.launch ppo_main.py ./ppo_config.json 另外所有 [`TrainingArguments` 支持参数配置](https://paddlenlp.readthedocs.io/zh/latest/trainer.html#trainingarguments)将为 actor-model 和 critic-model 的训练复用(如`sharding_stage`),除单独提供了 `critic_learning_rate/critic_weight_decay/critic_lr_scheduler_type/critic_warmup_ratio/critic_recompute` 这些参数支持为 critic-model 训练单独指定相应配置。actor-model 和 critic-model 的 checkpoints 将分别保存在 `outpt_dir` 所指定目录的 policy 和 value 文件夹下。 -当前示例中所用数据及规模 RLHF 训练基于 sharding stage3 使用 NVIDIA A100 80G 4卡/8卡训练验证。 - ### 推理 训练完成后可以直接使用 `outpt_dir` 所指定目录中 policy 文件夹下的 checkpoints 按照[LLM 推理](https://github.com/PaddlePaddle/PaddleNLP/tree/develop/llm#4-%E6%8E%A8%E7%90%86)部分的介绍来进行推理,请参考相应部分内容。 diff --git a/examples/RLHF/infer_utils.py b/examples/RLHF/infer_utils.py index 2eecfb09bb38..eefe7c22fb9f 100644 --- a/examples/RLHF/infer_utils.py +++ b/examples/RLHF/infer_utils.py @@ -120,8 +120,7 @@ def _create_param(self, *args, **kwargs): "batch_size": trainer.args.per_device_train_batch_size, # infer model do not support top_k, and differ with non-infer model # generation which gets default top_K=50 using generation_config.top_k - "top_p": 0.8, - # trainer.args.top_p, + "top_p": trainer.args.top_p, "temperature": trainer.args.temperature, "repetition_penalty": trainer.args.repetition_penalty, } From 1feb5ee9c5fe30a01f66c8045b553a09e0debb38 Mon Sep 17 00:00:00 2001 From: whucsgs Date: Tue, 4 Jun 2024 12:39:29 +0000 Subject: [PATCH 42/46] Update ppo_trainer --- examples/RLHF/README.md | 2 +- examples/RLHF/infer_utils.py | 2 +- examples/RLHF/models/score_model.py | 2 + examples/RLHF/models/score_model_utils.py | 2 +- examples/RLHF/ppo_config.json | 8 +- examples/RLHF/ppo_main.py | 26 +++- examples/RLHF/ppo_trainer.py | 171 +++++++++++++--------- examples/RLHF/trainer_utils.py | 3 +- 8 files changed, 135 insertions(+), 81 deletions(-) diff --git a/examples/RLHF/README.md b/examples/RLHF/README.md index 71f8017c2034..478f8365833b 100644 --- a/examples/RLHF/README.md +++ b/examples/RLHF/README.md @@ -221,7 +221,7 @@ GLOG_minloglevel=2 python -u -m paddle.distributed.launch ppo_main.py ./ppo_conf 此外为了支持更高性、更大规模的 RLHF 训练提供了以下特殊参数配置,可以按需使用: - `use_fusemt`:安装 paddlenlp_ops 后将在 rollout 生成时开启生成加速(开启流水线并行时不支持生成加速),通过此设置可以禁用生成加速。 - `eval_mode`:支持为空或者设置为 "single"、"tensor_parallel";通常可以在使用流水线并行训练时设置为"tensor_parallel",以此在 rollout 生成阶段使用非流水线并行模型并进行生成加速。 -- `offload_level`:支持设置为"reward"、"optimizer"或者同时使用(空格分隔),用于在不同阶段 model/optimizer 使用结束后及时 offload 并在下次使用时 reload 相应参数权重以节省显存。 +- `offload_level`:支持设置为"freeze_model"、"optimizer"、"train_model"或者同时使用(空格分隔),分别指示 reward+reference 两个冻结模型、actor+critic 两个训练模型的优化器状态和模型参数的 offload/reload,用于在不同阶段 model/optimizer 使用结束后及时 offload 并在下次使用时 reload 相应参数权重以节省显存。 另外注意,在使用流水线并行时(pipeline_parallel_degree大于1)建议将 `dataloader_drop_last` 设置为 true, 以此避免不同batch size带来的问题。 diff --git a/examples/RLHF/infer_utils.py b/examples/RLHF/infer_utils.py index eefe7c22fb9f..b3979c65dfd7 100644 --- a/examples/RLHF/infer_utils.py +++ b/examples/RLHF/infer_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations 
under the License. - +from __future__ import annotations import copy import inspect import types diff --git a/examples/RLHF/models/score_model.py b/examples/RLHF/models/score_model.py index e67a4b036db4..aa0f50977945 100644 --- a/examples/RLHF/models/score_model.py +++ b/examples/RLHF/models/score_model.py @@ -12,6 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import annotations + from typing import Any import paddle diff --git a/examples/RLHF/models/score_model_utils.py b/examples/RLHF/models/score_model_utils.py index 73dc551b28e3..5515d56fbc20 100644 --- a/examples/RLHF/models/score_model_utils.py +++ b/examples/RLHF/models/score_model_utils.py @@ -204,7 +204,7 @@ def get_score( end_score.append(scores[i, end_index]) # size = (D,) end_score = paddle.stack(end_score, axis=0) # size = (B, D) - if self.training: + if self.training and self.do_normalize: if dist.is_initialized(): gathered_end_score_list = [] diff --git a/examples/RLHF/ppo_config.json b/examples/RLHF/ppo_config.json index 68d28340b141..7bc5f88e515f 100644 --- a/examples/RLHF/ppo_config.json +++ b/examples/RLHF/ppo_config.json @@ -43,15 +43,15 @@ "do_eval": true, "disable_tqdm": true, "save_total_limit": 1, - "sharding_parallel_degree": 2, + "sharding_parallel_degree": 4, "sharding": "stage1", "tensor_parallel_degree": 2, - "pipeline_parallel_degree": 2, + "pipeline_parallel_degree": 1, "pipeline_parallel_config": "disable_p2p_cache_shape", "max_grad_norm": 1.0, "adam_beta1": 0.9, "adam_beta2": 0.95, "dataloader_drop_last": false, - "eval_mode": "tensor_parallel", - "offload_level": "reward" + "eval_mode": "", + "offload_level": "freeze_model" } diff --git a/examples/RLHF/ppo_main.py b/examples/RLHF/ppo_main.py index 6b1a1811e08a..1ff4cfe3059f 100644 --- a/examples/RLHF/ppo_main.py +++ b/examples/RLHF/ppo_main.py @@ -15,6 +15,7 @@ import copy import os import sys +import types # os.environ["http_proxy"] = "http://10.162.37.16:8128" # os.environ["https_proxy"] = "http://10.162.37.16:8128" @@ -37,6 +38,7 @@ from dataclasses import dataclass, field from typing import Any, Dict, Tuple +from functools import partial import paddle from data import PromptOnlyDataset, SupervisedDataset, parse_dataset @@ -82,8 +84,8 @@ class TrainingArguments(TrainingArguments): default=0.0, metadata={"help": "The coefficient for the ptx loss."}, ) - update_iters: float = field( - default=0.0, + update_iters: int = field( + default=1, metadata={"help": "The number of repeated updates on a generated batch."}, ) critic_learning_rate: float = field( @@ -114,12 +116,12 @@ class TrainingArguments(TrainingArguments): default=1.0, metadata={"help": "The value used to module the next token probabilities."}, ) - top_k: int = field( - default=1, - metadata={"help": "top_k"}, - ) + # top_k: int = field( + # default=1, + # metadata={"help": "top_k"}, + # ) top_p: float = field( - default=1.0, + default=0.8, metadata={ "help": "If set to float < 1, only the smallest set of most probable tokens with probabilities that add up to`top_p` or higher are kept for generation." 
}, @@ -147,6 +149,12 @@ class TrainingArguments(TrainingArguments): default="", metadata={"help": "Offload model, optional for: eval, reward, optimizer, train_model"}, ) + use_fusemt: bool = field( + default=True, + metadata={ + "help": "use inference model to speedup in rollout generation" + }, + ) # save_generation_output: bool = field( # default=False, @@ -475,6 +483,10 @@ def main(): if data_args.ptx_datasets is not None else None ) + if ptx_ds is not None: + # PretrainingCriterion requires shifted inputs and labels + ptx_ds.get_collator = types.MethodType( + partial(ptx_ds.get_collator.__func__, shift=True), ptx_ds) # offload # cleanup actor_eval_model, reward_critic_eval_model diff --git a/examples/RLHF/ppo_trainer.py b/examples/RLHF/ppo_trainer.py index 59e5fb0040b3..3059537df61e 100644 --- a/examples/RLHF/ppo_trainer.py +++ b/examples/RLHF/ppo_trainer.py @@ -16,6 +16,7 @@ import itertools import math import os +import sys import time from typing import Any, Callable, Dict, List, Optional, Tuple, Union @@ -30,7 +31,6 @@ offload_tensor_to_cpu, reload_tensor_to_gpu, ) -from data import DummyDataset, PromptOnlyBatch from infer_utils import InferEvalModel, infer_guard from models.ppo_model_utils import ( RLHFPPOMixedLoss, @@ -114,7 +114,7 @@ def __init__( if getattr(self, "loss_cls", None) and self.criterion is None: self.criterion = self.create_criterion() - self.use_fusemt = getattr(args, "use_fusemt", True) + self.use_fusemt = getattr(args, "use_fusemt", False) # ablout 4s slower than infer generation without ema self.use_ema = getattr(args, "use_ema", False) self.shard_ema = getattr(args, "shard_ema", False) @@ -534,8 +534,9 @@ def __enter__(self): else: reload_tensor_to_gpu(obj.state_dict()) # offload_tensor_to_cpu/reload_tensor_to_gpu use non-blocking copy + # maybe overlap with compute later if len(self.objs) > 0: - paddle.device.cuda.synchronize() + paddle.device.synchronize() def __exit__(self, *args): for obj in self.objs: @@ -544,8 +545,9 @@ def __exit__(self, *args): else: offload_tensor_to_cpu(obj.state_dict()) # offload_tensor_to_cpu/reload_tensor_to_gpu use non-blocking copy + # maybe overlap with compute later if len(self.objs) > 0: - paddle.device.cuda.synchronize() + paddle.device.synchronize() class PolicyTrainer(StepTrainer): @@ -633,6 +635,17 @@ def update(self, metrics: Dict[str, paddle.Tensor]) -> Union[None, Dict[str, flo return out_metrics +def data_dispatch(fun): + def _impl(self, data): + gp = getattr(self.policy_trainer, "_data_trans_group", None) + data = data_group_split(data, group=gp) + data = fun(self, data) + data = data_group_merge(data, group=gp) + return data + + return _impl + + class PPOTrainer(Trainer): def __init__( self, @@ -679,6 +692,7 @@ def __init__( self.eval_dataset = eval_dataset (policy_model, reference_model, reward_model, value_model, policy_model_eval, value_model_eval) = model + self._model_config = policy_model.config # use this to change flash attention dynamicly self._policy_model_eval = policy_model_eval self._value_model_eval = value_model_eval @@ -864,9 +878,9 @@ def prediction_step( seq = self.actor_model.generate( input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"], - # position_ids=inputs["position_ids"] - # if "position_ids" in inputs - # else make_position_ids(inputs["attention_mask"]), + position_ids=inputs["position_ids"] + if "position_ids" in inputs + else make_position_ids(inputs["attention_mask"]), generation_config=self.generation_config, synced_gpus=ShardingOption.FULL_SHARD in 
self.policy_trainer.args.sharding, )[0] @@ -991,15 +1005,11 @@ def init_train_model_opt( return policy_model, value_model def get_epoch_iterator(self): - # TODO(guosheng): support iter dataset - num_prompt_only_batches = len(self.prompt_only_dataloader) - num_ptx_batches = len(self.ptx_dataloader) - num_ptx_replicas = (num_prompt_only_batches + num_ptx_batches - 1) // num_ptx_batches def gen_epoch_data(): for prompt_only_batch, ptx_batch in zip( - self.prompt_only_dataloader, - itertools.chain.from_iterable([self.ptx_dataloader] * num_ptx_replicas), + self.prompt_only_dataloader, + itertools.cycle(self.ptx_dataloader), ): # generate batches self.set_eval() @@ -1025,31 +1035,44 @@ class EpochIterator: def __iter__(self): return gen_epoch_data() + def __len__(self): + return len(self.prompt_only_dataloader) * ( + self.args.update_iters * + self.args.per_device_prompt_batch_size * + self.args.num_return_sequences // + self.args.per_device_train_batch_size) + return EpochIterator() def init_train_num(self: Trainer, train_dataloader: DataLoader): args = self.args total_train_batch_size = args.train_batch_size * args.gradient_accumulation_steps * args.dataset_world_size - - len_dataloader = len(train_dataloader) - num_train_sub_steps = ( - len_dataloader - * self.args.update_iters - * self.args.per_device_prompt_batch_size - * self.args.num_return_sequences - // self.args.per_device_train_batch_size - ) - num_update_steps_per_epoch = num_train_sub_steps // args.gradient_accumulation_steps - if args.max_steps > 0: - max_steps = args.max_steps - num_train_epochs = args.max_steps // num_update_steps_per_epoch + int( - args.max_steps % num_update_steps_per_epoch > 0 - ) + len_dataloader = None + if not self._is_iterable_dataset(self.train_dataset): + len_dataloader = len(train_dataloader) + num_train_sub_steps = (len_dataloader * self.args.update_iters * + self.args.per_device_prompt_batch_size * + self.args.num_return_sequences // + self.args.per_device_train_batch_size) + num_update_steps_per_epoch = num_train_sub_steps // args.gradient_accumulation_steps + num_examples = len(self.train_dataset) + if args.max_steps > 0: + max_steps = args.max_steps + num_train_epochs = args.max_steps // num_update_steps_per_epoch + int( + args.max_steps % num_update_steps_per_epoch > 0 + ) + else: + max_steps = int(num_update_steps_per_epoch * args.num_train_epochs) + num_train_epochs = math.ceil(args.num_train_epochs) + num_train_samples = total_train_batch_size * max_steps else: - max_steps = int(num_update_steps_per_epoch * args.num_train_epochs) - num_train_epochs = math.ceil(args.num_train_epochs) - num_examples = num_train_samples = total_train_batch_size * max_steps + assert args.max_steps > 0 + max_steps = args.max_steps + num_train_epochs = sys.maxsize + num_update_steps_per_epoch = args.max_steps + num_examples = total_train_batch_size * args.max_steps + num_train_samples = args.max_steps * total_train_batch_size return ( total_train_batch_size, @@ -1093,17 +1116,21 @@ def train( if self.use_ptx: with guard_set_args( - args, + args, { - "per_device_train_batch_size": self.args.per_device_prompt_batch_size - * self.args.num_return_sequences + "per_device_train_batch_size": + 1 if getattr(self.ptx_dataset, "is_intokens", False) else + self.args.per_device_prompt_batch_size * + self.args.num_return_sequences }, ), guard_set_args( - self, {"train_dataset": self.ptx_dataset, "data_collator": self.ptx_dataset.get_collator(shift=True)} - ): + self, { + "train_dataset": self.ptx_dataset, + "data_collator": 
self.ptx_dataset.get_collator() + }): self.ptx_dataloader = self.get_train_dataloader() else: - self.ptx_dataloader = DataLoader(DummyDataset(len(self.prompt_only_dataloader))) + self.ptx_dataloader = range(100) ( total_train_batch_size, len_dataloader, @@ -1168,9 +1195,9 @@ def train( # TODO(guosheng): make rl_step/ptx_step run with autocast_smart_context_manager logger.info("Doing rl step...") self.timers and self.timers("rl_step").start() - with self.enable(self.policy_trainer.optimizer): + with self.enable(self.actor_model, self.policy_trainer.optimizer): # with self.enable(self.value_trainer.optimizer): - with self.enable(): + with self.enable(): # put value optimizer guard in rl_step rl_info = self.rl_step(rl_batch) paddle.device.cuda.empty_cache() self.timers and self.timers("rl_step").stop() @@ -1178,7 +1205,11 @@ def train( if self.use_ptx: logger.info("Doing ptx step...") self.timers and self.timers("ptx_step").start() - ptx_info = self.ptx_step(ptx_batch) + with guard_set_args(self._model_config, { + # "set_attn_func": True, + # "use_flash_attention": True + }): + ptx_info = self.ptx_step(ptx_batch) rl_info.update(ptx_info) self.timers and self.timers("ptx_step").stop() paddle.device.cuda.empty_cache() @@ -1195,6 +1226,8 @@ def train( self.control = self.callback_handler.on_substep_end(args, self.state, self.control) self._maybe_log_save_evaluate(rl_info, None, epoch, ignore_keys_for_eval, inputs=inputs) self._print_timer() + if self.control.should_epoch_stop or self.control.should_training_stop: + break if step < 0: logger.warning( @@ -1266,11 +1299,19 @@ def add_kl_divergence_regularization( max=self.clip_range_score, ) # TODO(guosheng): use scatter_add/put_along_axis - batch_size = log_probs.shape[0] - for i in range(batch_size): - end_index = sequence_mask[i].nonzero()[-1] - # rewards[i, end_index] += reward_clip[i] - rewards[i, end_index] = rewards[i, end_index] + reward_clip[i] + index = paddle.cumsum(sequence_mask.cast(paddle.int64), + axis=-1).argmax(-1, keepdim=True) + rewards = paddle.put_along_axis(rewards, + index, + reward_clip.unsqueeze(axis=-1), + axis=-1, + reduce="add") + # batch_size = log_probs.shape[0] + # for i in range(batch_size): + # # print("="*20, sequence_mask[i]) + # end_index = sequence_mask[i].nonzero()[-1] + # # rewards[i, end_index] += reward_clip[i] + # rewards[i, end_index] = rewards[i, end_index] + reward_clip[i] return rewards @@ -1334,7 +1375,7 @@ def rl_step(self, rl_batch: Dict[str, paddle.Tensor]) -> Dict[str, Any]: "reward_returns": reward_returns, "sequence_mask": sequence_mask, } - with self.enable(self.value_trainer.optimizer): + with self.enable(self.reward_critic_model, self.value_trainer.optimizer): reward_critic_loss = self.value_trainer.full_training_step(**value_trainer_inputs) policy_trainer_inputs = { @@ -1383,10 +1424,10 @@ def enable(self, *args): # are property enable_map = { # maybe use `model: (pattern, enable_method, disable_method)`` - self.actor_model: "eval", - self.reward_critic_model: "eval", - self.reference_model: "reward", - self.reward_model: "reward", + self.actor_model: "train_model", + self.reward_critic_model: "train_model", + self.reference_model: "freeze_model", + self.reward_model: "freeze_model", self.policy_trainer.optimizer: "optimizer", self.value_trainer.optimizer: "optimizer", } @@ -1397,8 +1438,6 @@ def enable(self, *args): if getattr(self.value_trainer, "_inner_eval_model", None) is not None: enable_map.pop(self.reward_critic_model) objs = [arg for arg in args if enable_map.get(arg, "") in 
self.args.offload_level] - # print("=" * 20, "enable", self.args.offload_level, len(objs), - # [arg for arg in args if enable_map.get(arg, "")]) return enable(*objs) def split_ptx_micro_batches( @@ -1418,23 +1457,23 @@ def split_ptx_micro_batches( micro_batches.append(micro_batch) return micro_batches - @staticmethod - def data_dispatch(fun): - def _impl(self, data): - gp = getattr(self.policy_trainer, "_data_trans_group", None) - data = data_group_split(data, group=gp) - data = fun(self, data) - data = data_group_merge(data, group=gp) - return data + # @staticmethod + # def data_dispatch(fun): + # def _impl(self, data): + # gp = getattr(self.policy_trainer, "_data_trans_group", None) + # data = data_group_split(data, group=gp) + # data = fun(self, data) + # data = data_group_merge(data, group=gp) + # return data - return _impl + # return _impl @paddle.no_grad() - @data_dispatch + @data_dispatch # 3.10 static methods are now callable as regular functions. def split_rl_micro_batches( self, - prompt_only_batch: PromptOnlyBatch, - ) -> List[PromptOnlyBatch]: + prompt_only_batch: Dict, + ) -> List[Dict]: """Split a batch of RL samples into micro-batches.""" total_batch_size = prompt_only_batch["input_ids"].shape[0] micro_batch_size = self.args.per_device_train_batch_size @@ -1481,7 +1520,7 @@ def split_rl_micro_batches( return micro_batches @paddle.no_grad() - def generate(self, prompt_only_batch: PromptOnlyBatch) -> List[Dict[str, Any]]: + def generate(self, prompt_only_batch: Dict) -> List[Dict[str, Any]]: """Rollout a batch of experiences.""" input_ids = prompt_only_batch["input_ids"] attention_mask = prompt_only_batch["attention_mask"] @@ -1555,7 +1594,7 @@ def rollout_reward_value( reward_attention_mask = reward_tokenize_output["attention_mask"] reward_position_ids = make_position_ids(reward_attention_mask) else: - # for text in self.tokenizer.batch_decode(input_ids, skip_special_tokens=True): + # for text in self.tokenizer.batch_decode(input_ids, skip_special_tokens=False): # print(text) reward_input_ids = input_ids reward_attention_mask = attention_mask diff --git a/examples/RLHF/trainer_utils.py b/examples/RLHF/trainer_utils.py index d4edab0f0042..5073be784287 100644 --- a/examples/RLHF/trainer_utils.py +++ b/examples/RLHF/trainer_utils.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import annotations import inspect import os import time @@ -494,7 +495,7 @@ def eval(self): def train(self): self.model.train() - + def __getattr__(self, name): try: return super().__getattr__(name) From c8b3c61d0b8b379cf2ce2bc6b9b9bdedb45e76ce Mon Sep 17 00:00:00 2001 From: gongenlei Date: Tue, 11 Jun 2024 09:02:03 +0000 Subject: [PATCH 43/46] format code --- examples/RLHF/infer_utils.py | 1 + examples/RLHF/ppo_main.py | 9 ++--- examples/RLHF/ppo_trainer.py | 56 +++++++++++++++---------------- examples/RLHF/trainer_utils.py | 1 + paddlenlp/trainer/utils/helper.py | 1 - 5 files changed, 32 insertions(+), 36 deletions(-) diff --git a/examples/RLHF/infer_utils.py b/examples/RLHF/infer_utils.py index b3979c65dfd7..d0667aefe061 100644 --- a/examples/RLHF/infer_utils.py +++ b/examples/RLHF/infer_utils.py @@ -13,6 +13,7 @@ # limitations under the License. 
from __future__ import annotations + import copy import inspect import types diff --git a/examples/RLHF/ppo_main.py b/examples/RLHF/ppo_main.py index 1ff4cfe3059f..d52b30b95f90 100644 --- a/examples/RLHF/ppo_main.py +++ b/examples/RLHF/ppo_main.py @@ -37,8 +37,8 @@ # os.environ["https_proxy"] = "agent.baidu.com:8118" from dataclasses import dataclass, field -from typing import Any, Dict, Tuple from functools import partial +from typing import Any, Dict, Tuple import paddle from data import PromptOnlyDataset, SupervisedDataset, parse_dataset @@ -151,9 +151,7 @@ class TrainingArguments(TrainingArguments): ) use_fusemt: bool = field( default=True, - metadata={ - "help": "use inference model to speedup in rollout generation" - }, + metadata={"help": "use inference model to speedup in rollout generation"}, ) # save_generation_output: bool = field( @@ -485,8 +483,7 @@ def main(): ) if ptx_ds is not None: # PretrainingCriterion requires shifted inputs and labels - ptx_ds.get_collator = types.MethodType( - partial(ptx_ds.get_collator.__func__, shift=True), ptx_ds) + ptx_ds.get_collator = types.MethodType(partial(ptx_ds.get_collator.__func__, shift=True), ptx_ds) # offload # cleanup actor_eval_model, reward_critic_eval_model diff --git a/examples/RLHF/ppo_trainer.py b/examples/RLHF/ppo_trainer.py index 3059537df61e..02e01b97ebfa 100644 --- a/examples/RLHF/ppo_trainer.py +++ b/examples/RLHF/ppo_trainer.py @@ -1005,11 +1005,10 @@ def init_train_model_opt( return policy_model, value_model def get_epoch_iterator(self): - def gen_epoch_data(): for prompt_only_batch, ptx_batch in zip( - self.prompt_only_dataloader, - itertools.cycle(self.ptx_dataloader), + self.prompt_only_dataloader, + itertools.cycle(self.ptx_dataloader), ): # generate batches self.set_eval() @@ -1037,10 +1036,11 @@ def __iter__(self): def __len__(self): return len(self.prompt_only_dataloader) * ( - self.args.update_iters * - self.args.per_device_prompt_batch_size * - self.args.num_return_sequences // - self.args.per_device_train_batch_size) + self.args.update_iters + * self.args.per_device_prompt_batch_size + * self.args.num_return_sequences + // self.args.per_device_train_batch_size + ) return EpochIterator() @@ -1051,10 +1051,13 @@ def init_train_num(self: Trainer, train_dataloader: DataLoader): len_dataloader = None if not self._is_iterable_dataset(self.train_dataset): len_dataloader = len(train_dataloader) - num_train_sub_steps = (len_dataloader * self.args.update_iters * - self.args.per_device_prompt_batch_size * - self.args.num_return_sequences // - self.args.per_device_train_batch_size) + num_train_sub_steps = ( + len_dataloader + * self.args.update_iters + * self.args.per_device_prompt_batch_size + * self.args.num_return_sequences + // self.args.per_device_train_batch_size + ) num_update_steps_per_epoch = num_train_sub_steps // args.gradient_accumulation_steps num_examples = len(self.train_dataset) if args.max_steps > 0: @@ -1116,18 +1119,15 @@ def train( if self.use_ptx: with guard_set_args( - args, + args, { - "per_device_train_batch_size": - 1 if getattr(self.ptx_dataset, "is_intokens", False) else - self.args.per_device_prompt_batch_size * - self.args.num_return_sequences + "per_device_train_batch_size": 1 + if getattr(self.ptx_dataset, "is_intokens", False) + else self.args.per_device_prompt_batch_size * self.args.num_return_sequences }, ), guard_set_args( - self, { - "train_dataset": self.ptx_dataset, - "data_collator": self.ptx_dataset.get_collator() - }): + self, {"train_dataset": self.ptx_dataset, "data_collator": 
self.ptx_dataset.get_collator()} + ): self.ptx_dataloader = self.get_train_dataloader() else: self.ptx_dataloader = range(100) @@ -1205,10 +1205,13 @@ def train( if self.use_ptx: logger.info("Doing ptx step...") self.timers and self.timers("ptx_step").start() - with guard_set_args(self._model_config, { + with guard_set_args( + self._model_config, + { # "set_attn_func": True, # "use_flash_attention": True - }): + }, + ): ptx_info = self.ptx_step(ptx_batch) rl_info.update(ptx_info) self.timers and self.timers("ptx_step").stop() @@ -1299,13 +1302,8 @@ def add_kl_divergence_regularization( max=self.clip_range_score, ) # TODO(guosheng): use scatter_add/put_along_axis - index = paddle.cumsum(sequence_mask.cast(paddle.int64), - axis=-1).argmax(-1, keepdim=True) - rewards = paddle.put_along_axis(rewards, - index, - reward_clip.unsqueeze(axis=-1), - axis=-1, - reduce="add") + index = paddle.cumsum(sequence_mask.cast(paddle.int64), axis=-1).argmax(-1, keepdim=True) + rewards = paddle.put_along_axis(rewards, index, reward_clip.unsqueeze(axis=-1), axis=-1, reduce="add") # batch_size = log_probs.shape[0] # for i in range(batch_size): # # print("="*20, sequence_mask[i]) diff --git a/examples/RLHF/trainer_utils.py b/examples/RLHF/trainer_utils.py index 5073be784287..865d34cea653 100644 --- a/examples/RLHF/trainer_utils.py +++ b/examples/RLHF/trainer_utils.py @@ -13,6 +13,7 @@ # limitations under the License. from __future__ import annotations + import inspect import os import time diff --git a/paddlenlp/trainer/utils/helper.py b/paddlenlp/trainer/utils/helper.py index e063c5966769..25f593f71e35 100644 --- a/paddlenlp/trainer/utils/helper.py +++ b/paddlenlp/trainer/utils/helper.py @@ -29,7 +29,6 @@ nested_broadcast_tensor, nested_empty_tensor, nested_reduce_tensor, - nested_broadcast_tensor_with_empty, ) __all__ = [ From c26583f5dff98ab6a6fcb77a61949808011996ee Mon Sep 17 00:00:00 2001 From: whucsgs Date: Wed, 12 Jun 2024 02:25:49 +0000 Subject: [PATCH 44/46] Fix make_position_ids by 4d causal mask. 
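When the attention mask is the 4D causal form `[batch, 1, seq_len, seq_len]` rather than a 2D padding mask, position ids can no longer be read off a simple cumulative sum of the mask; the change below derives them from the per-query row sums of the causal mask instead. A minimal, self-contained sketch of that idea (toy tensors only, not the patched helper itself):

```
import paddle

# Toy 4D causal mask for two sequences of length 4; the second row is left-padded.
# mask[b, 0, i, j] is True when query position i may attend to key position j.
pad = paddle.to_tensor([[True, True, True, True],
                        [False, False, True, True]])
causal = paddle.tril(paddle.ones((4, 4), dtype="bool"))
causal = causal[None, None, :, :].expand([2, 1, 4, 4])
mask_4d = pad[:, None, None, :] & causal

# Row sums count the visible keys per query; minus one gives 0-based positions.
visible = mask_4d.cast(paddle.int64).sum(-1)   # [batch, 1, seq_len]
position_ids = visible - 1
# Queries sitting on padding see no keys and would become -1; map them back to 0
# via paddle.where, the same way the helper does.
position_ids = paddle.where(position_ids == -1, visible, position_ids)
print(position_ids[:, 0, :])  # [[0, 1, 2, 3], [0, 0, 0, 1]]
```
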
--- examples/RLHF/models/ppo_model_utils.py | 27 ++++++ examples/RLHF/ppo_trainer.py | 108 ++++++++++++++---------- 2 files changed, 89 insertions(+), 46 deletions(-) diff --git a/examples/RLHF/models/ppo_model_utils.py b/examples/RLHF/models/ppo_model_utils.py index 5fa1b4e1a6ed..da8972cc5c6d 100644 --- a/examples/RLHF/models/ppo_model_utils.py +++ b/examples/RLHF/models/ppo_model_utils.py @@ -100,6 +100,12 @@ def create_loss(loss_cls, config, extra_args, merge_labels=None): @paddle.no_grad() def make_position_ids(attention_mask, source=None): + if len(attention_mask.shape) == 4: # causal mask + position_ids_p1 = attention_mask.cast(paddle.int64).sum(-1) + position_ids = position_ids_p1 - 1 + position_ids = paddle.where(position_ids == -1, position_ids_p1, position_ids) + return position_ids[:, 0, :] + assert len(attention_mask.shape) == 2 # padding mask attention_mask_bool = attention_mask attention_mask = attention_mask.cast(paddle.int64) position_ids = attention_mask.cumsum(-1) - 1 @@ -121,6 +127,27 @@ def make_position_ids(attention_mask, source=None): return position_ids +@paddle.no_grad() +def make_attention_mask(input_ids, pad_id, unk_id=None, past_key_values_length=0, causal_mask=True): + attention_mask = input_ids != pad_id + if unk_id is not None and pad_id != unk_id: + attention_mask = paddle.logical_and(attention_mask, input_ids != unk_id) + if not causal_mask: + return attention_mask + + batch_size, target_length = input_ids.shape # target_length: seq_len + mask = paddle.tril(paddle.ones((target_length, target_length), dtype="bool")) + if past_key_values_length > 0: + # [tgt_len, tgt_len + past_len] + mask = paddle.concat([paddle.ones([target_length, past_key_values_length], dtype="bool"), mask], axis=-1) + # [bs, 1, tgt_len, tgt_len + past_len] + causal_mask = mask[None, None, :, :].expand([batch_size, 1, target_length, target_length + past_key_values_length]) + + attention_mask = attention_mask[:, None, None, :] + expanded_attn_mask = attention_mask & causal_mask + return expanded_attn_mask + + def gather_log_probabilities(logits: paddle.Tensor, labels: paddle.Tensor) -> paddle.Tensor: """Gather log probabilities of the given labels from the logits.""" log_probs = F.log_softmax(logits, axis=-1) diff --git a/examples/RLHF/ppo_trainer.py b/examples/RLHF/ppo_trainer.py index 3059537df61e..3d82c982ac11 100644 --- a/examples/RLHF/ppo_trainer.py +++ b/examples/RLHF/ppo_trainer.py @@ -37,6 +37,7 @@ RLHFValueLoss, create_loss, gather_log_probabilities, + make_attention_mask, make_position_ids, ) from paddle.distributed import fleet @@ -569,21 +570,27 @@ class ValueTrainer(StepTrainer): class PPOMetric: - metric_names = [ - "train/" + name - for name in [ - "actor_loss", - "ptx_loss", - "reward_critic_loss", - "reward", - "kl_divergence", - "mean_generated_length", - "max_generated_length", + def set_metric_meta(self, use_ptx=True): + self.metric_names = [ + "train/" + name + for name in [ + "actor_loss", + "ptx_loss", + "reward_critic_loss", + "reward", + "kl_divergence", + "mean_generated_length", + "max_generated_length", + ] ] - ] - metric_ops = ["mean", "mean", "mean", "mean", "mean", "mean", "max"] - def __init__(self, freq, use_stack=True): + self.metric_ops = ["mean", "mean", "mean", "mean", "mean", "mean", "max"] + if not use_ptx: + self.metric_names.pop(1) + self.metric_ops.pop(1) + + def __init__(self, freq, use_stack=True, use_ptx=True): + self.set_metric_meta(use_ptx=use_ptx) self.freq = freq self.counter = 0 self.use_stack = use_stack @@ -1005,11 +1012,10 @@ def 
init_train_model_opt( return policy_model, value_model def get_epoch_iterator(self): - def gen_epoch_data(): for prompt_only_batch, ptx_batch in zip( - self.prompt_only_dataloader, - itertools.cycle(self.ptx_dataloader), + self.prompt_only_dataloader, + itertools.cycle(self.ptx_dataloader), ): # generate batches self.set_eval() @@ -1037,10 +1043,11 @@ def __iter__(self): def __len__(self): return len(self.prompt_only_dataloader) * ( - self.args.update_iters * - self.args.per_device_prompt_batch_size * - self.args.num_return_sequences // - self.args.per_device_train_batch_size) + self.args.update_iters + * self.args.per_device_prompt_batch_size + * self.args.num_return_sequences + // self.args.per_device_train_batch_size + ) return EpochIterator() @@ -1051,10 +1058,13 @@ def init_train_num(self: Trainer, train_dataloader: DataLoader): len_dataloader = None if not self._is_iterable_dataset(self.train_dataset): len_dataloader = len(train_dataloader) - num_train_sub_steps = (len_dataloader * self.args.update_iters * - self.args.per_device_prompt_batch_size * - self.args.num_return_sequences // - self.args.per_device_train_batch_size) + num_train_sub_steps = ( + len_dataloader + * self.args.update_iters + * self.args.per_device_prompt_batch_size + * self.args.num_return_sequences + // self.args.per_device_train_batch_size + ) num_update_steps_per_epoch = num_train_sub_steps // args.gradient_accumulation_steps num_examples = len(self.train_dataset) if args.max_steps > 0: @@ -1116,18 +1126,15 @@ def train( if self.use_ptx: with guard_set_args( - args, + args, { - "per_device_train_batch_size": - 1 if getattr(self.ptx_dataset, "is_intokens", False) else - self.args.per_device_prompt_batch_size * - self.args.num_return_sequences + "per_device_train_batch_size": 1 + if getattr(self.ptx_dataset, "is_intokens", False) + else self.args.per_device_prompt_batch_size * self.args.num_return_sequences }, ), guard_set_args( - self, { - "train_dataset": self.ptx_dataset, - "data_collator": self.ptx_dataset.get_collator() - }): + self, {"train_dataset": self.ptx_dataset, "data_collator": self.ptx_dataset.get_collator()} + ): self.ptx_dataloader = self.get_train_dataloader() else: self.ptx_dataloader = range(100) @@ -1173,7 +1180,7 @@ def train( self.control = self.callback_handler.on_train_begin(args, self.state, self.control) self._globalstep_last_logged = self.state.global_step - metric = PPOMetric(freq=self.args.logging_steps) + metric = PPOMetric(freq=self.args.logging_steps, use_ptx=self.use_ptx) start_time = time.time() self._globalstep_last_start_time = start_time @@ -1205,10 +1212,13 @@ def train( if self.use_ptx: logger.info("Doing ptx step...") self.timers and self.timers("ptx_step").start() - with guard_set_args(self._model_config, { + with guard_set_args( + self._model_config, + { # "set_attn_func": True, # "use_flash_attention": True - }): + }, + ): ptx_info = self.ptx_step(ptx_batch) rl_info.update(ptx_info) self.timers and self.timers("ptx_step").stop() @@ -1299,13 +1309,8 @@ def add_kl_divergence_regularization( max=self.clip_range_score, ) # TODO(guosheng): use scatter_add/put_along_axis - index = paddle.cumsum(sequence_mask.cast(paddle.int64), - axis=-1).argmax(-1, keepdim=True) - rewards = paddle.put_along_axis(rewards, - index, - reward_clip.unsqueeze(axis=-1), - axis=-1, - reduce="add") + index = paddle.cumsum(sequence_mask.cast(paddle.int64), axis=-1).argmax(-1, keepdim=True) + rewards = paddle.put_along_axis(rewards, index, reward_clip.unsqueeze(axis=-1), axis=-1, reduce="add") # 
batch_size = log_probs.shape[0] # for i in range(batch_size): # # print("="*20, sequence_mask[i]) @@ -1544,10 +1549,18 @@ def generate(self, prompt_only_batch: Dict) -> List[Dict[str, Any]]: { "prompt": input_ids, "input_ids": seq, # "sequence": - "attention_mask": paddle.logical_and( - seq != self.tokenizer.pad_token_id, - seq != self.tokenizer.unk_token_id, + "attention_mask": make_attention_mask( + seq, + pad_id=self.tokenizer.pad_token_id, + unk_id=self.tokenizer.unk_token_id, + causal_mask=False, ), + # "sequence_mask": make_attention_mask( + # seq, + # pad_id=self.tokenizer.pad_token_id, + # unk_id=self.tokenizer.unk_token_id, + # causal_mask=False, + # ), } for seq in sequences ] @@ -1724,6 +1737,9 @@ def normalize_data( """ prompt = rl_batch["prompt"] # length: src attention_mask = rl_batch["attention_mask"] # length: src + tgt + if len(attention_mask.shape) == 4: + # use padding mask instead of causal mask + attention_mask = rl_batch["sequence_mask"] # length: src + tgt old_log_probs = rl_batch["log_probs"] # length: src + tgt -1 ref_log_probs = rl_batch["ref_log_probs"] # length: src + tgt -1 rewards = rl_batch["rewards"] # length: 1 From ffa4658d71b8a74c1b2c42a04173737001ebe2a3 Mon Sep 17 00:00:00 2001 From: whucsgs Date: Wed, 12 Jun 2024 03:14:49 +0000 Subject: [PATCH 45/46] Fix nested_broadcast_tensor_with_empty import --- paddlenlp/trainer/utils/helper.py | 1 + 1 file changed, 1 insertion(+) diff --git a/paddlenlp/trainer/utils/helper.py b/paddlenlp/trainer/utils/helper.py index 25f593f71e35..3e7693aaad60 100644 --- a/paddlenlp/trainer/utils/helper.py +++ b/paddlenlp/trainer/utils/helper.py @@ -25,6 +25,7 @@ from paddle.distributed import fleet from paddlenlp.utils.log import logger +from paddlenlp.utils.nested import nested_broadcast_tensor_with_empty # noqa: F401 from paddlenlp.utils.nested import ( nested_broadcast_tensor, nested_empty_tensor, From f1e66f26be7827791a37c19ef73d6c6b0bec9ca3 Mon Sep 17 00:00:00 2001 From: whucsgs Date: Wed, 12 Jun 2024 11:17:30 +0000 Subject: [PATCH 46/46] Update eval with make_attention_mask --- examples/RLHF/ppo_trainer.py | 21 ++++++++++++++------- paddlenlp/generation/utils.py | 4 ++-- paddlenlp/utils/nested.py | 1 + 3 files changed, 17 insertions(+), 9 deletions(-) diff --git a/examples/RLHF/ppo_trainer.py b/examples/RLHF/ppo_trainer.py index 3d82c982ac11..c2c72d6c5cd1 100644 --- a/examples/RLHF/ppo_trainer.py +++ b/examples/RLHF/ppo_trainer.py @@ -891,10 +891,6 @@ def prediction_step( generation_config=self.generation_config, synced_gpus=ShardingOption.FULL_SHARD in self.policy_trainer.args.sharding, )[0] - attention_mask = paddle.logical_and( - seq != self.tokenizer.pad_token_id, - seq != self.tokenizer.unk_token_id, - ) if self.reward_tokenizer is not self.tokenizer: reward_tokenize_output = batch_retokenize( input_ids=seq, @@ -904,15 +900,21 @@ def prediction_step( device=self.args.device, ) reward_input_ids = reward_tokenize_output["input_ids"] - reward_attention_mask = reward_tokenize_output["attention_mask"] else: reward_input_ids = seq - reward_attention_mask = attention_mask + reward_attention_mask = make_attention_mask( + seq, + pad_id=self.reward_tokenizer.pad_token_id, + unk_id=self.reward_tokenizer.unk_token_id, + causal_mask=False, + ) + reward_position_ids = make_position_ids(reward_attention_mask) # unify PP with others since PP always return tuple reward_score = self.reward_model( reward_input_ids, attention_mask=reward_attention_mask, + position_ids=reward_position_ids, # return_dict=True, )[ 1 @@ -1604,7 +1606,12 @@ 
def rollout_reward_value( skip_special_tokens=True, ) reward_input_ids = reward_tokenize_output["input_ids"] - reward_attention_mask = reward_tokenize_output["attention_mask"] + reward_attention_mask = make_attention_mask( + reward_input_ids, + pad_id=self.reward_tokenizer.pad_token_id, + unk_id=self.reward_tokenizer.unk_token_id, + causal_mask=False, + ) reward_position_ids = make_position_ids(reward_attention_mask) else: # for text in self.tokenizer.batch_decode(input_ids, skip_special_tokens=False): diff --git a/paddlenlp/generation/utils.py b/paddlenlp/generation/utils.py index 4ee94c17181f..aa2958fc26a1 100644 --- a/paddlenlp/generation/utils.py +++ b/paddlenlp/generation/utils.py @@ -1225,12 +1225,12 @@ def sample( except: group, src = None, 0 paddle.distributed.broadcast(next_tokens, src=src, group=group) - # config does not include tensor_parallel_degree, and pipeline parallel + # config does not include pipeline_parallel_degree, and pipeline parallel # uses trainer.model_wrapped to run in both train and predict mode # which has pp_group as a attribute # TODO(guosheng): only let the last stage of pipeline to do softmax # and sampling, and then broadcast to avoid broadcast logits. - if hasattr(self, "pp_group"): + if getattr(self, "pp_group", None) is not None: paddle.distributed.broadcast( next_tokens, src=self.pp_group.ranks[0], group=self.pp_group # use rank 0 for same seed to check ) diff --git a/paddlenlp/utils/nested.py b/paddlenlp/utils/nested.py index b77f783b9748..4e800231843c 100644 --- a/paddlenlp/utils/nested.py +++ b/paddlenlp/utils/nested.py @@ -16,6 +16,7 @@ import copy import paddle + from paddlenlp.utils.log import logger TensorHolder = collections.namedtuple("TensorHolder", ["shape", "dtype", "name"])
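
As an end-to-end illustration of the `make_attention_mask` helper that the last few patches wire into rollout and eval: it starts from a padding mask over the generated ids (treating both `pad_token_id` and `unk_token_id` as padding) and, when a causal mask is requested, intersects it with a lower-triangular matrix to get the 4D form the attention layers consume. A rough standalone sketch under those assumptions (toy ids and pad/unk values, not the patched module itself):

```
import paddle


def sketch_make_attention_mask(input_ids, pad_id, unk_id=None, causal_mask=True):
    # 2D padding mask: True for real tokens, False for pad/unk placeholders.
    mask = input_ids != pad_id
    if unk_id is not None and unk_id != pad_id:
        mask = paddle.logical_and(mask, input_ids != unk_id)
    if not causal_mask:
        return mask  # reward/eval path keeps the plain 2D padding mask

    batch_size, seq_len = input_ids.shape
    causal = paddle.tril(paddle.ones((seq_len, seq_len), dtype="bool"))
    causal = causal[None, None, :, :].expand([batch_size, 1, seq_len, seq_len])
    # [batch, 1, seq_len, seq_len]: query i sees key j iff j <= i and j is a real token.
    return mask[:, None, None, :] & causal


seq = paddle.to_tensor([[2, 5, 6, 7, 0, 0]])  # toy generated ids, 0 = pad
padding_mask = sketch_make_attention_mask(seq, pad_id=0, unk_id=1, causal_mask=False)
causal_mask_4d = sketch_make_attention_mask(seq, pad_id=0, unk_id=1, causal_mask=True)
print(padding_mask.shape, causal_mask_4d.shape)  # [1, 6] and [1, 1, 6, 6]
```

In the reward/eval path above, `causal_mask=False` keeps the 2D padding mask, which is what `make_position_ids` then consumes to recover per-token position ids.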