From aa859886cdc5e717bbb21835a5146d4a24959bf9 Mon Sep 17 00:00:00 2001
From: root
Date: Fri, 17 Nov 2023 09:36:31 +0000
Subject: [PATCH] support parallel reward function

---
 trlx/trainer/accelerate_base_trainer.py | 60 +++++++++++++++----------
 trlx/trainer/accelerate_ppo_trainer.py  |  8 ++--
 2 files changed, 41 insertions(+), 27 deletions(-)

diff --git a/trlx/trainer/accelerate_base_trainer.py b/trlx/trainer/accelerate_base_trainer.py
index 7809f2e3d..38044fa7f 100644
--- a/trlx/trainer/accelerate_base_trainer.py
+++ b/trlx/trainer/accelerate_base_trainer.py
@@ -419,8 +419,9 @@ def evaluate(self):  # noqa: C901
         if not self.config.train.reward_only_in_main_process or self.accelerator.is_main_process:
             str_samples, str_prompts, str_outputs = self.decode(all_prompts, all_samples, all_prompt_sizes)
 
-            columns = ["prompt", "output"]
             columns_data = [str_prompts, str_outputs]
+            if not self.config.train.reward_only_in_main_process:
+                columns_data = self.accelerator.gather_for_metrics(columns_data)
 
             metadata, *xs = all_metadata
             for k in metadata:
@@ -443,41 +444,52 @@
                     rewards = torch.tensor([sum(reward) for reward in rewards], dtype=float)
                 else:
                     rewards = torch.tensor(rewards, dtype=float)
-                mean_reward = rewards.mean().item()
-                columns.append("reward")
-                if not isinstance(rewards, list):
-                    rewards = rewards.tolist()
-                columns_data.append(rewards)
-                stats[f"reward/mean{sweep_suffix}"] = mean_reward
+
+                if not self.config.train.reward_only_in_main_process:
+                    rewards = self.accelerator.gather(rewards)
+                if self.accelerator.is_main_process:
+                    mean_reward = rewards.mean().item()
+
+                    columns = ["prompt", "output", "reward"]
+                    if not isinstance(rewards, list):
+                        rewards = rewards.tolist()
+                    columns_data.append(rewards)
+                    stats[f"reward/mean{sweep_suffix}"] = mean_reward
 
                 # additionally log any other metrics
                 if self.metric_fn:
                     logger.info("Computing metrics")
                     metric_time = time()
                     metrics = self.metric_fn(samples=str_samples, prompts=str_prompts, outputs=str_outputs, **metadata)
-                    stats["time/metric"] = time() - metric_time
+                    if not self.config.train.reward_only_in_main_process:
+                        metrics = self.accelerator.gather_for_metrics(metrics)
 
-                    mean_metrics = {
-                        f"metrics/{k}{sweep_suffix}": torch.as_tensor(xs).mean(-1).item() for k, xs in metrics.items()
-                    }
+                    if self.accelerator.is_main_process:
+                        stats["time/metric"] = time() - metric_time
 
-                    stats.update(mean_metrics)
+                        mean_metrics = {
+                            f"metrics/{k}{sweep_suffix}": torch.as_tensor(xs).mean(-1).item()
+                            for k, xs in metrics.items()
+                        }
 
-                    for metric, values in metrics.items():
-                        # Skip metrics that are scalers since they represent aggregated values
-                        if isinstance(values, float):
-                            continue
-                        columns.append(metric)
-                        if not isinstance(values, list):
-                            values = values.tolist()
-                        columns_data.append(values)
+                        stats.update(mean_metrics)
+
+                        for metric, values in metrics.items():
+                            # Skip metrics that are scalars since they represent aggregated values
+                            if isinstance(values, float):
+                                continue
+                            columns.append(metric)
+                            if not isinstance(values, list):
+                                values = values.tolist()
+                            columns_data.append(values)
 
                 # Prepend the sweep argument along with samples
-                if self.generate_sweep_kwarg:
-                    columns.insert(0, gen_sweep_arg)
-                    columns_data.insert(0, [gen_sweep_value] * len(samples))
+                if self.accelerator.is_main_process:
+                    if self.generate_sweep_kwarg:
+                        columns.insert(0, gen_sweep_arg)
+                        columns_data.insert(0, [gen_sweep_value] * len(samples))
 
-                table.append(list(zip(*columns_data)))
+                    table.append(list(zip(*columns_data)))
 
         # Log and display evaluation metrics
         logger.info("Summarizing evaluation")
diff --git a/trlx/trainer/accelerate_ppo_trainer.py b/trlx/trainer/accelerate_ppo_trainer.py
index 424d3d873..1060582e9 100644
--- a/trlx/trainer/accelerate_ppo_trainer.py
+++ b/trlx/trainer/accelerate_ppo_trainer.py
@@ -344,17 +344,19 @@ def make_experience(self, num_rollouts: int = 1024, iter_count: int = 0):  # noq
             if torch.distributed.is_initialized():
                 torch.distributed.broadcast(max_len, 0)
                 scores = torch.empty((len(samples), max_len), device=device)
-                torch.distributed.scatter(scores, all_scores)
+                torch.distributed.scatter(scores, all_scores)  # after the scatter, scores holds this process's shard
             else:
-                scores = all_scores[0].clone().detach()
+                scores = all_scores[0].clone().detach()  # this process's shard
         else:
-            scores = all_scores
+            scores = all_scores.clone().detach()  # this process's shard
+            # `all_scores` is no longer used, so there is no need to gather it
         scores_mask = scores != -np.inf
 
         if self.config.train.reward_only_in_main_process:
             _, _, str_outputs = self.decode(prompt_tensors, samples, append_eos_token=True)
         else:
             str_outputs = all_str_outputs
+            # `all_str_outputs` is no longer used, so there is no need to gather it
 
         # Pad the sample outputs
         outputs = self.tokenizer(str_outputs).input_ids
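
Note on the gathering pattern: with `reward_only_in_main_process` disabled,
every process now runs the reward function on its own shard of the evaluation
prompts, and the shards are gathered before the mean reward and the sample
table are logged. A minimal standalone sketch of that pattern follows; the
`reward_fn` and the shard data are hypothetical placeholders, not part of the
diff:

import torch
from accelerate import Accelerator

accelerator = Accelerator()

def reward_fn(samples):
    # hypothetical reward function: one float per sample in this process's shard
    return [float(len(s)) for s in samples]

# each process scores only its local shard of the evaluation prompts
local_samples = ["example output"] * 4
rewards = torch.tensor(reward_fn(local_samples), dtype=float, device=accelerator.device)

# gather() concatenates the per-process reward shards along dim 0, so the mean
# below is taken over the full evaluation set, as in the patched evaluate()
rewards = accelerator.gather(rewards)

if accelerator.is_main_process:
    print("reward/mean:", rewards.mean().item())

The string columns and metric lists go through gather_for_metrics() instead,
which also drops any duplicated samples that accelerate may pad onto the last
uneven batch, so the logged table is not skewed by repeats.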