Feature/plotting (#220)
Adds a flag to save training metrics to a JSON file and a script to plot the metrics from that file with matplotlib.
albertodepaola authored Dec 27, 2023
2 parents 24ffd96 + 4a745cc commit 2571aed
Showing 6 changed files with 189 additions and 11 deletions.
2 changes: 2 additions & 0 deletions README.md
@@ -110,6 +110,8 @@ All the parameters in the examples and recipes below need to be further tuned to

* Make sure to set the right path to the model in the [training config](src/llama_recipes/configs/training.py).

* To save loss and perplexity metrics during training and evaluation, pass `--save_metrics` to the finetuning script. The resulting JSON file can be plotted with the [plot_metrics.py](./examples/plot_metrics.py) script: `python examples/plot_metrics.py --file_path path/to/metrics.json`. A minimal end-to-end sketch follows.
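
For example, assuming the standard finetuning entry point (the model path, output directory, and timestamped filename are illustrative placeholders; training writes the file as `metrics_data_<rank>-<timestamp>.json` under the output directory):

```bash
# Fine-tune with metric saving enabled (paths are placeholders).
python -m llama_recipes.finetuning --use_peft --peft_method lora \
    --model_name /path/to/model --output_dir ./output --save_metrics

# Plot the resulting metrics file (name varies by rank and timestamp).
python examples/plot_metrics.py --file_path ./output/metrics_data_0-<timestamp>.json
```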

### Single GPU:

71 changes: 71 additions & 0 deletions examples/plot_metrics.py
@@ -0,0 +1,71 @@
import json
import matplotlib.pyplot as plt
import argparse
import os

def plot_metric(data, metric_name, x_label, y_label, title, colors):
    # Plot the epoch-level train and validation series for one metric.
    plt.figure(figsize=(7, 6))

    plt.plot(data[f'train_epoch_{metric_name}'], label=f'Train Epoch {metric_name.capitalize()}', color=colors[0])
    plt.plot(data[f'val_epoch_{metric_name}'], label=f'Validation Epoch {metric_name.capitalize()}', color=colors[1])
    plt.xlabel(x_label)
    plt.ylabel(y_label)
    plt.title(f'Train and Validation Epoch {title}')
    plt.legend()
    plt.tight_layout()

def plot_single_metric_by_step(data, metric_name, x_label, y_label, title, color):
    plt.plot(data[f'{metric_name}'], label=f'{title}', color=color)
    plt.xlabel(x_label)
    plt.ylabel(y_label)
    plt.title(title)
    plt.legend()
    plt.tight_layout()

def plot_metrics_by_step(data, metric_name, x_label, y_label, colors):
    # Plot the step-level train and validation series side by side.
    plt.figure(figsize=(14, 6))

    plt.subplot(1, 2, 1)
    plot_single_metric_by_step(data, f'train_step_{metric_name}', x_label, y_label, f'Train Step {metric_name.capitalize()}', colors[0])
    plt.subplot(1, 2, 2)
    plot_single_metric_by_step(data, f'val_step_{metric_name}', x_label, y_label, f'Validation Step {metric_name.capitalize()}', colors[1])
    plt.tight_layout()


def plot_metrics(file_path):
    if not os.path.exists(file_path):
        print(f"File {file_path} does not exist.")
        return

    with open(file_path, 'r') as f:
        try:
            data = json.load(f)
        except json.JSONDecodeError:
            print("Invalid JSON file.")
            return

    directory = os.path.dirname(file_path)
    filename_prefix = os.path.basename(file_path).split('.')[0]

    plot_metric(data, 'loss', 'Epoch', 'Loss', 'Loss', ['b', 'r'])
    plt.savefig(os.path.join(directory, f"{filename_prefix}_train_and_validation_loss.png"))
    plt.close()

    plot_metric(data, 'perplexity', 'Epoch', 'Perplexity', 'Perplexity', ['g', 'm'])
    plt.savefig(os.path.join(directory, f"{filename_prefix}_train_and_validation_perplexity.png"))
    plt.close()

    plot_metrics_by_step(data, 'loss', 'Step', 'Loss', ['b', 'r'])
    plt.savefig(os.path.join(directory, f"{filename_prefix}_train_and_validation_loss_by_step.png"))
    plt.close()

    plot_metrics_by_step(data, 'perplexity', 'Step', 'Perplexity', ['g', 'm'])
    plt.savefig(os.path.join(directory, f"{filename_prefix}_train_and_validation_perplexity_by_step.png"))
    plt.close()

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Plot metrics from JSON file.')
    parser.add_argument('--file_path', required=True, type=str, help='Path to the metrics JSON file.')
    args = parser.parse_args()

    plot_metrics(args.file_path)
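
For reference, the metrics file consumed by this script is written by `save_to_json` in `src/llama_recipes/utils/train_utils.py` and holds eight parallel lists. A minimal sketch for sanity-checking one, assuming only the key names shown in that function (the path is a placeholder):

```python
import json

# The eight metric series written by save_to_json in train_utils.py.
EXPECTED_KEYS = [
    "train_step_loss", "train_epoch_loss",
    "train_step_perplexity", "train_epoch_perplexity",
    "val_step_loss", "val_epoch_loss",
    "val_step_perplexity", "val_epoch_perplexity",
]

with open("path/to/metrics.json") as f:  # placeholder path
    data = json.load(f)

# Print the length of each series to confirm the file is well formed.
for key in EXPECTED_KEYS:
    print(f"{key}: {len(data.get(key, []))} values")
```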
3 changes: 2 additions & 1 deletion requirements.txt
@@ -12,4 +12,5 @@ transformers>=4.34.1
sentencepiece
py7zr
scipy
optimum
matplotlib
1 change: 1 addition & 0 deletions src/llama_recipes/configs/training.py
@@ -38,3 +38,4 @@ class train_config:
    dist_checkpoint_folder: str="fine-tuned" # will be used if using FSDP
    save_optimizer: bool=False # will be used if using FSDP
    use_fast_kernels: bool = False # Enable using SDPA from PyTorch Accelerated Transformers, makes use of Flash Attention and Xformers memory-efficient kernels
    save_metrics: bool = False # saves training metrics to a json file for later plotting
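
A minimal sketch of enabling the flag programmatically rather than via `--save_metrics` on the command line, assuming the dataclass config pattern used by this repo:

```python
from llama_recipes.configs.training import train_config

# Enable metric saving; the JSON file is written under output_dir
# during training (see train_utils.py below).
config = train_config()
config.save_metrics = True
```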
70 changes: 60 additions & 10 deletions src/llama_recipes/utils/train_utils.py
@@ -7,6 +7,7 @@
from contextlib import nullcontext
from pathlib import Path
from pkg_resources import packaging
from datetime import datetime


import torch
@@ -16,6 +17,7 @@
from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
from tqdm import tqdm
from transformers import LlamaTokenizer
import json


from llama_recipes.model_checkpointing import save_model_checkpoint, save_model_and_optimizer_sharded, save_optimizer_checkpoint
@@ -55,13 +57,24 @@ def train(model, train_dataloader,eval_dataloader, tokenizer, optimizer, lr_sche
    elif train_config.use_fp16 and not train_config.enable_fsdp:
        scaler = torch.cuda.amp.GradScaler()
    if train_config.enable_fsdp:
        world_size = int(os.environ["WORLD_SIZE"])

    autocast = torch.cuda.amp.autocast if train_config.use_fp16 else nullcontext

    train_prep = []
    train_loss = []
    val_prep = []
    val_loss = []

    if train_config.save_metrics:
        metrics_filename = f"{train_config.output_dir}/metrics_data_{local_rank}-{datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}.json"
        train_step_perplexity = []
        train_step_loss = []
        val_step_loss = []
        val_step_perplexity = []

    epoch_times = []
    checkpoint_times = []
    results = {}
@@ -82,6 +95,9 @@ def train(model, train_dataloader,eval_dataloader, tokenizer, optimizer, lr_sche
                with autocast():
                    loss = model(**batch).loss
                loss = loss / gradient_accumulation_steps
                if train_config.save_metrics:
                    train_step_loss.append(loss.detach().float().item())
                    train_step_perplexity.append(float(torch.exp(loss.detach().float())))
                total_loss += loss.detach().float()
                if train_config.use_fp16:
                    # if fp16 is enabled, use gradient scaler to handle gradient update
@@ -111,6 +127,9 @@ def train(model, train_dataloader,eval_dataloader, tokenizer, optimizer, lr_sche
                        pbar.update(1)

                pbar.set_description(f"Training Epoch: {epoch+1}/{train_config.num_epochs}, step {step}/{len(train_dataloader)} completed (loss: {loss.detach().float()})")

                if train_config.save_metrics:
                    save_to_json(metrics_filename, train_step_loss, train_loss, train_step_perplexity, train_prep, val_step_loss, val_loss, val_step_perplexity, val_prep)
            pbar.close()

        epoch_end_time = time.perf_counter() - epoch_start_time
@@ -122,10 +141,10 @@ def train(model, train_dataloader,eval_dataloader, tokenizer, optimizer, lr_sche
        if train_config.enable_fsdp:
            train_epoch_loss = train_epoch_loss/world_size
        train_perplexity = torch.exp(train_epoch_loss)

        # Cast to plain floats so the lists serialize cleanly to JSON.
        train_prep.append(float(train_perplexity))
        train_loss.append(float(train_epoch_loss))
        if train_config.enable_fsdp:
            if rank==0:
                print(f"Max CUDA memory allocated was {memtrace.peak} GB")
@@ -144,7 +163,11 @@ def train(model, train_dataloader,eval_dataloader, tokenizer, optimizer, lr_sche
        lr_scheduler.step()

        if train_config.run_validation:
            eval_ppl, eval_epoch_loss, temp_val_loss, temp_step_perplexity = evaluation(model, train_config, eval_dataloader, local_rank, tokenizer)
            if train_config.save_metrics:
                val_step_loss.extend(temp_val_loss)
                val_step_perplexity.extend(temp_step_perplexity)

            checkpoint_start_time = time.perf_counter()
            if train_config.save_model and eval_epoch_loss < best_val_loss:
                if train_config.enable_fsdp:
@@ -195,13 +218,18 @@ def train(model, train_dataloader,eval_dataloader, tokenizer, optimizer, lr_sche
print(f"best eval loss on epoch {epoch+1} is {best_val_loss}")
else:
print(f"best eval loss on epoch {epoch+1} is {best_val_loss}")
val_loss.append(best_val_loss)
val_prep.append(eval_ppl)
val_loss.append(float(best_val_loss))
val_prep.append(float(eval_ppl))
if train_config.enable_fsdp:
if rank==0:
print(f"Epoch {epoch+1}: train_perplexity={train_perplexity:.4f}, train_epoch_loss={train_epoch_loss:.4f}, epoch time {epoch_end_time}s")
else:
print(f"Epoch {epoch+1}: train_perplexity={train_perplexity:.4f}, train_epoch_loss={train_epoch_loss:.4f}, epoch time {epoch_end_time}s")

        # Saving the results every epoch to plot later
        if train_config.save_metrics:
            save_to_json(metrics_filename, train_step_loss, train_loss, train_step_perplexity, train_prep, val_step_loss, val_loss, val_step_perplexity, val_prep)

    avg_epoch_time = sum(epoch_times) / len(epoch_times)
    avg_checkpoint_time = sum(checkpoint_times) / len(checkpoint_times) if len(checkpoint_times) > 0 else 0
    avg_train_prep = sum(train_prep)/len(train_prep)
@@ -217,6 +245,8 @@ def train(model, train_dataloader,eval_dataloader, tokenizer, optimizer, lr_sche
    results['avg_eval_loss'] = avg_eval_loss
    results["avg_epoch_time"] = avg_epoch_time
    results["avg_checkpoint_time"] = avg_checkpoint_time
    if train_config.save_metrics:
        results["metrics_filename"] = metrics_filename

    # saving the training params including fsdp setting for reference.
    if train_config.enable_fsdp and not train_config.use_peft:
@@ -240,6 +270,8 @@ def evaluation(model,train_config, eval_dataloader, local_rank, tokenizer):
        world_size = int(os.environ["WORLD_SIZE"])
    model.eval()
    eval_preds = []
    val_step_loss = []
    val_step_perplexity = []
    eval_loss = 0.0  # Initialize evaluation loss
    with MemoryTrace() as memtrace:
        for step, batch in enumerate(tqdm(eval_dataloader,colour="green", desc="evaluating Epoch", dynamic_ncols=True)):
@@ -253,6 +285,10 @@ def evaluation(model,train_config, eval_dataloader, local_rank, tokenizer):
                # Forward pass and compute loss
                outputs = model(**batch)
                loss = outputs.loss
                if train_config.save_metrics:
                    val_step_loss.append(loss.detach().float().item())
                    val_step_perplexity.append(float(torch.exp(loss.detach().float())))

                eval_loss += loss.detach().float()
            # Decode predictions and add to evaluation predictions list
            preds = torch.argmax(outputs.logits, -1)
@@ -276,8 +312,8 @@ def evaluation(model,train_config, eval_dataloader, local_rank, tokenizer):
print(f" {eval_ppl=} {eval_epoch_loss=}")
else:
print(f" {eval_ppl=} {eval_epoch_loss=}")

return eval_ppl, eval_epoch_loss
return eval_ppl, eval_epoch_loss, val_step_loss, val_step_perplexity

def freeze_transformer_layers(model, num_layer):
    for i, layer in enumerate(model.model.layers):
@@ -417,3 +453,17 @@ def save_train_params(train_config, fsdp_config, rank):
        f.write(config_yaml)
    if rank==0:
        print(f"training params are saved in {file_name}")

def save_to_json(output_filename, train_step_loss, train_epoch_loss, train_step_ppl, train_epoch_ppl, val_step_loss, val_epoch_loss, val_step_ppl, val_epoch_ppl):
    metrics_data = {
        "train_step_loss": train_step_loss,
        "train_epoch_loss": train_epoch_loss,
        "train_step_perplexity": train_step_ppl,
        "train_epoch_perplexity": train_epoch_ppl,
        "val_step_loss": val_step_loss,
        "val_epoch_loss": val_epoch_loss,
        "val_step_perplexity": val_step_ppl,
        "val_epoch_perplexity": val_epoch_ppl
    }
    with open(output_filename, "w") as f:
        json.dump(metrics_data, f)
53 changes: 53 additions & 0 deletions tests/test_train_utils.py
@@ -2,11 +2,27 @@
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.

from unittest.mock import patch
import pytest

import torch

import os
import shutil

from llama_recipes.utils.train_utils import train

TEMP_OUTPUT_DIR = os.getcwd() + "/tmp"

@pytest.fixture(scope="session")
def temp_output_dir():
    # Create the directory during the session-level setup
    temp_output_dir = "tmp"
    os.mkdir(os.path.join(os.getcwd(), temp_output_dir))
    yield temp_output_dir
    # Delete the directory during the session-level teardown
    shutil.rmtree(temp_output_dir)


@patch("llama_recipes.utils.train_utils.MemoryTrace")
@patch("llama_recipes.utils.train_utils.nullcontext")
@patch("llama_recipes.utils.train_utils.torch.cuda.amp.GradScaler")
@@ -28,6 +44,7 @@ def test_gradient_accumulation(autocast, scaler, nullcontext, mem_trace, mocker)
    train_config.use_fp16 = False
    train_config.run_validation = False
    train_config.gradient_clipping = False
    train_config.save_metrics = False

    train(
        model,
@@ -63,3 +80,39 @@ def test_gradient_accumulation(autocast, scaler, nullcontext, mem_trace, mocker)
    assert optimizer.zero_grad.call_count == 3
    assert nullcontext.call_count == 0
    assert autocast.call_count == 5

def test_save_to_json(temp_output_dir, mocker):
    model = mocker.MagicMock(name="model")
    model().loss.__truediv__().detach.return_value = torch.tensor(1)
    mock_tensor = mocker.MagicMock(name="tensor")
    batch = {"input": mock_tensor}
    train_dataloader = [batch, batch, batch, batch, batch]
    eval_dataloader = None
    tokenizer = mocker.MagicMock()
    optimizer = mocker.MagicMock()
    lr_scheduler = mocker.MagicMock()
    gradient_accumulation_steps = 1
    train_config = mocker.MagicMock()
    train_config.enable_fsdp = False
    train_config.use_fp16 = False
    train_config.run_validation = False
    train_config.gradient_clipping = False
    train_config.save_metrics = True
    train_config.output_dir = temp_output_dir

    results = train(
        model,
        train_dataloader,
        eval_dataloader,
        tokenizer,
        optimizer,
        lr_scheduler,
        gradient_accumulation_steps,
        train_config,
        local_rank=0
    )

    assert results["metrics_filename"] not in ["", None]
    assert os.path.isfile(results["metrics_filename"])

