From 6f486460bce44988b0e26f37bce323659bf0584e Mon Sep 17 00:00:00 2001
From: rasbt
Date: Sun, 5 May 2024 12:21:10 -0500
Subject: [PATCH] ouput -> output

---
 appendix-E/01_main-chapter-code/previous_chapters.py          | 4 ++--
 .../02_bonus_additional-experiments/additional-experiments.py | 4 ++--
 ch06/03_bonus_imdb-classification/train-bert-hf.py            | 4 ++--
 ch06/03_bonus_imdb-classification/train-gpt.py                | 4 ++--
 4 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/appendix-E/01_main-chapter-code/previous_chapters.py b/appendix-E/01_main-chapter-code/previous_chapters.py
index b6fca51..9d7f7e3 100644
--- a/appendix-E/01_main-chapter-code/previous_chapters.py
+++ b/appendix-E/01_main-chapter-code/previous_chapters.py
@@ -466,7 +466,7 @@ def calc_accuracy_loader(data_loader, model, device, num_batches=None):
     for i, (input_batch, target_batch) in enumerate(data_loader):
         if i < num_batches:
             input_batch, target_batch = input_batch.to(device), target_batch.to(device)
-            logits = model(input_batch)[:, -1, :]  # Logits of last ouput token
+            logits = model(input_batch)[:, -1, :]  # Logits of last output token
             predicted_labels = torch.argmax(logits, dim=-1)
 
             num_examples += predicted_labels.shape[0]
@@ -478,7 +478,7 @@ def calc_accuracy_loader(data_loader, model, device, num_batches=None):
 
 def calc_loss_batch(input_batch, target_batch, model, device):
     input_batch, target_batch = input_batch.to(device), target_batch.to(device)
-    logits = model(input_batch)[:, -1, :]  # Logits of last ouput token
+    logits = model(input_batch)[:, -1, :]  # Logits of last output token
     loss = torch.nn.functional.cross_entropy(logits, target_batch)
     return loss
 
diff --git a/ch06/02_bonus_additional-experiments/additional-experiments.py b/ch06/02_bonus_additional-experiments/additional-experiments.py
index 81809a2..15f784d 100644
--- a/ch06/02_bonus_additional-experiments/additional-experiments.py
+++ b/ch06/02_bonus_additional-experiments/additional-experiments.py
@@ -139,7 +139,7 @@ def instantiate_model(choose_model, load_weights):
 
 def calc_loss_batch(input_batch, target_batch, model, device, trainable_token=-1):
     input_batch, target_batch = input_batch.to(device), target_batch.to(device)
-    logits = model(input_batch)[:, trainable_token, :]  # Logits of last ouput token
+    logits = model(input_batch)[:, trainable_token, :]  # Logits of last output token
     loss = torch.nn.functional.cross_entropy(logits, target_batch)
     return loss
 
@@ -175,7 +175,7 @@ def calc_accuracy_loader(data_loader, model, device, num_batches=None, trainable
     for i, (input_batch, target_batch) in enumerate(data_loader):
         if i < num_batches:
             input_batch, target_batch = input_batch.to(device), target_batch.to(device)
-            logits = model(input_batch)[:, trainable_token, :]  # Logits of last ouput token
+            logits = model(input_batch)[:, trainable_token, :]  # Logits of last output token
             predicted_labels = torch.argmax(logits, dim=-1)
 
             num_examples += predicted_labels.shape[0]
diff --git a/ch06/03_bonus_imdb-classification/train-bert-hf.py b/ch06/03_bonus_imdb-classification/train-bert-hf.py
index df78cd9..2ded1d2 100644
--- a/ch06/03_bonus_imdb-classification/train-bert-hf.py
+++ b/ch06/03_bonus_imdb-classification/train-bert-hf.py
@@ -54,7 +54,7 @@ class IMDBDataset(Dataset):
 
 def calc_loss_batch(input_batch, target_batch, model, device):
     input_batch, target_batch = input_batch.to(device), target_batch.to(device)
-    # logits = model(input_batch)[:, -1, :]  # Logits of last ouput token
+    # logits = model(input_batch)[:, -1, :]  # Logits of last output token
     logits = model(input_batch).logits
     loss = torch.nn.functional.cross_entropy(logits, target_batch)
     return loss
@@ -90,7 +90,7 @@ def calc_accuracy_loader(data_loader, model, device, num_batches=None):
     for i, (input_batch, target_batch) in enumerate(data_loader):
         if i < num_batches:
             input_batch, target_batch = input_batch.to(device), target_batch.to(device)
-            # logits = model(input_batch)[:, -1, :]  # Logits of last ouput token
+            # logits = model(input_batch)[:, -1, :]  # Logits of last output token
             logits = model(input_batch).logits
             predicted_labels = torch.argmax(logits, dim=1)
             num_examples += predicted_labels.shape[0]
diff --git a/ch06/03_bonus_imdb-classification/train-gpt.py b/ch06/03_bonus_imdb-classification/train-gpt.py
index 65da198..472d005 100644
--- a/ch06/03_bonus_imdb-classification/train-gpt.py
+++ b/ch06/03_bonus_imdb-classification/train-gpt.py
@@ -83,7 +83,7 @@ def instantiate_model(choose_model, load_weights):
 
 def calc_loss_batch(input_batch, target_batch, model, device, trainable_token=-1):
     input_batch, target_batch = input_batch.to(device), target_batch.to(device)
-    logits = model(input_batch)[:, trainable_token, :]  # Logits of last ouput token
+    logits = model(input_batch)[:, trainable_token, :]  # Logits of last output token
     loss = torch.nn.functional.cross_entropy(logits, target_batch)
     return loss
 
@@ -119,7 +119,7 @@ def calc_accuracy_loader(data_loader, model, device, num_batches=None, trainable
     for i, (input_batch, target_batch) in enumerate(data_loader):
         if i < num_batches:
             input_batch, target_batch = input_batch.to(device), target_batch.to(device)
-            logits = model(input_batch)[:, trainable_token, :]  # Logits of last ouput token
+            logits = model(input_batch)[:, trainable_token, :]  # Logits of last output token
             predicted_labels = torch.argmax(logits, dim=-1)
 
             num_examples += predicted_labels.shape[0]
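
For readers skimming the patch: the corrected comment marks the step where these fine-tuning scripts reduce the model's per-token output of shape (batch, num_tokens, num_classes) to one prediction per sequence. Only the logits of the last output token are kept, since in a causal GPT model that position is the only one conditioned on the full input. Below is a minimal self-contained sketch of that pattern; the ToyClassifier, vocabulary size, and tensor shapes are illustrative assumptions, not taken from the repository (the real scripts use the book's GPTModel or a Hugging Face model).

    import torch

    class ToyClassifier(torch.nn.Module):
        # Hypothetical stand-in for the GPT model: maps (batch, num_tokens)
        # token IDs to (batch, num_tokens, num_classes) logits.
        def __init__(self, vocab_size=100, emb_dim=8, num_classes=2):
            super().__init__()
            self.emb = torch.nn.Embedding(vocab_size, emb_dim)
            self.head = torch.nn.Linear(emb_dim, num_classes)

        def forward(self, x):
            return self.head(self.emb(x))

    model = ToyClassifier()
    input_batch = torch.randint(0, 100, (4, 10))  # 4 sequences of 10 token IDs
    target_batch = torch.randint(0, 2, (4,))      # one class label per sequence

    logits = model(input_batch)[:, -1, :]  # Logits of last output token: (4, num_classes)
    loss = torch.nn.functional.cross_entropy(logits, target_batch)
    predicted_labels = torch.argmax(logits, dim=-1)

As the hunks above show, additional-experiments.py and train-gpt.py generalize the hard-coded -1 to a trainable_token parameter, while train-bert-hf.py keeps the indexing line only as a comment because the Hugging Face classification model already returns one set of per-sequence logits via its .logits attribute.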