mirror of https://github.com/rasbt/LLMs-from-scratch.git (synced 2026-04-10 12:33:42 +00:00)
commit c21bfe4a23 (parent 7757c3d308), committed via GitHub
4 pkg/llms_from_scratch/__init__.py Normal file
@@ -0,0 +1,4 @@
# Copyright (c) Sebastian Raschka under Apache License 2.0 (see LICENSE.txt).
# Source for "Build a Large Language Model From Scratch"
# - https://www.manning.com/books/build-a-large-language-model-from-scratch
# Code: https://github.com/rasbt/LLMs-from-scratch
44 pkg/llms_from_scratch/appendix_a.py Normal file
@@ -0,0 +1,44 @@
# Copyright (c) Sebastian Raschka under Apache License 2.0 (see LICENSE.txt).
# Source for "Build a Large Language Model From Scratch"
# - https://www.manning.com/books/build-a-large-language-model-from-scratch
# Code: https://github.com/rasbt/LLMs-from-scratch

import torch
from torch.utils.data import Dataset


class NeuralNetwork(torch.nn.Module):
    def __init__(self, num_inputs, num_outputs):
        super().__init__()

        self.layers = torch.nn.Sequential(

            # 1st hidden layer
            torch.nn.Linear(num_inputs, 30),
            torch.nn.ReLU(),

            # 2nd hidden layer
            torch.nn.Linear(30, 20),
            torch.nn.ReLU(),

            # output layer
            torch.nn.Linear(20, num_outputs),
        )

    def forward(self, x):
        logits = self.layers(x)
        return logits


class ToyDataset(Dataset):
    def __init__(self, X, y):
        self.features = X
        self.labels = y

    def __getitem__(self, index):
        one_x = self.features[index]
        one_y = self.labels[index]
        return one_x, one_y

    def __len__(self):
        return self.labels.shape[0]
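
Usage sketch (not part of the committed file): how the two classes above compose with a DataLoader. The sample values are made up for illustration.

from llms_from_scratch.appendix_a import NeuralNetwork, ToyDataset

import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader

X = torch.tensor([[-1.2, 3.1], [2.3, -1.1]])  # two toy samples, two features each
y = torch.tensor([0, 1])
loader = DataLoader(ToyDataset(X, y), batch_size=2, shuffle=True)

model = NeuralNetwork(num_inputs=2, num_outputs=2)
for features, labels in loader:
    logits = model(features)               # shape: (batch_size, 2)
    loss = F.cross_entropy(logits, labels)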
94 pkg/llms_from_scratch/appendix_d.py Normal file
@@ -0,0 +1,94 @@
# Copyright (c) Sebastian Raschka under Apache License 2.0 (see LICENSE.txt).
# Source for "Build a Large Language Model From Scratch"
# - https://www.manning.com/books/build-a-large-language-model-from-scratch
# Code: https://github.com/rasbt/LLMs-from-scratch

from .ch05 import calc_loss_batch, evaluate_model, generate_and_print_sample

import math
import torch


def find_highest_gradient(model):
    max_grad = None
    for param in model.parameters():
        if param.grad is not None:
            grad_values = param.grad.data.flatten()
            max_grad_param = grad_values.max()
            if max_grad is None or max_grad_param > max_grad:
                max_grad = max_grad_param
    return max_grad


def train_model(model, train_loader, val_loader, optimizer, device,
                n_epochs, eval_freq, eval_iter, start_context, tokenizer,
                warmup_steps, initial_lr=3e-05, min_lr=1e-6, orig_book_version=False):

    train_losses, val_losses, track_tokens_seen, track_lrs = [], [], [], []
    tokens_seen, global_step = 0, -1

    # Retrieve the maximum learning rate from the optimizer
    peak_lr = optimizer.param_groups[0]["lr"]

    # Calculate the total number of iterations in the training process
    total_training_steps = len(train_loader) * n_epochs

    # Calculate the learning rate increment during the warmup phase
    lr_increment = (peak_lr - initial_lr) / warmup_steps

    for epoch in range(n_epochs):
        model.train()
        for input_batch, target_batch in train_loader:
            optimizer.zero_grad()
            global_step += 1

            # Adjust the learning rate based on the current phase (warmup or cosine annealing)
            if global_step < warmup_steps:
                # Linear warmup
                lr = initial_lr + global_step * lr_increment
            else:
                # Cosine annealing after warmup
                progress = ((global_step - warmup_steps) /
                            (total_training_steps - warmup_steps))
                lr = min_lr + (peak_lr - min_lr) * 0.5 * (1 + math.cos(math.pi * progress))

            # Apply the calculated learning rate to the optimizer
            for param_group in optimizer.param_groups:
                param_group["lr"] = lr
            track_lrs.append(lr)  # Store the current learning rate

            # Calculate and backpropagate the loss
            loss = calc_loss_batch(input_batch, target_batch, model, device)
            loss.backward()

            # Apply gradient clipping after the warmup phase to avoid exploding gradients
            if orig_book_version:
                if global_step > warmup_steps:
                    torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
            else:
                if global_step >= warmup_steps:  # the book originally used global_step > warmup_steps, which led to a skipped clipping step after warmup
                    torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)

            optimizer.step()
            tokens_seen += input_batch.numel()

            # Periodically evaluate the model on the training and validation sets
            if global_step % eval_freq == 0:
                train_loss, val_loss = evaluate_model(
                    model, train_loader, val_loader,
                    device, eval_iter
                )
                train_losses.append(train_loss)
                val_losses.append(val_loss)
                track_tokens_seen.append(tokens_seen)
                # Print the current losses
                print(f"Ep {epoch+1} (Iter {global_step:06d}): "
                      f"Train loss {train_loss:.3f}, "
                      f"Val loss {val_loss:.3f}")

        # Generate and print a sample from the model to monitor progress
        generate_and_print_sample(
            model, tokenizer, device, start_context
        )

    return train_losses, val_losses, track_tokens_seen, track_lrs
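
Schedule sketch (not part of the committed file): the warmup-plus-cosine curve that train_model implements, computed standalone so its shape can be inspected. All step counts and learning rates here are hypothetical.

import math

peak_lr, initial_lr, min_lr = 1e-3, 3e-5, 1e-6  # hypothetical values
warmup_steps, total_training_steps = 20, 200    # hypothetical values
lr_increment = (peak_lr - initial_lr) / warmup_steps

lrs = []
for step in range(total_training_steps):
    if step < warmup_steps:
        # Linear warmup, identical to the branch in train_model above
        lr = initial_lr + step * lr_increment
    else:
        # Cosine decay from peak_lr down to min_lr
        progress = (step - warmup_steps) / (total_training_steps - warmup_steps)
        lr = min_lr + (peak_lr - min_lr) * 0.5 * (1 + math.cos(math.pi * progress))
    lrs.append(lr)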
42 pkg/llms_from_scratch/appendix_e.py Normal file
@@ -0,0 +1,42 @@
# Copyright (c) Sebastian Raschka under Apache License 2.0 (see LICENSE.txt).
# Source for "Build a Large Language Model From Scratch"
# - https://www.manning.com/books/build-a-large-language-model-from-scratch
# Code: https://github.com/rasbt/LLMs-from-scratch

import torch
import math


class LoRALayer(torch.nn.Module):
    def __init__(self, in_dim, out_dim, rank, alpha):
        super().__init__()
        self.A = torch.nn.Parameter(torch.empty(in_dim, rank))
        torch.nn.init.kaiming_uniform_(self.A, a=math.sqrt(5))  # similar to standard weight initialization
        self.B = torch.nn.Parameter(torch.zeros(rank, out_dim))
        self.alpha = alpha

    def forward(self, x):
        x = self.alpha * (x @ self.A @ self.B)
        return x


class LinearWithLoRA(torch.nn.Module):
    def __init__(self, linear, rank, alpha):
        super().__init__()
        self.linear = linear
        self.lora = LoRALayer(
            linear.in_features, linear.out_features, rank, alpha
        )

    def forward(self, x):
        return self.linear(x) + self.lora(x)


def replace_linear_with_lora(model, rank, alpha):
    for name, module in model.named_children():
        if isinstance(module, torch.nn.Linear):
            # Replace the Linear layer with LinearWithLoRA
            setattr(model, name, LinearWithLoRA(module, rank, alpha))
        else:
            # Recursively apply the same function to child modules
            replace_linear_with_lora(module, rank, alpha)
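
Usage sketch (not part of the committed file): the effect of replace_linear_with_lora on a toy model. Once the base weights are frozen, only the LoRA A/B matrices remain trainable; the layer sizes here are made up.

from llms_from_scratch.appendix_e import replace_linear_with_lora

import torch

model = torch.nn.Sequential(
    torch.nn.Linear(8, 16), torch.nn.ReLU(), torch.nn.Linear(16, 2)
)
for param in model.parameters():
    param.requires_grad = False  # freeze the original weights

replace_linear_with_lora(model, rank=4, alpha=8)
trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
print(trainable)  # counts only the LoRA A and B parameters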
46 pkg/llms_from_scratch/ch02.py Normal file
@@ -0,0 +1,46 @@
# Copyright (c) Sebastian Raschka under Apache License 2.0 (see LICENSE.txt).
# Source for "Build a Large Language Model From Scratch"
# - https://www.manning.com/books/build-a-large-language-model-from-scratch
# Code: https://github.com/rasbt/LLMs-from-scratch

import torch
from torch.utils.data import Dataset, DataLoader
import tiktoken


class GPTDatasetV1(Dataset):
    def __init__(self, txt, tokenizer, max_length, stride):
        self.tokenizer = tokenizer
        self.input_ids = []
        self.target_ids = []

        # Tokenize the entire text
        token_ids = tokenizer.encode(txt, allowed_special={"<|endoftext|>"})

        # Use a sliding window to chunk the book into overlapping sequences of max_length
        for i in range(0, len(token_ids) - max_length, stride):
            input_chunk = token_ids[i:i + max_length]
            target_chunk = token_ids[i + 1: i + max_length + 1]
            self.input_ids.append(torch.tensor(input_chunk))
            self.target_ids.append(torch.tensor(target_chunk))

    def __len__(self):
        return len(self.input_ids)

    def __getitem__(self, idx):
        return self.input_ids[idx], self.target_ids[idx]


def create_dataloader_v1(txt, batch_size=4, max_length=256,
                         stride=128, shuffle=True, drop_last=True, num_workers=0):
    # Initialize the tokenizer
    tokenizer = tiktoken.get_encoding("gpt2")

    # Create dataset
    dataset = GPTDatasetV1(txt, tokenizer, max_length, stride)

    # Create dataloader
    dataloader = DataLoader(
        dataset, batch_size=batch_size, shuffle=shuffle, drop_last=drop_last, num_workers=num_workers)

    return dataloader
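
Usage sketch (not part of the committed file): create_dataloader_v1 on a short toy string; each batch yields input/target tensors shifted by one token.

from llms_from_scratch.ch02 import create_dataloader_v1

text = "In the heart of the city stood the old library, a relic from a bygone era."
loader = create_dataloader_v1(text, batch_size=1, max_length=4, stride=4, shuffle=False)
inputs, targets = next(iter(loader))
print(inputs)   # GPT-2 token IDs, shape (1, 4)
print(targets)  # the same IDs shifted right by one position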
151 pkg/llms_from_scratch/ch03.py Normal file
@@ -0,0 +1,151 @@
# Copyright (c) Sebastian Raschka under Apache License 2.0 (see LICENSE.txt).
# Source for "Build a Large Language Model From Scratch"
# - https://www.manning.com/books/build-a-large-language-model-from-scratch
# Code: https://github.com/rasbt/LLMs-from-scratch

import torch
import torch.nn as nn


class SelfAttention_v1(nn.Module):

    def __init__(self, d_in, d_out):
        super().__init__()
        self.W_query = nn.Parameter(torch.rand(d_in, d_out))
        self.W_key = nn.Parameter(torch.rand(d_in, d_out))
        self.W_value = nn.Parameter(torch.rand(d_in, d_out))

    def forward(self, x):
        keys = x @ self.W_key
        queries = x @ self.W_query
        values = x @ self.W_value

        attn_scores = queries @ keys.T  # omega
        attn_weights = torch.softmax(
            attn_scores / keys.shape[-1]**0.5, dim=-1
        )

        context_vec = attn_weights @ values
        return context_vec


class SelfAttention_v2(nn.Module):

    def __init__(self, d_in, d_out, qkv_bias=False):
        super().__init__()
        self.W_query = nn.Linear(d_in, d_out, bias=qkv_bias)
        self.W_key = nn.Linear(d_in, d_out, bias=qkv_bias)
        self.W_value = nn.Linear(d_in, d_out, bias=qkv_bias)

    def forward(self, x):
        keys = self.W_key(x)
        queries = self.W_query(x)
        values = self.W_value(x)

        attn_scores = queries @ keys.T
        attn_weights = torch.softmax(attn_scores / keys.shape[-1]**0.5, dim=-1)

        context_vec = attn_weights @ values
        return context_vec


class CausalAttention(nn.Module):

    def __init__(self, d_in, d_out, context_length,
                 dropout, qkv_bias=False):
        super().__init__()
        self.d_out = d_out
        self.W_query = nn.Linear(d_in, d_out, bias=qkv_bias)
        self.W_key = nn.Linear(d_in, d_out, bias=qkv_bias)
        self.W_value = nn.Linear(d_in, d_out, bias=qkv_bias)
        self.dropout = nn.Dropout(dropout)  # New
        self.register_buffer('mask', torch.triu(torch.ones(context_length, context_length), diagonal=1))  # New

    def forward(self, x):
        b, num_tokens, d_in = x.shape  # New batch dimension b
        # For inputs where `num_tokens` exceeds `context_length`, this will result in errors
        # in the mask creation further below.
        # In practice, this is not a problem since the LLM (chapters 4-7) ensures that inputs
        # do not exceed `context_length` before reaching this forward method.
        keys = self.W_key(x)
        queries = self.W_query(x)
        values = self.W_value(x)

        attn_scores = queries @ keys.transpose(1, 2)  # Changed transpose
        attn_scores.masked_fill_(  # New, _ ops are in-place
            self.mask.bool()[:num_tokens, :num_tokens], -torch.inf)  # `:num_tokens` to account for cases where the number of tokens in the batch is smaller than the supported context_size
        attn_weights = torch.softmax(
            attn_scores / keys.shape[-1]**0.5, dim=-1
        )
        attn_weights = self.dropout(attn_weights)  # New

        context_vec = attn_weights @ values
        return context_vec


class MultiHeadAttentionWrapper(nn.Module):
    def __init__(self, d_in, d_out, context_length, dropout, num_heads, qkv_bias=False):
        super().__init__()
        self.heads = nn.ModuleList(
            [CausalAttention(d_in, d_out, context_length, dropout, qkv_bias)
             for _ in range(num_heads)]
        )

    def forward(self, x):
        return torch.cat([head(x) for head in self.heads], dim=-1)


class MultiHeadAttention(nn.Module):
    def __init__(self, d_in, d_out, context_length, dropout, num_heads, qkv_bias=False):
        super().__init__()
        assert d_out % num_heads == 0, "d_out must be divisible by num_heads"

        self.d_out = d_out
        self.num_heads = num_heads
        self.head_dim = d_out // num_heads  # Reduce the projection dim to match desired output dim

        self.W_query = nn.Linear(d_in, d_out, bias=qkv_bias)
        self.W_key = nn.Linear(d_in, d_out, bias=qkv_bias)
        self.W_value = nn.Linear(d_in, d_out, bias=qkv_bias)
        self.out_proj = nn.Linear(d_out, d_out)  # Linear layer to combine head outputs
        self.dropout = nn.Dropout(dropout)
        self.register_buffer('mask', torch.triu(torch.ones(context_length, context_length), diagonal=1))

    def forward(self, x):
        b, num_tokens, d_in = x.shape

        keys = self.W_key(x)  # Shape: (b, num_tokens, d_out)
        queries = self.W_query(x)
        values = self.W_value(x)

        # We implicitly split the matrix by adding a `num_heads` dimension
        # Unroll last dim: (b, num_tokens, d_out) -> (b, num_tokens, num_heads, head_dim)
        keys = keys.view(b, num_tokens, self.num_heads, self.head_dim)
        values = values.view(b, num_tokens, self.num_heads, self.head_dim)
        queries = queries.view(b, num_tokens, self.num_heads, self.head_dim)

        # Transpose: (b, num_tokens, num_heads, head_dim) -> (b, num_heads, num_tokens, head_dim)
        keys = keys.transpose(1, 2)
        queries = queries.transpose(1, 2)
        values = values.transpose(1, 2)

        # Compute scaled dot-product attention (aka self-attention) with a causal mask
        attn_scores = queries @ keys.transpose(2, 3)  # Dot product for each head

        # Original mask truncated to the number of tokens and converted to boolean
        mask_bool = self.mask.bool()[:num_tokens, :num_tokens]

        # Use the mask to fill attention scores
        attn_scores.masked_fill_(mask_bool, -torch.inf)

        attn_weights = torch.softmax(attn_scores / keys.shape[-1]**0.5, dim=-1)
        attn_weights = self.dropout(attn_weights)

        # Shape: (b, num_tokens, num_heads, head_dim)
        context_vec = (attn_weights @ values).transpose(1, 2)

        # Combine heads, where self.d_out = self.num_heads * self.head_dim
        context_vec = context_vec.reshape(b, num_tokens, self.d_out)
        context_vec = self.out_proj(context_vec)  # optional projection

        return context_vec
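
Usage sketch (not part of the committed file): running MultiHeadAttention on a random batch, to make the expected tensor shapes concrete. The dimensions are arbitrary, subject only to d_out being divisible by num_heads.

from llms_from_scratch.ch03 import MultiHeadAttention

import torch

torch.manual_seed(123)
batch = torch.rand(2, 6, 16)  # (batch_size, num_tokens, d_in)
mha = MultiHeadAttention(d_in=16, d_out=16, context_length=6, dropout=0.0, num_heads=2)
context_vecs = mha(batch)
print(context_vecs.shape)  # torch.Size([2, 6, 16])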
130 pkg/llms_from_scratch/ch04.py Normal file
@@ -0,0 +1,130 @@
# Copyright (c) Sebastian Raschka under Apache License 2.0 (see LICENSE.txt).
# Source for "Build a Large Language Model From Scratch"
# - https://www.manning.com/books/build-a-large-language-model-from-scratch
# Code: https://github.com/rasbt/LLMs-from-scratch

from .ch03 import MultiHeadAttention

import torch
import torch.nn as nn


class LayerNorm(nn.Module):
    def __init__(self, emb_dim):
        super().__init__()
        self.eps = 1e-5
        self.scale = nn.Parameter(torch.ones(emb_dim))
        self.shift = nn.Parameter(torch.zeros(emb_dim))

    def forward(self, x):
        mean = x.mean(dim=-1, keepdim=True)
        var = x.var(dim=-1, keepdim=True, unbiased=False)
        norm_x = (x - mean) / torch.sqrt(var + self.eps)
        return self.scale * norm_x + self.shift


class GELU(nn.Module):
    def __init__(self):
        super().__init__()

    def forward(self, x):
        return 0.5 * x * (1 + torch.tanh(
            torch.sqrt(torch.tensor(2.0 / torch.pi)) *
            (x + 0.044715 * torch.pow(x, 3))
        ))


class FeedForward(nn.Module):
    def __init__(self, cfg):
        super().__init__()
        self.layers = nn.Sequential(
            nn.Linear(cfg["emb_dim"], 4 * cfg["emb_dim"]),
            GELU(),
            nn.Linear(4 * cfg["emb_dim"], cfg["emb_dim"]),
        )

    def forward(self, x):
        return self.layers(x)


class TransformerBlock(nn.Module):
    def __init__(self, cfg):
        super().__init__()
        self.att = MultiHeadAttention(
            d_in=cfg["emb_dim"],
            d_out=cfg["emb_dim"],
            context_length=cfg["context_length"],
            num_heads=cfg["n_heads"],
            dropout=cfg["drop_rate"],
            qkv_bias=cfg["qkv_bias"])
        self.ff = FeedForward(cfg)
        self.norm1 = LayerNorm(cfg["emb_dim"])
        self.norm2 = LayerNorm(cfg["emb_dim"])
        self.drop_resid = nn.Dropout(cfg["drop_rate"])

    def forward(self, x):
        # Shortcut connection for attention block
        shortcut = x
        x = self.norm1(x)
        x = self.att(x)   # Shape [batch_size, num_tokens, emb_size]
        x = self.drop_resid(x)
        x = x + shortcut  # Add the original input back

        # Shortcut connection for feed-forward block
        shortcut = x
        x = self.norm2(x)
        x = self.ff(x)
        x = self.drop_resid(x)
        x = x + shortcut  # Add the original input back

        return x


class GPTModel(nn.Module):
    def __init__(self, cfg):
        super().__init__()
        self.tok_emb = nn.Embedding(cfg["vocab_size"], cfg["emb_dim"])
        self.pos_emb = nn.Embedding(cfg["context_length"], cfg["emb_dim"])
        self.drop_emb = nn.Dropout(cfg["drop_rate"])

        self.trf_blocks = nn.Sequential(
            *[TransformerBlock(cfg) for _ in range(cfg["n_layers"])])

        self.final_norm = LayerNorm(cfg["emb_dim"])
        self.out_head = nn.Linear(cfg["emb_dim"], cfg["vocab_size"], bias=False)

    def forward(self, in_idx):
        batch_size, seq_len = in_idx.shape
        tok_embeds = self.tok_emb(in_idx)
        pos_embeds = self.pos_emb(torch.arange(seq_len, device=in_idx.device))
        x = tok_embeds + pos_embeds  # Shape [batch_size, num_tokens, emb_size]
        x = self.drop_emb(x)
        x = self.trf_blocks(x)
        x = self.final_norm(x)
        logits = self.out_head(x)
        return logits


def generate_text_simple(model, idx, max_new_tokens, context_size):
    # idx is (B, T) array of indices in the current context
    for _ in range(max_new_tokens):

        # Crop current context if it exceeds the supported context size
        # E.g., if LLM supports only 5 tokens, and the context size is 10
        # then only the last 5 tokens are used as context
        idx_cond = idx[:, -context_size:]

        # Get the predictions
        with torch.no_grad():
            logits = model(idx_cond)

        # Focus only on the last time step
        # (batch, n_token, vocab_size) becomes (batch, vocab_size)
        logits = logits[:, -1, :]

        # Get the idx of the vocab entry with the highest logits value
        idx_next = torch.argmax(logits, dim=-1, keepdim=True)  # (batch, 1)

        # Append sampled index to the running sequence
        idx = torch.cat((idx, idx_next), dim=1)  # (batch, n_tokens+1)

    return idx
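
Usage sketch (not part of the committed file): wiring GPTModel and generate_text_simple together. The config below is deliberately tiny and made up for illustration; the real GPT-2 124M config appears in the tests further down.

from llms_from_scratch.ch04 import GPTModel, generate_text_simple

import torch

cfg = {  # toy config, illustration only
    "vocab_size": 50257, "context_length": 32, "emb_dim": 12,
    "n_heads": 2, "n_layers": 1, "drop_rate": 0.0, "qkv_bias": False,
}
model = GPTModel(cfg)
model.eval()

idx = torch.tensor([[6109, 3626, 6100, 345]])  # arbitrary GPT-2 token IDs
out = generate_text_simple(model, idx, max_new_tokens=5, context_size=cfg["context_length"])
print(out.shape)  # (1, 9): the 4 input tokens plus 5 greedily generated ones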
233 pkg/llms_from_scratch/ch05.py Normal file
@@ -0,0 +1,233 @@
# Copyright (c) Sebastian Raschka under Apache License 2.0 (see LICENSE.txt).
# Source for "Build a Large Language Model From Scratch"
# - https://www.manning.com/books/build-a-large-language-model-from-scratch
# Code: https://github.com/rasbt/LLMs-from-scratch

from .ch04 import generate_text_simple

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
import torch


def generate(model, idx, max_new_tokens, context_size, temperature=0.0, top_k=None, eos_id=None):

    # For-loop is the same as before: Get logits, and only focus on last time step
    for _ in range(max_new_tokens):
        idx_cond = idx[:, -context_size:]
        with torch.no_grad():
            logits = model(idx_cond)
        logits = logits[:, -1, :]

        # New: Filter logits with top_k sampling
        if top_k is not None:
            # Keep only top_k values
            top_logits, _ = torch.topk(logits, top_k)
            min_val = top_logits[:, -1]
            logits = torch.where(logits < min_val, torch.tensor(float('-inf')).to(logits.device), logits)

        # New: Apply temperature scaling
        if temperature > 0.0:
            logits = logits / temperature

            # Apply softmax to get probabilities
            probs = torch.softmax(logits, dim=-1)  # (batch_size, vocab_size)

            # Sample from the distribution
            idx_next = torch.multinomial(probs, num_samples=1)  # (batch_size, 1)

        # Otherwise same as before: get idx of the vocab entry with the highest logits value
        else:
            idx_next = torch.argmax(logits, dim=-1, keepdim=True)  # (batch_size, 1)

        if idx_next == eos_id:  # Stop generating early if end-of-sequence token is encountered and eos_id is specified
            break

        # Same as before: append sampled index to the running sequence
        idx = torch.cat((idx, idx_next), dim=1)  # (batch_size, num_tokens+1)

    return idx


def train_model_simple(model, train_loader, val_loader, optimizer, device, num_epochs,
                       eval_freq, eval_iter, start_context, tokenizer):
    # Initialize lists to track losses and tokens seen
    train_losses, val_losses, track_tokens_seen = [], [], []
    tokens_seen, global_step = 0, -1

    # Main training loop
    for epoch in range(num_epochs):
        model.train()  # Set model to training mode

        for input_batch, target_batch in train_loader:
            optimizer.zero_grad()  # Reset loss gradients from previous batch iteration
            loss = calc_loss_batch(input_batch, target_batch, model, device)
            loss.backward()  # Calculate loss gradients
            optimizer.step()  # Update model weights using loss gradients
            tokens_seen += input_batch.numel()
            global_step += 1

            # Optional evaluation step
            if global_step % eval_freq == 0:
                train_loss, val_loss = evaluate_model(
                    model, train_loader, val_loader, device, eval_iter)
                train_losses.append(train_loss)
                val_losses.append(val_loss)
                track_tokens_seen.append(tokens_seen)
                print(f"Ep {epoch+1} (Step {global_step:06d}): "
                      f"Train loss {train_loss:.3f}, Val loss {val_loss:.3f}")

        # Print a sample text after each epoch
        generate_and_print_sample(
            model, tokenizer, device, start_context
        )

    return train_losses, val_losses, track_tokens_seen


def evaluate_model(model, train_loader, val_loader, device, eval_iter):
    model.eval()
    with torch.no_grad():
        train_loss = calc_loss_loader(train_loader, model, device, num_batches=eval_iter)
        val_loss = calc_loss_loader(val_loader, model, device, num_batches=eval_iter)
    model.train()
    return train_loss, val_loss


def generate_and_print_sample(model, tokenizer, device, start_context):
    model.eval()
    context_size = model.pos_emb.weight.shape[0]
    encoded = text_to_token_ids(start_context, tokenizer).to(device)
    with torch.no_grad():
        token_ids = generate_text_simple(
            model=model, idx=encoded,
            max_new_tokens=50, context_size=context_size
        )
    decoded_text = token_ids_to_text(token_ids, tokenizer)
    print(decoded_text.replace("\n", " "))  # Compact print format
    model.train()


def assign(left, right):
    if left.shape != right.shape:
        raise ValueError(f"Shape mismatch. Left: {left.shape}, Right: {right.shape}")
    return torch.nn.Parameter(torch.tensor(right))


def load_weights_into_gpt(gpt, params):
    gpt.pos_emb.weight = assign(gpt.pos_emb.weight, params['wpe'])
    gpt.tok_emb.weight = assign(gpt.tok_emb.weight, params['wte'])

    for b in range(len(params["blocks"])):
        q_w, k_w, v_w = np.split(
            (params["blocks"][b]["attn"]["c_attn"])["w"], 3, axis=-1)
        gpt.trf_blocks[b].att.W_query.weight = assign(
            gpt.trf_blocks[b].att.W_query.weight, q_w.T)
        gpt.trf_blocks[b].att.W_key.weight = assign(
            gpt.trf_blocks[b].att.W_key.weight, k_w.T)
        gpt.trf_blocks[b].att.W_value.weight = assign(
            gpt.trf_blocks[b].att.W_value.weight, v_w.T)

        q_b, k_b, v_b = np.split(
            (params["blocks"][b]["attn"]["c_attn"])["b"], 3, axis=-1)
        gpt.trf_blocks[b].att.W_query.bias = assign(
            gpt.trf_blocks[b].att.W_query.bias, q_b)
        gpt.trf_blocks[b].att.W_key.bias = assign(
            gpt.trf_blocks[b].att.W_key.bias, k_b)
        gpt.trf_blocks[b].att.W_value.bias = assign(
            gpt.trf_blocks[b].att.W_value.bias, v_b)

        gpt.trf_blocks[b].att.out_proj.weight = assign(
            gpt.trf_blocks[b].att.out_proj.weight,
            params["blocks"][b]["attn"]["c_proj"]["w"].T)
        gpt.trf_blocks[b].att.out_proj.bias = assign(
            gpt.trf_blocks[b].att.out_proj.bias,
            params["blocks"][b]["attn"]["c_proj"]["b"])

        gpt.trf_blocks[b].ff.layers[0].weight = assign(
            gpt.trf_blocks[b].ff.layers[0].weight,
            params["blocks"][b]["mlp"]["c_fc"]["w"].T)
        gpt.trf_blocks[b].ff.layers[0].bias = assign(
            gpt.trf_blocks[b].ff.layers[0].bias,
            params["blocks"][b]["mlp"]["c_fc"]["b"])
        gpt.trf_blocks[b].ff.layers[2].weight = assign(
            gpt.trf_blocks[b].ff.layers[2].weight,
            params["blocks"][b]["mlp"]["c_proj"]["w"].T)
        gpt.trf_blocks[b].ff.layers[2].bias = assign(
            gpt.trf_blocks[b].ff.layers[2].bias,
            params["blocks"][b]["mlp"]["c_proj"]["b"])

        gpt.trf_blocks[b].norm1.scale = assign(
            gpt.trf_blocks[b].norm1.scale,
            params["blocks"][b]["ln_1"]["g"])
        gpt.trf_blocks[b].norm1.shift = assign(
            gpt.trf_blocks[b].norm1.shift,
            params["blocks"][b]["ln_1"]["b"])
        gpt.trf_blocks[b].norm2.scale = assign(
            gpt.trf_blocks[b].norm2.scale,
            params["blocks"][b]["ln_2"]["g"])
        gpt.trf_blocks[b].norm2.shift = assign(
            gpt.trf_blocks[b].norm2.shift,
            params["blocks"][b]["ln_2"]["b"])

    gpt.final_norm.scale = assign(gpt.final_norm.scale, params["g"])
    gpt.final_norm.shift = assign(gpt.final_norm.shift, params["b"])
    gpt.out_head.weight = assign(gpt.out_head.weight, params["wte"])


def text_to_token_ids(text, tokenizer):
    encoded = tokenizer.encode(text, allowed_special={"<|endoftext|>"})
    encoded_tensor = torch.tensor(encoded).unsqueeze(0)  # add batch dimension
    return encoded_tensor


def token_ids_to_text(token_ids, tokenizer):
    flat = token_ids.squeeze(0)  # remove batch dimension
    return tokenizer.decode(flat.tolist())


def calc_loss_batch(input_batch, target_batch, model, device):
    input_batch, target_batch = input_batch.to(device), target_batch.to(device)
    logits = model(input_batch)
    loss = torch.nn.functional.cross_entropy(logits.flatten(0, 1), target_batch.flatten())
    return loss


def calc_loss_loader(data_loader, model, device, num_batches=None):
    total_loss = 0.
    if len(data_loader) == 0:
        return float("nan")
    elif num_batches is None:
        num_batches = len(data_loader)
    else:
        # Reduce the number of batches to match the total number of batches in the data loader
        # if num_batches exceeds the number of batches in the data loader
        num_batches = min(num_batches, len(data_loader))
    for i, (input_batch, target_batch) in enumerate(data_loader):
        if i < num_batches:
            loss = calc_loss_batch(input_batch, target_batch, model, device)
            total_loss += loss.item()
        else:
            break
    return total_loss / num_batches


def plot_losses(epochs_seen, tokens_seen, train_losses, val_losses):
    fig, ax1 = plt.subplots(figsize=(5, 3))

    # Plot training and validation loss against epochs
    ax1.plot(epochs_seen, train_losses, label="Training loss")
    ax1.plot(epochs_seen, val_losses, linestyle="-.", label="Validation loss")
    ax1.set_xlabel("Epochs")
    ax1.set_ylabel("Loss")
    ax1.legend(loc="upper right")
    ax1.xaxis.set_major_locator(MaxNLocator(integer=True))  # only show integer labels on x-axis

    # Create a second x-axis for tokens seen
    ax2 = ax1.twiny()  # Create a second x-axis that shares the same y-axis
    ax2.plot(tokens_seen, train_losses, alpha=0)  # Invisible plot for aligning ticks
    ax2.set_xlabel("Tokens seen")

    fig.tight_layout()  # Adjust layout to make room
    plt.savefig("loss-plot.pdf")
    plt.show()
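
Usage sketch (not part of the committed file): the generate sampling loop with top-k filtering and temperature scaling, reusing the same toy config idea as above. With an untrained model the output is gibberish; the sketch only shows the call signature.

from llms_from_scratch.ch04 import GPTModel
from llms_from_scratch.ch05 import generate, text_to_token_ids, token_ids_to_text

import tiktoken
import torch

tokenizer = tiktoken.get_encoding("gpt2")
cfg = {"vocab_size": 50257, "context_length": 32, "emb_dim": 12,
       "n_heads": 2, "n_layers": 1, "drop_rate": 0.0, "qkv_bias": False}  # toy config
model = GPTModel(cfg)
model.eval()

torch.manual_seed(123)
token_ids = generate(
    model=model,
    idx=text_to_token_ids("Every effort moves you", tokenizer),
    max_new_tokens=10, context_size=cfg["context_length"],
    top_k=25, temperature=1.4
)
print(token_ids_to_text(token_ids, tokenizer))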
254 pkg/llms_from_scratch/ch06.py Normal file
@@ -0,0 +1,254 @@
# Copyright (c) Sebastian Raschka under Apache License 2.0 (see LICENSE.txt).
# Source for "Build a Large Language Model From Scratch"
# - https://www.manning.com/books/build-a-large-language-model-from-scratch
# Code: https://github.com/rasbt/LLMs-from-scratch


import urllib.request
import zipfile
import os
from pathlib import Path

import matplotlib.pyplot as plt
from torch.utils.data import Dataset
import torch
import pandas as pd


def download_and_unzip_spam_data(url, zip_path, extracted_path, data_file_path):
    if data_file_path.exists():
        print(f"{data_file_path} already exists. Skipping download and extraction.")
        return

    # Downloading the file
    with urllib.request.urlopen(url) as response:
        with open(zip_path, "wb") as out_file:
            out_file.write(response.read())

    # Unzipping the file
    with zipfile.ZipFile(zip_path, "r") as zip_ref:
        zip_ref.extractall(extracted_path)

    # Add .tsv file extension
    original_file_path = Path(extracted_path) / "SMSSpamCollection"
    os.rename(original_file_path, data_file_path)
    print(f"File downloaded and saved as {data_file_path}")


def create_balanced_dataset(df):

    # Count the instances of "spam"
    num_spam = df[df["Label"] == "spam"].shape[0]

    # Randomly sample "ham" instances to match the number of "spam" instances
    ham_subset = df[df["Label"] == "ham"].sample(num_spam, random_state=123)

    # Combine the ham subset with the "spam" instances
    balanced_df = pd.concat([ham_subset, df[df["Label"] == "spam"]])

    return balanced_df


def random_split(df, train_frac, validation_frac):
    # Shuffle the entire DataFrame
    df = df.sample(frac=1, random_state=123).reset_index(drop=True)

    # Calculate split indices
    train_end = int(len(df) * train_frac)
    validation_end = train_end + int(len(df) * validation_frac)

    # Split the DataFrame
    train_df = df[:train_end]
    validation_df = df[train_end:validation_end]
    test_df = df[validation_end:]

    return train_df, validation_df, test_df


class SpamDataset(Dataset):
    def __init__(self, csv_file, tokenizer, max_length=None, pad_token_id=50256):
        self.data = pd.read_csv(csv_file)

        # Pre-tokenize texts
        self.encoded_texts = [
            tokenizer.encode(text) for text in self.data["Text"]
        ]

        if max_length is None:
            self.max_length = self._longest_encoded_length()
        else:
            self.max_length = max_length
            # Truncate sequences if they are longer than max_length
            self.encoded_texts = [
                encoded_text[:self.max_length]
                for encoded_text in self.encoded_texts
            ]

        # Pad sequences to the longest sequence
        self.encoded_texts = [
            encoded_text + [pad_token_id] * (self.max_length - len(encoded_text))
            for encoded_text in self.encoded_texts
        ]

    def __getitem__(self, index):
        encoded = self.encoded_texts[index]
        label = self.data.iloc[index]["Label"]
        return (
            torch.tensor(encoded, dtype=torch.long),
            torch.tensor(label, dtype=torch.long)
        )

    def __len__(self):
        return len(self.data)

    def _longest_encoded_length(self):
        max_length = 0
        for encoded_text in self.encoded_texts:
            encoded_length = len(encoded_text)
            if encoded_length > max_length:
                max_length = encoded_length
        return max_length
        # Note: A more pythonic version to implement this method
        # is the following, which is also used in the next chapter:
        # return max(len(encoded_text) for encoded_text in self.encoded_texts)


def calc_accuracy_loader(data_loader, model, device, num_batches=None):
    model.eval()
    correct_predictions, num_examples = 0, 0

    if num_batches is None:
        num_batches = len(data_loader)
    else:
        num_batches = min(num_batches, len(data_loader))
    for i, (input_batch, target_batch) in enumerate(data_loader):
        if i < num_batches:
            input_batch, target_batch = input_batch.to(device), target_batch.to(device)

            with torch.no_grad():
                logits = model(input_batch)[:, -1, :]  # Logits of last output token
            predicted_labels = torch.argmax(logits, dim=-1)

            num_examples += predicted_labels.shape[0]
            correct_predictions += (predicted_labels == target_batch).sum().item()
        else:
            break
    return correct_predictions / num_examples


def calc_loss_batch(input_batch, target_batch, model, device):
    input_batch, target_batch = input_batch.to(device), target_batch.to(device)
    logits = model(input_batch)[:, -1, :]  # Logits of last output token
    loss = torch.nn.functional.cross_entropy(logits, target_batch)
    return loss


def calc_loss_loader(data_loader, model, device, num_batches=None):
    total_loss = 0.
    if len(data_loader) == 0:
        return float("nan")
    elif num_batches is None:
        num_batches = len(data_loader)
    else:
        # Reduce the number of batches to match the total number of batches in the data loader
        # if num_batches exceeds the number of batches in the data loader
        num_batches = min(num_batches, len(data_loader))
    for i, (input_batch, target_batch) in enumerate(data_loader):
        if i < num_batches:
            loss = calc_loss_batch(input_batch, target_batch, model, device)
            total_loss += loss.item()
        else:
            break
    return total_loss / num_batches


def evaluate_model(model, train_loader, val_loader, device, eval_iter):
    model.eval()
    with torch.no_grad():
        train_loss = calc_loss_loader(train_loader, model, device, num_batches=eval_iter)
        val_loss = calc_loss_loader(val_loader, model, device, num_batches=eval_iter)
    model.train()
    return train_loss, val_loss


def train_classifier_simple(model, train_loader, val_loader, optimizer, device, num_epochs,
                            eval_freq, eval_iter):
    # Initialize lists to track losses and examples seen
    train_losses, val_losses, train_accs, val_accs = [], [], [], []
    examples_seen, global_step = 0, -1

    # Main training loop
    for epoch in range(num_epochs):
        model.train()  # Set model to training mode

        for input_batch, target_batch in train_loader:
            optimizer.zero_grad()  # Reset loss gradients from previous batch iteration
            loss = calc_loss_batch(input_batch, target_batch, model, device)
            loss.backward()  # Calculate loss gradients
            optimizer.step()  # Update model weights using loss gradients
            examples_seen += input_batch.shape[0]  # New: track examples instead of tokens
            global_step += 1

            # Optional evaluation step
            if global_step % eval_freq == 0:
                train_loss, val_loss = evaluate_model(
                    model, train_loader, val_loader, device, eval_iter)
                train_losses.append(train_loss)
                val_losses.append(val_loss)
                print(f"Ep {epoch+1} (Step {global_step:06d}): "
                      f"Train loss {train_loss:.3f}, Val loss {val_loss:.3f}")

        # Calculate accuracy after each epoch
        train_accuracy = calc_accuracy_loader(train_loader, model, device, num_batches=eval_iter)
        val_accuracy = calc_accuracy_loader(val_loader, model, device, num_batches=eval_iter)
        print(f"Training accuracy: {train_accuracy*100:.2f}% | ", end="")
        print(f"Validation accuracy: {val_accuracy*100:.2f}%")
        train_accs.append(train_accuracy)
        val_accs.append(val_accuracy)

    return train_losses, val_losses, train_accs, val_accs, examples_seen


def plot_values(epochs_seen, examples_seen, train_values, val_values, label="loss"):
    fig, ax1 = plt.subplots(figsize=(5, 3))

    # Plot training and validation loss against epochs
    ax1.plot(epochs_seen, train_values, label=f"Training {label}")
    ax1.plot(epochs_seen, val_values, linestyle="-.", label=f"Validation {label}")
    ax1.set_xlabel("Epochs")
    ax1.set_ylabel(label.capitalize())
    ax1.legend()

    # Create a second x-axis for examples seen
    ax2 = ax1.twiny()  # Create a second x-axis that shares the same y-axis
    ax2.plot(examples_seen, train_values, alpha=0)  # Invisible plot for aligning ticks
    ax2.set_xlabel("Examples seen")

    fig.tight_layout()  # Adjust layout to make room
    plt.savefig(f"{label}-plot.pdf")
    plt.show()


def classify_review(text, model, tokenizer, device, max_length=None, pad_token_id=50256):
    model.eval()

    # Prepare inputs to the model
    input_ids = tokenizer.encode(text)
    supported_context_length = model.pos_emb.weight.shape[0]
    # Note: In the book, this was originally written as pos_emb.weight.shape[1] by mistake
    # It didn't break the code but would have caused unnecessary truncation (to 768 instead of 1024)

    # Truncate sequences if they are too long
    # (max_length must be an int here; the default of None would make the min() call fail)
    input_ids = input_ids[:min(max_length, supported_context_length)]

    # Pad sequences to the longest sequence
    input_ids += [pad_token_id] * (max_length - len(input_ids))
    input_tensor = torch.tensor(input_ids, device=device).unsqueeze(0)  # add batch dimension

    # Model inference
    with torch.no_grad():
        logits = model(input_tensor)[:, -1, :]  # Logits of the last output token
    predicted_label = torch.argmax(logits, dim=-1).item()

    # Return the classified result
    return "spam" if predicted_label == 1 else "not spam"
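
Usage sketch (not part of the committed file): calling classify_review at inference time. The toy config and the untrained two-class head are made up, so the predicted label is meaningless here; the point is the call signature, including the explicit max_length.

from llms_from_scratch.ch04 import GPTModel
from llms_from_scratch.ch06 import classify_review

import tiktoken
import torch

cfg = {"vocab_size": 50257, "context_length": 120, "emb_dim": 12,
       "n_heads": 2, "n_layers": 1, "drop_rate": 0.0, "qkv_bias": False}  # toy config
model = GPTModel(cfg)
model.out_head = torch.nn.Linear(cfg["emb_dim"], 2)  # two-class head, untrained in this sketch
model.eval()

tokenizer = tiktoken.get_encoding("gpt2")
text = "You are a winner you have been specially selected to receive $1000 cash."
print(classify_review(text, model, tokenizer, device="cpu",
                      max_length=120))  # must match the training dataset's max_length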
247 pkg/llms_from_scratch/ch07.py Normal file
@@ -0,0 +1,247 @@
# Copyright (c) Sebastian Raschka under Apache License 2.0 (see LICENSE.txt).
# Source for "Build a Large Language Model From Scratch"
# - https://www.manning.com/books/build-a-large-language-model-from-scratch
# Code: https://github.com/rasbt/LLMs-from-scratch

import json
import os
import psutil
import urllib.request

import torch
from tqdm import tqdm
from torch.utils.data import Dataset


def download_and_load_file(file_path, url):

    if not os.path.exists(file_path):
        with urllib.request.urlopen(url) as response:
            text_data = response.read().decode("utf-8")
        with open(file_path, "w", encoding="utf-8") as file:
            file.write(text_data)

    # The book originally contained this unnecessary "else" clause:
    # else:
    #     with open(file_path, "r", encoding="utf-8") as file:
    #         text_data = file.read()

    with open(file_path, "r", encoding="utf-8") as file:
        data = json.load(file)

    return data


def format_input(entry):
    instruction_text = (
        f"Below is an instruction that describes a task. "
        f"Write a response that appropriately completes the request."
        f"\n\n### Instruction:\n{entry['instruction']}"
    )

    input_text = f"\n\n### Input:\n{entry['input']}" if entry["input"] else ""

    return instruction_text + input_text


class InstructionDataset(Dataset):
    def __init__(self, data, tokenizer):
        self.data = data

        # Pre-tokenize texts
        self.encoded_texts = []
        for entry in data:
            instruction_plus_input = format_input(entry)
            response_text = f"\n\n### Response:\n{entry['output']}"
            full_text = instruction_plus_input + response_text
            self.encoded_texts.append(
                tokenizer.encode(full_text)
            )

    def __getitem__(self, index):
        return self.encoded_texts[index]

    def __len__(self):
        return len(self.data)


def custom_collate_draft_1(
        batch,
        pad_token_id=50256,
        device="cpu"
):
    # Find the longest sequence in the batch
    # and increase the max length by +1, which will add one extra
    # padding token below
    batch_max_length = max(len(item)+1 for item in batch)

    # Pad and prepare inputs
    inputs_lst = []

    for item in batch:
        new_item = item.copy()
        # Add an <|endoftext|> token
        new_item += [pad_token_id]
        # Pad sequences to batch_max_length
        padded = (
            new_item + [pad_token_id] *
            (batch_max_length - len(new_item))
        )
        # Via padded[:-1], we remove the extra padded token
        # that has been added via the +1 setting in batch_max_length
        # (the extra padding token will be relevant in later codes)
        inputs = torch.tensor(padded[:-1])
        inputs_lst.append(inputs)

    # Convert list of inputs to tensor and transfer to target device
    inputs_tensor = torch.stack(inputs_lst).to(device)
    return inputs_tensor


def custom_collate_draft_2(
        batch,
        pad_token_id=50256,
        device="cpu"
):
    # Find the longest sequence in the batch
    batch_max_length = max(len(item)+1 for item in batch)

    # Pad and prepare inputs
    inputs_lst, targets_lst = [], []

    for item in batch:
        new_item = item.copy()
        # Add an <|endoftext|> token
        new_item += [pad_token_id]
        # Pad sequences to max_length
        padded = (
            new_item + [pad_token_id] *
            (batch_max_length - len(new_item))
        )
        inputs = torch.tensor(padded[:-1])  # Truncate the last token for inputs
        targets = torch.tensor(padded[1:])  # Shift +1 to the right for targets
        inputs_lst.append(inputs)
        targets_lst.append(targets)

    # Convert list of inputs to tensor and transfer to target device
    inputs_tensor = torch.stack(inputs_lst).to(device)
    targets_tensor = torch.stack(targets_lst).to(device)
    return inputs_tensor, targets_tensor


def custom_collate_fn(
        batch,
        pad_token_id=50256,
        ignore_index=-100,
        allowed_max_length=None,
        device="cpu"
):
    # Find the longest sequence in the batch
    batch_max_length = max(len(item)+1 for item in batch)

    # Pad and prepare inputs and targets
    inputs_lst, targets_lst = [], []

    for item in batch:
        new_item = item.copy()
        # Add an <|endoftext|> token
        new_item += [pad_token_id]
        # Pad sequences to max_length
        padded = (
            new_item + [pad_token_id] *
            (batch_max_length - len(new_item))
        )
        inputs = torch.tensor(padded[:-1])  # Truncate the last token for inputs
        targets = torch.tensor(padded[1:])  # Shift +1 to the right for targets

        # New: Replace all but the first padding tokens in targets by ignore_index
        mask = targets == pad_token_id
        indices = torch.nonzero(mask).squeeze()
        if indices.numel() > 1:
            targets[indices[1:]] = ignore_index

        # New: Optionally truncate to maximum sequence length
        if allowed_max_length is not None:
            inputs = inputs[:allowed_max_length]
            targets = targets[:allowed_max_length]

        inputs_lst.append(inputs)
        targets_lst.append(targets)

    # Convert list of inputs and targets to tensors and transfer to target device
    inputs_tensor = torch.stack(inputs_lst).to(device)
    targets_tensor = torch.stack(targets_lst).to(device)

    return inputs_tensor, targets_tensor


def check_if_running(process_name):
    running = False
    for proc in psutil.process_iter(["name"]):
        if process_name in proc.info["name"]:
            running = True
            break
    return running


def query_model(
        prompt,
        model="llama3",
        url="http://localhost:11434/api/chat"
):
    # Create the data payload as a dictionary
    data = {
        "model": model,
        "messages": [
            {"role": "user", "content": prompt}
        ],
        "options": {  # Settings below are required for deterministic responses
            "seed": 123,
            "temperature": 0,
            "num_ctx": 2048
        }
    }

    # Convert the dictionary to a JSON formatted string and encode it to bytes
    payload = json.dumps(data).encode("utf-8")

    # Create a request object, setting the method to POST and adding necessary headers
    request = urllib.request.Request(
        url,
        data=payload,
        method="POST"
    )
    request.add_header("Content-Type", "application/json")

    # Send the request and capture the response
    response_data = ""
    with urllib.request.urlopen(request) as response:
        # Read and decode the response
        while True:
            line = response.readline().decode("utf-8")
            if not line:
                break
            response_json = json.loads(line)
            response_data += response_json["message"]["content"]

    return response_data


def generate_model_scores(json_data, json_key, model="llama3"):
    scores = []
    for entry in tqdm(json_data, desc="Scoring entries"):
        prompt = (
            f"Given the input `{format_input(entry)}` "
            f"and correct output `{entry['output']}`, "
            f"score the model response `{entry[json_key]}`"
            f" on a scale from 0 to 100, where 100 is the best score. "
            f"Respond with the integer number only."
        )
        score = query_model(prompt, model)
        try:
            scores.append(int(score))
        except ValueError:
            print(f"Could not convert score: {score}")
            continue

    return scores
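
Usage sketch (not part of the committed file): custom_collate_fn on a toy batch of token-ID lists, showing the padding, the one-position target shift, and the -100 ignore-index masking of all padding tokens after the first.

from llms_from_scratch.ch07 import custom_collate_fn

batch = [
    [0, 1, 2, 3, 4],
    [5, 6],
    [7, 8, 9],
]
inputs, targets = custom_collate_fn(batch, device="cpu")
print(inputs)
# tensor([[    0,     1,     2,     3,     4],
#         [    5,     6, 50256, 50256, 50256],
#         [    7,     8,     9, 50256, 50256]])
print(targets)
# tensor([[    1,     2,     3,     4, 50256],
#         [    6, 50256,  -100,  -100,  -100],
#         [    8,     9, 50256,  -100,  -100]])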
70 pkg/llms_from_scratch/tests/test_appendix_a.py Normal file
@@ -0,0 +1,70 @@
# Copyright (c) Sebastian Raschka under Apache License 2.0 (see LICENSE.txt).
# Source for "Build a Large Language Model From Scratch"
# - https://www.manning.com/books/build-a-large-language-model-from-scratch
# Code: https://github.com/rasbt/LLMs-from-scratch

from llms_from_scratch.appendix_a import NeuralNetwork, ToyDataset

import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader


def test_dataset():

    X_train = torch.tensor([
        [-1.2, 3.1],
        [-0.9, 2.9],
        [-0.5, 2.6],
        [2.3, -1.1],
        [2.7, -1.5]
    ])

    y_train = torch.tensor([0, 0, 0, 1, 1])
    train_ds = ToyDataset(X_train, y_train)

    assert len(train_ds) == 5  # originally missing the `assert`, which made this line a no-op
    torch.manual_seed(123)

    train_loader = DataLoader(
        dataset=train_ds,
        batch_size=2,
        shuffle=True,
        num_workers=0
    )

    torch.manual_seed(123)
    model = NeuralNetwork(num_inputs=2, num_outputs=2)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.5)

    num_epochs = 3

    for epoch in range(num_epochs):

        model.train()
        for batch_idx, (features, labels) in enumerate(train_loader):

            logits = model(features)

            loss = F.cross_entropy(logits, labels)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            print(f"Epoch: {epoch+1:03d}/{num_epochs:03d}"
                  f" | Batch {batch_idx:03d}/{len(train_loader):03d}"
                  f" | Train/Val Loss: {loss:.2f}")

    model.eval()
    with torch.no_grad():
        outputs = model(X_train)

    expected = torch.tensor([
        [2.8569, -4.1618],
        [2.5382, -3.7548],
        [2.0944, -3.1820],
        [-1.4814, 1.4816],
        [-1.7176, 1.7342]
    ])
    # originally `torch.equal(outputs, expected)` without `assert`; a tolerance-based
    # comparison is needed here because the expected values are rounded to four decimals
    assert torch.allclose(outputs, expected, atol=1e-4)
118 pkg/llms_from_scratch/tests/test_appendix_d.py Normal file
@@ -0,0 +1,118 @@
# Copyright (c) Sebastian Raschka under Apache License 2.0 (see LICENSE.txt).
# Source for "Build a Large Language Model From Scratch"
# - https://www.manning.com/books/build-a-large-language-model-from-scratch
# Code: https://github.com/rasbt/LLMs-from-scratch

from llms_from_scratch.ch02 import create_dataloader_v1
from llms_from_scratch.ch04 import GPTModel
from llms_from_scratch.appendix_d import train_model

import os
import urllib.request

import tiktoken
import torch
from torch.utils.data import Subset, DataLoader


def test_train(tmp_path):

    GPT_CONFIG_124M = {
        "vocab_size": 50257,     # Vocabulary size
        "context_length": 256,   # Shortened context length (orig: 1024)
        "emb_dim": 768,          # Embedding dimension
        "n_heads": 12,           # Number of attention heads
        "n_layers": 12,          # Number of layers
        "drop_rate": 0.1,        # Dropout rate
        "qkv_bias": False        # Query-key-value bias
    }

    OTHER_SETTINGS = {
        "learning_rate": 5e-4,
        "num_epochs": 2,
        "batch_size": 1,
        "weight_decay": 0.1
    }

    torch.manual_seed(123)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    ##############################
    # Download data if necessary
    ##############################

    file_path = tmp_path / "the-verdict.txt"
    url = "https://raw.githubusercontent.com/rasbt/LLMs-from-scratch/main/ch02/01_main-chapter-code/the-verdict.txt"

    if not os.path.exists(file_path):
        with urllib.request.urlopen(url) as response:
            text_data = response.read().decode("utf-8")
        with open(file_path, "w", encoding="utf-8") as file:
            file.write(text_data)
    else:
        with open(file_path, "r", encoding="utf-8") as file:
            text_data = file.read()

    ##############################
    # Initialize model
    ##############################

    model = GPTModel(GPT_CONFIG_124M)
    model.to(device)  # no assignment model = model.to(device) necessary for nn.Module classes

    ##############################
    # Set up dataloaders
    ##############################

    # Train/validation ratio
    train_ratio = 0.90
    split_idx = int(train_ratio * len(text_data))

    train_loader = create_dataloader_v1(
        text_data[:split_idx],
        batch_size=OTHER_SETTINGS["batch_size"],
        max_length=GPT_CONFIG_124M["context_length"],
        stride=GPT_CONFIG_124M["context_length"],
        drop_last=True,
        shuffle=True,
        num_workers=0
    )

    val_loader = create_dataloader_v1(
        text_data[split_idx:],
        batch_size=OTHER_SETTINGS["batch_size"],
        max_length=GPT_CONFIG_124M["context_length"],
        stride=GPT_CONFIG_124M["context_length"],
        drop_last=False,
        shuffle=False,
        num_workers=0
    )

    ##############################
    # Train model
    ##############################

    tokenizer = tiktoken.get_encoding("gpt2")

    train_subset = Subset(train_loader.dataset, range(1))
    one_batch_train_loader = DataLoader(train_subset, batch_size=1)
    val_subset = Subset(val_loader.dataset, range(1))
    one_batch_val_loader = DataLoader(val_subset, batch_size=1)

    peak_lr = 0.001  # this was originally set to 5e-4 in the book by mistake
    optimizer = torch.optim.AdamW(model.parameters(), lr=peak_lr, weight_decay=0.1)  # the book accidentally omitted the lr assignment

    n_epochs = 6
    warmup_steps = 1

    train_losses, val_losses, tokens_seen, lrs = train_model(
        model, one_batch_train_loader, one_batch_val_loader, optimizer, device, n_epochs=n_epochs,
        eval_freq=5, eval_iter=1, start_context="Every effort moves you",
        tokenizer=tokenizer, warmup_steps=warmup_steps,
        initial_lr=1e-5, min_lr=1e-5
    )

    assert round(train_losses[0], 1) == 10.9
    assert round(val_losses[0], 1) == 11.0
    assert train_losses[-1] < train_losses[0]
150
pkg/llms_from_scratch/tests/test_appendix_e.py
Normal file
150
pkg/llms_from_scratch/tests/test_appendix_e.py
Normal file
@@ -0,0 +1,150 @@
# Copyright (c) Sebastian Raschka under Apache License 2.0 (see LICENSE.txt).
# Source for "Build a Large Language Model From Scratch"
# - https://www.manning.com/books/build-a-large-language-model-from-scratch
# Code: https://github.com/rasbt/LLMs-from-scratch


from llms_from_scratch.ch04 import GPTModel
from llms_from_scratch.ch06 import (
    download_and_unzip_spam_data, create_balanced_dataset,
    random_split, SpamDataset, train_classifier_simple
)
from llms_from_scratch.appendix_e import replace_linear_with_lora

from pathlib import Path
import urllib.error

import pandas as pd
import tiktoken
import torch
from torch.utils.data import DataLoader, Subset


def test_train_classifier_lora(tmp_path):

    ########################################
    # Download and prepare dataset
    ########################################

    url = "https://archive.ics.uci.edu/static/public/228/sms+spam+collection.zip"
    zip_path = tmp_path / "sms_spam_collection.zip"
    extracted_path = tmp_path / "sms_spam_collection"
    data_file_path = Path(extracted_path) / "SMSSpamCollection.tsv"

    try:
        download_and_unzip_spam_data(
            url, zip_path, extracted_path, data_file_path
        )
    except (urllib.error.HTTPError, urllib.error.URLError, TimeoutError) as e:
        print(f"Primary URL failed: {e}. Trying backup URL...")
        backup_url = "https://f001.backblazeb2.com/file/LLMs-from-scratch/sms%2Bspam%2Bcollection.zip"
        download_and_unzip_spam_data(
            backup_url, zip_path, extracted_path, data_file_path
        )

    df = pd.read_csv(data_file_path, sep="\t", header=None, names=["Label", "Text"])
    balanced_df = create_balanced_dataset(df)
    balanced_df["Label"] = balanced_df["Label"].map({"ham": 0, "spam": 1})

    train_df, validation_df, test_df = random_split(balanced_df, 0.7, 0.1)
    train_df.to_csv(tmp_path / "train.csv", index=None)
    validation_df.to_csv(tmp_path / "validation.csv", index=None)
    test_df.to_csv(tmp_path / "test.csv", index=None)

    ########################################
    # Create data loaders
    ########################################
    tokenizer = tiktoken.get_encoding("gpt2")

    train_dataset = SpamDataset(
        csv_file=tmp_path / "train.csv",
        max_length=None,
        tokenizer=tokenizer
    )

    val_dataset = SpamDataset(
        csv_file=tmp_path / "validation.csv",
        max_length=train_dataset.max_length,
        tokenizer=tokenizer
    )

    num_workers = 0
    batch_size = 8

    torch.manual_seed(123)

    train_loader = DataLoader(
        dataset=train_dataset,
        batch_size=batch_size,
        shuffle=True,
        num_workers=num_workers,
        drop_last=True,
    )

    val_loader = DataLoader(
        dataset=val_dataset,
        batch_size=batch_size,
        num_workers=num_workers,
        drop_last=False,
    )

    ########################################
    # Load pretrained model
    ########################################

    # Small GPT model for testing purposes
    BASE_CONFIG = {
        "vocab_size": 50257,
        "context_length": 120,
        "drop_rate": 0.0,
        "qkv_bias": False,
        "emb_dim": 12,
        "n_layers": 1,
        "n_heads": 2
    }
    model = GPTModel(BASE_CONFIG)
    model.eval()
    device = "cpu"

    ########################################
    # Modify pretrained model
    ########################################

    for param in model.parameters():
        param.requires_grad = False

    torch.manual_seed(123)

    num_classes = 2
    model.out_head = torch.nn.Linear(in_features=BASE_CONFIG["emb_dim"], out_features=num_classes)
    replace_linear_with_lora(model, rank=16, alpha=16)
    model.to(device)

    for param in model.trf_blocks[-1].parameters():
        param.requires_grad = True

    for param in model.final_norm.parameters():
        param.requires_grad = True

    ########################################
    # Finetune modified model
    ########################################

    torch.manual_seed(123)

    optimizer = torch.optim.AdamW(model.parameters(), lr=5e-5, weight_decay=0.1)

    train_subset = Subset(train_loader.dataset, range(5))
    batch_train_loader = DataLoader(train_subset, batch_size=5)
    val_subset = Subset(val_loader.dataset, range(5))
    batch_val_loader = DataLoader(val_subset, batch_size=5)

    num_epochs = 6
    train_losses, val_losses, train_accs, val_accs, examples_seen = train_classifier_simple(
        model, batch_train_loader, batch_val_loader, optimizer, device,
        num_epochs=num_epochs, eval_freq=1, eval_iter=1,
    )

    assert round(train_losses[0], 1) == 0.8
    assert round(val_losses[0], 1) == 0.8
    assert train_losses[-1] < train_losses[0]
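replace_linear_with_lora, imported from llms_from_scratch.appendix_e above, swaps every torch.nn.Linear in the model for a LoRA-augmented variant so that only small low-rank adapters receive gradients. A minimal sketch of the idea follows; the class and function names here (LoRALayer, LinearWithLoRA, replace_linear_with_lora_sketch) are illustrative, and the actual Appendix E implementation may differ in details:

import torch


class LoRALayer(torch.nn.Module):
    # Low-rank update: x @ A @ B, scaled by alpha
    def __init__(self, in_dim, out_dim, rank, alpha):
        super().__init__()
        self.A = torch.nn.Parameter(torch.empty(in_dim, rank))
        torch.nn.init.kaiming_uniform_(self.A, a=5**0.5)
        self.B = torch.nn.Parameter(torch.zeros(rank, out_dim))  # zero init: no change at start
        self.alpha = alpha

    def forward(self, x):
        return self.alpha * (x @ self.A @ self.B)


class LinearWithLoRA(torch.nn.Module):
    # Frozen original linear layer plus a trainable low-rank adapter
    def __init__(self, linear, rank, alpha):
        super().__init__()
        self.linear = linear
        self.lora = LoRALayer(linear.in_features, linear.out_features, rank, alpha)

    def forward(self, x):
        return self.linear(x) + self.lora(x)


def replace_linear_with_lora_sketch(module, rank, alpha):
    # Recursively replace nn.Linear children with LinearWithLoRA
    for name, child in module.named_children():
        if isinstance(child, torch.nn.Linear):
            setattr(module, name, LinearWithLoRA(child, rank, alpha))
        else:
            replace_linear_with_lora_sketch(child, rank, alpha)

Because B starts at zero, the wrapped model initially computes exactly the same outputs as the frozen original, which is what makes the 0.8 starting-loss assertion above reproducible.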
54
pkg/llms_from_scratch/tests/test_ch02.py
Normal file
@@ -0,0 +1,54 @@
# Copyright (c) Sebastian Raschka under Apache License 2.0 (see LICENSE.txt).
# Source for "Build a Large Language Model From Scratch"
# - https://www.manning.com/books/build-a-large-language-model-from-scratch
# Code: https://github.com/rasbt/LLMs-from-scratch

from llms_from_scratch.ch02 import create_dataloader_v1

import os
import urllib.request

import pytest
import torch


@pytest.mark.parametrize("file_name", ["the-verdict.txt"])
def test_dataloader(tmp_path, file_name):

    if not os.path.exists(file_name):
        url = ("https://raw.githubusercontent.com/rasbt/"
               "LLMs-from-scratch/main/ch02/01_main-chapter-code/"
               "the-verdict.txt")
        urllib.request.urlretrieve(url, file_name)

    with open(file_name, "r", encoding="utf-8") as f:
        raw_text = f.read()

    vocab_size = 50257
    output_dim = 256
    context_length = 1024

    token_embedding_layer = torch.nn.Embedding(vocab_size, output_dim)
    pos_embedding_layer = torch.nn.Embedding(context_length, output_dim)

    batch_size = 8
    max_length = 4
    dataloader = create_dataloader_v1(
        raw_text,
        batch_size=batch_size,
        max_length=max_length,
        stride=max_length
    )

    for batch in dataloader:
        x, y = batch

        token_embeddings = token_embedding_layer(x)
        pos_embeddings = pos_embedding_layer(torch.arange(max_length))

        input_embeddings = token_embeddings + pos_embeddings

        break

    assert input_embeddings.shape == torch.Size([8, 4, 256])
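create_dataloader_v1 wraps a sliding-window dataset: each sample pairs a max_length-token chunk with the same chunk shifted one token to the right, so that the target at each position is the next token. A minimal sketch of such a dataset, assuming a tiktoken-style tokenizer like the one used above (the class name SlidingWindowDataset is a hypothetical stand-in; the actual chapter 2 dataset class may differ in details):

import torch
from torch.utils.data import Dataset


class SlidingWindowDataset(Dataset):
    def __init__(self, txt, tokenizer, max_length, stride):
        token_ids = tokenizer.encode(txt)
        self.inputs, self.targets = [], []
        # Slide a window of max_length tokens over the text;
        # the target is the input shifted right by one token
        for i in range(0, len(token_ids) - max_length, stride):
            self.inputs.append(torch.tensor(token_ids[i:i + max_length]))
            self.targets.append(torch.tensor(token_ids[i + 1:i + max_length + 1]))

    def __len__(self):
        return len(self.inputs)

    def __getitem__(self, idx):
        return self.inputs[idx], self.targets[idx]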
22
pkg/llms_from_scratch/tests/test_ch03.py
Normal file
@@ -0,0 +1,22 @@
# Copyright (c) Sebastian Raschka under Apache License 2.0 (see LICENSE.txt).
# Source for "Build a Large Language Model From Scratch"
# - https://www.manning.com/books/build-a-large-language-model-from-scratch
# Code: https://github.com/rasbt/LLMs-from-scratch


from llms_from_scratch.ch03 import MultiHeadAttention
import torch


def test_mha():

    context_length = 100
    d_in = 256
    d_out = 16

    mha = MultiHeadAttention(d_in, d_out, context_length, 0.0, num_heads=2)

    batch = torch.rand(8, 6, d_in)
    context_vecs = mha(batch)

    assert context_vecs.shape == torch.Size([8, 6, d_out])
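Multi-head attention requires d_out to be divisible by num_heads; each head then operates on a d_out // num_heads slice of the projection. A quick shape walk-through of the split-and-merge reshaping such a layer performs internally (a sketch of the general technique, not the chapter 3 code itself):

import torch

b, t, d_out, num_heads = 8, 6, 16, 2
head_dim = d_out // num_heads  # 8

x = torch.rand(b, t, d_out)
# Split the last dimension into (num_heads, head_dim), then move heads forward
heads = x.view(b, t, num_heads, head_dim).transpose(1, 2)  # (8, 2, 6, 8)
# ...per-head attention would happen here...
# Merge heads back into a single d_out-dimensional representation
merged = heads.transpose(1, 2).contiguous().view(b, t, d_out)  # (8, 6, 16)
print(heads.shape, merged.shape)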
50
pkg/llms_from_scratch/tests/test_ch04.py
Normal file
@@ -0,0 +1,50 @@
# Copyright (c) Sebastian Raschka under Apache License 2.0 (see LICENSE.txt).
# Source for "Build a Large Language Model From Scratch"
# - https://www.manning.com/books/build-a-large-language-model-from-scratch
# Code: https://github.com/rasbt/LLMs-from-scratch

from llms_from_scratch.ch04 import GPTModel
from llms_from_scratch.ch04 import generate_text_simple

import torch
import tiktoken


def test_GPTModel():
    GPT_CONFIG_124M = {
        "vocab_size": 50257,     # Vocabulary size
        "context_length": 1024,  # Context length
        "emb_dim": 768,          # Embedding dimension
        "n_heads": 12,           # Number of attention heads
        "n_layers": 12,          # Number of layers
        "drop_rate": 0.1,        # Dropout rate
        "qkv_bias": False        # Query-Key-Value bias
    }

    torch.manual_seed(123)
    model = GPTModel(GPT_CONFIG_124M)
    model.eval()  # disable dropout

    start_context = "Hello, I am"

    tokenizer = tiktoken.get_encoding("gpt2")
    encoded = tokenizer.encode(start_context)
    encoded_tensor = torch.tensor(encoded).unsqueeze(0)

    print(f"\n{50*'='}\n{22*' '}IN\n{50*'='}")
    print("\nInput text:", start_context)
    print("Encoded input text:", encoded)
    print("encoded_tensor.shape:", encoded_tensor.shape)

    out = generate_text_simple(
        model=model,
        idx=encoded_tensor,
        max_new_tokens=10,
        context_size=GPT_CONFIG_124M["context_length"]
    )

    expect = torch.tensor([
        [15496, 11, 314, 716, 27018, 24086, 47843, 30961, 42348, 7267,
         49706, 43231, 47062, 34657]
    ])
    assert torch.equal(expect, out)
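generate_text_simple extends the prompt one token at a time by always picking the most likely next token, which is why a fixed seed yields the exact token IDs asserted above. A sketch of such a greedy decoding loop, assuming a model that maps (batch, n_tokens) token IDs to (batch, n_tokens, vocab_size) logits (the name greedy_generate is illustrative; the chapter 4 helper may differ in details):

import torch


def greedy_generate(model, idx, max_new_tokens, context_size):
    for _ in range(max_new_tokens):
        # Crop the running sequence to the model's supported context length
        idx_cond = idx[:, -context_size:]
        with torch.no_grad():
            logits = model(idx_cond)
        # Only the logits at the last position predict the next token
        logits = logits[:, -1, :]
        # Greedy choice: the highest-probability token
        idx_next = torch.argmax(logits, dim=-1, keepdim=True)
        # Append the chosen token and continue
        idx = torch.cat((idx, idx_next), dim=1)
    return idx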
115
pkg/llms_from_scratch/tests/test_ch05.py
Normal file
@@ -0,0 +1,115 @@
# Copyright (c) Sebastian Raschka under Apache License 2.0 (see LICENSE.txt).
# Source for "Build a Large Language Model From Scratch"
# - https://www.manning.com/books/build-a-large-language-model-from-scratch
# Code: https://github.com/rasbt/LLMs-from-scratch

from llms_from_scratch.ch02 import create_dataloader_v1
from llms_from_scratch.ch04 import GPTModel
from llms_from_scratch.ch05 import train_model_simple

import os
import urllib.request

import pytest
import tiktoken
import torch
from torch.utils.data import Subset, DataLoader


@pytest.mark.parametrize("file_name", ["the-verdict.txt"])
def test_train_simple(tmp_path, file_name):

    GPT_CONFIG_124M = {
        "vocab_size": 50257,    # Vocabulary size
        "context_length": 256,  # Shortened context length (orig: 1024)
        "emb_dim": 768,         # Embedding dimension
        "n_heads": 12,          # Number of attention heads
        "n_layers": 12,         # Number of layers
        "drop_rate": 0.1,       # Dropout rate
        "qkv_bias": False       # Query-key-value bias
    }

    OTHER_SETTINGS = {
        "learning_rate": 5e-4,
        "num_epochs": 2,
        "batch_size": 1,
        "weight_decay": 0.1
    }

    torch.manual_seed(123)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    ##############################
    # Download data if necessary
    ##############################

    file_path = tmp_path / file_name
    url = "https://raw.githubusercontent.com/rasbt/LLMs-from-scratch/main/ch02/01_main-chapter-code/the-verdict.txt"

    if not os.path.exists(file_path):
        with urllib.request.urlopen(url) as response:
            text_data = response.read().decode('utf-8')
        with open(file_path, "w", encoding="utf-8") as file:
            file.write(text_data)
    else:
        with open(file_path, "r", encoding="utf-8") as file:
            text_data = file.read()

    ##############################
    # Initialize model
    ##############################

    model = GPTModel(GPT_CONFIG_124M)
    model.to(device)  # no assignment model = model.to(device) necessary for nn.Module classes
    optimizer = torch.optim.AdamW(
        model.parameters(), lr=OTHER_SETTINGS["learning_rate"], weight_decay=OTHER_SETTINGS["weight_decay"]
    )

    ##############################
    # Set up dataloaders
    ##############################

    # Train/validation ratio
    train_ratio = 0.90
    split_idx = int(train_ratio * len(text_data))

    train_loader = create_dataloader_v1(
        text_data[:split_idx],
        batch_size=OTHER_SETTINGS["batch_size"],
        max_length=GPT_CONFIG_124M["context_length"],
        stride=GPT_CONFIG_124M["context_length"],
        drop_last=True,
        shuffle=True,
        num_workers=0
    )

    val_loader = create_dataloader_v1(
        text_data[split_idx:],
        batch_size=OTHER_SETTINGS["batch_size"],
        max_length=GPT_CONFIG_124M["context_length"],
        stride=GPT_CONFIG_124M["context_length"],
        drop_last=False,
        shuffle=False,
        num_workers=0
    )

    ##############################
    # Train model
    ##############################

    tokenizer = tiktoken.get_encoding("gpt2")

    train_subset = Subset(train_loader.dataset, range(1))
    one_batch_train_loader = DataLoader(train_subset, batch_size=1)
    val_subset = Subset(val_loader.dataset, range(1))
    one_batch_val_loader = DataLoader(val_subset, batch_size=1)

    train_losses, val_losses, tokens_seen = train_model_simple(
        model, one_batch_train_loader, one_batch_val_loader, optimizer, device,
        num_epochs=OTHER_SETTINGS["num_epochs"], eval_freq=1, eval_iter=1,
        start_context="Every effort moves you", tokenizer=tokenizer
    )

    assert round(train_losses[0], 1) == 7.6
    assert round(val_losses[0], 1) == 10.1
    assert train_losses[-1] < train_losses[0]
148
pkg/llms_from_scratch/tests/test_ch06.py
Normal file
@@ -0,0 +1,148 @@
# Copyright (c) Sebastian Raschka under Apache License 2.0 (see LICENSE.txt).
# Source for "Build a Large Language Model From Scratch"
# - https://www.manning.com/books/build-a-large-language-model-from-scratch
# Code: https://github.com/rasbt/LLMs-from-scratch


from llms_from_scratch.ch04 import GPTModel
from llms_from_scratch.ch06 import (
    download_and_unzip_spam_data, create_balanced_dataset,
    random_split, SpamDataset, train_classifier_simple
)

from pathlib import Path
import urllib.error

import pandas as pd
import tiktoken
import torch
from torch.utils.data import DataLoader, Subset


def test_train_classifier(tmp_path):

    ########################################
    # Download and prepare dataset
    ########################################

    url = "https://archive.ics.uci.edu/static/public/228/sms+spam+collection.zip"
    zip_path = tmp_path / "sms_spam_collection.zip"
    extracted_path = tmp_path / "sms_spam_collection"
    data_file_path = Path(extracted_path) / "SMSSpamCollection.tsv"

    try:
        download_and_unzip_spam_data(
            url, zip_path, extracted_path, data_file_path
        )
    except (urllib.error.HTTPError, urllib.error.URLError, TimeoutError) as e:
        print(f"Primary URL failed: {e}. Trying backup URL...")
        backup_url = "https://f001.backblazeb2.com/file/LLMs-from-scratch/sms%2Bspam%2Bcollection.zip"
        download_and_unzip_spam_data(
            backup_url, zip_path, extracted_path, data_file_path
        )

    df = pd.read_csv(data_file_path, sep="\t", header=None, names=["Label", "Text"])
    balanced_df = create_balanced_dataset(df)
    balanced_df["Label"] = balanced_df["Label"].map({"ham": 0, "spam": 1})

    train_df, validation_df, test_df = random_split(balanced_df, 0.7, 0.1)
    train_df.to_csv(tmp_path / "train.csv", index=None)
    validation_df.to_csv(tmp_path / "validation.csv", index=None)
    test_df.to_csv(tmp_path / "test.csv", index=None)

    ########################################
    # Create data loaders
    ########################################
    tokenizer = tiktoken.get_encoding("gpt2")

    train_dataset = SpamDataset(
        csv_file=tmp_path / "train.csv",
        max_length=None,
        tokenizer=tokenizer
    )

    val_dataset = SpamDataset(
        csv_file=tmp_path / "validation.csv",
        max_length=train_dataset.max_length,
        tokenizer=tokenizer
    )

    num_workers = 0
    batch_size = 8

    torch.manual_seed(123)

    train_loader = DataLoader(
        dataset=train_dataset,
        batch_size=batch_size,
        shuffle=True,
        num_workers=num_workers,
        drop_last=True,
    )

    val_loader = DataLoader(
        dataset=val_dataset,
        batch_size=batch_size,
        num_workers=num_workers,
        drop_last=False,
    )

    ########################################
    # Load pretrained model
    ########################################

    # Small GPT model for testing purposes
    BASE_CONFIG = {
        "vocab_size": 50257,
        "context_length": 120,
        "drop_rate": 0.0,
        "qkv_bias": False,
        "emb_dim": 12,
        "n_layers": 1,
        "n_heads": 2
    }
    model = GPTModel(BASE_CONFIG)
    model.eval()
    device = "cpu"

    ########################################
    # Modify pretrained model
    ########################################

    for param in model.parameters():
        param.requires_grad = False

    torch.manual_seed(123)

    num_classes = 2
    model.out_head = torch.nn.Linear(in_features=BASE_CONFIG["emb_dim"], out_features=num_classes)
    model.to(device)

    for param in model.trf_blocks[-1].parameters():
        param.requires_grad = True

    for param in model.final_norm.parameters():
        param.requires_grad = True

    ########################################
    # Finetune modified model
    ########################################

    torch.manual_seed(123)

    optimizer = torch.optim.AdamW(model.parameters(), lr=5e-5, weight_decay=0.0)

    train_subset = Subset(train_loader.dataset, range(5))
    batch_train_loader = DataLoader(train_subset, batch_size=5)
    val_subset = Subset(val_loader.dataset, range(5))
    batch_val_loader = DataLoader(val_subset, batch_size=5)

    num_epochs = 5
    train_losses, val_losses, train_accs, val_accs, examples_seen = train_classifier_simple(
        model, batch_train_loader, batch_val_loader, optimizer, device,
        num_epochs=num_epochs, eval_freq=1, eval_iter=1,
    )

    assert round(train_losses[0], 1) == 0.8
    assert round(val_losses[0], 1) == 0.8
    assert train_losses[-1] < train_losses[0]
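train_classifier_simple turns the GPT into a classifier by reading only the logits of the last token, which the freshly attached out_head projects down to the two spam/ham classes. A minimal sketch of how a per-batch classification loss can be computed under that setup (the function name calc_classification_loss is illustrative, not necessarily the exact chapter 6 code):

import torch


def calc_classification_loss(input_batch, target_batch, model, device):
    input_batch = input_batch.to(device)
    target_batch = target_batch.to(device)
    # The last token's logits carry the sequence-level prediction
    logits = model(input_batch)[:, -1, :]  # (batch_size, num_classes)
    return torch.nn.functional.cross_entropy(logits, target_batch)

With two balanced classes, a random classifier yields a cross-entropy near ln(2) ≈ 0.69, consistent with the 0.8 starting losses asserted above.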
108
pkg/llms_from_scratch/tests/test_ch07.py
Normal file
@@ -0,0 +1,108 @@
# Copyright (c) Sebastian Raschka under Apache License 2.0 (see LICENSE.txt).
# Source for "Build a Large Language Model From Scratch"
# - https://www.manning.com/books/build-a-large-language-model-from-scratch
# Code: https://github.com/rasbt/LLMs-from-scratch

from llms_from_scratch.ch04 import GPTModel
from llms_from_scratch.ch05 import train_model_simple
from llms_from_scratch.ch07 import (
    download_and_load_file, InstructionDataset, format_input, custom_collate_fn
)

from functools import partial

import torch
from torch.utils.data import DataLoader
import tiktoken


def test_instruction_finetune(tmp_path):

    #######################################
    # Download and prepare dataset
    #######################################
    file_path = tmp_path / "instruction-data.json"
    url = "https://raw.githubusercontent.com/rasbt/LLMs-from-scratch/main/ch07/01_main-chapter-code/instruction-data.json"
    data = download_and_load_file(file_path, url)

    train_portion = int(len(data) * 0.85)  # 85% for training
    test_portion = int(len(data) * 0.1)    # 10% for testing

    train_data = data[:train_portion]
    test_data = data[train_portion:train_portion + test_portion]
    val_data = data[train_portion + test_portion:]

    # Use very small subset for testing purposes
    train_data = train_data[:15]
    val_data = val_data[:15]
    test_data = test_data[:15]

    tokenizer = tiktoken.get_encoding("gpt2")
    # Keep the test on the CPU so that the batches produced by the collate
    # function and the model end up on the same device
    device = torch.device("cpu")

    customized_collate_fn = partial(custom_collate_fn, device=device, allowed_max_length=100)

    num_workers = 0
    batch_size = 8

    torch.manual_seed(123)

    train_dataset = InstructionDataset(train_data, tokenizer)
    train_loader = DataLoader(
        train_dataset,
        batch_size=batch_size,
        collate_fn=customized_collate_fn,
        shuffle=True,
        drop_last=True,
        num_workers=num_workers
    )

    val_dataset = InstructionDataset(val_data, tokenizer)
    val_loader = DataLoader(
        val_dataset,
        batch_size=batch_size,
        collate_fn=customized_collate_fn,
        shuffle=False,
        drop_last=False,
        num_workers=num_workers
    )

    #######################################
    # Load pretrained model
    #######################################

    # Small GPT model for testing purposes
    BASE_CONFIG = {
        "vocab_size": 50257,
        "context_length": 120,
        "drop_rate": 0.0,
        "qkv_bias": False,
        "emb_dim": 12,
        "n_layers": 1,
        "n_heads": 2
    }
    model = GPTModel(BASE_CONFIG)
    model.eval()
    CHOOSE_MODEL = "Small test model"

    print("Loaded model:", CHOOSE_MODEL)
    print(50*"-")

    #######################################
    # Finetuning the model
    #######################################

    num_epochs = 10
    optimizer = torch.optim.AdamW(model.parameters(), lr=5e-5, weight_decay=0.1)

    torch.manual_seed(123)
    train_losses, val_losses, tokens_seen = train_model_simple(
        model, train_loader, val_loader, optimizer, device,
        num_epochs=num_epochs, eval_freq=5, eval_iter=5,
        start_context=format_input(val_data[0]), tokenizer=tokenizer
    )

    assert round(train_losses[0], 1) == 10.9
    assert round(val_losses[0], 1) == 10.9
    assert train_losses[-1] < train_losses[0]
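format_input, imported from llms_from_scratch.ch07 above, converts one instruction record into the Alpaca-style prompt used during finetuning and as the start_context here. A sketch of what such a formatter looks like, assuming records with "instruction", "input", and "output" keys (the name format_input_sketch and the exact template wording are illustrative; the ch07 implementation may differ):

def format_input_sketch(entry):
    instruction_text = (
        "Below is an instruction that describes a task. "
        "Write a response that appropriately completes the request."
        f"\n\n### Instruction:\n{entry['instruction']}"
    )
    # The input field is optional; omit its section when empty
    input_text = f"\n\n### Input:\n{entry['input']}" if entry["input"] else ""
    return instruction_text + input_text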