Mirror of https://github.com/rasbt/LLMs-from-scratch.git, synced 2026-04-10 12:33:42 +00:00
Committed by GitHub
Parent: e316cafd9f
Commit: 9d6da22ebb
@@ -100,7 +100,7 @@ class MultiHeadAttention(nn.Module):
        attn_weights = self.dropout(attn_weights)

        # Shape: (b, num_tokens, num_heads, head_dim)
        context_vec = (attn_weights @ values).transpose(1, 2)

        # Combine heads, where self.d_out = self.num_heads * self.head_dim
        context_vec = context_vec.reshape(b, num_tokens, self.d_out)
@@ -132,7 +132,7 @@ class GELU(nn.Module):

    def forward(self, x):
        return 0.5 * x * (1 + torch.tanh(
            torch.sqrt(torch.tensor(2.0 / torch.pi)) *
            (x + 0.044715 * torch.pow(x, 3))
        ))
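This is the tanh approximation of GELU used by GPT-2. A quick standalone sanity check, assuming PyTorch 1.12+ where torch.nn.functional.gelu accepts approximate="tanh", shows the formula above matches the built-in version:

import torch
import torch.nn.functional as F

x = torch.linspace(-3, 3, 101)
gelu_tanh = 0.5 * x * (1 + torch.tanh(
    torch.sqrt(torch.tensor(2.0 / torch.pi)) *
    (x + 0.044715 * torch.pow(x, 3))
))
# Maximum deviation from PyTorch's tanh-approximated GELU is ~0
print((gelu_tanh - F.gelu(x, approximate="tanh")).abs().max())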
@@ -158,7 +158,7 @@ class TransformerBlock(nn.Module):
            d_in=cfg["emb_dim"],
            d_out=cfg["emb_dim"],
            block_size=cfg["ctx_len"],
            num_heads=cfg["n_heads"],
            dropout=cfg["drop_rate"],
            qkv_bias=cfg["qkv_bias"])
        self.ff = FeedForward(cfg)
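For orientation, here is a configuration dict with the keys this block reads; the values follow the GPT-2 124M sizes that GPT_CONFIG_124M uses elsewhere in this diff and should be treated as illustrative:

GPT_CONFIG_124M = {
    "vocab_size": 50257,  # GPT-2 BPE vocabulary size
    "ctx_len": 1024,      # maximum context length (block_size)
    "emb_dim": 768,       # embedding width; d_in and d_out above
    "n_heads": 12,        # attention heads per block
    "n_layers": 12,       # number of transformer blocks
    "drop_rate": 0.1,     # dropout probability
    "qkv_bias": False     # bias terms in the query/key/value projections
}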
@@ -224,7 +224,7 @@ def generate_text_simple(model, idx, max_new_tokens, context_size):

        # Focus only on the last time step
        # (batch, n_token, vocab_size) becomes (batch, vocab_size)
        logits = logits[:, -1, :]

        # Get the idx of the vocab entry with the highest logits value
        idx_next = torch.argmax(logits, dim=-1, keepdim=True)  # (batch, 1)
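These two steps implement greedy decoding. A minimal sketch of the surrounding loop, assuming the context-cropping step implied by the context_size parameter:

import torch

def generate_greedy_sketch(model, idx, max_new_tokens, context_size):
    # idx is a (batch, n_tokens) tensor of token ids in the current context
    for _ in range(max_new_tokens):
        idx_cond = idx[:, -context_size:]  # crop to the supported context length
        with torch.no_grad():
            logits = model(idx_cond)       # (batch, n_tokens, vocab_size)
        logits = logits[:, -1, :]          # focus only on the last time step
        idx_next = torch.argmax(logits, dim=-1, keepdim=True)  # (batch, 1)
        idx = torch.cat((idx, idx_next), dim=1)  # append the prediction
    return idx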
@@ -159,7 +159,7 @@ if __name__ == "__main__":
        stride=GPT_CONFIG_124M["ctx_len"],
        drop_last=False,
        shuffle=False
    )

    model = GPTModel(GPT_CONFIG_124M)
    model.to(device)
@@ -199,4 +199,4 @@ if __name__ == "__main__":
    if not interrupted:
        print("Hyperparameter search completed.")
        print(f"Best hyperparameters: {best_hparams}")
        print(f"Best Val loss: {best_val_loss} | Training loss {train_loss}")
@@ -35,7 +35,7 @@ class GPTDatasetV1(Dataset):
        return self.input_ids[idx], self.target_ids[idx]


def create_dataloader_v1(txt, batch_size=4, max_length=256,
                         stride=128, shuffle=True, drop_last=True):
    # Initialize the tokenizer
    tokenizer = tiktoken.get_encoding("gpt2")
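A hypothetical usage of create_dataloader_v1 as defined above; the filename and batch/stride values are illustrative, not taken from the diff:

with open("the-verdict.txt", "r", encoding="utf-8") as f:  # assumed sample text
    raw_text = f.read()

loader = create_dataloader_v1(raw_text, batch_size=4, max_length=256, stride=128)
inputs, targets = next(iter(loader))
print(inputs.shape)   # torch.Size([4, 256]) token ids
print(targets.shape)  # torch.Size([4, 256]); targets are inputs shifted by one token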
@@ -78,7 +78,7 @@ class MultiHeadAttention(nn.Module):

        # We implicitly split the matrix by adding a `num_heads` dimension
        # Unroll last dim: (b, num_tokens, d_out) -> (b, num_tokens, num_heads, head_dim)
        keys = keys.view(b, num_tokens, self.num_heads, self.head_dim)
        values = values.view(b, num_tokens, self.num_heads, self.head_dim)
        queries = queries.view(b, num_tokens, self.num_heads, self.head_dim)
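A standalone shape check of this split, with made-up sizes (d_out=12 across num_heads=3 gives head_dim=4):

import torch

b, num_tokens, d_out, num_heads = 2, 5, 12, 3
head_dim = d_out // num_heads

keys = torch.randn(b, num_tokens, d_out)
keys = keys.view(b, num_tokens, num_heads, head_dim)
print(keys.shape)                  # torch.Size([2, 5, 3, 4])
# Transposing moves heads next to the batch dim so attention runs per head
print(keys.transpose(1, 2).shape)  # torch.Size([2, 3, 5, 4])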
@@ -100,7 +100,7 @@ class MultiHeadAttention(nn.Module):
        attn_weights = self.dropout(attn_weights)

        # Shape: (b, num_tokens, num_heads, head_dim)
        context_vec = (attn_weights @ values).transpose(1, 2)

        # Combine heads, where self.d_out = self.num_heads * self.head_dim
        context_vec = context_vec.contiguous().view(b, num_tokens, self.d_out)
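Note that this file combines heads with .contiguous().view(...) while the first hunk above uses .reshape(...); the results are identical, since reshape falls back to a copy when the transposed tensor is non-contiguous. A small illustration with made-up shapes:

import torch

t = torch.randn(2, 3, 5, 4).transpose(1, 2)  # transpose makes t non-contiguous
# t.view(2, 5, 12) would raise a RuntimeError on this non-contiguous tensor
a = t.contiguous().view(2, 5, 12)  # explicit copy, then view
b = t.reshape(2, 5, 12)            # reshape copies automatically when needed
print(torch.equal(a, b))           # True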
@@ -132,7 +132,7 @@ class GELU(nn.Module):

    def forward(self, x):
        return 0.5 * x * (1 + torch.tanh(
            torch.sqrt(torch.tensor(2.0 / torch.pi)) *
            (x + 0.044715 * torch.pow(x, 3))
        ))
@@ -158,7 +158,7 @@ class TransformerBlock(nn.Module):
            d_in=cfg["emb_dim"],
            d_out=cfg["emb_dim"],
            block_size=cfg["ctx_len"],
            num_heads=cfg["n_heads"],
            dropout=cfg["drop_rate"],
            qkv_bias=cfg["qkv_bias"])
        self.ff = FeedForward(cfg)
@@ -224,7 +224,7 @@ def generate_text_simple(model, idx, max_new_tokens, context_size):

        # Focus only on the last time step
        # (batch, n_token, vocab_size) becomes (batch, vocab_size)
        logits = logits[:, -1, :]

        # Get the idx of the vocab entry with the highest logits value
        idx_next = torch.argmax(logits, dim=-1, keepdim=True)  # (batch, 1)
@@ -63,4 +63,4 @@ if __name__ == "__main__":
    target_dir = "path_to_your_large_files"
    print(f"{len(all_files)} files to process.")

    combine_files(all_files, args.output_dir)
@@ -99,7 +99,7 @@ def train_model_simple(model, optimizer, device, n_epochs,
        max_length=GPT_CONFIG_124M["ctx_len"],
        stride=GPT_CONFIG_124M["ctx_len"]
    )
-    print(f"Training ...")
+    print("Training ...")
    model.train()
    for input_batch, target_batch in train_loader:
        optimizer.zero_grad()
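For context, a minimal sketch of the training step this hunk is cropped from, assuming the usual cross-entropy-over-flattened-logits pattern for GPT training:

import torch.nn.functional as F

for input_batch, target_batch in train_loader:
    optimizer.zero_grad()
    logits = model(input_batch.to(device))  # (batch, n_tokens, vocab_size)
    loss = F.cross_entropy(
        logits.flatten(0, 1),               # (batch * n_tokens, vocab_size)
        target_batch.to(device).flatten()   # (batch * n_tokens,)
    )
    loss.backward()
    optimizer.step()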
@@ -9,11 +9,11 @@ from torch.utils.data import Dataset, DataLoader
import matplotlib.pyplot as plt


#####################################
# Chapter 2
#####################################


class GPTDatasetV1(Dataset):
    def __init__(self, txt, tokenizer, max_length, stride):
        self.tokenizer = tokenizer
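The initializer goes on to chunk the tokenized text with a sliding window; a toy sketch of that pattern, with illustrative window and stride values:

token_ids = list(range(10))  # stand-in for tokenizer.encode(txt) output
max_length, stride = 4, 2

# Each window of max_length tokens predicts the same window shifted by one
for i in range(0, len(token_ids) - max_length, stride):
    input_chunk = token_ids[i:i + max_length]
    target_chunk = token_ids[i + 1:i + max_length + 1]
    print(input_chunk, "->", target_chunk)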
@@ -310,5 +310,3 @@ def text_to_token_ids(text, tokenizer):
def token_ids_to_text(token_ids, tokenizer):
    flat = token_ids.squeeze(0)  # remove batch dimension
    return tokenizer.decode(flat.tolist())
-
-
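A round-trip sketch pairing this helper with its counterpart; the text_to_token_ids body shown here is assumed from its signature, not taken from the diff:

import tiktoken
import torch

tokenizer = tiktoken.get_encoding("gpt2")

def text_to_token_ids(text, tokenizer):
    ids = tokenizer.encode(text)
    return torch.tensor(ids).unsqueeze(0)  # add a batch dimension

ids = text_to_token_ids("Hello, world.", tokenizer)
print(token_ids_to_text(ids, tokenizer))  # Hello, world.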