fixes for code (#206)

* updated .gitignore

* removed unused GELU import

* fixed model_configs; ensured all tensors are on the same device

* removed unused tiktoken import

* update

* updated hparam search

* removed redundant tokenizer argument

---------

Co-authored-by: rasbt <mail@sebastianraschka.com>
Author: Daniel Kleine
Date: 2024-06-12 03:59:48 +02:00
Committed by: GitHub
Parent: 1a65020d81
Commit: dcbdc1d2e5
12 changed files with 33 additions and 46 deletions

View File

@@ -65,9 +65,9 @@
"name": "stdout",
"output_type": "stream",
"text": [
"numpy version: 1.25.2\n",
"torch version: 2.2.1\n",
"transformers version: 4.33.2\n"
"numpy version: 1.24.3\n",
"torch version: 2.3.0\n",
"transformers version: 4.41.2\n"
]
}
],
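Note: version banners like the one above are commonly produced with importlib.metadata; a minimal sketch (the notebook's actual print cell may differ):

from importlib.metadata import version

# Print the versions of the key dependencies used in the notebook
for pkg in ("numpy", "torch", "transformers"):
    print(f"{pkg} version: {version(pkg)}")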
@@ -85,16 +85,6 @@
"id": "ffc17d7d-bcd8-42ee-82a9-04fd55acf15d",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/Users/sebastian/miniforge3/envs/book/lib/python3.11/site-packages/transformers/utils/generic.py:311: UserWarning: torch.utils._pytree._register_pytree_node is deprecated. Please use torch.utils._pytree.register_pytree_node instead.\n",
" torch.utils._pytree._register_pytree_node(\n",
"/Users/sebastian/miniforge3/envs/book/lib/python3.11/site-packages/transformers/utils/generic.py:311: UserWarning: torch.utils._pytree._register_pytree_node is deprecated. Please use torch.utils._pytree.register_pytree_node instead.\n",
" torch.utils._pytree._register_pytree_node(\n"
]
},
{
"data": {
"text/plain": [
@@ -162,10 +152,10 @@
"}\n",
"\n",
"model_configs = {\n",
" \"gpt2-small\": {\"emb_dim\": 768, \"n_layers\": 12, \"n_heads\": 12},\n",
" \"gpt2-medium\": {\"emb_dim\": 1024, \"n_layers\": 24, \"n_heads\": 16},\n",
" \"gpt2-large\": {\"emb_dim\": 1280, \"n_layers\": 36, \"n_heads\": 20},\n",
" \"gpt2-xl\": {\"emb_dim\": 1600, \"n_layers\": 48, \"n_heads\": 25},\n",
" \"gpt2-small (124M)\": {\"emb_dim\": 768, \"n_layers\": 12, \"n_heads\": 12},\n",
" \"gpt2-medium (355M)\": {\"emb_dim\": 1024, \"n_layers\": 24, \"n_heads\": 16},\n",
" \"gpt2-large (774M)\": {\"emb_dim\": 1280, \"n_layers\": 36, \"n_heads\": 20},\n",
" \"gpt2-xl (1558M)\": {\"emb_dim\": 1600, \"n_layers\": 48, \"n_heads\": 25},\n",
"}\n",
"\n",
"\n",
@@ -242,7 +232,7 @@
"name": "stderr",
"output_type": "stream",
"text": [
"/var/folders/jg/tpqyh1fd5js5wsr1d138k3n40000gn/T/ipykernel_32618/3877979348.py:4: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n",
"/tmp/ipykernel_9385/3877979348.py:4: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n",
" return torch.nn.Parameter(torch.tensor(right))\n"
]
}
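Note: the UserWarning in this output comes from wrapping an existing tensor in torch.tensor(); PyTorch recommends clone().detach() for copy-construction. A warning-free variant of the helper would look roughly like this (sketch; the notebook's exact assignment helper may differ):

import torch

def assign_check(left, right):
    # Guard against silently loading weights of the wrong shape
    if left.shape != right.shape:
        raise ValueError(f"Shape mismatch. Left: {left.shape}, Right: {right.shape}")
    # clone().detach() copy-constructs from an existing tensor without
    # triggering the torch.tensor(sourceTensor) warning
    return torch.nn.Parameter(right.clone().detach())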
@@ -255,13 +245,12 @@
"gpt = GPTModel(BASE_CONFIG)\n",
"\n",
"device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
"load_weights(gpt, gpt_hf)\n",
"gpt.to(device);"
"load_weights(gpt, gpt_hf)"
]
},
{
"cell_type": "code",
"execution_count": 8,
"execution_count": 9,
"id": "4ddd0d51-3ade-4890-9bab-d63f141d095f",
"metadata": {},
"outputs": [
@@ -285,8 +274,8 @@
"tokenizer = tiktoken.get_encoding(\"gpt2\")\n",
"\n",
"token_ids = generate(\n",
" model=gpt,\n",
" idx=text_to_token_ids(\"Every effort moves\", tokenizer),\n",
" model=gpt.to(device),\n",
" idx=text_to_token_ids(\"Every effort moves\", tokenizer).to(device),\n",
" max_new_tokens=30,\n",
" context_size=BASE_CONFIG[\"context_length\"],\n",
" top_k=1,\n",

View File

@@ -53,8 +53,8 @@ def calc_loss_batch(input_batch, target_batch, model, device):
 def evaluate_model(model, train_loader, val_loader, device, eval_iter):
     model.eval()
     with torch.no_grad():
-        train_loss = calc_loss_loader(train_loader, model, device, num_iters=eval_iter)
-        val_loss = calc_loss_loader(val_loader, model, device, num_iters=eval_iter)
+        train_loss = calc_loss_loader(train_loader, model, device, num_batches=eval_iter)
+        val_loss = calc_loss_loader(val_loader, model, device, num_batches=eval_iter)
     model.train()
     return train_loss, val_loss
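Note: the rename fixes the keyword so it matches calc_loss_loader's actual signature, where num_batches caps how many batches contribute to the averaged loss. A sketch of such a function, inferred from the call sites above (the repo's implementation may differ in details such as empty-loader handling):

def calc_loss_loader(data_loader, model, device, num_batches=None):
    total_loss = 0.0
    if num_batches is None:
        num_batches = len(data_loader)
    else:
        # Never request more batches than the loader can supply
        num_batches = min(num_batches, len(data_loader))
    for i, (input_batch, target_batch) in enumerate(data_loader):
        if i >= num_batches:
            break
        total_loss += calc_loss_batch(input_batch, target_batch, model, device).item()
    return total_loss / num_batches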

View File

@@ -40,12 +40,12 @@ class GPTDatasetV1(Dataset):
 def create_dataloader_v1(txt, batch_size=4, max_length=256,
-                         stride=128, shuffle=True, drop_last=True):
+                         stride=128, shuffle=True, drop_last=True, num_workers=0):
     # Initialize the tokenizer
     tokenizer = tiktoken.get_encoding("gpt2")

     # Create dataset
-    dataset = GPTDatasetV1(txt, tokenizer, max_length, stride, num_workers=0)
+    dataset = GPTDatasetV1(txt, tokenizer, max_length, stride)

     # Create dataloader
     dataloader = DataLoader(
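Note: the fix moves num_workers out of the GPTDatasetV1 constructor, which has no such parameter, and into the function signature so it can be forwarded to the DataLoader, where it controls how many background worker processes load batches. The diff is truncated here; the call presumably continues along these lines (sketch):

from torch.utils.data import DataLoader  # already imported at the top of the file (assumption)

# num_workers=0 loads batches in the main process; >0 spawns worker processes
dataloader = DataLoader(
    dataset,
    batch_size=batch_size,
    shuffle=shuffle,
    drop_last=drop_last,
    num_workers=num_workers,
)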