add a few lectures

Frank Xu
2025-05-01 19:03:03 -04:00
parent fa99f9d99a
commit 02953d8608
29 changed files with 266665 additions and 0 deletions


@@ -0,0 +1,2 @@
pip install ipywidgets
pip install scikit-learn


@@ -0,0 +1,860 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "41d7e9ff",
"metadata": {},
"source": [
"### PyTorch Fundamentals Part A\n",
"\n",
"- A PyTorch tensor is a multi-dimensional array (0D to nD) that contains elements of a single data type (e.g., integers, floats). \n",
"- Tensors are used to represent scalars, vectors, matrices, or higher-dimensional data and are optimized for mathematical operations, automatic differentiation, and GPU computation"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "739c5173",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'2.6.0+cu126'"
]
},
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"import torch\n",
"torch.__version__"
]
},
{
"cell_type": "markdown",
"id": "75acf7d8",
"metadata": {},
"source": [
"#### Multi-dimensional"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "0e82be1e",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"tensor(5)"
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# 0D: Scalar \n",
"x= torch.tensor(5)\n",
"x"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "7c239759",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"0"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"x.ndim"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "d176548d",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"torch.Size([])"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"x.shape"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "07e03145",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"5"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"x.item()"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "41fcc46e",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"tensor([1, 2, 3])"
]
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# 1D: Vector \n",
"x=torch.tensor([1, 2, 3])\n",
"x"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "f9894c37",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"1"
]
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"x.ndim"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "7dc166eb",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"torch.Size([3])"
]
},
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# shape is a tuple, although looks like a list\n",
"x.shape"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "2581817b",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"tensor([[ 7, 8],\n",
" [ 9, 10]])"
]
},
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# 2D Matrix\n",
"MATRIX = torch.tensor([[7, 8], \n",
" [9, 10]])\n",
"MATRIX"
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "46961042",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"2"
]
},
"execution_count": 11,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"MATRIX.ndim"
]
},
{
"cell_type": "code",
"execution_count": 12,
"id": "9669fda8",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"torch.Size([2, 2])"
]
},
"execution_count": 12,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"MATRIX.shape"
]
},
{
"cell_type": "code",
"execution_count": 13,
"id": "15297945",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"tensor([[[1, 2, 3],\n",
" [3, 6, 9],\n",
" [2, 4, 5]]])"
]
},
"execution_count": 13,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# Tensor\n",
"TENSOR = torch.tensor([[[1, 2, 3],\n",
" [3, 6, 9],\n",
" [2, 4, 5]]])\n",
"TENSOR"
]
},
{
"cell_type": "code",
"execution_count": 14,
"id": "5bbed071",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"3"
]
},
"execution_count": 14,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"TENSOR.ndim"
]
},
{
"cell_type": "code",
"execution_count": 15,
"id": "483d25c7",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"torch.Size([1, 3, 3])"
]
},
"execution_count": 15,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"TENSOR.shape"
]
},
{
"cell_type": "code",
"execution_count": 16,
"id": "c4e76ef2",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"torch.Size([1, 3, 3])"
]
},
"execution_count": 16,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"TENSOR.size()"
]
},
{
"cell_type": "code",
"execution_count": 34,
"id": "b56abf50",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"tensor([[1, 2],\n",
" [3, 3],\n",
" [6, 9]])"
]
},
"execution_count": 34,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"torch.tensor([[[1, 2, 3],\n",
" [3, 6, 9]]]).reshape(3,2)"
]
},
{
"cell_type": "code",
"execution_count": 35,
"id": "cdd39ae8",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"tensor([[1],\n",
" [2],\n",
" [3],\n",
" [3],\n",
" [6],\n",
" [9]])"
]
},
"execution_count": 35,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"torch.tensor([[[1, 2, 3],\n",
" [3, 6, 9]]]).reshape(-1,1)"
]
},
{
"cell_type": "code",
"execution_count": 18,
"id": "adf1ab41",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"tensor([[[1., 2., 3.],\n",
" [3., 6., 9.],\n",
" [2., 4., 5.]]])"
]
},
"execution_count": 18,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# Tensor\n",
"TENSOR = torch.tensor([[[1, 2, 3],\n",
" [3, 6, 9],\n",
" [2, 4, 5]]], dtype=torch.float32)\n",
"TENSOR"
]
},
{
"cell_type": "code",
"execution_count": 19,
"id": "a368079f",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"torch.float32"
]
},
"execution_count": 19,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"TENSOR.dtype"
]
},
{
"cell_type": "code",
"execution_count": 20,
"id": "4d00ea95",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"(tensor([[0.7310, 0.5572],\n",
" [0.9469, 0.2378]]),\n",
" tensor([[0.2700, 0.9798],\n",
" [0.4980, 0.8848]]))"
]
},
"execution_count": 20,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"y = torch.rand(2, 2)\n",
"x = torch.rand(2, 2)\n",
"x, y"
]
},
{
"cell_type": "markdown",
"id": "02a00747",
"metadata": {},
"source": [
"#### Operation"
]
},
{
"cell_type": "code",
"execution_count": 21,
"id": "45267f2f",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"(tensor([[1, 2],\n",
" [3, 4]]),\n",
" tensor([[5, 6],\n",
" [7, 8]]))"
]
},
"execution_count": 21,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"x= tensor = torch.tensor([[1, 2], \n",
" [3, 4]])\n",
"y= tensor = torch.tensor([[5, 6], \n",
" [7, 8]])\n",
"x, y"
]
},
{
"cell_type": "code",
"execution_count": 22,
"id": "193a7828",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"tensor([[ 6, 8],\n",
" [10, 12]])"
]
},
"execution_count": 22,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"x+y"
]
},
{
"cell_type": "code",
"execution_count": 23,
"id": "1ce81689",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"tensor([[ 5, 12],\n",
" [21, 32]])"
]
},
"execution_count": 23,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"z=x*y\n",
"z"
]
},
{
"cell_type": "code",
"execution_count": 24,
"id": "62f8cde3",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"tensor([11, 12, 13])"
]
},
"execution_count": 24,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# Create a tensor of values and add a number to it\n",
"# recall broadcasting rules\n",
"tensor = torch.tensor([1, 2, 3])\n",
"results= tensor + 10\n",
"results"
]
},
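{
"cell_type": "markdown",
"id": "4c5d6e7f",
"metadata": {},
"source": [
"A minimal sketch of the broadcasting rule used above: shapes are compared from the trailing dimension, and any dimension of size 1 is stretched to match the other tensor (the tensors `a` and `b` below are just for illustration)."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4c5d6e80",
"metadata": {},
"outputs": [],
"source": [
"# Broadcasting sketch: a (2, 1) column and a (1, 3) row broadcast to shape (2, 3)\n",
"a = torch.tensor([[1], [2]])      # shape (2, 1)\n",
"b = torch.tensor([[10, 20, 30]])  # shape (1, 3)\n",
"a + b"
]
},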
{
"cell_type": "code",
"execution_count": 25,
"id": "2098ad78",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"tensor([[1, 2, 3],\n",
" [4, 5, 6]])"
]
},
"execution_count": 25,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# reshaping\n",
"tensor = torch.tensor([1, 2, 3, 4, 5, 6])\n",
"tensor.view(2, 3) # Reshape to 2x2"
]
},
{
"cell_type": "code",
"execution_count": 26,
"id": "883321f8",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"tensor([[1, 2],\n",
" [3, 4],\n",
" [5, 6]])"
]
},
"execution_count": 26,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"tensor.view(3, 2) # Reshape to 3x2"
]
},
{
"cell_type": "markdown",
"id": "9d716eb9",
"metadata": {},
"source": [
"### Comparison to NumPy Arrays\n",
"Tensors are similar to NumPy arrays but add:\n",
"- GPU support.\n",
"- Automatic differentiation (requires_grad).\n",
" - Integration with PyTorchs deep learning ecosystem."
]
},
{
"cell_type": "code",
"execution_count": 27,
"id": "2a3fd4ae",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"tensor([1, 2, 3])"
]
},
"execution_count": 27,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"import numpy as np\n",
"tensor = torch.tensor([1, 2, 3])\n",
"tensor"
]
},
{
"cell_type": "code",
"execution_count": 28,
"id": "df247bd3",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"array([1, 2, 3])"
]
},
"execution_count": 28,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"numpy_array = tensor.numpy() # To NumPy\n",
"numpy_array "
]
},
{
"cell_type": "code",
"execution_count": 29,
"id": "9ada07ab",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"tensor([1, 2, 3])"
]
},
"execution_count": 29,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"tensor_from_numpy = torch.from_numpy(numpy_array) # Back to tensor\n",
"tensor_from_numpy "
]
},
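{
"cell_type": "markdown",
"id": "8b1c2d3e",
"metadata": {},
"source": [
"The comparison above also lists automatic differentiation; here is a minimal sketch of `requires_grad` and `.backward()` on a tiny expression (the values are illustrative)."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8b1c2d3f",
"metadata": {},
"outputs": [],
"source": [
"# Autograd sketch: track operations on x, then ask for dy/dx\n",
"x = torch.tensor(2.0, requires_grad=True)\n",
"y = x ** 2 + 3 * x   # y = x^2 + 3x\n",
"y.backward()         # fills x.grad with dy/dx\n",
"x.grad               # dy/dx = 2x + 3 = 7 at x = 2"
]
},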
{
"cell_type": "markdown",
"id": "350b8332",
"metadata": {},
"source": [
"### Running tensors on GPUs (and making faster computations)\n"
]
},
{
"cell_type": "code",
"execution_count": 30,
"id": "30c9ea9f",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"True"
]
},
"execution_count": 30,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# Check for GPU\n",
"import torch\n",
"torch.cuda.is_available()"
]
},
{
"cell_type": "code",
"execution_count": 31,
"id": "dd523b3e",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'cuda'"
]
},
"execution_count": 31,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# Set device type\n",
"device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n",
"device"
]
},
{
"cell_type": "code",
"execution_count": 32,
"id": "11d1a029",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"tensor([1, 2, 3]) cpu\n"
]
},
{
"data": {
"text/plain": [
"tensor([1, 2, 3], device='cuda:0')"
]
},
"execution_count": 32,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# Create tensor (default on CPU)\n",
"tensor = torch.tensor([1, 2, 3])\n",
"\n",
"# Tensor not on GPU\n",
"print(tensor, tensor.device)\n",
"\n",
"# Move tensor to GPU (if available)\n",
"tensor_on_gpu = tensor.to(device)\n",
"tensor_on_gpu"
]
},
{
"cell_type": "code",
"execution_count": 33,
"id": "db5249d0",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"C:\\Users\\Weife\\AppData\\Local\\Temp\\ipykernel_54156\\3540074575.py:6: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n",
" y = torch.tensor(x, device=device) # directly create a tensor on GPU\n"
]
}
],
"source": [
"# by default all tensors are created on the CPU,\n",
"# but you can also move them to the GPU (only if it's available )\n",
"if torch.cuda.is_available():\n",
" device = torch.device(\"cuda\") # a CUDA device object\n",
" x = torch.tensor([1, 2, 3])\n",
" y = torch.tensor(x, device=device) # directly create a tensor on GPU\n",
" x = x.to(device) # or just use strings ``.to(\"cuda\")``\n",
" z = x + y\n",
" # z = z.numpy() # not possible because numpy cannot handle GPU tenors\n",
" # move to CPU again\n",
" z= z.to(\"cpu\") # ``.to`` can also change dtype together!\n",
" # z = z.numpy()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ddbdf99a",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.13.2"
}
},
"nbformat": 4,
"nbformat_minor": 5
}


@@ -0,0 +1,329 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Python Object-Oriented Programming (OOP) Tutorial\n",
"\n",
"This tutorial introduces Object-Oriented Programming (OOP) in Python for beginners. We'll cover classes, objects, attributes, methods, and inheritance with simple examples."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 1. What is OOP?\n",
"OOP is a programming paradigm that organizes code into **objects**, which are instances of **classes**. A class is like a blueprint, and an object is a specific instance created from that blueprint."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 2. Creating a Class and Object\n",
"\n",
"Let's create a simple `Dog` class to represent a dog with a name and age."
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Buddy\n",
"3\n",
"Buddy says Woof!\n",
"Luna\n",
"Luna says Woof!\n"
]
}
],
"source": [
"# Define the Dog class\n",
"class Dog:\n",
" # Constructor method to initialize attributes\n",
" def __init__(self, name, age):\n",
" self.name = name # Instance attribute\n",
" self.age = age # Instance attribute\n",
"\n",
" # Method to make the dog bark\n",
" def bark(self):\n",
" return f\"{self.name} says Woof!\"\n",
"\n",
"# Create objects (instances) of the Dog class\n",
"dog1 = Dog(\"Buddy\", 3)\n",
"dog2 = Dog(\"Luna\", 5)\n",
"\n",
"# Access attributes and call methods\n",
"print(dog1.name) # Output: Buddy\n",
"print(dog1.age) # Output: 3\n",
"print(dog1.bark()) # Output: Buddy says Woof!\n",
"print(dog2.name) # Output: Luna\n",
"print(dog2.bark()) # Output: Luna says Woof!"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Explanation:\n",
"- `class Dog:` defines the class.\n",
"- `__init__` is the constructor, called when an object is created. It sets the object's initial attributes (`name` and `age`).\n",
"- `self` refers to the instance of the class.\n",
"- `bark` is a method that returns a string.\n",
"- `dog1` and `dog2` are objects created from the `Dog` class."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 3. Class Attributes\n",
"Class attributes are shared by all instances of a class. Let's add a class attribute to track the species."
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Canis familiaris\n",
"Canis familiaris\n"
]
}
],
"source": [
"class Dog:\n",
" # Class attribute\n",
" species = \"Canis familiaris\"\n",
"\n",
" def __init__(self, name, age):\n",
" self.name = name\n",
" self.age = age\n",
"\n",
" def bark(self):\n",
" return f\"{self.name} says Woof!\"\n",
"\n",
"dog1 = Dog(\"Buddy\", 3)\n",
"print(dog1.species) # Output: Canis familiaris\n",
"print(Dog.species) # Output: Canis familiaris"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Explanation:\n",
"- `species` is a class attribute, shared by all `Dog` objects.\n",
"- You can access it via the class (`Dog.species`) or an instance (`dog1.species`)."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 4. Inheritance\n",
"Inheritance allows a class to inherit attributes and methods from another class. Let's create a `Puppy` class that inherits from `Dog`."
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Max says Yip!\n",
"Max is playing!\n",
"Canis familiaris\n",
"True\n"
]
}
],
"source": [
"class Dog:\n",
" species = \"Canis familiaris\"\n",
"\n",
" def __init__(self, name, age):\n",
" self.name = name\n",
" self.age = age\n",
"\n",
" def bark(self):\n",
" return f\"{self.name} says Woof!\"\n",
"\n",
"# Puppy inherits from Dog\n",
"class Puppy(Dog):\n",
" def __init__(self, name, age, is_cute):\n",
" # Call the parent class's __init__\n",
" super().__init__(name, age)\n",
" self.is_cute = is_cute\n",
"\n",
" # Override the bark method\n",
" def bark(self):\n",
" return f\"{self.name} says Yip!\"\n",
"\n",
" # New method specific to Puppy\n",
" def play(self):\n",
" return f\"{self.name} is playing!\"\n",
"\n",
"puppy1 = Puppy(\"Max\", 1, True)\n",
"print(puppy1.bark()) # Output: Max says Yip!\n",
"print(puppy1.play()) # Output: Max is playing!\n",
"print(puppy1.species) # Output: Canis familiaris\n",
"print(puppy1.is_cute) # Output: True"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Explanation:\n",
"- `Puppy` inherits from `Dog` using `class Puppy(Dog)`.\n",
"- `super().__init__(name, age)` calls the parent class's constructor.\n",
"- The `bark` method is overridden in `Puppy` to say \"Yip!\" instead of \"Woof!\".\n",
"- `play` is a new method unique to `Puppy`."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 5. Practice Exercise\n",
"Create a `Student` class with:\n",
"- Instance attributes: `name` and `grade`.\n",
"- A class attribute: `school = \"High School\"`.\n",
"- A method `study` that returns `\"[name] is studying!\"`.\n",
"- Create a `Freshman` class that inherits from `Student` and adds a method `welcome` that returns `\"[name] is a new freshman!\"`."
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Alice is studying!\n",
"Bob is studying!\n",
"Bob is a new freshman!\n",
"High School\n"
]
}
],
"source": [
"class Student:\n",
" school = \"High School\"\n",
"\n",
" def __init__(self, name, grade):\n",
" self.name = name\n",
" self.grade = grade\n",
"\n",
" def study(self):\n",
" return f\"{self.name} is studying!\"\n",
"\n",
"class Freshman(Student):\n",
" def welcome(self):\n",
" return f\"{self.name} is a new freshman!\"\n",
"\n",
"# Test the classes\n",
"student1 = Student(\"Alice\", 10)\n",
"freshman1 = Freshman(\"Bob\", 9)\n",
"print(student1.study()) # Output: Alice is studying!\n",
"print(freshman1.study()) # Output: Bob is studying!\n",
"print(freshman1.welcome()) # Output: Bob is a new freshman!\n",
"print(freshman1.school) # Output: High School"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 6. Reuse class in a package\n",
"- Often we need to reuse a class developed by other programmer"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Alice is studying!\n",
"High School\n"
]
}
],
"source": [
"import ub\n",
"student = ub.Student(\"Alice\", 10)\n",
"print(student.study()) # Output: Alice is studying!\n",
"print(student.school) # Output: High School"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 7. Key OOP Concepts\n",
"- **Encapsulation**: Bundling data (attributes) and methods into a class.\n",
"- **Inheritance**: Allowing a class to inherit from another class.\n",
"- **Polymorphism**: Allowing different classes to be treated as instances of the same class (e.g., `Puppy` and `Dog` both have `bark` but behave differently).\n",
"- **Abstraction**: Hiding complex details and showing only necessary features (not covered in this basic tutorial)."
]
},
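{
"cell_type": "markdown",
"metadata": {},
"source": [
"A minimal sketch of the polymorphism bullet above, using the `Dog` and `Puppy` classes defined earlier: the same `bark()` call behaves differently depending on the object's class."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Polymorphism: the same bark() call dispatches to each object's own implementation\n",
"animals = [Dog(\"Buddy\", 3), Puppy(\"Max\", 1, True)]\n",
"for animal in animals:\n",
"    print(animal.bark())  # Dog says \"Woof!\", Puppy says \"Yip!\""
]
},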
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 8. Next Steps\n",
"- Experiment with more complex classes and methods.\n",
"- Learn about private attributes (using `_` or `__` prefixes).\n",
"- Explore abstract base classes and polymorphism in Python.\n",
"\n",
"This tutorial provides a foundation for understanding OOP in Python. Practice by creating your own classes and experimenting with inheritance!"
]
},
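{
"cell_type": "markdown",
"metadata": {},
"source": [
"A minimal sketch of the private-attribute conventions mentioned above (the `Account` class here is just an illustration): a single underscore marks an attribute as internal by convention, while a double underscore triggers name mangling."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"class Account:\n",
"    def __init__(self, owner, balance):\n",
"        self._owner = owner       # single underscore: internal by convention\n",
"        self.__balance = balance  # double underscore: name-mangled to _Account__balance\n",
"\n",
"account = Account(\"Alice\", 100)\n",
"print(account._owner)             # still accessible, but treated as private\n",
"print(account._Account__balance)  # the mangled name still works\n",
"# print(account.__balance)        # would raise AttributeError"
]
},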
{
"cell_type": "markdown",
"metadata": {},
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.13.2"
}
},
"nbformat": 4,
"nbformat_minor": 4
}


@@ -0,0 +1,279 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "c0fc8d3e",
"metadata": {},
"source": [
"## Step 1: Implment linear regression: replace the gradient function with autograd\n",
"\n",
"- Recall key steps for training\n",
" - Forward model (1) = compute prediction with model\n",
" - Forward model (2) = Compute loss\n",
" - Backward = compute gradients\n",
" - Update weights \n",
"\n",
"- replace np array with pytorch tensor \n",
"- replace gradient function with `loss.backward()` "
]
},
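{
"cell_type": "markdown",
"id": "1a2b3c4d",
"metadata": {},
"source": [
"Before the full training loop, a minimal sanity check (using the same data values as below) that `loss.backward()` reproduces the hand-derived gradient dJ/dw = mean(2 * x * (w * x - y)); the scalar weight `w = 1.0` here is just for illustration."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "1a2b3c4e",
"metadata": {},
"outputs": [],
"source": [
"import torch\n",
"\n",
"# Same data as the training example below; w is a single weight that tracks gradients\n",
"x = torch.tensor([1.0, 2.0, 3.0, 4.0])\n",
"y = torch.tensor([2.3, 3.4, 6.5, 6.8])\n",
"w = torch.tensor(1.0, requires_grad=True)\n",
"\n",
"l = ((w * x - y) ** 2).mean()          # MSE loss\n",
"l.backward()                           # autograd fills w.grad\n",
"\n",
"manual = (2 * x * (w * x - y)).mean()  # hand-derived dJ/dw\n",
"print(w.grad.item(), manual.item())    # the two values should match"
]
},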
{
"cell_type": "code",
"execution_count": 61,
"id": "016485d6",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"epoch 1: w = 25.779, loss = 5939.33496094\n",
"epoch 2: w = 22.191, loss = 4291.27685547\n",
"epoch 3: w = 19.141, loss = 3100.55566406\n",
"epoch 4: w = 16.549, loss = 2240.25878906\n",
"epoch 5: w = 14.346, loss = 1618.69470215\n",
"epoch 6: w = 12.473, loss = 1169.61450195\n",
"epoch 7: w = 10.881, loss = 845.15423584\n",
"epoch 8: w = 9.528, loss = 610.73156738\n",
"epoch 9: w = 8.378, loss = 441.36120605\n",
"epoch 10: w = 7.400, loss = 318.99108887\n",
"epoch 11: w = 6.569, loss = 230.57872009\n",
"epoch 12: w = 5.863, loss = 166.70083618\n",
"epoch 13: w = 5.262, loss = 120.54901886\n",
"epoch 14: w = 4.752, loss = 87.20434570\n",
"epoch 15: w = 4.318, loss = 63.11280441\n",
"epoch 16: w = 3.949, loss = 45.70667267\n",
"epoch 17: w = 3.636, loss = 33.13073730\n",
"epoch 18: w = 3.370, loss = 24.04463005\n",
"epoch 19: w = 3.143, loss = 17.47991371\n",
"epoch 20: w = 2.951, loss = 12.73690891\n",
"Prediction after training: f(6) = 17.704\n"
]
}
],
"source": [
"import numpy as np\n",
"import matplotlib.pyplot as plt\n",
"import torch\n",
"\n",
"def forward(w, x):\n",
" return w * x\n",
"\n",
"# MSE as the loss function\n",
"def loss(y, y_pred):\n",
" return ((y_pred - y)**2).mean()\n",
"\n",
"# don't need this any more as we use autograd\n",
"# MSE = j = 1/N * (w*x - y)**2\n",
"# dJ/dw = 2/N (w*x - y)*x\n",
"\"\"\"\n",
"def gradient(x, y, y_pred):\n",
" return np.mean(2*x*(y_pred - y))\n",
"\"\"\"\n",
"\n",
"# Train function\n",
"def train(learning_rate, n_iters, w, X, Y):\n",
" # Convert inputs to PyTorch tensors\n",
" w = torch.tensor(w, dtype=torch.float32, requires_grad=True)\n",
"\n",
" for epoch in range(n_iters):\n",
" y_pred = forward(w, X) # Forward pass\n",
" l = loss(Y, y_pred) # Loss\n",
" \n",
" # Backward pass, compute autograde\n",
" l.backward() \n",
"\n",
" # Update weights\n",
" with torch.no_grad():\n",
" w.data -= learning_rate * w.grad\n",
" \n",
" w.grad.zero_() # Reset gradients\n",
" \n",
" # Print using .item() for scalars to avoid NumPy conversion\n",
" print(f'epoch {epoch+1}: w = {w.item():.3f}, loss = {l.item():.8f}')\n",
" \n",
" print(f'Prediction after training: f(6) = {forward(w.item(), 6):.3f}')\n",
" \n",
" \n",
"# Define the data, make sure to use torch tensor, not np.array\n",
"X = torch.tensor([1.0, 2.0, 3, 4], dtype=torch.float32)\n",
"Y = torch.tensor([2.3, 3.4, 6.5, 6.8], dtype=torch.float32)\n",
"\n",
"# Configration\n",
"learning_rate = 0.01\n",
"n_iters = 20\n",
"w_init = 30\n",
"train(learning_rate, n_iters, w_init, X, Y)\n"
]
},
{
"cell_type": "markdown",
"id": "58726271",
"metadata": {},
"source": [
"## Step 2: Implment linear regression: replace the update weights (gradient descent) with an optimizor\n",
"- replace loss function with built-in loss function `loss = nn.MSELoss()`\n",
"- Update weights with ` optimizer.step()`"
]
},
{
"cell_type": "code",
"execution_count": 62,
"id": "712d00f9",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"epoch 1: w = 0.900, b = 0.874, loss = 5.15727806\n",
"epoch 2: w = 1.001, b = 0.907, loss = 3.69042563\n",
"epoch 3: w = 1.084, b = 0.934, loss = 2.67253804\n",
"epoch 4: w = 1.154, b = 0.956, loss = 1.96617866\n",
"epoch 5: w = 1.212, b = 0.974, loss = 1.47598338\n",
"epoch 6: w = 1.260, b = 0.989, loss = 1.13578010\n",
"epoch 7: w = 1.301, b = 1.001, loss = 0.89965343\n",
"epoch 8: w = 1.335, b = 1.011, loss = 0.73574245\n",
"epoch 9: w = 1.363, b = 1.019, loss = 0.62194228\n",
"epoch 10: w = 1.387, b = 1.025, loss = 0.54291308\n",
"epoch 11: w = 1.406, b = 1.031, loss = 0.48801088\n",
"epoch 12: w = 1.423, b = 1.035, loss = 0.44985026\n",
"epoch 13: w = 1.437, b = 1.038, loss = 0.42330658\n",
"epoch 14: w = 1.448, b = 1.040, loss = 0.40482411\n",
"epoch 15: w = 1.458, b = 1.042, loss = 0.39193565\n",
"epoch 16: w = 1.466, b = 1.043, loss = 0.38292903\n",
"epoch 17: w = 1.473, b = 1.044, loss = 0.37661636\n",
"epoch 18: w = 1.479, b = 1.045, loss = 0.37217349\n",
"epoch 19: w = 1.484, b = 1.045, loss = 0.36902806\n",
"epoch 20: w = 1.488, b = 1.045, loss = 0.36678365\n"
]
}
],
"source": [
"import numpy as np\n",
"import matplotlib.pyplot as plt\n",
"import torch\n",
"import torch.nn as nn\n",
"\n",
"# replace this with a Linear Model\n",
"\"\"\"\n",
"def forward(w, x):\n",
" return w * x\n",
"\"\"\"\n",
"\n",
"# don't need this any more as we use autograd\n",
"# MSE as the loss function\n",
"\"\"\"\n",
"def loss(y, y_pred):\n",
" return ((y_pred - y)**2).mean()\n",
"\"\"\"\n",
"\n",
"# don't need this any more as we use autograd\n",
"# MSE = j = 1/N * (w*x - y)**2\n",
"# dJ/dw = 2/N (w*x - y)*x\n",
"\"\"\"\n",
"def gradient(x, y, y_pred):\n",
" return np.mean(2*x*(y_pred - y))\n",
"\"\"\"\n",
"\n",
"# Train function\n",
"def train(n_iters, X, Y):\n",
" for epoch in range(n_iters):\n",
" y_pred = model(X) # Forward pass\n",
" l = loss(Y, y_pred) # Loss\n",
" \n",
" # Backward pass, compute autograde (directioin of change for each parameter)\n",
" l.backward() \n",
"\n",
" # Update weights\n",
" optimizer.step()\n",
" \n",
" optimizer.zero_grad() # Reset gradients\n",
" \n",
" # Print using .item() for scalars to avoid NumPy conversion\n",
" # Print w and b\n",
" w = model.weight.item() # Scalar value of weight\n",
" b = model.bias.item() # Scalar value of bias\n",
" print(f'epoch {epoch+1}: w = {w:.3f}, b = {b:.3f}, loss = {l.item():.8f}')\n",
" \n",
"# Define the data, make sure to use torch tensor, not np.array\n",
"X = torch.tensor([1.0, 2.0, 3, 4], dtype=torch.float32)\n",
"X = X.reshape(4, 1)\n",
"Y = torch.tensor([2.3, 3.4, 6.5, 6.8], dtype=torch.float32)\n",
"Y = Y.reshape(4, 1)\n",
"\n",
"n_samples, n_features = X.shape \n",
"\n",
"# Linear model f = wx + b\n",
"input_size = n_features\n",
"output_size = 1\n",
"model = nn.Linear(input_size, output_size)\n",
"\n",
"\n",
"# Loss and optimizer\n",
"learning_rate = 0.01\n",
"criterion = nn.MSELoss()\n",
"\n",
"# Stochastic Gradient Descent (SGD)\n",
"optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate) \n",
"\n",
"\n",
"n_iters = 20\n",
"\n",
"train(n_iters, X, Y)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "09623107",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Prediction for x = 6: 9.973\n"
]
}
],
"source": [
"# Test the model with x = 6\n",
"# predicted = model(X).detach().numpy()\n",
"test_input = torch.tensor([[6.0]], dtype=torch.float32) # Shape: (1, 1)\n",
"with torch.no_grad(): # Disable gradient tracking for inference\n",
" y_pred = model(test_input)\n",
"print(f'Prediction for x = 6: {y_pred.item():.3f}')"
]
},
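{
"cell_type": "markdown",
"id": "5e6f7a8b",
"metadata": {},
"source": [
"Optional: `matplotlib.pyplot` is already imported above, so we can sketch the training data against the line the model learned (a quick visual check, not part of the training steps)."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "5e6f7a8c",
"metadata": {},
"outputs": [],
"source": [
"# Plot the training data and the fitted line y = w*x + b\n",
"w = model.weight.item()\n",
"b = model.bias.item()\n",
"\n",
"x_line = torch.linspace(0, 6, 50).reshape(-1, 1)\n",
"with torch.no_grad():\n",
"    y_line = model(x_line)\n",
"\n",
"plt.scatter(X.numpy(), Y.numpy(), label=\"data\")\n",
"plt.plot(x_line.numpy(), y_line.numpy(), label=f\"fit: y = {w:.2f}x + {b:.2f}\")\n",
"plt.xlabel(\"x\")\n",
"plt.ylabel(\"y\")\n",
"plt.legend()\n",
"plt.show()"
]
},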
{
"cell_type": "code",
"execution_count": null,
"id": "44ad547f",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.13.2"
}
},
"nbformat": 4,
"nbformat_minor": 5
}


@@ -0,0 +1,9 @@
class Student:
school = "High School"
def __init__(self, name, grade):
self.name = name
self.grade = grade
def study(self):
return f"{self.name} is studying!"


@@ -0,0 +1,15 @@
// Artificial Neuron
digraph {
x1 [label="x₁" fillcolor=lightblue shape=circle style=filled]
x2 [label="x₂" fillcolor=lightblue shape=circle style=filled]
sum [label="Σ
(w₁x₁ + w₂x₂ + b)" fillcolor=lightgreen shape=circle style=filled]
act [label="σ
(sigmoid)" fillcolor=lightyellow shape=circle style=filled]
y [label="y
(output)" fillcolor=lightcoral shape=circle style=filled]
x1 -> sum [label="w₁"]
x2 -> sum [label="w₂"]
sum -> act
act -> y
}
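
The DOT graph above describes a single artificial neuron computing y = σ(w₁x₁ + w₂x₂ + b). A minimal PyTorch sketch of that computation (the weights, bias, and input values below are illustrative, not part of the lecture files):

import torch

# Illustrative weights, bias, and inputs for the neuron in the diagram
w1, w2, b = 0.5, -0.3, 0.1
x1, x2 = torch.tensor(1.0), torch.tensor(2.0)

z = w1 * x1 + w2 * x2 + b   # weighted sum
y = torch.sigmoid(z)        # sigmoid activation
print(y.item())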
