Mirror of https://github.com/aladdinpersson/Machine-Learning-Collection.git (synced 2026-02-20 13:50:41 +00:00)
Initial commit
ML/Kaggles/SantanderTransaction/dataset.py (new file, 23 lines)
@@ -0,0 +1,23 @@
import pandas as pd
import torch
from torch.utils.data import TensorDataset
from torch.utils.data.dataset import random_split
from math import ceil


def get_data():
    train_data = pd.read_csv("new_shiny_train.csv")
    y = train_data["target"]
    X = train_data.drop(["ID_code", "target"], axis=1)
    X_tensor = torch.tensor(X.values, dtype=torch.float32)
    y_tensor = torch.tensor(y.values, dtype=torch.float32)
    ds = TensorDataset(X_tensor, y_tensor)
    train_ds, val_ds = random_split(ds, [int(0.999 * len(ds)), ceil(0.001 * len(ds))])

    test_data = pd.read_csv("new_shiny_test.csv")
    test_ids = test_data["ID_code"]
    X = test_data.drop(["ID_code"], axis=1)
    X_tensor = torch.tensor(X.values, dtype=torch.float32)
    # The test set has no targets; the training targets are reused here purely as
    # placeholder labels so a TensorDataset can be built (they are ignored at inference).
    y_tensor = torch.tensor(y.values, dtype=torch.float32)
    test_ds = TensorDataset(X_tensor, y_tensor)

    return train_ds, val_ds, test_ds, test_ids
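The int(0.999 * len(ds)) / ceil(0.001 * len(ds)) pair happens to sum to len(ds) for the roughly 200,000-row Santander training file, but random_split raises a ValueError if the two lengths ever drift off by one. A minimal sketch of a more defensive way to derive the lengths; the names and sizes here are illustrative, not from the repo:

import torch
from torch.utils.data import TensorDataset, random_split

n = 200_000                                   # roughly the size of the Santander training set
val_len = max(1, round(0.001 * n))            # small held-out validation slice
train_len = n - val_len                       # remainder goes to training
assert train_len + val_len == n               # random_split requires the lengths to sum exactly
print(train_len, val_len)                     # 199800 200

# Tiny dummy dataset just to exercise the call itself.
ds = TensorDataset(torch.randn(1000, 4), torch.randint(0, 2, (1000,)).float())
train_ds, val_ds = random_split(ds, [len(ds) - 1, 1])
print(len(train_ds), len(val_ds))             # 999 1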
ML/Kaggles/SantanderTransaction/get_data.ipynb (new file, 1356 lines)
File diff suppressed because it is too large.
ML/Kaggles/SantanderTransaction/train.py (new file, 55 lines)
@@ -0,0 +1,55 @@
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.data import DataLoader
from sklearn import metrics
from tqdm import tqdm

from utils import get_predictions, get_submission
from dataset import get_data


class NN(nn.Module):
    def __init__(self, input_size, hidden_dim):
        super(NN, self).__init__()
        self.bn = nn.BatchNorm1d(input_size)
        # Each of the 200 original features is paired with its engineered counterpart,
        # so fc1 maps 2 inputs -> hidden_dim independently for every feature pair.
        self.fc1 = nn.Linear(2, hidden_dim)
        self.fc2 = nn.Linear(input_size // 2 * hidden_dim, 1)

    def forward(self, x):
        N = x.shape[0]
        x = self.bn(x)
        orig_features = x[:, :200].unsqueeze(2)              # (N, 200, 1)
        new_features = x[:, 200:].unsqueeze(2)               # (N, 200, 1)
        x = torch.cat([orig_features, new_features], dim=2)  # (N, 200, 2)
        x = F.relu(self.fc1(x)).reshape(N, -1)               # (N, 200*hidden_dim)
        return torch.sigmoid(self.fc2(x)).view(-1)


DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
model = NN(input_size=400, hidden_dim=100).to(DEVICE)
optimizer = optim.Adam(model.parameters(), lr=2e-3, weight_decay=1e-4)
loss_fn = nn.BCELoss()
train_ds, val_ds, test_ds, test_ids = get_data()
train_loader = DataLoader(train_ds, batch_size=1024, shuffle=True)
val_loader = DataLoader(val_ds, batch_size=1024)
test_loader = DataLoader(test_ds, batch_size=1024)

for epoch in range(20):
    probabilities, true = get_predictions(val_loader, model, device=DEVICE)
    print(f"VALIDATION ROC: {metrics.roc_auc_score(true, probabilities)}")

    for batch_idx, (data, targets) in enumerate(train_loader):
        data = data.to(DEVICE)
        targets = targets.to(DEVICE)

        # forward
        scores = model(data)
        loss = loss_fn(scores, targets)

        # backward
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

get_submission(model, test_loader, test_ids, DEVICE)
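A minimal sketch, on dummy data, of the shape bookkeeping in NN.forward: the 400 standardized columns split into 200 (original, engineered) pairs, the shared fc1 layer expands each pair to hidden_dim activations, and the flattened result feeds fc2. The names and sizes below are illustrative only.

import torch
import torch.nn as nn
import torch.nn.functional as F

N, hidden_dim = 8, 100
x = torch.randn(N, 400)                       # dummy batch: 200 original + 200 engineered features
bn = nn.BatchNorm1d(400)
fc1 = nn.Linear(2, hidden_dim)
fc2 = nn.Linear(200 * hidden_dim, 1)

x = bn(x)
pairs = torch.cat([x[:, :200].unsqueeze(2), x[:, 200:].unsqueeze(2)], dim=2)
assert pairs.shape == (N, 200, 2)             # one (orig, new) pair per feature
h = F.relu(fc1(pairs))                        # Linear acts on the last dim of every pair
assert h.shape == (N, 200, hidden_dim)
out = torch.sigmoid(fc2(h.reshape(N, -1))).view(-1)
assert out.shape == (N,)                      # one probability per sample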
ML/Kaggles/SantanderTransaction/utils.py (new file, 39 lines)
@@ -0,0 +1,39 @@
import pandas as pd
import numpy as np
import torch


def get_predictions(loader, model, device):
    model.eval()
    saved_preds = []
    true_labels = []

    with torch.no_grad():
        for x, y in loader:
            x = x.to(device)
            y = y.to(device)
            scores = model(x)
            saved_preds += scores.tolist()
            true_labels += y.tolist()

    model.train()
    return saved_preds, true_labels


def get_submission(model, loader, test_ids, device):
    all_preds = []
    model.eval()
    with torch.no_grad():
        for x, y in loader:
            x = x.to(device)
            score = model(x)
            all_preds += score.float().tolist()

    model.train()

    df = pd.DataFrame({
        "ID_code": test_ids.values,
        "target": np.array(all_preds),
    })
    df.to_csv("sub.csv", index=False)
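get_submission writes the raw sigmoid outputs straight into sub.csv. A small sketch of a sanity check one might run on the file before uploading it; the column names follow the DataFrame built above, and the probability-range check is an assumption about what the competition expects:

import pandas as pd

sub = pd.read_csv("sub.csv")
assert list(sub.columns) == ["ID_code", "target"]   # matches the DataFrame built in get_submission
assert sub["target"].between(0.0, 1.0).all()        # sigmoid outputs should already be probabilities
print(sub.head())
print(f"{len(sub)} rows")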
ML/Kaggles/Titanic/FirstKaggle_Titanic.ipynb (new file, 364 lines)
@@ -0,0 +1,364 @@
Kernel: Python 3 (python 3.8.5), nbformat 4.

In [1]:
import pandas as pd

In [2]:
data = pd.read_csv("train.csv")
test = pd.read_csv("test.csv")
test_ids = test["PassengerId"]

def clean(data):
    data = data.drop(["Ticket", "PassengerId", "Name", "Cabin"], axis=1)

    cols = ["SibSp", "Parch", "Fare", "Age"]
    for col in cols:
        data[col].fillna(data[col].median(), inplace=True)

    data.Embarked.fillna("U", inplace=True)
    return data

data = clean(data)
test = clean(test)

In [3]:
data.head(3)

Out[3]:
   Survived  Pclass     Sex   Age  SibSp  Parch     Fare Embarked
0         0       3    male  22.0      1      0   7.2500        S
1         1       1  female  38.0      1      0  71.2833        C
2         1       3  female  26.0      0      0   7.9250        S

In [4]:
from sklearn import preprocessing
le = preprocessing.LabelEncoder()
columns = ["Sex", "Embarked"]

for col in columns:
    data[col] = le.fit_transform(data[col])
    test[col] = le.transform(test[col])
    print(le.classes_)

data.head(5)

['female' 'male']
['C' 'Q' 'S' 'U']

Out[4]:
   Survived  Pclass  Sex   Age  SibSp  Parch     Fare  Embarked
0         0       3    1  22.0      1      0   7.2500         2
1         1       1    0  38.0      1      0  71.2833         0
2         1       3    0  26.0      0      0   7.9250         2
3         1       1    0  35.0      1      0  53.1000         2
4         0       3    1  35.0      0      0   8.0500         2

In [5]:
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split

y = data["Survived"]
X = data.drop("Survived", axis=1)

X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=42)

In [6]:
clf = LogisticRegression(random_state=0, max_iter=1000).fit(X_train, y_train)

In [7]:
predictions = clf.predict(X_val)
from sklearn.metrics import accuracy_score
accuracy_score(y_val, predictions)

Out[7]:
0.8888888888888888

In [8]:
submission_preds = clf.predict(test)

In [9]:
df = pd.DataFrame({"PassengerId": test_ids.values,
                   "Survived": submission_preds,
                   })

In [10]:
df.to_csv("submission.csv", index=False)
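One point worth keeping in mind about the fit_transform-on-train / transform-on-test pattern in cell 4: LabelEncoder.transform raises on categories it never saw during fit, which is why filling missing Embarked values with "U" in both splits (inside clean) matters. A minimal sketch on toy data, not taken from the notebook:

from sklearn import preprocessing

le = preprocessing.LabelEncoder()
le.fit_transform(["C", "Q", "S", "U"])   # classes_ becomes ['C' 'Q' 'S' 'U']

print(le.transform(["S", "U"]))          # fine: both labels were seen during fit

try:
    le.transform(["X"])                  # a label not seen during fit
except ValueError as err:
    print(err)                           # ValueError about previously unseen labels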