import numpy as np
# Decision stump used as weak classifier
class DecisionStump():
    def __init__(self):
        self.polarity = 1
        self.feature_idx = None
        self.threshold = None
        self.alpha = None

    def predict(self, X):
        m = X.shape[0]
        X_column = X[:, self.feature_idx]
        predictions = np.ones(m)
        if self.polarity == 1:
            predictions[X_column < self.threshold] = -1
        else:
            predictions[X_column > self.threshold] = -1
        return predictions
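To see what a single stump does in isolation, here is a minimal sketch (the toy feature values and the threshold are my own, not from the original): with the default polarity of 1, everything below the threshold is labeled -1 and everything above it +1.

# Toy example (not part of the original code)
stump = DecisionStump()
stump.feature_idx = 0
stump.threshold = 2.5
X_toy = np.array([[1.0], [2.0], [3.0], [4.0]])
print(stump.predict(X_toy))  # [-1. -1.  1.  1.]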
class Adaboost():
    def __init__(self, n_clf=5):
        self.n_clf = n_clf

    def fit(self, X, y):
        m, n = X.shape

        # Initialize weights to 1/N
        w = np.full(m, (1 / m))

        self.clfs = []
        # Iterate through classifiers
        for _ in range(self.n_clf):
            clf = DecisionStump()
            min_error = float('inf')

            # greedy search to find best threshold and feature
            for feature_i in range(n):
                X_column = X[:, feature_i]
                thresholds = np.unique(X_column)

                for threshold in thresholds:
                    # predict with polarity 1
                    p = 1
                    predictions = np.ones(m)
                    predictions[X_column < threshold] = -1

                    # Error = sum of weights of misclassified samples
                    misclassified = w[y != predictions]
                    error = sum(misclassified)

                    if error > 0.5:
                        error = 1 - error
                        p = -1

                    # store the best configuration
                    if error < min_error:
                        clf.polarity = p
                        clf.threshold = threshold
                        clf.feature_idx = feature_i
                        min_error = error

            # calculate alpha
            EPS = 1e-10
            clf.alpha = 0.5 * np.log((1.0 - min_error + EPS) / (min_error + EPS))

            # calculate predictions and update weights
            predictions = clf.predict(X)
            w *= np.exp(-clf.alpha * y * predictions)
            # Normalize to one
            w /= np.sum(w)

            # Save classifier
            self.clfs.append(clf)

    def predict(self, X):
        clf_preds = [clf.alpha * clf.predict(X) for clf in self.clfs]
        y_pred = np.sum(clf_preds, axis=0)
        y_pred = np.sign(y_pred)
        return y_pred
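As a quick numeric check of the alpha formula in fit (the error values below are my own illustration): a stump with low weighted error gets a large vote, while a near-random stump with error close to 0.5 gets a vote close to zero. The polarity flip above guarantees the stored error never exceeds 0.5.

# Illustration of the alpha formula (toy numbers, not from the original)
for eps in (0.1, 0.2, 0.4, 0.5):
    print(f"error={eps:.1f} -> alpha={0.5 * np.log((1.0 - eps) / eps):.3f}")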
AdaBoost from scratch
You can watch this video to understand the code.
# Imports
from sklearn import datasets
from sklearn.model_selection import train_test_split
import pandas as pd
def accuracy(y_true, y_pred):
    accuracy = np.sum(y_true == y_pred) / len(y_true)
    return accuracy

data = datasets.load_breast_cancer()

data_df = pd.DataFrame(data.data, columns=data.feature_names)
data_df['target'] = data.target

display(data_df.head())
|   | mean radius | mean texture | mean perimeter | mean area | mean smoothness | mean compactness | mean concavity | mean concave points | mean symmetry | mean fractal dimension | ... | worst texture | worst perimeter | worst area | worst smoothness | worst compactness | worst concavity | worst concave points | worst symmetry | worst fractal dimension | target |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 0 | 17.99 | 10.38 | 122.80 | 1001.0 | 0.11840 | 0.27760 | 0.3001 | 0.14710 | 0.2419 | 0.07871 | ... | 17.33 | 184.60 | 2019.0 | 0.1622 | 0.6656 | 0.7119 | 0.2654 | 0.4601 | 0.11890 | 0 |
| 1 | 20.57 | 17.77 | 132.90 | 1326.0 | 0.08474 | 0.07864 | 0.0869 | 0.07017 | 0.1812 | 0.05667 | ... | 23.41 | 158.80 | 1956.0 | 0.1238 | 0.1866 | 0.2416 | 0.1860 | 0.2750 | 0.08902 | 0 |
| 2 | 19.69 | 21.25 | 130.00 | 1203.0 | 0.10960 | 0.15990 | 0.1974 | 0.12790 | 0.2069 | 0.05999 | ... | 25.53 | 152.50 | 1709.0 | 0.1444 | 0.4245 | 0.4504 | 0.2430 | 0.3613 | 0.08758 | 0 |
| 3 | 11.42 | 20.38 | 77.58 | 386.1 | 0.14250 | 0.28390 | 0.2414 | 0.10520 | 0.2597 | 0.09744 | ... | 26.50 | 98.87 | 567.7 | 0.2098 | 0.8663 | 0.6869 | 0.2575 | 0.6638 | 0.17300 | 0 |
| 4 | 20.29 | 14.34 | 135.10 | 1297.0 | 0.10030 | 0.13280 | 0.1980 | 0.10430 | 0.1809 | 0.05883 | ... | 16.67 | 152.20 | 1575.0 | 0.1374 | 0.2050 | 0.4000 | 0.1625 | 0.2364 | 0.07678 | 0 |
5 rows × 31 columns
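For context (a quick check of my own, not in the original notebook): the dataset has 569 samples, split 212/357 between the two classes.

print(np.bincount(data.target))  # [212 357] -> 212 malignant (0), 357 benign (1)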
X, y = data.data, data.target

# AdaBoost expects labels in {-1, +1}, so relabel class 0 as -1
y[y == 0] = -1

X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=5
)
# Adaboost classification with 5 weak classifiers
clf = Adaboost(n_clf=5)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)

acc = accuracy(y_test, y_pred)
print("Accuracy:", acc)
Accuracy: 0.9736842105263158
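As an optional sanity check (my own addition, not in the original post), the result can be compared against scikit-learn's AdaBoostClassifier on the same split; the score may differ somewhat, since scikit-learn fits and weights its stumps differently.

# Comparison with scikit-learn (assumes X_train etc. from above)
from sklearn.ensemble import AdaBoostClassifier

sk_clf = AdaBoostClassifier(n_estimators=5, random_state=5)
sk_clf.fit(X_train, y_train)
print("sklearn accuracy:", accuracy(y_test, sk_clf.predict(X_test)))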