Machine Learning with Python - Day 6

Cognizant, Bangalore
March, 2015
Jigsaw Academy

Instructor: Anand Chitipothu

These live notes are available online at http://bit.ly/cognizant-py.

Classification

We are going to look at various classification algorithms today.

Datasets

Wine recognition data - Download

Wine recognition data from UCI machine learning datasets.

Utility Functions

We are going to use the following function for drawing classification boundaries, to get a visual sense of how a classification algorithm behaves on the given data.

In [115]:
import os
import numpy as np
import matplotlib.pyplot as plt
from IPython.display import Image
from sklearn import tree

# https://gist.github.com/anandology/772d44d291a9daa198d4
def plot_decision_boundaries(X, y, model_class, **model_params):
    """Function to plot the decision boundaries of a classification model.

    This uses just the first two columns of the data for fitting
    the model as we need to find the predicted value for every point in
    the scatter plot.

    One possible improvement could be to use all columns for fitting
    and use the first 2 columns and the median of all other columns
    for predicting.

    Adapted from:
    http://scikit-learn.org/stable/auto_examples/ensemble/plot_voting_decision_regions.html
    http://scikit-learn.org/stable/auto_examples/cluster/plot_kmeans_digits.html
    """
    reduced_data = X[:, :2]
    model = model_class(**model_params)
    model.fit(reduced_data, y)

    # Step size of the mesh. Decrease to increase the quality of the plot.
    h = 0.1

    # Plot the decision boundary. For that, we assign a color to each
    # point in the mesh [x_min, x_max] x [y_min, y_max].
    x_min, x_max = reduced_data[:, 0].min() - 1, reduced_data[:, 0].max() + 1
    y_min, y_max = reduced_data[:, 1].min() - 1, reduced_data[:, 1].max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                         np.arange(y_min, y_max, h))

    # Obtain the predicted label for each point in the mesh.
    Z = model.predict(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape)

    plt.contourf(xx, yy, Z, alpha=0.4)
    plt.scatter(X[:, 0], X[:, 1], c=y, alpha=0.8)
    return plt

def export_tree(model, filename="tree.dot", simple=False):
    """Exports the decision tree as a graphviz file.
    """
    params = {}
    if not simple:
        params = dict(filled=True, rounded=True, special_characters=True)
        
    with open(filename, "w") as f:
        tree.export_graphviz(model, out_file=f, 
                         class_names=iris.target_names, 
                         **params)    

def show_tree(model, filename="tree.dot", simple=False, text=False):
    """Displays the tree as image.
    
    This requires the graphviz package to be installed on
    the computer. If it is not installed it displays the
    graphviz file as text.
    """
    dot_exists = os.system("dot -V") == 0
    if not dot_exists or text:
        simple = True
    export_tree(model, filename, simple=simple)
    pngfile = filename.replace(".dot", ".png")
    if dot_exists and not text:
        os.system("dot -Tpng {} -o {}".format(filename, pngfile))
        return Image(pngfile)
    else:
        txt = open(filename).read().replace("\\n", "\n   ").replace(";", ";\n")
        print(txt)

Logistic Regression

In linear regression we model the response directly:

$y = \beta_0 + \beta_1 X$

In logistic regression we instead model the log-odds (logit) of the probability $p$ of belonging to a class:

$logit(p) = \beta_0 + \beta_1 X$

where:

$logit(p) = \log\left(\frac{p}{1-p}\right)$

If you work it out, solving for $p$ gives the logistic (sigmoid) function:

$p = \frac{1}{1 + e^{-(\beta_0 + \beta_1 X)}}$
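
A quick numeric check of these formulas (a sketch; the coefficients $\beta_0 = -3$ and $\beta_1 = 2$ are made up purely for illustration):

In [ ]:
import numpy as np

b0, b1 = -3.0, 2.0                    # made-up coefficients, for illustration only
x = np.array([0.0, 1.0, 2.0, 3.0])

p = 1 / (1 + np.exp(-(b0 + b1 * x)))  # logistic function
print(p)                              # probabilities between 0 and 1
print(np.log(p / (1 - p)))            # logit(p) recovers b0 + b1*x: [-3. -1.  1.  3.]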

In [13]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

%matplotlib inline

Let's take the iris data.

In [11]:
from sklearn.datasets import load_iris
iris = load_iris()

# convert the data into two classes
X = iris.data
y = (iris.target > 0).astype(np.int8)
In [17]:
plt.scatter(X[:, 0], X[:, 1], c=y, cmap="summer")
Out[17]:
<matplotlib.collections.PathCollection at 0x1147b16d8>
In [18]:
pd.Series(y).value_counts()
Out[18]:
1    100
0     50
dtype: int64
In [19]:
from sklearn.linear_model import LogisticRegression
model = LogisticRegression()
In [20]:
model.fit(X, y)
Out[20]:
LogisticRegression(C=1.0, class_weight=None, dual=False, fit_intercept=True,
          intercept_scaling=1, max_iter=100, multi_class='ovr', n_jobs=1,
          penalty='l2', random_state=None, solver='liblinear', tol=0.0001,
          verbose=0, warm_start=False)
In [21]:
model.predict(X[:5])
Out[21]:
array([0, 0, 0, 0, 0], dtype=int8)
In [22]:
y[:5]
Out[22]:
array([0, 0, 0, 0, 0], dtype=int8)
In [23]:
model.predict_proba(X[:5])
Out[23]:
array([[ 0.98407436,  0.01592564],
       [ 0.96477451,  0.03522549],
       [ 0.97692365,  0.02307635],
       [ 0.95712083,  0.04287917],
       [ 0.98563373,  0.01436627]])

Problem: Find the class for sample [5, 4, 3, 1]. What is the probability of it belonging to class 0?

In [24]:
X[:5]
Out[24]:
array([[ 5.1,  3.5,  1.4,  0.2],
       [ 4.9,  3. ,  1.4,  0.2],
       [ 4.7,  3.2,  1.3,  0.2],
       [ 4.6,  3.1,  1.5,  0.2],
       [ 5. ,  3.6,  1.4,  0.2]])
In [25]:
X[:1]
Out[25]:
array([[ 5.1,  3.5,  1.4,  0.2]])
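
One way to answer the problem above (a sketch; it reuses the logistic regression model fitted on the two-class iris data):

In [ ]:
sample = [[5, 4, 3, 1]]
print(model.predict(sample))              # predicted class for the sample
print(model.predict_proba(sample)[:, 0])  # probability of belonging to class 0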

Let's try to plot the probability of being in class 0 for every point.

In [26]:
p0 = model.predict_proba(X)[:, 0]
In [27]:
plt.plot(sorted(p0))
Out[27]:
[<matplotlib.lines.Line2D at 0x1182c1898>]
In [30]:
iris.target_names
Out[30]:
array(['setosa', 'versicolor', 'virginica'], 
      dtype='<U10')
In [32]:
plot_decision_boundaries(X, y, LogisticRegression)
Out[32]:
<module 'matplotlib.pyplot' from '/Users/anand/anaconda/lib/python3.5/site-packages/matplotlib/pyplot.py'>

Q: Can we try with three classes?

In [38]:
plot_decision_boundaries(X, iris.target, LogisticRegression)
Out[38]:
<module 'matplotlib.pyplot' from '/Users/anand/anaconda/lib/python3.5/site-packages/matplotlib/pyplot.py'>

Problem: Take the wine data and try to fit a logistic regression model.

The first column is the class label.

In [49]:
wine = pd.read_csv("datasets/wine.data", header=None)
In [50]:
wine.shape
Out[50]:
(178, 14)
In [51]:
wine.columns
Out[51]:
Int64Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13], dtype='int64')
In [52]:
wine.head()
Out[52]:
0 1 2 3 4 5 6 7 8 9 10 11 12 13
0 1 14.23 1.71 2.43 15.6 127 2.80 3.06 0.28 2.29 5.64 1.04 3.92 1065
1 1 13.20 1.78 2.14 11.2 100 2.65 2.76 0.26 1.28 4.38 1.05 3.40 1050
2 1 13.16 2.36 2.67 18.6 101 2.80 3.24 0.30 2.81 5.68 1.03 3.17 1185
3 1 14.37 1.95 2.50 16.8 113 3.85 3.49 0.24 2.18 7.80 0.86 3.45 1480
4 1 13.24 2.59 2.87 21.0 118 2.80 2.69 0.39 1.82 4.32 1.04 2.93 735
In [53]:
wine.columns = ["C"+str(n) for n in wine.columns]
In [54]:
wine.head()
Out[54]:
C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 C10 C11 C12 C13
0 1 14.23 1.71 2.43 15.6 127 2.80 3.06 0.28 2.29 5.64 1.04 3.92 1065
1 1 13.20 1.78 2.14 11.2 100 2.65 2.76 0.26 1.28 4.38 1.05 3.40 1050
2 1 13.16 2.36 2.67 18.6 101 2.80 3.24 0.30 2.81 5.68 1.03 3.17 1185
3 1 14.37 1.95 2.50 16.8 113 3.85 3.49 0.24 2.18 7.80 0.86 3.45 1480
4 1 13.24 2.59 2.87 21.0 118 2.80 2.69 0.39 1.82 4.32 1.04 2.93 735
In [55]:
X = wine.drop("C0", axis=1)
y = wine.C0
In [56]:
X.shape
Out[56]:
(178, 13)
In [57]:
y.shape
Out[57]:
(178,)
In [59]:
plot_decision_boundaries(X.values, y.values, LogisticRegression)
Out[59]:
<module 'matplotlib.pyplot' from '/Users/anand/anaconda/lib/python3.5/site-packages/matplotlib/pyplot.py'>
In [60]:
wine.C0.value_counts()
Out[60]:
2    71
1    59
3    48
Name: C0, dtype: int64

Metrics

In [61]:
model = LogisticRegression()
model.fit(X,y)
Out[61]:
LogisticRegression(C=1.0, class_weight=None, dual=False, fit_intercept=True,
          intercept_scaling=1, max_iter=100, multi_class='ovr', n_jobs=1,
          penalty='l2', random_state=None, solver='liblinear', tol=0.0001,
          verbose=0, warm_start=False)
In [62]:
yp = model.predict(X)
In [63]:
from sklearn.metrics import accuracy_score
In [64]:
accuracy_score(y, yp)
Out[64]:
0.9719101123595506
In [65]:
y.shape
Out[65]:
(178,)
In [66]:
(y==yp).sum()
Out[66]:
173
In [67]:
173/178.0
Out[67]:
0.9719101123595506

Split the data into train and test.

In [68]:
from sklearn.cross_validation import train_test_split
In [69]:
Xtrain, Xtest, ytrain, ytest = train_test_split(X, y, test_size=0.3)
In [70]:
Xtrain.shape
Out[70]:
(124, 13)
In [71]:
Xtest.shape
Out[71]:
(54, 13)
In [74]:
pd.Series(ytrain).value_counts()
Out[74]:
2    51
1    40
3    33
Name: C0, dtype: int64
In [75]:
pd.Series(ytest).value_counts()
Out[75]:
2    20
1    19
3    15
Name: C0, dtype: int64
In [76]:
model = LogisticRegression()
model.fit(Xtrain, ytrain)
Out[76]:
LogisticRegression(C=1.0, class_weight=None, dual=False, fit_intercept=True,
          intercept_scaling=1, max_iter=100, multi_class='ovr', n_jobs=1,
          penalty='l2', random_state=None, solver='liblinear', tol=0.0001,
          verbose=0, warm_start=False)
In [78]:
ytrainp = model.predict(Xtrain)
print("accuracy on training data is ", accuracy_score(ytrain, ytrainp))
accuracy on training data is  0.991935483871
In [79]:
ytestp = model.predict(Xtest)
print("accuracy on test data is ", accuracy_score(ytest, ytestp))
accuracy on test data is  0.944444444444
In [80]:
from sklearn.metrics import confusion_matrix
In [81]:
confusion_matrix(ytest, ytestp)
Out[81]:
array([[17,  2,  0],
       [ 0, 19,  1],
       [ 0,  0, 15]])
In [82]:
pd.Series(ytest).value_counts()
Out[82]:
2    20
1    19
3    15
Name: C0, dtype: int64
In [86]:
pd.Series(ytestp).value_counts()
Out[86]:
2    21
1    17
3    16
dtype: int64
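
Beyond accuracy and the confusion matrix, per-class precision and recall can be summarized in a single call (a sketch, using ytest and ytestp from the cells above):

In [ ]:
from sklearn.metrics import classification_report
print(classification_report(ytest, ytestp))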

Decision Trees

In [87]:
from sklearn.tree import DecisionTreeClassifier
In [88]:
X = iris.data[:, [2, 3]] # petal-length and petal-width
y = iris.target
In [95]:
plt.scatter(X[:, 0], X[:, 1], c=y, cmap="hot")
Out[95]:
<matplotlib.collections.PathCollection at 0x1190cccf8>
In [96]:
plot_decision_boundaries(X, y, DecisionTreeClassifier, max_depth=1)
Out[96]:
<module 'matplotlib.pyplot' from '/Users/anand/anaconda/lib/python3.5/site-packages/matplotlib/pyplot.py'>
In [107]:
plot_decision_boundaries(X, y, DecisionTreeClassifier, max_depth=2, random_state=0)
Out[107]:
<module 'matplotlib.pyplot' from '/Users/anand/anaconda/lib/python3.5/site-packages/matplotlib/pyplot.py'>
In [98]:
plot_decision_boundaries(X, y, DecisionTreeClassifier, max_depth=3)
Out[98]:
<module 'matplotlib.pyplot' from '/Users/anand/anaconda/lib/python3.5/site-packages/matplotlib/pyplot.py'>
In [108]:
model = DecisionTreeClassifier(max_depth=2, random_state=0)
model.fit(X, y)
Out[108]:
DecisionTreeClassifier(class_weight=None, criterion='gini', max_depth=2,
            max_features=None, max_leaf_nodes=None, min_samples_leaf=1,
            min_samples_split=2, min_weight_fraction_leaf=0.0,
            presort=False, random_state=0, splitter='best')
In [109]:
show_tree(model)
Out[109]:
In [111]:
yp = model.predict(X)
In [112]:
accuracy_score(y, yp)
Out[112]:
0.95999999999999996
In [113]:
confusion_matrix(y, yp)
Out[113]:
array([[50,  0,  0],
       [ 0, 49,  1],
       [ 0,  5, 45]])
In [116]:
show_tree(model, text=True)
digraph Tree {
node [shape=box] ;

0 [label="X[1] <= 0.8
   gini = 0.6667
   samples = 150
   value = [50, 50, 50]
   class = setosa"] ;

1 [label="gini = 0.0
   samples = 50
   value = [50, 0, 0]
   class = setosa"] ;

0 -> 1 [labeldistance=2.5, labelangle=45, headlabel="True"] ;

2 [label="X[1] <= 1.75
   gini = 0.5
   samples = 100
   value = [0, 50, 50]
   class = versicolor"] ;

0 -> 2 [labeldistance=2.5, labelangle=-45, headlabel="False"] ;

3 [label="gini = 0.168
   samples = 54
   value = [0, 49, 5]
   class = versicolor"] ;

2 -> 3 ;

4 [label="gini = 0.0425
   samples = 46
   value = [0, 1, 45]
   class = virginica"] ;

2 -> 4 ;

}

Let's see what happens if we allow the decision tree to grow very deep.

In [117]:
plot_decision_boundaries(X, y, DecisionTreeClassifier, max_depth=5)
Out[117]:
<module 'matplotlib.pyplot' from '/Users/anand/anaconda/lib/python3.5/site-packages/matplotlib/pyplot.py'>

Problem: Build a decision tree model using all the features of iris. Split the data into training and test sets and compare the model accuracy on the training and test datasets.

Find out at what max_depth, the model gives best accuracy.

In [167]:
X = iris.data
y = iris.target

Xtrain, Xtest, ytrain, ytest = train_test_split(X, y, test_size=0.3, stratify=y)

def try_model(max_depth):
    model = DecisionTreeClassifier(max_depth=max_depth)
    model.fit(Xtrain, ytrain)
    ytrainp = model.predict(Xtrain)
    ytestp = model.predict(Xtest)
    
    return [max_depth, 
            accuracy_score(ytrain, ytrainp), 
            accuracy_score(ytest, ytestp)]

data = pd.DataFrame([try_model(i) for i in range(1, 10)])
In [168]:
data.set_index(0, inplace=True)
In [169]:
data.plot()
Out[169]:
<matplotlib.axes._subplots.AxesSubplot at 0x11a6d1b70>
In [170]:
model = DecisionTreeClassifier(max_depth=2)
model.fit(Xtrain, ytrain)
Out[170]:
DecisionTreeClassifier(class_weight=None, criterion='gini', max_depth=2,
            max_features=None, max_leaf_nodes=None, min_samples_leaf=1,
            min_samples_split=2, min_weight_fraction_leaf=0.0,
            presort=False, random_state=None, splitter='best')
In [171]:
show_tree(model)
Out[171]:

Decision Tree Regression

In [172]:
from sklearn.tree import DecisionTreeRegressor
In [173]:
d = pd.read_csv("datasets/chirps.tsv", delimiter="\t")
d.head()
Out[173]:
chirps temp
0 20.0 88.6
1 16.0 71.6
2 19.8 93.3
3 18.4 84.3
4 17.1 80.6
In [174]:
model = DecisionTreeRegressor()
model.fit(d[["temp"]], d["chirps"])
Out[174]:
DecisionTreeRegressor(criterion='mse', max_depth=None, max_features=None,
           max_leaf_nodes=None, min_samples_leaf=1, min_samples_split=2,
           min_weight_fraction_leaf=0.0, presort=False, random_state=None,
           splitter='best')
In [175]:
show_tree(model)
Out[175]:
In [176]:
yp = model.predict(d[["temp"]])
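
To see the piecewise-constant nature of the tree's predictions, we can plot them over a grid of temperatures (a sketch, assuming d and model from the cells above):

In [ ]:
temps = np.arange(d.temp.min(), d.temp.max(), 0.1).reshape(-1, 1)
plt.scatter(d.temp, d.chirps)                               # observed data
plt.plot(temps.ravel(), model.predict(temps), color="red")  # tree predictions
plt.xlabel("temp")
plt.ylabel("chirps")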

Wine Dataset

In [178]:
!cat datasets/wine.names
1. Title of Database: Wine recognition data
	Updated Sept 21, 1998 by C.Blake : Added attribute information

2. Sources:
   (a) Forina, M. et al, PARVUS - An Extendible Package for Data
       Exploration, Classification and Correlation. Institute of Pharmaceutical
       and Food Analysis and Technologies, Via Brigata Salerno, 
       16147 Genoa, Italy.

   (b) Stefan Aeberhard, email: stefan@coral.cs.jcu.edu.au
   (c) July 1991
3. Past Usage:

   (1)
   S. Aeberhard, D. Coomans and O. de Vel,
   Comparison of Classifiers in High Dimensional Settings,
   Tech. Rep. no. 92-02, (1992), Dept. of Computer Science and Dept. of
   Mathematics and Statistics, James Cook University of North Queensland.
   (Also submitted to Technometrics).

   The data was used with many others for comparing various 
   classifiers. The classes are separable, though only RDA 
   has achieved 100% correct classification.
   (RDA : 100%, QDA 99.4%, LDA 98.9%, 1NN 96.1% (z-transformed data))
   (All results using the leave-one-out technique)

   In a classification context, this is a well posed problem 
   with "well behaved" class structures. A good data set 
   for first testing of a new classifier, but not very 
   challenging.

   (2) 
   S. Aeberhard, D. Coomans and O. de Vel,
   "THE CLASSIFICATION PERFORMANCE OF RDA"
   Tech. Rep. no. 92-01, (1992), Dept. of Computer Science and Dept. of
   Mathematics and Statistics, James Cook University of North Queensland.
   (Also submitted to Journal of Chemometrics).

   Here, the data was used to illustrate the superior performance of
   the use of a new appreciation function with RDA. 

4. Relevant Information:

   -- These data are the results of a chemical analysis of
      wines grown in the same region in Italy but derived from three
      different cultivars.
      The analysis determined the quantities of 13 constituents
      found in each of the three types of wines. 

   -- I think that the initial data set had around 30 variables, but 
      for some reason I only have the 13 dimensional version. 
      I had a list of what the 30 or so variables were, but a.) 
      I lost it, and b.), I would not know which 13 variables
      are included in the set.

   -- The attributes are (dontated by Riccardo Leardi, 
	riclea@anchem.unige.it )
 	1) Alcohol
 	2) Malic acid
 	3) Ash
	4) Alcalinity of ash  
 	5) Magnesium
	6) Total phenols
 	7) Flavanoids
 	8) Nonflavanoid phenols
 	9) Proanthocyanins
	10)Color intensity
 	11)Hue
 	12)OD280/OD315 of diluted wines
 	13)Proline            

5. Number of Instances

      	class 1 59
	class 2 71
	class 3 48

6. Number of Attributes 
	
	13

7. For Each Attribute:

	All attributes are continuous
	
	No statistics available, but suggest to standardise
	variables for certain uses (e.g. for us with classifiers
	which are NOT scale invariant)

	NOTE: 1st attribute is class identifier (1-3)

8. Missing Attribute Values:

	None

9. Class Distribution: number of instances per class

      	class 1 59
	class 2 71
	class 3 48

Preprocessing

In [179]:
from sklearn.preprocessing import StandardScaler
In [180]:
scaler = StandardScaler()
In [181]:
scaler.fit(iris.data)
Out[181]:
StandardScaler(copy=True, with_mean=True, with_std=True)
In [182]:
data2 = scaler.transform(iris.data)
In [183]:
d = pd.DataFrame(data2)
In [184]:
d.describe()
Out[184]:
0 1 2 3
count 1.500000e+02 1.500000e+02 1.500000e+02 1.500000e+02
mean -1.690315e-15 -1.637024e-15 -1.482518e-15 -1.623146e-15
std 1.003350e+00 1.003350e+00 1.003350e+00 1.003350e+00
min -1.870024e+00 -2.438987e+00 -1.568735e+00 -1.444450e+00
25% -9.006812e-01 -5.877635e-01 -1.227541e+00 -1.181504e+00
50% -5.250608e-02 -1.249576e-01 3.362659e-01 1.332259e-01
75% 6.745011e-01 5.692513e-01 7.627586e-01 7.905908e-01
max 2.492019e+00 3.114684e+00 1.786341e+00 1.710902e+00
In [185]:
x1 = [3, 4, 5, 2]
In [186]:
scaler.transform([x1])
Out[186]:
array([[-3.44520645,  2.18907205,  0.70589294,  1.05353673]])
In [187]:
scaler.mean_
Out[187]:
array([ 5.84333333,  3.054     ,  3.75866667,  1.19866667])
In [188]:
scaler.std_
/Users/anand/anaconda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function std_ is deprecated; Attribute ``std_`` will be removed in 0.19. Use ``scale_`` instead
  warnings.warn(msg, category=DeprecationWarning)
Out[188]:
array([ 0.82530129,  0.43214658,  1.75852918,  0.76061262])
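
As the deprecation warning above suggests, newer scikit-learn versions expose the same per-feature scale as scale_:

In [ ]:
scaler.scale_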
In [189]:
from sklearn.pipeline import Pipeline
In [190]:
pipe = Pipeline([('scale', StandardScaler()),
                ('dt', DecisionTreeClassifier())])
In [192]:
pipe.fit(X, y)
Out[192]:
Pipeline(steps=[('scale', StandardScaler(copy=True, with_mean=True, with_std=True)), ('dt', DecisionTreeClassifier(class_weight=None, criterion='gini', max_depth=None,
            max_features=None, max_leaf_nodes=None, min_samples_leaf=1,
            min_samples_split=2, min_weight_fraction_leaf=0.0,
            presort=False, random_state=None, splitter='best'))])
In [193]:
yp = pipe.predict(X)
In [194]:
accuracy_score(y, yp)
Out[194]:
1.0
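
The perfect score above is measured on the very data the pipeline was trained on. A quick sanity check with cross-validation gives a more honest estimate (a sketch, using the same cross_validation module as earlier in these notes):

In [ ]:
from sklearn.cross_validation import cross_val_score
cross_val_score(pipe, X, y, cv=5)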

Support Vector Machines

In [195]:
from sklearn.datasets import make_circles
In [196]:
X, y = make_circles(n_samples=1000, factor=0.2, noise=0.1)
In [197]:
plt.scatter(X[:, 0], X[:, 1], c=y)
Out[197]:
<matplotlib.collections.PathCollection at 0x118fc2860>
In [198]:
d = pd.DataFrame(X, columns=["x1", "x2"])
In [199]:
d["y"] = y
In [201]:
d.head()
Out[201]:
x1 x2 y
0 -0.029502 -0.098446 1
1 -0.069378 0.031678 1
2 0.725902 0.684591 0
3 0.310474 0.059206 1
4 -0.167329 -0.013290 1
In [205]:
d.plot(kind="scatter", x="x1", y="x2", c="y", cmap="summer")
Out[205]:
<matplotlib.axes._subplots.AxesSubplot at 0x11a633cf8>
In [206]:
plt.scatter(d.x1, d.x1*d.x1+d.x2*d.x2, c=d.y)
Out[206]:
<matplotlib.collections.PathCollection at 0x11b01cdd8>
In [207]:
plt.scatter(d.x1, (d.x1*d.x1+d.x2*d.x2)**0.5, c=d.y)
Out[207]:
<matplotlib.collections.PathCollection at 0x11b035080>
In [208]:
d["x3"] = (d.x1*d.x1+d.x2*d.x2)**0.5
In [216]:
from sklearn.linear_model import LogisticRegression
In [217]:
model = LogisticRegression()
model.fit(d[["x1", "x2", "x3"]], d.y)
Out[217]:
LogisticRegression(C=1.0, class_weight=None, dual=False, fit_intercept=True,
          intercept_scaling=1, max_iter=100, multi_class='ovr', n_jobs=1,
          penalty='l2', random_state=None, solver='liblinear', tol=0.0001,
          verbose=0, warm_start=False)
In [218]:
yp = model.predict(d[["x1", "x2", "x3"]])
In [219]:
accuracy_score(y, yp)
Out[219]:
0.999
In [221]:
plot_decision_boundaries(d[["x1", "x3"]].values, d.y.values, LogisticRegression)
Out[221]:
<module 'matplotlib.pyplot' from '/Users/anand/anaconda/lib/python3.5/site-packages/matplotlib/pyplot.py'>
In [222]:
X[:5]
Out[222]:
array([[-0.02950167, -0.0984464 ],
       [-0.06937828,  0.03167759],
       [ 0.72590242,  0.68459104],
       [ 0.31047388,  0.0592064 ],
       [-0.1673288 , -0.01328984]])
In [223]:
plot_decision_boundaries(X, y, LogisticRegression)
Out[223]:
<module 'matplotlib.pyplot' from '/Users/anand/anaconda/lib/python3.5/site-packages/matplotlib/pyplot.py'>
In [224]:
from sklearn.svm import SVC
In [225]:
plot_decision_boundaries(X, y, SVC, kernel="linear")
Out[225]:
<module 'matplotlib.pyplot' from '/Users/anand/anaconda/lib/python3.5/site-packages/matplotlib/pyplot.py'>
In [226]:
plot_decision_boundaries(X, y, SVC, kernel="rbf")
Out[226]:
<module 'matplotlib.pyplot' from '/Users/anand/anaconda/lib/python3.5/site-packages/matplotlib/pyplot.py'>

Problem: There is another interesting generated dataset called moons. Try it with all the classification algorithms and see how it works.

In [231]:
from sklearn.datasets import make_moons
In [228]:
X, y = make_moons(1000)
In [230]:
plt.scatter(X[:, 0], X[:, 1], c = y)
Out[230]:
<matplotlib.collections.PathCollection at 0x11bf538d0>
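
A possible sketch for this problem (it assumes the classifiers and helpers imported in earlier cells; exact accuracies will vary with the noise level and the random split):

In [ ]:
X, y = make_moons(n_samples=1000, noise=0.1)
Xtrain, Xtest, ytrain, ytest = train_test_split(X, y, test_size=0.3)

for model_class in [LogisticRegression, DecisionTreeClassifier, SVC]:
    model = model_class()
    model.fit(Xtrain, ytrain)
    print(model_class.__name__, accuracy_score(ytest, model.predict(Xtest)))
    plot_decision_boundaries(X, y, model_class)
    plt.show()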
In [237]:
X, y = make_circles(n_samples=1000, factor=0.2, noise=0.1)
d = pd.DataFrame(X, columns=["x1", "x2"])
d['y'] = y
d['x3'] = (d.x1*d.x1+d.x2*d.x2)**0.5

model = SVC(kernel="linear")
model.fit(d[['x1', 'x3']], y)
Out[237]:
SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0,
  decision_function_shape=None, degree=3, gamma='auto', kernel='linear',
  max_iter=-1, probability=False, random_state=None, shrinking=True,
  tol=0.001, verbose=False)
In [239]:
yp = model.predict(d[['x1', 'x3']])
d['yp'] = yp
In [243]:
plt.scatter(d.x1, d.x3, c=d.yp)
plt.scatter(model.support_vectors_[:, 0], model.support_vectors_[:, 1], c='y')
Out[243]:
<matplotlib.collections.PathCollection at 0x11b8f5208>
In [241]:
model.support_vectors_
Out[241]:
array([[ 0.7240514 ,  0.75798033],
       [-0.7589596 ,  0.77344758],
       [ 0.67892007,  0.72495683],
       [-0.39991531,  0.7899176 ],
       [-0.5512959 ,  0.77584748],
       [-0.54594931,  0.74899673],
       [ 0.49009831,  0.78663495],
       [ 0.77538152,  0.77611576],
       [ 0.62208904,  0.79844371],
       [-0.16213342,  0.73424938],
       [ 0.51653989,  0.77228798],
       [-0.60348582,  0.71729419],
       [-0.30499904,  0.72307194],
       [-0.78748252,  0.80721566],
       [ 0.30404043,  0.7767671 ],
       [ 0.70741027,  0.79073725],
       [-0.49010534,  0.51678651],
       [ 0.14253554,  0.44839275],
       [-0.18006499,  0.45109046],
       [-0.33297332,  0.42623978],
       [ 0.38295324,  0.40260204],
       [ 0.43060419,  0.45631001],
       [ 0.25472396,  0.45150137],
       [ 0.52322773,  0.52718421],
       [ 0.34567872,  0.41798571],
       [ 0.40037101,  0.43926224],
       [-0.20115842,  0.5296618 ],
       [-0.10348488,  0.42848032],
       [-0.024711  ,  0.39490768],
       [-0.1842169 ,  0.4259001 ],
       [-0.05308107,  0.3935862 ],
       [-0.38763825,  0.41185849]])

Q: Can we get three concentric rings?

In [258]:
def scatter_circles(n, factor, noise=0.05):
    X, y = make_circles(n, factor=factor, noise=noise)
    plt.scatter(X[:, 0], X[:, 1], c=y)
    return X, y
In [259]:
X1, y1 = scatter_circles(1000, 0.1)
X2, y2 = scatter_circles(1000, 0.6)
In [260]:
X1[:5], y1[:5]
Out[260]:
(array([[ 0.02740776, -0.10963069],
        [ 0.79540294, -0.54055613],
        [ 0.0017383 ,  0.06402802],
        [ 0.02785887,  0.08364266],
        [-0.25645206,  0.90853199]]), array([1, 0, 1, 1, 0]))
In [261]:
X2[:5], y2[:5]
Out[261]:
(array([[ 0.25874876, -0.45622191],
        [-0.59121304,  0.02794927],
        [-0.73135619, -0.6639665 ],
        [-0.2028161 , -0.61644207],
        [ 0.73020136, -0.61197933]]), array([1, 1, 0, 1, 0]))
In [262]:
plt.scatter(X1[:, 0], X1[:, 1], c=y1)
Out[262]:
<matplotlib.collections.PathCollection at 0x11d5d4588>
In [264]:
plt.scatter(X2[:, 0], X2[:, 1], c=y2)
Out[264]:
<matplotlib.collections.PathCollection at 0x11d8cea90>
In [266]:
d = pd.DataFrame(X1, columns=["x1", "x2"])
In [268]:
d["y"] = y1
In [270]:
d[y1==0].plot(x="x1", y="x2", kind="scatter")
Out[270]:
<matplotlib.axes._subplots.AxesSubplot at 0x11d899f28>
In [272]:
d[y1==1].plot(x="x1", y="x2", kind="scatter")
Out[272]:
<matplotlib.axes._subplots.AxesSubplot at 0x11d2a5828>
In [273]:
d3 = d[y1==1]
In [281]:
d3.y.head()
Out[281]:
0    2
2    2
3    2
6    2
7    2
Name: y, dtype: int64
In [282]:
d2 = pd.DataFrame(X2, columns=["x1", "x2"])
In [283]:
d2["y"] = y2
In [284]:
d2.head()
Out[284]:
x1 x2 y
0 0.258749 -0.456222 1
1 -0.591213 0.027949 1
2 -0.731356 -0.663967 0
3 -0.202816 -0.616442 1
4 0.730201 -0.611979 0
In [285]:
d3.head()
Out[285]:
x1 x2 y
0 0.027408 -0.109631 2
2 0.001738 0.064028 2
3 0.027859 0.083643 2
6 -0.094715 0.100420 2
7 -0.075943 0.062622 2
In [288]:
d4 = np.hstack?
In [292]:
d4 = d2.append(d3)
In [293]:
d4.plot(kind="scatter", x="x1", y="x2", c="y")
Out[293]:
<matplotlib.axes._subplots.AxesSubplot at 0x11d4a1cf8>
In [297]:
def circles(n, factor):
    X, y = make_circles(n, factor=factor, noise=0.05)
    d = pd.DataFrame(X, columns=["x1", "x2"])
    d["y"] = y
    return d
In [308]:
d1 = circles(1000, 0.2)
d2 = circles(1000, 0.6)
d3 = d2[d2.y==1]
d3.y = 2
d4 = d1.append(d3)
/Users/anand/anaconda/lib/python3.5/site-packages/pandas/core/generic.py:2698: SettingWithCopyWarning: 
A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead

See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
  self[name] = value
In [312]:
d4.plot(kind="scatter", x="x1", y="x2", c="y", cmap="cool")
Out[312]:
<matplotlib.axes._subplots.AxesSubplot at 0x11e21b550>
In [313]:
plot_decision_boundaries(d4[["x1", "x2"]].values, d4.y.values, SVC)
Out[313]:
<module 'matplotlib.pyplot' from '/Users/anand/anaconda/lib/python3.5/site-packages/matplotlib/pyplot.py'>
In [319]:
d4["x12"] = (d4.x1*d4.x1+d4.x2*d4.x2)
In [320]:
plot_decision_boundaries(d4[["x1", "x12"]].values, d4.y.values, LogisticRegression)
Out[320]:
<module 'matplotlib.pyplot' from '/Users/anand/anaconda/lib/python3.5/site-packages/matplotlib/pyplot.py'>
In [321]:
model = SVC()
model.fit(d4[["x1", "x2"]], d4.y)

plt.scatter(d4.x1, d4.x2, c=d4.y)
plt.scatter(model.support_vectors_[:, 0], model.support_vectors_[:, 1], c='y')
Out[321]:
<matplotlib.collections.PathCollection at 0x11eaf1c50>
In [327]:
plot_decision_boundaries(d4[["x1", "x2"]].values, d4.y.values, 
                         DecisionTreeClassifier, max_depth=8)
Out[327]:
<module 'matplotlib.pyplot' from '/Users/anand/anaconda/lib/python3.5/site-packages/matplotlib/pyplot.py'>
In [331]:
xx, yy = np.meshgrid(np.arange(5), np.arange(10, 15))
In [333]:
xx
Out[333]:
array([[0, 1, 2, 3, 4],
       [0, 1, 2, 3, 4],
       [0, 1, 2, 3, 4],
       [0, 1, 2, 3, 4],
       [0, 1, 2, 3, 4]])
In [334]:
xx.ravel()
Out[334]:
array([0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2,
       3, 4])
In [335]:
xx.reshape(-1)
Out[335]:
array([0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2,
       3, 4])
In [342]:
np.array([[x, y] for x in np.arange(5) for y in np.arange(10, 15)])
Out[342]:
array([[ 0, 10],
       [ 0, 11],
       [ 0, 12],
       [ 0, 13],
       [ 0, 14],
       [ 1, 10],
       [ 1, 11],
       [ 1, 12],
       [ 1, 13],
       [ 1, 14],
       [ 2, 10],
       [ 2, 11],
       [ 2, 12],
       [ 2, 13],
       [ 2, 14],
       [ 3, 10],
       [ 3, 11],
       [ 3, 12],
       [ 3, 13],
       [ 3, 14],
       [ 4, 10],
       [ 4, 11],
       [ 4, 12],
       [ 4, 13],
       [ 4, 14]])
In [341]:
np.c_[xx.reshape(-1), yy.reshape(-1)]
Out[341]:
array([[ 0, 10],
       [ 1, 10],
       [ 2, 10],
       [ 3, 10],
       [ 4, 10],
       [ 0, 11],
       [ 1, 11],
       [ 2, 11],
       [ 3, 11],
       [ 4, 11],
       [ 0, 12],
       [ 1, 12],
       [ 2, 12],
       [ 3, 12],
       [ 4, 12],
       [ 0, 13],
       [ 1, 13],
       [ 2, 13],
       [ 3, 13],
       [ 4, 13],
       [ 0, 14],
       [ 1, 14],
       [ 2, 14],
       [ 3, 14],
       [ 4, 14]])
In [ ]: