Machine Learning August 20, 2018

9-3 Implementing Logistic Regression


Implementing Logistic Regression

import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets

iris = datasets.load_iris()
X = iris.data
y = iris.target
# keep only the first two classes and the first two features
# (sepal length, sepal width): a binary, 2-D problem that is easy to plot
X = X[y<2, :2]
y = y[y<2]
X.shape
(100, 2)
y.shape
(100,)
plt.scatter(X[y==0,0],X[y==0,1],color='red')
plt.scatter(X[y==1,0],X[y==1,1],color='blue')
plt.show()

Using Logistic Regression
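The LogisticRegression class imported below comes from script/LogisticRegression.py, i.e. the class implemented in the previous section, not scikit-learn's. For reference, a minimal sketch of such a class, assuming batch gradient descent on the log loss, might look like the following (the actual file may differ in details such as learning rate and stopping criterion):

import numpy as np

class LogisticRegression:
    """Minimal sketch; assumes batch gradient descent on the log loss."""
    def __init__(self):
        self.coef_ = None        # theta[1:], one weight per feature
        self.intercept_ = None   # theta[0], the bias term
        self._theta = None

    def _sigmoid(self, t):
        return 1.0 / (1.0 + np.exp(-t))

    def fit(self, X_train, y_train, eta=0.01, n_iters=10000):
        # X_b adds a column of ones so the bias is learned together with the weights
        X_b = np.hstack([np.ones((len(X_train), 1)), X_train])
        theta = np.zeros(X_b.shape[1])
        for _ in range(n_iters):
            p = self._sigmoid(X_b.dot(theta))
            gradient = X_b.T.dot(p - y_train) / len(y_train)
            theta -= eta * gradient
        self._theta = theta
        self.intercept_ = theta[0]
        self.coef_ = theta[1:]
        return self

    def predict_proba(self, X_predict):
        X_b = np.hstack([np.ones((len(X_predict), 1)), X_predict])
        return self._sigmoid(X_b.dot(self._theta))

    def predict(self, X_predict):
        return np.array(self.predict_proba(X_predict) >= 0.5, dtype='int')

    def score(self, X_test, y_test):
        return np.mean(self.predict(X_test) == y_test)

    def __repr__(self):
        return "LogisticRegression()"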

from sklearn.model_selection import train_test_split
X_train,X_test,y_train,y_test = train_test_split(X,y)
from script.LogisticRegression import LogisticRegression
log_reg = LogisticRegression()
log_reg.fit(X_train,y_train)
LogisticRegression()
log_reg.score(X_test,y_test)
1.0
log_reg.predict_proba(X_test)
array([0.57094625, 0.97461601, 0.1188549 , 0.06759062, 0.89906436,
       0.06925522, 0.04805963, 0.87164179, 0.94645386, 0.76925878,
       0.80989565, 0.51314751, 0.23121621, 0.05704483, 0.18259107,
       0.97365344, 0.86117869, 0.01611954, 0.28822454, 0.92921839,
       0.15369821, 0.89906436, 0.00593858, 0.01400629, 0.03148767])
y_test
array([1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1,
       0, 0, 0])
log_reg.predict(X_test)
array([1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1,
       0, 0, 0])

Decision Boundary
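For this linear model the decision boundary is the set of points where the predicted probability equals 0.5, i.e. where theta0 + theta1*x1 + theta2*x2 = 0. Solving for x2 gives x2 = -(theta0 + theta1*x1) / theta2, which is exactly what the x2() function below computes (theta0 is intercept_, theta1 and theta2 are coef_[0] and coef_[1]).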

def x2(x1):
    return (-log_reg.coef_[0] * x1 - log_reg.intercept_) / log_reg.coef_[1]
x1_plot = np.linspace(4,8,1000)
x2_plot = x2(x1_plot)
plt.scatter(X[y==0,0],X[y==0,1],color='red')
plt.scatter(X[y==1,0],X[y==1,1],color='blue')
plt.plot(x1_plot,x2_plot)
plt.show()
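For classifiers whose boundaries are not straight lines, the closed-form x2(x1) trick no longer applies. A more general approach is to evaluate the model on a dense grid and fill the predicted regions with contourf. The helper below is a sketch along those lines; the function name, grid density, plotting range and colors are illustrative choices, not from the original post:

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap

def plot_decision_boundary(model, axis):
    # evaluate the model on a 200x200 grid covering axis = [x1_min, x1_max, x2_min, x2_max]
    x0, x1 = np.meshgrid(
        np.linspace(axis[0], axis[1], 200),
        np.linspace(axis[2], axis[3], 200)
    )
    X_new = np.c_[x0.ravel(), x1.ravel()]
    zz = model.predict(X_new).reshape(x0.shape)
    custom_cmap = ListedColormap(['#EF9A9A', '#90CAF9'])
    plt.contourf(x0, x1, zz, cmap=custom_cmap)

plot_decision_boundary(log_reg, axis=[4, 8, 1.5, 4.5])
plt.scatter(X[y==0,0], X[y==0,1], color='red')
plt.scatter(X[y==1,0], X[y==1,1], color='blue')
plt.show()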
