Perceptron model training
The code for generating the dataset is available at this link:
Generation of Bimonthly Datasets
I used the double-moon dataset generated in the previous post and fed it to the perceptron model for training. I won't explain what a perceptron model is here; I'm leaving that as a placeholder to fill in later.
What is a perceptron
Update weights
Dataset
Training results
From the results, the trained network model is acceptable, with an accuracy of about 97%.
accuracy_rate: 0.975 lost_count: 10 last lost_count: 40
the weights are respectively : 0.20136202161906264, 0.9729840644208955
In the figure above, the first image shows the generated double-moon dataset, the second shows the test set and the decision boundary, the third plots the misclassified points, and the fourth plots the error rate. With continued training, the error rate decreases.
Code
import numpy
import matplotlib.pyplot as plt
from half_moonDataSet import Sj_Hal_moonDataSet
class Perception(object):
    """A simple perceptron binary classifier trained with SGD.

    Labels are expected to be +1 / -1.  The bias term is handled by
    appending a constant-1 feature column to every sample, so ``w`` has
    ``w_dim + 1`` entries (the last one is the bias weight).
    """

    def __init__(self, w_dim, epoch=10, l_rate=0.01):
        # w_dim + 1: the extra weight is the bias term.
        self.w = numpy.ones(w_dim + 1, dtype=numpy.float32)
        self.epoch = epoch      # number of passes over the training set
        self.l_rate = l_rate    # learning rate for the SGD update
        self.lost = []          # per-epoch misclassification counts recorded by fit()

    def sign(self, y):
        """Step activation: return +1 for y >= 0, otherwise -1."""
        if y >= 0:
            return 1
        else:
            return -1

    def weight_sum(self, x):
        """Weighted sum w.x for a single bias-augmented sample."""
        return float(numpy.dot(x, self.w))

    def weight_sum_all(self, X):
        """Weighted sums X.w for a whole bias-augmented sample matrix."""
        return numpy.dot(X, self.w)

    def fit(self, x_train, x_train_label):
        """Train the perceptron with stochastic gradient descent.

        Parameters
        ----------
        x_train : array of shape (n_samples, w_dim)
        x_train_label : array of +1 / -1 labels, length n_samples
        """
        temp = numpy.ones(len(x_train))
        x_train = numpy.column_stack((x_train, temp))  # append the bias column
        del temp
        self.lost.clear()
        loss_temp = 0
        # BUG FIX: the original read "for iter_in range(self.epoch):" — the
        # missing space made it a syntax error.
        for iter_ in range(self.epoch):
            for i in range(len(x_train)):
                x = x_train[i]
                y = x_train_label[i]
                # SGD: predict, then update only on a misclassification.
                fit_res = self.sign(float(numpy.dot(x, self.w)))
                if fit_res != y:
                    loss_temp += 1  # record the number of errors
                    # update weights: w <- w + lr * (y - y_hat) * x
                    self.w = self.w + self.l_rate * (y - fit_res) * x
                    # alternative (equivalent when misclassified): self.w += x * y * self.l_rate
            self.lost.append(loss_temp)
            if iter_ % 5 == 0:  # print progress every 5 epochs, then reset the error window
                print('We are training......' + 'iter_:', iter_, 'loss:', loss_temp)
                loss_temp = 0

    def predict(self, x_test):
        """Return an array of +1 / -1 predictions for x_test."""
        temp = numpy.ones(len(x_test))
        x_test = numpy.column_stack((x_test, temp))  # append the bias column
        del temp  # recycle temp
        y_predict = list(map(lambda x: 1 if x >= 0 else -1, list(self.weight_sum_all(x_test))))
        return numpy.array(y_predict)

    def score(self, y, label):
        """Compare predictions against labels.

        Returns a (running-accuracy list, misclassified-count) pair.
        """
        accuracy = 0
        rate_temp = 0
        rate = []
        for i in range(len(y)):
            if y[i] == label[i]:
                accuracy += 1
            rate_temp = accuracy / len(label)
            rate.append(rate_temp)
        return rate, len(label) - accuracy
if __name__ == '__main__':
    # Fix the random seed so the generated sample is reproducible.
    random_seed = 20
    makeData = Sj_Hal_moonDataSet()
    makeData.random_seed(random_seed)
    np_data, label = makeData.double_moon(origin_y=1, origin_x=1, sample_data=2000, ver_distance=-1, width=1,
                                          hor_distance=3, slope=15)
    Train_x, Train_label, Test_x, Test_label = makeData.moon_train_test_split.train_test_split(sample_set=np_data,
                                                                                               label_set=label,
                                                                                               test_rate=0.2)
    perception = Perception(Train_x.shape[1], epoch=8000, l_rate=0.00001)
    perception.fit(Train_x, Train_label)
    # print('Train_x:\n', Train_x, '\nTrain_label:', Train_label, '\nTest_x:', Test_x, '\nTest_label:', Test_label)
    # Test the trained network.
    y_predict = perception.predict(Test_x)
    accuracy_rate, lost_count = perception.score(y_predict, Test_label)
    print('accuracy_rate:', accuracy_rate[-1], 'lost_count:', lost_count,
          'last lost_count:', perception.lost[len(perception.lost) - 1])
    # Split the full dataset and the test set by class for plotting.
    pdata_f1 = [np_data[i][0] for i in range(len(np_data)) if label[i] == 1]
    pdata_f2 = [np_data[i][1] for i in range(len(np_data)) if label[i] == 1]
    ndata_f1 = [np_data[i][0] for i in range(len(np_data)) if label[i] == -1]
    ndata_f2 = [np_data[i][1] for i in range(len(np_data)) if label[i] == -1]
    positive_f1 = [Test_x[i][0] for i in range(len(Test_x)) if Test_label[i] == 1]
    positive_f2 = [Test_x[i][1] for i in range(len(Test_x)) if Test_label[i] == 1]
    negetive_f1 = [Test_x[i][0] for i in range(len(Test_x)) if Test_label[i] == -1]
    negetive_f2 = [Test_x[i][1] for i in range(len(Test_x)) if Test_label[i] == -1]
    mistake_f1_pre = [Test_x[i][0] for i in range(len(Test_x)) if y_predict[i] != Test_label[i]]
    mistake_f2_pre = [Test_x[i][1] for i in range(len(Test_x)) if y_predict[i] != Test_label[i]]
    fig = plt.figure(num=1, figsize=(15, 5))
    # Subplot 1: the full generated dataset, colored by class.
    ax0 = fig.add_subplot(141)
    ax0.scatter(pdata_f1, pdata_f2, color='red', alpha=0.5)
    ax0.scatter(ndata_f1, ndata_f2, color='blue', alpha=0.5)
    # Subplot 2: the test set and the learned decision boundary.
    ax1 = fig.add_subplot(142)
    ax1.scatter(positive_f1, positive_f2, c='red', alpha=0.5)
    ax1.scatter(negetive_f1, negetive_f2, c='blue', alpha=0.5)
    # Decision boundary: w0*x + w1*y + w2 = 0  =>  y = -(w0/w1)*x - (w2/w1).
    line_x = numpy.linspace(-6, 15, 100)
    line_w = -1 * (perception.w[0] / perception.w[1])
    line_b = -1 * (float(perception.w[2]) / perception.w[1])
    print('\n', line_w, line_b)
    line_y = list(map(lambda x: x * line_w + line_b, line_x))
    ax1.plot(line_x, line_y, c='orange')
    # Subplot 3: the misclassified test points with the boundary.
    ax2 = fig.add_subplot(143)
    ax2.scatter(mistake_f1_pre, mistake_f2_pre, c='orange', alpha=0.5)
    ax2.plot(line_x, line_y, c='orange')
    # Subplot 4: the recorded error counts over training.
    ax3 = fig.add_subplot(144)
    ax3.plot(range(0, len(perception.lost)), perception.lost)
    # print('\nlost:', perception.lost, 'lost len:', len(perception.lost))
    plt.show()
Reference blog: