learning for both nn working

This commit is contained in:
  parent ea28e2ee29
  commit 43a8098031

Changed files:
  functions.py  30
  functions.py  30
@@ -238,8 +238,9 @@ class Network:
         :param dw: (array) Partial derivatives
         :param delta: (array) Delta error.
         """
-        self.w[index] -= self.learning_rate * np.mean(dw, 1)
-        self.b[index] -= self.learning_rate * np.mean(np.mean(delta, 1), 0)
+        self.w[index] -= self.learning_rate * dw
+        self.b[index] -= self.learning_rate * np.mean(delta, 0)
 
     def fit(self, x, y_true, loss, epochs, batch_size, learning_rate=2e-2):
         """
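The change above drops the extra np.mean on the weight gradient: if dw already comes out of back-propagation averaged over the batch, only delta still needs to be reduced along the batch axis for the bias update. A minimal sketch of such a step, with illustrative shapes and a function name that is not part of the repository:

import numpy as np

def descent_step(w, b, dw, delta, learning_rate=2e-2):
    """One gradient-descent step (illustrative shapes, not the repo's API).

    w:     (n_in, n_out) weights       dw:    gradient, same shape as w
    b:     (n_out,) biases             delta: (batch, n_out) error terms
    """
    w = w - learning_rate * dw                      # dw assumed batch-reduced already
    b = b - learning_rate * np.mean(delta, axis=0)  # average delta over the batch axis
    return w, b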
@@ -265,7 +266,8 @@ class Network:
                 self.back_prop(z, a, y_[k:l])
 
             if (i + 1) % 10 == 0:
-                print("Loss:", self.loss.loss(y_true, z[self.n_layers]))
+                _, a = self.feed_forward(x)
+                print("Loss:", self.loss.loss(y_true, a[self.n_layers]))
 
     def predict(self, x):
         _, a = self.feed_forward(x)
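This change prints the running loss from a fresh forward pass over the whole dataset instead of the z arrays left over from the last minibatch. A rough sketch of that reporting pattern; the loop structure, the net argument and the exact attribute names are assumptions, only feed_forward, back_prop, loss and n_layers are taken from the diff:

def fit(net, x, y_true, epochs, batch_size):
    """Sketch only: epoch loop with periodic loss reporting."""
    for i in range(epochs):
        for k in range(0, x.shape[0], batch_size):
            z, a = net.feed_forward(x[k:k + batch_size])
            net.back_prop(z, a, y_true[k:k + batch_size])
        if (i + 1) % 10 == 0:
            # report on a fresh pass over the full dataset, not the last minibatch
            _, a = net.feed_forward(x)
            print("Loss:", net.loss.loss(y_true, a[net.n_layers]))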
@@ -273,27 +275,37 @@ class Network:
 
 if __name__ == "__main__":
     from sklearn import datasets
-    #import sklearn.metrics
+    import sklearn.metrics
     np.random.seed(1)
     # Load data
     data = datasets.load_iris()
     x = data["data"]
     x = (x - x.mean()) / x.std()
-    y = np.expand_dims(data["target"], 1)
+    y = data["target"]
+    #y = np.expand_dims(data["target"], 1)
 
     # one hot encoding
     y = np.eye(3)[y]
 
-    nn = Network((4, 8, 2, 3), (Relu, Relu, Sigmoid))
+    nn = Network((4, 8, 3), (Relu, Relu, Sigmoid))
 
     #nn.fit(x[:2], y[:2], MSE, 1, batch_size=2)
     nn.fit(x, y, MSE, 1000, 16)
 
     # data = datasets.load_digits()
     #
     # x = data["data"]
     # y = data["target"]
     # y = np.eye(10)[y]
     #
     # nn = Network((64, 32, 10), (Relu, Sigmoid))
     # nn.fit(x, y, MSE, 100, 2)
     #
     y_ = nn.predict(x)
     a = np.argmax(y_, 1)
 
     for i in range(a.size):
         print(a[i], y[i])
 
     # for i in range(a.size):
     #     print(a[i], y[i], "\t", np.round(y_[i], 3))
 
     # y_true = []
     # y_pred = []
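The driver block now keeps the integer Iris targets, one-hot encodes them with np.eye(3)[y], and maps the network outputs back to class indices with np.argmax. A small self-contained illustration of those two conversions; the probability array is made up for the example:

import numpy as np

y = np.array([0, 2, 1, 2])            # integer class labels, 3 classes
y_onehot = np.eye(3)[y]               # shape (4, 3); label 2 -> [0., 0., 1.]

y_prob = np.array([[0.8, 0.1, 0.1],   # made-up network outputs
                   [0.2, 0.3, 0.5],
                   [0.1, 0.7, 0.2],
                   [0.1, 0.2, 0.7]])
y_pred = np.argmax(y_prob, 1)         # back to class indices: [0, 2, 1, 2]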
@@ -140,10 +140,10 @@ class NeuralNetwork:
         dc_dw1 = np.dot(self.x.T, delta2)
 
         # update weights and biases
-        self.w[2] -= self.learning_rate * np.mean(dc_dw2, 1)
-        self.b[2] -= self.learning_rate * np.mean(np.mean(delta3, 1), 0)
-        self.w[1] -= self.learning_rate * np.mean(dc_dw1, 1)
-        self.b[1] -= self.learning_rate * np.mean(np.mean(delta2, 1), 0)
+        self.w[2] -= self.learning_rate * dc_dw2
+        self.b[2] -= self.learning_rate * np.mean(delta3, 0)
+        self.w[1] -= self.learning_rate * dc_dw1
+        self.b[1] -= self.learning_rate * np.mean(delta2, 0)
 
     def stats(self):
         """
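The same fix is applied in the hard-coded two-layer NeuralNetwork: with weights stored in the usual (n_in, n_out) layout, np.dot(x.T, delta) already has the shape of the weight matrix, so an extra mean over one of its axes collapses the gradient to a vector that no longer matches the weights. A quick shape check, with sizes chosen arbitrarily:

import numpy as np

batch, n_in, n_out = 16, 4, 8
x = np.random.randn(batch, n_in)
delta = np.random.randn(batch, n_out)
w = np.random.randn(n_in, n_out)

dc_dw = np.dot(x.T, delta)
print(w.shape, dc_dw.shape)        # (4, 8) (4, 8) -- gradient matches w
print(np.mean(dc_dw, 1).shape)     # (4,)          -- collapsed, no longer matches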
@@ -188,24 +188,24 @@ if __name__ == "__main__":
     data = datasets.load_iris()
     x = data["data"]
     x = (x - x.mean()) / x.std()
-    y = np.expand_dims(data["target"], 1)
+    y = data["target"]
 
     # one hot encoding
     y = np.eye(3)[y]
 
-    nn = NeuralNetwork(4, 8, 3, 2e-2)
+    nn = NeuralNetwork(4, 4, 3, 1e-2)
     #nn.fit(x[:2], y[:2], 2, 1)
-    nn.fit(x, y, 16, 1000)
+    nn.fit(x, y, 8, 1000)
     _, y_ = feed_forward(x, nn.w, nn.b)
-    print(y_)
+    print(y_[3])
 
-    # # result
-    # _, y_ = feed_forward(x, nn.w, nn.b)
-    # y_true = []
-    # y_pred = []
-    # for i in range(len(y)):
-    #     y_pred.append(np.argmax(y_[3][i]))
-    #     y_true.append(np.argmax(y[i]))
-    #
-    # print(sklearn.metrics.classification_report(y_true, y_pred))
-    #
+    y_true = []
+    y_pred = []
+    for i in range(len(y)):
+        y_pred.append(np.argmax(y_[3][i]))
+        y_true.append(np.argmax(y[i]))
+
+    print(sklearn.metrics.classification_report(y_true, y_pred))
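With the result block uncommented, the script scores the network through scikit-learn by converting both the one-hot targets and the final-layer outputs back to class indices. A self-contained version of that evaluation step, with made-up arrays standing in for y and y_[3]:

import numpy as np
import sklearn.metrics

y = np.eye(3)[[0, 1, 2, 2]]               # made-up one-hot targets
y_out = np.array([[0.9, 0.05, 0.05],      # made-up network outputs
                  [0.1, 0.8, 0.1],
                  [0.2, 0.2, 0.6],
                  [0.6, 0.2, 0.2]])

y_true = [np.argmax(y[i]) for i in range(len(y))]           # -> [0, 1, 2, 2]
y_pred = [np.argmax(y_out[i]) for i in range(len(y_out))]   # -> [0, 1, 2, 0]

print(sklearn.metrics.classification_report(y_true, y_pred))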