import numpy as np
from data_prep import features, targets, features_test, targets_test


def sigmoid(x):
    """
    Calculate sigmoid
    """
    return 1 / (1 + np.exp(-x))

# Note: We haven't provided the sigmoid_prime function like we did in
#       the previous lesson to encourage you to come up with a more
#       efficient solution. If you need a hint, check out the comments
#       in solution.py from the previous lecture.

# Use the same seed to make debugging easier
np.random.seed(42)

n_records, n_features = features.shape
last_loss = None

# Initialize weights
weights = np.random.normal(scale=1 / n_features**.5, size=n_features)

# Neural Network hyperparameters
epochs = 1000
learnrate = 0.5

for e in range(epochs):
    # Reset the accumulated weight step for this epoch
    del_w = np.zeros(weights.shape)
    for x, y in zip(features.values, targets):
        # Loop through all records, x is the input, y is the target

        # Note: We haven't included the h variable from the previous
        #       lesson. You can add it if you want, or you can calculate
        #       the h together with the output

        # Calculate the output (y hat)
        output = sigmoid(np.dot(x, weights))

        # Calculate the error
        error = y - output

        # Calculate the error term. Reusing the already-computed output
        # gives sigmoid_prime for free: sigmoid'(h) = output * (1 - output)
        error_term = error * output * (1 - output)

        # Calculate the change in weights for this sample
        # and add it to the total weight change
        del_w += error_term * x

    # Update weights using the learning rate and the average change in weights
    weights += learnrate * del_w / n_records

    # Printing out the mean square error on the training set
    if e % (epochs / 10) == 0:
        out = sigmoid(np.dot(features, weights))
        loss = np.mean((out - targets) ** 2)
        if last_loss and last_loss < loss:
            print("Train loss: ", loss, "  WARNING - Loss Increasing")
        else:
            print("Train loss: ", loss)
        last_loss = loss

# Calculate accuracy on test data
test_out = sigmoid(np.dot(features_test, weights))
predictions = test_out > 0.5
accuracy = np.mean(predictions == targets_test)
print("Prediction accuracy: {:.3f}".format(accuracy))
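
# For reference, the per-record inner loop above can be collapsed into a
# fully vectorized update. This is a minimal sketch, not part of the
# original exercise; it assumes, as above, that features is a pandas
# DataFrame and targets is a 1-D array-like of 0/1 labels. The helper
# name train_vectorized is hypothetical.

def train_vectorized(features, targets, epochs=1000, learnrate=0.5, seed=42):
    """Batch gradient descent using matrix operations instead of a record loop."""
    rng = np.random.default_rng(seed)
    X = features.values
    y = np.asarray(targets)
    n_records, n_features = X.shape
    weights = rng.normal(scale=1 / n_features**.5, size=n_features)
    for _ in range(epochs):
        # y hat for every record at once
        output = sigmoid(X @ weights)
        error_term = (y - output) * output * (1 - output)
        # X.T @ error_term sums error_term * x over all records,
        # matching the accumulated del_w in the loop above
        weights += learnrate * (X.T @ error_term) / n_records
    return weights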