completed part 1 of deep learning
@@ -0,0 +1,244 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Implementing the Gradient Descent Algorithm\n",
"\n",
"In this lab, we'll implement the basic functions of the Gradient Descent algorithm to find the boundary in a small dataset. First, we'll start with some functions that will help us plot and visualize the data."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"import matplotlib.pyplot as plt\n",
"import numpy as np\n",
"import pandas as pd\n",
"\n",
"# Some helper functions for plotting and drawing lines\n",
"\n",
"def plot_points(X, y):\n",
"    admitted = X[np.argwhere(y==1)]\n",
"    rejected = X[np.argwhere(y==0)]\n",
"    plt.scatter([s[0][0] for s in rejected], [s[0][1] for s in rejected], s = 25, color = 'blue', edgecolor = 'k')\n",
"    plt.scatter([s[0][0] for s in admitted], [s[0][1] for s in admitted], s = 25, color = 'red', edgecolor = 'k')\n",
"\n",
"def display(m, b, color='g--'):\n",
"    plt.xlim(-0.05,1.05)\n",
"    plt.ylim(-0.05,1.05)\n",
"    x = np.arange(-10, 10, 0.1)\n",
"    plt.plot(x, m*x+b, color)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Reading and plotting the data"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"data = pd.read_csv('data.csv', header=None)\n",
"X = np.array(data[[0,1]])\n",
"y = np.array(data[2])\n",
"plot_points(X,y)\n",
"plt.show()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## TODO: Implementing the basic functions\n",
"Now it's your turn to shine! Implement the following formulas, as explained in the text.\n",
"- Sigmoid activation function\n",
"\n",
"$$\\sigma(x) = \\frac{1}{1+e^{-x}}$$\n",
"\n",
"- Output (prediction) formula\n",
"\n",
"$$\\hat{y} = \\sigma(w_1 x_1 + w_2 x_2 + b)$$\n",
"\n",
"- Error function\n",
"\n",
"$$Error(y, \\hat{y}) = - y \\log(\\hat{y}) - (1-y) \\log(1-\\hat{y})$$\n",
"\n",
"- The function that updates the weights\n",
"\n",
"$$ w_i \\longrightarrow w_i + \\alpha (y - \\hat{y}) x_i$$\n",
"\n",
"$$ b \\longrightarrow b + \\alpha (y - \\hat{y})$$\n",
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"# Implement the following functions\n",
"\n",
"# Activation (sigmoid) function\n",
"def sigmoid(x):\n",
"    pass\n",
"\n",
"# Output (prediction) formula\n",
"def output_formula(features, weights, bias):\n",
"    pass\n",
"\n",
"# Error (log-loss) formula\n",
"def error_formula(y, output):\n",
"    pass\n",
"\n",
"# Gradient descent step\n",
"def update_weights(x, y, weights, bias, learnrate):\n",
"    pass"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Training function\n",
"This function iterates the gradient descent algorithm through all the data for a number of epochs. It also plots the data and some of the boundary lines obtained as the algorithm runs."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"np.random.seed(44)\n",
"\n",
"epochs = 100\n",
"learnrate = 0.01\n",
"\n",
"def train(features, targets, epochs, learnrate, graph_lines=False):\n",
"\n",
"    errors = []\n",
"    n_records, n_features = features.shape\n",
"    last_loss = None\n",
"    weights = np.random.normal(scale=1 / n_features**.5, size=n_features)\n",
"    bias = 0\n",
"    for e in range(epochs):\n",
"        del_w = np.zeros(weights.shape)\n",
"        for x, y in zip(features, targets):\n",
"            output = output_formula(x, weights, bias)\n",
"            error = error_formula(y, output)\n",
"            weights, bias = update_weights(x, y, weights, bias, learnrate)\n",
"\n",
"        # Printing out the log-loss error on the training set\n",
"        out = output_formula(features, weights, bias)\n",
"        loss = np.mean(error_formula(targets, out))\n",
"        errors.append(loss)\n",
"        if e % (epochs / 10) == 0:\n",
"            print(\"\\n========== Epoch\", e,\"==========\")\n",
"            if last_loss and last_loss < loss:\n",
"                print(\"Train loss: \", loss, \" WARNING - Loss Increasing\")\n",
"            else:\n",
"                print(\"Train loss: \", loss)\n",
"            last_loss = loss\n",
"            predictions = out > 0.5\n",
"            accuracy = np.mean(predictions == targets)\n",
"            print(\"Accuracy: \", accuracy)\n",
"        if graph_lines and e % (epochs / 100) == 0:\n",
"            display(-weights[0]/weights[1], -bias/weights[1])\n",
"\n",
"    # Plotting the solution boundary\n",
"    plt.title(\"Solution boundary\")\n",
"    display(-weights[0]/weights[1], -bias/weights[1], 'black')\n",
"\n",
"    # Plotting the data\n",
"    plot_points(features, targets)\n",
"    plt.show()\n",
"\n",
"    # Plotting the error\n",
"    plt.title(\"Error Plot\")\n",
"    plt.xlabel('Number of epochs')\n",
"    plt.ylabel('Error')\n",
"    plt.plot(errors)\n",
"    plt.show()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Time to train the algorithm!\n",
"When we run the function, we'll obtain the following:\n",
"- 10 updates with the current training loss and accuracy\n",
"- A plot of the data and some of the boundary lines obtained. The final one is in black. Notice how the lines get closer and closer to the best fit, as we go through more epochs.\n",
"- A plot of the error function. Notice how it decreases as we go through more epochs."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"train(X, y, epochs, learnrate, True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.3"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
@@ -0,0 +1,59 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Solutions"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"# Activation (sigmoid) function\n",
"def sigmoid(x):\n",
"    return 1 / (1 + np.exp(-x))\n",
"\n",
"def output_formula(features, weights, bias):\n",
"    return sigmoid(np.dot(features, weights) + bias)\n",
"\n",
"def error_formula(y, output):\n",
"    return - y*np.log(output) - (1 - y) * np.log(1-output)\n",
"\n",
"def update_weights(x, y, weights, bias, learnrate):\n",
"    output = output_formula(x, weights, bias)\n",
"    d_error = y - output\n",
"    weights += learnrate * d_error * x\n",
"    bias += learnrate * d_error\n",
"    return weights, bias"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.3"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
@@ -0,0 +1,100 @@
0.78051,-0.063669,1
0.28774,0.29139,1
0.40714,0.17878,1
0.2923,0.4217,1
0.50922,0.35256,1
0.27785,0.10802,1
0.27527,0.33223,1
0.43999,0.31245,1
0.33557,0.42984,1
0.23448,0.24986,1
0.0084492,0.13658,1
0.12419,0.33595,1
0.25644,0.42624,1
0.4591,0.40426,1
0.44547,0.45117,1
0.42218,0.20118,1
0.49563,0.21445,1
0.30848,0.24306,1
0.39707,0.44438,1
0.32945,0.39217,1
0.40739,0.40271,1
0.3106,0.50702,1
0.49638,0.45384,1
0.10073,0.32053,1
0.69907,0.37307,1
0.29767,0.69648,1
0.15099,0.57341,1
0.16427,0.27759,1
0.33259,0.055964,1
0.53741,0.28637,1
0.19503,0.36879,1
0.40278,0.035148,1
0.21296,0.55169,1
0.48447,0.56991,1
0.25476,0.34596,1
0.21726,0.28641,1
0.67078,0.46538,1
0.3815,0.4622,1
0.53838,0.32774,1
0.4849,0.26071,1
0.37095,0.38809,1
0.54527,0.63911,1
0.32149,0.12007,1
0.42216,0.61666,1
0.10194,0.060408,1
0.15254,0.2168,1
0.45558,0.43769,1
0.28488,0.52142,1
0.27633,0.21264,1
0.39748,0.31902,1
0.5533,1,0
0.44274,0.59205,0
0.85176,0.6612,0
0.60436,0.86605,0
0.68243,0.48301,0
1,0.76815,0
0.72989,0.8107,0
0.67377,0.77975,0
0.78761,0.58177,0
0.71442,0.7668,0
0.49379,0.54226,0
0.78974,0.74233,0
0.67905,0.60921,0
0.6642,0.72519,0
0.79396,0.56789,0
0.70758,0.76022,0
0.59421,0.61857,0
0.49364,0.56224,0
0.77707,0.35025,0
0.79785,0.76921,0
0.70876,0.96764,0
0.69176,0.60865,0
0.66408,0.92075,0
0.65973,0.66666,0
0.64574,0.56845,0
0.89639,0.7085,0
0.85476,0.63167,0
0.62091,0.80424,0
0.79057,0.56108,0
0.58935,0.71582,0
0.56846,0.7406,0
0.65912,0.71548,0
0.70938,0.74041,0
0.59154,0.62927,0
0.45829,0.4641,0
0.79982,0.74847,0
0.60974,0.54757,0
0.68127,0.86985,0
0.76694,0.64736,0
0.69048,0.83058,0
0.68122,0.96541,0
0.73229,0.64245,0
0.76145,0.60138,0
0.58985,0.86955,0
0.73145,0.74516,0
0.77029,0.7014,0
0.73156,0.71782,0
0.44556,0.57991,0
0.85275,0.85987,0
0.51912,0.62359,0
@@ -0,0 +1,122 @@
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd


# Some helper functions for plotting and drawing lines

def plot_points(X, y):
    admitted = X[np.argwhere(y == 1)]
    rejected = X[np.argwhere(y == 0)]
    plt.scatter([s[0][0] for s in rejected],
                [s[0][1] for s in rejected], s=25,
                color='blue', edgecolor='k')
    plt.scatter([s[0][0] for s in admitted],
                [s[0][1] for s in admitted],
                s=25, color='red', edgecolor='k')


def display(m, b, color='g--'):
    plt.xlim(-0.05, 1.05)
    plt.ylim(-0.05, 1.05)
    x = np.arange(-10, 10, 0.1)
    plt.plot(x, m * x + b, color)


data = pd.read_csv('data.csv', header=None)
X = np.array(data[[0, 1]])
y = np.array(data[2])
plot_points(X, y)
plt.show()


# Implement the following functions

# Activation (sigmoid) function
def sigmoid(x):
    return 1 / (1 + np.exp(-x))


# Output (prediction) formula
def output_formula(features, weights, bias):
    return sigmoid(np.dot(features, weights) + bias)


# Error (log-loss) formula
def error_formula(y, output):
    return -y * np.log(output) - (1 - y) * np.log(1 - output)


# Gradient descent step
def update_weights(x, y, weights, bias, learnrate):
    output = output_formula(x, weights, bias)
    d_error = y - output
    weights += learnrate * d_error * x
    bias += learnrate * d_error
    return weights, bias


"""
Training function
This function iterates the gradient descent algorithm through all the data
for a number of epochs. It also plots the data, and some of the boundary
lines obtained as the algorithm runs.
"""

np.random.seed(44)

epochs = 100
learnrate = 0.01


def train(features, targets, epochs, learnrate, graph_lines=False):

    errors = []
    n_records, n_features = features.shape
    last_loss = None
    weights = np.random.normal(scale=1 / n_features**.5, size=n_features)
    bias = 0
    for e in range(epochs):
        del_w = np.zeros(weights.shape)
        for x, y in zip(features, targets):
            output = output_formula(x, weights, bias)
            error = error_formula(y, output)
            weights, bias = update_weights(x, y, weights, bias, learnrate)

        # Printing out the log-loss error on the training set
        out = output_formula(features, weights, bias)
        loss = np.mean(error_formula(targets, out))
        errors.append(loss)
        if e % (epochs / 10) == 0:
            print("\n========== Epoch", e, "==========")
            if last_loss and last_loss < loss:
                print("Train loss: ", loss, " WARNING - Loss Increasing")
            else:
                print("Train loss: ", loss)
            last_loss = loss
            predictions = out > 0.5
            accuracy = np.mean(predictions == targets)
            print("Accuracy: ", accuracy)
        if graph_lines and e % (epochs / 100) == 0:
            display(-weights[0] / weights[1], -bias / weights[1])

    # Plotting the solution boundary
    plt.title("Solution boundary")
    display(-weights[0] / weights[1], -bias / weights[1], 'black')

    # Plotting the data
    plot_points(features, targets)
    plt.show()

    # Plotting the error
    plt.title("Error Plot")
    plt.xlabel('Number of epochs')
    plt.ylabel('Error')
    plt.plot(errors)
    plt.show()


train(X, y, epochs, learnrate, True)
@@ -0,0 +1,100 @@
0.78051,-0.063669,1
0.28774,0.29139,1
0.40714,0.17878,1
0.2923,0.4217,1
0.50922,0.35256,1
0.27785,0.10802,1
0.27527,0.33223,1
0.43999,0.31245,1
0.33557,0.42984,1
0.23448,0.24986,1
0.0084492,0.13658,1
0.12419,0.33595,1
0.25644,0.42624,1
0.4591,0.40426,1
0.44547,0.45117,1
0.42218,0.20118,1
0.49563,0.21445,1
0.30848,0.24306,1
0.39707,0.44438,1
0.32945,0.39217,1
0.40739,0.40271,1
0.3106,0.50702,1
0.49638,0.45384,1
0.10073,0.32053,1
0.69907,0.37307,1
0.29767,0.69648,1
0.15099,0.57341,1
0.16427,0.27759,1
0.33259,0.055964,1
0.53741,0.28637,1
0.19503,0.36879,1
0.40278,0.035148,1
0.21296,0.55169,1
0.48447,0.56991,1
0.25476,0.34596,1
0.21726,0.28641,1
0.67078,0.46538,1
0.3815,0.4622,1
0.53838,0.32774,1
0.4849,0.26071,1
0.37095,0.38809,1
0.54527,0.63911,1
0.32149,0.12007,1
0.42216,0.61666,1
0.10194,0.060408,1
0.15254,0.2168,1
0.45558,0.43769,1
0.28488,0.52142,1
0.27633,0.21264,1
0.39748,0.31902,1
0.5533,1,0
0.44274,0.59205,0
0.85176,0.6612,0
0.60436,0.86605,0
0.68243,0.48301,0
1,0.76815,0
0.72989,0.8107,0
0.67377,0.77975,0
0.78761,0.58177,0
0.71442,0.7668,0
0.49379,0.54226,0
0.78974,0.74233,0
0.67905,0.60921,0
0.6642,0.72519,0
0.79396,0.56789,0
0.70758,0.76022,0
0.59421,0.61857,0
0.49364,0.56224,0
0.77707,0.35025,0
0.79785,0.76921,0
0.70876,0.96764,0
0.69176,0.60865,0
0.66408,0.92075,0
0.65973,0.66666,0
0.64574,0.56845,0
0.89639,0.7085,0
0.85476,0.63167,0
0.62091,0.80424,0
0.79057,0.56108,0
0.58935,0.71582,0
0.56846,0.7406,0
0.65912,0.71548,0
0.70938,0.74041,0
0.59154,0.62927,0
0.45829,0.4641,0
0.79982,0.74847,0
0.60974,0.54757,0
0.68127,0.86985,0
0.76694,0.64736,0
0.69048,0.83058,0
0.68122,0.96541,0
0.73229,0.64245,0
0.76145,0.60138,0
0.58985,0.86955,0
0.73145,0.74516,0
0.77029,0.7014,0
0.73156,0.71782,0
0.44556,0.57991,0
0.85275,0.85987,0
0.51912,0.62359,0
@@ -0,0 +1,54 @@
import numpy as np

# Setting the random seed, feel free to change it and see different solutions.
np.random.seed(42)


def stepFunction(t):
    if t >= 0:
        return 1
    return 0


def prediction(X, W, b):
    return stepFunction((np.matmul(X, W) + b)[0])


# TODO: Fill in the code below to implement the perceptron trick.
# The function should receive as inputs the data X, the labels y,
# the weights W (as an array), and the bias b,
# update the weights and bias W, b, according to the perceptron algorithm,
# and return W and b.


def perceptronStep(X, y, W, b, learn_rate=0.01):
    for i in range(len(X)):
        y_hat = prediction(X[i], W, b)
        if y[i] - y_hat == 1:
            W[0] += X[i][0] * learn_rate
            W[1] += X[i][1] * learn_rate
            b += learn_rate
        elif y[i] - y_hat == -1:
            W[0] -= X[i][0] * learn_rate
            W[1] -= X[i][1] * learn_rate
            b -= learn_rate
    return W, b


# This function runs the perceptron algorithm repeatedly on the dataset,
# and returns a few of the boundary lines obtained in the iterations,
# for plotting purposes.
# Feel free to play with the learning rate and the num_epochs,
# and see your results plotted below.


def trainPerceptronAlgorithm(X, y, learn_rate=0.01, num_epochs=25):
    x_min, x_max = min(X.T[0]), max(X.T[0])
    y_min, y_max = min(X.T[1]), max(X.T[1])
    W = np.array(np.random.rand(2, 1))
    b = np.random.rand(1)[0] + x_max
    # These are the solution lines that get plotted below.
    boundary_lines = []
    for i in range(num_epochs):
        # In each epoch, we apply the perceptron step.
        W, b = perceptronStep(X, y, W, b, learn_rate)
        boundary_lines.append((-W[0] / W[1], -b / W[1]))
    return boundary_lines
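
# Usage sketch (an illustration added here, not part of the original quiz
# file): each boundary line is stored as (slope, intercept), since
# W[0]*x1 + W[1]*x2 + b = 0 rearranges to x2 = -(W[0]/W[1])*x1 - b/W[1].
# Assuming the data.csv shown above (two features and a 0/1 label):
if __name__ == '__main__':
    import pandas as pd
    data = pd.read_csv('data.csv', header=None)
    X = np.array(data[[0, 1]])
    y = np.array(data[2])
    boundary_lines = trainPerceptronAlgorithm(X, y)
    print("final boundary (slope, intercept):", boundary_lines[-1])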
@@ -0,0 +1,32 @@
import pandas as pd

# TODO: Set weight1, weight2, and bias
weight1 = 1.0
weight2 = 1.0
bias = -1.25
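# With weight1 = weight2 = 1.0, only the input (1, 1) reaches
# 1 + 1 - 1.25 = 0.75 >= 0, so the unit fires exactly for AND.
# (Other choices work too: here, any bias b with -2 <= b < -1.)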


# DON'T CHANGE ANYTHING BELOW
# Inputs and outputs
test_inputs = [(0, 0), (0, 1), (1, 0), (1, 1)]
correct_outputs = [False, False, False, True]
outputs = []

# Generate and check output
for test_input, correct_output in zip(test_inputs, correct_outputs):
    linear_combination = weight1 * test_input[0] + weight2 * test_input[1] + bias
    output = int(linear_combination >= 0)
    is_correct_string = 'Yes' if output == correct_output else 'No'
    outputs.append([test_input[0], test_input[1],
                    linear_combination, output, is_correct_string])

# Print output
num_wrong = len([output[4] for output in outputs if output[4] == 'No'])
output_frame = pd.DataFrame(outputs, columns=[
    'Input 1', ' Input 2', ' Linear Combination', ' Activation Output', ' Is Correct'])
if not num_wrong:
    print('Nice! You got it all correct.\n')
else:
    print('You got {} wrong. Keep trying!\n'.format(num_wrong))
print(output_frame.to_string(index=False))
@@ -0,0 +1,29 @@
import pandas as pd

# TODO: Set weight1, weight2, and bias
weight1 = 0
weight2 = -1
bias = 0.5
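# weight1 = 0 ignores the first input; with weight2 = -1 and bias = 0.5 the
# combination is 0.5 - input2, which is >= 0 exactly when the second input
# is 0 -- this implements NOT on the second input.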


# DON'T CHANGE ANYTHING BELOW
# Inputs and outputs
test_inputs = [(0, 0), (0, 1), (1, 0), (1, 1)]
correct_outputs = [True, False, True, False]
outputs = []

# Generate and check output
for test_input, correct_output in zip(test_inputs, correct_outputs):
    linear_combination = weight1 * test_input[0] + weight2 * test_input[1] + bias
    output = int(linear_combination >= 0)
    is_correct_string = 'Yes' if output == correct_output else 'No'
    outputs.append([test_input[0], test_input[1], linear_combination, output, is_correct_string])

# Print output
num_wrong = len([output[4] for output in outputs if output[4] == 'No'])
output_frame = pd.DataFrame(outputs, columns=['Input 1', ' Input 2', ' Linear Combination', ' Activation Output', ' Is Correct'])
if not num_wrong:
    print('Nice! You got it all correct.\n')
else:
    print('You got {} wrong. Keep trying!\n'.format(num_wrong))
print(output_frame.to_string(index=False))
@@ -0,0 +1,10 @@
import numpy as np

# Write a function that takes as input two lists Y, P,
# and returns the float corresponding to their cross-entropy.


def cross_entropy(Y, P):
    # Cast the inputs to float arrays so the arithmetic below is vectorized.
    Y = np.asarray(Y, dtype=float)
    P = np.asarray(P, dtype=float)
    return -np.sum(Y * np.log(P) + (1 - Y) * np.log(1 - P))
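
# Quick check (illustrative values, not part of the original quiz):
# confident correct predictions give a small loss, confident wrong
# ones a large loss.
if __name__ == '__main__':
    print(cross_entropy([1, 1, 0], [0.8, 0.7, 0.1]))  # ~0.69
    print(cross_entropy([0, 0, 1], [0.8, 0.7, 0.1]))  # ~5.12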
@@ -0,0 +1,18 @@
import numpy as np

# Write a function that takes as input a list of numbers, and returns
# the list of values given by the softmax function.


def softmax(L):
    expL = np.exp(L)
    sumExpL = sum(expL)
    result = []
    for i in expL:
        result.append(i * 1.0 / sumExpL)
    return result

# Note: The function np.divide can also be used here, as follows:
# def softmax(L):
#     expL = np.exp(L)
#     return np.divide(expL, expL.sum())
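
# Quick check (illustrative): the outputs are positive and sum to 1,
# e.g. softmax([2, 1, 0]) ~= [0.665, 0.245, 0.090].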
File diff suppressed because one or more lines are too long
@@ -0,0 +1,115 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Solutions"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### One-hot encoding the rank"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"# Make dummy variables for rank\n",
"one_hot_data = pd.concat([data, pd.get_dummies(data['rank'], prefix='rank')], axis=1)\n",
"\n",
"# Drop the previous rank column\n",
"one_hot_data = one_hot_data.drop('rank', axis=1)\n",
"\n",
"# Print the first 10 rows of our data\n",
"one_hot_data[:10]"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Scaling the data"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"# Copying our data\n",
"processed_data = one_hot_data[:]\n",
"\n",
"# Scaling the columns\n",
"processed_data['gre'] = processed_data['gre']/800\n",
"processed_data['gpa'] = processed_data['gpa']/4.0\n",
"processed_data[:10]"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Backpropagating the error"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"def error_term_formula(x, y, output):\n",
"    return (y - output)*sigmoid_prime(x)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"## alternative solution ##\n",
"# you could also *only* use y and the output\n",
"# and calculate sigmoid_prime directly from the activated output!\n",
"\n",
"# below is an equally valid solution (it doesn't utilize x)\n",
"def error_term_formula(x, y, output):\n",
"    return (y-output) * output * (1 - output)\n",
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.3"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
@@ -0,0 +1,181 @@
# Importing pandas and numpy
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

# Reading the csv file into a pandas DataFrame
data = pd.read_csv('student_data.csv')

# Printing out the first 10 rows of our data
print(data[:10])


# Function to help us plot
def plot_points(data):
    X = np.array(data[["gre", "gpa"]])
    y = np.array(data["admit"])
    admitted = X[np.argwhere(y == 1)]
    rejected = X[np.argwhere(y == 0)]
    plt.scatter([s[0][0] for s in rejected],
                [s[0][1] for s in rejected],
                s=25, color='red', edgecolor='k')
    plt.scatter([s[0][0] for s in admitted],
                [s[0][1] for s in admitted],
                s=25, color='cyan', edgecolor='k')
    plt.xlabel('Test (GRE)')
    plt.ylabel('Grades (GPA)')


# Plotting the points
plot_points(data)
plt.show()


# Separating the ranks
data_rank1 = data[data["rank"] == 1]
data_rank2 = data[data["rank"] == 2]
data_rank3 = data[data["rank"] == 3]
data_rank4 = data[data["rank"] == 4]

# Plotting the graphs
plot_points(data_rank1)
plt.title("Rank 1")
plt.show()
plot_points(data_rank2)
plt.title("Rank 2")
plt.show()
plot_points(data_rank3)
plt.title("Rank 3")
plt.show()
plot_points(data_rank4)
plt.title("Rank 4")
plt.show()


# TODO: Make dummy variables for rank
one_hot_data = pd.concat([data, pd.get_dummies(data['rank'], prefix='rank')],
                         axis=1)

# TODO: Drop the previous rank column
one_hot_data = one_hot_data.drop('rank', axis=1)

# Print the first 10 rows of our data
print(one_hot_data[:10])
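# (Note: get_dummies with prefix='rank' produces indicator columns
#  rank_1 through rank_4, one per rank value in the data.)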


# Making a copy of our data
processed_data = one_hot_data[:]

# TODO: Scale the columns
processed_data['gre'] = processed_data['gre'] / 800
processed_data['gpa'] = processed_data['gpa'] / 4.0

# Printing the first 10 rows of our processed data
print(processed_data[:10])


sample = np.random.choice(processed_data.index, size=int(
    len(processed_data) * 0.9), replace=False)
train_data, test_data = processed_data.iloc[sample], processed_data.drop(
    sample)

print("Number of training samples is", len(train_data))
print("Number of testing samples is", len(test_data))
print(train_data[:10])
print(test_data[:10])


features = train_data.drop('admit', axis=1)
targets = train_data['admit']
features_test = test_data.drop('admit', axis=1)
targets_test = test_data['admit']

print(features[:10])
print(targets[:10])


# Activation (sigmoid) function
def sigmoid(x):
    return 1 / (1 + np.exp(-x))


def sigmoid_prime(x):
    return sigmoid(x) * (1 - sigmoid(x))


def error_formula(y, output):
    return - y * np.log(output) - (1 - y) * np.log(1 - output)


# TODO: Write the error term formula
def error_term_formula(x, y, output):
    return (y - output) * output * (1 - output)
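# (sigmoid_prime evaluated at the activation input h = np.dot(x, weights)
#  equals output * (1 - output), since output = sigmoid(h); this lets the
#  error term be computed from the activated output alone, matching the
#  alternative solution given in the notebook above.)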


# Neural Network hyperparameters
epochs = 1000
learnrate = 0.5

# Training function


def train_nn(features, targets, epochs, learnrate):

    # Use the same seed to make debugging easier
    np.random.seed(42)

    n_records, n_features = features.shape
    last_loss = None

    # Initialize weights
    weights = np.random.normal(scale=1 / n_features**.5, size=n_features)

    for e in range(epochs):
        del_w = np.zeros(weights.shape)
        for x, y in zip(features.values, targets):
            # Loop through all records, x is the input, y is the target

            # Activation of the output unit
            # Notice we multiply the inputs and the weights here
            # rather than storing h as a separate variable
            output = sigmoid(np.dot(x, weights))

            # The error, the target minus the network output
            error = error_formula(y, output)

            # The error term
            error_term = error_term_formula(x, y, output)

            # The gradient descent step, the error times the gradient times the inputs
            del_w += error_term * x

        # Update the weights here. The learning rate times the
        # change in weights, divided by the number of records to average
        weights += learnrate * del_w / n_records

        # Printing out the mean square error on the training set
        if e % (epochs / 10) == 0:
            out = sigmoid(np.dot(features, weights))
            loss = np.mean((out - targets) ** 2)
            print("Epoch:", e)
            if last_loss and last_loss < loss:
                print("Train loss: ", loss, " WARNING - Loss Increasing")
            else:
                print("Train loss: ", loss)
            last_loss = loss
            print("=========")
    print("Finished training!")
    return weights


weights = train_nn(features, targets, epochs, learnrate)


# Calculate accuracy on test data
test_out = sigmoid(np.dot(features_test, weights))
predictions = test_out > 0.5
accuracy = np.mean(predictions == targets_test)
print("Prediction accuracy: {:.3f}".format(accuracy))
@@ -0,0 +1,401 @@
admit,gre,gpa,rank
0,380,3.61,3
1,660,3.67,3
1,800,4,1
1,640,3.19,4
0,520,2.93,4
1,760,3,2
1,560,2.98,1
0,400,3.08,2
1,540,3.39,3
0,700,3.92,2
0,800,4,4
0,440,3.22,1
1,760,4,1
0,700,3.08,2
1,700,4,1
0,480,3.44,3
0,780,3.87,4
0,360,2.56,3
0,800,3.75,2
1,540,3.81,1
0,500,3.17,3
1,660,3.63,2
0,600,2.82,4
0,680,3.19,4
1,760,3.35,2
1,800,3.66,1
1,620,3.61,1
1,520,3.74,4
1,780,3.22,2
0,520,3.29,1
0,540,3.78,4
0,760,3.35,3
0,600,3.4,3
1,800,4,3
0,360,3.14,1
0,400,3.05,2
0,580,3.25,1
0,520,2.9,3
1,500,3.13,2
1,520,2.68,3
0,560,2.42,2
1,580,3.32,2
1,600,3.15,2
0,500,3.31,3
0,700,2.94,2
1,460,3.45,3
1,580,3.46,2
0,500,2.97,4
0,440,2.48,4
0,400,3.35,3
0,640,3.86,3
0,440,3.13,4
0,740,3.37,4
1,680,3.27,2
0,660,3.34,3
1,740,4,3
0,560,3.19,3
0,380,2.94,3
0,400,3.65,2
0,600,2.82,4
1,620,3.18,2
0,560,3.32,4
0,640,3.67,3
1,680,3.85,3
0,580,4,3
0,600,3.59,2
0,740,3.62,4
0,620,3.3,1
0,580,3.69,1
0,800,3.73,1
0,640,4,3
0,300,2.92,4
0,480,3.39,4
0,580,4,2
0,720,3.45,4
0,720,4,3
0,560,3.36,3
1,800,4,3
0,540,3.12,1
1,620,4,1
0,700,2.9,4
0,620,3.07,2
0,500,2.71,2
0,380,2.91,4
1,500,3.6,3
0,520,2.98,2
0,600,3.32,2
0,600,3.48,2
0,700,3.28,1
1,660,4,2
0,700,3.83,2
1,720,3.64,1
0,800,3.9,2
0,580,2.93,2
1,660,3.44,2
0,660,3.33,2
0,640,3.52,4
0,480,3.57,2
0,700,2.88,2
0,400,3.31,3
0,340,3.15,3
0,580,3.57,3
0,380,3.33,4
0,540,3.94,3
1,660,3.95,2
1,740,2.97,2
1,700,3.56,1
0,480,3.13,2
0,400,2.93,3
0,480,3.45,2
0,680,3.08,4
0,420,3.41,4
0,360,3,3
0,600,3.22,1
0,720,3.84,3
0,620,3.99,3
1,440,3.45,2
0,700,3.72,2
1,800,3.7,1
0,340,2.92,3
1,520,3.74,2
1,480,2.67,2
0,520,2.85,3
0,500,2.98,3
0,720,3.88,3
0,540,3.38,4
1,600,3.54,1
0,740,3.74,4
0,540,3.19,2
0,460,3.15,4
1,620,3.17,2
0,640,2.79,2
0,580,3.4,2
0,500,3.08,3
0,560,2.95,2
0,500,3.57,3
0,560,3.33,4
0,700,4,3
0,620,3.4,2
1,600,3.58,1
0,640,3.93,2
1,700,3.52,4
0,620,3.94,4
0,580,3.4,3
0,580,3.4,4
0,380,3.43,3
0,480,3.4,2
0,560,2.71,3
1,480,2.91,1
0,740,3.31,1
1,800,3.74,1
0,400,3.38,2
1,640,3.94,2
0,580,3.46,3
0,620,3.69,3
1,580,2.86,4
0,560,2.52,2
1,480,3.58,1
0,660,3.49,2
0,700,3.82,3
0,600,3.13,2
0,640,3.5,2
1,700,3.56,2
0,520,2.73,2
0,580,3.3,2
0,700,4,1
0,440,3.24,4
0,720,3.77,3
0,500,4,3
0,600,3.62,3
0,400,3.51,3
0,540,2.81,3
0,680,3.48,3
1,800,3.43,2
0,500,3.53,4
1,620,3.37,2
0,520,2.62,2
1,620,3.23,3
0,620,3.33,3
0,300,3.01,3
0,620,3.78,3
0,500,3.88,4
0,700,4,2
1,540,3.84,2
0,500,2.79,4
0,800,3.6,2
0,560,3.61,3
0,580,2.88,2
0,560,3.07,2
0,500,3.35,2
1,640,2.94,2
0,800,3.54,3
0,640,3.76,3
0,380,3.59,4
1,600,3.47,2
0,560,3.59,2
0,660,3.07,3
1,400,3.23,4
0,600,3.63,3
0,580,3.77,4
0,800,3.31,3
1,580,3.2,2
1,700,4,1
0,420,3.92,4
1,600,3.89,1
1,780,3.8,3
0,740,3.54,1
1,640,3.63,1
0,540,3.16,3
0,580,3.5,2
0,740,3.34,4
0,580,3.02,2
0,460,2.87,2
0,640,3.38,3
1,600,3.56,2
1,660,2.91,3
0,340,2.9,1
1,460,3.64,1
0,460,2.98,1
1,560,3.59,2
0,540,3.28,3
0,680,3.99,3
1,480,3.02,1
0,800,3.47,3
0,800,2.9,2
1,720,3.5,3
0,620,3.58,2
0,540,3.02,4
0,480,3.43,2
1,720,3.42,2
0,580,3.29,4
0,600,3.28,3
0,380,3.38,2
0,420,2.67,3
1,800,3.53,1
0,620,3.05,2
1,660,3.49,2
0,480,4,2
0,500,2.86,4
0,700,3.45,3
0,440,2.76,2
1,520,3.81,1
1,680,2.96,3
0,620,3.22,2
0,540,3.04,1
0,800,3.91,3
0,680,3.34,2
0,440,3.17,2
0,680,3.64,3
0,640,3.73,3
0,660,3.31,4
0,620,3.21,4
1,520,4,2
1,540,3.55,4
1,740,3.52,4
0,640,3.35,3
1,520,3.3,2
1,620,3.95,3
0,520,3.51,2
0,640,3.81,2
0,680,3.11,2
0,440,3.15,2
1,520,3.19,3
1,620,3.95,3
1,520,3.9,3
0,380,3.34,3
0,560,3.24,4
1,600,3.64,3
1,680,3.46,2
0,500,2.81,3
1,640,3.95,2
0,540,3.33,3
1,680,3.67,2
0,660,3.32,1
0,520,3.12,2
1,600,2.98,2
0,460,3.77,3
1,580,3.58,1
1,680,3,4
1,660,3.14,2
0,660,3.94,2
0,360,3.27,3
0,660,3.45,4
0,520,3.1,4
1,440,3.39,2
0,600,3.31,4
1,800,3.22,1
1,660,3.7,4
0,800,3.15,4
0,420,2.26,4
1,620,3.45,2
0,800,2.78,2
0,680,3.7,2
0,800,3.97,1
0,480,2.55,1
0,520,3.25,3
0,560,3.16,1
0,460,3.07,2
0,540,3.5,2
0,720,3.4,3
0,640,3.3,2
1,660,3.6,3
1,400,3.15,2
1,680,3.98,2
0,220,2.83,3
0,580,3.46,4
1,540,3.17,1
0,580,3.51,2
0,540,3.13,2
0,440,2.98,3
0,560,4,3
0,660,3.67,2
0,660,3.77,3
1,520,3.65,4
0,540,3.46,4
1,300,2.84,2
1,340,3,2
1,780,3.63,4
1,480,3.71,4
0,540,3.28,1
0,460,3.14,3
0,460,3.58,2
0,500,3.01,4
0,420,2.69,2
0,520,2.7,3
0,680,3.9,1
0,680,3.31,2
1,560,3.48,2
0,580,3.34,2
0,500,2.93,4
0,740,4,3
0,660,3.59,3
0,420,2.96,1
0,560,3.43,3
1,460,3.64,3
1,620,3.71,1
0,520,3.15,3
0,620,3.09,4
0,540,3.2,1
1,660,3.47,3
0,500,3.23,4
1,560,2.65,3
0,500,3.95,4
0,580,3.06,2
0,520,3.35,3
0,500,3.03,3
0,600,3.35,2
0,580,3.8,2
0,400,3.36,2
0,620,2.85,2
1,780,4,2
0,620,3.43,3
1,580,3.12,3
0,700,3.52,2
1,540,3.78,2
1,760,2.81,1
0,700,3.27,2
0,720,3.31,1
1,560,3.69,3
0,720,3.94,3
1,520,4,1
1,540,3.49,1
0,680,3.14,2
0,460,3.44,2
1,560,3.36,1
0,480,2.78,3
0,460,2.93,3
0,620,3.63,3
0,580,4,1
0,800,3.89,2
1,540,3.77,2
1,680,3.76,3
1,680,2.42,1
1,620,3.37,1
0,560,3.78,2
0,560,3.49,4
0,620,3.63,2
1,800,4,2
0,640,3.12,3
0,540,2.7,2
0,700,3.65,2
1,540,3.49,2
0,540,3.51,2
0,660,4,1
1,480,2.62,2
0,420,3.02,1
1,740,3.86,2
0,580,3.36,2
0,640,3.17,2
0,640,3.51,2
1,800,3.05,2
1,660,3.88,2
1,600,3.38,3
1,620,3.75,2
1,460,3.99,3
0,620,4,2
0,560,3.04,3
0,460,2.63,2
0,700,3.65,2
0,600,3.89,3
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long