completed part 1 of deep learning
@@ -0,0 +1,10 @@
import numpy as np
# Write a function that takes as input two lists Y, P,
# and returns the float corresponding to their cross-entropy.
def cross_entropy(Y, P):
    # Convert both inputs to float arrays so the arithmetic below is element-wise
    # (np.asarray replaces the deprecated np.float_ alias).
    Y = np.asarray(Y, dtype=float)
    P = np.asarray(P, dtype=float)
    return -np.sum(Y * np.log(P) + (1 - Y) * np.log(1 - P))
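A quick usage sketch (an editorial illustration, not part of the committed file), assuming the cross_entropy function above is in scope:

# With Y = [1, 0] and P = [0.8, 0.2] both terms contribute log(0.8),
# so the result is -2 * log(0.8) ≈ 0.4463.
print(cross_entropy([1, 0], [0.8, 0.2]))  # ≈ 0.4463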
@@ -0,0 +1,18 @@
import numpy as np
# Write a function that takes as input a list of numbers, and returns
# the list of values given by the softmax function.
def softmax(L):
    expL = np.exp(L)
    sumExpL = sum(expL)
    result = []
    for i in expL:
        result.append(i * 1.0 / sumExpL)
    return result

# Note: The function np.divide can also be used here, as follows:
# def softmax(L):
#     expL = np.exp(L)
#     return np.divide(expL, expL.sum())
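A brief usage sketch (editorial, not part of the committed file), assuming the softmax function above is in scope:

# exp([5, 6, 7]) = [e^5, e^6, e^7]; dividing each by their sum yields probabilities.
print(softmax([5, 6, 7]))  # ≈ [0.0900, 0.2447, 0.6652], and the entries sum to 1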