updated markov for general case (incomplete)
@@ -29,18 +29,26 @@ transitionMatrix = np.array([[0.6, 0.1, 0.3],
 
 # Starting state
 startingState = 'w'
-initial_dist = np.array([0.3, 0.3, 0.4])
+initial_dist = np.array([0, 1, 0])
+# initial_dist = None
 
 # Steps to run
-stepTime = 2
+stepTime = 100
 # End state you want to find probabilities of
 endState = 'W'
 
-# Get P_steps
-p_steps = False
+""" Find the transition matrix for the given number of steps
+This finds the probabilities of going from state i to state j
+in N steps, where N is the stepTime set above
+E.g. 10 steps would give P_10, which is the probability of going from state i
+to state j in exactly 10 steps """
+transition_matrix_step = True
 
-# Get Stationary Dist
-stat_dist = False
+""" Get Stationary distribution of the Markov Chain
+This tells you what % of time you spend in each state if you
+simulated the Markov Chain for a high number of steps
+THIS NEEDS THE INITIAL DISTRIBUTION SETTING """
+stat_dist = True
 
 
 # A class that implements the Markov chain to forecast the state/mood:
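Note: the docstring above describes P_N for a general number of steps, but the commit is marked incomplete and the reworked transition_matrix_step in the next hunk only squares the matrix. A minimal standalone sketch of the general case, reusing the script's names; only the first matrix row appears in the hunk header, so the remaining rows here are placeholders.

import numpy as np

# Placeholder 3x3 row-stochastic matrix: the first row is taken from the
# hunk header above, the other two rows are illustrative only.
transitionMatrix = np.array([[0.6, 0.1, 0.3],
                             [0.2, 0.7, 0.1],
                             [0.3, 0.3, 0.4]])
stepTime = 10

# P_N: entry (i, j) is the probability of moving from state i to state j
# in exactly N steps, i.e. the N-th power of the one-step matrix.
p_n = np.linalg.matrix_power(transitionMatrix, stepTime)
print(p_n)
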
@@ -73,20 +81,22 @@ class markov(object):
         return np.random.seed(num)
 
     @staticmethod
-    def p_steps(transitionMatrix, initial_dist, steps):
+    def transition_matrix_step(transitionMatrix, steps):
-        for _ in itertools.repeat(None, steps):
-            initial_dist = transitionMatrix.T.dot(initial_dist)
-        return initial_dist
+        step_mat = np.matmul(transitionMatrix, transitionMatrix)
+        return step_mat
 
     @staticmethod
     def stationary_dist(transitionMatrix, initial_dist, steps):
-        for _ in itertools.repeat(None, steps):
-            initial_dist = transitionMatrix.T.dot(initial_dist)
-        return initial_dist
+        w, v = np.linalg.eig(transitionMatrix.T)
+        j_stationary = np.argmin(abs(w - 1.0))
+        p_stationary = v[:, j_stationary].real
+        p_stationary /= p_stationary.sum()
+        return p_stationary
 
     @functools.lru_cache(maxsize=128)
     def forecast(self):
-        print(f'Start state: {self.currentState}')
+        print(f'Start state: {self.currentState}\n')
         # Shall store the sequence of states taken
         self.stateList = [self.currentState]
         i = 0
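Note: the eigenvector route taken by the new stationary_dist can be sanity-checked against the fixed-point property pi @ P == pi, and against long power iteration, which is what the replaced loop body computed. A standalone sketch with a placeholder matrix:

import numpy as np

# Placeholder row-stochastic matrix; all entries positive, so the chain is
# regular and has a unique stationary distribution.
P = np.array([[0.6, 0.1, 0.3],
              [0.2, 0.7, 0.1],
              [0.3, 0.3, 0.4]])

# Eigenvector approach, mirroring the new stationary_dist above.
w, v = np.linalg.eig(P.T)
pi = v[:, np.argmin(abs(w - 1.0))].real
pi /= pi.sum()

# The stationary distribution is a fixed point of the chain.
assert np.allclose(pi @ P, pi)

# It also matches what repeated application of P.T to a starting
# distribution converges to, i.e. the removed power-iteration loop.
dist = np.array([0, 1, 0])
for _ in range(1000):
    dist = P.T.dot(dist)
assert np.allclose(dist, pi)
print(pi)
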
@@ -142,13 +152,20 @@ class markov(object):
                     self.currentState = "w"
                     self.stateList.append("w")
             i += 1
-        print(f'Possible states: {self.stateList}')
+        print(f'Path Markov Chain took in this iteration: {self.stateList}')
         print(f'End state after {self.steps} steps: {self.currentState}')
-        print(f'Probability of all the possible sequence of states:'
-              f' {self.prob}\n')
+        print(f'Probability of this specific path:'
+              f' {self.prob:.4f} or {self.prob:.2%}\n')
         return self.stateList
 
 
+def checker(item, name):
+    try:
+        item is not None
+    except Exception:
+        raise Exception(f'{name} is not set - set it and try again.')
+
+
 def main(*args, **kwargs):
     global startingState
     try:
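Note: as committed, checker never raises for a missing setting, because the bare expression item is not None cannot throw and the except branch is unreachable. A sketch of what the guard presumably intends:

def checker(item, name):
    # Fail loudly when a required setting (e.g. the initial distribution)
    # was left unset.
    if item is None:
        raise ValueError(f'{name} is not set - set it and try again.')
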
@@ -190,18 +207,21 @@ def main(*args, **kwargs):
                                       stepTime).forecast())
     for list in list_state:
         if(list[-1] == f'{endState!s}'):
-            print(True, list)
+            print(f'SUCCESS - path ended in the requested state {endState!s}'
+                  f':', list)
             count += 1
         else:
-            print(False, list)
+            print(f'FAILURE - path did not end in the requested state'
+                  f' {endState!s}:', list)
     if setSim is False:
         simNum = 1
     print(f'\nThe probability of starting in {startingState} and finishing'
-          f' in {endState} after {stepTime} steps is {(count / simNum):.2%}')
-    if p_steps:
-        print(f'P_{stepTime} is '
-              f'{markov.p_steps(transitionMatrix, initial_dist, stepTime)}')
+          f' in {endState} after {stepTime} steps is {(count / simNum):.2%}\n')
+    if transition_matrix_step:
+        print(f'P_{stepTime} is: \n'
+              f'{markov.transition_matrix_step(transitionMatrix, stepTime)}\n')
+    if stat_dist:
+        checker(initial_dist, 'initial distribution')
+        print(f'Stat dist is {markov.stationary_dist(transitionMatrix, initial_dist, stepTime)}')
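Note: the simulated figure printed by main() (count / simNum) can be cross-checked against the exact probability of finishing in endState, read off the N-step matrix. A standalone sketch; the matrix values, the state ordering and end_index are assumptions rather than repository values.

import numpy as np

# Placeholder matrix and settings mirroring the script's names.
transitionMatrix = np.array([[0.6, 0.1, 0.3],
                             [0.2, 0.7, 0.1],
                             [0.3, 0.3, 0.4]])
initial_dist = np.array([0, 1, 0])
stepTime = 100
end_index = 0  # hypothetical index of endState in the state ordering

# Rows are "from" states, so the distribution after N steps is
# initial_dist @ P^N; its end_index entry is the exact counterpart of the
# Monte Carlo estimate count / simNum.
dist_n = initial_dist @ np.linalg.matrix_power(transitionMatrix, stepTime)
print(f'Exact probability after {stepTime} steps: {dist_n[end_index]:.2%}')
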