updating markov documentation

2019-12-05 04:19:00 +00:00
parent 6c84083c69
commit 9f5fb31fbc


@@ -19,12 +19,20 @@ states = np.array(['L', 'w', 'W'])
# Possible sequences of events
transitionName = np.array(
[['LL', 'Lw', 'LW'], ['wL', 'ww', 'wW'], ['WL', 'Ww', 'WW']]
[
['LL', 'Lw', 'LW'],
['wL', 'ww', 'wW'],
['WL', 'Ww', 'WW'],
]
)
# Probabilities Matrix (transition matrix)
transitionMatrix = np.array(
[[0.6, 0.1, 0.3], [0.1, 0.7, 0.2], [0.2, 0.2, 0.6]]
[
[0.6, 0.1, 0.3],
[0.1, 0.7, 0.2],
[0.2, 0.2, 0.6],
]
)
# Starting state - Given as a list of probabilities of starting
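Aside (not part of the diff): the reformatting above only changes layout, so a quick way to confirm the values are intact is to check that every row of the transition matrix still sums to 1. A minimal sketch using the same numbers:

import numpy as np

# Minimal sanity check on the reformatted matrix: each row of transition
# probabilities must sum to 1 for the chain to be valid.
transitionMatrix = np.array(
    [
        [0.6, 0.1, 0.3],
        [0.1, 0.7, 0.2],
        [0.2, 0.2, 0.6],
    ]
)
assert np.allclose(transitionMatrix.sum(axis=1), 1.0)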
@@ -57,17 +65,41 @@ stat_dist = True
# A class that implements the Markov chain to forecast the state/mood:
class markov(object):
"""simulates a markov chain given its states, current state and
"""Simulates a markov chain given its states, current state and
transition matrix.
Parameters:
states: 1-d array containing all the possible states
transitionName: 2-d array containing a list
of the all possible state directions
transitionMatrix: 2-d array containing all
the probabilites of moving to each state
currentState: a string indicating the starting state
steps: an integer determining how many steps (or times) to simulate"""
Attributes
----------
change : str
Random choice from transitionName
currentState : str
Current state of the Markov chain
prob : float
Cumulative probability of the simulated sequence of states
stateList : list
A list that tracks how `currentState` evolves over time
states : np.array
An array containing the possible states of the chain
steps : int
See below
transitionMatrix : np.array
Transition matrix
transitionName : np.array
Step representation of the probailities of the transisition matrix
Parameters
----------
states : np.array
See above
transitionName : np.array
See above
transitionMatrix : np.array
See above
currentState : str
See above
steps : int
How many steps to take
"""
def __init__(
self,
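As an illustration of the parameters documented above (a sketch only; the starting state 'w' and steps=20 are arbitrary example values, not taken from this diff), the class would be constructed from the module-level arrays shown earlier:

# Hypothetical usage of the constructor documented above, reusing the
# module-level states / transitionName / transitionMatrix arrays.
chain = markov(
    states=states,
    transitionName=transitionName,
    transitionMatrix=transitionMatrix,
    currentState='w',  # arbitrary example starting state
    steps=20,          # arbitrary example number of steps
)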
@@ -77,9 +109,9 @@ class markov(object):
currentState: str,
steps: int,
):
super(markov, self).__init__()
self.states = states
self.list = list
self.transitionName = transitionName
self.transitionMatrix = transitionMatrix
self.currentState = currentState
@@ -108,6 +140,13 @@ class markov(object):
@functools.lru_cache(maxsize=128)
def forecast(self):
"""Simulate the Markov chain
Returns
-------
np.array
The sequence of states visited during the simulation
"""
print(f'Start state: {self.currentState}\n')
# Shall store the sequence of states taken
self.stateList = [self.currentState]
@@ -115,7 +154,6 @@ class markov(object):
# To calculate the probability of the stateList
self.prob = 1
while i != self.steps:
import pdb; pdb.set_trace() # breakpoint 24e62119 //
if self.currentState == self.states[0]:
self.change = np.random.choice(
self.transitionName[0], replace=True, p=transitionMatrix[0]
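The loop body above draws the next move with np.random.choice, weighting the candidate transitions for the current state by that state's row of the transition matrix. A standalone sketch of a single step for state 'L' (same arrays, outside the class):

import numpy as np

transitionName = np.array(
    [['LL', 'Lw', 'LW'], ['wL', 'ww', 'wW'], ['WL', 'Ww', 'WW']]
)
transitionMatrix = np.array(
    [[0.6, 0.1, 0.3], [0.1, 0.7, 0.2], [0.2, 0.2, 0.6]]
)

# Pick one transition label out of 'LL', 'Lw', 'LW' with probabilities
# 0.6, 0.1, 0.3 respectively; e.g. 'LW' means moving from state 'L' to 'W'.
change = np.random.choice(transitionName[0], replace=True, p=transitionMatrix[0])
print(change)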
@@ -265,7 +303,9 @@ def main(*args, **kwargs):
if stat_dist:
checker(initial_dist, 'initial distribution')
print(
f'Stat dist is {markov.stationary_dist(transitionMatrix,initial_dist, stepTime)}'
f"""Stat dist is {
markov.stationary_dist(transitionMatrix,initial_dist, stepTime)
}"""
)
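For context on the value being printed (a sketch under the assumption that stationary_dist propagates the initial distribution forward; its actual implementation is not shown in this diff), the stationary distribution can be approximated by repeatedly applying the transition matrix to an initial distribution:

import numpy as np

transitionMatrix = np.array(
    [[0.6, 0.1, 0.3], [0.1, 0.7, 0.2], [0.2, 0.2, 0.6]]
)
dist = np.array([1.0, 0.0, 0.0])  # example initial distribution
for _ in range(1000):
    dist = dist @ transitionMatrix  # one step of pi_{t+1} = pi_t * P
print(dist)  # approaches the stationary distribution pi satisfying pi = pi * P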