updated Markov scripts for Jack

This commit is contained in:
2019-07-12 01:56:47 +01:00
parent 3e1cc20559
commit 0b1cb6ea2f


@@ -2,6 +2,17 @@ import numpy as np
 import itertools
 import functools
+""" Set our parameters """
+# Should we seed the results?
+setSeed = False
+seedNum = 27
+""" Simulation parameters """
+# Should we simulate more than once?
+setSim = True
+simNum = 1
 """ Define our data """
 # The Statespace
 states = np.array(['L', 'w', 'W'])
@@ -12,28 +23,23 @@ transitionName = np.array([['LL', 'Lw', 'LW'],
                            ['WL', 'Ww', 'WW']])
 # Probabilities Matrix (transition matrix)
-transitionMatrix = np.array([[0.8, 0.15, 0.05],
-                             [0.8, 0.15, 0.05],
-                             [0.8, 0.15, 0.05]])
+# Fill in
+transitionMatrix = None
 # Starting state
-startingState = 'L'
+startingState = 'w'
+initial_dist = None
 # Steps to run
 stepTime = 2
 # End state you want to find probabilites of
-endState = 'L'
-""" Set our parameters """
-# Should we seed the results?
-setSeed = False
-seedNum = 27
-""" Simulation parameters """
-# Should we simulate more than once?
-setSim = True
-simNum = 10
+endState = 'W'
+# Get P_steps
+p_steps = False
+# Get Stationary Dist
+stat_dist = False
 # A class that implements the Markov chain to forecast the state/mood:
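For reference, the matrix deleted in this hunk used the same row for every state, so one possible completion of the two new placeholders is sketched below; initial_dist is not specified anywhere in the commit, so a uniform vector is an assumption made purely for illustration.

import numpy as np

# Possible completion, reusing the matrix this commit removes.
# initial_dist is NOT fixed by the commit; uniform is assumed for illustration only.
transitionMatrix = np.array([[0.8, 0.15, 0.05],
                             [0.8, 0.15, 0.05],
                             [0.8, 0.15, 0.05]])
initial_dist = np.array([1 / 3, 1 / 3, 1 / 3])
assert np.allclose(transitionMatrix.sum(axis=1), 1.0)  # each row must sum to 1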
@@ -65,6 +71,18 @@ class markov(object):
     def setSeed(num: int):
         return np.random.seed(num)
+
+    @staticmethod
+    def p_steps(transitionMatrix, initial_dist, steps):
+        for _ in itertools.repeat(None, steps):
+            initial_dist = transitionMatrix.T.dot(initial_dist)
+        return initial_dist
+
+    @staticmethod
+    def stationary_dist(transitionMatrix, initial_dist, steps):
+        for _ in itertools.repeat(None, steps):
+            initial_dist = transitionMatrix.T.dot(initial_dist)
+        return initial_dist
     @functools.lru_cache(maxsize=128)
     def forecast(self):
         print(f'Start state: {self.currentState}')
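Both new helpers apply the same update, multiplying the current distribution by the transpose of the transition matrix once per step (pi <- P^T pi). p_steps therefore returns the exact distribution after `steps` steps, while stationary_dist, as added here, is the identical iteration and only approximates the stationary distribution when `steps` is large enough for the chain to mix. A minimal sketch of that update, assuming the matrix removed earlier and a uniform start (neither is fixed by the commit):

import numpy as np

P = np.array([[0.8, 0.15, 0.05],
              [0.8, 0.15, 0.05],
              [0.8, 0.15, 0.05]])        # assumed transition matrix
pi = np.array([1 / 3, 1 / 3, 1 / 3])     # assumed uniform initial distribution

for _ in range(2):                       # the same update p_steps performs for stepTime=2
    pi = P.T.dot(pi)
print(pi)                                # [0.8, 0.15, 0.05]; identical rows mix in one step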
@@ -91,46 +109,21 @@ class markov(object):
self.currentState = "W" self.currentState = "W"
self.stateList.append("W") self.stateList.append("W")
elif self.currentState == "w": elif self.currentState == "w":
self.change = np.random.choice(self.transitionName[1], # Fill in
replace=True, pass
p=transitionMatrix[1])
if self.change == "ww":
self.prob = self.prob * 0.15
self.stateList.append("w")
pass
elif self.change == "wL":
self.prob = self.prob * 0.8
self.currentState = "L"
self.stateList.append("L")
else:
self.prob = self.prob * 0.05
self.currentState = "W"
self.stateList.append("W")
elif self.currentState == "W": elif self.currentState == "W":
self.change = np.random.choice(self.transitionName[2], # Fill in
replace=True, pass
p=transitionMatrix[2])
if self.change == "WW":
self.prob = self.prob * 0.05
self.stateList.append("W")
pass
elif self.change == "WL":
self.prob = self.prob * 0.8
self.currentState = "L"
self.stateList.append("L")
else:
self.prob = self.prob * 0.15
self.currentState = "w"
self.stateList.append("w")
i += 1 i += 1
print(f'Possible states: {self.stateList}') print(f'Possible states: {self.stateList}')
print(f'End state after {self.steps} steps: {self.currentState}') print(f'End state after {self.steps} steps: {self.currentState}')
print(f'Probability of all the possible sequence of states:' print(f'Probability of all the possible sequence of states:'
f' {self.prob}') f' {self.prob}\n')
return self.stateList return self.stateList
def main(*args, **kwargs): def main(*args, **kwargs):
global startingState
try: try:
simNum = kwargs['simNum'] simNum = kwargs['simNum']
except KeyError: except KeyError:
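The removed "w" and "W" branches (now the "# Fill in" placeholders) followed the same pattern as the surviving "L" branch: sample a move from the matching row of transitionName, using the matching row of transitionMatrix as probabilities, then update prob, currentState and stateList. A self-contained sketch of that single step is below; step_once and its return convention are illustrative names, not part of the script, and the matrix values are the ones this commit deletes.

import numpy as np

states = np.array(['L', 'w', 'W'])
transitionName = np.array([['LL', 'Lw', 'LW'],
                           ['wL', 'ww', 'wW'],
                           ['WL', 'Ww', 'WW']])
transitionMatrix = np.array([[0.8, 0.15, 0.05],   # matrix removed by this commit,
                             [0.8, 0.15, 0.05],   # assumed here for illustration
                             [0.8, 0.15, 0.05]])

def step_once(currentState, prob):
    """Sample one transition from currentState and update the path probability."""
    row = int(np.where(states == currentState)[0][0])
    change = np.random.choice(transitionName[row], p=transitionMatrix[row])
    nextState = change[1]                          # second character names the new state
    col = int(np.where(states == nextState)[0][0])
    return nextState, prob * transitionMatrix[row, col]

state, prob = 'w', 1.0
state, prob = step_once(state, prob)
print(state, prob)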
@@ -149,11 +142,20 @@ def main(*args, **kwargs):
     count = 0
     # Simulate Multiple Times
     if setSim:
-        for _ in itertools.repeat(None, simNum):
-            markovChain = markov(states, transitionName,
-                                 transitionMatrix, startingState,
-                                 stepTime)
-            list_state.append(markovChain.forecast())
+        if initial_dist is not None:
+            startingState = np.random.choice(states, p=initial_dist)
+            for _ in itertools.repeat(None, simNum):
+                markovChain = markov(states, transitionName,
+                                     transitionMatrix, startingState,
+                                     stepTime)
+                list_state.append(markovChain.forecast())
+                startingState = np.random.choice(states, p=initial_dist)
+        else:
+            for _ in itertools.repeat(None, simNum):
+                markovChain = markov(states, transitionName,
+                                     transitionMatrix, startingState,
+                                     stepTime)
+                list_state.append(markovChain.forecast())
     else:
         for _ in range(1, 2):
             list_state.append(markov(states, transitionName,
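With the rewritten loop, supplying an initial_dist makes main redraw startingState from it before every run, so over many simulations the empirical frequency of each starting state should match initial_dist. A quick sanity check of that sampling step; the distribution values here are illustrative only, since the commit leaves initial_dist as None.

import numpy as np

states = np.array(['L', 'w', 'W'])
initial_dist = np.array([0.5, 0.3, 0.2])           # illustrative values only

draws = np.random.choice(states, size=10000, p=initial_dist)
for s in states:
    print(s, np.mean(draws == s))                  # each frequency should be close to initial_dist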
@@ -169,6 +171,11 @@ def main(*args, **kwargs):
         simNum = 1
     print(f'\nThe probability of starting in {startingState} and finishing'
           f' in {endState} after {stepTime} steps is {(count / simNum):.2%}')
+    if p_steps:
+        print(f'P_{stepTime} is '
+              f'{markov.p_steps(transitionMatrix, initial_dist, stepTime)}')
+    if stat_dist:
+        print(f'Stat dist is {markov.stationary_dist(transitionMatrix, initial_dist, stepTime)}')

 if __name__ == '__main__':
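Since stationary_dist only iterates the same update as p_steps, a direct check is possible: the stationary distribution is the left eigenvector of the transition matrix with eigenvalue 1 (for a matrix with identical rows, like the one removed in this commit, that is simply the common row). A hedged sketch of that check, assuming the same matrix:

import numpy as np

P = np.array([[0.8, 0.15, 0.05],
              [0.8, 0.15, 0.05],
              [0.8, 0.15, 0.05]])                  # assumed transition matrix

eigvals, eigvecs = np.linalg.eig(P.T)              # left eigenvectors of P
k = int(np.argmin(np.abs(eigvals - 1)))            # eigenvalue closest to 1
pi = np.real(eigvecs[:, k])
pi /= pi.sum()                                     # normalise to a probability vector
print(pi)                                          # ~[0.8, 0.15, 0.05]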