python-VM/temp/numpy_test.py

import numpy as np
import itertools
""" Define our data """
# The Statespace
states = np.array(['Sleep', 'Run', 'Icecream'])
# Possible sequences of events
transitionName = np.array([['SS', 'SR', 'SI'],
                           ['RS', 'RR', 'RI'],
                           ['IS', 'IR', 'II']])
# Probabilities Matrix (transition matrix)
transitionMatrix = np.array([[0.2, 0.6, 0.2],
                             [0.1, 0.6, 0.3],
                             [0.2, 0.7, 0.1]])
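# Each row i gives P(next state | current state i), so every row must sum to 1;
# a quick sanity check is that np.allclose(transitionMatrix.sum(axis=1), 1) holds.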
# Starting state
startingState = 'Sleep'
# Steps to run
stepTime = 1
# End state you want to find probabilities of
endState = 'Run'
""" Set our parameters """
# Should we seed the results?
setSeed = False
seedNum = 27
""" Simulation parameters """
# Should we simulate more than once?
setSim = False
simNum = 100000
# A class that implements the Markov chain to forecast the state/mood:
class markov(object):
    """Simulates a Markov chain given its states, current state and
    transition matrix.

    Parameters:
        states: array containing all the possible states
        transitionName: a matrix (nested lists) naming every possible
            state transition
        transitionMatrix: a matrix (nested lists) containing the
            probabilities of moving to each state
        currentState: a string indicating the starting state
        days: an integer determining how many days (steps) to simulate"""
    def __init__(self, states: np.ndarray, transitionName: np.ndarray,
                 transitionMatrix: np.ndarray, currentState: str,
                 days: int):
        super(markov, self).__init__()
        self.states = states
        self.transitionName = transitionName
        self.transitionMatrix = transitionMatrix
        self.currentState = currentState
        self.days = days
    @staticmethod
    def setSeed(num: int):
        # Seed NumPy's global RNG so repeated runs are reproducible
        return np.random.seed(num)

    def forecast(self):
        print(f'Start state: {self.currentState}')
        # Shall store the sequence of states taken
        self.stateList = [self.currentState]
        i = 0
        # To calculate the probability of the stateList
        self.prob = 1
        while i != self.days:
            if self.currentState == 'Sleep':
                self.change = np.random.choice(self.transitionName[0],
                                               replace=True,
                                               p=self.transitionMatrix[0])
                if self.change == 'SS':
                    self.prob = self.prob * self.transitionMatrix[0][0]
                    self.stateList.append('Sleep')
                elif self.change == 'SR':
                    self.prob = self.prob * self.transitionMatrix[0][1]
                    self.currentState = 'Run'
                    self.stateList.append('Run')
                else:
                    self.prob = self.prob * self.transitionMatrix[0][2]
                    self.currentState = 'Icecream'
                    self.stateList.append('Icecream')
            elif self.currentState == 'Run':
                self.change = np.random.choice(self.transitionName[1],
                                               replace=True,
                                               p=self.transitionMatrix[1])
                if self.change == 'RR':
                    self.prob = self.prob * self.transitionMatrix[1][1]
                    self.stateList.append('Run')
                elif self.change == 'RS':
                    self.prob = self.prob * self.transitionMatrix[1][0]
                    self.currentState = 'Sleep'
                    self.stateList.append('Sleep')
                else:
                    self.prob = self.prob * self.transitionMatrix[1][2]
                    self.currentState = 'Icecream'
                    self.stateList.append('Icecream')
            elif self.currentState == 'Icecream':
                self.change = np.random.choice(self.transitionName[2],
                                               replace=True,
                                               p=self.transitionMatrix[2])
                if self.change == 'II':
                    self.prob = self.prob * self.transitionMatrix[2][2]
                    self.stateList.append('Icecream')
                elif self.change == 'IS':
                    self.prob = self.prob * self.transitionMatrix[2][0]
                    self.currentState = 'Sleep'
                    self.stateList.append('Sleep')
                else:
                    self.prob = self.prob * self.transitionMatrix[2][1]
                    self.currentState = 'Run'
                    self.stateList.append('Run')
            i += 1
        print(f'Possible states: {self.stateList}')
        print(f'End state after {self.days} steps: {self.currentState}')
        print(f'Probability of the whole sequence of states:'
              f' {self.prob}')
        return self.stateList
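
# Example usage (a quick sketch, separate from main() below): one chain run of 3 steps
#     chain = markov(states, transitionName, transitionMatrix, 'Sleep', 3)
#     walk = chain.forecast()   # e.g. ['Sleep', 'Run', 'Run', 'Icecream']
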
def main(*args, **kwargs):
    # Fall back to the module-level simNum when no override is passed in
    numSims = kwargs.get('simNum', simNum)
    # Check validity of transitionMatrix: every row must sum to 1
    for i in range(len(transitionMatrix)):
        if not np.isclose(sum(transitionMatrix[i]), 1):
            raise ValueError('Probabilities in each row should add to 1')
    # Set the seed so we can repeat with the same results
    if setSeed:
        markov.setSeed(seedNum)
    # Save our simulations:
    list_state = []
    count = 0
    # Simulate Multiple Times
    if setSim:
        for _ in itertools.repeat(None, numSims):
            markovChain = markov(states, transitionName,
                                 transitionMatrix, startingState,
                                 stepTime)
            list_state.append(markovChain.forecast())
    else:
        numSims = 1
        list_state.append(markov(states, transitionName,
                                 transitionMatrix, startingState,
                                 stepTime).forecast())
    # Count the simulated walks that finished in the requested end state
    for stateSeq in list_state:
        if stateSeq[-1] == endState:
            count += 1
    print(f'\nThe probability of starting in {startingState} and finishing'
          f' in {endState} after {stepTime} steps is {(count / numSims):.2%}')
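
# Cross-check (a sketch, not part of the original script): the Monte Carlo
# estimate printed above can be compared against the exact n-step probability,
# which is the (start, end) entry of the matrix power, e.g.
#     np.linalg.matrix_power(transitionMatrix, stepTime)[0, 1]
# For stepTime = 1 this is simply transitionMatrix[0][1] = 0.6 (Sleep -> Run).
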
if __name__ == '__main__':
    main()