import random
from typing import Dict, List, Tuple


class Solution:
    def runMarkovChain(self, start: str, num_steps: int,
                       transitions: List[Tuple[str, str, float]]) -> Dict[str, int]:
        # Build a map from each state to its outgoing (next_state, probability) pairs
        transition_map: Dict[str, List[Tuple[str, float]]] = {}
        for from_state, to_state, prob in transitions:
            if from_state not in transition_map:
                transition_map[from_state] = []
            transition_map[from_state].append((to_state, prob))

        # Initialize visit counts for every state that appears in the transitions
        visit_counts: Dict[str, int] = {}
        for from_state, to_state, _ in transitions:
            visit_counts[from_state] = 0
            visit_counts[to_state] = 0

        # Simulate the chain: record the current state, then sample the next one
        # by walking the cumulative probabilities of its outgoing transitions
        current_state = start
        for _ in range(num_steps):
            visit_counts[current_state] += 1
            rand = random.random()
            cumulative_probability = 0.0
            for to_state, prob in transition_map[current_state]:
                cumulative_probability += prob
                if rand < cumulative_probability:
                    current_state = to_state
                    break
        return visit_counts
# Example usage:
sol = Solution()
transitions = [
    ('a', 'a', 0.9),
    ('a', 'b', 0.075),
    ('a', 'c', 0.025),
    ('b', 'a', 0.15),
    ('b', 'b', 0.8),
    ('b', 'c', 0.05),
    ('c', 'a', 0.25),
    ('c', 'b', 0.25),
    ('c', 'c', 0.5),
]
result = sol.runMarkovChain('a', 5000, transitions)
print(result)
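# Rough expectation (an illustrative estimate, not exact output): over 5000 steps the
# visit counts should land near the chain's stationary distribution, which for these
# transition probabilities is about a: 0.625, b: 0.3125, c: 0.0625, i.e. roughly
# {'a': ~3125, 'b': ~1560, 'c': ~310}; individual runs will vary since sampling is random.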