使用python实现HMM
一直想用隐马尔可夫模型做图像识别,但是python的scikit-learn组件包的hmm module已经不再支持了,需要安装hmmlearn组件;不过hmmlearn的多项式hmm每次出来的结果都不一样,难道是我用错了?后来只能去参考网上C语言的组件,模仿着把前向、后向算法"移植"到python里,费了好大功夫,总算结果一样了。把代码贴出来吧,省得自己不小心啥时候删掉。
1 #-*-coding:UTF-8-*-
2 '''
3 Created on 2014年9月25日
4 @author: Ayumi Phoenix
5 '''
6 import numpy as np
7
8 class HMM:
9 def __init__(self, Ann, Bnm, pi1n):
10 self.A = np.array(Ann)
11 self.B = np.array(Bnm)
12 self.pi = np.array(pi1n)
13 self.N = self.A.shape
14 self.M = self.B.shape
15
16 def printhmm(self):
17 print "=================================================="
18 print "HMM content: N =",self.N,",M =",self.M
19 for i in range(self.N):
20 if i==0:
21 print "hmm.A ",self.A," hmm.B ",self.B
22 else:
23 print " ",self.A," ",self.B
24 print "hmm.pi",self.pi
25 print "=================================================="
26
27 # 函数名称:Forward *功能:前向算法估计参数 *参数:phmm:指向HMM的指针
28 # T:观察值序列的长度 O:观察值序列
29 # alpha:运算中用到的临时数组 pprob:返回值,所要求的概率
30 def Forward(self,T,O,alpha,pprob):
31 # 1. Initialization 初始化
32 for i in range(self.N):
33 alpha = self.pi*self.B]
34
35 # 2. Induction 递归
36 for t in range(T-1):
37 for j in range(self.N):
38 sum = 0.0
39 for i in range(self.N):
40 sum += alpha*self.A
41 alpha =sum*self.B]
42 # 3. Termination 终止
43 sum = 0.0
44 for i in range(self.N):
45 sum += alpha
46 pprob *= sum
47
48 # 带修正的前向算法
49 def ForwardWithScale(self,T,O,alpha,scale,pprob):
50 scale = 0.0
51 # 1. Initialization
52 for i in range(self.N):
53 alpha = self.pi*self.B]
54 scale += alpha
55
56 for i in range(self.N):
57 alpha /= scale
58
59 # 2. Induction
60 for t in range(T-1):
61 scale = 0.0
62 for j in range(self.N):
63 sum = 0.0
64 for i in range(self.N):
65 sum += alpha*self.A
66
67 alpha = sum * self.B]
68 scale += alpha
69 for j in range(self.N):
70 alpha /= scale
71
72 # 3. Termination
73 for t in range(T):
74 pprob += np.log(scale)
75
76 # 函数名称:Backward * 功能:后向算法估计参数 * 参数:phmm:指向HMM的指针
77 # T:观察值序列的长度 O:观察值序列
78 # beta:运算中用到的临时数组 pprob:返回值,所要求的概率
79 def Backword(self,T,O,beta,pprob):
80 # 1. Intialization
81 for i in range(self.N):
82 beta = 1.0
83 # 2. Induction
84 for t in range(T-2,-1,-1):
85 for i in range(self.N):
86 sum = 0.0
87 for j in range(self.N):
88 sum += self.A*self.B]*beta
89 beta = sum
90
91 # 3. Termination
92 pprob = 0.0
93 for i in range(self.N):
94 pprob += self.pi*self.B]*beta
95
96 # 带修正的后向算法
97 def BackwardWithScale(self,T,O,beta,scale):
98 # 1. Intialization
99 for i in range(self.N):
100 beta = 1.0
101
102 # 2. Induction
103 for t in range(T-2,-1,-1):
104 for i in range(self.N):
105 sum = 0.0
106 for j in range(self.N):
107 sum += self.A*self.B]*beta
108 beta = sum / scale
109
110 # Viterbi算法
111 # 输入:A,B,pi,O 输出P(O|lambda)最大时Poptimal的路径I
112 def viterbi(self,O):
113 T = len(O)
114 # 初始化
115 delta = np.zeros((T,self.N),np.float)
116 phi = np.zeros((T,self.N),np.float)
117 I = np.zeros(T)
118 for i in range(self.N):
119 delta = self.pi*self.B]
120 phi = 0
121 # 递推
122 for t in range(1,T):
123 for i in range(self.N):
124 delta = self.B]*np.array(*self.Afor j in range(self.N)]).max()
125 phi = np.array(*self.Afor j in range(self.N)]).argmax()
126 # 终结
127 prob = delta.max()
128 I = delta.argmax()
129 # 状态序列求取
130 for t in range(T-2,-1,-1):
131 I = phi]
132 return I,prob
133
134 # 计算gamma : 时刻t时马尔可夫链处于状态Si的概率
135 def ComputeGamma(self, T, alpha, beta, gamma):
136 for t in range(T):
137 denominator = 0.0
138 for j in range(self.N):
139 gamma = alpha*beta
140 denominator += gamma
141 for i in range(self.N):
142 gamma = gamma/denominator
143
144 # 计算sai(i,j) 为给定训练序列O和模型lambda时:
145 # 时刻t是马尔可夫链处于Si状态,二时刻t+1处于Sj状态的概率
146 def ComputeXi(self,T,O,alpha,beta,gamma,xi):
147 for t in range(T-1):
148 sum = 0.0
149 for i in range(self.N):
150 for j in range(self.N):
151 xi = alpha*beta*self.A*self.B]
152 sum += xi
153 for i in range(self.N):
154 for j in range(self.N):
155 xi /= sum
156
157 # Baum-Welch算法
158 # 输入 L个观察序列O,初始模型:HMM={A,B,pi,N,M}
159 def BaumWelch(self,L,T,O,alpha,beta,gamma):
160 print "BaumWelch"
161 DELTA = 0.01 ; round = 0 ; flag = 1 ; probf =
162 delta = 0.0 ; deltaprev = 0.0 ; probprev = 0.0 ; ratio = 0.0 ; deltaprev = 10e-70
163
164 xi = np.zeros((T,self.N,self.N))
165 pi = np.zeros((T),np.float)
166 denominatorA = np.zeros((self.N),np.float)
167 denominatorB = np.zeros((self.N),np.float)
168 numeratorA = np.zeros((self.N,self.N),np.float)
169 numeratorB = np.zeros((self.N,self.M),np.float)
170 scale = np.zeros((T),np.float)
171
172 while True :
173 probf = 0
174 # E - step
175 for l in range(L):
176 self.ForwardWithScale(T,O,alpha,scale,probf)
177 self.BackwardWithScale(T,O,beta,scale)
178 self.ComputeGamma(T,alpha,beta,gamma)
179 self.ComputeXi(T,O,alpha,beta,gamma,xi)
180 for i in range(self.N):
181 pi += gamma
182 for t in range(T-1):
183 denominatorA += gamma
184 denominatorB += gamma
185 denominatorB += gamma
186
187 for j in range(self.N):
188 for t in range(T-1):
189 numeratorA += xi
190 for k in range(self.M):
191 for t in range(T):
192 if O == k:
193 numeratorB += gamma
194
195 # M - step
196 # 重估状态转移矩阵 和 观察概率矩阵
197 for i in range(self.N):
198 self.pi = 0.001/self.N + 0.999*pi/L
199 for j in range(self.N):
200 self.A = 0.001/self.N + 0.999*numeratorA/denominatorA
201 numeratorA = 0.0
202
203 for k in range(self.M):
204 self.B = 0.001/self.M + 0.999*numeratorB/denominatorB
205 numeratorB = 0.0
206
207 pi=denominatorA=denominatorB=0.0;
208
209 if flag == 1:
210 flag = 0
211 probprev = probf
212 ratio = 1
213 continue
214
215 delta = probf - probprev
216 ratio = delta / deltaprev
217 probprev = probf
218 deltaprev = delta
219 round += 1
220
221 if ratio
页:
[1]