Python实现RNN
def do_train(self, case, label, dim=0, learn=0.1):
    """Train the RNN on one (case, label) pair via backprop-through-time.

    The forward pass walks the sequence from the least-significant
    position to the most significant (binary-addition layout), recording
    the hidden state and output delta at every step; the backward pass
    then unrolls in reverse, accumulating the weight updates, which are
    applied once at the end.

    Args:
        case: sequence of input sequences; each element ``c`` supplies one
            input value per position (``c[pos]``).
            # assumes binary-addition layout (bit 0 = most significant index) — TODO confirm
        label: expected output value per position, same length ``dim``.
        dim: number of time steps to unroll.
        learn: learning rate applied to the accumulated updates.

    Returns:
        tuple ``(guess, error)``: the rounded per-position network output
        and the accumulated absolute output error.
    """
    input_updates = np.zeros_like(self.input_weights)
    output_updates = np.zeros_like(self.output_weights)
    hidden_updates = np.zeros_like(self.hidden_weights)
    guess = np.zeros_like(label)
    error = 0
    output_deltas = []
    # Seed the history with a zero hidden state for step t = -1.
    hidden_layer_history = [np.zeros((1, self.hidden_n))]

    # Forward pass: least-significant position first.
    for i in range(dim):
        pos = dim - i - 1
        x = np.array([[c[pos] for c in case]])
        y = np.array([[label[pos]]]).T
        hidden_layer = sigmoid(np.dot(x, self.input_weights) +
                               np.dot(hidden_layer_history[-1], self.hidden_weights))
        output_layer = sigmoid(np.dot(hidden_layer, self.output_weights))
        output_error = y - output_layer
        output_deltas.append(output_error * sigmoid_derivative(output_layer))
        error += np.abs(output_error)
        guess[pos] = np.round(output_layer[0][0])
        # deepcopy so later updates cannot alias the stored state
        hidden_layer_history.append(copy.deepcopy(hidden_layer))

    # Backward pass (BPTT): newest time step first.
    future_hidden_layer_delta = np.zeros(self.hidden_n)
    for i in range(dim):
        x = np.array([[c[i] for c in case]])
        hidden_layer = hidden_layer_history[-i - 1]
        prev_hidden_layer = hidden_layer_history[-i - 2]
        output_delta = output_deltas[-i - 1]
        # Hidden error = error flowing back from the next hidden state
        # plus error flowing back from this step's output.
        hidden_delta = (future_hidden_layer_delta.dot(self.hidden_weights.T) +
                        output_delta.dot(self.output_weights.T)) * sigmoid_derivative(hidden_layer)
        output_updates += np.atleast_2d(hidden_layer).T.dot(output_delta)
        hidden_updates += np.atleast_2d(prev_hidden_layer).T.dot(hidden_delta)
        input_updates += x.T.dot(hidden_delta)
        future_hidden_layer_delta = hidden_delta

    # Apply the accumulated gradients once, after the full unroll.
    self.input_weights += input_updates * learn
    self.output_weights += output_updates * learn
    self.hidden_weights += hidden_updates * learn
    return guess, error
页:
[1]