terrains + more than 2 layers
lmarzora committed May 7, 2017
1 parent 0d3a77d commit 47ed369
Showing 12 changed files with 4,884 additions and 8 deletions.
30 changes: 22 additions & 8 deletions multilayer_perceptron.m
@@ -1,11 +1,25 @@
 2;
-function output = get_output(entry,weights,activation_func)
-  output = activation_func(weights * entry);
+function output = get_output(entries, weights, neurons_per_layer, activation_func)
+  m = 0;
+  for i = 2:length(neurons_per_layer)
+    m = m + 1;
+    layer_entry{m} = [-1, zeros(1, neurons_per_layer(i-1))];
+  end
+  M = m;
+  i = 1;
+  for entry = entries
+    layer_entry{1}(2:end) = entry;
+    for m = 2:M
+      layer_entry{m}(2:end) = activation_func(weights{m-1} * layer_entry{m-1}');
+    end
+    output(i) = activation_func(weights{M} * layer_entry{M}');
+    i = i + 1;
+  end
 end

 function [weights,output,error] = multilayer_perceptron_learn(entries, expected_output, neurons_per_layer, activation_func, activation_der, learning_factor=.5, max_iterations=1000, tolerance=1e-5)
-  n = length(entries(:, 1));
+  n = length(entries(1,:));
   m = 0;
   for i = 2:length(neurons_per_layer)
     m = m + 1;
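
For context, the rewritten get_output now performs a full forward pass over an arbitrary number of layers: each layer's input vector carries a fixed -1 bias entry in front, and the outer loop walks the columns of entries one sample at a time. A minimal usage sketch (not part of this commit; the topology and all names below are illustrative), assuming a logistic activation:

% Hypothetical example: a 2-input, 4-hidden, 1-output network
% evaluated on the four boolean input patterns.
sigmoid = @(x) 1 ./ (1 + exp(-x));
neurons_per_layer = [2, 4, 1];    % inputs, hidden units, outputs
weights{1} = rand(4, 3) - 0.5;    % 4 hidden neurons x (1 bias + 2 inputs)
weights{2} = rand(1, 5) - 0.5;    % 1 output neuron  x (1 bias + 4 hidden)
entries = [0 0 1 1;
           0 1 0 1];              % one sample per column
out = get_output(entries, weights, neurons_per_layer, sigmoid)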
@@ -14,8 +28,8 @@
   end
   M = m;
   for iteration = 1:max_iterations
-    for index = randperm(2^n);
-      layer_entry{1}(2:end) = entries(:, index);
+    for index = randperm(n);
+      layer_entry{1}(2:end) = entries(:, index);
       for m = 2:M
         layer_entry{m}(2:end) = activation_func(weights{m-1} * layer_entry{m-1}');
       end
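
A note on the index loop above: randperm(n) draws one random ordering of the n training columns, so each epoch now visits every sample exactly once in shuffled order, while the old randperm(2^n) had the sample count hard-wired to the number of boolean patterns over n inputs. Illustration only (the values shown are an example):

n = 4;               % four training columns
order = randperm(n)  % e.g. order = [3 1 4 2]: each index exactly once per epoch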
@@ -24,13 +38,13 @@
       output(index);
       d{M} = activation_der(weights{M}*layer_entry{M}')*(expected_output(index) - output(index));
       for i = M-1:-1:1
-        d{i} = (weights{i+1}(:,2:end)*d{i+1}' * activation_der(weights{i}*layer_entry{i}'));
+        d{i} = (activation_der(weights{i}*layer_entry{i}')' .* sum(weights{i+1}(:,2:end) .* d{i+1}'))';
       end
       d;
       weights;
       for i = 1:M
-        delta_w = learning_factor * layer_entry{i}'*d{i};
-        weights{i} = weights{i} + delta_w';
+        delta_w = learning_factor * d{i} * layer_entry{i};
+        weights{i} = weights{i} + delta_w;
       end
     end
     error(iteration) = sum(abs(expected_output - output));
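
The reworked lines above vectorize the textbook backpropagation recurrence; weights{i+1}(:,2:end) drops the bias column so no error signal flows back to the constant -1 input. As a sketch in standard notation — f the activation, h^(i) = W^(i) x^(i) the pre-activation of layer i, eta the learning factor, and W~^(i+1) the next layer's weight matrix without its bias column — these three changed lines aim to compute:

\delta^{(M)} = f'\bigl(h^{(M)}\bigr)\,\bigl(y - \hat{y}\bigr), \qquad
\delta^{(i)} = f'\bigl(h^{(i)}\bigr) \odot \tilde{W}^{(i+1)\top}\,\delta^{(i+1)}, \qquad
\Delta W^{(i)} = \eta\,\delta^{(i)}\,x^{(i)\top}

The new delta_w = learning_factor * d{i} * layer_entry{i} forms the outer product delta^(i) x^(i)' directly, where the old version built the transposed product and then flipped it back.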
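Putting the two functions together, a hypothetical end-to-end run (everything below is illustrative, not from the repository; it assumes multilayer_perceptron_learn initializes its own weights, as its output-only weights argument and the elided portion of this diff suggest): train on XOR with one hidden layer, then re-evaluate with get_output.

% Hypothetical sketch; the leading 2; makes the file a script that defines
% both functions, so source it first: source multilayer_perceptron.m
sigmoid     = @(x) 1 ./ (1 + exp(-x));
sigmoid_der = @(x) sigmoid(x) .* (1 - sigmoid(x));
entries = [0 0 1 1;
           0 1 0 1];              % one XOR input per column
expected_output = [0 1 1 0];
neurons_per_layer = [2, 4, 1];
[w, out, err] = multilayer_perceptron_learn(entries, expected_output, ...
    neurons_per_layer, sigmoid, sigmoid_der);   % defaults: learning_factor=.5, max_iterations=1000
out = get_output(entries, w, neurons_per_layer, sigmoid)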
