machine_learning_workbench 0.5.0 → 0.5.1
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: afdd4f1bf99c2abbe10f4c348531f4efabde3c73
+  data.tar.gz: 2334e9b7e5c4f276f94e75dd50164f8fa78cc699
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 4f51ca6077627401bb1c27dfaed60cb84e5d70bdebe2f7c6d366abae299040e32ec7a4a1ad1ee3dc8ccc9aaa536a4125e91c2e97eaf043115d21e5e41d6a8e70
+  data.tar.gz: 884493c20fde5e8ac707f6c00b442d924a7dab5d783cf011926c2425b92116eea2bd1f10116a026b9b55db2210d918aa533eb0a6bc259b7bd0704df217b0985b
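The checksums.yaml inside a .gem package records SHA1 and SHA512 digests for the two archives it contains (metadata.gz and data.tar.gz). The new values above can be recomputed locally with Ruby's standard Digest library; a minimal sketch, assuming the gem has been unpacked so metadata.gz sits in the working directory (that path is an assumption, not part of this diff):

# Sketch: recompute and compare the digests recorded above for metadata.gz.
require "digest"

recorded_sha1   = "afdd4f1bf99c2abbe10f4c348531f4efabde3c73"
recorded_sha512 = "4f51ca6077627401bb1c27dfaed60cb84e5d70bdebe2f7c6d366abae299040e32ec7a4a1ad1ee3dc8ccc9aaa536a4125e91c2e97eaf043115d21e5e41d6a8e70"

puts Digest::SHA1.file("metadata.gz").hexdigest   == recorded_sha1   # expect true
puts Digest::SHA512.file("metadata.gz").hexdigest == recorded_sha512 # expect true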
data/examples/neuroevolution.rb
CHANGED
@@ -8,13 +8,13 @@ FFNN = WB::NeuralNetwork::FeedForward
 
 # Let's address the XOR problem, as it requires nonlinear fitting
 XOR = {[0,0] => 0, [1,0] => 1, [0,1] => 1, [1,1] => 0}
-# A classic [2,2,1]
-#
-#
-#
-#
-#
-# BDNES
+# A classic [2,2,1] (2 inputs, 2 hidden neurons, 1 output neuron) feed-forward
+# network with nonlinear activations can solve this problem.
+# To approximate more complex functions, keep the number of inputs and outputs
+# fixed (they depend on the problem) and increase the number and/or size of
+# hidden neurons. For example: [2, 10, 7, 4, 1].
+# NOTE: If your network grows above a few thousand weights, XNES may be too slow.
+# Try using SNES for large shallow networks or BDNES for deep networks.
 NET = FFNN.new [2,2,1], act_fn: :logistic
 # Note: the process is exactly the same, from instantiation to training, for recurrent
 # networks using the class `WB::NeuralNetwork::Recursive`.
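The new comments make the scaling advice concrete: inputs and outputs are fixed by the task, while hidden layers are free to grow. As a hedged sketch, the deeper topology they mention would be built with the same constructor (layer sizes are the illustrative ones from the comment, not code from the package):

# Sketch: the [2, 10, 7, 4, 1] topology mentioned in the comment above,
# built exactly like NET. Hidden sizes are illustrative.
DEEP_NET = FFNN.new [2, 10, 7, 4, 1], act_fn: :logistic
puts DEEP_NET.nweights  # if this grows into the thousands, consider SNES or BDNES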
@@ -35,8 +35,8 @@ def fitness weights
   # - observation: correct value, our target
   pred_obs = XOR.map do |input, obs|
     # The network can have an arbitrary number of output neurons
-    # Since here we have only one, we extract the value
-    output = NET.activate(input)
+    # Since here we have only one, we extract the value as the output
+    output = NET.activate(input)[0]
     # Here we interpret the output as classification
     pred = output > 0.5 ? 1 : 0
     # Finally accumulate prediction-observation pairs
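The hunk above only shows the body of the map; a sketch of how the surrounding fitness function plausibly reads after this change (loading the weights and the final score are assumptions, guided by the scoring line "out of #{XOR.size}" later in the diff):

# Sketch with flagged assumptions: full fitness function around the hunk above.
def fitness weights
  NET.load_weights weights                    # assumed API for setting candidate weights
  pred_obs = XOR.map do |input, obs|
    output = NET.activate(input)[0]           # single output neuron, per the diff
    pred = output > 0.5 ? 1 : 0               # interpret the output as classification
    [pred, obs]                               # accumulate prediction-observation pairs
  end
  pred_obs.count { |pred, obs| pred == obs }  # assumed score: number of correct predictions
end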
@@ -50,9 +50,12 @@ end
 # Next comes initializing the black-box stochastic optimization algorithm
 # We are searching for the network's weights, this gives us the search space dimensionality
 # We'll use XNES as we are working with less than 100 dimensions (weights)
-nes = XNES.new NET.nweights, method(:fitness), :max, rseed: 15
-# Note
-#
+nes = XNES.new NET.nweights, method(:fitness), :max, rseed: 0
+# Note: BDNES requires `NET.nweights_per_layer` rather than `NET.nweights` in initialization:
+# nes = WB::Optimizer::NaturalEvolutionStrategies::BDNES.new NET.nweights_per_layer,
+#   method(:fitness), :max, rseed: 10
+# The random seed is fixed here to ensure reproducible behavior
+# In a real task, it is best to use an oversized network, run more iterations, and try several seeds
 
 # NOTE: In practical applications it is best to delegate parallelization to the fitness
 # function instead of computing the fitness of one individual at a time. This can be
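The BDNES comment shows the pattern for swapping optimizers. By analogy, the SNES variant recommended earlier for large shallow networks would presumably be initialized the same way as XNES; a sketch (the constructor signature is an assumption mirroring the lines above):

# Sketch by analogy with the XNES and BDNES lines above; the SNES
# signature is assumed identical to XNES (dims, fitness, :min/:max, seed).
# nes = WB::Optimizer::NaturalEvolutionStrategies::SNES.new NET.nweights,
#         method(:fitness), :max, rseed: 0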
@@ -60,16 +63,20 @@ nes = XNES.new NET.nweights, method(:fitness), :max, rseed: 15
 # setting the `parallel_fit` switch to `true`:
 # nes = XNES.new NET.nweights,
 #   -> (genotypes) { Parallel.map genotypes, &method(:fitness) },
-#   :max, rseed:
+#   :max, rseed: 0, parallel_fit: true
 
 
-# Nothing left but to run the optimization algorithm
+# Nothing left but to run the optimization algorithm
+# Depending on the random seed (read: luck), a few epochs here will suffice
 50.times { nes.train }
 # OK! now remember, `NET` currently holds the weights of the last evaluation
 # Let's fetch the best individual found so far
 best_fit, best_weights = nes.best
 # Let's run them again to check they work
-result = fitness best_weights
-
-
+result = fitness best_weights
+# Note: if you defined a parallel fitness above, you'll need instead
+# result = fitness([best_weights])[0]
+puts "The found network achieves a score of #{result} out of #{XOR.size} in the XOR task"
+puts "Weights: #{best_weights.to_a}"
 puts "Done!"
+# That's it! 18 lines and you got a working neuroevolution algorithm, congrats :)
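Assembled end to end, the parallel variant sketched in the comments would run roughly as follows (a sketch assuming the `parallel` gem is installed; all arguments mirror the diff's own comments):

# Sketch: parallel fitness evaluation, assembled from the comments above.
require 'parallel'

nes = XNES.new NET.nweights,
        -> (genotypes) { Parallel.map genotypes, &method(:fitness) },
        :max, rseed: 0, parallel_fit: true
50.times { nes.train }
best_fit, best_weights = nes.best
result = fitness best_weights  # `fitness` is still per-individual here; only the lambda batches
puts "Score: #{result} out of #{XOR.size}"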
lib/machine_learning_workbench/optimizer/natural_evolution_strategies/base.rb
CHANGED
@@ -35,7 +35,7 @@ module MachineLearningWorkbench::Optimizer::NaturalEvolutionStrategies
 
   # Box-Muller transform: generates standard (unit) normal distribution samples
   # @return [Float] a single sample from a standard normal distribution
-  # @note Xumo::NArray implements this
+  # @note Xumo::NArray implements this but no random seed selection yet
   def standard_normal_sample
     rho = Math.sqrt(-2.0 * Math.log(rng.rand))
     theta = 2 * Math::PI * rng.rand
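The two lines above are the core of the Box-Muller transform, and it can be checked in isolation. A minimal standalone sketch (the cosine branch is an assumption, since the method body is cut off by the hunk; a seeded Random stands in for the gem's rng):

# Sketch: standalone Box-Muller transform, matching rho and theta above.
rng = Random.new(0)        # seedable, which is the point of the @note change
rho   = Math.sqrt(-2.0 * Math.log(rng.rand))
theta = 2 * Math::PI * rng.rand
z = rho * Math.cos(theta)  # assumed return branch; sin(theta) gives a second sample
puts z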
@@ -80,7 +80,7 @@ module MachineLearningWorkbench::Optimizer::NaturalEvolutionStrategies
   # Samples a standard normal distribution to construct a NArray of
   # popsize multivariate samples of length ndims
   # @return [NArray] standard normal samples
-  # @note Xumo::NArray implements this
+  # @note Xumo::NArray implements this but no random seed selection yet
   def standard_normal_samples
     NArray.zeros([popsize, ndims]).tap do |ret|
       ret.each_with_index { |_,*i| ret[*i] = standard_normal_sample }
@@ -104,8 +104,9 @@ module MachineLearningWorkbench::Optimizer::NaturalEvolutionStrategies
   # matched with individuals sorted by INCREASING fitness. Then reverse order for minimization.
   # @return standard normal samples sorted by the respective individuals' fitnesses
   def sorted_inds
-    #
-    samples =
+    # Xumo::NArray implements the Box-Muller, but no random seed (yet)
+    samples = standard_normal_samples
+    # samples = NArray.new([popsize, ndims]).rand_norm(0,1)
     inds = move_inds(samples)
     fits = parallel_fit ? obj_fn.call(inds) : inds.map(&obj_fn)
     # Quick cure for NaN fitnesses
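The commented-out alternative refers to Xumo::NArray's built-in normal sampler, which draws the whole matrix in one call but (per the notes above) without per-generator seed control. A sketch using plain Numo, which Xumo wraps; sizes are illustrative:

# Sketch: one-call sampling with rand_norm, as in the commented line above.
require 'numo/narray'
popsize, ndims = 20, 10                                    # illustrative sizes
samples = Numo::DFloat.new(popsize, ndims).rand_norm(0, 1)
puts samples.shape.inspect                                 # => [20, 10]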
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: machine_learning_workbench
 version: !ruby/object:Gem::Version
-  version: 0.5.0
+  version: 0.5.1
 platform: ruby
 authors:
 - Giuseppe Cuccu
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2018-04-
+date: 2018-04-06 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: bundler
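The raw metadata file shown above is YAML for a Gem::Specification; once extracted from the .gem archive it can be loaded directly. A sketch (the local 'metadata' path is an assumption):

# Sketch: load and inspect the gemspec metadata diffed above.
require 'rubygems'
spec = Gem::Specification.from_yaml(File.read('metadata'))
puts spec.name     # => "machine_learning_workbench"
puts spec.version  # => "0.5.1"
puts spec.date     # => 2018-04-06 00:00:00 UTC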