machine_learning_workbench 0.2.0 → 0.2.1

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz: 8107a7a0b51fc50d183e08c6ef60e43bd96d4ea1
-  data.tar.gz: 82f7af0fd0938433d749696122e9ddc588f49aa0
+  metadata.gz: f393f2183c3371081f694e47e35a14cf93997098
+  data.tar.gz: 754c861e440af0a40a5e328dfdde143a5e1bff59
 SHA512:
-  metadata.gz: 07a342404b87066ab60217ad7e2bde8cbf89d23bb7d49b504a280ce1142456a70eb24b44b6f6c76b4a4f10dfddf219acd8c7f37e85e7f91548b24771752a798c
-  data.tar.gz: bc99aec41e7f9365d47068e69398462e04bcc8dd38dda090ba46e00d27a1e5485bf96a6d55f270c1ffcd6fe8d93223c52014d479634f8e0d48673ebe2eb00bb6
+  metadata.gz: 9ed7f6be2d1ed63dd00f26dc8d4b4e47c3ece23c80192bc877c4acaa1c03e1f37a34a64e32b7c6f4af2993439492b39e20f17110f50a14add762345685485fff
+  data.tar.gz: e6d116d1a8011da42a24ad3b10209e3764cb3195e6f3149659b9d91a637a029b0ffe9b50f47f7cbed4fd9970ceb7a112884b54f0c566561b9121c434e1455d4c
@@ -17,23 +17,24 @@ module MachineLearningWorkbench::NeuralNetwork
     # @!attribute [r] struct
     # list of number of (inputs or) neurons in each layer
     # @return [Array<Integer>] structure of the network
-    attr_reader :layers, :state, :act_fn, :struct
+    attr_reader :layers, :state, :act_fn, :struct, :dtype


     ## Initialization

     # @param struct [Array<Integer>] list of layer sizes
     # @param act_fn [Symbol] choice of activation function for the neurons
-    def initialize struct, act_fn: nil
+    # @param dtype [NMatrix dtype] NMatrix dtype for weights and states
+    def initialize struct, act_fn: nil, dtype: :float32
       @struct = struct
-      @act_fn = self.class.act_fn(act_fn || :sigmoid)
+      @act_fn = self.get_act_fn(act_fn || :sigmoid)
       # @state holds both inputs, possibly recurrency, and bias
       # it is a complete input for the next layer, hence size from layer sizes
       @state = layer_row_sizes.collect do |size|
-        NMatrix.zeros([1, size], dtype: :float64)
+        NMatrix.zeros([1, size], dtype: dtype)
       end
       # to this, append a matrix to hold the final network output
-      @state.push NMatrix.zeros([1, nneurs(-1)], dtype: :float64)
+      @state.push NMatrix.zeros([1, nneurs(-1)], dtype: dtype)
       reset_state
     end

@@ -126,7 +127,7 @@ module MachineLearningWorkbench::NeuralNetwork
       raise ArgumentError unless weights.size == nweights
       weights_iter = weights.each
       @layers = layer_shapes.collect do |shape|
-        NMatrix.new(shape, dtype: :float64) { weights_iter.next }
+        NMatrix.new(shape, dtype: dtype) { weights_iter.next }
       end
       reset_state
       return true
@@ -137,7 +138,7 @@ module MachineLearningWorkbench::NeuralNetwork

     # The "fixed `1`" used in the layer's input
     def bias
-      @bias ||= NMatrix[[1], dtype: :float64]
+      @bias ||= NMatrix[[1], dtype: dtype]
     end

     # Activate the network on a given input
@@ -168,10 +169,10 @@ module MachineLearningWorkbench::NeuralNetwork

     # Activation function caller. Allows to cleanly define the activation function as one-dimensional, by calling it over the inputs and building a NMatrix to return.
     # @return [NMatrix] activations for one layer
-    def self.act_fn type, *args
+    def get_act_fn type, *args
       fn = send(type,*args)
       lambda do |inputs|
-        NMatrix.new([1, inputs.size], dtype: :float64) do |_,i|
+        NMatrix.new([1, inputs.size], dtype: dtype) do |_,i|
           # single-row matrix, indices are columns
           fn.call inputs[i]
         end
@@ -179,14 +180,14 @@ module MachineLearningWorkbench::NeuralNetwork
     end

     # Traditional sigmoid with variable steepness
-    def self.sigmoid k=0.5
+    def sigmoid k=0.5
       # k is steepness: 0<k<1 is flatter, 1<k is flatter
       # flatter makes activation less sensitive, better with large number of inputs
       lambda { |x| 1.0 / (Math.exp(-k * x) + 1.0) }
     end

     # Traditional logistic
-    def self.logistic
+    def logistic
       lambda { |x|
         exp = Math.exp(x)
         exp.infinite? ? exp : exp / (1.0 + exp)
@@ -195,7 +196,7 @@ module MachineLearningWorkbench::NeuralNetwork

     # LeCun hyperbolic activation
     # @see http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf Section 4.4
-    def self.lecun_hyperbolic
+    def lecun_hyperbolic
       lambda { |x| 1.7159 * Math.tanh(2.0*x/3.0) + 1e-3*x }
     end

@@ -208,4 +209,4 @@ module MachineLearningWorkbench::NeuralNetwork
       end
     end
   end
- end
+ end
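
Note: the hunks above thread a configurable NMatrix dtype (now defaulting to :float32 for networks) through every weight and state allocation, and turn the activation-function builders (sigmoid, logistic, lecun_hyperbolic) into instance methods so they can allocate with that dtype. A minimal usage sketch follows; the FeedForward subclass name is assumed from earlier releases and may differ, and passing dtype: :float64 simply restores the pre-0.2.1 precision.

  # Sketch only: assumes a concrete subclass (here called FeedForward) inherits this constructor unchanged.
  require 'machine_learning_workbench'

  net = MachineLearningWorkbench::NeuralNetwork::FeedForward.new(
    [2, 3, 1],                  # struct: 2 inputs, one hidden layer of 3, 1 output
    act_fn: :lecun_hyperbolic,  # any of the builders above (:sigmoid, :logistic, ...)
    dtype: :float64             # NMatrix dtype for weights and state; default is now :float32
  )
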
@@ -13,8 +13,8 @@ module MachineLearningWorkbench::NeuralNetwork
     # Activates a layer of the network
     # @param i [Integer] the layer to activate, zero-indexed
     def activate_layer i
-      act_fn.call( state[i].dot layers[i] )
+      act_fn.call(state[i].dot layers[i])
     end

   end
- end
+ end
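
For reference, activate_layer multiplies the layer's input row (the state, which includes the fixed bias) by that layer's weight matrix and maps the weighted sums through the activation lambda. A standalone sketch of the same computation with made-up sizes, using only plain NMatrix calls (variable names are illustrative, not the gem's):

  require 'nmatrix'

  state_i = NMatrix[[0.2, -1.0, 1.0], dtype: :float32]   # 1x3 input row; last entry is the bias
  layer_i = NMatrix.new([3, 2], 0.1, dtype: :float32)    # 3x2 weight matrix for a 2-neuron layer
  sums    = state_i.dot(layer_i)                         # 1x2 row of weighted sums
  act     = sums.to_a.flatten.map { |x| 1.0 / (Math.exp(-0.5 * x) + 1.0) }  # sigmoid with k = 0.5
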
@@ -2,7 +2,7 @@
 module MachineLearningWorkbench::Optimizer::NaturalEvolutionStrategies
   # Natural Evolution Strategies base class
   class Base
-    attr_reader :ndims, :mu, :sigma, :opt_type, :obj_fn, :parallel_fit, :id, :rng, :last_fits, :best, :rescale_popsize, :rescale_lrate
+    attr_reader :ndims, :mu, :sigma, :opt_type, :obj_fn, :parallel_fit, :id, :rng, :last_fits, :best, :rescale_popsize, :rescale_lrate, :dtype

     # NES object initialization
     # @param ndims [Integer] number of parameters to optimize
@@ -11,22 +11,27 @@ module MachineLearningWorkbench::Optimizer::NaturalEvolutionStrategies
     # @param rseed [Integer] allow for deterministic execution on rseed provided
     # @param mu_init [Numeric] values to initalize the distribution's mean
     # @param sigma_init [Numeric] values to initialize the distribution's covariance
-    # @param parallel_fit [boolean] whether the `obj_fn` should be passed all the individuals
-    # together. In the canonical case the fitness function always scores a single individual;
-    # in practical cases though it is easier to delegate the scoring parallelization to the
-    # external fitness function. Turning this to `true` will make the algorithm pass _an
-    # Array_ of individuals to the fitness function, rather than a single instance.
-    def initialize ndims, obj_fn, opt_type, rseed: nil, mu_init: 0, sigma_init: 1, parallel_fit: false, rescale_popsize: 1, rescale_lrate: 1
+    # @param parallel_fit [boolean] whether the `obj_fn` should be passed all the
+    # individuals together. In the canonical case the fitness function always scores a
+    # single individual; in practical cases though it is easier to delegate the scoring
+    # parallelization to the external fitness function. Turning this to `true` will make
+    # the algorithm pass _an Array_ of individuals to the fitness function, rather than
+    # a single instance.
+    # @param rescale_popsize [Float] scaling for the default population size
+    # @param rescale_lrate [Float] scaling for the default learning rate
+    # @param dtype [NMatrix dtype] NMatrix dtype for all matrix computation
+    def initialize ndims, obj_fn, opt_type, rseed: nil, mu_init: 0, sigma_init: 1, parallel_fit: false, rescale_popsize: 1, rescale_lrate: 1, dtype: :float64
       raise ArgumentError unless [:min, :max].include? opt_type
       raise ArgumentError unless obj_fn.respond_to? :call
       @ndims, @opt_type, @obj_fn, @parallel_fit = ndims, opt_type, obj_fn, parallel_fit
       @rescale_popsize, @rescale_lrate = rescale_popsize, rescale_lrate
-      @id = NMatrix.identity(ndims, dtype: :float64)
+      @id = NMatrix.identity(ndims, dtype: dtype)
       rseed ||= Random.new_seed
       # puts "NES rseed: #{s}" # currently disabled
       @rng = Random.new rseed
       @best = [(opt_type==:max ? -1 : 1) * Float::INFINITY, nil]
       @last_fits = []
+      @dtype = dtype
       initialize_distribution mu_init: mu_init, sigma_init: sigma_init
     end

@@ -58,7 +63,7 @@ module MachineLearningWorkbench::Optimizer::NaturalEvolutionStrategies
       total = log_range.reduce(:+)
       buf = 1.0/popsize
       vals = log_range.collect { |v| v / total - buf }.reverse
-      NMatrix[vals, dtype: :float64]
+      NMatrix[vals, dtype: dtype]
     end

     # (see #cmaes_utilities)
@@ -77,7 +82,7 @@ module MachineLearningWorkbench::Optimizer::NaturalEvolutionStrategies
     # popsize multivariate samples of length ndims
     # @return [NMatrix] standard normal samples
     def standard_normal_samples
-      NMatrix.new([popsize, ndims], dtype: :float64) { standard_normal_sample }
+      NMatrix.new([popsize, ndims], dtype: dtype) { standard_normal_sample }
     end

     # Move standard normal samples to current distribution
@@ -85,7 +90,7 @@ module MachineLearningWorkbench::Optimizer::NaturalEvolutionStrategies
     def move_inds inds
       # TODO: can we reduce the transpositions?
       # sigma.dot(inds.transpose).map(&mu.method(:+)).transpose
-      multi_mu = NMatrix[*inds.rows.times.collect {mu.to_a}, dtype: :float64].transpose
+      multi_mu = NMatrix[*inds.rows.times.collect {mu.to_a}, dtype: dtype].transpose
       (multi_mu + sigma.dot(inds.transpose)).transpose
       # sigma.dot(inds.transpose).transpose + inds.rows.times.collect {mu.to_a}.to_nm
     end
@@ -106,7 +111,7 @@ module MachineLearningWorkbench::Optimizer::NaturalEvolutionStrategies
       this_best = sorted.last.take(2)
       opt_cmp_fn = opt_type==:min ? :< : :>
       @best = this_best if this_best.first.send(opt_cmp_fn, best.first)
-      NMatrix[*sorted.map(&:last), dtype: :float64]
+      NMatrix[*sorted.map(&:last), dtype: dtype]
     end

     # @!method interface_methods
@@ -72,7 +72,7 @@ module MachineLearningWorkbench::Optimizer::NaturalEvolutionStrategies
       block_samples = sorted_samples.transpose

       # then back to NMatrix for usage in training
-      block_samples.map { |sample| NMatrix[*sample, dtype: :float64] }
+      block_samples.map { |sample| NMatrix[*sample, dtype: dtype] }
     end

     # duck-type the interface: [:train, :mu, :convergence, :save, :load]
@@ -105,4 +105,4 @@ module MachineLearningWorkbench::Optimizer::NaturalEvolutionStrategies
       end
     end
   end
- end
+ end
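
All NES variants now accept the same dtype keyword through the constructor shown above (defaulting to :float64, i.e. the previous behaviour), alongside the newly documented rescale_popsize and rescale_lrate scalings. A hedged usage sketch, assuming XNES is one of the concrete subclasses and that, with parallel_fit left false, each individual reaches the fitness function as an NMatrix row (names here are illustrative):

  # Minimize a simple sphere function over 5 parameters.
  obj = ->(ind) { ind.to_a.flatten.map { |x| x * x }.reduce(:+) }

  nes = MachineLearningWorkbench::Optimizer::NaturalEvolutionStrategies::XNES.new(
    5, obj, :min,
    rseed: 1,            # deterministic run
    dtype: :float32      # new in 0.2.1; omit to keep the :float64 default
  )
  50.times { nes.train }
  p nes.best             # [best fitness seen, corresponding individual]
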
@@ -6,10 +6,10 @@ module MachineLearningWorkbench::Optimizer::NaturalEvolutionStrategies
     attr_reader :variances

     def initialize_distribution mu_init: 0, sigma_init: 1
-      @mu = NMatrix.new([1, ndims], mu_init, dtype: :float64)
+      @mu = NMatrix.new([1, ndims], mu_init, dtype: dtype)
       sigma_init = [sigma_init]*ndims unless sigma_init.kind_of? Enumerable
-      @variances = NMatrix.new([1,ndims], sigma_init, dtype: :float64)
-      @sigma = NMatrix.diagonal(variances, dtype: :float64)
+      @variances = NMatrix.new([1,ndims], sigma_init, dtype: dtype)
+      @sigma = NMatrix.diagonal(variances, dtype: dtype)
     end

     def train picks: sorted_inds
@@ -17,7 +17,7 @@ module MachineLearningWorkbench::Optimizer::NaturalEvolutionStrategies
       g_sigma = utils.dot(picks**2 - 1)
       @mu += sigma.dot(g_mu.transpose).transpose * lrate
       @variances *= (g_sigma * lrate / 2).exponential
-      @sigma = NMatrix.diagonal(variances, dtype: :float64)
+      @sigma = NMatrix.diagonal(variances, dtype: dtype)
     end

     # Estimate algorithm convergence as total variance
@@ -32,9 +32,9 @@ module MachineLearningWorkbench::Optimizer::NaturalEvolutionStrategies
     def load data
       raise ArgumentError unless data.size == 2
       mu_ary, variances_ary = data
-      @mu = NMatrix[*mu_ary, dtype: :float64]
-      @variances = NMatrix[*variances_ary, dtype: :float64]
-      @sigma = NMatrix.diagonal(variances, dtype: :float64)
+      @mu = NMatrix[*mu_ary, dtype: dtype]
+      @variances = NMatrix[*variances_ary, dtype: dtype]
+      @sigma = NMatrix.diagonal(variances, dtype: dtype)
     end
   end
 end
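
For reference, SNES keeps one variance per dimension and rebuilds sigma as a diagonal matrix after every update; with the change above, the mean, the variances, and sigma all share the optimizer's dtype. A value-level sketch of that state right after initialization, using plain NMatrix calls rather than the gem's API:

  ndims     = 3
  mu        = NMatrix.new([1, ndims], 0, dtype: :float32)          # [[0.0, 0.0, 0.0]]
  variances = NMatrix.new([1, ndims], [1, 1, 1], dtype: :float32)  # one variance per dimension
  sigma     = NMatrix.diagonal(variances.to_a.flatten, dtype: :float32)  # 3x3 diagonal covariance
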
@@ -5,12 +5,12 @@ module MachineLearningWorkbench::Optimizer::NaturalEvolutionStrategies
     attr_reader :log_sigma

     def initialize_distribution mu_init: 0, sigma_init: 1
-      @mu = NMatrix.new([1, ndims], mu_init, dtype: :float64)
+      @mu = NMatrix.new([1, ndims], mu_init, dtype: dtype)
       sigma_init = [sigma_init]*ndims unless sigma_init.kind_of? Enumerable
-      @sigma = NMatrix.diag(sigma_init, dtype: :float64)
+      @sigma = NMatrix.diag(sigma_init, dtype: dtype)
       # Works with the log of sigma to avoid continuous decompositions (thanks Sun Yi)
       log_sigma_init = sigma_init.map &Math.method(:log)
-      @log_sigma = NMatrix.diag(log_sigma_init, dtype: :float64)
+      @log_sigma = NMatrix.diag(log_sigma_init, dtype: dtype)
     end

     def train picks: sorted_inds
@@ -38,8 +38,8 @@ module MachineLearningWorkbench::Optimizer::NaturalEvolutionStrategies
     def load data
       raise ArgumentError unless data.size == 2
       mu_ary, log_sigma_ary = data
-      @mu = NMatrix[*mu_ary, dtype: :float64]
-      @log_sigma = NMatrix[*log_sigma_ary, dtype: :float64]
+      @mu = NMatrix[*mu_ary, dtype: dtype]
+      @log_sigma = NMatrix[*log_sigma_ary, dtype: dtype]
       @sigma = log_sigma.exponential
     end
   end
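
XNES parameterizes the search distribution through log_sigma and recovers sigma with a matrix exponential (as in the load method above), which avoids repeatedly decomposing the covariance during training. A value-level sketch of that relation, assuming the NMatrix#exponential extension used by the gem above is available:

  sigma_init = [1.0, 2.0]
  log_sigma  = NMatrix.diag(sigma_init.map { |v| Math.log(v) }, dtype: :float64)
  sigma      = log_sigma.exponential   # diagonal matrix; exp of the log entries gives back 1.0 and 2.0
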
metadata CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: machine_learning_workbench
 version: !ruby/object:Gem::Version
-  version: 0.2.0
+  version: 0.2.1
 platform: ruby
 authors:
 - Giuseppe Cuccu